Compare commits


1 commit

Author | SHA1 | Message | Date
mwiegand | 2518cb6864 | wip | 2022-08-31 12:10:15 +02:00
281 changed files with 1525 additions and 5868 deletions

.envrc (12 changes)
View file

@ -1,7 +1,13 @@
#!/usr/bin/env bash
python3 -m venv .venv
source ./.venv/bin/activate
PATH_add .venv/bin
PATH_add bin
python3 -m pip install --upgrade pip
source_env ~/.local/share/direnv/pyenv
source_env ~/.local/share/direnv/venv
source_env ~/.local/share/direnv/bundlewrap
rm -rf .cache/bw/git_deploy
export BW_GIT_DEPLOY_CACHE=.cache/bw/git_deploy
export EXPERIMENTAL_UPLOAD_VIA_CAT=1
mkdir -p "$BW_GIT_DEPLOY_CACHE"
unset PS1

View file

@ -15,7 +15,7 @@ Raspberry pi as soundcard
# install bw fork
pip3 install --editable git+file:///Users/mwiegand/Projekte/bundlewrap-fork@main#egg=bundlewrap
pip3 install --editable git+file:///Users/mwiegand/Projekte/bundlewrap-fork#egg=bundlewrap
# monitor timers
@ -35,14 +35,3 @@ fi
```
telegraf: execd for daemons
TEST
# git signing
git config --global gpg.format ssh
git config --global commit.gpgsign true
git config user.name CroneKorkN
git config user.email i@ckn.li
git config user.signingkey "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILMVroYmswD4tLk6iH+2tvQiyaMe42yfONDsPDIdFv6I"
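
Verification is a separate step; a minimal sketch, assuming an allowed-signers file at `~/.config/git/allowed_signers` (the path is an example, any location works):

```
# map the signing principal to the public key (same key as above)
echo 'i@ckn.li ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILMVroYmswD4tLk6iH+2tvQiyaMe42yfONDsPDIdFv6I' >> ~/.config/git/allowed_signers
git config --global gpg.ssh.allowedSignersFile ~/.config/git/allowed_signers
# show the signature status of the latest commit
git log --show-signature -1
```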

View file

@ -1,32 +0,0 @@
#!/usr/bin/env python3
from sys import argv
from os.path import realpath, dirname
from shlex import quote
from bundlewrap.repo import Repository
repo = Repository(dirname(dirname(realpath(__file__))))
if len(argv) == 1:
for node in repo.nodes:
for name in node.metadata.get('left4dead2/servers', {}):
print(name)
exit(0)
server = argv[1]
command = argv[2]
remote_code = """
from rcon.source import Client
with Client('127.0.0.1', {port}, passwd='''{password}''') as client:
response = client.run('''{command}''')
print(response)
"""
for node in repo.nodes:
for name, conf in node.metadata.get('left4dead2/servers', {}).items():
if name == server:
response = node.run('python3 -c ' + quote(remote_code.format(port=conf['port'], password=conf['rcon_password'], command=command)))
print(response.stdout.decode())

View file

@ -10,6 +10,7 @@ nodes = [
for node in sorted(repo.nodes_in_group('debian'))
if not node.dummy
]
reboot_nodes = []
print('updating nodes:', sorted(node.name for node in nodes))
@ -23,13 +24,14 @@ for node in nodes:
print(node.run('DEBIAN_FRONTEND=noninteractive apt update').stdout.decode())
print(node.run('DEBIAN_FRONTEND=noninteractive apt list --upgradable').stdout.decode())
if int(node.run('DEBIAN_FRONTEND=noninteractive apt list --upgradable 2> /dev/null | grep upgradable | wc -l').stdout.decode()):
print(node.run('DEBIAN_FRONTEND=noninteractive apt -qy full-upgrade').stdout.decode())
print(node.run('DEBIAN_FRONTEND=noninteractive apt -y dist-upgrade').stdout.decode())
reboot_nodes.append(node)
# REBOOT IN ORDER
wireguard_servers = [
node
for node in nodes
for node in reboot_nodes
if node.has_bundle('wireguard')
and (
ip_interface(node.metadata.get('wireguard/my_ip')).network.prefixlen <
@ -39,7 +41,7 @@ wireguard_servers = [
wireguard_s2s = [
node
for node in nodes
for node in reboot_nodes
if node.has_bundle('wireguard')
and (
ip_interface(node.metadata.get('wireguard/my_ip')).network.prefixlen ==
@ -49,7 +51,7 @@ wireguard_s2s = [
everything_else = [
node
for node in nodes
for node in reboot_nodes
if not node.has_bundle('wireguard')
]
@ -60,11 +62,8 @@ for node in [
*wireguard_s2s,
*wireguard_servers,
]:
print('rebooting', node.name)
try:
if node.run('test -e /var/run/reboot-required', may_fail=True).return_code == 0:
print('rebooting', node.name)
print(node.run('systemctl reboot').stdout.decode())
else:
print('not rebooting', node.name)
print(node.run('systemctl reboot').stdout.decode())
except Exception as e:
print(e)

View file

@ -5,17 +5,9 @@ from os.path import realpath, dirname
from sys import argv
from ipaddress import ip_network, ip_interface
if len(argv) != 3:
print(f'usage: {argv[0]} <node> <client>')
exit(1)
repo = Repository(dirname(dirname(realpath(__file__))))
server_node = repo.get_node(argv[1])
if argv[2] not in server_node.metadata.get('wireguard/clients'):
print(f'client {argv[2]} not found in: {server_node.metadata.get("wireguard/clients").keys()}')
exit(1)
data = server_node.metadata.get(f'wireguard/clients/{argv[2]}')
vpn_network = ip_interface(server_node.metadata.get('wireguard/my_ip')).network
@ -28,7 +20,9 @@ for peer in server_node.metadata.get('wireguard/s2s').values():
if not ip_network(network).subnet_of(vpn_network):
allowed_ips.append(ip_network(network))
conf = f'''
conf = \
f'''>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
[Interface]
PrivateKey = {repo.libs.wireguard.privkey(data['peer_id'])}
ListenPort = 51820
@ -41,12 +35,11 @@ PresharedKey = {repo.libs.wireguard.psk(data['peer_id'], server_node.metadata.ge
AllowedIPs = {', '.join(str(client_route) for client_route in sorted(allowed_ips))}
Endpoint = {ip_interface(server_node.metadata.get('network/external/ipv4')).ip}:51820
PersistentKeepalive = 10
'''
print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'''
print(conf)
print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
if input("print qrcode? [Yn]: ").upper() in ['', 'Y']:
if input("print qrcode? [yN]: ").upper() == 'Y':
import pyqrcode
print(pyqrcode.create(conf).terminal(quiet_zone=1))

View file

@ -1,6 +1,3 @@
# https://manpages.debian.org/latest/apt/sources.list.5.de.html
# https://repolib.readthedocs.io/en/latest/deb822-format.html
```python
{
'apt': {
@ -8,29 +5,8 @@
'apt-transport-https': {},
},
'sources': {
'debian': {
'types': { # optional, defaults to `{'deb'}`
'deb',
'deb-src',
},
'urls': {
'https://deb.debian.org/debian',
},
'suites': { # at least one
'{codename}',
'{codename}-updates',
'{codename}-backports',
},
'components': { # optional
'main',
'contrib',
'non-free',
},
# key:
# - optional, defaults to source name (`debian` in this example)
# - place key under data/apt/keys/debian-12.{asc|gpg}
'key': 'debian-{version}',
},
# place key under data/apt/keys/packages.cloud.google.com.{asc|gpg}
'deb https://packages.cloud.google.com/apt cloud-sdk main',
},
},
}

View file

@ -1,15 +0,0 @@
#!/bin/bash
apt update -qq --silent 2> /dev/null
UPGRADABLE=$(apt list --upgradable -qq 2> /dev/null | cut -d '/' -f 1)
if test "$UPGRADABLE" != ""
then
echo "$(wc -l <<< $UPGRADABLE) package(s) upgradable:"
echo
echo "$UPGRADABLE"
exit 1
else
exit 0
fi

View file

@ -1,66 +1,32 @@
# TODO pin repo: https://superuser.com/a/1595920
from os.path import join
from urllib.parse import urlparse
from glob import glob
from os.path import join, basename
directories = {
'/etc/apt': {
'purge': True,
'triggers': {
'action:apt_update',
},
},
'/etc/apt/apt.conf.d': {
# existence is expected
'purge': True,
'triggers': {
'action:apt_update',
},
},
'/etc/apt/keyrings': {
# https://askubuntu.com/a/1307181
'purge': True,
'triggers': {
'action:apt_update',
},
},
# '/etc/apt/listchanges.conf.d': {
# 'purge': True,
# 'triggers': {
# 'action:apt_update',
# },
# },
'/etc/apt/preferences.d': {
'purge': True,
'triggers': {
'action:apt_update',
},
},
'/etc/apt/sources.list.d': {
'purge': True,
'triggers': {
'action:apt_update',
},
},
'/etc/apt/trusted.gpg.d': {
'purge': True,
'triggers': {
'action:apt_update',
},
},
'/etc/apt/preferences.d': {
'purge': True,
'triggers': {
'action:apt_update',
},
},
}
files = {
'/etc/apt/apt.conf': {
'content': repo.libs.apt.render_apt_conf(node.metadata.get('apt/config')),
'triggers': {
'action:apt_update',
},
},
'/etc/apt/sources.list': {
'content': '# managed by bundlewrap\n',
'triggers': {
'action:apt_update',
},
},
# '/etc/apt/listchanges.conf': {
# 'content': repo.libs.ini.dumps(node.metadata.get('apt/list_changes')),
# },
'/usr/lib/nagios/plugins/check_apt_upgradable': {
'mode': '0755',
'content': '# managed'
},
}
@ -75,22 +41,41 @@ actions = {
},
}
# create sources.lists and respective keyfiles
# group sources by apt server hostname
for name, config in node.metadata.get('apt/sources').items():
# place keyfile
keyfile_destination_path = repo.libs.apt.format_variables(node, config['options']['Signed-By'])
files[keyfile_destination_path] = {
'source': join(repo.path, 'data', 'apt', 'keys', basename(keyfile_destination_path)),
'content_type': 'binary',
hosts = {}
for source_string in node.metadata.get('apt/sources'):
source = repo.libs.apt.AptSource(source_string)
hosts\
.setdefault(source.url.hostname, list())\
.append(source)
# create sources lists and keyfiles
for host, sources in hosts.items():
keyfile = basename(glob(join(repo.path, 'data', 'apt', 'keys', f'{host}.*'))[0])
destination_path = f'/etc/apt/trusted.gpg.d/{keyfile}'
for source in sources:
source.options['signed-by'] = [destination_path]
files[f'/etc/apt/sources.list.d/{host}.list'] = {
'content': '\n'.join(sorted(set(
str(source).format(
release=node.metadata.get('os_release'),
version=node.os_version[0], # WIP crystal
)
for source in sources
))),
'triggers': {
'action:apt_update',
},
}
# place sources.list
files[f'/etc/apt/sources.list.d/{name}.sources'] = {
'content': repo.libs.apt.render_source(node, name),
files[destination_path] = {
'source': join(repo.path, 'data', 'apt', 'keys', keyfile),
'content_type': 'binary',
'triggers': {
'action:apt_update',
},
@ -98,14 +83,14 @@ for name, config in node.metadata.get('apt/sources').items():
# create backport pinnings
for package, options in node.metadata.get('apt/packages', {}).items():
for package, options in node.metadata.get('apt/packages', {}).items():
pkg_apt[package] = options
if pkg_apt[package].pop('backports', False):
files[f'/etc/apt/preferences.d/{package}'] = {
'content': '\n'.join([
f"Package: {package}",
f"Pin: release a={node.metadata.get('os_codename')}-backports",
f"Pin: release a={node.metadata.get('os_release')}-backports",
f"Pin-Priority: 900",
]),
'needed_by': [
@ -115,25 +100,3 @@ for package, options in node.metadata.get('apt/packages', {}).items():
'action:apt_update',
},
}
# unattended upgrades
#
# unattended-upgrades.service: delays shutdown if necessary
# apt-daily.timer: performs apt update
# apt-daily-upgrade.timer: performs apt upgrade
svc_systemd['unattended-upgrades.service'] = {
'needs': [
'pkg_apt:unattended-upgrades',
],
}
svc_systemd['apt-daily.timer'] = {
'needs': [
'pkg_apt:unattended-upgrades',
],
}
svc_systemd['apt-daily-upgrade.timer'] = {
'needs': [
'pkg_apt:unattended-upgrades',
],
}
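# To confirm on a node that these units are actually active (plain systemctl calls, not managed by this bundle):
#   systemctl status unattended-upgrades.service --no-pager
#   systemctl list-timers apt-daily.timer apt-daily-upgrade.timer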

View file

@ -1,177 +1,6 @@
defaults = {
'apt': {
'packages': {
'apt-listchanges': {
'installed': False,
},
},
'config': {
'DPkg': {
'Pre-Install-Pkgs': {
'/usr/sbin/dpkg-preconfigure --apt || true',
},
'Post-Invoke': {
# keep package cache empty
'/bin/rm -f /var/cache/apt/archives/*.deb || true',
},
'Options': {
# https://unix.stackexchange.com/a/642541/357916
'--force-confold',
'--force-confdef',
},
},
'APT': {
'NeverAutoRemove': {
'^firmware-linux.*',
'^linux-firmware$',
'^linux-image-[a-z0-9]*$',
'^linux-image-[a-z0-9]*-[a-z0-9]*$',
},
'VersionedKernelPackages': {
# kernels
'linux-.*',
'kfreebsd-.*',
'gnumach-.*',
# (out-of-tree) modules
'.*-modules',
'.*-kernel',
},
'Never-MarkAuto-Sections': {
'metapackages',
'tasks',
},
'Move-Autobit-Sections': {
'oldlibs',
},
'Update': {
# https://unix.stackexchange.com/a/653377/357916
'Error-Mode': 'any',
},
},
},
'sources': {},
},
'monitoring': {
'services': {
'apt upgradable': {
'vars.command': '/usr/lib/nagios/plugins/check_apt_upgradable',
'vars.sudo': True,
'check_interval': '1h',
},
'current kernel': {
'vars.command': 'ls /boot/vmlinuz-* | sort -V | tail -n 1 | xargs -n1 basename | cut -d "-" -f 2- | grep -q "^$(uname -r)$"',
'check_interval': '1h',
},
'apt reboot-required': {
'vars.command': 'ls /var/run/reboot-required 2> /dev/null && exit 1 || exit 0',
'check_interval': '1h',
},
},
'packages': {},
'sources': set(),
},
}
@metadata_reactor.provides(
'apt/sources',
)
def key(metadata):
return {
'apt': {
'sources': {
source_name: {
'key': source_name,
}
for source_name, source_config in metadata.get('apt/sources').items()
if 'key' not in source_config
},
},
}
@metadata_reactor.provides(
'apt/sources',
)
def signed_by(metadata):
return {
'apt': {
'sources': {
source_name: {
'options': {
'Signed-By': '/etc/apt/keyrings/' + metadata.get(f'apt/sources/{source_name}/key') + '.' + repo.libs.apt.find_keyfile_extension(node, metadata.get(f'apt/sources/{source_name}/key')),
},
}
for source_name in metadata.get('apt/sources')
},
},
}
@metadata_reactor.provides(
'apt/config',
'apt/packages',
)
def unattended_upgrades(metadata):
return {
'apt': {
'config': {
'APT': {
'Periodic': {
'Update-Package-Lists': '1',
'Unattended-Upgrade': '1',
},
},
'Unattended-Upgrade': {
'Origins-Pattern': {
"origin=*",
},
},
},
'packages': {
'unattended-upgrades': {},
},
},
}
# @metadata_reactor.provides(
# 'apt/config',
# 'apt/list_changes',
# )
# def listchanges(metadata):
# return {
# 'apt': {
# 'config': {
# 'DPkg': {
# 'Pre-Install-Pkgs': {
# '/usr/bin/apt-listchanges --apt || test $? -lt 10',
# },
# 'Tools': {
# 'Options': {
# '/usr/bin/apt-listchanges': {
# 'Version': '2',
# 'InfoFD': '20',
# },
# },
# },
# },
# 'Dir': {
# 'Etc': {
# 'apt-listchanges-main': 'listchanges.conf',
# 'apt-listchanges-parts': 'listchanges.conf.d',
# },
# },
# },
# 'list_changes': {
# 'apt': {
# 'frontend': 'pager',
# 'which': 'news',
# 'email_address': 'root',
# 'email_format': 'text',
# 'confirm': 'false',
# 'headers': 'false',
# 'reverse': 'false',
# 'save_seen': '/var/lib/apt/listchanges.db',
# },
# },
# },
# }

View file

@ -1,47 +0,0 @@
#!/usr/bin/env python3
import json
from subprocess import check_output
from datetime import datetime, timedelta
now = datetime.now()
two_days_ago = now - timedelta(days=2)
with open('/etc/backup-freshness-check.json', 'r') as file:
config = json.load(file)
local_datasets = check_output(['zfs', 'list', '-H', '-o', 'name']).decode().splitlines()
errors = set()
for dataset in config['datasets']:
if f'tank/{dataset}' not in local_datasets:
errors.add(f'dataset "{dataset}" not present at all')
continue
snapshots = [
snapshot
for snapshot in check_output(['zfs', 'list', '-H', '-o', 'name', '-t', 'snapshot', f'tank/{dataset}', '-s', 'creation']).decode().splitlines()
if f"@{config['prefix']}" in snapshot
]
if not snapshots:
errors.add(f'dataset "{dataset}" has no backup snapshots')
continue
newest_backup_snapshot = snapshots[-1]
snapshot_datetime = datetime.utcfromtimestamp(
int(check_output(['zfs', 'list', '-p', '-H', '-o', 'creation', '-t', 'snapshot', newest_backup_snapshot]).decode())
)
if snapshot_datetime < two_days_ago:
days_ago = (now - snapshot_datetime).days
errors.add(f'dataset "{dataset}" has not been backed up for {days_ago} days')
continue
if errors:
for error in errors:
print(error)
exit(2)
else:
print(f"all {len(config['datasets'])} datasets have fresh backups.")

View file

@ -1,15 +0,0 @@
from json import dumps
from bundlewrap.metadata import MetadataJSONEncoder
files = {
'/etc/backup-freshness-check.json': {
'content': dumps({
'prefix': node.metadata.get('backup-freshness-check/prefix'),
'datasets': node.metadata.get('backup-freshness-check/datasets'),
}, indent=4, sort_keys=True, cls=MetadataJSONEncoder),
},
'/usr/lib/nagios/plugins/check_backup_freshness': {
'mode': '0755',
},
}

View file

@ -1,37 +0,0 @@
defaults = {
'backup-freshness-check': {
'server': node.name,
'prefix': 'auto-backup_',
'datasets': {},
},
'monitoring': {
'services': {
'backup freshness': {
'vars.command': '/usr/lib/nagios/plugins/check_backup_freshness',
'check_interval': '6h',
'vars.sudo': True,
},
},
},
}
@metadata_reactor.provides(
'backup-freshness-check/datasets'
)
def backup_freshness_check(metadata):
return {
'backup-freshness-check': {
'datasets': {
f"{other_node.metadata.get('id')}/{dataset}"
for other_node in repo.nodes
if not other_node.dummy
and other_node.has_bundle('backup')
and other_node.has_bundle('zfs')
and other_node.metadata.get('backup/server') == metadata.get('backup-freshness-check/server')
for dataset, options in other_node.metadata.get('zfs/datasets').items()
if options.get('backup', True)
and not options.get('mountpoint', None) in [None, 'none']
},
},
}

View file

@ -16,14 +16,7 @@ defaults = {
'/usr/bin/rsync',
'/sbin/zfs',
},
},
'zfs': {
'datasets': {
'tank': {
'recordsize': "1048576",
},
},
},
}
}
@ -32,10 +25,9 @@ defaults = {
)
def zfs(metadata):
datasets = {}
for other_node in repo.nodes:
if (
not other_node.dummy and
other_node.has_bundle('backup') and
other_node.metadata.get('backup/server') == node.name
):
@ -50,7 +42,7 @@ def zfs(metadata):
'com.sun:auto-snapshot': 'false',
'backup': False,
}
# for rsync backups
datasets[f'{base_dataset}/fs'] = {
'mountpoint': f"/mnt/backups/{id}",
@ -59,10 +51,10 @@ def zfs(metadata):
'com.sun:auto-snapshot': 'true',
'backup': False,
}
# for zfs send/recv
if other_node.has_bundle('zfs'):
# base datasets for each tank
for pool in other_node.metadata.get('zfs/pools'):
datasets[f'{base_dataset}/{pool}'] = {
@ -72,7 +64,7 @@ def zfs(metadata):
'com.sun:auto-snapshot': 'false',
'backup': False,
}
# actual datasets
for path in other_node.metadata.get('backup/paths'):
for dataset, config in other_node.metadata.get('zfs/datasets').items():
@ -99,7 +91,7 @@ def zfs(metadata):
def dns(metadata):
return {
'dns': {
metadata.get('backup-server/hostname'): repo.libs.ip.get_a_records(metadata),
metadata.get('backup-server/hostname'): repo.libs.dns.get_a_records(metadata),
}
}

View file

@ -1,31 +1,11 @@
#!/bin/bash
set -u
# FIXME: inelegant
% if wol_command:
${wol_command}
% endif
exit=0
failed_paths=""
for path in $(jq -r '.paths | .[]' < /etc/backup/config.json)
do
echo backing up $path
/opt/backup/backup_path "$path"
# set a non-zero exit code if any backup fails
if [ $? -ne 0 ]
then
echo ERROR: backing up $path failed >&2
exit=5
failed_paths="$failed_paths $path"
fi
done
if [ $exit -ne 0 ]
then
echo "ERROR: failed to backup paths: $failed_paths" >&2
fi
exit $exit

View file

@ -1,13 +1,11 @@
#!/bin/bash
set -exu
path=$1
if zfs list -H -o mountpoint | grep -q "^$path$"
if zfs list -H -o mountpoint | grep -q "$path"
then
/opt/backup/backup_path_via_zfs "$path"
elif test -e "$path"
elif test -d "$path"
then
/opt/backup/backup_path_via_rsync "$path"
else

View file

@ -7,14 +7,5 @@ uuid=$(jq -r .client_uuid < /etc/backup/config.json)
server=$(jq -r .server_hostname < /etc/backup/config.json)
ssh="ssh -o ConnectTimeout=5 backup-receiver@$server"
if test -d "$path"
then
postfix="/"
elif test -f "$path"
then
postfix=""
else
exit 1
fi
rsync -av --rsync-path="sudo rsync" "$path$postfix" "backup-receiver@$server:/mnt/backups/$uuid$path$postfix"
rsync -av --rsync-path="sudo rsync" "$path/" "backup-receiver@$server:/mnt/backups/$uuid$path/"
$ssh sudo zfs snap "tank/$uuid/fs@auto-backup_$(date +"%Y-%m-%d_%H:%M:%S")"

View file

@ -1,6 +1,6 @@
#!/bin/bash
set -eu
set -exu
path=$1
uuid=$(jq -r .client_uuid < /etc/backup/config.json)
@ -39,20 +39,20 @@ else
echo "INCREMENTAL BACKUP"
last_bookmark=$(zfs list -t bookmark -H -o name | grep "^$source_dataset#$bookmark_prefix" | sort | tail -1 | cut -d '#' -f 2)
[[ -z "$last_bookmark" ]] && echo "ERROR - last_bookmark is empty" && exit 98
$(zfs send -v -L -i "#$last_bookmark" "$source_dataset@$new_bookmark" | $ssh sudo zfs recv "$target_dataset")
$(zfs send -v -i "#$last_bookmark" "$source_dataset@$new_bookmark" | $ssh sudo zfs recv "$target_dataset")
fi
if [[ "$?" == "0" ]]
then
# delete old local bookmarks
for destroyable_bookmark in $(zfs list -t bookmark -H -o name "$source_dataset" | grep "^$source_dataset#$bookmark_prefix")
for destroyable_bookmark in $(zfs list -t bookmark -H -o name "$dataset" | grep "^$dataset#$bookmark_prefix")
do
zfs destroy "$destroyable_bookmark"
done
# delete remote snapshots from bookmarks (except newest, even if not necessary; maybe for resuming tho)
for destroyable_snapshot in $($ssh sudo zfs list -t snapshot -H -o name "$target_dataset" | grep "^$target_dataset@$bookmark_prefix" | grep -v "$new_bookmark")
# delete snapshots from bookmarks (except newest, even if not necessary; maybe for resuming tho)
for destroyable_snapshot in $($ssh sudo zfs list -t snapshot -H -o name "$dataset" | grep "^$dataset@$bookmark_prefix" | grep -v "$new_bookmark")
do
$ssh sudo zfs destroy "$destroyable_snapshot"
done

View file

@ -20,11 +20,7 @@ defaults = {
'systemd-timers': {
f'backup': {
'command': '/opt/backup/backup_all',
'when': '1:00',
'persistent': True,
'after': {
'network-online.target',
},
'when': 'daily',
},
},
}

View file

@ -1,5 +1,7 @@
from ipaddress import ip_address, ip_interface
from datetime import datetime
import json
from bundlewrap.metadata import MetadataJSONEncoder
from hashlib import sha3_512
@ -19,7 +21,7 @@ directories[f'/var/lib/bind'] = {
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
@ -29,7 +31,7 @@ files['/etc/default/bind9'] = {
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
@ -43,7 +45,7 @@ files['/etc/bind/named.conf'] = {
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
@ -63,7 +65,7 @@ files['/etc/bind/named.conf.options'] = {
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
@ -93,7 +95,7 @@ files['/etc/bind/named.conf.local'] = {
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
@ -106,7 +108,7 @@ for view_name, view_conf in master_node.metadata.get('bind/views').items():
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
@ -127,10 +129,10 @@ for view_name, view_conf in master_node.metadata.get('bind/views').items():
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
svc_systemd['bind9'] = {}
@ -139,6 +141,6 @@ actions['named-checkconf'] = {
'unless': 'named-checkconf -z',
'needs': [
'svc_systemd:bind9',
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
]
}

View file

@ -41,12 +41,6 @@ defaults = {
},
'zones': set(),
},
'nftables': {
'input': {
'tcp dport 53 accept',
'udp dport 53 accept',
},
},
'telegraf': {
'config': {
'inputs': {
@ -92,7 +86,7 @@ def master_slave(metadata):
def dns(metadata):
return {
'dns': {
metadata.get('bind/hostname'): repo.libs.ip.get_a_records(metadata),
metadata.get('bind/hostname'): repo.libs.dns.get_a_records(metadata),
}
}
@ -103,7 +97,7 @@ def dns(metadata):
def collect_records(metadata):
if metadata.get('bind/type') == 'slave':
return {}
views = {}
for view_name, view_conf in metadata.get('bind/views').items():
@ -123,7 +117,7 @@ def collect_records(metadata):
name = fqdn[0:-len(zone) - 1]
for type, values in records.items():
for type, values in records.items():
for value in values:
if repo.libs.bind.record_matches_view(value, type, name, zone, view_name, metadata):
views\
@ -134,7 +128,7 @@ def collect_records(metadata):
.add(
h({'name': name, 'type': type, 'value': value})
)
return {
'bind': {
'views': views,
@ -166,7 +160,7 @@ def ns_records(metadata):
# FIXME: bw currently cant handle lists of dicts :(
h({'name': '@', 'type': 'NS', 'value': f"{nameserver}."})
for nameserver in nameservers
}
}
}
for zone_name, zone_conf in view_conf['zones'].items()
}
@ -183,7 +177,7 @@ def ns_records(metadata):
def slaves(metadata):
if metadata.get('bind/type') == 'slave':
return {}
return {
'bind': {
'slaves': [

View file

@ -1,10 +1,6 @@
from shlex import quote
defaults = {
'build-ci': {},
}
@metadata_reactor.provides(
'users/build-ci/authorized_users',
'sudoers/build-ci',
@ -22,7 +18,7 @@ def ssh_keys(metadata):
},
'sudoers': {
'build-ci': {
f"/usr/bin/chown -R build-ci\\:{quote(ci['group'])} {quote(ci['path'])}"
f"/usr/bin/chown -R build-ci\:{quote(ci['group'])} {quote(ci['path'])}"
for ci in metadata.get('build-ci').values()
}
},

View file

@ -71,7 +71,6 @@ def nginx(metadata):
'context': {
'target': 'http://127.0.0.1:4000',
},
'check_path': '/status',
},
},
},

View file

@ -1,20 +1,10 @@
debian_version = min([node.os_version, (11,)])[0] # FIXME
defaults = {
'apt': {
'packages': {
'crystal': {},
},
'sources': {
'crystal': {
# https://software.opensuse.org/download.html?project=devel%3Alanguages%3Acrystal&package=crystal
'urls': {
'http://download.opensuse.org/repositories/devel:/languages:/crystal/Debian_Testing/',
},
'suites': {
'/',
},
},
'deb http://download.opensuse.org/repositories/devel:/languages:/crystal/Debian_{version}/ /',
},
},
}

View file

@ -1,12 +1,9 @@
DOVECOT
=======
rescan index
------------
https://doc.dovecot.org/configuration_manual/fts/#rescan
rescan index: https://doc.dovecot.org/configuration_manual/fts/#rescan
```
doveadm fts rescan -u 'i@ckn.li'
doveadm index -u 'i@ckn.li' -q '*'
sudo -u vmail doveadm fts rescan -u 'test@mail2.sublimity.de'
sudo -u vmail doveadm index -u 'test@mail2.sublimity.de' -q '*'
```
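
To spot-check the rebuilt index, a full-text search against the same mailbox should come back quickly once indexing is done (a sketch; the user and search term are examples):

```
# any indexed term works here
sudo -u vmail doveadm search -u 'test@mail2.sublimity.de' mailbox INBOX text hello
```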

View file

@ -66,7 +66,8 @@ xmlunzip() {
trap "rm -rf $path $tempdir" 0 1 2 3 14 15
cd $tempdir || exit 1
unzip -q "$path" 2>/dev/null || exit 0
find . -name "$name" -print0 | xargs -0 cat | /usr/lib/dovecot/xml2text
find . -name "$name" -print0 | xargs -0 cat |
$libexec_dir/xml2text
}
wait_timeout() {

View file

@ -2,14 +2,7 @@ connect = host=${host} dbname=${name} user=${user} password=${password}
driver = pgsql
default_pass_scheme = ARGON2ID
user_query = SELECT '/var/vmail/%u' AS home, 'vmail' AS uid, 'vmail' AS gid
iterate_query = SELECT CONCAT(users.name, '@', domains.name) AS user \
FROM users \
LEFT JOIN domains ON users.domain_id = domains.id \
WHERE redirect IS NULL
password_query = SELECT CONCAT(users.name, '@', domains.name) AS user, password \
password_query = SELECT CONCAT(users.name, '@', domains.name) AS user, password\
FROM users \
LEFT JOIN domains ON users.domain_id = domains.id \
WHERE redirect IS NULL \

View file

@ -6,26 +6,26 @@ ssl_cert = </var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')
ssl_key = </var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')}/privkey.pem
ssl_dh = </etc/dovecot/dhparam.pem
ssl_client_ca_dir = /etc/ssl/certs
mail_location = maildir:${node.metadata.get('mailserver/maildir')}/%u:INDEX=${node.metadata.get('mailserver/maildir')}/index/%u
mail_location = maildir:~
mail_plugins = fts fts_xapian
namespace inbox {
inbox = yes
separator = .
mailbox Drafts {
auto = subscribe
auto = subscribe
special_use = \Drafts
}
mailbox Junk {
auto = create
auto = create
special_use = \Junk
}
mailbox Trash {
auto = subscribe
auto = subscribe
special_use = \Trash
}
mailbox Sent {
auto = subscribe
auto = subscribe
special_use = \Sent
}
}
@ -34,10 +34,9 @@ passdb {
driver = sql
args = /etc/dovecot/dovecot-sql.conf
}
# use sql for userdb too, to enable iterate_query
userdb {
driver = sql
args = /etc/dovecot/dovecot-sql.conf
driver = static
args = uid=vmail gid=vmail home=/var/vmail/%u
}
service auth {
@ -81,10 +80,10 @@ protocol imap {
mail_plugins = $mail_plugins imap_sieve
mail_max_userip_connections = 50
imap_idle_notify_interval = 29 mins
}
}
protocol lmtp {
mail_plugins = $mail_plugins sieve
}
}
protocol sieve {
plugin {
sieve = /var/vmail/sieve/%u.sieve
@ -118,7 +117,7 @@ plugin {
sieve_dir = /var/vmail/sieve/%u/
sieve = /var/vmail/sieve/%u.sieve
sieve_pipe_bin_dir = /var/vmail/sieve/bin
sieve_extensions = +vnd.dovecot.pipe
sieve_extensions = +vnd.dovecot.pipe
sieve_after = /var/vmail/sieve/global/spam-to-folder.sieve

View file

@ -20,10 +20,6 @@ directories = {
'owner': 'vmail',
'group': 'vmail',
},
'/var/vmail/index': {
'owner': 'vmail',
'group': 'vmail',
},
'/var/vmail/sieve': {
'owner': 'vmail',
'group': 'vmail',

View file

@ -13,26 +13,15 @@ defaults = {
'catdoc': {}, # catdoc, catppt, xls2csv
},
},
'dovecot': {
'database': {
'dbname': 'mailserver',
'dbuser': 'mailserver',
},
},
'letsencrypt': {
'reload_after': {
'dovecot',
},
},
'nftables': {
'input': {
'tcp dport {143, 993, 4190} accept',
},
},
'systemd-timers': {
'dovecot-optimize-index': {
'command': '/usr/bin/doveadm fts optimize -A',
'when': 'daily',
'dovecot': {
'database': {
'dbname': 'mailserver',
'dbuser': 'mailserver',
},
},
}

View file

@ -0,0 +1,6 @@
# directories = {
# '/var/lib/downloads': {
# 'owner': 'downloads',
# 'group': 'www-data',
# }
# }

View file

@ -1,23 +0,0 @@
Pg Pass workaround: set manually:
```
root@freescout /ro psql freescout
psql (15.6 (Debian 15.6-0+deb12u1))
Type "help" for help.
freescout=# \password freescout
Enter new password for user "freescout":
Enter it again:
freescout=#
\q
```
# problems
# check if /opt/freescout/.env has been reset
# check `psql -h localhost -d freescout -U freescout -W` with the pw from .env
# chown -R www-data:www-data /opt/freescout
# sudo su - www-data -c 'php /opt/freescout/artisan freescout:clear-cache' -s /bin/bash
# javascript funny? `sudo su - www-data -c 'php /opt/freescout/artisan storage:link' -s /bin/bash`
# user images gone? restore them from the backup: `/opt/freescout/.zfs/snapshot/zfs-auto-snap_hourly-2024-11-22-1700/storage/app/public/users` `./customers`

View file

@ -1,66 +0,0 @@
# https://github.com/freescout-helpdesk/freescout/wiki/Installation-Guide
run_as = repo.libs.tools.run_as
php_version = node.metadata.get('php/version')
directories = {
'/opt/freescout': {
'owner': 'www-data',
'group': 'www-data',
# chown -R www-data:www-data /opt/freescout
},
}
actions = {
# 'clone_freescout': {
# 'command': run_as('www-data', 'git clone https://github.com/freescout-helpdesk/freescout.git /opt/freescout'),
# 'unless': 'test -e /opt/freescout/.git',
# 'needs': [
# 'pkg_apt:git',
# 'directory:/opt/freescout',
# ],
# },
# 'pull_freescout': {
# 'command': run_as('www-data', 'git -C /opt/freescout fetch origin dist && git -C /opt/freescout reset --hard origin/dist && git -C /opt/freescout clean -f'),
# 'unless': run_as('www-data', 'git -C /opt/freescout fetch origin && git -C /opt/freescout status -uno | grep -q "Your branch is up to date"'),
# 'needs': [
# 'action:clone_freescout',
# ],
# 'triggers': [
# 'action:freescout_artisan_update',
# f'svc_systemd:php{php_version}-fpm.service:restart',
# ],
# },
# 'freescout_artisan_update': {
# 'command': run_as('www-data', 'php /opt/freescout/artisan freescout:after-app-update'),
# 'triggered': True,
# 'needs': [
# f'svc_systemd:php{php_version}-fpm.service:restart',
# 'action:pull_freescout',
# ],
# },
}
# svc_systemd = {
# f'freescout-cron.service': {},
# }
# files = {
# '/opt/freescout/.env': {
# # https://github.com/freescout-helpdesk/freescout/blob/dist/.env.example
# # Every time you change the .env file, you need to run the following for the changes to take effect:
# # ´sudo su - www-data -c 'php /opt/freescout/artisan freescout:clear-cache' -s /bin/bash´
# 'owner': 'www-data',
# 'content': '\n'.join(
# f'{k}={v}' for k, v in
# sorted(node.metadata.get('freescout/env').items())
# ) + '\n',
# 'needs': [
# 'directory:/opt/freescout',
# 'action:clone_freescout',
# ],
# },
# }
#sudo su - www-data -s /bin/bash -c 'php /opt/freescout/artisan freescout:create-user --role admin --firstName M --lastName W --email freescout@freibrief.net --password gyh.jzv2bnf6hvc.HKG --no-interaction'
#sudo su - www-data -s /bin/bash -c 'php /opt/freescout/artisan freescout:create-user --role admin --firstName M --lastName W --email freescout@freibrief.net --password gyh.jzv2bnf6hvc.HKG --no-interaction'

View file

@ -1,121 +0,0 @@
from base64 import b64decode
# hash: SCRAM-SHA-256$4096:tQNfqQi7seqNDwJdHqCHbg==$r3ibECluHJaY6VRwpvPqrtCjgrEK7lAkgtUO8/tllTU=:+eeo4M0L2SowfyHFxT2FRqGzezve4ZOEocSIo11DATA=
database_password = repo.vault.password_for(f'{node.name} postgresql freescout').value
defaults = {
'apt': {
'packages': {
'git': {},
'php': {},
'php-pgsql': {},
'php-fpm': {},
'php-mbstring': {},
'php-xml': {},
'php-imap': {},
'php-zip': {},
'php-gd': {},
'php-curl': {},
'php-intl': {},
},
},
'freescout': {
'env': {
'APP_TIMEZONE': 'Europe/Berlin',
'DB_CONNECTION': 'pgsql',
'DB_HOST': '127.0.0.1',
'DB_PORT': '5432',
'DB_DATABASE': 'freescout',
'DB_USERNAME': 'freescout',
'DB_PASSWORD': database_password,
'APP_KEY': 'base64:' + repo.vault.random_bytes_as_base64_for(f'{node.name} freescout APP_KEY', length=32).value
},
},
'php': {
'php.ini': {
'cgi': {
'fix_pathinfo': '0',
},
},
},
'postgresql': {
'roles': {
'freescout': {
'password_hash': repo.libs.postgres.generate_scram_sha_256(
database_password,
b64decode(repo.vault.random_bytes_as_base64_for(f'{node.name} postgres freescout', length=16).value.encode()),
),
},
},
'databases': {
'freescout': {
'owner': 'freescout',
},
},
},
# 'systemd': {
# 'units': {
# f'freescout-cron.service': {
# 'Unit': {
# 'Description': 'Freescout Cron',
# 'After': 'network.target',
# },
# 'Service': {
# 'User': 'www-data',
# 'Nice': 10,
# 'ExecStart': f"/usr/bin/php /opt/freescout/artisan schedule:run"
# },
# 'Install': {
# 'WantedBy': {
# 'multi-user.target'
# }
# },
# }
# },
# },
'systemd-timers': {
'freescout-cron': {
'command': '/usr/bin/php /opt/freescout/artisan schedule:run',
'when': '*-*-* *:*:00',
'RuntimeMaxSec': '180',
'user': 'www-data',
},
},
'zfs': {
'datasets': {
'tank/freescout': {
'mountpoint': '/opt/freescout',
},
},
},
}
@metadata_reactor.provides(
'freescout/env/APP_URL',
)
def freescout(metadata):
return {
'freescout': {
'env': {
'APP_URL': 'https://' + metadata.get('freescout/domain') + '/',
},
},
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('freescout/domain'): {
'content': 'freescout/vhost.conf',
},
},
},
}

View file

@ -8,15 +8,7 @@ defaults = {
'python3-crcmod': {},
},
'sources': {
'google-cloud': {
'url': 'https://packages.cloud.google.com/apt/',
'suites': {
'cloud-sdk',
},
'components': {
'main',
},
},
'deb https://packages.cloud.google.com/apt cloud-sdk main',
},
},
}

View file

@ -1,4 +1,3 @@
[DEFAULT]
APP_NAME = ckn-gitea
RUN_USER = git
RUN_MODE = prod
@ -14,24 +13,40 @@ MEMBERS_PAGING_NUM = 100
[server]
PROTOCOL = http
SSH_DOMAIN = ${domain}
DOMAIN = ${domain}
HTTP_ADDR = 0.0.0.0
HTTP_PORT = 3500
ROOT_URL = https://${domain}/
DISABLE_SSH = true
SSH_PORT = 22
LFS_START_SERVER = true
LFS_CONTENT_PATH = /var/lib/gitea/data/lfs
LFS_JWT_SECRET = ${lfs_secret_key}
OFFLINE_MODE = true
START_SSH_SERVER = false
DISABLE_ROUTER_LOG = true
LANDING_PAGE = explore
[database]
DB_TYPE = postgres
HOST = ${database.get('host')}:${database.get('port')}
NAME = ${database.get('database')}
USER = ${database.get('username')}
PASSWD = ${database.get('password')}
SSL_MODE = disable
LOG_SQL = false
[admin]
DEFAULT_EMAIL_NOTIFICATIONS = onmention
DISABLE_REGULAR_ORG_CREATION = true
[security]
INTERNAL_TOKEN = ${internal_token}
INSTALL_LOCK = true
SECRET_KEY = ${security_secret_key}
LOGIN_REMEMBER_DAYS = 30
DISABLE_GIT_HOOKS = ${str(not enable_git_hooks).lower()}
[openid]
ENABLE_OPENID_SIGNIN = false
@ -47,6 +62,12 @@ REQUIRE_SIGNIN_VIEW = false
DEFAULT_KEEP_EMAIL_PRIVATE = true
DEFAULT_ALLOW_CREATE_ORGANIZATION = false
DEFAULT_ENABLE_TIMETRACKING = true
NO_REPLY_ADDRESS = noreply.${domain}
[mailer]
ENABLED = true
MAILER_TYPE = sendmail
FROM = "${app_name}" <noreply@${domain}>
[session]
PROVIDER = file
@ -59,6 +80,9 @@ ENABLE_FEDERATED_AVATAR = false
MODE = console
LEVEL = warn
[oauth2]
JWT_SECRET = ${oauth_secret_key}
[other]
SHOW_FOOTER_BRANDING = true
SHOW_FOOTER_TEMPLATE_LOAD_TIME = false
@ -66,10 +90,3 @@ SHOW_FOOTER_TEMPLATE_LOAD_TIME = false
[webhook]
ALLOWED_HOST_LIST = *
DELIVER_TIMEOUT = 600
[indexer]
REPO_INDEXER_ENABLED = true
MAX_FILE_SIZE = 10240000
[queue.issue_indexer]
LENGTH = 20

View file

@ -1,15 +1,8 @@
from os.path import join
from bundlewrap.utils.dicts import merge_dict
version = node.metadata.get('gitea/version')
assert not version.startswith('v')
arch = node.metadata.get('system/architecture')
version = node.metadata.get('gitea/version')
downloads['/usr/local/bin/gitea'] = {
# https://forgejo.org/releases/
'url': f'https://codeberg.org/forgejo/forgejo/releases/download/v{version}/forgejo-{version}-linux-{arch}',
'sha256_url': '{url}.sha256',
'url': f'https://dl.gitea.io/gitea/{version}/gitea-{version}-linux-amd64',
'sha256': node.metadata.get('gitea/sha256'),
'triggers': {
'svc_systemd:gitea:restart',
},
@ -41,14 +34,8 @@ actions = {
}
files['/etc/gitea/app.ini'] = {
'content': repo.libs.ini.dumps(
merge_dict(
repo.libs.ini.parse(open(join(repo.path, 'bundles', 'gitea', 'files', 'app.ini')).read()),
node.metadata.get('gitea/conf'),
),
),
'content_type': 'mako',
'owner': 'git',
'mode': '0600',
'context': node.metadata['gitea'],
'triggers': {
'svc_systemd:gitea:restart',

View file

@ -1,4 +1,4 @@
database_password = repo.vault.password_for(f'{node.name} postgresql gitea').value
database_password = repo.vault.password_for(f'{node.name} postgresql gitea')
defaults = {
'apt': {
@ -11,20 +11,18 @@ defaults = {
},
},
'gitea': {
'conf': {
'DEFAULT': {
'WORK_PATH': '/var/lib/gitea',
},
'database': {
'DB_TYPE': 'postgres',
'HOST': 'localhost:5432',
'NAME': 'gitea',
'USER': 'gitea',
'PASSWD': database_password,
'SSL_MODE': 'disable',
'LOG_SQL': 'false',
},
'database': {
'host': 'localhost',
'port': '5432',
'username': 'gitea',
'password': database_password,
'database': 'gitea',
},
'app_name': 'Gitea',
'lfs_secret_key': repo.vault.password_for(f'{node.name} gitea lfs_secret_key', length=43),
'security_secret_key': repo.vault.password_for(f'{node.name} gitea security_secret_key'),
'oauth_secret_key': repo.vault.password_for(f'{node.name} gitea oauth_secret_key', length=43),
'internal_token': repo.vault.password_for(f'{node.name} gitea internal_token'),
},
'postgresql': {
'roles': {
@ -43,7 +41,8 @@ defaults = {
'gitea.service': {
'Unit': {
'Description': 'gitea',
'After': {'syslog.target', 'network.target'},
'After': 'syslog.target',
'After': 'network.target',
'Requires': 'postgresql.service',
},
'Service': {
@ -67,40 +66,21 @@ defaults = {
'home': '/home/git',
},
},
'zfs': {
'datasets': {
'tank/gitea': {
'mountpoint': '/var/lib/gitea',
},
},
},
}
@metadata_reactor.provides(
'gitea/conf',
'zfs/datasets',
)
def conf(metadata):
domain = metadata.get('gitea/domain')
def zfs(metadata):
if not node.has_bundle('zfs'):
return {}
return {
'gitea': {
'conf': {
'server': {
'SSH_DOMAIN': domain,
'DOMAIN': domain,
'ROOT_URL': f'https://{domain}/',
'LFS_JWT_SECRET': repo.vault.password_for(f'{node.name} gitea lfs_secret_key', length=43),
},
'security': {
'INTERNAL_TOKEN': repo.vault.password_for(f'{node.name} gitea internal_token'),
'SECRET_KEY': repo.vault.password_for(f'{node.name} gitea security_secret_key'),
},
'service': {
'NO_REPLY_ADDRESS': f'noreply.{domain}',
},
'oauth2': {
'JWT_SECRET': repo.vault.password_for(f'{node.name} gitea oauth_secret_key', length=43),
'zfs': {
'datasets': {
f"{metadata.get('zfs/storage_classes/ssd')}/gitea": {
'mountpoint': '/var/lib/gitea',
},
},
},
@ -118,7 +98,7 @@ def nginx(metadata):
'content': 'nginx/proxy_pass.conf',
'context': {
'target': 'http://127.0.0.1:3500',
},
}
},
},
},

View file

@ -18,17 +18,16 @@ admin_password = node.metadata.get('grafana/config/security/admin_password')
port = node.metadata.get('grafana/config/server/http_port')
actions['reset_grafana_admin_password'] = {
'command': f"grafana-cli admin reset-admin-password {quote(admin_password)}",
'unless': f"sleep 5 && curl http://admin:{quote(admin_password)}@localhost:{port}/api/org --fail",
'unless': f"curl http://admin:{quote(admin_password)}@localhost:{port}/api/org",
'needs': [
'svc_systemd:grafana-server',
],
}
directories = {
'/etc/grafana': {},
'/etc/grafana': {
},
'/etc/grafana/provisioning': {
'owner': 'grafana',
'group': 'grafana',
},
'/etc/grafana/provisioning/datasources': {
'purge': True,
@ -36,13 +35,8 @@ directories = {
'/etc/grafana/provisioning/dashboards': {
'purge': True,
},
'/var/lib/grafana': {
'owner': 'grafana',
'group': 'grafana',
},
'/var/lib/grafana': {},
'/var/lib/grafana/dashboards': {
'owner': 'grafana',
'group': 'grafana',
'purge': True,
'triggers': [
'svc_systemd:grafana-server:restart',
@ -53,8 +47,6 @@ directories = {
files = {
'/etc/grafana/grafana.ini': {
'content': repo.libs.ini.dumps(node.metadata.get('grafana/config')),
'owner': 'grafana',
'group': 'grafana',
'triggers': [
'svc_systemd:grafana-server:restart',
],
@ -64,8 +56,6 @@ files = {
'apiVersion': 1,
'datasources': list(node.metadata.get('grafana/datasources').values()),
}),
'owner': 'grafana',
'group': 'grafana',
'triggers': [
'svc_systemd:grafana-server:restart',
],
@ -82,8 +72,6 @@ files = {
},
}],
}),
'owner': 'grafana',
'group': 'grafana',
'triggers': [
'svc_systemd:grafana-server:restart',
],
@ -172,8 +160,6 @@ for dashboard_id, monitored_node in enumerate(monitored_nodes, start=1):
files[f'/var/lib/grafana/dashboards/{monitored_node.name}.json'] = {
'content': json.dumps(dashboard, indent=4),
'owner': 'grafana',
'group': 'grafana',
'triggers': [
'svc_systemd:grafana-server:restart',
]

View file

@ -8,19 +8,8 @@ defaults = {
'grafana': {},
},
'sources': {
'grafana': {
'urls': {
'https://packages.grafana.com/oss/deb',
},
'suites': {
'stable',
},
'components': {
'main',
},
},
'deb https://packages.grafana.com/oss/deb stable main',
},
},
'grafana': {
'config': {
@ -77,7 +66,7 @@ def domain(metadata):
'domain': metadata.get('grafana/hostname'),
},
},
},
},
}
@metadata_reactor.provides(
@ -85,7 +74,7 @@ def domain(metadata):
)
def influxdb2(metadata):
influxdb_metadata = repo.get_node(metadata.get('grafana/influxdb_node')).metadata.get('influxdb')
return {
'grafana': {
'datasources': {
@ -104,7 +93,7 @@ def influxdb2(metadata):
'isDefault': True,
},
},
},
},
}
@ -117,7 +106,7 @@ def datasource_key_to_name(metadata):
'datasources': {
name: {'name': name} for name in metadata.get('grafana/datasources').keys()
},
},
},
}
@ -127,7 +116,7 @@ def datasource_key_to_name(metadata):
def dns(metadata):
return {
'dns': {
metadata.get('grafana/hostname'): repo.libs.ip.get_a_records(metadata),
metadata.get('grafana/hostname'): repo.libs.dns.get_a_records(metadata),
}
}

View file

@ -1,23 +0,0 @@
https://github.com/home-assistant/supervised-installer?tab=readme-ov-file
https://github.com/home-assistant/os-agent/tree/main?tab=readme-ov-file#using-home-assistant-supervised-on-debian
https://docs.docker.com/engine/install/debian/
https://www.home-assistant.io/installation/linux#install-home-assistant-supervised
https://github.com/home-assistant/supervised-installer
https://github.com/home-assistant/architecture/blob/master/adr/0014-home-assistant-supervised.md
DATA_SHARE=/usr/share/hassio dpkg --force-confdef --force-confold -i homeassistant-supervised.deb
fresh debian
install ha
check that it works
then apply bw on top
https://www.home-assistant.io/integrations/http/#ssl_certificate
`wget "$(curl -L https://api.github.com/repos/home-assistant/supervised-installer/releases/latest | jq -r '.assets[0].browser_download_url')" -O homeassistant-supervised.deb && dpkg -i homeassistant-supervised.deb`
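
Rough sanity check after the install (unit and container names assumed from a default supervised setup):

```
# the supervised installer runs everything in docker; the supervisor itself is a systemd unit
systemctl status hassio-supervisor.service
docker ps --filter name=hassio
```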

View file

@ -1,30 +0,0 @@
from shlex import quote
version = node.metadata.get('homeassistant/os_agent_version')
directories = {
'/usr/share/hassio': {},
}
actions = {
'install_os_agent': {
'command': ' && '.join([
f'wget -O /tmp/os-agent.deb https://github.com/home-assistant/os-agent/releases/download/{quote(version)}/os-agent_{quote(version)}_linux_aarch64.deb',
'DEBIAN_FRONTEND=noninteractive dpkg -i /tmp/os-agent.deb',
]),
'unless': f'test "$(apt -qq list os-agent | cut -d" " -f2)" = "{quote(version)}"',
'needs': {
'pkg_apt:',
'zfs_dataset:tank/homeassistant',
},
},
'install_homeassistant_supervised': {
'command': 'wget -O /tmp/homeassistant-supervised.deb https://github.com/home-assistant/supervised-installer/releases/latest/download/homeassistant-supervised.deb && apt install /tmp/homeassistant-supervised.deb',
'unless': 'apt -qq list homeassistant-supervised | grep -q "installed"',
'needs': {
'action:install_os_agent',
},
},
}

View file

@ -1,65 +0,0 @@
defaults = {
'apt': {
'packages': {
# homeassistant-supervised
'apparmor': {},
'bluez': {},
'cifs-utils': {},
'curl': {},
'dbus': {},
'jq': {},
'libglib2.0-bin': {},
'lsb-release': {},
'network-manager': {},
'nfs-common': {},
'systemd-journal-remote': {},
'systemd-resolved': {},
'udisks2': {},
'wget': {},
# docker
'docker-ce': {},
'docker-ce-cli': {},
'containerd.io': {},
'docker-buildx-plugin': {},
'docker-compose-plugin': {},
},
'sources': {
# docker: https://docs.docker.com/engine/install/debian/#install-using-the-repository
'docker': {
'urls': {
'https://download.docker.com/linux/debian',
},
'suites': {
'{codename}',
},
'components': {
'stable',
},
},
},
},
'zfs': {
'datasets': {
'tank/homeassistant': {
'mountpoint': '/usr/share/hassio',
'needed_by': {
'directory:/usr/share/hassio',
},
},
},
},
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('homeassistant/domain'): {
'content': 'homeassistant/vhost.conf',
},
},
},
}

View file

@ -0,0 +1,20 @@
users = {
'homeassistant': {
'home': '/var/lib/homeassistant',
},
}
directories = {
'/var/lib/homeassistant': {
'owner': 'homeassistant',
},
'/var/lib/homeassistant/config': {
'owner': 'homeassistant',
},
'/var/lib/homeassistant/venv': {
'owner': 'homeassistant',
},
}
# https://wiki.instar.com/de/Software/Linux/Home_Assistant/

View file

@ -0,0 +1,9 @@
defaults = {
'apt': {
'packages': {
'python3-dev': {},
'python3-pip': {},
'python3-venv': {},
},
},
}

View file

@ -23,6 +23,6 @@ def hostname_file(metadata):
def dns(metadata):
return {
'dns': {
metadata.get('hostname'): repo.libs.ip.get_a_records(metadata),
metadata.get('hostname'): repo.libs.dns.get_a_records(metadata, external=False),
},
}

View file

@ -24,7 +24,7 @@ header_margin=1
detailed_cpu_time=0
cpu_count_from_one=1
show_cpu_usage=0
show_cpu_frequency=1
show_cpu_frequency=0
show_cpu_temperature=0
degree_fahrenheit=0
update_process_names=0

View file

@ -2,7 +2,11 @@
UNKNOWN=3
if [ -z "$SSHMON_COMMAND" ]
if [ -z "$SSHMON_TEST" ]
then
echo 'check_by_sshmon: Env SSHMON_TEST missing' >&2
exit $UNKNOWN
elif [ -z "$SSHMON_COMMAND" ]
then
echo 'check_by_sshmon: Env SSHMON_COMMAND missing' >&2
exit $UNKNOWN
@ -12,14 +16,7 @@ then
exit $UNKNOWN
fi
if [ -z "$SSHMON_SUDO" ]
then
PREFIX=""
else
PREFIX="sudo "
fi
ssh sshmon@"$SSHMON_HOST" "$PREFIX$SSHMON_COMMAND"
ssh sshmon@"$SSHMON_HOST" "sudo $SSHMON_COMMAND"
exitcode=$?

View file

@ -12,9 +12,9 @@ object CheckCommand "sshmon" {
command = [ "/usr/lib/nagios/plugins/check_by_sshmon" ]
env.SSHMON_TEST = "1234"
env.SSHMON_COMMAND = "$command$"
env.SSHMON_HOST = "$address$"
env.SSHMON_SUDO = "$sudo$"
}

View file

@ -13,9 +13,9 @@ apply Notification "mail-icingaadmin" to Host {
user_groups = host.vars.notification.mail.groups
users = host.vars.notification.mail.users
//interval = 2h
//vars.notification_logtosyslog = true
assign where host.vars.notification.mail
}
@ -25,9 +25,9 @@ apply Notification "mail-icingaadmin" to Service {
user_groups = host.vars.notification.mail.groups
users = host.vars.notification.mail.users
//interval = 2h
//vars.notification_logtosyslog = true
assign where host.vars.notification.mail
}

View file

@ -1,7 +0,0 @@
/**
* The JournaldLogger type writes log information to the systemd journal.
*/
object JournaldLogger "journald" {
severity = "warning"
}

View file

@ -1,4 +0,0 @@
/**
* This file is required for the initial apt install.
* The JournaldLogger type writes log information to the systemd journal.
*/

View file

@ -0,0 +1,3 @@
object SyslogLogger "syslog" {
severity = "warning"
}

View file

@ -14,8 +14,7 @@
if key.endswith('_interval'):
return value
else:
escaped_value = value.replace('$', '$$').replace('"', '\\"')
return f'"{escaped_value}"'
return f'"{value}"'
elif isinstance(value, (list, set)):
return '[' + ', '.join(render_value(e) for e in sorted(value)) + ']'
else:

View file

@ -5,6 +5,6 @@ include <itl>
include <plugins>
include <plugins-contrib>
include "features-enabled/*.conf"
include "features.d/*.conf"
include_recursive "conf.d"
include "hosts.d/*.conf"

View file

@ -10,24 +10,6 @@ directories = {
'svc_systemd:icinga2.service:restart',
],
},
'/etc/icinga2/pki': { # required for apt install
'purge': True,
'owner': 'nagios',
'group': 'nagios',
'mode': '0750',
'triggers': [
'svc_systemd:icinga2.service:restart',
],
},
'/etc/icinga2/zones.d': { # required for apt install
'purge': True,
'owner': 'nagios',
'group': 'nagios',
'mode': '0750',
'triggers': [
'svc_systemd:icinga2.service:restart',
],
},
'/etc/icinga2/conf.d': {
'purge': True,
'owner': 'nagios',
@ -46,16 +28,7 @@ directories = {
'svc_systemd:icinga2.service:restart',
],
},
'/etc/icinga2/features-available': {
'purge': True,
'owner': 'nagios',
'group': 'nagios',
'mode': '0750',
'triggers': [
'svc_systemd:icinga2.service:restart',
],
},
'/etc/icinga2/features-enabled': {
'/etc/icinga2/features.d': {
'purge': True,
'owner': 'nagios',
'group': 'nagios',
@ -196,6 +169,50 @@ files = {
'svc_systemd:icinga2.service:restart',
],
},
'/etc/icinga2/features.d/ido-pgsql.conf': {
'source': 'features/ido-pgsql.conf',
'content_type': 'mako',
'owner': 'nagios',
'group': 'nagios',
'context': {
'db_password': node.metadata.get('postgresql/roles/icinga2/password')
},
'triggers': [
'svc_systemd:icinga2.service:restart',
],
},
'/etc/icinga2/features.d/syslog.conf': {
'source': 'features/syslog.conf',
'owner': 'nagios',
'group': 'nagios',
'triggers': [
'svc_systemd:icinga2.service:restart',
],
},
'/etc/icinga2/features.d/notification.conf': {
'source': 'features/notification.conf',
'owner': 'nagios',
'group': 'nagios',
'triggers': [
'svc_systemd:icinga2.service:restart',
],
},
'/etc/icinga2/features.d/checker.conf': {
'source': 'features/checker.conf',
'owner': 'nagios',
'group': 'nagios',
'triggers': [
'svc_systemd:icinga2.service:restart',
],
},
'/etc/icinga2/features.d/api.conf': {
'source': 'features/api.conf',
'owner': 'nagios',
'group': 'nagios',
'triggers': [
'svc_systemd:icinga2.service:restart',
],
},
'/var/lib/icinga2/certs/ca.crt': {
'content_type': 'download',
'source': f'https://letsencrypt.org/certs/isrg-root-x1-cross-signed.pem',
@ -210,39 +227,6 @@ files = {
},
}
# FEATURES
for feature, context in {
'mainlog': {},
# 'journald': {}, FIXME
'notification': {},
'checker': {},
'api': {},
'ido-pgsql': {
'db_password': node.metadata.get('postgresql/roles/icinga2/password'),
},
}.items():
files[f'/etc/icinga2/features-available/{feature}.conf'] = {
'content_type': 'mako' if context else 'text',
'context': context,
'source': f'features/{feature}.conf',
'owner': 'nagios',
'group': 'nagios',
'triggers': [
'svc_systemd:icinga2.service:restart',
],
}
symlinks[f'/etc/icinga2/features-enabled/{feature}.conf'] = {
'target': f'/etc/icinga2/features-available/{feature}.conf',
'owner': 'nagios',
'group': 'nagios',
'triggers': [
'svc_systemd:icinga2.service:restart',
],
}
# HOSTS
for other_node in repo.nodes:
if other_node.dummy:
continue
@ -269,7 +253,7 @@ svc_systemd = {
'icinga2.service': {
'needs': [
'pkg_apt:icinga2-ido-pgsql',
'svc_systemd:postgresql.service',
'svc_systemd:postgresql',
],
},
}

View file

@ -9,21 +9,7 @@ defaults = {
'monitoring-plugins': {},
},
'sources': {
'icinga': {
'types': {
'deb',
'deb-src',
},
'urls': {
'https://packages.icinga.com/debian',
},
'suites': {
'icinga-{codename}',
},
'components': {
'main',
},
},
'deb https://packages.icinga.com/debian icinga-{release} main',
},
},
'icinga2': {
@ -34,11 +20,6 @@ defaults = {
}
},
},
'nftables': {
'input': {
'tcp dport 5665 accept',
},
},
'postgresql': {
'databases': {
'icinga2': {
@ -63,6 +44,7 @@ defaults = {
'mountpoint': '/var/lib/icinga2',
'needed_by': {
'pkg_apt:icinga2',
'pkg_apt:icingaweb2',
'pkg_apt:icinga2-ido-pgsql',
},
},
@ -72,7 +54,7 @@ defaults = {
@metadata_reactor.provides(
'letsencrypt/domains',
'nginx/vhosts',
)
def letsencrypt(metadata):
return {

bundles/icingadb/items.py (new file, 14 changes)
View file

@ -0,0 +1,14 @@
import yaml, json
from bundlewrap.metadata import MetadataJSONEncoder
files = {
'/etc/icingadb/config.yml': {
'content': yaml.dump(
json.loads(
json.dumps(node.metadata.get('icingadb'), sort_keys=True, cls=MetadataJSONEncoder)
),
),
'mode': '0640',
'owner': 'icingadb',
},
}

View file

@ -0,0 +1,53 @@
defaults = {
'apt': {
'packages': {
'icingadb': {},
'icingadb-redis': {},
'icingadb-web': {},
},
'sources': {
'deb https://packages.icinga.com/debian icinga-{release} main',
'deb https://packages.icinga.com/debian icinga-{release}-snapshots main',
},
},
'postgresql': {
'databases': {
'icingadb': {
'owner': 'icingadb',
},
},
'roles': {
'icingadb': {
'password': repo.vault.password_for(f'psql icingadb on {node.name}'),
},
},
},
'redis': {
'icingadb': {
'port': '6381',
},
},
}
@metadata_reactor.provides(
'icingadb',
)
def config(metadata):
return {
'icingadb': {
'database': {
'type': 'postgresql',
'host': 'localhost',
'port': 3306,
'database': 'icingadb',
'user': 'icingadb',
'password': metadata.get('postgresql/roles/icingadb/password'),
},
'redis': {
'address': 'localhost:6380',
},
'logging': {
'level': 'info',
},
},
}

View file

@ -2,4 +2,3 @@
- open /icingaweb2/setup in browser
- fill in values from metadata
- apply
- make sure the TLS cert exists and is owned by nagios (see the check below)
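
A minimal check, assuming the cert lives under the dehydrated path used for other services in this repo (adjust to the actual hostname and path):

```
ls -l /var/lib/dehydrated/certs/"$(hostname -f)"/privkey.pem
chown nagios:nagios /var/lib/dehydrated/certs/"$(hostname -f)"/privkey.pem
```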

View file

@ -4,27 +4,18 @@ directories = {
'owner': 'www-data',
'group': 'icingaweb2',
'mode': '2770',
'needs': [
'pkg_apt:icingaweb2',
],
},
'/etc/icingaweb2/enabledModules': {
# 'purge': True,
'owner': 'www-data',
'group': 'icingaweb2',
'mode': '2770',
'needs': [
'pkg_apt:icingaweb2',
],
},
'/etc/icingaweb2/modules': {
# 'purge': True,
'owner': 'www-data',
'group': 'icingaweb2',
'mode': '2770',
'needs': [
'pkg_apt:icingaweb2',
],
},
}
@ -34,9 +25,6 @@ files = {
'owner': 'www-data',
'group': 'icingaweb2',
'mode': '0660',
'needs': [
'pkg_apt:icingaweb2',
],
},
}
@ -45,9 +33,6 @@ symlinks = {
'target': '/usr/share/icingaweb2/modules/monitoring',
'owner': 'www-data',
'group': 'icingaweb2',
'needs': [
'pkg_apt:icingaweb2',
],
},
}
@ -63,9 +48,6 @@ for name in [
'owner': 'www-data',
'group': 'icingaweb2',
'mode': '0660',
'needs': [
'pkg_apt:icingaweb2',
],
}
for name in [
@ -78,7 +60,4 @@ for name in [
'owner': 'www-data',
'group': 'icingaweb2',
'mode': '0660',
'needs': [
'pkg_apt:icingaweb2',
],
}

View file

@ -3,6 +3,7 @@ from hashlib import sha3_256
defaults = {
'apt': {
'packages': {
'icingaweb2': {},
'php-ldap': {},
'php-json': {},
'php-intl': {},
@ -10,25 +11,11 @@ defaults = {
'php-gd': {},
'php-imagick': {},
'php-pgsql': {},
'icingaweb2': {},
#'icingaweb2-module-monitoring': {}, # ?
'icingaweb2-module-monitoring': {},
},
'sources': {
'icinga': {
'types': {
'deb',
'deb-src',
},
'urls': {
'https://packages.icinga.com/debian',
},
'suites': {
'icinga-{codename}',
},
'components': {
'main',
},
},
'deb https://packages.icinga.com/debian icinga-{release} main',
'deb https://packages.icinga.com/debian icinga-{release}-snapshots main',
},
},
'icingaweb2': {
@ -131,7 +118,7 @@ defaults = {
@metadata_reactor.provides(
'icingaweb2/hostname',
'icingaweb2/resources.ini/icinga_ido/password',
'icingaweb2/resources.ini/icinga_ido/icinga2/password',
'icingaweb2/monitoring/commandtransports.ini/icinga2/password',
)
def stuff(metadata):
@ -177,7 +164,6 @@ def nginx(metadata):
metadata.get('icingaweb2/hostname'): {
'content': 'icingaweb2/vhost.conf',
'context': {
'php_version': metadata.get('php/version'),
},
},
},

View file

@ -4,9 +4,8 @@ from shlex import quote
directories['/var/lib/influxdb'] = {
'owner': 'influxdb',
'group': 'influxdb',
'mode': '0750',
'needs': [
'zfs_dataset:tank/influxdb',
f"zfs_dataset:{node.metadata.get('zfs/storage_classes/ssd')}/influxdb",
],
}

View file

@ -7,22 +7,7 @@ defaults = {
'influxdb2-cli': {},
},
'sources': {
'influxdata': {
'urls': {
'https://repos.influxdata.com/debian',
},
'suites': {
'stable',
},
'components': {
'main',
},
},
},
},
'nftables': {
'input': {
'tcp dport 8200 accept',
'deb https://repos.influxdata.com/debian {release} stable',
},
},
'influxdb': {
@ -62,7 +47,7 @@ def zfs(metadata):
return {
'zfs': {
'datasets': {
'tank/influxdb': {
f"{metadata.get('zfs/storage_classes/ssd')}/influxdb": {
'mountpoint': '/var/lib/influxdb',
'recordsize': '8192',
'atime': 'off',
@ -78,7 +63,7 @@ def zfs(metadata):
def dns(metadata):
return {
'dns': {
metadata.get('influxdb/hostname'): repo.libs.ip.get_a_records(metadata),
metadata.get('influxdb/hostname'): repo.libs.dns.get_a_records(metadata),
}
}

View file

@ -19,7 +19,7 @@ def apt(metadata):
return {
'apt': {
'packages': {
f'openjdk-{metadata.get("java/version")}-jre-headless': {},
f'openjdk-{metadata.get("java/version")}-jre': {},
}
}
}

View file

@ -1,21 +0,0 @@
from json import dumps
from bundlewrap.metadata import MetadataJSONEncoder
files = {
'/etc/kea/kea-dhcp4.conf': {
'content': dumps(node.metadata.get('kea'), indent=4, sort_keys=True, cls=MetadataJSONEncoder),
'triggers': [
'svc_systemd:kea-dhcp4-server:restart',
],
},
}
svc_systemd = {
'kea-dhcp4-server': {
'needs': [
'pkg_apt:kea-dhcp4-server',
'file:/etc/kea/kea-dhcp4.conf',
'svc_systemd:systemd-networkd:restart',
],
},
}

View file

@ -1,96 +0,0 @@
from ipaddress import ip_interface, ip_network
hashable = repo.libs.hashable.hashable
defaults = {
'apt': {
'packages': {
'kea-dhcp4-server': {},
},
},
'kea': {
'Dhcp4': {
'interfaces-config': {
'interfaces': set(),
},
'lease-database': {
'type': 'memfile',
'lfc-interval': 3600
},
'subnet4': set(),
'loggers': set([
hashable({
'name': 'kea-dhcp4',
'output_options': [
{
'output': 'syslog',
}
],
'severity': 'INFO',
}),
]),
},
},
}
@metadata_reactor.provides(
'kea/Dhcp4/interfaces-config/interfaces',
'kea/Dhcp4/subnet4',
)
def subnets(metadata):
subnet4 = set()
interfaces = set()
reservations = set(
hashable({
'hw-address': network_conf['mac'],
'ip-address': str(ip_interface(network_conf['ipv4']).ip),
})
for other_node in repo.nodes
for network_conf in other_node.metadata.get('network', {}).values()
if 'mac' in network_conf
)
for network_name, network_conf in metadata.get('network').items():
dhcp_server_config = network_conf.get('dhcp_server_config', None)
if dhcp_server_config:
_network = ip_network(dhcp_server_config['subnet'])
subnet4.add(hashable({
'subnet': dhcp_server_config['subnet'],
'pools': [
{
'pool': f"{dhcp_server_config['pool_from']} - {dhcp_server_config['pool_to']}",
},
],
'option-data': [
{
'name': 'routers',
'data': dhcp_server_config['router'],
},
{
'name': 'domain-name-servers',
'data': '10.0.10.2',
},
],
'reservations': set(
reservation
for reservation in reservations
if ip_interface(reservation['ip-address']).ip in _network
),
}))
interfaces.add(network_conf.get('interface', network_name))
return {
'kea': {
'Dhcp4': {
'interfaces-config': {
'interfaces': interfaces,
},
'subnet4': subnet4,
},
},
}

View file

@ -1,40 +0,0 @@
hostname "CroneKorkN : ${name}"
sv_contact "admin@sublimity.de"
sv_steamgroup "${','.join(steamgroups)}"
rcon_password "${rcon_password}"
motd_enabled 0
sv_cheats 1
sv_consistency 0
sv_lan 0
sv_allow_lobby_connect_only 0
sv_gametypes "coop,realism,survival,versus,teamversus,scavenge,teamscavenge"
sv_minrate 30000
sv_maxrate 60000
sv_mincmdrate 66
sv_maxcmdrate 101
sv_logsdir "logs-${name}" //Folder in the game directory where server logs will be stored.
log on //Creates a logfile (on | off)
sv_logecho 0 //default 0; Echo log information to the console.
sv_logfile 1 //default 1; Log server information in the log file.
sv_log_onefile 0 //default 0; Log server information to only one file.
sv_logbans 1 //default 0; Log server bans in the server logs.
sv_logflush 0 //default 0; Flush the log files to disk on each write (slow).

View file

@ -1,122 +1,106 @@
assert node.has_bundle('steam') and node.has_bundle('steam-workshop-download')
directories = {
'/opt/steam/left4dead2-servers': {
'/opt/left4dead2': {
'owner': 'steam',
'group': 'steam',
'mode': '0755',
'purge': True,
},
# Current ZFS doesn't support being used as an overlayfs upperdir; upstream support was only added in October 2022.
# Move the upperdir - unused anyway - to another directory, and keep the workdir next to it, since both have to be on the same filesystem.
'/opt/steam-zfs-overlay-workarounds': {
'/opt/left4dead2/ems/admin system': {
'owner': 'steam',
},
'/opt/left4dead2/left4dead2/cfg': {
'owner': 'steam',
},
'/opt/left4dead2/left4dead2/addons': {
'owner': 'steam',
'group': 'steam',
'mode': '0755',
'purge': True,
},
}
# /opt/steam/steam/.steam/sdk32/steamclient.so: cannot open shared object file: No such file or directory
symlinks = {
'/opt/steam/steam/.steam/sdk32': {
'target': '/opt/steam/steam/linux32',
files = {
'/opt/left4dead2/ems/admin system/admins.txt': {
'owner': 'steam',
'group': 'steam',
'content': '\n'.join(node.metadata.get('left4dead2/admins')),
}
}
#
# SERVERS
#
for name, config in node.metadata.get('left4dead2/servers').items():
#overlay
directories[f'/opt/steam/left4dead2-servers/{name}'] = {
'owner': 'steam',
'group': 'steam',
}
directories[f'/opt/steam-zfs-overlay-workarounds/{name}/upper'] = {
'owner': 'steam',
'group': 'steam',
}
directories[f'/opt/steam-zfs-overlay-workarounds/{name}/workdir'] = {
'owner': 'steam',
'group': 'steam',
}
# conf
files[f'/opt/steam/left4dead2-servers/{name}/left4dead2/cfg/server.cfg'] = {
'content_type': 'mako',
'source': 'server.cfg',
'context': {
'name': name,
'steamgroups': node.metadata.get('left4dead2/steamgroups'),
'rcon_password': config['rcon_password'],
},
'owner': 'steam',
'group': 'steam',
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
],
}
# service
svc_systemd[f'left4dead2-{name}.service'] = {
svc_systemd = {
'left4dead2-workshop': {
'running': False,
'needs': [
f'file:/opt/steam/left4dead2-servers/{name}/left4dead2/cfg/server.cfg',
f'file:/usr/local/lib/systemd/system/left4dead2-{name}.service',
'svc_systemd:steam-update',
],
}
},
}
#
# ADDONS
#
# base
files[f'/opt/steam/left4dead2-servers/{name}/left4dead2/addons/readme.txt'] = {
'content_type': 'any',
for id in node.metadata.get('left4dead2/workshop'):
directories[f'/opt/left4dead2/left4dead2/addons/{id}'] = {
'owner': 'steam',
'group': 'steam',
}
directories[f'/opt/steam/left4dead2-servers/{name}/left4dead2/addons'] = {
'owner': 'steam',
'group': 'steam',
'purge': True,
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
'svc_systemd:left4dead2-workshop:restart',
],
}
for id in [
*config.get('workshop', []),
*node.metadata.get('left4dead2/workshop'),
]:
files[f'/opt/steam/left4dead2-servers/{name}/left4dead2/addons/{id}.vpk'] = {
'content_type': 'any',
'owner': 'steam',
'group': 'steam',
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
],
}
# admin system
server_units = set()
for name, config in node.metadata.get('left4dead2/servers').items():
config.pop('port')
config = {
'hostname': name,
'sv_steamgroup': ','.join(
str(gid) for gid in node.metadata.get('left4dead2/steamgroups')
),
'z_difficulty': 'Impossible',
'sv_gametypes': 'realism',
'sv_region': 3, # europe
'log': 'on',
'sv_logecho': 1,
'sv_logfile': 1,
'sv_log_onefile': 0,
'sv_logbans': 1,
'sv_logflush': 0,
'sv_logsdir': 'logs', # /opt/left4dead2/left4dead2/logs
**config,
}
files[f'/opt/left4dead2/left4dead2/cfg/server-{name}.cfg'] = {
'content': '\n'.join(
f'{key} "{value}"' for key, value in sorted(config.items())
) + '\n',
'owner': 'steam',
'triggers': [
f'svc_systemd:left4dead2-server-{name}:restart',
],
}
svc_systemd[f'left4dead2-server-{name}'] = {
'needs': [
f'file:/usr/local/lib/systemd/system/left4dead2-server-{name}.service',
],
}
server_units.add(f'left4dead2-server-{name}')
directories[f'/opt/steam/left4dead2-servers/{name}/left4dead2/ems/admin system'] = {
for id in node.metadata.get('left4dead2/workshop'):
directories[f'/opt/left4dead2/addons/{id}'] = {
'owner': 'steam',
'group': 'steam',
'mode': '0755',
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
],
}
files[f'/opt/steam/left4dead2-servers/{name}/left4dead2/ems/admin system/admins.txt'] = {
'owner': 'steam',
'group': 'steam',
'mode': '0755',
'content': '\n'.join(sorted(node.metadata.get('left4dead2/admins'))),
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
'svc_systemd:left4dead2-workshop:restart',
],
}
# TIDYUP
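# stop, disable and delete left4dead2-server-*.service unit files that no longer correspond to a server defined in metadata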
find_obsolete_units = (
'find /usr/local/lib/systemd/system -type f -name "left4dead2-server-*.service" ' +
' '.join(f"! -name '{name}.service'" for name in server_units)
)
actions['remove_obsolete_left4dead2_units'] = {
'command': (
f'for unitfile in $({find_obsolete_units}); '
f'do '
f'systemctl stop $(basename "$unitfile"); '
f'systemctl disable $(basename "$unitfile"); '
f'rm "$unitfile"; '
f'systemctl daemon-reload; '
f'done'
),
'unless': (
find_obsolete_units + " | wc -l | grep -q '^0$'"
),
}

View file

@ -5,7 +5,7 @@ from shlex import quote
defaults = {
'steam': {
'games': {
'left4dead2': 222860,
'left4dead2': '222860',
},
},
'left4dead2': {
@ -17,85 +17,64 @@ defaults = {
@metadata_reactor.provides(
'left4dead2/servers',
'systemd/units',
)
def rcon_password(metadata):
# only works from localhost!
def workshop(metadata):
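# shell loop run by the left4dead2-workshop oneshot unit: for every workshop id without a .vpk yet, download it into its addons/<id> directory and unzip it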
command = (
'set -x; '
'for ID in ' + ' '.join(metadata.get('left4dead2/workshop')) + '; '
'do '
'if ! ls /opt/left4dead2/left4dead2/addons/$ID/*.vpk; '
'then '
'cd /opt/left4dead2/left4dead2/addons/$ID; '
'/opt/steam-workshop-downloader "https://steamcommunity.com/sharedfiles/filedetails?id=$ID"; '
'unzip $ID.zip; '
'fi; '
'done'
)
return {
'left4dead2': {
'servers': {
server: {
'rcon_password': repo.vault.password_for(f'{node.name} left4dead2 {server} rcon', length=24),
'systemd': {
'units': {
'left4dead2-workshop.service': {
'Unit': {
'Description': 'install workshop items',
'After': 'network.target',
'Requires': 'steam-update.service',
'PartOf': 'steam-update.service'
},
'Service': {
'Type': 'oneshot',
'User': 'steam',
'ExecStart': f'/bin/bash -c {quote(command)}',
},
'Install': {
'WantedBy': {'multi-user.target'},
},
}
for server in metadata.get('left4dead2/servers')
},
},
}
}
}
@metadata_reactor.provides(
'steam-workshop-download',
'systemd/units',
)
def server_units(metadata):
units = {}
workshop = {}
for name, config in metadata.get('left4dead2/servers').items():
# mount overlay
mountpoint = f'/opt/steam/left4dead2-servers/{name}'
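# systemd mount units must be named after their mountpoint: the leading '/' is dropped, literal '-' is escaped as \x2d and '/' becomes '-' (same scheme as systemd-escape --path)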
mount_unit_name = mountpoint[1:].replace('-', '\\x2d').replace('/', '-') + '.mount'
units[mount_unit_name] = {
'Unit': {
'Description': f"Mount left4dead2 server {name} overlay",
'Conflicts': {'umount.target'},
'Before': {'umount.target'},
},
'Mount': {
'What': 'overlay',
'Where': mountpoint,
'Type': 'overlay',
'Options': ','.join([
'auto',
'lowerdir=/opt/steam/left4dead2',
f'upperdir=/opt/steam-zfs-overlay-workarounds/{name}/upper',
f'workdir=/opt/steam-zfs-overlay-workarounds/{name}/workdir',
]),
},
'Install': {
'RequiredBy': {
f'left4dead2-{name}.service',
},
},
}
# individual workshop
workshop_ids = config.get('workshop', set()) | metadata.get('left4dead2/workshop', set())
if workshop_ids:
workshop[f'left4dead2-{name}'] = {
'ids': workshop_ids,
'path': f'/opt/steam/left4dead2-servers/{name}/left4dead2/addons',
'user': 'steam',
'requires': {
mount_unit_name,
},
'required_by': {
f'left4dead2-{name}.service',
},
}
# left4dead2 server unit
units[f'left4dead2-{name}.service'] = {
units[f'left4dead2-server-{name}.service'] = {
'Unit': {
'Description': f'left4dead2 server {name}',
'After': {'steam-update.service'},
'Requires': {'steam-update.service'},
'After': 'network.target',
'Requires': 'steam-update.service',
},
'Service': {
'User': 'steam',
'Group': 'steam',
'WorkingDirectory': f'/opt/steam/left4dead2-servers/{name}',
'ExecStart': f'/opt/steam/left4dead2-servers/{name}/srcds_run -port {config["port"]} +exec server.cfg',
'WorkingDirectory': '/opt/left4dead2',
'ExecStart': f'/opt/left4dead2/srcds_run -port {config["port"]} -insecure +map {config["map"]} +exec server-{name}.cfg',
'Restart': 'on-failure',
},
'Install': {
@ -104,24 +83,7 @@ def server_units(metadata):
}
return {
'steam-workshop-download': workshop,
'systemd': {
'units': units,
},
}
@metadata_reactor.provides(
'nftables/input',
)
def firewall(metadata):
ports = set(str(server['port']) for server in metadata.get('left4dead2/servers').values())
return {
'nftables': {
'input': {
f"tcp dport {{ {', '.join(sorted(ports))} }} accept",
f"udp dport {{ {', '.join(sorted(ports))} }} accept",
},
},
}

View file

@ -1,6 +1,6 @@
https://github.com/dehydrated-io/dehydrated/wiki/example-dns-01-nsupdate-script
```sh
```
printf "server 127.0.0.1
zone acme.resolver.name.
update add _acme-challenge.ckn.li.acme.resolver.name. 600 IN TXT "hello"

View file

@ -4,7 +4,7 @@ set -o pipefail
deploy_challenge() {
echo "
server ${server}
server 10.0.11.3
zone ${zone}.
update add $1.${zone}. 60 IN TXT \"$3\"
send
@ -13,7 +13,7 @@ deploy_challenge() {
clean_challenge() {
echo "
server ${server}
server 10.0.11.3
zone ${zone}.
update delete $1.${zone}. TXT
send

View file

@ -56,7 +56,6 @@ for domain in node.metadata.get('letsencrypt/domains').keys():
'unless': f'/etc/dehydrated/letsencrypt-ensure-some-certificate {domain} true',
'needs': {
'file:/etc/dehydrated/letsencrypt-ensure-some-certificate',
'pkg_apt:dehydrated',
},
'needed_by': {
'svc_systemd:nginx',

View file

@ -1,41 +0,0 @@
from shlex import quote
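# flatten the nested 'sysctl' metadata into (path, value) pairs,
# e.g. {'net': {'ipv4': {'ip_forward': 1}}} -> (['net', 'ipv4', 'ip_forward'], 1)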
def generate_sysctl_key_value_pairs_from_json(json_data, parents=[]):
if isinstance(json_data, dict):
for key, value in json_data.items():
yield from generate_sysctl_key_value_pairs_from_json(value, [*parents, key])
elif isinstance(json_data, list):
raise ValueError(f"List not supported: '{json_data}'")
else:
# If it's a leaf node, yield the path
yield (parents, json_data)
key_value_pairs = generate_sysctl_key_value_pairs_from_json(node.metadata.get('sysctl'))
files = {
'/etc/sysctl.conf': {
'content': '\n'.join(
sorted(
f"{'.'.join(path)}={value}"
for path, value in key_value_pairs
),
),
'triggers': [
'svc_systemd:systemd-sysctl.service:restart',
],
},
}
svc_systemd = {
'systemd-sysctl.service': {},
}
for path, value in key_value_pairs:
actions[f"reload_sysctl.conf_{'.'.join(path)}"] = {
'command': f"sysctl --values {'.'.join(path)} | grep -q {quote('^'+value+'$')}",
'needs': [
'svc_systemd:systemd-sysctl.service',
'svc_systemd:systemd-sysctl.service:restart',
],
],
}

View file

@ -1,3 +0,0 @@
defaults = {
'sysctl': {},
}

View file

@ -20,19 +20,18 @@ files = {
}
actions = {
'systemd-locale': {
'command': f'localectl set-locale LANG="{default_locale}"',
'unless': f'localectl | grep -Fi "system locale" | grep -Fi "{default_locale}"',
'triggers': {
'action:locale-gen',
},
},
'locale-gen': {
'command': 'locale-gen',
'triggered': True,
'needs': {
'pkg_apt:locales',
'action:systemd-locale',
},
},
'systemd-locale': {
'command': f'localectl set-locale LANG="{default_locale}"',
'unless': f'localectl | grep -Fi "system locale" | grep -Fi "{default_locale}"',
'preceded_by': {
'action:locale-gen',
},
},
}

View file

@ -1,6 +0,0 @@
#!/usr/bin/env bash
cd "$OLDPWD"
export BW_ITEM_WORKERS=$(expr "$(sysctl -n hw.logicalcpu)" '*' 12 '/' 10)
export BW_NODE_WORKERS=$(expr 320 '/' "$BW_ITEM_WORKERS")

View file

@ -1,6 +0,0 @@
#!/usr/bin/env bash
cd "$OLDPWD"
PATH_add "/opt/homebrew/opt/gnu-sed/libexec/gnubin"
PATH_add "/opt/homebrew/opt/grep/libexec/gnubin"

View file

@ -1,46 +0,0 @@
#!/bin/bash -l
sudo tee /etc/pam.d/sudo << EOT
# sudo: auth account password session
auth sufficient pam_tid.so
auth sufficient pam_smartcard.so
auth required pam_opendirectory.so
account required pam_permit.so
password required pam_deny.so
session required pam_permit.so
EOT
sudo xcodebuild -license accept
xcode-select --install
git -C ~/.zsh/oh-my-zsh pull
brew upgrade
brew upgrade --cask --greedy
pyenv install --skip-existing
sudo softwareupdate -ia --verbose
if test "$(defaults read com.apple.dock autohide-time-modifier)" == 0.16
then
defaults write com.apple.dock autohide-time-modifier -float 0.16
RESTART_DOCK=TRUE
fi
if test "$(defaults read com.apple.dock autohide-delay)" -ne 0
then
defaults write com.apple.dock autohide-delay -float 0
RESTART_DOCK=TRUE
fi
if test "$RESTART_DOCK" = TRUE
then
killall Dock
fi
sudo systemsetup -setremotelogin on # enable ssh
pip install --upgrade pip
# https://sysadmin-journal.com/apache-directory-studio-on-the-apple-m1/

View file

@ -1,9 +0,0 @@
#!/usr/bin/env bash
cd "$OLDPWD"
if test -f .venv/bin/python && test "$(realpath .venv/bin/python)" != "$(realpath "$(pyenv which python)")"
then
echo "rebuilding venv für new python version"
rm -rf .venv .pip_upgrade_timestamp
fi

View file

@ -1,3 +0,0 @@
#!/usr/bin/env bash
cd "$OLDPWD"

View file

@ -1,27 +0,0 @@
#!/usr/bin/env bash
cd "$OLDPWD"
python3 -m venv .venv
source .venv/bin/activate
PATH_add .venv/bin
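# upgrade pip and the project requirements at most once per day, tracked via the .pip_upgrade_timestamp file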
NOW=$(date +%s)
if test -e .pip_upgrade_timestamp
then
LAST=$(cat .pip_upgrade_timestamp)
else
LAST=0
fi
DELTA=$(expr "$NOW" - "$LAST")
echo "last pip upgrade $DELTA seconds ago"
if test "$DELTA" -gt 86400
then
python3 -m pip --require-virtualenv install pip wheel --upgrade
python3 -m pip --require-virtualenv install -r requirements.txt --upgrade
if test -e optional-requirements.txt
then
python3 -m pip --require-virtualenv install -r optional-requirements.txt --upgrade
fi
date +%s > .pip_upgrade_timestamp
fi

View file

@ -1,33 +0,0 @@
export PATH=~/.bin:$PATH
export PATH=~/.cargo/bin:$PATH
export ZSH=~/.zsh/oh-my-zsh
export ZSH_HOSTNAME='sm'
ZSH_THEME="bw"
HIST_STAMPS="yyyy/mm/dd"
plugins=(
zsh-autosuggestions
git
)
source $ZSH/oh-my-zsh.sh
ulimit -S -n 24000
antivir() {
printf 'scanning for viruses' && sleep 1 && printf '.' && sleep 1 && printf '.' && sleep 1 && printf '.' &&
sleep 1 && echo '\nyour computer is safe!'
}
eval "$(rbenv init -)"
eval "$(pyenv init -)"
eval "$(direnv hook zsh)"
eval "$(op completion zsh)"; compdef _op op
# //S/M
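# connect to a node by its reversed dotted name, e.g. "sshn a.b" runs "ssh b.a.smhss.de"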
sshn() {
ssh "$(tr '.' ' ' <<< "$1" | tac -s ' ' | xargs | tr ' ' '.').smhss.de"
}
pingn() {
ping "$(tr '.' ' ' <<< "$1" | tac -s ' ' | xargs | tr ' ' '.').smhss.de"
}

View file

@ -1,47 +0,0 @@
# brew install
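# install every package from the node's 'brew' metadata in one call; skipped when each package already shows up in 'brew leaves' (substring match via grep)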
actions['brew_install'] = {
'command': '/opt/homebrew/bin/brew install ' + ' '.join(node.metadata.get('brew')),
'unless': f"""PKGS=$(/opt/homebrew/bin/brew leaves); for p in {' '.join(node.metadata.get('brew'))}; do grep -q "$p" <<< $PKGS || exit 9; done"""
}
# bw init
directories['/Users/mwiegand/.config/bundlewrap/lock'] = {}
# home
files['/Users/mwiegand/.zshrc'] = {
'source': 'zshrc',
'mode': '0644',
}
# updater
files['/Users/mwiegand/.bin/macbook-update'] = {
'mode': '755',
}
with open(f'{repo.path}/bundles/zsh/files/bw.zsh-theme') as f:
files['/Users/mwiegand/.zsh/oh-my-zsh/themes/bw.zsh-theme'] = {
'content': f.read(),
'mode': '0644',
}
# direnv
directories['/Users/mwiegand/.local/share/direnv'] = {}
files['/Users/mwiegand/.local/share/direnv/gnu'] = {}
files['/Users/mwiegand/.local/share/direnv/pyenv'] = {}
files['/Users/mwiegand/.local/share/direnv/venv'] = {}
files['/Users/mwiegand/.local/share/direnv/bundlewrap'] = {}
##################
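# default owner/group for every file and directory above; the trailing **element re-applies explicitly set values so they take precedence over these defaults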
for element in [*files.values(), *directories.values()]:
element.update({
'owner': 'mwiegand',
'group': 'staff',
**element,
})

View file

@ -1,3 +0,0 @@
defaults = {
'brew': {},
}

View file

@ -1,6 +1,6 @@
<?php
// https://raw.githubusercontent.com/Radiergummi/autodiscover/master/autodiscover/autodiscover.php
/********************************
* Autodiscover responder
@ -8,45 +8,45 @@
* This PHP script is intended to respond to any request to http(s)://mydomain.com/autodiscover/autodiscover.xml.
* If configured properly, it will send a spec-complient autodiscover XML response, pointing mail clients to the
* appropriate mail services.
* If you use MAPI or ActiveSync, stick with the Autodiscover service your mail server provides for you. But if
* If you use MAPI or ActiveSync, stick with the Autodiscover service your mail server provides for you. But if
* you use POP/IMAP servers, this will provide autoconfiguration to Outlook, Apple Mail and mobile devices.
*
* To work properly, you'll need to set the service (sub)domains below in the settings section to the correct
* To work properly, you'll need to set the service (sub)domains below in the settings section to the correct
* domain names, adjust ports and SSL.
*/
//get raw POST data so we can extract the email address
$request = file_get_contents("php://input");
// optional debug log
# file_put_contents( 'request.log', $request, FILE_APPEND );
// retrieve email address from client request
preg_match( "/\<EMailAddress\>(.*?)\<\/EMailAddress\>/", $request, $email );
// check for invalid mail, to prevent XSS
if (filter_var($email[1], FILTER_VALIDATE_EMAIL) === false) {
throw new Exception('Invalid E-Mail provided');
}
// get domain from email address
$domain = substr( strrchr( $email[1], "@" ), 1 );
/**************************************
* Port and server settings below *
**************************************/
// IMAP settings
$imapServer = 'imap.' . $domain; // imap.example.com
$imapPort = 993;
$imapSSL = true;
// SMTP settings
$smtpServer = 'smtp.' . $domain; // smtp.example.com
$smtpPort = 587;
$smtpSSL = true;
//set Content-Type
header( 'Content-Type: application/xml' );
?>
<?php echo '<?xml version="1.0" encoding="utf-8" ?>'; ?>

View file

@ -24,7 +24,6 @@ def nginx(metadata):
'context': {
'root': f"/var/www/{metadata.get('mailserver/autoconfig_hostname')}",
},
'check_path': '/mail/config-v1.1.xml',
},
},
},
@ -60,7 +59,7 @@ def letsencrypt(metadata):
)
def autoconfig(metadata):
dns = {}
for domain in metadata.get('mailserver/domains'):
dns.update({
f'autoconfig.{domain}': {
@ -88,7 +87,7 @@ def autoconfig(metadata):
'SRV': {f"0 1 993 {metadata.get('mailserver/hostname')}."},
},
})
return {
'dns': dns,
}

View file

@ -1,12 +1 @@
mailserver
==========
argon2 hashes
-------------
`echo -n 'WarumGehtDasNicht?' | argon2 FAPf+gTwqTRr+3H0cDktqw`
logs
----
`journalctl -u postfix@-.service -u dovecot.service -u rspamd.service -o cat -f`
echo -n 'WarumGehtDasNicht?' | argon2 FAPf+gTwqTRr+3H0cDktqw

View file

@ -33,12 +33,6 @@ defaults = {
'mountpoint': '/var/vmail',
'compression': 'on',
},
'tank/vmail/index': {
'mountpoint': '/var/vmail/index',
'compression': 'on',
'com.sun:auto-snapshot': 'false',
'backup': False,
},
},
},
}
@ -49,30 +43,12 @@ defaults = {
)
def dns(metadata):
dns = {}
for domain in metadata.get('mailserver/domains'):
dns[domain] = {
'MX': [f"5 {metadata.get('mailserver/hostname')}."],
'TXT': ['v=spf1 a mx -all'],
}
report_email = metadata.get('mailserver/dmarc_report_email')
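# joins to something like: "v=DMARC1; p=reject; rua=mailto:<report_email>; fo=1; ruf=mailto:<report_email>; adkim=s; aspf=s"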
dns[f'_dmarc.{domain}'] = {
'TXT': ['; '.join(f'{k}={v}' for k, v in {
# dmarc version
'v': 'DMARC1',
# reject on failure
'p': 'reject',
# standard reports
'rua': f'mailto:{report_email}',
# forensic reports
'fo': 1,
'ruf': f'mailto:{report_email}',
# require alignment between the DKIM domain and the parent Header From domain
'adkim': 's',
# require alignment between the SPF domain (the sender) and the Header From domain
'aspf': 's',
}.items())]
}
return {
'dns': dns,
@ -90,4 +66,4 @@ def letsencrypt(metadata):
},
},
},
}
}

View file

@ -1 +0,0 @@
https://mariadb.com/kb/en/systemd/#configuring-mariadb-to-write-the-error-log-to-syslog

View file

@ -1,11 +0,0 @@
% for section, options in sorted(conf.items()):
[${section}]
% for key, value in sorted(options.items()):
% if value is None:
${key}
% else:
${key} = ${value}
% endif
% endfor
% endfor

View file

@ -1,91 +0,0 @@
from shlex import quote
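# render a non-interactive mariadb CLI call; keyword arguments become "--key value" flags,
# e.g. mariadb("SHOW TABLES", database='foo') -> mariadb --database foo -Bsr --execute 'SHOW TABLES'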
def mariadb(sql, **kwargs):
kwargs_string = ''.join(f" --{k} {v}" for k, v in kwargs.items())
return f"mariadb{kwargs_string} -Bsr --execute {quote(sql)}"
directories = {
'/var/lib/mysql': {
'owner': 'mysql',
'group': 'mysql',
'needs': [
'zfs_dataset:tank/mariadb',
],
'needed_by': [
'pkg_apt:mariadb-server',
'pkg_apt:mariadb-client',
],
},
}
files = {
'/etc/mysql/conf.d/override.conf': {
'context': {
'conf': node.metadata.get('mariadb/conf'),
},
'content_type': 'mako',
},
}
svc_systemd = {
'mariadb.service': {
'needs': [
'pkg_apt:mariadb-server',
'pkg_apt:mariadb-client',
],
},
}
actions = {
'mariadb_sec_remove_anonymous_users': {
'command': mariadb("DELETE FROM mysql.global_priv WHERE User=''"),
'unless': mariadb("SELECT count(0) FROM mysql.global_priv WHERE User = ''") + " | grep -q '^0$'",
'needs': [
'svc_systemd:mariadb.service',
],
'triggers': [
'svc_systemd:mariadb.service:restart',
],
},
'mariadb_sec_remove_remote_root': {
'command': mariadb("DELETE FROM mysql.global_priv WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1')"),
'unless': mariadb("SELECT count(0) FROM mysql.global_priv WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1')") + " | grep -q '^0$'",
'needs': [
'svc_systemd:mariadb.service',
],
'triggers': [
'svc_systemd:mariadb.service:restart',
],
},
}
for db, conf in node.metadata.get('mariadb/databases', {}).items():
actions[f'mariadb_create_database_{db}'] = {
'command': mariadb(f"CREATE DATABASE {db}"),
'unless': mariadb(f"SHOW DATABASES LIKE '{db}'") + f" | grep -q '^{db}$'",
'needs': [
'svc_systemd:mariadb.service',
],
}
actions[f'mariadb_user_{db}_create'] = {
'command': mariadb(f"CREATE USER {db}"),
'unless': mariadb(f"SELECT User FROM mysql.user WHERE User = '{db}'") + f" | grep -q '^{db}$'",
'needs': [
f'action:mariadb_create_database_{db}',
],
}
pw = conf['password']
actions[f'mariadb_user_{db}_password'] = {
'command': mariadb(f"SET PASSWORD FOR {db} = PASSWORD('{conf['password']}')"),
'unless': f'echo {quote(pw)} | mariadb -u {db} -e quit -p',
'needs': [
f'action:mariadb_user_{db}_create',
],
}
actions[f'mariadb_grant_privileges_to_{db}'] = {
'command': mariadb(f"GRANT ALL PRIVILEGES ON {db}.* TO '{db}'", database=db),
'unless': mariadb(f"SHOW GRANTS FOR {db}") + f" | grep -q '^GRANT ALL PRIVILEGES ON `{db}`.* TO `{db}`@`%`'",
'needs': [
f'action:mariadb_user_{db}_create',
],
}

View file

@ -1,45 +0,0 @@
defaults = {
'apt': {
'packages': {
'mariadb-server': {
'needs': {
'zfs_dataset:tank/mariadb',
},
},
'mariadb-client': {
'needs': {
'zfs_dataset:tank/mariadb',
},
},
},
},
'mariadb': {
'databases': {},
'conf': {
# https://www.reddit.com/r/zfs/comments/u1xklc/mariadbmysql_database_settings_for_zfs
'mysqld': {
'skip-innodb_doublewrite': None,
'innodb_flush_method': 'fsync',
'innodb_doublewrite': '0',
'innodb_use_atomic_writes': '0',
'innodb_use_native_aio': '0',
'innodb_read_io_threads': '10',
'innodb_write_io_threads': '10',
'innodb_buffer_pool_size': '26G',
'innodb_flush_log_at_trx_commit': '1',
'innodb_log_file_size': '1G',
'innodb_flush_neighbors': '0',
'innodb_fast_shutdown': '2',
},
},
},
'zfs': {
'datasets': {
'tank/mariadb': {
'mountpoint': '/var/lib/mysql',
'recordsize': '16384',
'atime': 'off',
},
},
},
}

View file

@ -1,88 +0,0 @@
#!/usr/bin/env python3
from requests import Session
from requests.exceptions import ConnectionError
from sys import argv
from time import sleep, time
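# usage of the rendered /usr/local/bin/downtime script:
#   downtime            schedule a 24h downtime for this node
#   downtime <seconds>  schedule a downtime of the given length
#   downtime remove     only remove downtimes previously set by this script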
if len(argv) > 1 and argv[1] == "remove":
action = "remove"
else:
action = "add"
duration_seconds = int(argv[1]) if len(argv) == 2 and action == "add" else 60 * 60 * 24
author = 'downtime-script'
node_name = '${node_name}'
api_url = 'https://${icinga_hostname}/api/v1'
session = Session()
session.auth = ('root', '${icinga_password}')
now = int(time())
# wait online
for _ in range(10):
try:
session.get(api_url).raise_for_status()
except ConnectionError as error:
print(f'{error}: retrying...')
sleep(3)
else:
break
# look for existing downtimes
response = session.get(
f'{api_url}/objects/downtimes',
headers={
'Content-Type': 'application/json',
'Accept': 'application/json',
},
json={
'filter': f'match("{node_name}", host.name), match("{author}", downtime.author)',
}
)
response.raise_for_status()
# remove existing downtimes
if response.json()['results']:
response = session.post(
f'{api_url}/actions/remove-downtime',
headers={
'Content-Type': 'application/json',
'Accept': 'application/json',
},
json={
'type': 'Downtime',
'filter': f'match("{node_name}", host.name), match("{author}", downtime.author)',
'pretty': True,
}
)
response.raise_for_status()
print('removed downtime')
# add downtime
if action == 'add':
response = session.post(
f'{api_url}/actions/schedule-downtime',
headers={
'Content-Type': 'application/json',
'Accept': 'application/json',
},
json={
'author': author,
'comment': f'downtime by {argv[0]}',
'start_time': now,
'end_time': now + duration_seconds,
'type': 'Host',
'child_options': 'DowntimeTriggeredChildren',
'all_services': True,
'filter': f'match("{node_name}", host.name)',
'pretty': True,
}
)
response.raise_for_status()
print('added downtime')

View file

@ -1,13 +0,0 @@
icinga_node = repo.get_node(node.metadata.get('monitoring/icinga2_node'))
files = {
'/usr/local/bin/downtime': {
'content_type': 'mako',
'mode': '0750',
'context': {
'node_name': node.name,
'icinga_hostname': icinga_node.metadata.get('icinga2/hostname'),
'icinga_password': icinga_node.metadata.get('icinga2/api_users/root/password'),
},
},
}

View file

@ -1,10 +1,9 @@
defaults = {
'monitoring': {
'services': {
# 'test': {
# 'vars.command': '/bin/ls /',
# 'vars.sudo': True,
# },
'test': {
'vars.command': '/bin/ls /',
},
},
},
}
@ -51,7 +50,6 @@ def user(metadata):
conf['vars.command']
for conf in metadata.get('monitoring/services').values()
if conf['check_command'] == 'sshmon'
and conf.get('vars.sudo', None)
},
},
}

View file

@ -1,35 +1,9 @@
from ipaddress import ip_interface
defaults = {
'network': {},
}
@metadata_reactor.provides(
'network',
)
def dhcp(metadata):
networks = {}
for network_name, network_conf in metadata.get('network').items():
_interface = ip_interface(network_conf['ipv4'])
_ip = _interface.ip
_network = _interface.network
_hosts = list(_network.hosts())
if network_conf.get('dhcp_server', False):
networks[network_name] = {
'dhcp_server_config': {
'subnet': str(_network),
'pool_from': str(_hosts[len(_hosts)//2]),
'pool_to': str(_hosts[-3]),
'router': str(_ip),
'domain-name-servers': str(_ip),
}
}
return {
'network': networks,
'network': {
}
}
@metadata_reactor.provides(
@ -37,58 +11,34 @@ def dhcp(metadata):
)
def units(metadata):
units = {}
for network_name, network_conf in metadata.get('network').items():
interface_type = network_conf.get('type', None)
# network
units[f'{network_name}.network'] = {
for type, network in metadata.get('network').items():
units[f'{type}.network'] = {
'Match': {
'Name': network_name if interface_type == 'vlan' else network_conf['interface'],
'Name': network['interface'],
},
'Network': {
'DHCP': network_conf.get('dhcp', 'no'),
'IPv6AcceptRA': network_conf.get('dhcp', 'no'),
'VLAN': set(network_conf.get('vlans', set()))
'DHCP': 'no',
'IPv6AcceptRA': 'no',
}
}
# type
if interface_type:
units[f'{network_name}.network']['Match']['Type'] = interface_type
# ips
for i in [4, 6]:
if network_conf.get(f'ipv{i}', None):
units[f'{network_name}.network'].update({
if network.get(f'ipv{i}', None):
units[f'{type}.network'].update({
f'Address#ipv{i}': {
'Address': network_conf[f'ipv{i}'],
'Address': network[f'ipv{i}'],
},
})
if f'gateway{i}' in network_conf:
units[f'{network_name}.network'].update({
if f'gateway{i}' in network:
units[f'{type}.network'].update({
f'Route#ipv{i}': {
'Gateway': network_conf[f'gateway{i}'],
'Gateway': network[f'gateway{i}'],
'GatewayOnlink': 'yes',
}
})
# as vlan
if interface_type == 'vlan':
units[f"{network_name}.netdev"] = {
'NetDev': {
'Name': network_name,
'Kind': 'vlan',
},
'VLAN': {
'Id': network_conf['id'],
}
}
return {
'systemd': {
'units': units,

View file

@ -72,7 +72,7 @@ if [ "$SCAN" == "TRUE" ]; then
sudo -u www-data php /opt/nextcloud/occ files:scan --path "$REL_SOURCE_PATH"
sudo -u www-data php /opt/nextcloud/occ files:scan --path "$REL_UNSORTABLE_PATH"
sudo -u www-data php /opt/nextcloud/occ files:scan --path "$REL_DEST_PATH"
#sudo -u www-data php /opt/nextcloud/occ preview:pre-generate
sudo -u www-data php /opt/nextcloud/occ preview:pre-generate
fi
echo "FINISH."

View file

@ -1,3 +1,42 @@
<?php
# https://docs.nextcloud.com/server/stable/admin_manual/configuration_server/config_sample_php_parameters.html#multiple-config-php-file
$CONFIG = json_decode(file_get_contents("/etc/nextcloud/managed.config.json"), $associative = true);
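# the JSON is rendered by the bundle from the node's 'nextcloud/config' metadata (managed.config.json in the nextcloud bundle)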
$CONFIG = array (
"dbuser" => "nextcloud",
"dbpassword" => "${db_password}",
"dbname" => "nextcloud",
"dbhost" => "localhost",
"dbtype" => "pgsql",
"datadirectory" => "/var/lib/nextcloud",
"dbport" => "5432",
"apps_paths" => [
[
"path" => "/opt/nextcloud/apps",
"url" => "/apps",
"writable" => false,
],
[
"path" => "/var/lib/nextcloud/.userapps",
"url" => "/userapps",
"writable" => true,
],
],
"cache_path" => "/var/lib/nextcloud/.cache",
"upgrade.disable-web" => true,
"memcache.local" => "\\OC\\Memcache\\Redis",
"memcache.locking" => "\\OC\\Memcache\\Redis",
"memcache.distributed" => "\OC\Memcache\Redis",
"redis" => [
"host" => "/var/run/redis/nextcloud.sock",
],
'trusted_domains' =>
array (
0 => 'localhost',
1 => '127.0.0.1',
2 => '${hostname}',
),
"log_type" => "syslog",
"syslog_tag" => "nextcloud",
"logfile" => "",
"loglevel" => 3,
"default_phone_region" => "DE",
);

View file

@ -2,4 +2,4 @@
php /opt/nextcloud/occ files:scan --all
php /opt/nextcloud/occ files:scan-app-data
#php /opt/nextcloud/occ preview:generate-all
php /opt/nextcloud/occ preview:generate-all

View file

@ -1,8 +1,9 @@
import json
assert node.has_bundle('php')
from shlex import quote
from os.path import join
from mako.template import Template
version = node.metadata.get('nextcloud/version')
directories = {
@ -68,22 +69,19 @@ symlinks = {
files = {
'/etc/nextcloud/managed.config.php': {
'content_type': 'mako',
'owner': 'www-data',
'group': 'www-data',
'mode': '640',
'context': {
'db_password': node.metadata.get('postgresql/roles/nextcloud/password'),
'hostname': node.metadata.get('nextcloud/hostname'),
},
'needs': [
'directory:/etc/nextcloud',
],
},
'/etc/nextcloud/managed.config.json': {
'content': json.dumps(node.metadata.get('nextcloud/config'), indent=4, sort_keys=True),
'owner': 'www-data',
'group': 'www-data',
'mode': '640',
'needs': [
'directory:/etc/nextcloud',
],
},}
}
# SETUP
@ -126,7 +124,7 @@ files['/opt/nextcloud_upgrade_status.php'] = {
'action:extract_nextcloud',
],
}
actions['upgrade_nextcloud'] = {
'command': repo.libs.nextcloud.occ('upgrade'),
'unless': 'sudo -u www-data php /opt/nextcloud_upgrade_status.php; test $? -ne 99',

Some files were not shown because too many files have changed in this diff