Compare commits


57 commits
n8n ... master

Author SHA1 Message Date
90c02e58bf
fix rack switch hostname 2025-07-11 23:58:04 +02:00
8829902e0b
fix indent 2025-07-11 23:55:33 +02:00
e7c5fe9213
fix set not dict 2025-07-11 23:55:02 +02:00
5a1ce55086
fix 2025-07-11 23:52:58 +02:00
cca320e2f4
pppoe 2025-07-11 23:49:46 +02:00
e4e3c57f20
pppoe telekom 2025-07-11 20:44:05 +02:00
5274639ca3
bootshorn recording 2025-07-11 19:10:49 +02:00
3e5ed906bc
cake traffic shaping 2025-07-10 20:39:24 +02:00
9a519432b0
nodes/home.switch-rack-poe.py: introduce 2025-07-10 10:34:27 +02:00
6a3424faf4
fix provides 2025-07-10 09:19:20 +02:00
19a8d28a24
homeassistant os is dummy 2025-07-08 20:10:50 +02:00
a52d9b052f
home.backups fiber 2025-07-06 18:23:09 +02:00
db56385513
vlan interface in vlan netwrok, not in seperate list 2025-07-01 12:20:39 +02:00
7ab96e6a47
router as dns relay 2025-07-01 11:43:22 +02:00
c37bca287e Merge pull request 'routeros' (#23) from routeros into master
Reviewed-on: #23
2025-07-01 11:32:27 +02:00
d17f6da77a
tidy up and try home dns server 2025-07-01 11:30:57 +02:00
460f809403
more routeros 2025-07-01 11:30:57 +02:00
0e6a705d3f
routeros switches ok 2025-07-01 11:30:57 +02:00
d54eff344f
routeros wip 2025-07-01 11:30:37 +02:00
79a54578b8
yourls remove temp leftovers 2025-06-30 09:53:31 +02:00
1d8f20ff25
yurlls fix monitoring and use dehydrated certs 2025-06-29 14:46:39 +02:00
d3b8e2e414
mailman 2025-06-29 12:37:09 +02:00
85daf26174
routeros 2025-06-29 12:32:18 +02:00
53933957a4 Merge pull request 'proxmox_mergable' (#22) from proxmox_mergable into master
Reviewed-on: #22
2025-06-29 12:25:34 +02:00
8d941ebef4
open fw for iperf 2025-06-29 12:24:59 +02:00
800bd90778
remove apcupsd 2025-06-29 12:24:59 +02:00
df38fdb99e
new router 2025-06-29 12:24:59 +02:00
23947bd967
mariadb fixed 2025-06-29 12:24:59 +02:00
32ea52c8f4
mariadb use ini parser 2025-06-29 12:24:59 +02:00
d755267dd9
proxmox 2025-06-29 12:24:50 +02:00
53659b4364
yourls enbale wireguard and backup 2025-06-22 10:57:15 +02:00
0035dd1e6f
remove duplicate 2025-06-22 10:56:33 +02:00
c8680b06ac
remove l4d2 server 2025-06-22 10:56:24 +02:00
3f82d0fc57
fix temp dir, its not a file 2025-06-22 10:55:55 +02:00
5d95a33c5a Merge pull request 'mseibert_yourls and many other fixes' (#19) from mseibert_yourls into master
Reviewed-on: #19
2025-06-22 10:09:58 +02:00
aeb0a4fbe7
nodes/mseibert.yourls.py: introduce 2025-06-22 10:07:10 +02:00
9e139fd422
fix remove leftover 2025-06-22 10:03:38 +02:00
9733a55942
svc_systemd:systemd-networkd add .service to name 2025-06-22 09:53:22 +02:00
befdf5ad6e
fixmo mariadb dependency 2025-06-22 09:51:50 +02:00
663116c778
/var/lib/mysql needs mysql user to exist 2025-06-22 09:50:37 +02:00
187b0440c8
nginx use expected dirs and allow websockets in proxy pass 2025-06-22 09:49:27 +02:00
bdb9fa064d
gitea disable registration 2025-06-22 09:40:41 +02:00
d3ba9db0c6
maybe keep etc/kernel/postinst.d/apt-auto-removal? 2025-06-22 09:40:26 +02:00
3dffc05c9d
apt add docs about options 2025-06-22 09:40:13 +02:00
6616ae7417
fix some redis permissions 2025-06-22 09:37:16 +02:00
dc40295dde
print message on parsing group error 2025-06-22 09:36:56 +02:00
1d8361cc5f
cache_to_disk broken 2025-06-22 09:36:21 +02:00
35243fdba6
offsitebackup offline 2025-06-22 09:35:35 +02:00
43e7c1f3e4
fix redis permissions 2025-06-22 09:30:04 +02:00
dcd2ebc49c
dist-upgrade -> full-upgrade 2025-01-16 10:20:34 +01:00
555350eab7
debian update 2025-01-16 10:20:18 +01:00
e117acac04
backup all doesnt stop on first error 2025-01-09 23:41:21 +01:00
16313b9e40
disable tasnomta charge 2025-01-09 22:45:27 +01:00
033a1cf6e5
macbook gnu grep 2025-01-01 13:04:42 +01:00
8befec9769
readme git sign 2024-12-09 09:07:19 +01:00
d22add5bfd
shortcut 2024-12-09 09:03:14 +01:00
69fb93a664
macbook compat 2024-12-09 08:58:14 +01:00
92 changed files with 2447 additions and 440 deletions

View file

@@ -37,3 +37,12 @@ fi
 telegraf: execd for daemons
 TEST
+# git signing
+git config --global gpg.format ssh
+git config --global commit.gpgsign true
+git config user.name CroneKorkN
+git config user.email i@ckn.li
+git config user.signingkey "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILMVroYmswD4tLk6iH+2tvQiyaMe42yfONDsPDIdFv6I"

View file

@@ -23,7 +23,7 @@ for node in nodes:
 print(node.run('DEBIAN_FRONTEND=noninteractive apt update').stdout.decode())
 print(node.run('DEBIAN_FRONTEND=noninteractive apt list --upgradable').stdout.decode())
 if int(node.run('DEBIAN_FRONTEND=noninteractive apt list --upgradable 2> /dev/null | grep upgradable | wc -l').stdout.decode()):
-print(node.run('DEBIAN_FRONTEND=noninteractive apt -y dist-upgrade').stdout.decode())
+print(node.run('DEBIAN_FRONTEND=noninteractive apt -qy full-upgrade').stdout.decode())
 # REBOOT IN ORDER

View file

@@ -13,6 +13,9 @@
 'deb',
 'deb-src',
 },
+'options': { # optional
+'aarch': 'amd64',
+},
 'urls': {
 'https://deb.debian.org/debian',
 },

View file

@@ -62,6 +62,7 @@ files = {
 '/usr/lib/nagios/plugins/check_apt_upgradable': {
 'mode': '0755',
 },
+# /etc/kernel/postinst.d/apt-auto-removal
 }
 actions = {

View file

@@ -1,13 +1,31 @@
 #!/bin/bash
-set -exu
+set -u
 # FIXME: inelegant
 % if wol_command:
 ${wol_command}
 % endif
+exit=0
+failed_paths=""
 for path in $(jq -r '.paths | .[]' < /etc/backup/config.json)
 do
+echo backing up $path
 /opt/backup/backup_path "$path"
+# set exit to 1 if any backup fails
+if [ $? -ne 0 ]
+then
+echo ERROR: backing up $path failed >&2
+exit=5
+failed_paths="$failed_paths $path"
+fi
 done
+if [ $exit -ne 0 ]
+then
+echo "ERROR: failed to backup paths: $failed_paths" >&2
+fi
+exit $exit

View file

@@ -1,6 +1,6 @@
 #!/bin/bash
-set -exu
+set -eu
 path=$1
 uuid=$(jq -r .client_uuid < /etc/backup/config.json)

View file

@@ -10,7 +10,7 @@ options {
 % if type == 'master':
 notify yes;
-also-notify { ${' '.join([f'{ip};' for ip in slave_ips])} };
-allow-transfer { ${' '.join([f'{ip};' for ip in slave_ips])} };
+also-notify { ${' '.join(sorted(f'{ip};' for ip in slave_ips))} };
+allow-transfer { ${' '.join(sorted(f'{ip};' for ip in slave_ips))} };
 % endif
 };

View file

@@ -3,6 +3,7 @@ from json import dumps
 h = repo.libs.hashable.hashable
 repo.libs.bind.repo = repo
 defaults = {
 'apt': {
 'packages': {

160
bundles/bootshorn/files/process Executable file
View file

@ -0,0 +1,160 @@
#!/usr/bin/env python3
import os
import datetime
import numpy as np
import matplotlib.pyplot as plt
import soundfile as sf
from scipy.fft import rfft, rfftfreq
import shutil
import traceback
RECORDINGS_DIR = "recordings"
PROCESSED_RECORDINGS_DIR = "recordings/processed"
DETECTIONS_DIR = "events"
DETECT_FREQUENCY = 211 # Hz
DETECT_FREQUENCY_TOLERANCE = 2 # Hz
ADJACENCY_FACTOR = 2 # area to look for the frequency (e.g. 2 means 100Hz to 400Hz for 200Hz detection)
BLOCK_SECONDS = 3 # seconds (longer means more frequency resolution, but less time resolution)
DETECTION_DISTANCE_SECONDS = 30 # seconds (minimum time between detections)
BLOCK_OVERLAP_FACTOR = 0.9 # overlap between blocks (0.2 means 20% overlap)
MIN_SIGNAL_QUALITY = 1000.0 # minimum ratio of peak amplitude to surrounding noise (relative DB) for a detection to be considered valid
PLOT_PADDING_START_SECONDS = 2 # seconds (padding before and after the event in the plot)
PLOT_PADDING_END_SECONDS = 3 # seconds (padding before and after the event in the plot)
DETECTION_DISTANCE_BLOCKS = DETECTION_DISTANCE_SECONDS // BLOCK_SECONDS # number of blocks to skip after a detection
DETECT_FREQUENCY_FROM = DETECT_FREQUENCY - DETECT_FREQUENCY_TOLERANCE # Hz
DETECT_FREQUENCY_TO = DETECT_FREQUENCY + DETECT_FREQUENCY_TOLERANCE # Hz
def process_recording(filename):
print('processing', filename)
# get ISO 8601 nanosecond recording date from filename
date_string_from_filename = os.path.splitext(filename)[0]
recording_date = datetime.datetime.strptime(date_string_from_filename, "%Y-%m-%d_%H-%M-%S.%f%z")
# get data and metadata from recording
path = os.path.join(RECORDINGS_DIR, filename)
soundfile = sf.SoundFile(path)
samplerate = soundfile.samplerate
samples_per_block = int(BLOCK_SECONDS * samplerate)
overlapping_samples = int(samples_per_block * BLOCK_OVERLAP_FACTOR)
sample_num = 0
current_event = None
while sample_num < len(soundfile):
soundfile.seek(sample_num)
block = soundfile.read(frames=samples_per_block, dtype='float32', always_2d=False)
if len(block) == 0:
break
# calculate FFT
labels = rfftfreq(len(block), d=1/samplerate)
complex_amplitudes = rfft(block)
amplitudes = np.abs(complex_amplitudes)
# get the frequency with the highest amplitude within the search range
search_amplitudes = amplitudes[(labels >= DETECT_FREQUENCY_FROM/ADJACENCY_FACTOR) & (labels <= DETECT_FREQUENCY_TO*ADJACENCY_FACTOR)]
search_labels = labels[(labels >= DETECT_FREQUENCY_FROM/ADJACENCY_FACTOR) & (labels <= DETECT_FREQUENCY_TO*ADJACENCY_FACTOR)]
max_amplitude = max(search_amplitudes)
max_amplitude_index = np.argmax(search_amplitudes)
max_freq = search_labels[max_amplitude_index]
max_freq_detected = DETECT_FREQUENCY_FROM <= max_freq <= DETECT_FREQUENCY_TO
# calculate signal quality
adjacent_amplitudes = amplitudes[(labels < DETECT_FREQUENCY_FROM) | (labels > DETECT_FREQUENCY_TO)]
signal_quality = max_amplitude/np.mean(adjacent_amplitudes)
good_signal_quality = signal_quality > MIN_SIGNAL_QUALITY
# conclude detection
if (
max_freq_detected and
good_signal_quality
):
block_date = recording_date + datetime.timedelta(seconds=sample_num / samplerate)
# detecting an event
if not current_event:
current_event = {
'start_at': block_date,
'end_at': block_date,
'start_sample': sample_num,
'end_sample': sample_num + samples_per_block,
'start_freq': max_freq,
'end_freq': max_freq,
'max_amplitude': max_amplitude,
}
else:
current_event.update({
'end_at': block_date,
'end_freq': max_freq,
'end_sample': sample_num + samples_per_block,
'max_amplitude': max(max_amplitude, current_event['max_amplitude']),
})
print(f'- {block_date.strftime('%Y-%m-%d %H:%M:%S')}: {max_amplitude:.1f}rDB @ {max_freq:.1f}Hz (signal {signal_quality:.3f}x)')
else:
# not detecting an event
if current_event:
duration = (current_event['end_at'] - current_event['start_at']).total_seconds()
current_event['duration'] = duration
print(f'🔊 {current_event['start_at'].strftime('%Y-%m-%d %H:%M:%S')} ({duration:.1f}s): {current_event['start_freq']:.1f}Hz->{current_event['end_freq']:.1f}Hz @{current_event['max_amplitude']:.0f}rDB')
# read full audio clip again for writing
write_event(current_event=current_event, soundfile=soundfile, samplerate=samplerate)
current_event = None
sample_num += DETECTION_DISTANCE_BLOCKS * samples_per_block
sample_num += samples_per_block - overlapping_samples
# write a spectrogram using the sound from start to end of the event
def write_event(current_event, soundfile, samplerate):
# date and filename
event_date = current_event['start_at'] - datetime.timedelta(seconds=PLOT_PADDING_START_SECONDS)
filename_prefix = event_date.strftime('%Y-%m-%d_%H-%M-%S.%f%z')
# event clip
event_start_sample = current_event['start_sample'] - samplerate * PLOT_PADDING_START_SECONDS
event_end_sample = current_event['end_sample'] + samplerate * PLOT_PADDING_END_SECONDS
total_samples = event_end_sample - event_start_sample
soundfile.seek(event_start_sample)
event_clip = soundfile.read(frames=total_samples, dtype='float32', always_2d=False)
# write flac
flac_path = os.path.join(DETECTIONS_DIR, f"{filename_prefix}.flac")
sf.write(flac_path, event_clip, samplerate, format='FLAC')
# write spectrogram
plt.figure(figsize=(8, 6))
plt.specgram(event_clip, Fs=samplerate, NFFT=samplerate, noverlap=samplerate//2, cmap='inferno', vmin=-100, vmax=-10)
plt.title(f"Bootshorn @{event_date.strftime('%Y-%m-%d %H:%M:%S%z')}")
plt.xlabel(f"Time {current_event['duration']:.1f}s")
plt.ylabel(f"Frequency {current_event['start_freq']:.1f}Hz -> {current_event['end_freq']:.1f}Hz")
plt.colorbar(label="Intensity (rDB)")
plt.ylim(50, 1000)
plt.savefig(os.path.join(DETECTIONS_DIR, f"{filename_prefix}.png"))
plt.close()
def main():
os.makedirs(RECORDINGS_DIR, exist_ok=True)
os.makedirs(PROCESSED_RECORDINGS_DIR, exist_ok=True)
for filename in sorted(os.listdir(RECORDINGS_DIR)):
if filename.endswith(".flac"):
try:
process_recording(filename)
except Exception as e:
print(f"Error processing {filename}: {e}")
# print stacktrace
traceback.print_exc()
if __name__ == "__main__":
main()

23
bundles/bootshorn/files/record Executable file
View file

@ -0,0 +1,23 @@
#!/bin/sh
mkdir -p recordings
while true
do
# get date in ISO 8601 format with nanoseconds
PROGRAMM=$(test $(uname) = "Darwin" && echo "gdate" || echo "date")
DATE=$($PROGRAMM "+%Y-%m-%d_%H-%M-%S.%6N%z")
# record audio using ffmpeg
ffmpeg \
-y \
-f pulse \
-i "alsa_input.usb-HANMUS_USB_AUDIO_24BIT_2I2O_1612310-00.analog-stereo" \
-ac 1 \
-ar 96000 \
-sample_fmt s32 \
-t "3600" \
-c:a flac \
-compression_level 12 \
"recordings/$DATE.flac"
done

View file

@ -0,0 +1,47 @@
# nano /etc/selinux/config
# SELINUX=disabled
# reboot
directories = {
'/opt/bootshorn': {
'owner': 'ckn',
'group': 'ckn',
},
'/opt/bootshorn/recordings': {
'owner': 'ckn',
'group': 'ckn',
},
'/opt/bootshorn/recordings': {
'owner': 'ckn',
'group': 'ckn',
},
'/opt/bootshorn/recordings/processed': {
'owner': 'ckn',
'group': 'ckn',
},
'/opt/bootshorn/events': {
'owner': 'ckn',
'group': 'ckn',
},
}
files = {
'/opt/bootshorn/record': {
'owner': 'ckn',
'group': 'ckn',
'mode': '755',
},
'/opt/bootshorn/process': {
'owner': 'ckn',
'group': 'ckn',
'mode': '755',
},
}
svc_systemd = {
'bootshorn-record.service': {
'needs': {
'file:/opt/bootshorn/record',
},
},
}

View file

@ -0,0 +1,37 @@
defaults = {
'systemd': {
'units': {
'bootshorn-record.service': {
'Unit': {
'Description': 'Bootshorn Recorder',
'After': 'network.target',
},
'Service': {
'User': 'ckn',
'Group': 'ckn',
'Type': 'simple',
'WorkingDirectory': '/opt/bootshorn',
'ExecStart': '/opt/bootshorn/record',
'Restart': 'always',
'RestartSec': 5,
'Environment': {
"XDG_RUNTIME_DIR": "/run/user/1000",
"PULSE_SERVER": "unix:/run/user/1000/pulse/native",
},
},
},
},
},
'systemd-timers': {
'bootshorn-process': {
'command': '/opt/bootshorn/process',
'when': 'minutely',
'working_dir': '/opt/bootshorn',
'user': 'ckn',
'group': 'ckn',
'after': {
'bootshorn-process.service',
},
},
},
}

View file

@@ -40,7 +40,7 @@ ENABLE_OPENID_SIGNUP = false
 [service]
 REGISTER_EMAIL_CONFIRM = true
 ENABLE_NOTIFY_MAIL = true
-DISABLE_REGISTRATION = false
+DISABLE_REGISTRATION = true
 ALLOW_ONLY_EXTERNAL_REGISTRATION = false
 ENABLE_CAPTCHA = false
 REQUIRE_SIGNIN_VIEW = false

View file

@@ -26,9 +26,15 @@ defaults = {
 'config': {
 'server': {
 'http_port': 8300,
+'http_addr': '127.0.0.1',
+'enable_gzip': True,
 },
 'database': {
-'url': f'postgres://grafana:{postgres_password}@localhost:5432/grafana',
+'type': 'postgres',
+'host': '127.0.0.1:5432',
+'name': 'grafana',
+'user': 'grafana',
+'password': postgres_password,
 },
 'remote_cache': {
 'type': 'redis',
@@ -133,11 +139,13 @@ def dns(metadata):
 @metadata_reactor.provides(
+'nginx/has_websockets',
 'nginx/vhosts',
 )
 def nginx(metadata):
 return {
 'nginx': {
+'has_websockets': True,
 'vhosts': {
 metadata.get('grafana/hostname'): {
 'content': 'grafana/vhost.conf',

View file

@ -0,0 +1,3 @@
# svc_systemd = {
# 'ifupdown.service': {},
# }

View file

@@ -15,7 +15,7 @@ svc_systemd = {
 'needs': [
 'pkg_apt:kea-dhcp4-server',
 'file:/etc/kea/kea-dhcp4.conf',
-'svc_systemd:systemd-networkd:restart',
+'svc_systemd:systemd-networkd.service:restart',
 ],
 },
 }

View file

@@ -72,7 +72,7 @@ def subnets(metadata):
 },
 {
 'name': 'domain-name-servers',
-'data': '10.0.10.2',
+'data': '10.0.0.1',
 },
 ],
 'reservations': set(

View file

@@ -2,5 +2,5 @@
 cd "$OLDPWD"
-export BW_ITEM_WORKERS=$(expr "$(nproc)" '*' 12 '/' 10)
+export BW_ITEM_WORKERS=$(expr "$(sysctl -n hw.logicalcpu)" '*' 12 '/' 10)
 export BW_NODE_WORKERS=$(expr 320 '/' "$BW_ITEM_WORKERS")

View file

@@ -2,7 +2,5 @@
 cd "$OLDPWD"
-GNU_PATH="$HOME/.local/gnu_bin"
-mkdir -p "$GNU_PATH"
-test -f "$GNU_PATH/sed" || ln -s "$(which gsed)" "$GNU_PATH/sed"
-PATH_add "$GNU_PATH"
+PATH_add "/opt/homebrew/opt/gnu-sed/libexec/gnubin"
+PATH_add "/opt/homebrew/opt/grep/libexec/gnubin"

View file

@ -0,0 +1,22 @@
# This is the mailman extension configuration file to enable HyperKitty as an
# archiver. Remember to add the following lines in the mailman.cfg file:
#
# [archiver.hyperkitty]
# class: mailman_hyperkitty.Archiver
# enable: yes
# configuration: /etc/mailman3/mailman-hyperkitty.cfg
#
[general]
# This is your HyperKitty installation, preferably on the localhost. This
# address will be used by Mailman to forward incoming emails to HyperKitty
# for archiving. It does not need to be publicly available, in fact it's
# better if it is not.
# However, if your Mailman installation is accessed via HTTPS, the URL needs
# to match your SSL certificate (e.g. https://lists.example.com/hyperkitty).
base_url: http://${hostname}/mailman3/hyperkitty/
# The shared api_key, must be identical except for quoting to the value of
# MAILMAN_ARCHIVER_KEY in HyperKitty's settings.
api_key: ${archiver_key}

View file

@ -0,0 +1,190 @@
ACCOUNT_EMAIL_VERIFICATION='none'
# This file is imported by the Mailman Suite. It is used to override
# the default settings from /usr/share/mailman3-web/settings.py.
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '${secret_key}'
ADMINS = (
('Mailman Suite Admin', 'root@localhost'),
)
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.8/ref/settings/#allowed-hosts
# Set to '*' by default in the Debian package to allow all hostnames. Mailman3
# is meant to run behind a webserver reverse proxy anyway.
ALLOWED_HOSTS = [
'${hostname}',
]
# Mailman API credentials
MAILMAN_REST_API_URL = 'http://localhost:8001'
MAILMAN_REST_API_USER = 'restadmin'
MAILMAN_REST_API_PASS = '${api_password}'
MAILMAN_ARCHIVER_KEY = '${archiver_key}'
MAILMAN_ARCHIVER_FROM = ('127.0.0.1', '::1')
# Application definition
INSTALLED_APPS = (
'hyperkitty',
'postorius',
'django_mailman3',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'django_gravatar',
'compressor',
'haystack',
'django_extensions',
'django_q',
'allauth',
'allauth.account',
'allauth.socialaccount',
'django_mailman3.lib.auth.fedora',
#'allauth.socialaccount.providers.openid',
#'allauth.socialaccount.providers.github',
#'allauth.socialaccount.providers.gitlab',
#'allauth.socialaccount.providers.google',
#'allauth.socialaccount.providers.facebook',
#'allauth.socialaccount.providers.twitter',
#'allauth.socialaccount.providers.stackexchange',
)
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
# Use 'sqlite3', 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
#'ENGINE': 'django.db.backends.sqlite3',
'ENGINE': 'django.db.backends.postgresql_psycopg2',
#'ENGINE': 'django.db.backends.mysql',
# DB name or path to database file if using sqlite3.
#'NAME': '/var/lib/mailman3/web/mailman3web.db',
'NAME': 'mailman',
# The following settings are not used with sqlite3:
'USER': 'mailman',
'PASSWORD': '${db_password}',
# HOST: empty for localhost through domain sockets or '127.0.0.1' for
# localhost through TCP.
'HOST': '127.0.0.1',
# PORT: set to empty string for default.
'PORT': '5432',
# OPTIONS: Extra parameters to use when connecting to the database.
'OPTIONS': {
# Set sql_mode to 'STRICT_TRANS_TABLES' for MySQL. See
# https://docs.djangoproject.com/en/1.11/ref/
# databases/#setting-sql-mode
#'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
},
}
}
# If you're behind a proxy, use the X-Forwarded-Host header
# See https://docs.djangoproject.com/en/1.8/ref/settings/#use-x-forwarded-host
USE_X_FORWARDED_HOST = True
# And if your proxy does your SSL encoding for you, set SECURE_PROXY_SSL_HEADER
# https://docs.djangoproject.com/en/1.8/ref/settings/#secure-proxy-ssl-header
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_SCHEME', 'https')
# Other security settings
# SECURE_SSL_REDIRECT = True
# If you set SECURE_SSL_REDIRECT to True, make sure the SECURE_REDIRECT_EXEMPT
# contains at least this line:
# SECURE_REDIRECT_EXEMPT = [
# "archives/api/mailman/.*", # Request from Mailman.
# ]
# SESSION_COOKIE_SECURE = True
# SECURE_CONTENT_TYPE_NOSNIFF = True
# SECURE_BROWSER_XSS_FILTER = True
# CSRF_COOKIE_SECURE = True
# CSRF_COOKIE_HTTPONLY = True
# X_FRAME_OPTIONS = 'DENY'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Set default domain for email addresses.
EMAILNAME = 'localhost.local'
# If you enable internal authentication, this is the address that the emails
# will appear to be coming from. Make sure you set a valid domain name,
# otherwise the emails may get rejected.
# https://docs.djangoproject.com/en/1.8/ref/settings/#default-from-email
# DEFAULT_FROM_EMAIL = "mailing-lists@you-domain.org"
DEFAULT_FROM_EMAIL = 'postorius@{}'.format(EMAILNAME)
# If you enable email reporting for error messages, this is where those emails
# will appear to be coming from. Make sure you set a valid domain name,
# otherwise the emails may get rejected.
# https://docs.djangoproject.com/en/1.8/ref/settings/#std:setting-SERVER_EMAIL
# SERVER_EMAIL = 'root@your-domain.org'
SERVER_EMAIL = 'root@{}'.format(EMAILNAME)
# Django Allauth
ACCOUNT_DEFAULT_HTTP_PROTOCOL = "https"
#
# Social auth
#
SOCIALACCOUNT_PROVIDERS = {
#'openid': {
# 'SERVERS': [
# dict(id='yahoo',
# name='Yahoo',
# openid_url='http://me.yahoo.com'),
# ],
#},
#'google': {
# 'SCOPE': ['profile', 'email'],
# 'AUTH_PARAMS': {'access_type': 'online'},
#},
#'facebook': {
# 'METHOD': 'oauth2',
# 'SCOPE': ['email'],
# 'FIELDS': [
# 'email',
# 'name',
# 'first_name',
# 'last_name',
# 'locale',
# 'timezone',
# ],
# 'VERSION': 'v2.4',
#},
}
# On a production setup, setting COMPRESS_OFFLINE to True will bring a
# significant performance improvement, as CSS files will not need to be
# recompiled on each requests. It means running an additional "compress"
# management command after each code upgrade.
# http://django-compressor.readthedocs.io/en/latest/usage/#offline-compression
COMPRESS_OFFLINE = True
POSTORIUS_TEMPLATE_BASE_URL = 'http://${hostname}/mailman3/'

View file

@ -0,0 +1,277 @@
# Copyright (C) 2008-2017 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
# This file contains the Debian configuration for mailman. It uses ini-style
# formats under the lazr.config regime to define all system configuration
# options. See <https://launchpad.net/lazr.config> for details.
[mailman]
# This address is the "site owner" address. Certain messages which must be
# delivered to a human, but which can't be delivered to a list owner (e.g. a
# bounce from a list owner), will be sent to this address. It should point to
# a human.
site_owner: ${site_owner_email}
# This is the local-part of an email address used in the From field whenever a
# message comes from some entity to which there is no natural reply recipient.
# Mailman will append '@' and the host name of the list involved. This
# address must not bounce and it must not point to a Mailman process.
noreply_address: noreply
# The default language for this server.
default_language: de
# Membership tests for posting purposes are usually performed by looking at a
# set of headers, passing the test if any of their values match a member of
# the list. Headers are checked in the order given in this variable. The
# value From_ means to use the envelope sender. Field names are case
# insensitive. This is a space separate list of headers.
sender_headers: from from_ reply-to sender
# Mail command processor will ignore mail command lines after designated max.
email_commands_max_lines: 10
# Default length of time a pending request is live before it is evicted from
# the pending database.
pending_request_life: 3d
# How long should files be saved before they are evicted from the cache?
cache_life: 7d
# A callable to run with no arguments early in the initialization process.
# This runs before database initialization.
pre_hook:
# A callable to run with no arguments late in the initialization process.
# This runs after adapters are initialized.
post_hook:
# Which paths.* file system layout to use.
# You should not change this variable.
layout: debian
# Can MIME filtered messages be preserved by list owners?
filtered_messages_are_preservable: no
# How should text/html parts be converted to text/plain when the mailing list
# is set to convert HTML to plaintext? This names a command to be called,
# where the substitution variable $filename is filled in by Mailman, and
# contains the path to the temporary file that the command should read from.
# The command should print the converted text to stdout.
html_to_plain_text_command: /usr/bin/lynx -dump $filename
# Specify what characters are allowed in list names. Characters outside of
# the class [-_.+=!$*{}~0-9a-z] matched case insensitively are never allowed,
# but this specifies a subset as the only allowable characters. This must be
# a valid character class regexp or the effect on list creation is
# unpredictable.
listname_chars: [-_.0-9a-z]
[shell]
# `mailman shell` (also `withlist`) gives you an interactive prompt that you
# can use to interact with an initialized and configured Mailman system. Use
# --help for more information. This section allows you to configure certain
# aspects of this interactive shell.
# Customize the interpreter prompt.
prompt: >>>
# Banner to show on startup.
banner: Welcome to the GNU Mailman shell
# Use IPython as the shell, which must be found on the system. Valid values
# are `no`, `yes`, and `debug` where the latter is equivalent to `yes` except
# that any import errors will be displayed to stderr.
use_ipython: no
# Set this to allow for command line history if readline is available. This
# can be as simple as $var_dir/history.py to put the file in the var directory.
history_file:
[paths.debian]
# Important directories for Mailman operation. These are defined here so that
# different layouts can be supported. For example, a developer layout would
# be different from a FHS layout. Most paths are based off the var_dir, and
# often just setting that will do the right thing for all the other paths.
# You might also have to set spool_dir though.
#
# Substitutions are allowed, but must be of the form $var where 'var' names a
# configuration variable in the paths.* section. Substitutions are expanded
# recursively until no more $-variables are present. Beware of infinite
# expansion loops!
#
# This is the root of the directory structure that Mailman will use to store
# its run-time data.
var_dir: /var/lib/mailman3
# This is where the Mailman queue files directories will be created.
queue_dir: $var_dir/queue
# This is the directory containing the Mailman 'runner' and 'master' commands
# if set to the string '$argv', it will be taken as the directory containing
# the 'mailman' command.
bin_dir: /usr/lib/mailman3/bin
# All list-specific data.
list_data_dir: $var_dir/lists
# Directory where log files go.
log_dir: /var/log/mailman3
# Directory for system-wide locks.
lock_dir: $var_dir/locks
# Directory for system-wide data.
data_dir: $var_dir/data
# Cache files.
cache_dir: $var_dir/cache
# Directory for configuration files and such.
etc_dir: /etc/mailman3
# Directory containing Mailman plugins.
ext_dir: $var_dir/ext
# Directory where the default IMessageStore puts its messages.
messages_dir: $var_dir/messages
# Directory for archive backends to store their messages in. Archivers should
# create a subdirectory in here to store their files.
archive_dir: $var_dir/archives
# Root directory for site-specific template override files.
template_dir: $var_dir/templates
# There are also a number of paths to specific file locations that can be
# defined. For these, the directory containing the file must already exist,
# or be one of the directories created by Mailman as per above.
#
# This is where PID file for the master runner is stored.
pid_file: /run/mailman3/master.pid
# Lock file.
lock_file: $lock_dir/master.lck
[database]
# The class implementing the IDatabase.
class: mailman.database.sqlite.SQLiteDatabase
#class: mailman.database.mysql.MySQLDatabase
#class: mailman.database.postgresql.PostgreSQLDatabase
# Use this to set the Storm database engine URL. You generally have one
# primary database connection for all of Mailman. List data and most rosters
# will store their data in this database, although external rosters may access
# other databases in their own way. This string supports standard
# 'configuration' substitutions.
url: sqlite:///$DATA_DIR/mailman.db
#url: mysql+pymysql://mailman3:mmpass@localhost/mailman3?charset=utf8&use_unicode=1
#url: postgresql://mailman3:mmpass@localhost/mailman3
debug: no
[logging.debian]
# This defines various log settings. The options available are:
#
# - level -- Overrides the default level; this may be any of the
# standard Python logging levels, case insensitive.
# - format -- Overrides the default format string
# - datefmt -- Overrides the default date format string
# - path -- Overrides the default logger path. This may be a relative
# path name, in which case it is relative to Mailman's LOG_DIR,
# or it may be an absolute path name. You cannot change the
# handler class that will be used.
# - propagate -- Boolean specifying whether to propagate log message from this
# logger to the root "mailman" logger. You cannot override
# settings for the root logger.
#
# In this section, you can define defaults for all loggers, which will be
# prefixed by 'mailman.'. Use subsections to override settings for specific
# loggers. The names of the available loggers are:
#
# - archiver -- All archiver output
# - bounce -- All bounce processing logs go here
# - config -- Configuration issues
# - database -- Database logging (SQLAlchemy and Alembic)
# - debug -- Only used for development
# - error -- All exceptions go to this log
# - fromusenet -- Information related to the Usenet to Mailman gateway
# - http -- Internal wsgi-based web interface
# - locks -- Lock state changes
# - mischief -- Various types of hostile activity
# - runner -- Runner process start/stops
# - smtp -- Successful SMTP activity
# - smtp-failure -- Unsuccessful SMTP activity
# - subscribe -- Information about leaves/joins
# - vette -- Message vetting information
format: %(asctime)s (%(process)d) %(message)s
datefmt: %b %d %H:%M:%S %Y
propagate: no
level: info
path: mailman.log
[webservice]
# The hostname at which admin web service resources are exposed.
hostname: localhost
# The port at which the admin web service resources are exposed.
port: 8001
# Whether or not requests to the web service are secured through SSL.
use_https: no
# Whether or not to show tracebacks in an HTTP response for a request that
# raised an exception.
show_tracebacks: yes
# The API version number for the current (highest) API.
api_version: 3.1
# The administrative username.
admin_user: restadmin
# The administrative password.
admin_pass: ${api_password}
[mta]
# The class defining the interface to the incoming mail transport agent.
#incoming: mailman.mta.exim4.LMTP
incoming: mailman.mta.postfix.LMTP
# The callable implementing delivery to the outgoing mail transport agent.
# This must accept three arguments, the mailing list, the message, and the
# message metadata dictionary.
outgoing: mailman.mta.deliver.deliver
# How to connect to the outgoing MTA. If smtp_user and smtp_pass is given,
# then Mailman will attempt to log into the MTA when making a new connection.
# smtp_host: smtp.ionos.de
# smtp_port: 587
# smtp_user: ${smtp_user}
# smtp_pass: ${smtp_password}
# smtp_secure_mode: starttls
smtp_host: 127.0.0.1
smtp_port: 25
smtp_user:
smtp_pass:
# Where the LMTP server listens for connections. Use 127.0.0.1 instead of
# localhost for Postfix integration, because Postfix only consults DNS
# (e.g. not /etc/hosts).
lmtp_host: 127.0.0.1
lmtp_port: 8024
# Where can we find the mail server specific configuration file? The path can
# be either a file system path or a Python import path. If the value starts
# with python: then it is a Python import path, otherwise it is a file system
# path. File system paths must be absolute since no guarantees are made about
# the current working directory. Python paths should not include the trailing
# .cfg, which the file must end with.
#configuration: python:mailman.config.exim4
configuration: python:mailman.config.postfix

View file

@ -0,0 +1,52 @@
# See /usr/share/postfix/main.cf.dist for a commented, more complete version
# Debian specific: Specifying a file name will cause the first
# line of that file to be used as the name. The Debian default
# is /etc/mailname.
#myorigin = /etc/mailname
smtpd_banner = $myhostname ESMTP $mail_name (Debian/GNU)
biff = no
# appending .domain is the MUA's job.
append_dot_mydomain = no
# Uncomment the next line to generate "delayed mail" warnings
#delay_warning_time = 4h
readme_directory = no
# See http://www.postfix.org/COMPATIBILITY_README.html -- default to 3.6 on
# fresh installs.
compatibility_level = 3.6
# TLS parameters
smtpd_tls_cert_file=/etc/ssl/certs/ssl-cert-snakeoil.pem
smtpd_tls_key_file=/etc/ssl/private/ssl-cert-snakeoil.key
smtpd_tls_security_level=may
smtp_tls_CApath=/etc/ssl/certs
smtp_tls_security_level=may
smtp_tls_session_cache_database = <%text>btree:${data_directory}/smtp_scache</%text>
smtpd_relay_restrictions = permit_mynetworks permit_sasl_authenticated defer_unauth_destination
myhostname = ${hostname}
alias_maps = hash:/etc/aliases
alias_database = hash:/etc/aliases
mydestination = $myhostname, localhost, localhost.localdomain, ${hostname}
relayhost =
mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128
mailbox_size_limit = 0
recipient_delimiter = +
inet_interfaces = all
inet_protocols = all
unknown_local_recipient_reject_code = 550
owner_request_special = no
transport_maps =
hash:/var/lib/mailman3/data/postfix_lmtp
local_recipient_maps =
hash:/var/lib/mailman3/data/postfix_lmtp
relay_domains =
hash:/var/lib/mailman3/data/postfix_domains

View file

@ -0,0 +1,50 @@
[uwsgi]
# Port on which uwsgi will be listening.
uwsgi-socket = /run/mailman3-web/uwsgi.sock
#Enable threading for python
enable-threads = true
# Move to the directory where the django files are.
chdir = /usr/share/mailman3-web
# Use the wsgi file provided with the django project.
wsgi-file = wsgi.py
# Setup default number of processes and threads per process.
master = true
process = 2
threads = 2
# Drop privileges and don't run as root.
uid = www-data
gid = www-data
plugins = python3
# Setup the django_q related worker processes.
attach-daemon = python3 manage.py qcluster
# Setup hyperkitty's cron jobs.
#unique-cron = -1 -1 -1 -1 -1 ./manage.py runjobs minutely
#unique-cron = -15 -1 -1 -1 -1 ./manage.py runjobs quarter_hourly
#unique-cron = 0 -1 -1 -1 -1 ./manage.py runjobs hourly
#unique-cron = 0 0 -1 -1 -1 ./manage.py runjobs daily
#unique-cron = 0 0 1 -1 -1 ./manage.py runjobs monthly
#unique-cron = 0 0 -1 -1 0 ./manage.py runjobs weekly
#unique-cron = 0 0 1 1 -1 ./manage.py runjobs yearly
# Setup the request log.
#req-logger = file:/var/log/mailman3/web/mailman-web.log
# Log cron separately.
#logger = cron file:/var/log/mailman3/web/mailman-web-cron.log
#log-route = cron uwsgi-cron
# Log qcluster commands separately.
#logger = qcluster file:/var/log/mailman3/web/mailman-web-qcluster.log
#log-route = qcluster uwsgi-daemons
# Last log and it logs the rest of the stuff.
#logger = file:/var/log/mailman3/web/mailman-web-error.log
logto = /var/log/mailman3/web/mailman-web.log

104
bundles/mailman/items.py Normal file
View file

@ -0,0 +1,104 @@
directories = {
'/var/lib/mailman3': {
'owner': 'list',
'group': 'list',
'needs': {
'zfs_dataset:tank/mailman',
'pkg_apt:mailman3-full',
},
'needed_by': {
'svc_systemd:mailman3.service',
'svc_systemd:mailman3-web.service',
},
},
}
files = {
'/etc/postfix/main.cf': {
'source': 'postfix.cf',
'content_type': 'mako',
'mode': '0644',
'context': {
'hostname': node.metadata.get('mailman/hostname'),
},
'needs': {
'pkg_apt:postfix',
},
'triggers': {
'svc_systemd:postfix.service:restart',
},
},
'/etc/mailman3/mailman.cfg': {
'content_type': 'mako',
'owner': 'root',
'group': 'list',
'mode': '0640',
'context': node.metadata.get('mailman'),
'needs': {
'pkg_apt:mailman3-full',
},
'triggers': {
'svc_systemd:mailman3.service:restart',
'svc_systemd:mailman3-web.service:restart',
},
},
'/etc/mailman3/mailman-web.py': {
'content_type': 'mako',
'owner': 'root',
'group': 'www-data',
'mode': '0640',
'context': node.metadata.get('mailman'),
'needs': {
'pkg_apt:mailman3-full',
},
'triggers': {
'svc_systemd:mailman3.service:restart',
'svc_systemd:mailman3-web.service:restart',
},
},
'/etc/mailman3/mailman-hyperkitty.cfg': {
'content_type': 'mako',
'owner': 'root',
'group': 'list',
'mode': '0640',
'context': node.metadata.get('mailman'),
'needs': {
'pkg_apt:mailman3-full',
},
'triggers': {
'svc_systemd:mailman3.service:restart',
'svc_systemd:mailman3-web.service:restart',
},
},
'/etc/mailman3/uwsgi.ini': {
'content_type': 'text',
'owner': 'root',
'group': 'root',
'mode': '0644',
'needs': {
'pkg_apt:mailman3-full',
},
'triggers': {
'svc_systemd:mailman3.service:restart',
'svc_systemd:mailman3-web.service:restart',
},
},
}
svc_systemd = {
'postfix.service': {
'needs': {
'pkg_apt:postfix',
},
},
'mailman3.service': {
'needs': {
'pkg_apt:mailman3-full',
},
},
'mailman3-web.service': {
'needs': {
'pkg_apt:mailman3-full',
},
},
}

116
bundles/mailman/metadata.py Normal file
View file

@ -0,0 +1,116 @@
import base64
def derive_mailadmin_secret(metadata, salt):
node_id = metadata.get('id')
raw = base64.b64decode(
repo.vault.random_bytes_as_base64_for(f'{node_id}_{salt}', length=32).value
)
return base64.urlsafe_b64encode(raw).rstrip(b'=').decode('ascii')
defaults = {
'apt': {
'packages': {
'mailman3-full': {
'needs': {
'postgres_db:mailman',
'postgres_role:mailman',
'zfs_dataset:tank/mailman',
}
},
'postfix': {},
'python3-psycopg2': {
'needed_by': {
'pkg_apt:mailman3-full',
},
},
'apache2': {
'installed': False,
'needs': {
'pkg_apt:mailman3-full',
},
},
},
},
'zfs': {
'datasets': {
'tank/mailman': {
'mountpoint': '/var/lib/mailman3',
},
},
},
}
@metadata_reactor.provides(
'postgresql',
'mailman',
)
def postgresql(metadata):
node_id = metadata.get('id')
db_password = repo.vault.password_for(f'{node_id} database mailman')
return {
'postgresql': {
'databases': {
'mailman': {
'owner': 'mailman',
},
},
'roles': {
'mailman': {
'password': db_password,
},
},
},
'mailman': {
'db_password': db_password,
},
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('mailman/hostname'): {
'content': 'mailman/vhost.conf',
},
},
},
}
@metadata_reactor.provides(
'mailman/secret_key',
)
def secret_key(metadata):
import base64
node_id = metadata.get('id')
raw = base64.b64decode(
repo.vault.random_bytes_as_base64_for(f'{node_id}_mailman_secret_key', length=32).value
)
secret_key = base64.urlsafe_b64encode(raw).rstrip(b'=').decode('ascii')
return {
'mailman': {
'secret_key': secret_key,
},
}
@metadata_reactor.provides(
'mailman',
)
def secrets(metadata):
return {
'mailman': {
'web_secret': derive_mailadmin_secret(metadata, 'secret_key'),
'api_password': derive_mailadmin_secret(metadata, 'api_password'),
'archiver_key': derive_mailadmin_secret(metadata, 'archiver_key'),
},
}

View file

@ -1,11 +0,0 @@
% for section, options in sorted(conf.items()):
[${section}]
% for key, value in sorted(options.items()):
% if value is None:
${key}
% else:
${key} = ${value}
% endif
% endfor
% endfor

View file

@@ -10,8 +10,6 @@ directories = {
 'group': 'mysql',
 'needs': [
 'zfs_dataset:tank/mariadb',
-],
-'needed_by': [
 'pkg_apt:mariadb-server',
 'pkg_apt:mariadb-client',
 ],
@@ -20,10 +18,8 @@ directories = {
 files = {
 '/etc/mysql/conf.d/override.conf': {
-'context': {
-'conf': node.metadata.get('mariadb/conf'),
-},
-'content_type': 'mako',
+'content': repo.libs.ini.dumps(node.metadata.get('mariadb/conf')),
+'content_type': 'text',
 },
 }

View file

@ -1,33 +0,0 @@
assert node.has_bundle('nodejs')
assert node.has_bundle('postgresql')
assert node.has_bundle('zfs')
# To update:
#
# - systemctl stop n8n postgresql
# - tempsnap pre-n8n-update (for psql, emergency rollback)
# - apply
version = node.metadata.get("n8n/version")
actions['install_n8n'] = {
'command': f'cd /opt/n8n && sudo -u n8n npm install n8n@{version}',
'unless': f'test -e /opt/n8n/node_modules && '
f'test $(jq -r ".version" < /opt/n8n/node_modules/n8n/package.json) = "{version}"',
'needs': {
'directory:/opt/n8n',
'pkg_apt:nodejs',
'user:n8n',
},
'triggers': {
'svc_systemd:n8n.service:restart',
},
}
svc_systemd['n8n.service'] = {
'enabled': True,
'running': True,
'needs': {
'pkg_apt:nodejs',
'action:install_n8n',
},
}

View file

@ -1,89 +0,0 @@
database_password = repo.vault.password_for(f'{node.name} postgresql n8n')
defaults = {
'backups': {
'paths': {
'/opt/n8n',
},
},
'npm': {
'n8n': {},
},
'n8n': {
'DB_TYPE': 'postgresdb',
'DB_POSTGRESDB_DATABASE': 'n8n',
'DB_POSTGRESDB_HOST': 'localhost',
'DB_POSTGRESDB_PORT': 5432,
'DB_POSTGRESDB_USER': 'n8n',
'DB_POSTGRESDB_PASSWORD': database_password,
},
'postgresql': {
'databases': {
'n8n': {
'when_creating': {
'encoding': 'UTF8',
'collation': 'C.UTF-8',
'ctype': 'C.UTF-8',
},
'owner': 'n8n',
},
},
'roles': {
'n8n': {
'password': database_password,
},
},
},
'systemd': {
'units': {
'n8n.service': {
'Unit': {
'Description': 'n8n',
'Requires': 'network.target postgresql.service',
'After': 'postgresql.service',
},
'Service': {
'Restart': 'always',
'RestartSec': '5',
'WorkingDirectory': '/opt/n8n',
'ExecStart': '/usr/bin/npx n8n start',
'User': 'n8n',
'Group': 'n8n',
'Environment': {
'NODE_ENV=production',
},
},
},
},
},
'users': {
'n8n': {
'home': '/opt/n8n',
},
},
'zfs': {
'datasets': {
'tank/n8n': {
'mountpoint': '/opt/n8n',
'needed_by': {'directory:/opt/n8n'},
},
},
},
}
@metadata_reactor.provides(
'systemd/services/n8n.service',
)
def systemd(metadata):
return {
'systemd': {
'units': {
'n8n.service': {
'Service': {
'Environment': metadata.get('n8n'),
},
},
},
},
}

19
bundles/network/items.py Normal file
View file

@ -0,0 +1,19 @@
for network_name, network_conf in node.metadata.get('network').items():
if 'qdisc' in network_conf:
svc_systemd[f'qdisc-{network_name}.service'] = {
'enabled': True,
'running': None,
'needs': {
f'file:/usr/local/lib/systemd/system/qdisc-{network_name}.service',
},
}
actions[f'qdisc-{network_name}.service_restart_workaround'] = {
'command': 'true',
'triggered': True,
'triggered_by': {
f'file:/usr/local/lib/systemd/system/qdisc-{network_name}.service',
},
'triggers': {
f'svc_systemd:qdisc-{network_name}.service:restart',
},
}

View file

@@ -36,61 +36,106 @@ def dhcp(metadata):
 'systemd/units',
 )
 def units(metadata):
+if node.has_bundle('systemd-networkd'):
 units = {}
 for network_name, network_conf in metadata.get('network').items():
 interface_type = network_conf.get('type', None)
 # network
 units[f'{network_name}.network'] = {
 'Match': {
 'Name': network_name if interface_type == 'vlan' else network_conf['interface'],
 },
 'Network': {
 'DHCP': network_conf.get('dhcp', 'no'),
 'IPv6AcceptRA': network_conf.get('dhcp', 'no'),
-'VLAN': set(network_conf.get('vlans', set()))
+'VLAN': set(
+other_network_name
+for other_network_name, other_network_conf in metadata.get('network', {}).items()
+if other_network_conf.get('type') == 'vlan' and other_network_conf['vlan_interface'] == network_name
+)
 }
 }
 # type
 if interface_type:
 units[f'{network_name}.network']['Match']['Type'] = interface_type
 # ips
 for i in [4, 6]:
 if network_conf.get(f'ipv{i}', None):
 units[f'{network_name}.network'].update({
 f'Address#ipv{i}': {
 'Address': network_conf[f'ipv{i}'],
 },
 })
 if f'gateway{i}' in network_conf:
 units[f'{network_name}.network'].update({
 f'Route#ipv{i}': {
 'Gateway': network_conf[f'gateway{i}'],
 'GatewayOnlink': 'yes',
 }
 })
 # as vlan
 if interface_type == 'vlan':
 units[f"{network_name}.netdev"] = {
 'NetDev': {
 'Name': network_name,
 'Kind': 'vlan',
 },
 'VLAN': {
 'Id': network_conf['id'],
 }
 }
+# cake WIP
+# if 'cake' in network_conf:
+# units[f'{network_name}.network']['CAKE'] = network_conf['cake']
 return {
 'systemd': {
 'units': units,
 }
 }
+else:
+return {}
+@metadata_reactor.provides(
+'systemd/units',
+)
+def queuing_disciplines(metadata):
+if node.has_bundle('systemd-networkd'):
+return {
+'systemd': {
+'units': {
+f'qdisc-{network_name}.service': {
+'Unit': {
+'Description': f'setup queuing discipline for interface {network_name}',
+'Wants': 'network.target',
+'After': 'network.target',
+'BindsTo': 'network.target',
+},
+'Service': {
+'Type': 'oneshot',
+'ExecStart': f'/sbin/tc qdisc replace root dev {network_name} {network_conf["qdisc"]}',
+'RemainAfterExit': 'yes',
+},
+'Install': {
+'WantedBy': 'network-online.target',
+},
+}
+for network_name, network_conf in metadata.get('network').items()
+if 'qdisc' in network_conf
+},
+},
+}
+else:
+return {}

View file

@@ -8,4 +8,5 @@ examples
 ```sh
 nft add rule inet filter input tcp dport 5201 accept
+nft add rule inet filter input udp dport 5201 accept
 ```

View file

@@ -2,6 +2,23 @@
 flush ruleset
+% if nat:
+table ip nat {
+# NAT
+chain postrouting {
+type nat hook postrouting priority 100
+policy accept
+# rules
+% for rule in sorted(nat):
+${rule}
+% endfor
+}
+}
+% endif
 table inet filter {
 # INPUT

View file

@@ -6,6 +6,7 @@ files = {
 'input': node.metadata.get('nftables/input'),
 'forward': node.metadata.get('nftables/forward'),
 'output': node.metadata.get('nftables/output'),
+'nat': node.metadata.get('nftables/nat'),
 },
 'triggers': [
 'svc_systemd:nftables.service:reload',

View file

@@ -8,7 +8,8 @@ defaults = {
 'input': {
 'tcp dport 22 accept',
 },
-'forward': {},
-'output': {},
+'forward': set(),
+'nat': set(),
+'output': set(),
 },
 }

View file

@@ -31,5 +31,13 @@ http {
 }
 % endif
-include /etc/nginx/sites/*;
+% if has_websockets:
+map $http_upgrade $connection_upgrade {
+default upgrade;
+'' close;
+}
+% endif
+include /etc/nginx/sites-enabled/*;
 }

View file

@@ -9,7 +9,7 @@ directories = {
 'svc_systemd:nginx:restart',
 },
 },
-'/etc/nginx/sites': {
+'/etc/nginx/sites-available': {
 'purge': True,
 'triggers': {
 'svc_systemd:nginx:restart',
@@ -33,6 +33,7 @@ files = {
 'context': {
 'modules': node.metadata.get('nginx/modules'),
 'worker_processes': node.metadata.get('vm/cores'),
+'has_websockets': node.metadata.get('nginx/has_websockets'),
 },
 'triggers': {
 'svc_systemd:nginx:restart',
@@ -75,6 +76,12 @@ files = {
 },
 }
+symlinks = {
+'/etc/nginx/sites-enabled': {
+'target': '/etc/nginx/sites-available',
+},
+}
 actions = {
 'nginx-generate-dhparam': {
 'command': 'openssl dhparam -dsaparam -out /etc/ssl/certs/dhparam.pem 4096',
@@ -93,7 +100,7 @@ svc_systemd = {
 for name, config in node.metadata.get('nginx/vhosts').items():
-files[f'/etc/nginx/sites/{name}'] = {
+files[f'/etc/nginx/sites-available/{name}'] = {
 'content': Template(filename=join(repo.path, 'data', config['content'])).render(
 server_name=name,
 **config.get('context', {}),
@@ -109,6 +116,6 @@ for name, config in node.metadata.get('nginx/vhosts').items():
 }
 if name in node.metadata.get('letsencrypt/domains'):
-files[f'/etc/nginx/sites/{name}']['needs'].append(
+files[f'/etc/nginx/sites-available/{name}']['needs'].append(
 f'action:letsencrypt_ensure-some-certificate_{name}',
 )

View file

@@ -18,6 +18,7 @@ defaults = {
 'nginx': {
 'vhosts': {},
 'modules': set(),
+'has_websockets': False,
 },
 'systemd': {
 'units': {
@@ -95,7 +96,7 @@ def monitoring(metadata):
 'monitoring': {
 'services': {
 hostname: {
-'vars.command': f"/usr/bin/curl -X GET -L --fail --no-progress-meter -o /dev/null {quote(hostname + vhost.get('check_path', ''))}",
+'vars.command': f"/usr/bin/curl -X GET -L --fail --no-progress-meter -o /dev/null {vhost.get('check_protocol', 'https')}://{quote(hostname + vhost.get('check_path', '/'))}",
 }
 for hostname, vhost in metadata.get('nginx/vhosts').items()
 },

View file

@@ -8,7 +8,9 @@ defaults = {
 },
 },
 },
-'npm': {},
+'npm': {
+'yarn': {},
+},
 }
@@ -26,9 +28,7 @@ def sources(metadata):
 'deb',
 'deb-src',
 },
-'urls': {
-f'https://deb.nodesource.com/node_{version}.x',
-},
+'url': 'https://deb.nodesource.com/node_{version}.x',
 'suites': {
 '{codename}',
 },

View file

@ -0,0 +1,22 @@
# DO NOT DISABLE!
# If you change this first entry you will need to make sure that the
# database superuser can access the database using some other method.
# Noninteractive access to all databases is required during automatic
# maintenance (custom daily cronjobs, replication, and similar tasks).
#
# Database administrative login by Unix domain socket
local all postgres peer
# TYPE DATABASE USER ADDRESS METHOD
# "local" is for Unix domain socket connections only
local all all peer
# IPv4 local connections:
host all all 127.0.0.1/32 ${node.metadata.get('postgresql/password_algorithm', 'md5')}
# IPv6 local connections:
host all all ::1/128 ${node.metadata.get('postgresql/password_algorithm', 'md5')}
# Allow replication connections from localhost, by a user with the
# replication privilege.
local replication all peer
host replication all 127.0.0.1/32 ${node.metadata.get('postgresql/password_algorithm', 'md5')}
host replication all ::1/128 ${node.metadata.get('postgresql/password_algorithm', 'md5')}

View file

@@ -18,6 +18,21 @@ directories = {
 }
 files = {
+f"/etc/postgresql/{version}/main/pg_hba.conf": {
+'content_type': 'mako',
+'mode': '0640',
+'owner': 'postgres',
+'group': 'postgres',
+'needs': [
+'pkg_apt:postgresql',
+],
+'needed_by': [
+'svc_systemd:postgresql.service',
+],
+'triggers': [
+'svc_systemd:postgresql.service:restart',
+],
+},
 f"/etc/postgresql/{version}/main/conf.d/managed.conf": {
 'content': '\n'.join(
 f'{key} = {value}'

36
bundles/pppoe/REAMDE.md Normal file
View file

@ -0,0 +1,36 @@
# Fritzbox
Internet > Access Data (Zugangsdaten)
Internet provider (Internetanbieter)
- more internet providers (weitere Internetanbieter)
- other internet provider (anderer Internetanbieter)
- Name: "My PPPOE" (do not leave empty)
Connection (Anschluss)
(x) Connection to a DSL line
Access data (Zugangsdaten)
(x) No
Connection settings (Verbindungseinstellungen)
[x] Use VLAN for the internet connection
VLAN ID: 7
PBit: 0
DSL ATM settings
VPI: 1
VCI: 32
Encapsulation (Kapselung)
(x) Routed Bridge Encapsulation
[x] Obtain IP address automatically via DHCP
DHCP hostname: fritz.box
PPPoE passthrough
[x] Connected network devices may additionally establish their own internet connection (not recommended)
[ ] Check internet access after "Apply" (Übernehmen)
-> Afterwards, "Internet provider" (Internetanbieter) must show the chosen name, e.g. "My PPPOE", instead of "more internet providers"
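Once passthrough is active, the PPPoE session is dialed by the Linux router itself via the pppoe bundle below. A quick sanity check from the router, assuming the unit and link names used in this repo (pppoe-isp.service, ppp0):
```sh
systemctl status pppoe-isp.service     # pppd should be active and not restarting in a loop
ip -br addr show ppp0                  # ppp0 should carry the ISP-assigned address
journalctl -u pppoe-isp.service -n 50  # pppd log: authentication result, local/remote IPs
```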

View file

@ -0,0 +1,3 @@
# Secrets for authentication using CHAP
# client server secret IP addresses
"${user}" * "${secret}" *

10
bundles/pppoe/files/isp Normal file
View file

@ -0,0 +1,10 @@
linkname ppp0
noauth
defaultroute
replacedefaultroute
persist
maxfail 0
lcp-echo-interval 20
lcp-echo-failure 3
plugin rp-pppoe.so ${interface}
user "${user}"

42
bundles/pppoe/items.py Normal file
View file

@ -0,0 +1,42 @@
files = {
'/etc/modules-load.d/pppoe.conf': {
'content': 'pppoe\npppox\nppp_generic',
'mode': '0644',
},
'/etc/ppp/peers/isp': {
'content_type': 'mako',
'mode': '0644',
'context': {
'interface': node.metadata.get('pppoe/interface'),
'user': node.metadata.get('pppoe/user'),
},
'needs': {
'pkg_apt:pppoe',
},
},
'/etc/ppp/chap-secrets': {
'content_type': 'mako',
'mode': '0600',
'context': {
'user': node.metadata.get('pppoe/user'),
'secret': node.metadata.get('pppoe/secret'),
},
'needs': {
'pkg_apt:pppoe',
},
},
}
svc_systemd = {
'pppoe-isp.service': {
'needs': {
'file:/etc/ppp/peers/isp',
'file:/etc/ppp/chap-secrets',
},
},
'qdisc-ppp0.service': {
'needs': {
'svc_systemd:pppoe-isp.service',
},
},
}

43
bundles/pppoe/metadata.py Normal file
View file

@ -0,0 +1,43 @@
defaults = {
'apt': {
'packages': {
'pppoe': {},
},
},
'nftables': {
'nat': {
'oifname ppp0 masquerade',
},
},
'systemd': {
'units': {
'pppoe-isp.service': {
'Unit': {
'Description': 'PPPoE Internet Connection',
'After': 'network.target',
},
'Service': {
'Type': 'forking',
'ExecStart': '/usr/sbin/pppd call isp',
'Restart': 'on-failure',
'RestartSec': 5,
},
},
'qdisc-ppp0.service': {
'Unit': {
'Description': 'setup queuing discipline for interface ppp0',
'After': 'sys-devices-virtual-net-ppp0.device',
'BindsTo': 'sys-devices-virtual-net-ppp0.device',
},
'Service': {
'Type': 'oneshot',
'ExecStart': '/sbin/tc qdisc replace root dev ppp0 cake bandwidth 30Mbit rtt 50ms diffserv4 nat egress',
'RemainAfterExit': 'yes',
},
'Install': {
'WantedBy': 'network-online.target',
},
}
},
},
}

View file

@ -0,0 +1,21 @@
files = {
'/etc/apt/apt.conf.d/10pveapthook': {
'content_type': 'any',
'mode': '0644',
},
'/etc/apt/apt.conf.d/76pveconf': {
'content_type': 'any',
'mode': '0444',
},
'/etc/apt/apt.conf.d/76pveproxy': {
'content_type': 'any',
'mode': '0644',
},
'/etc/network/interfaces': {
'content_type': 'any',
},
}
symlinks['/etc/ssh/ssh_host_rsa_key.pub'] = {
'target': '/etc/ssh/ssh_host_managed_key.pub',
}

View file

@ -0,0 +1,100 @@
defaults = {
'apt': {
'packages': {
'linux-image-amd64': {
'installed': False,
},
'proxmox-default-kernel': {},
# after reboot
'proxmox-ve': {},
'postfix': {},
'open-iscsi': {},
'chrony': {},
'os-prober': {
'installed': False,
},
'dnsmasq-base': {},
},
'sources': {
'proxmox-ve': {
'options': {
'aarch': 'amd64',
},
'urls': {
'http://download.proxmox.com/debian/pve',
},
'suites': {
'{codename}',
},
'components': {
'pve-no-subscription',
},
'key': 'proxmox-ve-{codename}',
},
},
},
# 'nftables': {
# 'input': {
# 'tcp dport 8006 accept',
# },
# },
'zfs': {
'datasets': {
'tank/proxmox-ve': {
'mountpoint': '/var/lib/proxmox-ve',
},
}
}
}
# @metadata_reactor.provides(
# 'systemd',
# )
# def bridge(metadata):
# return {
# 'systemd': {
# 'units': {
# # f'internal.network': {
# # 'Network': {
# # 'Bridge': 'br0',
# # },
# # },
# 'br0.netdev': {
# 'NetDev': {
# 'Name': 'br0',
# 'Kind': 'bridge'
# },
# },
# 'br0.network': {
# 'Match': {
# 'Name': 'br0',
# },
# 'Network': {
# 'Unmanaged': 'yes'
# },
# },
# },
# },
# }
@metadata_reactor.provides(
'nginx/has_websockets',
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'has_websockets': True,
'vhosts': {
metadata.get('proxmox-ve/domain'): {
'content': 'nginx/proxy_pass.conf',
'context': {
'target': 'https://localhost:8006',
'websockets': True,
}
},
},
},
}

View file

@ -1,12 +1,15 @@
directories = { directories = {
'/etc/redis': { '/etc/redis': {
'purge': True, 'purge': True,
'owner': 'redis',
'mode': '2770',
'needs': [ 'needs': [
'pkg_apt:redis-server', 'pkg_apt:redis-server',
], ],
}, },
'/var/lib/redis': { '/var/lib/redis': {
'owner': 'redis', 'owner': 'redis',
'mode': '0750',
'needs': [ 'needs': [
'pkg_apt:redis-server', 'pkg_apt:redis-server',
], ],

View file

@ -0,0 +1,18 @@
initialization:
- reset (hold the reset button for 5-10 seconds, until the user LED starts flashing)
- open the web interface at 192.168.88.1
- set a password
- VLANs need to be configured, and an additional IP needs to be assigned to a VLAN that is later reachable, preferably through an untagged port
- for example, add 10.0.0.62/24 to the "home" VLAN
- this happens on the first apply
- when VLAN filtering gets enabled, the apply freezes and the switch is no longer reachable under the old IP
- once filtering is active, the switch is reachable under its new IP: you no longer talk to the bridge (where the old IP resided) but to the VLAN interface (where the new IP resides)
bw bug:
- f"/interface/bridge/vlan?vlan-ids={vlan_id}&dynamic=false" fails; remove &dynamic=false on create and then add it back again (see the sketch below)
old 6.x RouterOS firmware, VLAN filtering not working:
- update the firmware first
- upload the image to Files and simply reboot
- didn't work via the web interface, the log said the firmware image was broken
- did work via WinBox
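A minimal sketch of the bw-bug workaround described above, reusing the item syntax from bundles/routeros/items.py (placeholder values; the routeros dict is normally provided by that file and is initialized here only to keep the sketch self-contained):

# Sketch of the bridge-VLAN workaround: on the very first apply the item key
# must not contain &dynamic=false; afterwards the filter is added back.
routeros = {}
vlan_id = 1  # placeholder

# first apply only: create the entry without the dynamic filter
routeros[f"/interface/bridge/vlan?vlan-ids={vlan_id}"] = {
    'bridge': 'bridge',
}

# subsequent applies: the form that items.py actually declares
routeros[f"/interface/bridge/vlan?vlan-ids={vlan_id}&dynamic=false"] = {
    'bridge': 'bridge',
}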

119
bundles/routeros/items.py Normal file
View file

@ -0,0 +1,119 @@
routeros['/ip/dns'] = {
'servers': '8.8.8.8',
}
routeros['/system/identity'] = {
'name': node.name,
}
# for service in (
# 'api-ssl', # slow :(
# 'ftp', # we can download files via HTTP
# 'telnet',
# 'www-ssl', # slow :(
# 'winbox',
# ):
# routeros[f'/ip/service?name={service}'] = {
# 'disabled': True,
# }
# LOGGING_TOPICS = (
# 'critical',
# 'error',
# 'info',
# 'stp',
# 'warning',
# )
# for topic in LOGGING_TOPICS:
# routeros[f'/system/logging?action=memory&topics={topic}'] = {}
# routeros['/snmp'] = {
# 'enabled': True,
# }
# routeros['/snmp/community?name=public'] = {
# 'addresses': '0.0.0.0/0',
# 'disabled': False,
# 'read-access': True,
# 'write-access': False,
# }
routeros['/system/clock'] = {
'time-zone-autodetect': False,
'time-zone-name': 'UTC',
}
# routeros['/ip/neighbor/discovery-settings'] = {
# 'protocol': 'cdp,lldp,mndp',
# }
routeros['/ip/route?dst-address=0.0.0.0/0'] = {
'gateway': node.metadata.get('routeros/gateway'),
}
for vlan_name, vlan_id in node.metadata.get('routeros/vlans').items():
routeros[f'/interface/vlan?name={vlan_name}'] = {
'vlan-id': vlan_id,
'interface': 'bridge',
'tags': {
'routeros-vlan',
},
}
routeros[f"/interface/bridge/vlan?vlan-ids={vlan_id}&dynamic=false"] = { # bw bug: remove &dynamic=false on first apply
'bridge': 'bridge',
'untagged': sorted(node.metadata.get(f'routeros/vlan_ports/{vlan_name}/untagged')),
'tagged': sorted(node.metadata.get(f'routeros/vlan_ports/{vlan_name}/tagged')),
'_comment': vlan_name,
'tags': {
'routeros-vlan-ports',
},
'needs': {
'tag:routeros-vlan',
},
}
# create IPs
for ip, ip_conf in node.metadata.get('routeros/ips').items():
routeros[f'/ip/address?address={ip}'] = {
'interface': ip_conf['interface'],
'tags': {
'routeros-ip',
},
'needs': {
'tag:routeros-vlan',
},
}
routeros['/interface/bridge?name=bridge'] = {
'vlan-filtering': True, # ENABLE AFTER PORT VLANS ARE SET UP
'igmp-snooping': False,
'priority': node.metadata.get('routeros/bridge_priority'),
'protocol-mode': 'rstp',
'needs': {
'tag:routeros-vlan',
'tag:routeros-vlan-ports',
'tag:routeros-ip',
},
}
# purge unused vlans
routeros['/interface/vlan'] = {
'purge': {
'id-by': 'name',
},
'needed_by': {
'tag:routeros-vlan',
}
}
routeros['/interface/bridge/vlan'] = {
'purge': {
'id-by': 'vlan-ids',
'keep': {
'dynamic': True,
},
},
'needed_by': {
'tag:routeros-vlan',
}
}

View file

@ -0,0 +1,26 @@
defaults = {}
@metadata_reactor.provides(
'routeros/vlan_ports',
)
def routeros__(metadata):
return {
'routeros': {
'vlan_ports': {
vlan_name: {
'untagged': {
port_name
for port_name, port_conf in metadata.get('routeros/ports').items()
if vlan_name == metadata.get(f'routeros/vlan_groups/{port_conf["vlan_group"]}/untagged')
},
'tagged': {
port_name
for port_name, port_conf in metadata.get('routeros/ports').items()
if vlan_name in metadata.get(f'routeros/vlan_groups/{port_conf["vlan_group"]}/tagged')
},
}
for vlan_name in metadata.get('routeros/vlans').keys()
},
},
}
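To make the reactor above concrete, a standalone sketch of the same derivation with plain dicts (sample data trimmed from groups/os/routeros.py):

# Standalone sketch of the vlan_ports derivation, with plain dicts instead of
# bundlewrap's metadata proxy. Sample data trimmed from groups/os/routeros.py.
vlans = {'home': '1', 'internet': '3'}
vlan_groups = {
    'infra': {'untagged': 'home', 'tagged': {'internet'}},
    'internet': {'untagged': 'internet', 'tagged': set()},
}
ports = {
    'ether1': {'vlan_group': 'infra'},
    'ether4': {'vlan_group': 'internet'},
}

vlan_ports = {
    vlan_name: {
        'untagged': {
            port
            for port, conf in ports.items()
            if vlan_name == vlan_groups[conf['vlan_group']]['untagged']
        },
        'tagged': {
            port
            for port, conf in ports.items()
            if vlan_name in vlan_groups[conf['vlan_group']]['tagged']
        },
    }
    for vlan_name in vlans
}
# -> {'home': {'untagged': {'ether1'}, 'tagged': set()},
#     'internet': {'untagged': {'ether4'}, 'tagged': {'ether1'}}}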

View file

@ -1,3 +1,10 @@
% for nameserver in sorted(node.metadata.get('nameservers')): <%
nameservers = (
node.metadata.get('overwrite_nameservers', []) or
node.metadata.get('nameservers', [])
)
%>\
\
% for nameserver in nameservers:
nameserver ${nameserver} nameserver ${nameserver}
% endfor % endfor

View file

@ -19,5 +19,5 @@ directories = {
} }
svc_systemd = { svc_systemd = {
'systemd-networkd': {}, 'systemd-networkd.service': {},
} }

View file

@ -24,10 +24,10 @@ for name, unit in node.metadata.get('systemd/units').items():
path = f'/etc/systemd/network/{name}' path = f'/etc/systemd/network/{name}'
dependencies = { dependencies = {
'needed_by': [ 'needed_by': [
'svc_systemd:systemd-networkd', 'svc_systemd:systemd-networkd.service',
], ],
'triggers': [ 'triggers': [
'svc_systemd:systemd-networkd:restart', 'svc_systemd:systemd-networkd.service:restart',
], ],
} }
elif extension in ['timer', 'service', 'mount', 'swap', 'target']: elif extension in ['timer', 'service', 'mount', 'swap', 'target']:

View file

@ -26,7 +26,8 @@ def units(metadata):
type = name.split('.')[-1] type = name.split('.')[-1]
if type == 'service': if type == 'service':
units.setdefault(name, {}).setdefault('Install', {}).setdefault('WantedBy', {'multi-user.target'}) if not config.get('Install', {}).get('WantedBy', set()):
units.setdefault(name, {}).setdefault('Install', {}).setdefault('WantedBy', {'multi-user.target'})
elif type == 'timer': elif type == 'timer':
units.setdefault(name, {}).setdefault('Install', {}).setdefault('WantedBy', {'timers.target'}) units.setdefault(name, {}).setdefault('Install', {}).setdefault('WantedBy', {'timers.target'})
elif type == 'mount': elif type == 'mount':

View file

@ -12,7 +12,7 @@ defaults = {
'wireguard': { 'wireguard': {
'backports': node.os_version < (11,), 'backports': node.os_version < (11,),
'triggers': [ 'triggers': [
'svc_systemd:systemd-networkd:restart', 'svc_systemd:systemd-networkd.service:restart',
], ],
}, },
}, },

View file

@ -0,0 +1,24 @@
<?php
define( 'YOURLS_DB_USER', 'yourls' );
define( 'YOURLS_DB_PASS', '${db_password}' );
define( 'YOURLS_DB_NAME', 'yourls' );
define( 'YOURLS_DB_HOST', 'localhost' );
define( 'YOURLS_DB_PREFIX', 'yourls_' );
define( 'YOURLS_SITE', 'https://${hostname}' );
define( 'YOURLS_LANG', '' );
define( 'YOURLS_UNIQUE_URLS', true );
define( 'YOURLS_PRIVATE', true );
define( 'YOURLS_COOKIEKEY', '${cookiekey}' );
$yourls_user_passwords = [
% for username, password in users.items():
'${username}' => '${password}',
% endfor
];
define( 'YOURLS_URL_CONVERT', 36 );
define( 'YOURLS_DEBUG', false );
$yourls_reserved_URL = [];

48
bundles/yourls/items.py Normal file
View file

@ -0,0 +1,48 @@
directories = {
'/var/www/yourls/htdocs': {
'owner': 'www-data',
'group': 'www-data',
'mode': '0755',
},
# FIXME:
'/var/www/certbot': {
'owner': 'www-data',
'group': 'www-data',
'mode': '0755',
}
}
git_deploy = {
'/var/www/yourls/htdocs': {
'repo': 'https://github.com/YOURLS/YOURLS.git',
'rev': node.metadata.get('yourls/version'),
'needs': [
'directory:/var/www/yourls/htdocs',
],
'triggers': [
'svc_systemd:nginx:restart',
],
},
}
files = {
f'/var/www/yourls/htdocs/user/config.php': {
'content_type': 'mako',
'mode': '0440',
'owner': 'www-data',
'group': 'www-data',
'context': {
'db_password': node.metadata.get('mariadb/databases/yourls/password'),
'hostname': node.metadata.get('yourls/hostname'),
'cookiekey': node.metadata.get('yourls/cookiekey'),
'users': node.metadata.get('yourls/users'),
},
'needs': [
'git_deploy:/var/www/yourls/htdocs',
],
'triggers': [
'svc_systemd:nginx:restart',
],
},
}

View file

@ -0,0 +1,43 @@
defaults = {
'mariadb': {
'databases': {
'yourls': {
'password': repo.vault.random_bytes_as_base64_for(f'{node.name} yourls DB', length=32).value,
},
},
},
}
@metadata_reactor.provides(
'apt/packages',
)
def apt(metadata):
php_version = metadata.get('php/version')
return {
'apt':{
'packages': {
f'php{php_version}-mysql': {},
},
},
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('yourls/hostname'): {
'content': 'yourls/vhost.conf',
'context': {
'php_version': metadata.get('php/version'),
},
'check_path': '/admin',
},
},
},
}

Binary file not shown.

View file

@ -1,8 +1,3 @@
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server { server {
listen 443 ssl http2; listen 443 ssl http2;
listen [::]:443 ssl http2; listen [::]:443 ssl http2;

31
data/mailman/vhost.conf Normal file
View file

@ -0,0 +1,31 @@
upstream mailman3 {
server unix:/run/mailman3-web/uwsgi.sock fail_timeout=0;
}
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name ${server_name};
ssl_certificate /var/lib/dehydrated/certs/${server_name}/fullchain.pem;
ssl_certificate_key /var/lib/dehydrated/certs/${server_name}/privkey.pem;
server_tokens off;
location / {
uwsgi_pass mailman3;
include /etc/nginx/params/uwsgi;
}
location /mailman3/static {
alias /var/lib/mailman3/web/static;
}
location /mailman3/static/favicon.ico {
alias /var/lib/mailman3/web/static/postorius/img/favicon.ico;
}
# return 301 https://$server_name$request_uri;
access_log /var/log/nginx/mailman3/access.log combined;
error_log /var/log/nginx/mailman3/error.log;
}

View file

@ -8,6 +8,10 @@ server {
location / { location / {
proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Real-IP $remote_addr;
% if websockets:
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
% endif
proxy_pass ${target}; proxy_pass ${target};
} }
} }

21
data/yourls/vhost.conf Normal file
View file

@ -0,0 +1,21 @@
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name ${server_name};
ssl_certificate /var/lib/dehydrated/certs/${server_name}/fullchain.pem;
ssl_certificate_key /var/lib/dehydrated/certs/${server_name}/privkey.pem;
root /var/www/yourls/htdocs;
location / {
index index.php index.html index.htm;
try_files $uri $uri/ /yourls-loader.php$is_args$args;
}
location ~ \.php$ {
include params/fastcgi;
fastcgi_index index.php;
fastcgi_pass unix:/run/php/php${php_version}-fpm.sock;
}
}

View file

@ -6,4 +6,8 @@ for root, dirs, files in walk(join(repo_path, "groups")):
if filename.endswith(".py"): if filename.endswith(".py"):
group = join(root, filename) group = join(root, filename)
with open(group, 'r', encoding='utf-8') as f: with open(group, 'r', encoding='utf-8') as f:
groups[splitext(basename(filename))[0]] = eval(f.read()) try:
groups[splitext(basename(filename))[0]] = eval(f.read())
except:
print(f"Error parsing {group}:")
raise

View file

@ -1,5 +1,5 @@
{ {
'bundles': [ 'bundles': {
'bind', 'bind',
], },
} }

View file

@ -2,6 +2,9 @@
'supergroups': [ 'supergroups': [
'debian', 'debian',
], ],
'bundles': [
'systemd-networkd',
],
'metadata': { 'metadata': {
'php': { 'php': {
'version': '7.4', 'version': '7.4',

View file

@ -0,0 +1,26 @@
{
'metadata': {
'apt': {
'sources': {
'debian': {
'components': {
'non-free-firmware',
},
},
'debian-security': {
'components': {
'non-free-firmware',
},
},
},
},
'php': {
'version': '8.2',
},
'postgresql': {
'version': '15',
},
'os_codename': 'bookworm',
},
'os_version': (12,),
}

View file

@ -0,0 +1,10 @@
{
'supergroups': [
'debian',
'debian-12-common',
],
'bundles': [
'ifupdown',
'proxmox-ve',
],
}

View file

@ -1,6 +1,10 @@
{ {
'supergroups': [ 'supergroups': [
'debian', 'debian',
'debian-12-common',
],
'bundles': [
'systemd-networkd',
], ],
'metadata': { 'metadata': {
'apt': { 'apt': {

View file

@ -47,6 +47,14 @@
'mtr-tiny': {}, 'mtr-tiny': {},
}, },
}, },
# iperf3
'nftables': {
'input': {
'tcp dport 5201 accept',
'udp dport 5201 accept',
},
},
}, },
'os': 'debian', 'os': 'debian',
'pip_command': 'pip3', 'pip_command': 'pip3',

View file

@ -14,7 +14,6 @@
'system', 'system',
'systemd', 'systemd',
'systemd-journald', 'systemd-journald',
'systemd-networkd',
'systemd-mount', 'systemd-mount',
'systemd-timers', 'systemd-timers',
'users', 'users',

44
groups/os/routeros.py Normal file
View file

@ -0,0 +1,44 @@
# https://ftp-master.debian.org/keys.html
{
'username': 'admin',
'supergroups': [
'all',
],
'bundles': [
'routeros',
],
'metadata': {
'routeros': {
'gateway': '10.0.0.1',
'bridge_priority': '0x8000',
'ports': {},
'vlans': {
'home': '1',
'iot': '2',
'internet': '3',
'proxmox': '4',
'gast': '9',
'rolf': '51',
},
'vlan_groups': {
'infra': {
'untagged': 'home',
'tagged': {
'iot',
'internet',
'proxmox',
'gast',
'rolf',
},
},
'internet': {
'untagged': 'internet',
'tagged': set(),
},
},
'vlan_ports': {},
},
},
'os': 'routeros',
}

View file

@ -14,7 +14,7 @@ class CaseSensitiveConfigParser(ConfigParser):
return value return value
def parse(text): def parse(text):
config = CaseSensitiveConfigParser() config = CaseSensitiveConfigParser(allow_no_value=True)
config.read_string(text) config.read_string(text)
return { return {
@ -24,8 +24,7 @@ def parse(text):
def dumps(dict): def dumps(dict):
sorted_dict = json.loads(json.dumps(dict, sort_keys=True, cls=MetadataJSONEncoder)) sorted_dict = json.loads(json.dumps(dict, sort_keys=True, cls=MetadataJSONEncoder))
parser = CaseSensitiveConfigParser(allow_no_value=True)
parser = CaseSensitiveConfigParser()
parser.read_dict(sorted_dict) parser.read_dict(sorted_dict)
writable = Writable() writable = Writable()

View file

@ -1,12 +1,10 @@
# https://stackoverflow.com/a/18266970 # https://stackoverflow.com/a/18266970
from Crypto.PublicKey import RSA from Crypto.PublicKey import RSA
from Crypto.Hash import HMAC
from struct import pack from struct import pack
from hashlib import sha3_512 from hashlib import sha3_512
from cryptography.hazmat.primitives.serialization import load_der_private_key from cryptography.hazmat.primitives.serialization import load_der_private_key
from functools import cache from functools import cache
from cache_to_disk import cache_to_disk
class PRNG(object): class PRNG(object):
@ -22,7 +20,6 @@ class PRNG(object):
return result return result
@cache_to_disk(30)
def _generate_deterministic_rsa_private_key(secret_bytes): def _generate_deterministic_rsa_private_key(secret_bytes):
return RSA.generate(2048, randfunc=PRNG(secret_bytes)).export_key('DER') return RSA.generate(2048, randfunc=PRNG(secret_bytes)).export_key('DER')

View file

@ -18,32 +18,26 @@
'id': '9cf52515-63a1-4659-a8ec-6c3c881727e5', 'id': '9cf52515-63a1-4659-a8ec-6c3c881727e5',
'network': { 'network': {
'internal': { 'internal': {
'interface': 'enp0s31f6', 'interface': 'enp1s0f0',
'ipv4': '10.0.0.5/24', 'ipv4': '10.0.0.5/24',
'gateway4': '10.0.0.1', 'gateway4': '10.0.0.1',
'mac': '98:b7:85:01:ca:a6',
},
'wakeonlan': {
'interface': 'enp0s31f6',
'ipv4': '10.0.0.6/24',
'mac': '4c:cc:6a:d5:96:f8', 'mac': '4c:cc:6a:d5:96:f8',
}, },
}, },
'backup-server': { 'backup-server': {
'hostname': 'backups.sublimity.de', 'hostname': 'backups.sublimity.de',
}, },
# 'smartctl': {
# '/dev/disk/by-id/ata-HGST_HDN726040ALE614_K3GV6TPL': {
# 'apm': 1,
# },
# '/dev/disk/by-id/ata-HGST_HDN726040ALE614_K4KAJXEB': {
# 'apm': 1,
# },
# '/dev/disk/by-id/ata-TOSHIBA_HDWQ140_19VZK0EMFAYG': {
# 'apm': 1,
# },
# },
'ssh': { 'ssh': {
# multiplexing prevents the server from sleeping # multiplexing prevents the server from sleeping
'multiplex_incoming': False, 'multiplex_incoming': False,
}, },
'wol-sleeper': { 'wol-sleeper': {
'network': 'internal', 'network': 'wakeonlan',
'waker': 'home.server', 'waker': 'home.server',
}, },
'zfs-mirror': { 'zfs-mirror': {

View file

@ -0,0 +1,11 @@
{
'hostname': '10.0.0.162',
'bundles': [
'bootshorn',
'systemd',
'systemd-timers',
],
'metadata': {
'id': '25c6f3fd-0d32-42c3-aeb3-0147bc3937c7',
},
}

View file

@ -1,28 +1,6 @@
{ {
'dummy': True,
'hostname': '10.0.0.16', 'hostname': '10.0.0.16',
'groups': [
'webserver',
'backup',
'monitored',
'raspberry-pi',
'autologin',
],
'bundles': [
'apt',
'homeassistant-supervised',
'hostname',
'hosts',
'htop',
'users',
'ssh',
'sudo',
'locale',
'zsh',
'zfs',
'systemd',
'systemd-timers',
'systemd-journald',
],
'metadata': { 'metadata': {
'id': '3d67964d-1270-4d3c-b93f-9c44219b3d59', 'id': '3d67964d-1270-4d3c-b93f-9c44219b3d59',
'network': { 'network': {
@ -33,80 +11,5 @@
'gateway4': '10.0.0.1', 'gateway4': '10.0.0.1',
}, },
}, },
'apt': {
'sources': {
'debian': {
'urls': {
'https://deb.debian.org/debian',
},
'suites': {
'{codename}',
'{codename}-updates',
},
'components': {
'main',
'contrib',
'non-free',
'non-free-firmware',
},
'key': 'debian-{version}',
},
'debian-security': {
'urls': {
'http://security.debian.org/debian-security',
},
'suites': {
'{codename}-security',
},
'components': {
'main',
'contrib',
'non-free',
'non-free-firmware',
},
'key': 'debian-{version}-security',
},
},
},
'hosts': {
'10.0.10.2': [
'resolver.name',
'secondary.resolver.name',
],
},
'letsencrypt': {
'acme_node': 'htz.mails',
},
'homeassistant': {
'domain': 'homeassistant.ckn.li',
'os_agent_version': '1.6.0',
},
'nameservers': {
'10.0.10.2',
},
'users': {
'ckn': {
'shell': '/usr/bin/zsh',
'authorized_keys': {
'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILMVroYmswD4tLk6iH+2tvQiyaMe42yfONDsPDIdFv6I ckn',
},
},
},
'sudoers': {
'ckn': {'ALL'},
},
'zfs': {
'pools': {
'tank': {
'devices': [
'/var/lib/zfs/tank.img',
],
},
},
},
'os_codename': 'bookworm',
}, },
'os': 'debian',
'os_version': (12,),
'pip_command': 'pip3',
} }

View file

@ -6,61 +6,66 @@
'hardware', 'hardware',
'home', 'home',
'monitored', 'monitored',
'dnsserver',
], ],
'bundles': [ 'bundles': [
'kea-dhcpd', 'kea-dhcpd',
'wireguard', 'wireguard',
'pppoe',
], ],
'metadata': { 'metadata': {
'id': '1d6a43e5-858c-42f9-9c40-ab63d61c787c', 'id': '1d6a43e5-858c-42f9-9c40-ab63d61c787c',
'network': { 'network': {
'external': {
'interface': 'enx00e04c220682',
'ipv4': '10.0.99.126/24',
'gateway4': '10.0.99.1',
'vlans': {'iot', 'internet', 'guest', 'rolf', 'internal'},
},
'internal': { 'internal': {
'type': 'vlan', 'interface': 'enp1s0f0',
'id': 1,
'ipv4': '10.0.0.1/24', 'ipv4': '10.0.0.1/24',
'dhcp_server': True, 'dhcp_server': True,
}, },
'iot': { 'iot': {
'type': 'vlan', 'type': 'vlan',
'vlan_interface': 'internal',
'id': 2, 'id': 2,
'ipv4': '10.0.2.1/24', 'ipv4': '10.0.2.1/24',
'dhcp_server': True, 'dhcp_server': True,
}, },
'internet': { 'external': {
'type': 'vlan', 'type': 'vlan',
'vlan_interface': 'internal',
'id': 3, 'id': 3,
'ipv4': '10.0.3.1/24', 'ipv4': '10.0.98.2/24',
#'qdisc': 'cake bandwidth 35Mbit diffserv4',
},
'proxmox': {
'type': 'vlan',
'vlan_interface': 'internal',
'id': 4,
'ipv4': '10.0.4.1/24',
'dhcp_server': True,
}, },
'guest': { 'guest': {
'type': 'vlan', 'type': 'vlan',
'vlan_interface': 'internal',
'id': 9, 'id': 9,
'ipv4': '10.0.9.1/24', 'ipv4': '10.0.9.1/24',
'dhcp_server': True, 'dhcp_server': True,
}, },
'rolf': { # rolf local test 'rolf': { # rolf local test
'type': 'vlan', 'type': 'vlan',
'vlan_interface': 'internal',
'id': 51, 'id': 51,
'ipv4': '192.168.179.1/24', 'ipv4': '192.168.179.1/24',
'dhcp_server': True, 'dhcp_server': True,
}, },
}, },
# 'nftables': { 'bind': {
# 'forward': { 'master_node': 'htz.mails',
# # Drop DHCP client requests (UDP port 68) 'hostname': 'home.resolver.name',
# 'udp sport 68 drop', },
# 'udp dport 68 drop', 'pppoe': {
'interface': 'external',
# # Drop DHCP server responses (UDP port 67) 'user': '!decrypt:encrypt$gAAAAABocUfodLqCBKPPN7H9S64yJ7kRddtaWI0nQU2oklPMEjBhMsir4NL2yjkcHXAN-Ozqn6FCokyE1AL8ek3c5CqAvd83jkxZytp-oclrKqUD9uhUCy4=',
# 'udp sport 67 drop', 'secret': '!decrypt:encrypt$gAAAAABocUhmDqFZsyHYBIP2qdMFIS1eWT_bPdyv98cHzIgeKFAxDfcCrVJwDxVPFDDMa_7UT76HDJLvtdYQ8mFl2RL0yR8k2A=='
# 'udp dport 67 drop', },
# },
# },
'sysctl': { 'sysctl': {
'net': { 'net': {
'ipv4': { 'ipv4': {
@ -77,6 +82,8 @@
'10.0.10.0/24', '10.0.10.0/24',
#'192.168.179.0/24', # while raspi at home #'192.168.179.0/24', # while raspi at home
'10.0.227.0/24', # mseibert.freescout '10.0.227.0/24', # mseibert.freescout
'10.0.228.0/24', # mseibert.yourls
'10.0.229.0/24', # mseibert.mailman
], ],
}, },
}, },

View file

@ -3,7 +3,7 @@
'groups': [ 'groups': [
'autologin', 'autologin',
'backup', 'backup',
'debian-12', 'debian-12-pve',
'home', 'home',
'nextcloud', 'nextcloud',
'monitored', 'monitored',
@ -12,7 +12,6 @@
'build-server', 'build-server',
], ],
'bundles': [ 'bundles': [
'apcupsd',
'build-agent', 'build-agent',
'crystal', 'crystal',
'gitea', 'gitea',
@ -32,7 +31,6 @@
'systemd-swap', 'systemd-swap',
'twitch-clip-download', 'twitch-clip-download',
'raspberrymatic-cert', 'raspberrymatic-cert',
'tasmota-charge',
'wol-waker', 'wol-waker',
'zfs', 'zfs',
], ],
@ -40,14 +38,14 @@
'id': 'af96709e-b13f-4965-a588-ef2cd476437a', 'id': 'af96709e-b13f-4965-a588-ef2cd476437a',
'network': { 'network': {
'internal': { 'internal': {
'interface': 'enp42s0', 'interface': 'enp43s0',
'ipv4': '10.0.0.2/24', 'ipv4': '10.0.0.2/24',
'gateway4': '10.0.0.1', 'gateway4': '10.0.0.1',
}, },
}, },
'apt': { 'apt': {
'packages': { 'packages': {
'firmware-realtek': {}, # 'firmware-realtek': {}, proxmox-ve incompatibility
}, },
}, },
'build-server': { 'build-server': {
@ -110,7 +108,7 @@
}, },
'nextcloud': { 'nextcloud': {
'hostname': 'cloud.sublimity.de', 'hostname': 'cloud.sublimity.de',
'version': '29.0.7', 'version': '29.0.16',
'config': { 'config': {
'instanceid': 'oci6dw1woodz', 'instanceid': 'oci6dw1woodz',
'secret': '!decrypt:encrypt$gAAAAABj96CFynVtEgsje7173zjQAcY7xQG3uyf5cxE-sJAvhyPh_KUykTKdwnExc8NTDJ8RIGUmVfgC6or5crnYaggARPIEg5-Cb0xVdEPPZ3oZ01ImLmynLu3qXT9O8kVM-H21--OKeztMRn7bySsbXdWEGtETFQ==', 'secret': '!decrypt:encrypt$gAAAAABj96CFynVtEgsje7173zjQAcY7xQG3uyf5cxE-sJAvhyPh_KUykTKdwnExc8NTDJ8RIGUmVfgC6or5crnYaggARPIEg5-Cb0xVdEPPZ3oZ01ImLmynLu3qXT9O8kVM-H21--OKeztMRn7bySsbXdWEGtETFQ==',
@ -124,6 +122,9 @@
'unsortable': 'SofortUpload/Unsortable', 'unsortable': 'SofortUpload/Unsortable',
}, },
}, },
'proxmox-ve': {
'domain': 'pve.ckn.li',
},
'raspberrymatic-cert': { 'raspberrymatic-cert': {
'domain': 'homematic.ckn.li', 'domain': 'homematic.ckn.li',
'node': 'home.homematic', 'node': 'home.homematic',
@ -152,18 +153,6 @@
}, },
}, },
'systemd-swap': 4_000_000_000, 'systemd-swap': 4_000_000_000,
'tasmota-charge': {
'phone': {
'ip': '10.0.0.175',
'user': 'u0_a233',
'password': 'november',
},
'plug': {
'ip': '10.0.2.115',
'min': 45,
'max': 70,
},
},
'twitch-clip-download': { 'twitch-clip-download': {
'channel_name': 'cronekorkn_', 'channel_name': 'cronekorkn_',
}, },

View file

@ -0,0 +1,46 @@
{
'hostname': '10.0.0.63',
'password': '!decrypt:encrypt$gAAAAABoYFUx2faf18aV3rzNNuBA-4xZ22LQJ2HinpgsjkoTQS_l2TbmDtiAZI1jt-kWfTZ48d5_UPX-VDmY9qb4Sgn2Iz7Yee3CrB4hl85TyutilukTIP8=',
'groups': [
'routeros',
],
'metadata': {
'id': '26eca3f1-975e-426f-bd7d-e2a1ef36519e',
'routeros': {
'ips': {
'10.0.0.63/24': {
'interface': 'home',
},
},
'ports': {
'sfp-sfpplus1': {
'vlan_group': 'infra',
},
'sfp-sfpplus2': {
'vlan_group': 'infra',
},
'sfp-sfpplus3': {
'vlan_group': 'infra',
},
'sfp-sfpplus4': {
'vlan_group': 'infra',
},
'sfp-sfpplus5': {
'vlan_group': 'infra',
},
'sfp-sfpplus6': {
'vlan_group': 'infra',
},
'sfp-sfpplus7': {
'vlan_group': 'infra',
},
'sfp-sfpplus8': {
'vlan_group': 'infra',
},
'ether1': {
'vlan_group': 'infra',
},
},
},
},
}

View file

@ -0,0 +1,55 @@
{
'hostname': '10.0.0.64',
'password': '!decrypt:encrypt$gAAAAABob2elR_Sm13u-oG1ff_zOeEsay8PZ0Wgbl810hAZNhvuTYWJuNAJ1oyelC6sy7WsD2CC33oVLeb6m0EtNARtMs-2gKu9KlT7Xat1MvV-iatDKvro=',
'groups': [
'routeros',
],
'metadata': {
'id': 'f5ee3f48-f8a4-4fbc-9b82-e9510fb316ba',
'routeros': {
'ips': {
'10.0.0.64/24': {
'interface': 'home',
},
},
'ports': {
'ether1': {
'vlan_group': 'infra',
},
'ether2': {
'vlan_group': 'infra',
},
'ether3': {
'vlan_group': 'infra',
},
'ether4': {
'vlan_group': 'infra',
},
'ether5': {
'vlan_group': 'infra',
},
'ether6': {
'vlan_group': 'infra',
},
'ether7': {
'vlan_group': 'infra',
},
'ether8': {
'vlan_group': 'infra',
},
'sfp9': {
'vlan_group': 'infra',
},
'sfp10': {
'vlan_group': 'infra',
},
'sfp11': {
'vlan_group': 'infra',
},
'sfp12': {
'vlan_group': 'infra',
},
},
},
},
}

View file

@ -0,0 +1,103 @@
{
'hostname': '10.0.0.60',
'password': '!decrypt:encrypt$gAAAAABoYVzxzO0R_bnW3S3Ggiq2LCCAGaKtXToviGZjgIlH2NpL9ojO8aNlSPPcGTKbn5z5RxSxjOlL161U0Ctdf6Rns2e5I5p5TIcsQ7c9qnAiaV-Hhuw=',
'groups': [
'routeros',
],
'metadata': {
'id': 'e6a24df7-eed1-404e-af78-15ebcbcc02a2',
'routeros': {
'ips': {
'10.0.0.60/24': {
'interface': 'home',
},
},
'ports': {
'sfp-sfpplus1': {
'vlan_group': 'infra',
},
'sfp-sfpplus2': {
'vlan_group': 'infra',
},
'sfp-sfpplus3': {
'vlan_group': 'infra',
},
'sfp-sfpplus4': {
'vlan_group': 'infra',
},
'ether1': {
'vlan_group': 'infra',
},
'ether2': {
'vlan_group': 'infra',
},
'ether3': {
'vlan_group': 'infra',
},
'ether4': {
'vlan_group': 'internet',
},
'ether5': {
'vlan_group': 'infra',
},
'ether6': {
'vlan_group': 'infra',
},
'ether7': {
'vlan_group': 'infra',
},
'ether8': {
'vlan_group': 'infra',
},
'ether9': {
'vlan_group': 'infra',
},
'ether10': {
'vlan_group': 'infra',
},
'ether11': {
'vlan_group': 'infra',
},
'ether12': {
'vlan_group': 'infra',
},
'ether13': {
'vlan_group': 'infra',
},
'ether14': {
'vlan_group': 'infra',
},
'ether15': {
'vlan_group': 'infra',
},
'ether16': {
'vlan_group': 'infra',
},
'ether17': {
'vlan_group': 'infra',
},
'ether18': {
'vlan_group': 'infra',
},
'ether19': {
'vlan_group': 'infra',
},
'ether20': {
'vlan_group': 'infra',
},
'ether21': {
'vlan_group': 'infra',
},
'ether22': {
'vlan_group': 'infra',
},
'ether23': {
'vlan_group': 'infra',
},
'ether24': {
'vlan_group': 'infra',
},
},
},
},
}

View file

@ -0,0 +1,34 @@
{
'hostname': '10.0.0.62',
'password': '!decrypt:encrypt$gAAAAABoYFSyt2JAsdePXiHim1RdQwbarJedhAOE3XpS2rGMBx-F5eCWRCIyLU2g2ocUDUIDfgH3nBipUCkdcd0Bv4vbK-yqKmGSeSH7YXLYwq3ZWuCDsLM=',
'groups': [
'routeros',
],
'metadata': {
'id': '6d4b95dd-5d8a-4481-8c5f-8ee714d9f0cc',
'routeros': {
'ips': {
'10.0.0.62/24': {
'interface': 'home',
},
},
'ports': {
'ether1': {
'vlan_group': 'infra',
},
'ether2': {
'vlan_group': 'infra',
},
'ether3': {
'vlan_group': 'infra',
},
'ether4': {
'vlan_group': 'infra',
},
'ether5': {
'vlan_group': 'infra',
},
},
},
},
}

View file

@ -9,7 +9,6 @@
'webserver', 'webserver',
'dnsserver', 'dnsserver',
'wordpress', 'wordpress',
#'left4dead2',
], ],
'bundles': [ 'bundles': [
'bind-acme', 'bind-acme',
@ -17,7 +16,6 @@
'download-server', 'download-server',
'islamicstate.eu', 'islamicstate.eu',
#'nginx-rtmps', #'nginx-rtmps',
#'steam',
'wireguard', 'wireguard',
'zfs', 'zfs',
], ],
@ -54,6 +52,7 @@
'left4.me', 'left4.me',
'elimu-kwanza.de', 'elimu-kwanza.de',
'cronekorkn.de', 'cronekorkn.de',
'direkt.oranienschule.de',
'foerderkreis.oranienschule-wiesbaden-wiki.de', 'foerderkreis.oranienschule-wiesbaden-wiki.de',
}, },
}, },
@ -86,34 +85,6 @@
'domain': 'elimu-kwanza.de', 'domain': 'elimu-kwanza.de',
}, },
}, },
'left4dead2': {
'servers': {
'standard': {
'port': 27020,
},
# 'standard-2': {
# 'port': 27021,
# 'workshop': {
# #2256379828, # bhop detect
# },
# },
},
'admins': {
'STEAM_1:0:12376499', # CroneKorkN ☮UKRAINE❤
'STEAM_1:1:169960486', # *RED*
'STEAM_1:1:112940736', # Ðark-AnGe
'STEAM_1:1:34263261', # Alekc
'STEAM_1:0:583132949', # Cat
'STEAM_1:0:610180592', # SonovaBeach
'STEAM_1:1:157619181', # Null
},
'workshop': {
214630948,
1229957234,
698857882,
},
'steamgroups': {'103582791467869287'},
},
'letsencrypt': { 'letsencrypt': {
'domains': { 'domains': {
'ckn.li': {}, 'ckn.li': {},
@ -221,12 +192,17 @@
}, },
'mseibert.freescout': { 'mseibert.freescout': {
'allowed_ips': [ 'allowed_ips': [
'10.0.227.2/32', '10.0.227.0/24',
], ],
}, },
'mseibert.n8n': { 'mseibert.yourls': {
'allowed_ips': [ 'allowed_ips': [
'10.0.227.3/32', '10.0.228.0/24',
],
},
'mseibert.mailman': {
'allowed_ips': [
'10.0.229.0/24',
], ],
}, },
}, },

View file

@ -30,6 +30,9 @@
'gateway6': 'fe80::1', 'gateway6': 'fe80::1',
}, },
}, },
'postgresql': {
'password_algorithm': 'scram-sha-256',
},
'freescout': { 'freescout': {
'domain': 'foerderkreis.oranienschule-wiesbaden-wiki.de', 'domain': 'foerderkreis.oranienschule-wiesbaden-wiki.de',
}, },

73
nodes/mseibert.mailman.py Normal file
View file

@ -0,0 +1,73 @@
# https://teamvault.apps.seibert-media.net/secrets/mkqMRv/
# https://console.hetzner.cloud/projects/889138/servers/46578341
# mailman.ckn.li
{
'hostname': '91.99.123.176',
'groups': [
'backup',
'debian-12',
'monitored',
'webserver',
],
'bundles': [
'mailman',
'wireguard',
'zfs',
'postgresql',
],
'metadata': {
'id': '854cb39f-d964-4cc7-9051-ba6574708820',
'network': {
'internal': {
'interface': 'ens10',
'ipv4': '10.0.229.2/24',
},
'external': {
'interface': 'eth0',
'ipv4': '91.99.123.176/32',
'gateway4': '172.31.1.1',
'ipv6': '2a01:4f8:c013:2030::2/64',
'gateway6': 'fe80::1',
},
},
'mailman': {
'hostname': 'mailman.ckn.li',
'site_owner_email': '!decrypt:encrypt$gAAAAABoWEeTyypfKw9l9jnNgF4GlS0-6O2NWCB0f3Fj1XnQ_HMjHXymAL8FWTyQjRmz3r8KnGJ-sogfnhW6lub_pnuk-wqB5Zuy9tgGsfi3RvkyNaOUeTE=',
'smtp_host': 'smtp.ionos.de',
'smtp_port': 465,
'smtp_user': '!decrypt:encrypt$gAAAAABoWEcZlLxiTKluyg3gZ-un2fYkuviW9BD9tTW8mfKBL5d41Z1X7LtI5CDnhhLXTGFpPnY1thr17h22oW3Ybz_WPgvbJVepnVwmeQwvMpg2psATKAY=',
'smtp_password': '!decrypt:encrypt$gAAAAABoWDusH3XY4ONh8MnmfBbyHW477ipjSycb3TiDGXxO5eujum80zXjNrOblswCGRTHsW9UasM_dXeeGBsa7KcK4s6AK_eynXCWeLCtXfrUSE_oEd7c='
},
'overwrite_nameservers': [
'8.8.8.8',
],
'vm': {
'cores': 2,
'ram': 4096,
},
'wireguard': {
'my_ip': '172.30.0.240/32',
's2s': {
'htz.mails': {
'allowed_ips': [
'10.0.0.0/24',
'10.0.2.0/24',
'10.0.9.0/24',
'10.0.10.0/24',
],
},
},
},
'zfs': {
'pools': {
'tank': {
'devices': [
'/var/lib/zfs_file',
],
},
},
},
},
}

View file

@ -1,9 +1,8 @@
# https://teamvault.apps.seibert-media.net/secrets/mkqMRv/ # https://teamvault.apps.seibert-media.net/secrets/mkqMRv/
# https://console.hetzner.cloud/projects/889138/servers/56564150 # https://console.hetzner.cloud/projects/889138/servers/46578341
{ {
#'dummy': True, 'hostname': '168.119.250.114',
'hostname': '159.69.178.45',
'groups': [ 'groups': [
'backup', 'backup',
'debian-12', 'debian-12',
@ -11,33 +10,39 @@
'webserver', 'webserver',
], ],
'bundles': [ 'bundles': [
'n8n',
'nodejs',
'wireguard', 'wireguard',
'mariadb',
'php',
'yourls',
'zfs', 'zfs',
'postgresql',
], ],
'metadata': { 'metadata': {
'id': '4852308e-9d36-4a0e-b533-a291e1495db3', 'id': '52efcd47-edd8-426c-aead-c492553d14f9',
'network': { 'network': {
'internal': { 'internal': {
'interface': 'enp7s0', 'interface': 'ens10',
'ipv4': '10.0.227.3/24', 'ipv4': '10.0.228.2/24',
}, },
'external': { 'external': {
'interface': 'eth0', 'interface': 'eth0',
'ipv4': '159.69.178.45/32', 'ipv4': '168.119.250.114/32',
'gateway4': '172.31.1.1', 'gateway4': '172.31.1.1',
'ipv6': '2a01:4f8:c012:491b::1/64', 'ipv6': '2a01:4f8:c013:e321::2/64',
'gateway6': 'fe80::1', 'gateway6': 'fe80::1',
}, },
}, },
'n8n': { 'yourls': {
'version': '1.68.0', 'hostname': "direkt.oranienschule.de",
}, 'cookiekey': "!decrypt:encrypt$gAAAAABoRvmcUs3t7PREllyeN--jBqs0XYewMHW16GWC-ikLzsDSe02YKGycOlgXuHU4hzKbNjGMEutpFXRLk9Zji6bbpy4GdyE6vStfwd8ZT0obAyoqBPwI47LwUlDSFMS51y5j8rG5",
'nodejs': { 'version': "1.10.1",
'version': '20', 'users': {
'mseibert': "!decrypt:encrypt$gAAAAABoRwtOcslyRY9ahkmtVI8QbXgJhyE3nuk04eakFDKl-4OZViiRvjtQW3Uwqki1aFeAS-syzr0Ug5sZM_zNelNahjZyzW1k47Xg9GltGNn_zp-uUII=",
},
}, },
# FIXME:
# 'overwrite_nameservers': [
# '8.8.8.8',
# ],
'vm': { 'vm': {
'cores': 2, 'cores': 2,
'ram': 4096, 'ram': 4096,
@ -59,7 +64,7 @@
'pools': { 'pools': {
'tank': { 'tank': {
'devices': [ 'devices': [
'/var/lib/tank.img', '/var/lib/zfs_file',
], ],
}, },
}, },

View file

@ -1,7 +1,8 @@
{ {
'dummy': True,
'hostname': '192.168.179.20', 'hostname': '192.168.179.20',
'groups': [ 'groups': [
'debian-11', 'debian-12',
'monitored', 'monitored',
'raspberry-pi', 'raspberry-pi',
], ],

View file

@ -3,5 +3,4 @@ pycryptodome
PyNaCl PyNaCl
PyYAML PyYAML
pyqrcode pyqrcode
cache_to_disk
setuptools setuptools