Compare commits

...

74 commits

Author SHA1 Message Date
0c74cfd5e9
other tank 2025-08-24 15:27:28 +02:00
841f523f73
bootshorn stuff 2025-08-24 15:23:17 +02:00
6d38d04a1e
bootshonr fixed ip 2025-08-24 13:35:41 +02:00
504089427d
bootshorn records use temp file 2025-08-24 13:34:01 +02:00
60f29aab70
fix hue dhcp 2025-08-24 13:33:44 +02:00
ee94e30004 Merge pull request 'the next l4d2 server iteration, this time more simple and kinda working' (#27) from l4d2_the_next into master
Reviewed-on: #27
2025-08-24 13:33:23 +02:00
3469d98a43
the next l4d2 server iteration, this time more simple and kinda working 2025-08-24 13:33:05 +02:00
5fd775d855 Merge pull request 'htz.mails debian 13' (#26) from htz.mails_debian_13_squash into master
Reviewed-on: #26
2025-08-10 15:40:44 +02:00
725d5292b2
must set number to not screw bw comparison 2025-08-10 15:39:45 +02:00
9161a2501c
vmail set recordsize 2025-08-10 15:34:41 +02:00
9b3f856eb0
mailserver zfs params 2025-08-10 15:33:21 +02:00
9621184bd8
htz.mails debian 13 2025-08-10 15:10:46 +02:00
1f2273d2ab
scale htz.mails down 2025-08-10 09:41:37 +02:00
0514fa0241 Merge pull request 'debian 13' (#25) from debian-13 into master
Reviewed-on: #25
2025-08-10 09:37:49 +02:00
2f263476d3
fix sysctl 2025-08-09 23:31:29 +02:00
e65aa8fdab
openhab no longer exists 2025-08-09 23:08:15 +02:00
70b17657a1
update router 2025-08-09 23:08:06 +02:00
b8389352ec
dont purge sudoers 2025-08-09 22:46:01 +02:00
7586d4ff29
remove unnecessary locales 2025-08-09 22:45:19 +02:00
bc656cdef4
backups debian 13 2025-08-09 22:45:08 +02:00
278f6de6f5
l4d readme updates 2025-08-09 22:26:48 +02:00
2de9fed1fa
besteffort 2025-08-09 22:26:39 +02:00
3bcd2be520
netword remove netplan 2025-08-09 21:33:35 +02:00
7eac09e547
ovh.secondary cake 2025-08-09 21:33:26 +02:00
5fb1ee54b9
less annoying root passwords 2025-08-09 21:32:23 +02:00
ecfd60803f
fix gateway 2025-08-09 21:29:57 +02:00
81b17b389f
ovh.secondary l4d readme 2025-08-09 19:13:00 +02:00
57675c08eb
new ovh.secondary 2025-08-09 14:58:27 +02:00
64f869121b
zones.rfc1918 only affect recursive views 2025-08-09 12:46:05 +02:00
c41e6f8240
debian 13 2025-08-09 12:43:59 +02:00
7483d0c012 Merge pull request 'ipv6_picking' (#24) from ipv6_picking into master
Reviewed-on: #24
2025-08-09 12:43:09 +02:00
f1b26e5933
upgrade debian 2025-08-09 11:54:56 +02:00
f8ddcd7b7c
forward ipv6 2025-08-03 22:39:18 +02:00
962bd06a32
qdisc-ppp0 partof pppoe-isp 2025-08-03 22:38:12 +02:00
3d6d4d5503
IPv6AcceptRA not via dhcp option 2025-08-03 22:35:56 +02:00
4b22705ff7
pyenv install --skip-existing 2025-08-03 22:35:29 +02:00
983ad1b1ae
fix annoying icingaweb redirect to empty page 2025-07-13 14:04:50 +02:00
849c305d7d
remove obsolete homeassistant supervised 2025-07-13 14:04:31 +02:00
ff0d0d2e8b
nodes/home.homeassistant.py: or use nginx addon 2025-07-13 13:12:38 +02:00
c98b8c6f05
homeassistant letsencrypt 2025-07-13 13:10:37 +02:00
4136f819a5
start service instead of duplicating code 2025-07-13 13:10:19 +02:00
78fe5440a8
change leaked password 2025-07-13 12:45:20 +02:00
012325e996
address conflict 2025-07-13 10:57:56 +02:00
951fa63296
bootshorn better temp logging + 2025-07-13 10:13:23 +02:00
6f86abd997
bootshorn better temp logging 2025-07-13 10:08:52 +02:00
c1917d51a0
bootshorn log temp every 15 mins 2025-07-13 09:55:09 +02:00
75017a99df
bootshorn log temperature 2025-07-13 09:53:24 +02:00
980fdc8203
mailman readme 2025-07-12 14:04:44 +02:00
7df21873c1
wip 2025-07-12 14:03:11 +02:00
9bbaeb67d3
mailman poc email sent 2025-07-12 13:53:46 +02:00
a6b557882d
bundles/pppoe/files/isp: ipv6 wip 2025-07-12 12:15:23 +02:00
90c02e58bf
fix rack switch hostname 2025-07-11 23:58:04 +02:00
8829902e0b
fix indent 2025-07-11 23:55:33 +02:00
e7c5fe9213
fix set not dict 2025-07-11 23:55:02 +02:00
5a1ce55086
fix 2025-07-11 23:52:58 +02:00
cca320e2f4
pppoe 2025-07-11 23:49:46 +02:00
e4e3c57f20
pppoe telekom 2025-07-11 20:44:05 +02:00
5274639ca3
bootshorn recording 2025-07-11 19:10:49 +02:00
3e5ed906bc
cake traffic shaping 2025-07-10 20:39:24 +02:00
9a519432b0
nodes/home.switch-rack-poe.py: introduce 2025-07-10 10:34:27 +02:00
6a3424faf4
fix provides 2025-07-10 09:19:20 +02:00
19a8d28a24
homeassistant os is dummy 2025-07-08 20:10:50 +02:00
a52d9b052f
home.backups fiber 2025-07-06 18:23:09 +02:00
db56385513
vlan interface in vlan netwrok, not in seperate list 2025-07-01 12:20:39 +02:00
7ab96e6a47
router as dns relay 2025-07-01 11:43:22 +02:00
c37bca287e Merge pull request 'routeros' (#23) from routeros into master
Reviewed-on: #23
2025-07-01 11:32:27 +02:00
d17f6da77a
tidy up and try home dns server 2025-07-01 11:30:57 +02:00
460f809403
more routeros 2025-07-01 11:30:57 +02:00
0e6a705d3f
routeros switches ok 2025-07-01 11:30:57 +02:00
d54eff344f
routeros wip 2025-07-01 11:30:37 +02:00
79a54578b8
yourls remove temp leftovers 2025-06-30 09:53:31 +02:00
1d8f20ff25
yurlls fix monitoring and use dehydrated certs 2025-06-29 14:46:39 +02:00
d3b8e2e414
mailman 2025-06-29 12:37:09 +02:00
85daf26174
routeros 2025-06-29 12:32:18 +02:00
113 changed files with 3881 additions and 833 deletions

View file

@ -0,0 +1,8 @@
; Empty placeholder zone: SOA and NS records only, no other data.
; Referenced by the RFC 1918 reverse zones in /etc/bind/zones.rfc1918 so
; lookups in those ranges are answered locally instead of being forwarded.
$TTL 86400
@	IN	SOA	localhost. root.localhost. (
			1		; Serial
			604800		; Refresh
			86400		; Retry
			2419200		; Expire
			86400 )		; Negative Cache TTL
	IN	NS	localhost.

View file

@ -29,6 +29,7 @@ view "${view_name}" {
% if view_conf['is_internal']:
recursion yes;
include "/etc/bind/zones.rfc1918";
% else:
recursion no;
rate-limit {
@ -62,9 +63,6 @@ view "${view_name}" {
file "/var/lib/bind/${view_name}/${zone_name}";
};
% endfor
include "/etc/bind/named.conf.default-zones";
include "/etc/bind/zones.rfc1918";
};
% endfor

View file

@ -10,7 +10,7 @@ options {
% if type == 'master':
notify yes;
also-notify { ${' '.join([f'{ip};' for ip in slave_ips])} };
allow-transfer { ${' '.join([f'{ip};' for ip in slave_ips])} };
also-notify { ${' '.join(sorted(f'{ip};' for ip in slave_ips))} };
allow-transfer { ${' '.join(sorted(f'{ip};' for ip in slave_ips))} };
% endif
};

View file

@ -0,0 +1,19 @@
// Reverse zones for RFC 1918 private ranges (10/8, 172.16/12, 192.168/16)
// plus the IPv4 link-local range (169.254/16). Each is served as master from
// the empty placeholder zone, so queries for private address space are
// answered locally rather than forwarded to the internet.
zone "10.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "16.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "17.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "18.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "19.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "20.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "21.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "22.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "23.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "24.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "25.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "26.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "27.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "28.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "29.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "30.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "31.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "168.192.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "254.169.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };

View file

@ -142,3 +142,21 @@ actions['named-checkconf'] = {
'svc_systemd:bind9:reload',
]
}
# Answers queries for private (RFC 1918) IP ranges with NXDOMAIN instead of
# forwarding them to the internet.
files['/etc/bind/zones.rfc1918'] = {
    'needed_by': [
        'svc_systemd:bind9',
    ],
    'triggers': [
        'svc_systemd:bind9:reload',
    ],
}
# Empty placeholder zone file backing the RFC 1918 zones above.
files['/etc/bind/db.empty'] = {
    'needed_by': [
        'svc_systemd:bind9',
    ],
    'triggers': [
        'svc_systemd:bind9:reload',
    ],
}

View file

@ -3,6 +3,7 @@ from json import dumps
h = repo.libs.hashable.hashable
repo.libs.bind.repo = repo
defaults = {
'apt': {
'packages': {
@ -211,7 +212,7 @@ def generate_keys(metadata):
'token':repo.libs.hmac.hmac_sha512(
key,
str(repo.vault.random_bytes_as_base64_for(
f"{metadata.get('id')} bind key {key}",
f"{metadata.get('id')} bind key {key} 20250713",
length=32,
)),
)

165
bundles/bootshorn/files/process Executable file
View file

@ -0,0 +1,165 @@
#!/usr/bin/env python3
import os
import datetime
import numpy as np
import matplotlib.pyplot as plt
import soundfile as sf
from scipy.fft import rfft, rfftfreq
import shutil
import traceback
# Directory layout: raw recordings arrive in RECORDINGS_DIR and are moved to
# PROCESSED_RECORDINGS_DIR after analysis; detected events (FLAC clip plus
# spectrogram PNG) are written to DETECTIONS_DIR.
RECORDINGS_DIR = "recordings"
PROCESSED_RECORDINGS_DIR = "recordings/processed"
DETECTIONS_DIR = "events"

# Detection tuning parameters.
DETECT_FREQUENCY = 211  # Hz
DETECT_FREQUENCY_TOLERANCE = 2  # Hz
ADJACENCY_FACTOR = 2  # area to look for the frequency (e.g. 2 means 100Hz to 400Hz for 200Hz detection)
BLOCK_SECONDS = 3  # seconds (longer means more frequency resolution, but less time resolution)
DETECTION_DISTANCE_SECONDS = 30  # seconds (minimum time between detections)
BLOCK_OVERLAP_FACTOR = 0.9  # overlap between blocks (0.2 means 20% overlap)
MIN_SIGNAL_QUALITY = 1000.0  # minimum peak-to-background ratio to consider a detection valid
PLOT_PADDING_START_SECONDS = 2  # seconds (padding before the event in the plot)
PLOT_PADDING_END_SECONDS = 3  # seconds (padding after the event in the plot)

# Values derived from the parameters above.
DETECTION_DISTANCE_BLOCKS = DETECTION_DISTANCE_SECONDS // BLOCK_SECONDS  # number of blocks to skip after a detection
DETECT_FREQUENCY_FROM = DETECT_FREQUENCY - DETECT_FREQUENCY_TOLERANCE  # Hz
DETECT_FREQUENCY_TO = DETECT_FREQUENCY + DETECT_FREQUENCY_TOLERANCE  # Hz
def process_recording(filename):
    """Scan one FLAC recording for the target frequency and write out events.

    The recording is read in overlapping blocks of BLOCK_SECONDS; each block
    is FFT'd and checked for a dominant spectral peak near DETECT_FREQUENCY
    with sufficient signal quality. Consecutive matching blocks are merged
    into a single event, which is written via write_event(). Afterwards the
    recording is moved to PROCESSED_RECORDINGS_DIR.

    filename: basename of a file inside RECORDINGS_DIR, whose stem encodes
    the recording start time as "%Y-%m-%d_%H-%M-%S.%f%z".
    """
    print('processing', filename)

    # get the recording start date from the filename
    date_string_from_filename = os.path.splitext(filename)[0]
    recording_date = datetime.datetime.strptime(date_string_from_filename, "%Y-%m-%d_%H-%M-%S.%f%z")

    # get data and metadata from recording
    path = os.path.join(RECORDINGS_DIR, filename)
    soundfile = sf.SoundFile(path)
    samplerate = soundfile.samplerate
    samples_per_block = int(BLOCK_SECONDS * samplerate)
    overlapping_samples = int(samples_per_block * BLOCK_OVERLAP_FACTOR)

    sample_num = 0
    current_event = None
    while sample_num < len(soundfile):
        soundfile.seek(sample_num)
        block = soundfile.read(frames=samples_per_block, dtype='float32', always_2d=False)
        if len(block) == 0:
            break

        # calculate FFT; labels holds the center frequency of each bin
        labels = rfftfreq(len(block), d=1/samplerate)
        complex_amplitudes = rfft(block)
        amplitudes = np.abs(complex_amplitudes)

        # get the frequency with the highest amplitude within the search range
        search_range = (labels >= DETECT_FREQUENCY_FROM/ADJACENCY_FACTOR) & (labels <= DETECT_FREQUENCY_TO*ADJACENCY_FACTOR)
        search_amplitudes = amplitudes[search_range]
        search_labels = labels[search_range]
        max_amplitude = max(search_amplitudes)
        max_amplitude_index = np.argmax(search_amplitudes)
        max_freq = search_labels[max_amplitude_index]
        max_freq_detected = DETECT_FREQUENCY_FROM <= max_freq <= DETECT_FREQUENCY_TO

        # signal quality: peak amplitude relative to the mean amplitude
        # outside the detection band
        adjacent_amplitudes = amplitudes[(labels < DETECT_FREQUENCY_FROM) | (labels > DETECT_FREQUENCY_TO)]
        signal_quality = max_amplitude/np.mean(adjacent_amplitudes)
        good_signal_quality = signal_quality > MIN_SIGNAL_QUALITY

        # conclude detection
        if (
            max_freq_detected and
            good_signal_quality
        ):
            block_date = recording_date + datetime.timedelta(seconds=sample_num / samplerate)
            if not current_event:
                # start of a new event
                current_event = {
                    'start_at': block_date,
                    'end_at': block_date,
                    'start_sample': sample_num,
                    'end_sample': sample_num + samples_per_block,
                    'start_freq': max_freq,
                    'end_freq': max_freq,
                    'max_amplitude': max_amplitude,
                }
            else:
                # event continues: extend its end and track the peak amplitude
                current_event.update({
                    'end_at': block_date,
                    'end_freq': max_freq,
                    'end_sample': sample_num + samples_per_block,
                    'max_amplitude': max(max_amplitude, current_event['max_amplitude']),
                })
            # double-quoted f-string so the single quotes inside the
            # replacement fields also parse on Python < 3.12
            print(f"- {block_date.strftime('%Y-%m-%d %H:%M:%S')}: {max_amplitude:.1f}rDB @ {max_freq:.1f}Hz (signal {signal_quality:.3f}x)")
        else:
            # no detection in this block: finalize a running event, if any
            if current_event:
                duration = (current_event['end_at'] - current_event['start_at']).total_seconds()
                current_event['duration'] = duration
                print(f"🔊 {current_event['start_at'].strftime('%Y-%m-%d %H:%M:%S')} ({duration:.1f}s): {current_event['start_freq']:.1f}Hz->{current_event['end_freq']:.1f}Hz @{current_event['max_amplitude']:.0f}rDB")
                # read full audio clip again for writing
                write_event(current_event=current_event, soundfile=soundfile, samplerate=samplerate)
                current_event = None
                # enforce a minimum distance between consecutive detections
                sample_num += DETECTION_DISTANCE_BLOCKS * samples_per_block
        sample_num += samples_per_block - overlapping_samples

    # move to PROCESSED_RECORDINGS_DIR
    os.makedirs(PROCESSED_RECORDINGS_DIR, exist_ok=True)
    shutil.move(os.path.join(RECORDINGS_DIR, filename), os.path.join(PROCESSED_RECORDINGS_DIR, filename))
# write a spectrogram using the sound from start to end of the event
def write_event(current_event, soundfile, samplerate):
# date and filename
event_date = current_event['start_at'] - datetime.timedelta(seconds=PLOT_PADDING_START_SECONDS)
filename_prefix = event_date.strftime('%Y-%m-%d_%H-%M-%S.%f%z')
# event clip
event_start_sample = current_event['start_sample'] - samplerate * PLOT_PADDING_START_SECONDS
event_end_sample = current_event['end_sample'] + samplerate * PLOT_PADDING_END_SECONDS
total_samples = event_end_sample - event_start_sample
soundfile.seek(event_start_sample)
event_clip = soundfile.read(frames=total_samples, dtype='float32', always_2d=False)
# write flac
flac_path = os.path.join(DETECTIONS_DIR, f"{filename_prefix}.flac")
sf.write(flac_path, event_clip, samplerate, format='FLAC')
# write spectrogram
plt.figure(figsize=(8, 6))
plt.specgram(event_clip, Fs=samplerate, NFFT=samplerate, noverlap=samplerate//2, cmap='inferno', vmin=-100, vmax=-10)
plt.title(f"Bootshorn @{event_date.strftime('%Y-%m-%d %H:%M:%S%z')}")
plt.xlabel(f"Time {current_event['duration']:.1f}s")
plt.ylabel(f"Frequency {current_event['start_freq']:.1f}Hz -> {current_event['end_freq']:.1f}Hz")
plt.colorbar(label="Intensity (rDB)")
plt.ylim(50, 1000)
plt.savefig(os.path.join(DETECTIONS_DIR, f"{filename_prefix}.png"))
plt.close()
def main():
    """Process every FLAC file waiting in RECORDINGS_DIR, oldest first."""
    os.makedirs(RECORDINGS_DIR, exist_ok=True)
    os.makedirs(PROCESSED_RECORDINGS_DIR, exist_ok=True)
    for filename in sorted(os.listdir(RECORDINGS_DIR)):
        if filename.endswith(".flac"):
            try:
                process_recording(filename)
            except Exception as e:
                # report which file failed (original printed "(unknown)")
                # and keep going: one broken recording must not stop the batch
                print(f"Error processing {filename}: {e}")
                traceback.print_exc()

if __name__ == "__main__":
    main()

25
bundles/bootshorn/files/record Executable file
View file

@ -0,0 +1,25 @@
#!/bin/sh

# Continuously record one-hour FLAC clips from the USB audio interface.
# In-progress clips live in recordings/current/; finished clips are moved
# into recordings/ atomically, so the processing job never sees a file
# that is still being written.

# must create recordings/current too — ffmpeg below writes into it
# (the original only created recordings/ and would fail on first run)
mkdir -p recordings/current

while true
do
    # get date in ISO 8601 format with microseconds (%6N); GNU date is
    # required for %N, hence gdate on macOS
    PROGRAMM=$(test $(uname) = "Darwin" && echo "gdate" || echo "date")
    DATE=$($PROGRAMM "+%Y-%m-%d_%H-%M-%S.%6N%z")

    # record one hour of mono 96kHz 32-bit audio as maximally-compressed FLAC
    ffmpeg \
        -y \
        -f pulse \
        -i "alsa_input.usb-HANMUS_USB_AUDIO_24BIT_2I2O_1612310-00.analog-stereo" \
        -ac 1 \
        -ar 96000 \
        -sample_fmt s32 \
        -t "3600" \
        -c:a flac \
        -compression_level 12 \
        "recordings/current/$DATE.flac"

    mv "recordings/current/$DATE.flac" "recordings/$DATE.flac"
done

View file

@ -0,0 +1,43 @@
#!/usr/bin/env python3
"""Poll the Hue bridge for one temperature sensor and append it to a CSV log.

This file is a mako template: ${'${hue_ip}'.replace('x','x')} placeholders are filled in at
deploy time (see the bundle's 'context').
"""
import requests
import urllib3
import datetime
import csv
import os

# the bridge serves a self-signed certificate; suppress the TLS warning
urllib3.disable_warnings()

HUE_IP = "${hue_ip}"  # templated: hue bridge address
HUE_APP_KEY = "${hue_app_key}"  # local only
HUE_DEVICE_ID = "31f58786-3242-4e88-b9ce-23f44ba27bbe"
TEMPERATURE_LOG_DIR = "/opt/bootshorn/temperatures"

response = requests.get(
    f"https://{HUE_IP}/clip/v2/resource/temperature",
    headers={"hue-application-key": HUE_APP_KEY},
    verify=False,  # self-signed bridge certificate
)
response.raise_for_status()
data = response.json()

for item in data["data"]:
    if item["id"] == HUE_DEVICE_ID:
        temperature = item["temperature"]["temperature"]
        temperature_date_string = item["temperature"]["temperature_report"]["changed"]
        temperature_date = datetime.datetime.fromisoformat(temperature_date_string).astimezone(datetime.timezone.utc)
        break
else:
    # without this, a missing device would crash below with a confusing NameError
    raise SystemExit(f"temperature sensor {HUE_DEVICE_ID} not found on bridge {HUE_IP}")

print(f"@{temperature_date}: {temperature}°C")

# one log file per UTC day of the reading, named like the recordings
filename = temperature_date.strftime("%Y-%m-%d_00-00-00.000000%z") + ".log"
logpath = os.path.join(TEMPERATURE_LOG_DIR, filename)
now_utc = datetime.datetime.now(datetime.timezone.utc)
with open(logpath, "a+", newline="") as logfile:
    writer = csv.writer(logfile)
    writer.writerow([
        now_utc.strftime('%Y-%m-%d_%H-%M-%S.%f%z'),  # current UTC time
        temperature_date.strftime('%Y-%m-%d_%H-%M-%S.%f%z'),  # date of temperature reading
        temperature,
    ])

View file

@ -0,0 +1,61 @@
# NOTE(review): SELinux must be disabled manually on the target host:
#   nano /etc/selinux/config
#   SELINUX=disabled
#   reboot

# Directory tree under /opt/bootshorn, all owned by the recording user.
directories = {
    '/opt/bootshorn': {
        'owner': 'ckn',
        'group': 'ckn',
    },
    # per-day temperature CSV logs written by the 'temperature' script
    '/opt/bootshorn/temperatures': {
        'owner': 'ckn',
        'group': 'ckn',
    },
    # finished recordings awaiting processing
    '/opt/bootshorn/recordings': {
        'owner': 'ckn',
        'group': 'ckn',
    },
    # recordings currently being written by ffmpeg
    '/opt/bootshorn/recordings/current': {
        'owner': 'ckn',
        'group': 'ckn',
    },
    # recordings already analyzed by the 'process' script
    '/opt/bootshorn/recordings/processed': {
        'owner': 'ckn',
        'group': 'ckn',
    },
    # detected events (audio clips and spectrograms)
    '/opt/bootshorn/events': {
        'owner': 'ckn',
        'group': 'ckn',
    },
}

# Deployed scripts. 'temperature' is a mako template that gets the hue
# bridge hostname and app key injected at deploy time.
files = {
    '/opt/bootshorn/record': {
        'owner': 'ckn',
        'group': 'ckn',
        'mode': '755',
    },
    '/opt/bootshorn/temperature': {
        'content_type': 'mako',
        'context': {
            'hue_ip': repo.get_node('home.hue').hostname,
            'hue_app_key': repo.vault.decrypt('encrypt$gAAAAABoc2WxZCLbxl-Z4IrSC97CdOeFgBplr9Fp5ujpd0WCCCPNBUY_WquHN86z8hKLq5Y04dwq8TdJW0PMSOSgTFbGgdp_P1q0jOBLEKaW9IIT1YM88h-JYwLf9QGDV_5oEfvnBCtO'),
        },
        'owner': 'ckn',
        'group': 'ckn',
        'mode': '755',
    },
    '/opt/bootshorn/process': {
        'owner': 'ckn',
        'group': 'ckn',
        'mode': '755',
    },
}

# The recorder service can only start once its script is deployed.
svc_systemd = {
    'bootshorn-record.service': {
        'needs': {
            'file:/opt/bootshorn/record',
        },
    },
}

View file

@ -0,0 +1,44 @@
# systemd units and timers for the bootshorn recorder host.
defaults = {
    'systemd': {
        'units': {
            # long-running recorder loop (deployed as /opt/bootshorn/record)
            'bootshorn-record.service': {
                'Unit': {
                    'Description': 'Bootshorn Recorder',
                    'After': 'network.target',
                },
                'Service': {
                    'User': 'ckn',
                    'Group': 'ckn',
                    'Type': 'simple',
                    'WorkingDirectory': '/opt/bootshorn',
                    'ExecStart': '/opt/bootshorn/record',
                    'Restart': 'always',
                    'RestartSec': 5,
                    # ffmpeg records through the user session's pulseaudio
                    # socket, so point the service at uid 1000's runtime dir
                    'Environment': {
                        "XDG_RUNTIME_DIR": "/run/user/1000",
                        "PULSE_SERVER": "unix:/run/user/1000/pulse/native",
                    },
                },
            },
        },
    },
    'systemd-timers': {
        # log the temperature sensor every 10 minutes
        'bootshorn-temperature': {
            'command': '/opt/bootshorn/temperature',
            'when': '*:0/10',
            'working_dir': '/opt/bootshorn',
            'user': 'ckn',
            'group': 'ckn',
        },
        # processing timer currently disabled; kept for reference
        # 'bootshorn-process': {
        #     'command': '/opt/bootshorn/process',
        #     'when': 'hourly',
        #     'working_dir': '/opt/bootshorn',
        #     'user': 'ckn',
        #     'group': 'ckn',
        #     'after': {
        #         'bootshorn-process.service',
        #     },
        # },
    },
}

View file

@ -1,17 +0,0 @@
connect = host=${host} dbname=${name} user=${user} password=${password}
driver = pgsql
default_pass_scheme = ARGON2ID
user_query = SELECT '/var/vmail/%u' AS home, 'vmail' AS uid, 'vmail' AS gid
iterate_query = SELECT CONCAT(users.name, '@', domains.name) AS user \
FROM users \
LEFT JOIN domains ON users.domain_id = domains.id \
WHERE redirect IS NULL
password_query = SELECT CONCAT(users.name, '@', domains.name) AS user, password \
FROM users \
LEFT JOIN domains ON users.domain_id = domains.id \
WHERE redirect IS NULL \
AND users.name = SPLIT_PART('%u', '@', 1) \
AND domains.name = SPLIT_PART('%u', '@', 2)

View file

@ -1,13 +1,17 @@
dovecot_config_version = ${config_version}
dovecot_storage_version = ${storage_version}
protocols = imap lmtp sieve
auth_mechanisms = plain login
mail_privileged_group = mail
ssl = required
ssl_cert = </var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')}/fullchain.pem
ssl_key = </var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')}/privkey.pem
ssl_dh = </etc/dovecot/dhparam.pem
ssl_server_cert_file = /var/lib/dehydrated/certs/${hostname}/fullchain.pem
ssl_server_key_file = /var/lib/dehydrated/certs/${hostname}/privkey.pem
ssl_server_dh_file = /etc/dovecot/dhparam.pem
ssl_client_ca_dir = /etc/ssl/certs
mail_location = maildir:${node.metadata.get('mailserver/maildir')}/%u:INDEX=${node.metadata.get('mailserver/maildir')}/index/%u
mail_plugins = fts fts_xapian
mail_driver = maildir
mail_path = ${maildir}/%{user}
mail_index_path = ${maildir}/index/%{user}
mail_plugins = fts fts_flatcurve
namespace inbox {
inbox = yes
@ -30,14 +34,46 @@ namespace inbox {
}
}
passdb {
driver = sql
args = /etc/dovecot/dovecot-sql.conf
# postgres passdb userdb
sql_driver = pgsql
pgsql main {
parameters {
host = ${db_host}
dbname = ${db_name}
user = ${db_user}
password = ${db_password}
}
}
# use sql for userdb too, to enable iterate_query
userdb {
driver = sql
args = /etc/dovecot/dovecot-sql.conf
passdb sql {
passdb_default_password_scheme = ARGON2ID
query = SELECT \
CONCAT(users.name, '@', domains.name) AS "user", \
password \
FROM users \
LEFT JOIN domains ON users.domain_id = domains.id \
WHERE redirect IS NULL \
AND users.name = SPLIT_PART('%{user}', '@', 1) \
AND domains.name = SPLIT_PART('%{user}', '@', 2)
}
mail_uid = vmail
mail_gid = vmail
userdb sql {
query = SELECT \
'/var/vmail/%{user}' AS home, \
'vmail' AS uid, \
'vmail' AS gid
iterate_query = SELECT \
CONCAT(users.name, '@', domains.name) AS username \
FROM users \
LEFT JOIN domains ON users.domain_id = domains.id \
WHERE redirect IS NULL
}
service auth {
@ -67,10 +103,9 @@ service stats {
}
}
service managesieve-login {
inet_listener sieve {
}
process_min_avail = 0
service_count = 1
#inet_listener sieve {}
process_min_avail = 1
process_limit = 1
vsz_limit = 64 M
}
service managesieve {
@ -78,31 +113,53 @@ service managesieve {
}
protocol imap {
mail_plugins = $mail_plugins imap_sieve
mail_max_userip_connections = 50
imap_idle_notify_interval = 29 mins
mail_plugins = fts fts_flatcurve imap_sieve
mail_max_userip_connections = 50
imap_idle_notify_interval = 29 mins
}
protocol lmtp {
mail_plugins = $mail_plugins sieve
mail_plugins = fts fts_flatcurve sieve
}
protocol sieve {
plugin {
sieve = /var/vmail/sieve/%u.sieve
sieve_storage = /var/vmail/sieve/%u/
}
# Persönliches Skript (deine alte Datei /var/vmail/sieve/%u.sieve)
sieve_script personal {
driver = file
# Verzeichnis mit (evtl. mehreren) Sieve-Skripten des Users
path = /var/vmail/sieve/%{user}/
# Aktives Skript (entspricht früher "sieve = /var/vmail/sieve/%u.sieve")
active_path = /var/vmail/sieve/%{user}.sieve
}
# Globales After-Skript (dein früheres "sieve_after = …")
sieve_script after {
type = after
driver = file
path = /var/vmail/sieve/global/spam-to-folder.sieve
}
# fulltext search
plugin {
fts = xapian
fts_xapian = partial=3 full=20 verbose=0
fts_autoindex = yes
fts_enforced = yes
# Index attachements
fts_decoder = decode2text
language en {
}
language de {
default = yes
}
language_tokenizers = generic email-address
fts flatcurve {
substring_search = yes
# rotate_count = 5000 # DB-Rotation nach X Mails
# rotate_time = 5s # oder zeitbasiert rotieren
# optimize_limit = 10
# min_term_size = 3
}
fts_autoindex = yes
fts_decoder_driver = script
fts_decoder_script_socket_path = decode2text
service indexer-worker {
vsz_limit = ${indexer_ram}
process_limit = ${indexer_cores}
vsz_limit = ${indexer_ram}M
}
service decode2text {
executable = script /usr/local/libexec/dovecot/decode2text.sh
@ -112,24 +169,39 @@ service decode2text {
}
}
# spam filter
plugin {
sieve_plugins = sieve_imapsieve sieve_extprograms
sieve_dir = /var/vmail/sieve/%u/
sieve = /var/vmail/sieve/%u.sieve
sieve_pipe_bin_dir = /var/vmail/sieve/bin
sieve_extensions = +vnd.dovecot.pipe
sieve_after = /var/vmail/sieve/global/spam-to-folder.sieve
# From elsewhere to Spam folder
imapsieve_mailbox1_name = Junk
imapsieve_mailbox1_causes = COPY
imapsieve_mailbox1_before = file:/var/vmail/sieve/global/learn-spam.sieve
# From Spam folder to elsewhere
imapsieve_mailbox2_name = *
imapsieve_mailbox2_from = Junk
imapsieve_mailbox2_causes = COPY
imapsieve_mailbox2_before = file:/var/vmail/sieve/global/learn-ham.sieve
mailbox Junk {
sieve_script learn_spam {
driver = file
type = before
cause = copy
path = /var/vmail/sieve/global/learn-spam.sieve
}
}
imapsieve_from Junk {
sieve_script learn_ham {
driver = file
type = before
cause = copy
path = /var/vmail/sieve/global/learn-ham.sieve
}
}
# Extprograms-Plugin einschalten
sieve_plugins {
sieve_extprograms = yes
}
# Welche Sieve-Erweiterungen dürfen genutzt werden?
# Empfehlung: nur global erlauben (nicht in User-Skripten):
sieve_global_extensions {
vnd.dovecot.pipe = yes
# vnd.dovecot.filter = yes # nur falls gebraucht
# vnd.dovecot.execute = yes # nur falls gebraucht
}
# Verzeichnis mit deinen Skripten/Binaries für :pipe
sieve_pipe_bin_dir = /var/vmail/sieve/bin
# (optional, analog für :filter / :execute)
# sieve_filter_bin_dir = /var/vmail/sieve/filter
# sieve_execute_bin_dir = /var/vmail/sieve/execute

View file

@ -44,6 +44,16 @@ files = {
'context': {
'admin_email': node.metadata.get('mailserver/admin_email'),
'indexer_ram': node.metadata.get('dovecot/indexer_ram'),
'config_version': node.metadata.get('dovecot/config_version'),
'storage_version': node.metadata.get('dovecot/storage_version'),
'maildir': node.metadata.get('mailserver/maildir'),
'hostname': node.metadata.get('mailserver/hostname'),
'db_host': node.metadata.get('mailserver/database/host'),
'db_name': node.metadata.get('mailserver/database/name'),
'db_user': node.metadata.get('mailserver/database/user'),
'db_password': node.metadata.get('mailserver/database/password'),
'indexer_cores': node.metadata.get('vm/cores'),
'indexer_ram': node.metadata.get('vm/ram')//2,
},
'needs': {
'pkg_apt:'
@ -52,29 +62,9 @@ files = {
'svc_systemd:dovecot:restart',
},
},
'/etc/dovecot/dovecot-sql.conf': {
'content_type': 'mako',
'context': node.metadata.get('mailserver/database'),
'needs': {
'pkg_apt:'
},
'triggers': {
'svc_systemd:dovecot:restart',
},
},
'/etc/dovecot/dhparam.pem': {
'content_type': 'any',
},
'/etc/dovecot/dovecot-sql.conf': {
'content_type': 'mako',
'context': node.metadata.get('mailserver/database'),
'needs': {
'pkg_apt:'
},
'triggers': {
'svc_systemd:dovecot:restart',
},
},
'/var/vmail/sieve/global/spam-to-folder.sieve': {
'owner': 'vmail',
'group': 'vmail',
@ -131,7 +121,6 @@ svc_systemd = {
'action:letsencrypt_update_certificates',
'action:dovecot_generate_dhparam',
'file:/etc/dovecot/dovecot.conf',
'file:/etc/dovecot/dovecot-sql.conf',
},
},
}

View file

@ -8,7 +8,7 @@ defaults = {
'dovecot-sieve': {},
'dovecot-managesieved': {},
# fulltext search
'dovecot-fts-xapian': {}, # buster-backports
'dovecot-flatcurve': {}, # buster-backports
'poppler-utils': {}, # pdftotext
'catdoc': {}, # catdoc, catppt, xls2csv
},

View file

@ -49,7 +49,7 @@ files['/etc/gitea/app.ini'] = {
),
'owner': 'git',
'mode': '0600',
'context': node.metadata['gitea'],
'context': node.metadata.get('gitea'),
'triggers': {
'svc_systemd:gitea:restart',
},

View file

@ -69,9 +69,6 @@ defaults = {
},
},
},
'nginx': {
'has_websockets': True,
},
}
@ -142,6 +139,7 @@ def dns(metadata):
@metadata_reactor.provides(
'nginx/has_websockets',
'nginx/vhosts',
)
def nginx(metadata):

View file

@ -1,23 +0,0 @@
https://github.com/home-assistant/supervised-installer?tab=readme-ov-file
https://github.com/home-assistant/os-agent/tree/main?tab=readme-ov-file#using-home-assistant-supervised-on-debian
https://docs.docker.com/engine/install/debian/
https://www.home-assistant.io/installation/linux#install-home-assistant-supervised
https://github.com/home-assistant/supervised-installer
https://github.com/home-assistant/architecture/blob/master/adr/0014-home-assistant-supervised.md
DATA_SHARE=/usr/share/hassio dpkg --force-confdef --force-confold -i homeassistant-supervised.deb
neu debian
ha installieren
gucken ob geht
dann bw drüberbügeln
https://www.home-assistant.io/integrations/http/#ssl_certificate
`wget "$(curl -L https://api.github.com/repos/home-assistant/supervised-installer/releases/latest | jq -r '.assets[0].browser_download_url')" -O homeassistant-supervised.deb && dpkg -i homeassistant-supervised.deb`

View file

@ -1,30 +0,0 @@
from shlex import quote
version = node.metadata.get('homeassistant/os_agent_version')
directories = {
'/usr/share/hassio': {},
}
actions = {
'install_os_agent': {
'command': ' && '.join([
f'wget -O /tmp/os-agent.deb https://github.com/home-assistant/os-agent/releases/download/{quote(version)}/os-agent_{quote(version)}_linux_aarch64.deb',
'DEBIAN_FRONTEND=noninteractive dpkg -i /tmp/os-agent.deb',
]),
'unless': f'test "$(apt -qq list os-agent | cut -d" " -f2)" = "{quote(version)}"',
'needs': {
'pkg_apt:',
'zfs_dataset:tank/homeassistant',
},
},
'install_homeassistant_supervised': {
'command': 'wget -O /tmp/homeassistant-supervised.deb https://github.com/home-assistant/supervised-installer/releases/latest/download/homeassistant-supervised.deb && apt install /tmp/homeassistant-supervised.deb',
'unless': 'apt -qq list homeassistant-supervised | grep -q "installed"',
'needs': {
'action:install_os_agent',
},
},
}

View file

@ -1,65 +0,0 @@
defaults = {
'apt': {
'packages': {
# homeassistant-supervised
'apparmor': {},
'bluez': {},
'cifs-utils': {},
'curl': {},
'dbus': {},
'jq': {},
'libglib2.0-bin': {},
'lsb-release': {},
'network-manager': {},
'nfs-common': {},
'systemd-journal-remote': {},
'systemd-resolved': {},
'udisks2': {},
'wget': {},
# docker
'docker-ce': {},
'docker-ce-cli': {},
'containerd.io': {},
'docker-buildx-plugin': {},
'docker-compose-plugin': {},
},
'sources': {
# docker: https://docs.docker.com/engine/install/debian/#install-using-the-repository
'docker': {
'urls': {
'https://download.docker.com/linux/debian',
},
'suites': {
'{codename}',
},
'components': {
'stable',
},
},
},
},
'zfs': {
'datasets': {
'tank/homeassistant': {
'mountpoint': '/usr/share/hassio',
'needed_by': {
'directory:/usr/share/hassio',
},
},
},
},
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('homeassistant/domain'): {
'content': 'homeassistant/vhost.conf',
},
},
},
}

View file

@ -179,6 +179,7 @@ def nginx(metadata):
'context': {
'php_version': metadata.get('php/version'),
},
'check_path': '/icingaweb2/index.php',
},
},
},

View file

@ -52,13 +52,14 @@ def subnets(metadata):
if 'mac' in network_conf
)
for network_name, network_conf in metadata.get('network').items():
for id, (network_name, network_conf) in enumerate(sorted(metadata.get('network').items())):
dhcp_server_config = network_conf.get('dhcp_server_config', None)
if dhcp_server_config:
_network = ip_network(dhcp_server_config['subnet'])
subnet4.add(hashable({
'id': id + 1,
'subnet': dhcp_server_config['subnet'],
'pools': [
{
@ -72,7 +73,7 @@ def subnets(metadata):
},
{
'name': 'domain-name-servers',
'data': '10.0.10.2',
'data': '10.0.0.1',
},
],
'reservations': set(

View file

@ -1,58 +1 @@
https://developer.valvesoftware.com/wiki/List_of_L4D2_Cvars
Dead Center c1m1_hotel
Dead Center c1m2_streets
Dead Center c1m3_mall
Dead Center c1m4_atrium
Dark Carnival c2m1_highway
Dark Carnival c2m2_fairgrounds
Dark Carnival c2m3_coaster
Dark Carnival c2m4_barns
Dark Carnival c2m5_concert
Swamp Fever c3m1_plankcountry
Swamp Fever c3m2_swamp
Swamp Fever c3m3_shantytown
Swamp Fever c3m4_plantation
Hard Rain c4m1_milltown_a
Hard Rain c4m2_sugarmill_a
Hard Rain c4m3_sugarmill_b
Hard Rain c4m4_milltown_b
Hard Rain c4m5_milltown_escape
The Parish c5m1_waterfront_sndscape
The Parish c5m1_waterfront
The Parish c5m2_park
The Parish c5m3_cemetery
The Parish c5m4_quarter
The Parish c5m5_bridge
The Passing c6m1_riverbank
The Passing c6m2_bedlam
The Passing c6m3_port
The Sacrifice c7m1_docks
The Sacrifice c7m2_barge
The Sacrifice c7m3_port
No Mercy c8m1_apartment
No Mercy c8m2_subway
No Mercy c8m3_sewers
No Mercy c8m4_interior
No Mercy c8m5_rooftop
Crash Course c9m1_alleys
Crash Course c9m2_lots
Death Toll c10m1_caves
Death Toll c10m2_drainage
Death Toll c10m3_ranchhouse
Death Toll c10m4_mainstreet
Death Toll c10m5_houseboat
Dead Air c11m1_greenhouse
Dead Air c11m2_offices
Dead Air c11m3_garage
Dead Air c11m4_terminal
Dead Air c11m5_runway
Blood Harvest c12m1_hilltop
Blood Harvest c12m2_traintunnel
Blood Harvest c12m3_bridge
Blood Harvest c12m4_barn
Blood Harvest c12m5_cornfield
Cold Stream c13m1_alpinecreek
Cold Stream c13m2_southpinestream
Cold Stream c13m3_memorialbridge
Cold Stream c13m4_cutthroatcreek
https://github.com/SirPlease/L4D2-Competitive-Rework/blob/master/Dedicated%20Server%20Install%20Guide/README.md

View file

@ -0,0 +1,96 @@
#!/bin/bash
set -xeuo pipefail
getent passwd steam >/dev/null || useradd -M -d /opt/l4d2 -s /bin/bash steam
mkdir -p /opt/l4d2 /tmp/dumps
chown steam:steam /opt/l4d2 /tmp/dumps
dpkg --add-architecture i386
apt update
DEBIAN_FRONTEND=noninteractive apt install -y libc6:i386 lib32z1
function steam() {
    # Run the given command as the unprivileged "steam" user via setpriv
    # (rather than su/sudo) so systemd can track and terminate the process.
    # HOME must be exported *before* setpriv runs: the original exported it
    # only after the command had already returned, so the first steam
    # invocation ran without HOME pointing at the steam home directory.
    export HOME=/opt/l4d2/steam
    setpriv --reuid=steam --regid=steam --init-groups "$@"
}
# -- STEAM -- #
steam mkdir -p /opt/l4d2/steam
test -f /opt/l4d2/steam/steamcmd_linux.tar.gz || \
steam wget http://media.steampowered.com/installer/steamcmd_linux.tar.gz -P /opt/l4d2/steam
test -f /opt/l4d2/steam/steamcmd.sh || \
steam tar -xvzf /opt/l4d2/steam/steamcmd_linux.tar.gz -C /opt/l4d2/steam
# fix for: /opt/l4d2/.steam/sdk32/steamclient.so: cannot open shared object file: No such file or directory
steam mkdir -p /opt/l4d2/steam/.steam # needs to be in steam users home dir
readlink /opt/l4d2/steam/.steam/sdk32 | grep -q ^/opt/l4d2/steam/linux32$ || \
steam ln -sf /opt/l4d2/steam/linux32 /opt/l4d2/steam/.steam/sdk32
readlink /opt/l4d2/steam/.steam/sdk64 | grep -q ^/opt/l4d2/steam/linux64$ || \
steam ln -sf /opt/l4d2/steam/linux64 /opt/l4d2/steam/.steam/sdk64
# -- INSTALL -- #
# installing the Windows depot first appears to be a workaround needed for x64?
steam mkdir -p /opt/l4d2/installation
steam /opt/l4d2/steam/steamcmd.sh \
+force_install_dir /opt/l4d2/installation \
+login anonymous \
+@sSteamCmdForcePlatformType windows \
+app_update 222860 validate \
+quit
steam /opt/l4d2/steam/steamcmd.sh \
+force_install_dir /opt/l4d2/installation \
+login anonymous \
+@sSteamCmdForcePlatformType linux \
+app_update 222860 validate \
+quit
# -- OVERLAYS -- #
steam mkdir -p /opt/l4d2/overlays
# workshop downloader
test -f /opt/l4d2/steam-workshop-download || \
steam wget -4 https://git.sublimity.de/cronekorkn/steam-workshop-downloader/raw/branch/master/steam-workshop-download -P /opt/l4d2
steam chmod +x /opt/l4d2/steam-workshop-download
# -- OVERLAY PVE -- #
steam mkdir -p /opt/l4d2/overlays/pve
# server config
steam mkdir -p /opt/l4d2/overlays/pve/left4dead2/cfg
# Write the pve overlay server config as the steam user. The original used
# `steam cat <<EOF > file`: the redirection was performed by the root shell,
# leaving a root-owned file inside the steam-owned overlay tree. `steam tee`
# opens the file as the steam user instead.
steam tee /opt/l4d2/overlays/pve/left4dead2/cfg/server.cfg >/dev/null <<'EOF'
motd_enabled 0
sv_steamgroup "38347879"
#sv_steamgroup_exclusive 0
sv_minrate 60000
sv_maxrate 0
net_splitpacket_maxrate 60000
#sv_cheats 1
#sb_all_bot_game 1
EOF
# admin system
steam mkdir -p /opt/l4d2/overlays/pve/left4dead2/addons
test -f /opt/l4d2/overlays/pve/left4dead2/addons/2524204971.vpk || \
steam /opt/l4d2/steam-workshop-download 2524204971 --out /opt/l4d2/overlays/pve/left4dead2/addons
steam mkdir -p "/opt/l4d2/overlays/pve/left4dead2/ems/admin system"
steam echo "STEAM_1:0:12376499" > "/opt/l4d2/overlays/pve/left4dead2/ems/admin system/admins.txt"
# ions vocalizer
test -f /opt/l4d2/overlays/pve/left4dead2/addons/698857882.vpk || \
steam /opt/l4d2/steam-workshop-download 698857882 --out /opt/l4d2/overlays/pve/left4dead2/addons
test -f /opt/l4d2/overlays/pve/left4dead2/addons/1575673903.vpk || \
steam /opt/l4d2/steam-workshop-download 1575673903 --out /opt/l4d2/overlays/pve/left4dead2/addons
# -- SERVERS -- #
#steam rm -rf /opt/l4d2/servers
steam mkdir -p /opt/l4d2/servers

View file

@ -0,0 +1,28 @@
#!/bin/bash
set -xeuo pipefail
name=$1
overlay=$2
port=$3
function steam() {
    # Run the given command as the unprivileged "steam" user via setpriv
    # (rather than su/sudo) so systemd can track and terminate the process.
    # Export HOME *before* setpriv so the command actually sees it; the
    # original set HOME only after the command had already finished.
    export HOME=/opt/l4d2/steam
    setpriv --reuid=steam --regid=steam --init-groups "$@"
}
mountpoint -q "/opt/l4d2/servers/$name/merged" && umount "/opt/l4d2/servers/$name/merged"
steam rm -rf "/opt/l4d2/servers/$name"
steam mkdir -p \
"/opt/l4d2/servers/$name" \
"/opt/l4d2/servers/$name/work" \
"/opt/l4d2/servers/$name/upper" \
"/opt/l4d2/servers/$name/merged"
mount -t overlay overlay \
-o "lowerdir=/opt/l4d2/overlays/$overlay:/opt/l4d2/installation,upperdir=/opt/l4d2/servers/$name/upper,workdir=/opt/l4d2/servers/$name/work" \
"/opt/l4d2/servers/$name/merged"
steam "/opt/l4d2/servers/$name/merged/srcds_run" -norestart -pidfile "/opt/l4d2/servers/$name/pid" -game left4dead2 -ip 0.0.0.0 -port "$port" +hostname "Crone_$name" +map c1m1_hotel

View file

@ -1,122 +1,31 @@
assert node.has_bundle('steam') and node.has_bundle('steam-workshop-download')
directories = {
'/opt/steam/left4dead2-servers': {
'owner': 'steam',
'group': 'steam',
'mode': '0755',
'purge': True,
files = {
'/opt/l4d2/setup': {
'mode': '755',
},
# Current zfs doesnt support zfs upperdir. The support was added in October 2022. Move upperdir - unused anyway -
# to another dir. Also move workdir alongside it, as it has to be on same fs.
'/opt/steam-zfs-overlay-workarounds': {
'owner': 'steam',
'group': 'steam',
'mode': '0755',
'purge': True,
'/opt/l4d2/start': {
'mode': '755',
},
}
# /opt/steam/steam/.steam/sdk32/steamclient.so: cannot open shared object file: No such file or directory
symlinks = {
'/opt/steam/steam/.steam/sdk32': {
'target': '/opt/steam/steam/linux32',
'owner': 'steam',
'group': 'steam',
}
}
#
# SERVERS
#
for name, config in node.metadata.get('left4dead2/servers').items():
#overlay
directories[f'/opt/steam/left4dead2-servers/{name}'] = {
'owner': 'steam',
'group': 'steam',
}
directories[f'/opt/steam-zfs-overlay-workarounds/{name}/upper'] = {
'owner': 'steam',
'group': 'steam',
}
directories[f'/opt/steam-zfs-overlay-workarounds/{name}/workdir'] = {
'owner': 'steam',
'group': 'steam',
}
# conf
files[f'/opt/steam/left4dead2-servers/{name}/left4dead2/cfg/server.cfg'] = {
'content_type': 'mako',
'source': 'server.cfg',
'context': {
'name': name,
'steamgroups': node.metadata.get('left4dead2/steamgroups'),
'rcon_password': config['rcon_password'],
svc_systemd = {
'left4dead2-initialize.service': {
'enabled': True,
'running': None,
'needs': {
'file:/usr/local/lib/systemd/system/left4dead2-initialize.service',
},
'owner': 'steam',
'group': 'steam',
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
],
}
},
}
# service
svc_systemd[f'left4dead2-{name}.service'] = {
'needs': [
f'file:/opt/steam/left4dead2-servers/{name}/left4dead2/cfg/server.cfg',
f'file:/usr/local/lib/systemd/system/left4dead2-{name}.service',
],
}
#
# ADDONS
#
# base
files[f'/opt/steam/left4dead2-servers/{name}/left4dead2/addons/readme.txt'] = {
'content_type': 'any',
'owner': 'steam',
'group': 'steam',
}
directories[f'/opt/steam/left4dead2-servers/{name}/left4dead2/addons'] = {
'owner': 'steam',
'group': 'steam',
'purge': True,
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
],
}
for id in [
*config.get('workshop', []),
*node.metadata.get('left4dead2/workshop'),
]:
files[f'/opt/steam/left4dead2-servers/{name}/left4dead2/addons/{id}.vpk'] = {
'content_type': 'any',
'owner': 'steam',
'group': 'steam',
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
],
for server_name in node.metadata.get('left4dead2').keys():
svc_systemd[f'left4dead2-{server_name}.service'] = {
'enabled': True,
'running': True,
'tags': {
'left4dead2-servers',
},
'needs': {
'svc_systemd:left4dead2-initialize.service',
f'file:/usr/local/lib/systemd/system/left4dead2-{server_name}.service',
}
# admin system
directories[f'/opt/steam/left4dead2-servers/{name}/left4dead2/ems/admin system'] = {
'owner': 'steam',
'group': 'steam',
'mode': '0755',
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
],
}
files[f'/opt/steam/left4dead2-servers/{name}/left4dead2/ems/admin system/admins.txt'] = {
'owner': 'steam',
'group': 'steam',
'mode': '0755',
'content': '\n'.join(sorted(node.metadata.get('left4dead2/admins'))),
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
],
}

View file

@ -1,102 +1,68 @@
assert node.has_bundle('steam')
from re import match
from shlex import quote
defaults = {
'steam': {
'games': {
'left4dead2': 222860,
'apt': {
'packages': {
'libc6_i386': {}, # installs libc6:i386
'lib32z1': {},
'unzip': {},
},
},
'left4dead2': {
'servers': {},
'admins': set(),
'workshop': set(),
'left4dead2': {},
'nftables': {
'input': {
'udp dport { 27005, 27020 } accept',
},
},
'systemd': {
'units': {
'left4dead2-initialize.service': {
'Unit': {
'Description': 'initialize left4dead2',
'After': 'network-online.target',
},
'Service': {
'Type': 'oneshot',
'RemainAfterExit': 'yes',
'ExecStart': '/opt/l4d2/setup',
'StandardOutput': 'journal',
'StandardError': 'journal',
},
'Install': {
'WantedBy': {'multi-user.target'},
},
},
},
},
}
@metadata_reactor.provides(
'left4dead2/servers',
)
def rconn_password(metadata):
# only works from localhost!
return {
'left4dead2': {
'servers': {
server: {
'rcon_password': repo.vault.password_for(f'{node.name} left4dead2 {server} rcon', length=24),
}
for server in metadata.get('left4dead2/servers')
},
},
}
@metadata_reactor.provides(
'steam-workshop-download',
'systemd/units',
)
def server_units(metadata):
units = {}
workshop = {}
for name, config in metadata.get('left4dead2/servers').items():
# mount overlay
mountpoint = f'/opt/steam/left4dead2-servers/{name}'
mount_unit_name = mountpoint[1:].replace('-', '\\x2d').replace('/', '-') + '.mount'
units[mount_unit_name] = {
'Unit': {
'Description': f"Mount left4dead2 server {name} overlay",
'Conflicts': {'umount.target'},
'Before': {'umount.target'},
},
'Mount': {
'What': 'overlay',
'Where': mountpoint,
'Type': 'overlay',
'Options': ','.join([
'auto',
'lowerdir=/opt/steam/left4dead2',
f'upperdir=/opt/steam-zfs-overlay-workarounds/{name}/upper',
f'workdir=/opt/steam-zfs-overlay-workarounds/{name}/workdir',
]),
},
'Install': {
'RequiredBy': {
f'left4dead2-{name}.service',
},
},
}
for name, config in metadata.get('left4dead2').items():
assert match(r'^[A-z0-9-_-]+$', name)
assert config["overlay"] in {'pve'}
assert 27000 <= config["port"] <= 27100
# individual workshop
workshop_ids = config.get('workshop', set()) | metadata.get('left4dead2/workshop', set())
if workshop_ids:
workshop[f'left4dead2-{name}'] = {
'ids': workshop_ids,
'path': f'/opt/steam/left4dead2-servers/{name}/left4dead2/addons',
'user': 'steam',
'requires': {
mount_unit_name,
},
'required_by': {
f'left4dead2-{name}.service',
},
}
# left4dead2 server unit
units[f'left4dead2-{name}.service'] = {
'Unit': {
'Description': f'left4dead2 server {name}',
'After': {'steam-update.service'},
'Requires': {'steam-update.service'},
'After': {'left4dead2-initialize.service'},
'Requires': {'left4dead2-initialize.service'},
},
'Service': {
'User': 'steam',
'Group': 'steam',
'WorkingDirectory': f'/opt/steam/left4dead2-servers/{name}',
'ExecStart': f'/opt/steam/left4dead2-servers/{name}/srcds_run -port {config["port"]} +exec server.cfg',
'Type': 'simple',
'ExecStart': f'/opt/l4d2/start {name} {config["overlay"]} {config["port"]}',
'Restart': 'on-failure',
'Nice': -10,
'CPUWeight': 200,
'IOSchedulingClass': 'best-effort',
'IOSchedulingPriority': 0,
},
'Install': {
'WantedBy': {'multi-user.target'},
@ -104,7 +70,6 @@ def server_units(metadata):
}
return {
'steam-workshop-download': workshop,
'systemd': {
'units': units,
},
@ -114,14 +79,13 @@ def server_units(metadata):
@metadata_reactor.provides(
'nftables/input',
)
def firewall(metadata):
ports = set(str(server['port']) for server in metadata.get('left4dead2/servers').values())
def nftables(metadata):
ports = sorted(str(config["port"]) for config in metadata.get('left4dead2', {}).values())
return {
'nftables': {
'input': {
f"tcp dport {{ {', '.join(sorted(ports))} }} accept",
f"udp dport {{ {', '.join(sorted(ports))} }} accept",
f'ip protocol {{ tcp, udp }} th dport {{ {", ".join(ports)} }} accept'
},
},
}

View file

@ -0,0 +1,58 @@
https://developer.valvesoftware.com/wiki/List_of_L4D2_Cvars
Dead Center c1m1_hotel
Dead Center c1m2_streets
Dead Center c1m3_mall
Dead Center c1m4_atrium
Dark Carnival c2m1_highway
Dark Carnival c2m2_fairgrounds
Dark Carnival c2m3_coaster
Dark Carnival c2m4_barns
Dark Carnival c2m5_concert
Swamp Fever c3m1_plankcountry
Swamp Fever c3m2_swamp
Swamp Fever c3m3_shantytown
Swamp Fever c3m4_plantation
Hard Rain c4m1_milltown_a
Hard Rain c4m2_sugarmill_a
Hard Rain c4m3_sugarmill_b
Hard Rain c4m4_milltown_b
Hard Rain c4m5_milltown_escape
The Parish c5m1_waterfront_sndscape
The Parish c5m1_waterfront
The Parish c5m2_park
The Parish c5m3_cemetery
The Parish c5m4_quarter
The Parish c5m5_bridge
The Passing c6m1_riverbank
The Passing c6m2_bedlam
The Passing c6m3_port
The Sacrifice c7m1_docks
The Sacrifice c7m2_barge
The Sacrifice c7m3_port
No Mercy c8m1_apartment
No Mercy c8m2_subway
No Mercy c8m3_sewers
No Mercy c8m4_interior
No Mercy c8m5_rooftop
Crash Course c9m1_alleys
Crash Course c9m2_lots
Death Toll c10m1_caves
Death Toll c10m2_drainage
Death Toll c10m3_ranchhouse
Death Toll c10m4_mainstreet
Death Toll c10m5_houseboat
Dead Air c11m1_greenhouse
Dead Air c11m2_offices
Dead Air c11m3_garage
Dead Air c11m4_terminal
Dead Air c11m5_runway
Blood Harvest c12m1_hilltop
Blood Harvest c12m2_traintunnel
Blood Harvest c12m3_bridge
Blood Harvest c12m4_barn
Blood Harvest c12m5_cornfield
Cold Stream c13m1_alpinecreek
Cold Stream c13m2_southpinestream
Cold Stream c13m3_memorialbridge
Cold Stream c13m4_cutthroatcreek

View file

@ -0,0 +1,122 @@
assert node.has_bundle('steam') and node.has_bundle('steam-workshop-download')
directories = {
'/opt/steam/left4dead2-servers': {
'owner': 'steam',
'group': 'steam',
'mode': '0755',
'purge': True,
},
# Current zfs doesnt support zfs upperdir. The support was added in October 2022. Move upperdir - unused anyway -
# to another dir. Also move workdir alongside it, as it has to be on same fs.
'/opt/steam-zfs-overlay-workarounds': {
'owner': 'steam',
'group': 'steam',
'mode': '0755',
'purge': True,
},
}
# /opt/steam/steam/.steam/sdk32/steamclient.so: cannot open shared object file: No such file or directory
symlinks = {
'/opt/steam/steam/.steam/sdk32': {
'target': '/opt/steam/steam/linux32',
'owner': 'steam',
'group': 'steam',
}
}
#
# SERVERS
#
for name, config in node.metadata.get('left4dead2/servers').items():
#overlay
directories[f'/opt/steam/left4dead2-servers/{name}'] = {
'owner': 'steam',
'group': 'steam',
}
directories[f'/opt/steam-zfs-overlay-workarounds/{name}/upper'] = {
'owner': 'steam',
'group': 'steam',
}
directories[f'/opt/steam-zfs-overlay-workarounds/{name}/workdir'] = {
'owner': 'steam',
'group': 'steam',
}
# conf
files[f'/opt/steam/left4dead2-servers/{name}/left4dead2/cfg/server.cfg'] = {
'content_type': 'mako',
'source': 'server.cfg',
'context': {
'name': name,
'steamgroups': node.metadata.get('left4dead2/steamgroups'),
'rcon_password': config['rcon_password'],
},
'owner': 'steam',
'group': 'steam',
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
],
}
# service
svc_systemd[f'left4dead2-{name}.service'] = {
'needs': [
f'file:/opt/steam/left4dead2-servers/{name}/left4dead2/cfg/server.cfg',
f'file:/usr/local/lib/systemd/system/left4dead2-{name}.service',
],
}
#
# ADDONS
#
# base
files[f'/opt/steam/left4dead2-servers/{name}/left4dead2/addons/readme.txt'] = {
'content_type': 'any',
'owner': 'steam',
'group': 'steam',
}
directories[f'/opt/steam/left4dead2-servers/{name}/left4dead2/addons'] = {
'owner': 'steam',
'group': 'steam',
'purge': True,
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
],
}
for id in [
*config.get('workshop', []),
*node.metadata.get('left4dead2/workshop'),
]:
files[f'/opt/steam/left4dead2-servers/{name}/left4dead2/addons/{id}.vpk'] = {
'content_type': 'any',
'owner': 'steam',
'group': 'steam',
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
],
}
# admin system
directories[f'/opt/steam/left4dead2-servers/{name}/left4dead2/ems/admin system'] = {
'owner': 'steam',
'group': 'steam',
'mode': '0755',
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
],
}
files[f'/opt/steam/left4dead2-servers/{name}/left4dead2/ems/admin system/admins.txt'] = {
'owner': 'steam',
'group': 'steam',
'mode': '0755',
'content': '\n'.join(sorted(node.metadata.get('left4dead2/admins'))),
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
],
}

View file

@ -0,0 +1,127 @@
assert node.has_bundle('steam')
from shlex import quote
defaults = {
'steam': {
'games': {
'left4dead2': 222860,
},
},
'left4dead2': {
'servers': {},
'admins': set(),
'workshop': set(),
},
}
@metadata_reactor.provides(
    'left4dead2/servers',
)
def rconn_password(metadata):
    """Attach a vault-derived rcon password to every configured server.

    NOTE: rcon only works from localhost.
    """
    servers = {}
    for server in metadata.get('left4dead2/servers'):
        servers[server] = {
            'rcon_password': repo.vault.password_for(
                f'{node.name} left4dead2 {server} rcon',
                length=24,
            ),
        }
    return {
        'left4dead2': {
            'servers': servers,
        },
    }
@metadata_reactor.provides(
    'steam-workshop-download',
    'systemd/units',
)
def server_units(metadata):
    """Build systemd mount + service units and workshop download jobs
    for every entry in ``left4dead2/servers``."""
    units = {}
    workshop = {}
    for name, config in metadata.get('left4dead2/servers').items():
        # mount overlay: build the systemd mount unit name from the mountpoint
        # path ('-' escaped as \x2d, '/' turned into '-', per systemd naming).
        mountpoint = f'/opt/steam/left4dead2-servers/{name}'
        mount_unit_name = mountpoint[1:].replace('-', '\\x2d').replace('/', '-') + '.mount'
        units[mount_unit_name] = {
            'Unit': {
                'Description': f"Mount left4dead2 server {name} overlay",
                'Conflicts': {'umount.target'},
                'Before': {'umount.target'},
            },
            'Mount': {
                'What': 'overlay',
                'Where': mountpoint,
                'Type': 'overlay',
                'Options': ','.join([
                    'auto',
                    # shared game install is the read-only lower layer
                    'lowerdir=/opt/steam/left4dead2',
                    # upper/work dirs live outside the zfs dataset (zfs
                    # upperdir workaround, see the directories in items.py)
                    f'upperdir=/opt/steam-zfs-overlay-workarounds/{name}/upper',
                    f'workdir=/opt/steam-zfs-overlay-workarounds/{name}/workdir',
                ]),
            },
            'Install': {
                'RequiredBy': {
                    f'left4dead2-{name}.service',
                },
            },
        }
        # individual workshop: per-server addon ids merged with the global set
        workshop_ids = config.get('workshop', set()) | metadata.get('left4dead2/workshop', set())
        if workshop_ids:
            workshop[f'left4dead2-{name}'] = {
                'ids': workshop_ids,
                'path': f'/opt/steam/left4dead2-servers/{name}/left4dead2/addons',
                'user': 'steam',
                'requires': {
                    mount_unit_name,
                },
                'required_by': {
                    f'left4dead2-{name}.service',
                },
            }
        # left4dead2 server unit
        units[f'left4dead2-{name}.service'] = {
            'Unit': {
                'Description': f'left4dead2 server {name}',
                'After': {'steam-update.service'},
                'Requires': {'steam-update.service'},
            },
            'Service': {
                'User': 'steam',
                'Group': 'steam',
                'WorkingDirectory': f'/opt/steam/left4dead2-servers/{name}',
                'ExecStart': f'/opt/steam/left4dead2-servers/{name}/srcds_run -port {config["port"]} +exec server.cfg',
                'Restart': 'on-failure',
            },
            'Install': {
                'WantedBy': {'multi-user.target'},
            },
        }
    return {
        'steam-workshop-download': workshop,
        'systemd': {
            'units': units,
        },
    }
@metadata_reactor.provides(
    'nftables/input',
)
def firewall(metadata):
    """Accept TCP and UDP traffic on every configured server port."""
    # set comprehension dedupes ports shared by several servers
    unique_ports = {
        str(server['port'])
        for server in metadata.get('left4dead2/servers').values()
    }
    port_list = ', '.join(sorted(unique_ports))
    return {
        'nftables': {
            'input': {
                f"tcp dport {{ {port_list} }} accept",
                f"udp dport {{ {port_list} }} accept",
            },
        },
    }

View file

@ -0,0 +1,97 @@
# https://github.com/SirPlease/L4D2-Competitive-Rework/blob/master/Dedicated%20Server%20Install%20Guide/README.md
getent passwd steam >/dev/null || useradd -M -d /opt/l4d2 -s /bin/bash steam
mkdir -p /opt/l4d2 /tmp/dumps
chown steam:steam /opt/l4d2 /tmp/dumps
dpkg --add-architecture i386
apt update
DEBIAN_FRONTEND=noninteractive apt install -y libc6:i386 lib32z1
function steam() { sudo -Hiu steam $* }
# -- STEAM -- #
steam mkdir -p /opt/l4d2/steam
test -f /opt/l4d2/steam/steamcmd_linux.tar.gz || \
steam wget http://media.steampowered.com/installer/steamcmd_linux.tar.gz -P /opt/l4d2/steam
test -f /opt/l4d2/steam/steamcmd.sh || \
steam tar -xvzf /opt/l4d2/steam/steamcmd_linux.tar.gz -C /opt/l4d2/steam
# fix: /opt/l4d2/.steam/sdk32/steamclient.so: cannot open shared object file: No such file or directory
steam mkdir -p /opt/l4d2/steam/.steam
test -f /opt/l4d2/steam/.steam/sdk32/steamclient.so || \
steam ln -s /opt/l4d2/steam/linux32 /opt/l4d2/steam/.steam/sdk32
# -- INSTALL -- #
# erst die windows deps zu installieren scheint ein workaround für x64 zu sein?
steam mkdir -p /opt/l4d2/installation
steam /opt/l4d2/steam/steamcmd.sh \
+force_install_dir /opt/l4d2/installation \
+login anonymous \
+@sSteamCmdForcePlatformType windows \
+app_update 222860 validate \
+quit
steam /opt/l4d2/steam/steamcmd.sh \
+force_install_dir /opt/l4d2/installation \
+login anonymous \
+@sSteamCmdForcePlatformType linux \
+app_update 222860 validate \
+quit
# -- OVERLAYS -- #
steam mkdir -p /opt/l4d2/overlays
# workshop downloader
steam wget -4 https://git.sublimity.de/cronekorkn/steam-workshop-downloader/raw/branch/master/steam-workshop-download -P /opt/l4d2
steam chmod +x /opt/l4d2/steam-workshop-download
# -- OVERLAY PVE -- #
steam mkdir -p /opt/l4d2/overlays/pve
# admin system
steam mkdir -p /opt/l4d2/overlays/pve/left4dead2/addons
steam /opt/l4d2/steam-workshop-download 2524204971 --out /opt/l4d2/overlays/pve/left4dead2/addons
steam mkdir -p "/opt/l4d2/overlays/pve/left4dead2/ems/admin system"
echo "STEAM_1:0:12376499" | steam tee "/opt/l4d2/overlays/pve/left4dead2/ems/admin system/admins.txt"
# ions vocalizer
steam /opt/l4d2/steam-workshop-download 698857882 --out /opt/l4d2/overlays/pve/left4dead2/addons
# -- OVERLAY ZONEMOD -- #
true
# -- SERVERS -- #
steam mkdir -p /opt/l4d2/servers
# -- SERVER PVE1 -- #
steam mkdir -p \
/opt/l4d2/servers/pve1 \
/opt/l4d2/servers/pve1/work \
/opt/l4d2/servers/pve1/upper \
/opt/l4d2/servers/pve1/merged
mount -t overlay overlay \
-o lowerdir=/opt/l4d2/overlays/pve:/opt/l4d2/installation,upperdir=/opt/l4d2/servers/pve1/upper,workdir=/opt/l4d2/servers/pve1/work \
/opt/l4d2/servers/pve1/merged
# run server
steam cat <<'EOF' > /opt/l4d2/servers/pve1/merged/left4dead2/cfg/server.cfg
hostname "CKNs Server"
motd_enabled 0
sv_steamgroup "38347879"
#sv_steamgroup_exclusive 0
sv_minrate 60000
sv_maxrate 0
net_splitpacket_maxrate 60000
sv_hibernate_when_empty 0
EOF
steam /opt/l4d2/servers/pve1/merged/srcds_run -game left4dead2 -ip 0.0.0.0 -port 27015 +map c1m1_hotel

View file

View file

@ -0,0 +1,183 @@
from shlex import quote
def steam_run(cmd):
    """Return a shell command string that runs *cmd* as the ``steam`` user
    in a login shell, with the command safely single-argument quoted."""
    quoted = quote(cmd)
    return 'su - steam -c ' + quoted
users = {
'steam': {
'home': '/opt/steam',
},
}
directories = {
'/opt/steam': {
'owner': 'steam',
'group': 'steam',
},
'/opt/steam/.steam': {
'owner': 'steam',
'group': 'steam',
},
'/opt/left4dead2': {
'owner': 'steam',
'group': 'steam',
},
'/opt/left4dead2/left4dead2/ems/admin system': {
'owner': 'steam',
'group': 'steam',
},
'/opt/left4dead2/left4dead2/addons': {
'owner': 'steam',
'group': 'steam',
},
'/tmp/dumps': {
'owner': 'steam',
'group': 'steam',
'mode': '1770',
},
}
symlinks = {
'/opt/steam/.steam/sdk32': {
'target': '/opt/steam/linux32',
'owner': 'steam',
'group': 'steam',
},
}
files = {
'/opt/steam-workshop-download': {
'content_type': 'download',
'source': 'https://git.sublimity.de/cronekorkn/steam-workshop-downloader/raw/branch/master/steam-workshop-download',
'mode': '755',
},
'/opt/left4dead2/left4dead2/ems/admin system/admins.txt': {
'unless': 'test -f /opt/left4dead2/left4dead2/ems/admin system/admins.txt',
'content': 'STEAM_1:0:12376499',
'owner': 'steam',
'group': 'steam',
},
}
actions = {
'dpkg_add_architecture': {
'command': 'dpkg --add-architecture i386',
'unless': 'dpkg --print-foreign-architectures | grep -q i386',
'triggers': [
'action:apt_update',
],
'needed_by': [
'pkg_apt:libc6_i386',
],
},
'download_steam': {
'command': steam_run('wget http://media.steampowered.com/installer/steamcmd_linux.tar.gz -P /opt/steam'),
'unless': steam_run('test -f /opt/steam/steamcmd_linux.tar.gz'),
'needs': {
'pkg_apt:libc6_i386',
'directory:/opt/steam',
}
},
'extract_steamcmd': {
'command': steam_run('tar -xvzf /opt/steam/steamcmd_linux.tar.gz -C /opt/steam'),
'unless': steam_run('test -f /opt/steam/steamcmd.sh'),
'needs': {
'action:download_steam',
}
},
}
for addon_id in [2524204971]:
actions[f'download-left4dead2-addon-{addon_id}'] = {
'command': steam_run(f'/opt/steam-workshop-download {addon_id} --out /opt/left4dead2/left4dead2/addons'),
'unless': steam_run(f'test -f /opt/left4dead2/left4dead2/addons/{addon_id}.vpk'),
'needs': {
'directory:/opt/left4dead2/left4dead2/addons',
},
'needed_by': {
'tag:left4dead2-servers',
},
}
svc_systemd = {
'left4dead2-install.service': {
'enabled': True,
'running': False,
'needs': {
'file:/usr/local/lib/systemd/system/left4dead2-install.service',
},
},
}
for server_name, server_config in node.metadata.get('left4dead2/servers', {}).items():
svc_systemd[f'left4dead2-{server_name}.service'] = {
'enabled': True,
'running': True,
'tags': {
'left4dead2-servers',
},
'needs': {
'svc_systemd:left4dead2-install.service',
f'file:/usr/local/lib/systemd/system/left4dead2-{server_name}.service',
}
}
# # https://github.com/SirPlease/L4D2-Competitive-Rework/blob/master/Dedicated%20Server%20Install%20Guide/README.md
# mkdir /opt/steam /tmp/dumps
# useradd -M -d /opt/steam -s /bin/bash steam
# chown steam:steam /opt/steam /tmp/dumps
# dpkg --add-architecture i386
# apt update
# apt install libc6:i386 lib32z1
# sudo su - steam -s /bin/bash
# #--------
# wget http://media.steampowered.com/installer/steamcmd_linux.tar.gz
# tar -xvzf steamcmd_linux.tar.gz
# # fix: /opt/steam/.steam/sdk32/steamclient.so: cannot open shared object file: No such file or directory
# mkdir /opt/steam/.steam && ln -s /opt/steam/linux32 /opt/steam/.steam/sdk32
# # erst die windows deps zu installieren scheint ein workaround für x64 zu sein?
# ./steamcmd.sh \
# +force_install_dir /opt/steam/left4dead2 \
# +login anonymous \
# +@sSteamCmdForcePlatformType windows \
# +app_update 222860 validate \
# +quit
# ./steamcmd.sh \
# +force_install_dir /opt/steam/left4dead2 \
# +login anonymous \
# +@sSteamCmdForcePlatformType linux \
# +app_update 222860 validate \
# +quit
# # download admin system
# wget -4 https://git.sublimity.de/cronekorkn/steam-workshop-downloader/raw/branch/master/steam-workshop-download
# chmod +x steam-workshop-download
# ./steam-workshop-download 2524204971 --out /opt/steam/left4dead2/left4dead2/addons
# mkdir -p "/opt/steam/left4dead2/left4dead2/ems/admin system"
# echo "STEAM_1:0:12376499" > "/opt/steam/left4dead2/left4dead2/ems/admin system/admins.txt"
# /opt/steam/left4dead2/srcds_run -game left4dead2 -ip 0.0.0.0 -port 27015 +map c1m1_hotel
# cat <<'EOF' > /opt/steam/left4dead2/left4dead2/cfg/server.cfg
# hostname "CKNs Server"
# motd_enabled 0
# sv_steamgroup "38347879"
# #sv_steamgroup_exclusive 0
# sv_minrate 60000
# sv_maxrate 0
# net_splitpacket_maxrate 60000
# sv_hibernate_when_empty 0
# EOF

View file

@ -0,0 +1,107 @@
from re import match
defaults = {
'apt': {
'packages': {
'libc6_i386': {}, # installs libc6:i386
'lib32z1': {},
'unzip': {},
},
},
'left4dead2': {
'servers': {},
},
'nftables': {
'input': {
'udp dport { 27005, 27020 } accept',
},
},
}
@metadata_reactor.provides(
    'nftables/input',
)
def nftables(metadata):
    """Open each configured server's game port for both TCP and UDP."""
    ports = sorted(str(config["port"]) for config in metadata.get('left4dead2/servers', {}).values())
    if not ports:
        # The defaults declare an empty server dict; without this guard the
        # reactor would emit "th dport {  } accept", an invalid nftables rule.
        return {}
    return {
        'nftables': {
            'input': {
                f'ip protocol {{ tcp, udp }} th dport {{ {", ".join(ports)} }} accept'
            },
        },
    }
@metadata_reactor.provides(
    'systemd/units',
)
def initial_unit(metadata):
    """Provide a oneshot systemd unit that installs/updates left4dead2
    (steam app 222860) via steamcmd into /opt/left4dead2."""
    # {platform} is filled in below; trailing space keeps args separable.
    install_command = (
        '/opt/steam/steamcmd.sh '
        '+force_install_dir /opt/left4dead2 '
        '+login anonymous '
        '+@sSteamCmdForcePlatformType {platform} '
        '+app_update 222860 validate '
        '+quit '
    )
    return {
        'systemd': {
            'units': {
                'left4dead2-install.service': {
                    'Unit': {
                        'Description': 'install or update left4dead2',
                        'After': 'network-online.target',
                    },
                    'Service': {
                        'Type': 'oneshot',
                        # keep "active" after completion so server units can order after it
                        'RemainAfterExit': 'yes',
                        'User': 'steam',
                        'Group': 'steam',
                        'WorkingDirectory': '/opt/steam',
                        # NOTE(review): windows depot is installed first, then
                        # linux — presumably a workaround for the x64 build;
                        # confirm before reordering.
                        'ExecStartPre': install_command.format(platform='windows'),
                        'ExecStart': install_command.format(platform='linux'),
                    },
                    'Install': {
                        'WantedBy': {'multi-user.target'},
                    },
                },
            },
        },
    }
@metadata_reactor.provides(
    'systemd/units',
)
def server_units(metadata):
    """Provide one systemd service unit per configured left4dead2 server."""
    units = {}
    for name, config in metadata.get('left4dead2/servers').items():
        # Validate the server name before interpolating it into unit names.
        # The original class [A-z0-9-_-] also matched '[', '\', ']', '^'
        # (codepoints between 'Z' and 'a') and listed '-' twice.
        assert match(r'^[A-Za-z0-9_-]+$', name)
        units[f'left4dead2-{name}.service'] = {
            'Unit': {
                'Description': f'left4dead2 server {name}',
                # game install must have completed first
                'After': {'left4dead2-install.service'},
                'Requires': {'left4dead2-install.service'},
            },
            'Service': {
                'User': 'steam',
                'Group': 'steam',
                'WorkingDirectory': '/opt/left4dead2',
                'ExecStart': f'/opt/left4dead2/srcds_run -port {config["port"]} +exec server_{name}.cfg',
                'Restart': 'on-failure',
            },
            'Install': {
                'WantedBy': {'multi-user.target'},
            },
        }
    return {
        'systemd': {
            'units': units,
        },
    }

View file

@ -5,5 +5,5 @@ printf "server 127.0.0.1
zone acme.resolver.name.
update add _acme-challenge.ckn.li.acme.resolver.name. 600 IN TXT "hello"
send
" | nsupdate -y hmac-sha512:acme:Y9BHl85l352BGZDXa/vg90hh2+5PYe4oJxpkq/oQvIODDkW8bAyQSFr0gKQQxjyIOyYlTjf0MGcdWFv46G/3Rg==
" | nsupdate -y hmac-sha512:acme:XXXXXX
```

View file

@ -31,6 +31,12 @@ deploy_cert() {
% for domain, conf in sorted(domains.items()):
<% if not conf: continue %>\
${domain})
% if conf.get('scp', None):
scp "$KEYFILE" "${conf['scp']}/${conf.get('privkey_name', 'privkey.pem')}"
scp "$CERTFILE" "${conf['scp']}/${conf.get('cert_name', 'cert.pem')}"
scp "$FULLCHAINFILE" "${conf['scp']}/${conf.get('fullchain_name', 'fullchain.pem')}"
scp "$CHAINFILE" "${conf['scp']}/${conf.get('chain_name', 'chain.pem')}"
% endif
% if conf.get('location', None):
cat "$KEYFILE" > "${conf['location']}/${conf.get('privkey_name', 'privkey.pem')}"
cat "$CERTFILE" > "${conf['location']}/${conf.get('cert_name', 'cert.pem')}"

View file

@ -42,7 +42,7 @@ files = {
}
actions['letsencrypt_update_certificates'] = {
'command': 'dehydrated --cron --accept-terms --challenge dns-01',
'command': 'systemctl start letsencrypt.service',
'triggered': True,
'skip': delegated,
'needs': {

View file

@ -12,9 +12,8 @@ def generate_sysctl_key_value_pairs_from_json(json_data, parents=[]):
key_value_pairs = generate_sysctl_key_value_pairs_from_json(node.metadata.get('sysctl'))
files= {
'/etc/sysctl.conf': {
'/etc/sysctl.d/managed.conf': {
'content': '\n'.join(
sorted(
f"{'.'.join(path)}={value}"
@ -25,6 +24,9 @@ files= {
'svc_systemd:systemd-sysctl.service:restart',
],
},
'/etc/modules-load.d/managed.conf': {
'content': '\n'.join(sorted(node.metadata.get('modules-load'))),
}
}
svc_systemd = {

View file

@ -1,3 +1,4 @@
defaults = {
'sysctl': {},
'modules-load': set(),
}

View file

@ -7,12 +7,7 @@ defaults = {
'locale': {
'default': ('en_US.UTF-8', 'UTF-8'),
'installed': {
('de_AT.UTF-8', 'UTF-8'),
('de_CH.UTF-8', 'UTF-8'),
('de_DE.UTF-8', 'UTF-8'),
('de_LU.UTF-8', 'UTF-8'),
('en_CA.UTF-8', 'UTF-8'),
('en_GB.UTF-8', 'UTF-8'),
('en_US.UTF-8', 'UTF-8'),
},
},

View file

@ -2,6 +2,8 @@
cd "$OLDPWD"
pyenv install --skip-existing
if test -f .venv/bin/python && test "$(realpath .venv/bin/python)" != "$(realpath "$(pyenv which python)")"
then
echo "rebuilding venv für new python version"

26
bundles/mailman/README.md Normal file
View file

@ -0,0 +1,26 @@
# Mailman
- django admin under /admin
## Testmail
`echo export REST_API_PASS=$(bw metadata mseibert.mailman -k mailman/api_password | jq -r .mailman.api_password)`
```sh
curl -s -o /dev/null \
-w "Status: %{http_code}\nTime: %{time_total}s\n" \
-u restadmin:$REST_API_PASS \
-H "Content-Type: application/json" \
-X POST http://localhost:8001/3.1/queues/in \
-d "{
\"list_id\": \"testlist-2.mailman.ckn.li\",
\"text\": \"From: i@ckn.li\nTo: testlist-2@mailman.ckn.li\nSubject: Curl Test $(date '+%Y-%m-%d %H:%M:%S')\n\nThis message was sent at $(date '+%Y-%m-%d %H:%M:%S').\"
}"
```
## Log locations
`tail -f /var/log/mailman3/*.log`
`journalctl -f | grep postfix/`
`mailq | head -20`

View file

@ -0,0 +1,22 @@
# This is the mailman extension configuration file to enable HyperKitty as an
# archiver. Remember to add the following lines in the mailman.cfg file:
#
# [archiver.hyperkitty]
# class: mailman_hyperkitty.Archiver
# enable: yes
# configuration: /etc/mailman3/mailman-hyperkitty.cfg
#
[general]
# This is your HyperKitty installation, preferably on the localhost. This
# address will be used by Mailman to forward incoming emails to HyperKitty
# for archiving. It does not need to be publicly available, in fact it's
# better if it is not.
# However, if your Mailman installation is accessed via HTTPS, the URL needs
# to match your SSL certificate (e.g. https://lists.example.com/hyperkitty).
base_url: http://${hostname}/mailman3/hyperkitty/
# The shared api_key, must be identical except for quoting to the value of
# MAILMAN_ARCHIVER_KEY in HyperKitty's settings.
api_key: ${archiver_key}

View file

@ -0,0 +1,190 @@
ACCOUNT_EMAIL_VERIFICATION='none'
# This file is imported by the Mailman Suite. It is used to override
# the default settings from /usr/share/mailman3-web/settings.py.
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '${secret_key}'
ADMINS = (
('Mailman Suite Admin', 'root@localhost'),
)
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.8/ref/settings/#allowed-hosts
# Set to '*' per default in the Debian package to allow all hostnames. Mailman3
# is meant to run behind a webserver reverse proxy anyway.
ALLOWED_HOSTS = [
'${hostname}',
]
# Mailman API credentials
MAILMAN_REST_API_URL = 'http://localhost:8001'
MAILMAN_REST_API_USER = 'restadmin'
MAILMAN_REST_API_PASS = '${api_password}'
MAILMAN_ARCHIVER_KEY = '${archiver_key}'
MAILMAN_ARCHIVER_FROM = ('127.0.0.1', '::1')
# Application definition
INSTALLED_APPS = (
'hyperkitty',
'postorius',
'django_mailman3',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'django_gravatar',
'compressor',
'haystack',
'django_extensions',
'django_q',
'allauth',
'allauth.account',
'allauth.socialaccount',
'django_mailman3.lib.auth.fedora',
#'allauth.socialaccount.providers.openid',
#'allauth.socialaccount.providers.github',
#'allauth.socialaccount.providers.gitlab',
#'allauth.socialaccount.providers.google',
#'allauth.socialaccount.providers.facebook',
#'allauth.socialaccount.providers.twitter',
#'allauth.socialaccount.providers.stackexchange',
)
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
# Use 'sqlite3', 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
#'ENGINE': 'django.db.backends.sqlite3',
'ENGINE': 'django.db.backends.postgresql_psycopg2',
#'ENGINE': 'django.db.backends.mysql',
# DB name or path to database file if using sqlite3.
#'NAME': '/var/lib/mailman3/web/mailman3web.db',
'NAME': 'mailman',
# The following settings are not used with sqlite3:
'USER': 'mailman',
'PASSWORD': '${db_password}',
# HOST: empty for localhost through domain sockets or '127.0.0.1' for
# localhost through TCP.
'HOST': '127.0.0.1',
# PORT: set to empty string for default.
'PORT': '5432',
# OPTIONS: Extra parameters to use when connecting to the database.
'OPTIONS': {
# Set sql_mode to 'STRICT_TRANS_TABLES' for MySQL. See
# https://docs.djangoproject.com/en/1.11/ref/
# databases/#setting-sql-mode
#'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
},
}
}
# If you're behind a proxy, use the X-Forwarded-Host header
# See https://docs.djangoproject.com/en/1.8/ref/settings/#use-x-forwarded-host
USE_X_FORWARDED_HOST = True
# And if your proxy does your SSL encoding for you, set SECURE_PROXY_SSL_HEADER
# https://docs.djangoproject.com/en/1.8/ref/settings/#secure-proxy-ssl-header
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_SCHEME', 'https')
# Other security settings
# SECURE_SSL_REDIRECT = True
# If you set SECURE_SSL_REDIRECT to True, make sure the SECURE_REDIRECT_EXEMPT
# contains at least this line:
# SECURE_REDIRECT_EXEMPT = [
# "archives/api/mailman/.*", # Request from Mailman.
# ]
# SESSION_COOKIE_SECURE = True
# SECURE_CONTENT_TYPE_NOSNIFF = True
# SECURE_BROWSER_XSS_FILTER = True
# CSRF_COOKIE_SECURE = True
# CSRF_COOKIE_HTTPONLY = True
# X_FRAME_OPTIONS = 'DENY'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Set default domain for email addresses.
EMAILNAME = 'localhost.local'
# If you enable internal authentication, this is the address that the emails
# will appear to be coming from. Make sure you set a valid domain name,
# otherwise the emails may get rejected.
# https://docs.djangoproject.com/en/1.8/ref/settings/#default-from-email
# DEFAULT_FROM_EMAIL = "mailing-lists@you-domain.org"
DEFAULT_FROM_EMAIL = 'postorius@{}'.format(EMAILNAME)
# If you enable email reporting for error messages, this is where those emails
# will appear to be coming from. Make sure you set a valid domain name,
# otherwise the emails may get rejected.
# https://docs.djangoproject.com/en/1.8/ref/settings/#std:setting-SERVER_EMAIL
# SERVER_EMAIL = 'root@your-domain.org'
SERVER_EMAIL = 'root@{}'.format(EMAILNAME)
# Django Allauth
ACCOUNT_DEFAULT_HTTP_PROTOCOL = "https"
#
# Social auth
#
SOCIALACCOUNT_PROVIDERS = {
#'openid': {
# 'SERVERS': [
# dict(id='yahoo',
# name='Yahoo',
# openid_url='http://me.yahoo.com'),
# ],
#},
#'google': {
# 'SCOPE': ['profile', 'email'],
# 'AUTH_PARAMS': {'access_type': 'online'},
#},
#'facebook': {
# 'METHOD': 'oauth2',
# 'SCOPE': ['email'],
# 'FIELDS': [
# 'email',
# 'name',
# 'first_name',
# 'last_name',
# 'locale',
# 'timezone',
# ],
# 'VERSION': 'v2.4',
#},
}
# On a production setup, setting COMPRESS_OFFLINE to True will bring a
# significant performance improvement, as CSS files will not need to be
# recompiled on each requests. It means running an additional "compress"
# management command after each code upgrade.
# http://django-compressor.readthedocs.io/en/latest/usage/#offline-compression
COMPRESS_OFFLINE = True
POSTORIUS_TEMPLATE_BASE_URL = 'http://${hostname}/mailman3/'

View file

@ -0,0 +1,271 @@
# Copyright (C) 2008-2017 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
# This file contains the Debian configuration for mailman. It uses ini-style
# formats under the lazr.config regime to define all system configuration
# options. See <https://launchpad.net/lazr.config> for details.
[mailman]
# This address is the "site owner" address. Certain messages which must be
# delivered to a human, but which can't be delivered to a list owner (e.g. a
# bounce from a list owner), will be sent to this address. It should point to
# a human.
site_owner: ${site_owner_email}
# This is the local-part of an email address used in the From field whenever a
# message comes from some entity to which there is no natural reply recipient.
# Mailman will append '@' and the host name of the list involved. This
# address must not bounce and it must not point to a Mailman process.
noreply_address: noreply
# The default language for this server.
default_language: de
# Membership tests for posting purposes are usually performed by looking at a
# set of headers, passing the test if any of their values match a member of
# the list. Headers are checked in the order given in this variable. The
# value From_ means to use the envelope sender. Field names are case
# insensitive. This is a space-separated list of headers.
sender_headers: from from_ reply-to sender
# Mail command processor will ignore mail command lines after designated max.
email_commands_max_lines: 10
# Default length of time a pending request is live before it is evicted from
# the pending database.
pending_request_life: 3d
# How long should files be saved before they are evicted from the cache?
cache_life: 7d
# A callable to run with no arguments early in the initialization process.
# This runs before database initialization.
pre_hook:
# A callable to run with no arguments late in the initialization process.
# This runs after adapters are initialized.
post_hook:
# Which paths.* file system layout to use.
# You should not change this variable.
layout: debian
# Can MIME filtered messages be preserved by list owners?
filtered_messages_are_preservable: no
# How should text/html parts be converted to text/plain when the mailing list
# is set to convert HTML to plaintext? This names a command to be called,
# where the substitution variable $filename is filled in by Mailman, and
# contains the path to the temporary file that the command should read from.
# The command should print the converted text to stdout.
html_to_plain_text_command: /usr/bin/lynx -dump $filename
# Specify what characters are allowed in list names. Characters outside of
# the class [-_.+=!$*{}~0-9a-z] matched case insensitively are never allowed,
# but this specifies a subset as the only allowable characters. This must be
# a valid character class regexp or the effect on list creation is
# unpredictable.
listname_chars: [-_.0-9a-z]
[shell]
# `mailman shell` (also `withlist`) gives you an interactive prompt that you
# can use to interact with an initialized and configured Mailman system. Use
# --help for more information. This section allows you to configure certain
# aspects of this interactive shell.
# Customize the interpreter prompt.
prompt: >>>
# Banner to show on startup.
banner: Welcome to the GNU Mailman shell
# Use IPython as the shell, which must be found on the system. Valid values
# are `no`, `yes`, and `debug` where the latter is equivalent to `yes` except
# that any import errors will be displayed to stderr.
use_ipython: no
# Set this to allow for command line history if readline is available. This
# can be as simple as $var_dir/history.py to put the file in the var directory.
history_file:
[paths.debian]
# Important directories for Mailman operation. These are defined here so that
# different layouts can be supported. For example, a developer layout would
# be different from a FHS layout. Most paths are based off the var_dir, and
# often just setting that will do the right thing for all the other paths.
# You might also have to set spool_dir though.
#
# Substitutions are allowed, but must be of the form $var where 'var' names a
# configuration variable in the paths.* section. Substitutions are expanded
# recursively until no more $-variables are present. Beware of infinite
# expansion loops!
#
# This is the root of the directory structure that Mailman will use to store
# its run-time data.
var_dir: /var/lib/mailman3
# This is where the Mailman queue files directories will be created.
queue_dir: $var_dir/queue
# This is the directory containing the Mailman 'runner' and 'master' commands
# if set to the string '$argv', it will be taken as the directory containing
# the 'mailman' command.
bin_dir: /usr/lib/mailman3/bin
# All list-specific data.
list_data_dir: $var_dir/lists
# Directory where log files go.
log_dir: /var/log/mailman3
# Directory for system-wide locks.
lock_dir: $var_dir/locks
# Directory for system-wide data.
data_dir: $var_dir/data
# Cache files.
cache_dir: $var_dir/cache
# Directory for configuration files and such.
etc_dir: /etc/mailman3
# Directory containing Mailman plugins.
ext_dir: $var_dir/ext
# Directory where the default IMessageStore puts its messages.
messages_dir: $var_dir/messages
# Directory for archive backends to store their messages in. Archivers should
# create a subdirectory in here to store their files.
archive_dir: $var_dir/archives
# Root directory for site-specific template override files.
template_dir: $var_dir/templates
# There are also a number of paths to specific file locations that can be
# defined. For these, the directory containing the file must already exist,
# or be one of the directories created by Mailman as per above.
#
# This is where PID file for the master runner is stored.
pid_file: /run/mailman3/master.pid
# Lock file.
lock_file: $lock_dir/master.lck
[database]
# The class implementing the IDatabase.
class: mailman.database.sqlite.SQLiteDatabase
#class: mailman.database.mysql.MySQLDatabase
#class: mailman.database.postgresql.PostgreSQLDatabase
# Use this to set the Storm database engine URL. You generally have one
# primary database connection for all of Mailman. List data and most rosters
# will store their data in this database, although external rosters may access
# other databases in their own way. This string supports standard
# 'configuration' substitutions.
url: sqlite:///$DATA_DIR/mailman.db
#url: mysql+pymysql://mailman3:mmpass@localhost/mailman3?charset=utf8&use_unicode=1
#url: postgresql://mailman3:mmpass@localhost/mailman3
debug: no
[logging.debian]
# This defines various log settings. The options available are:
#
# - level -- Overrides the default level; this may be any of the
# standard Python logging levels, case insensitive.
# - format -- Overrides the default format string
# - datefmt -- Overrides the default date format string
# - path -- Overrides the default logger path. This may be a relative
# path name, in which case it is relative to Mailman's LOG_DIR,
# or it may be an absolute path name. You cannot change the
# handler class that will be used.
# - propagate -- Boolean specifying whether to propagate log message from this
# logger to the root "mailman" logger. You cannot override
# settings for the root logger.
#
# In this section, you can define defaults for all loggers, which will be
# prefixed by 'mailman.'. Use subsections to override settings for specific
# loggers. The names of the available loggers are:
#
# - archiver -- All archiver output
# - bounce -- All bounce processing logs go here
# - config -- Configuration issues
# - database -- Database logging (SQLAlchemy and Alembic)
# - debug -- Only used for development
# - error -- All exceptions go to this log
# - fromusenet -- Information related to the Usenet to Mailman gateway
# - http -- Internal wsgi-based web interface
# - locks -- Lock state changes
# - mischief -- Various types of hostile activity
# - runner -- Runner process start/stops
# - smtp -- Successful SMTP activity
# - smtp-failure -- Unsuccessful SMTP activity
# - subscribe -- Information about leaves/joins
# - vette -- Message vetting information
format: %(asctime)s (%(process)d) %(message)s
datefmt: %b %d %H:%M:%S %Y
propagate: no
level: info
path: mailman.log
[webservice]
# The hostname at which admin web service resources are exposed.
hostname: localhost
# The port at which the admin web service resources are exposed.
port: 8001
# Whether or not requests to the web service are secured through SSL.
use_https: no
# Whether or not to show tracebacks in an HTTP response for a request that
# raised an exception.
show_tracebacks: yes
# The API version number for the current (highest) API.
api_version: 3.1
# The administrative username.
admin_user: restadmin
# The administrative password.
admin_pass: ${api_password}
[mta]
# The class defining the interface to the incoming mail transport agent.
#incoming: mailman.mta.exim4.LMTP
incoming: mailman.mta.postfix.LMTP
# The callable implementing delivery to the outgoing mail transport agent.
# This must accept three arguments, the mailing list, the message, and the
# message metadata dictionary.
outgoing: mailman.mta.deliver.deliver
# How to connect to the outgoing MTA. If smtp_user and smtp_pass is given,
# then Mailman will attempt to log into the MTA when making a new connection.
smtp_host: 127.0.0.1
smtp_port: 25
smtp_user:
smtp_pass:
# Where the LMTP server listens for connections. Use 127.0.0.1 instead of
# localhost for Postfix integration, because Postfix only consults DNS
# (e.g. not /etc/hosts).
lmtp_host: 127.0.0.1
lmtp_port: 8024
# Where can we find the mail server specific configuration file? The path can
# be either a file system path or a Python import path. If the value starts
# with python: then it is a Python import path, otherwise it is a file system
# path. File system paths must be absolute since no guarantees are made about
# the current working directory. Python paths should not include the trailing
# .cfg, which the file must end with.
#configuration: python:mailman.config.exim4
configuration: python:mailman.config.postfix

View file

@ -0,0 +1,53 @@
# See /usr/share/postfix/main.cf.dist for a commented, more complete version
# Debian specific: Specifying a file name will cause the first
# line of that file to be used as the name. The Debian default
# is /etc/mailname.
#myorigin = /etc/mailname
smtpd_banner = $myhostname ESMTP $mail_name (Debian/GNU)
biff = no
# appending .domain is the MUA's job.
append_dot_mydomain = no
# Uncomment the next line to generate "delayed mail" warnings
#delay_warning_time = 4h
readme_directory = no
# See http://www.postfix.org/COMPATIBILITY_README.html -- default to 3.6 on
# fresh installs.
compatibility_level = 3.6
# TLS parameters
smtpd_tls_cert_file=/etc/ssl/certs/ssl-cert-snakeoil.pem
smtpd_tls_key_file=/etc/ssl/private/ssl-cert-snakeoil.key
smtpd_tls_security_level=may
smtp_tls_CApath=/etc/ssl/certs
smtp_tls_security_level=may
smtp_tls_session_cache_database = <%text>btree:${data_directory}/smtp_scache</%text>
smtpd_relay_restrictions = permit_mynetworks permit_sasl_authenticated defer_unauth_destination
myhostname = ${hostname}
alias_maps = hash:/etc/aliases
alias_database = hash:/etc/aliases
mydestination = $myhostname, localhost, localhost.localdomain, ${hostname}
relayhost =
mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128
mailbox_size_limit = 0
recipient_delimiter = +
inet_interfaces = all
#inet_protocols = all
inet_protocols = ipv4
unknown_local_recipient_reject_code = 550
owner_request_special = no
transport_maps =
hash:/var/lib/mailman3/data/postfix_lmtp
local_recipient_maps =
hash:/var/lib/mailman3/data/postfix_lmtp
relay_domains =
hash:/var/lib/mailman3/data/postfix_domains

View file

@ -0,0 +1,50 @@
[uwsgi]
# Port on which uwsgi will be listening.
uwsgi-socket = /run/mailman3-web/uwsgi.sock
#Enable threading for python
enable-threads = true
# Move to the directory where the Django files are.
chdir = /usr/share/mailman3-web
# Use the wsgi file provided with the django project.
wsgi-file = wsgi.py
# Setup default number of processes and threads per process.
master = true
process = 2
threads = 2
# Drop privileges and don't run as root.
uid = www-data
gid = www-data
plugins = python3
# Setup the django_q related worker processes.
attach-daemon = python3 manage.py qcluster
# Setup hyperkitty's cron jobs.
#unique-cron = -1 -1 -1 -1 -1 ./manage.py runjobs minutely
#unique-cron = -15 -1 -1 -1 -1 ./manage.py runjobs quarter_hourly
#unique-cron = 0 -1 -1 -1 -1 ./manage.py runjobs hourly
#unique-cron = 0 0 -1 -1 -1 ./manage.py runjobs daily
#unique-cron = 0 0 1 -1 -1 ./manage.py runjobs monthly
#unique-cron = 0 0 -1 -1 0 ./manage.py runjobs weekly
#unique-cron = 0 0 1 1 -1 ./manage.py runjobs yearly
# Setup the request log.
#req-logger = file:/var/log/mailman3/web/mailman-web.log
# Log cron separately.
#logger = cron file:/var/log/mailman3/web/mailman-web-cron.log
#log-route = cron uwsgi-cron
# Log qcluster commands separately.
#logger = qcluster file:/var/log/mailman3/web/mailman-web-qcluster.log
#log-route = qcluster uwsgi-daemons
# Last log and it logs the rest of the stuff.
#logger = file:/var/log/mailman3/web/mailman-web-error.log
logto = /var/log/mailman3/web/mailman-web.log

104
bundles/mailman/items.py Normal file
View file

@ -0,0 +1,104 @@
# /var/lib/mailman3 lives on its own ZFS dataset; ensure it exists with the
# right ownership after the dataset/package and before the services start.
directories = {
    '/var/lib/mailman3': {
        'owner': 'list',
        'group': 'list',
        'needs': {
            'zfs_dataset:tank/mailman',
            'pkg_apt:mailman3-full',
        },
        'needed_by': {
            'svc_systemd:mailman3.service',
            'svc_systemd:mailman3-web.service',
        },
    },
}
# Every mailman config file restarts both mailman services on change.
MAILMAN_RESTART = (
    'svc_systemd:mailman3.service:restart',
    'svc_systemd:mailman3-web.service:restart',
)

files = {
    '/etc/postfix/main.cf': {
        'source': 'postfix.cf',
        'content_type': 'mako',
        'mode': '0644',
        'context': {
            'hostname': node.metadata.get('mailman/hostname'),
        },
        'needs': {'pkg_apt:postfix'},
        'triggers': {'svc_systemd:postfix.service:restart'},
    },
    # The three mailman config templates are identical apart from their
    # group (the web settings must be readable by www-data).
    **{
        path: {
            'content_type': 'mako',
            'owner': 'root',
            'group': group,
            'mode': '0640',
            'context': node.metadata.get('mailman'),
            'needs': {'pkg_apt:mailman3-full'},
            'triggers': set(MAILMAN_RESTART),
        }
        for path, group in (
            ('/etc/mailman3/mailman.cfg', 'list'),
            ('/etc/mailman3/mailman-web.py', 'www-data'),
            ('/etc/mailman3/mailman-hyperkitty.cfg', 'list'),
        )
    },
    '/etc/mailman3/uwsgi.ini': {
        'content_type': 'text',
        'owner': 'root',
        'group': 'root',
        'mode': '0644',
        'needs': {'pkg_apt:mailman3-full'},
        'triggers': set(MAILMAN_RESTART),
    },
}
# Services managed by this bundle; each is only handled once its package
# has been installed.
svc_systemd = {
    'postfix.service': {
        'needs': {
            'pkg_apt:postfix',
        },
    },
    'mailman3.service': {
        'needs': {
            'pkg_apt:mailman3-full',
        },
    },
    'mailman3-web.service': {
        'needs': {
            'pkg_apt:mailman3-full',
        },
    },
}

149
bundles/mailman/metadata.py Normal file
View file

@ -0,0 +1,149 @@
import base64
def derive_mailadmin_secret(metadata, salt):
    # Derive a deterministic, URL-safe secret for this node and purpose.
    # The vault yields 32 stable random bytes (base64-encoded) keyed by
    # '<node_id>_<salt>'; re-encode them URL-safe and strip '=' padding.
    vault_entry = repo.vault.random_bytes_as_base64_for(
        '{}_{}'.format(metadata.get('id'), salt),
        length=32,
    )
    raw_bytes = base64.b64decode(vault_entry.value)
    return base64.urlsafe_b64encode(raw_bytes).rstrip(b'=').decode('ascii')
defaults = {
    'apt': {
        'packages': {
            'mailman3-full': {
                'needs': {
                    # database and storage must exist before the package
                    # configures itself on installation
                    'postgres_db:mailman',
                    'postgres_role:mailman',
                    'zfs_dataset:tank/mailman',
                }
            },
            'postfix': {},
            'python3-psycopg2': {
                # PostgreSQL driver must be in place before mailman3-full
                # is set up (see 'postgresql' reactor below).
                'needed_by': {
                    'pkg_apt:mailman3-full',
                },
            },
            'apache2': {
                # NOTE(review): apache2 is presumably dragged in by
                # mailman3-full and removed again here (nginx serves the web
                # UI, see the nginx reactor) — confirm.
                'installed': False,
                'needs': {
                    'pkg_apt:mailman3-full',
                },
            },
        },
    },
    'zfs': {
        'datasets': {
            'tank/mailman': {
                'mountpoint': '/var/lib/mailman3',
            },
        },
    },
}
@metadata_reactor.provides(
    'postgresql',
    'mailman',
)
def postgresql(metadata):
    # Provision a dedicated PostgreSQL database/role for mailman and hand
    # the generated password to the mailman bundle configuration.
    password = repo.vault.password_for(
        '{} database mailman'.format(metadata.get('id'))
    )
    return {
        'postgresql': {
            'databases': {
                'mailman': {'owner': 'mailman'},
            },
            'roles': {
                'mailman': {'password': password},
            },
        },
        'mailman': {
            'db_password': password,
        },
    }
@metadata_reactor.provides(
    'nginx/vhosts',
)
def nginx(metadata):
    # Serve the mailman web UI via nginx under the configured hostname.
    hostname = metadata.get('mailman/hostname')
    vhost = {'content': 'mailman/vhost.conf'}
    return {
        'nginx': {
            'vhosts': {
                hostname: vhost,
            },
        },
    }
@metadata_reactor.provides(
    'mailman/secret_key',
)
def secret_key(metadata):
    # Django SECRET_KEY for mailman-web.
    #
    # Reuses derive_mailadmin_secret() instead of duplicating its
    # vault/base64 logic inline (the previous implementation also
    # re-imported base64 locally although it is imported at module level).
    # The salt 'mailman_secret_key' produces the same vault key
    # ('<node_id>_mailman_secret_key') as before, so the derived value
    # is unchanged.
    return {
        'mailman': {
            'secret_key': derive_mailadmin_secret(metadata, 'mailman_secret_key'),
        },
    }
@metadata_reactor.provides(
    'mailman',
)
def secrets(metadata):
    # Derive the remaining per-node mailman secrets from the vault.
    # Note: 'web_secret' deliberately uses the salt 'secret_key'.
    derived = {
        name: derive_mailadmin_secret(metadata, salt)
        for name, salt in (
            ('web_secret', 'secret_key'),
            ('api_password', 'api_password'),
            ('archiver_key', 'archiver_key'),
        )
    }
    return {
        'mailman': derived,
    }
@metadata_reactor.provides(
    'dns',
)
def dns(metadata):
    # Publish MX, SPF and DMARC records for the mailman hostname.
    #
    # NOTE(review): the DMARC policy TXT record is attached to the hostname
    # itself here; per RFC 7489 receivers look it up at _dmarc.<domain>.
    # Confirm the 'dns' metadata consumer adds that prefix, otherwise the
    # record is published at the wrong name.
    report_email = metadata.get('mailman/dmarc_report_email')
    return {
        'dns': {
            metadata.get('mailman/hostname'): {
                'MX': [f"5 {metadata.get('mailman/hostname')}."],
                'TXT': [
                    'v=spf1 a mx -all',
                    '; '.join(f'{k}={v}' for k, v in {
                        # dmarc version
                        'v': 'DMARC1',
                        # reject on failure
                        'p': 'reject',
                        # standard reports
                        'rua': f'mailto:{report_email}',
                        # forensic reports
                        'fo': 1,
                        'ruf': f'mailto:{report_email}',
                        # require alignment between the DKIM domain and the parent Header From domain
                        'adkim': 's',
                        # require alignment between the SPF domain (the sender) and the Header From domain
                        'aspf': 's',
                    }.items())
                ],
            },
        },
    }

View file

@ -32,10 +32,14 @@ defaults = {
'tank/vmail': {
'mountpoint': '/var/vmail',
'compression': 'on',
'atime': 'off',
'recordsize': '16384',
},
'tank/vmail/index': {
'mountpoint': '/var/vmail/index',
'compression': 'on',
'atime': 'off',
'recordsize': '4096',
'com.sun:auto-snapshot': 'false',
'backup': False,
},

19
bundles/network/items.py Normal file
View file

@ -0,0 +1,19 @@
# Generate one qdisc-<interface> service item per network that configures a
# 'qdisc' (the corresponding unit files are generated by the
# systemd-networkd metadata reactor).
#
# Initialize the item dicts explicitly: the subscript assignments below
# would otherwise raise a NameError as soon as any network defines 'qdisc'.
svc_systemd = {}
actions = {}

for network_name, network_conf in node.metadata.get('network').items():
    if 'qdisc' not in network_conf:
        continue

    unit_file = f'file:/usr/local/lib/systemd/system/qdisc-{network_name}.service'

    svc_systemd[f'qdisc-{network_name}.service'] = {
        'enabled': True,
        # don't enforce a run state for the oneshot unit
        'running': None,
        'needs': {
            unit_file,
        },
    }

    # NOTE(review): presumably a workaround because the unit file cannot
    # trigger the service restart directly — confirm. The no-op action
    # relays "unit file changed" into a restart of the qdisc service.
    actions[f'qdisc-{network_name}.service_restart_workaround'] = {
        'command': 'true',
        'triggered': True,
        'triggered_by': {
            unit_file,
        },
        'triggers': {
            f'svc_systemd:qdisc-{network_name}.service:restart',
        },
    }

View file

@ -34,10 +34,12 @@ def dhcp(metadata):
@metadata_reactor.provides(
'systemd/units',
'modules-load',
)
def units(metadata):
if node.has_bundle('systemd-networkd'):
units = {}
modules_load = set()
for network_name, network_conf in metadata.get('network').items():
interface_type = network_conf.get('type', None)
@ -50,8 +52,12 @@ def units(metadata):
},
'Network': {
'DHCP': network_conf.get('dhcp', 'no'),
'IPv6AcceptRA': network_conf.get('dhcp', 'no'),
'VLAN': set(network_conf.get('vlans', set()))
'IPv6AcceptRA': network_conf.get('IPv6AcceptRA', 'no'),
'VLAN': set(
other_network_name
for other_network_name, other_network_conf in metadata.get('network', {}).items()
if other_network_conf.get('type') == 'vlan' and other_network_conf['vlan_interface'] == network_name
)
}
}
@ -90,10 +96,50 @@ def units(metadata):
}
}
# cake WIP
if 'cake' in network_conf:
units[f'{network_name}.network']['CAKE'] = network_conf['cake']
modules_load.add('sch_cake')
return {
'systemd': {
'units': units,
}
},
'modules-load': modules_load,
}
else:
return {}
@metadata_reactor.provides(
'systemd/units',
)
def queuing_disciplines(metadata):
if node.has_bundle('systemd-networkd'):
return {
'systemd': {
'units': {
f'qdisc-{network_name}.service': {
'Unit': {
'Description': f'setup queuing discipline for interface {network_name}',
'Wants': 'network.target',
'After': 'network.target',
'BindsTo': 'network.target',
},
'Service': {
'Type': 'oneshot',
'ExecStart': f'/sbin/tc qdisc replace root dev {network_name} {network_conf["qdisc"]}',
'RemainAfterExit': 'yes',
},
'Install': {
'WantedBy': 'network-online.target',
},
}
for network_name, network_conf in metadata.get('network').items()
if 'qdisc' in network_conf
},
},
}
else:
return {}

View file

@ -8,4 +8,5 @@ examples
```sh
nft add rule inet filter input tcp dport 5201 accept
nft add rule inet filter input udp dport 5201 accept
```

View file

@ -2,6 +2,23 @@
flush ruleset
% if nat:
table ip nat {
# NAT
chain postrouting {
type nat hook postrouting priority 100
policy accept
# rules
% for rule in sorted(nat):
${rule}
% endfor
}
}
% endif
table inet filter {
# INPUT

View file

@ -6,6 +6,7 @@ files = {
'input': node.metadata.get('nftables/input'),
'forward': node.metadata.get('nftables/forward'),
'output': node.metadata.get('nftables/output'),
'nat': node.metadata.get('nftables/nat'),
},
'triggers': [
'svc_systemd:nftables.service:reload',

View file

@ -8,7 +8,8 @@ defaults = {
'input': {
'tcp dport 22 accept',
},
'forward': {},
'output': {},
'forward': set(),
'nat': set(),
'output': set(),
},
}

View file

@ -96,7 +96,7 @@ def monitoring(metadata):
'monitoring': {
'services': {
hostname: {
'vars.command': f"/usr/bin/curl -X GET -L --fail --no-progress-meter -o /dev/null {quote(hostname + vhost.get('check_path', ''))}",
'vars.command': f"/usr/bin/curl -X GET -L --fail --no-progress-meter -o /dev/null {vhost.get('check_protocol', 'https')}://{quote(hostname + vhost.get('check_path', '/'))}",
}
for hostname, vhost in metadata.get('nginx/vhosts').items()
},

View file

@ -0,0 +1,22 @@
# DO NOT DISABLE!
# If you change this first entry you will need to make sure that the
# database superuser can access the database using some other method.
# Noninteractive access to all databases is required during automatic
# maintenance (custom daily cronjobs, replication, and similar tasks).
#
# Database administrative login by Unix domain socket
local all postgres peer
# TYPE DATABASE USER ADDRESS METHOD
# "local" is for Unix domain socket connections only
local all all peer
# IPv4 local connections:
host all all 127.0.0.1/32 ${node.metadata.get('postgresql/password_algorithm', 'md5')}
# IPv6 local connections:
host all all ::1/128 ${node.metadata.get('postgresql/password_algorithm', 'md5')}
# Allow replication connections from localhost, by a user with the
# replication privilege.
local replication all peer
host replication all 127.0.0.1/32 ${node.metadata.get('postgresql/password_algorithm', 'md5')}
host replication all ::1/128 ${node.metadata.get('postgresql/password_algorithm', 'md5')}

View file

@ -18,6 +18,21 @@ directories = {
}
files = {
f"/etc/postgresql/{version}/main/pg_hba.conf": {
'content_type': 'mako',
'mode': '0640',
'owner': 'postgres',
'group': 'postgres',
'needs': [
'pkg_apt:postgresql',
],
'needed_by': [
'svc_systemd:postgresql.service',
],
'triggers': [
'svc_systemd:postgresql.service:restart',
],
},
f"/etc/postgresql/{version}/main/conf.d/managed.conf": {
'content': '\n'.join(
f'{key} = {value}'

36
bundles/pppoe/REAMDE.md Normal file
View file

@ -0,0 +1,36 @@
# Fritzbox
Internet > Zugangsdaten
Internetanbieter
- weitere Internetanbieter
- anderer Internetanbieter
- Name: "My PPPOE" (nicht leer lassen)
Anschluss
(x) Anschluss an einen DSL-Anschluss
Zugangsdaten
(x) Nein
Verbindungseinstellungen
[x] VLAN für den Internetanschluss verwenden
VLAN-ID: 7
PBit: 0
DSL-ATM-Einstellungen
VPI: 1
VCI: 32
Kapselung
(x) Routed Bridge Encapsulation
[x] IP-Adresse automatisch über DHCP beziehen
DHCP-Hostname: fritz.box
PPPoE-Passthrough
[x] Angeschlossene Netzwerkgeräte dürfen zusätzlich ihre eigene Internetverbindung aufbauen (nicht empfohlen)
[ ] Internetzugang nach dem "Übernehmen" prüfen
-> Danach muss bei "Internetanbieter" statt "weitere Internetanbieter" der gewählte Name stehen, also zB "My PPPOE"

View file

@ -0,0 +1,3 @@
# Secrets for authentication using CHAP
# client server secret IP addresses
"${user}" * "${secret}" *

30
bundles/pppoe/files/isp Normal file
View file

@ -0,0 +1,30 @@
# --- Plugin & Interface ---
plugin rp-pppoe.so ${interface}
unit 0
# --- IPv4 Einstellungen ---
noipdefault # keine selbstgewählte lokale IP
defaultroute # Default-Route über ppp0 anlegen
replacedefaultroute # ersetzt vorherige Default-Route
# --- IPv6 Einstellungen ---
+ipv6 # IPv6CP aktivieren
ipv6cp-accept-local # lokale IPv6 vom ISP übernehmen
ipv6cp-accept-remote # remote IPv6 vom ISP übernehmen
ipv6cp-use-ipaddr # statt Link-Local die zugewiesene IPv6 nutzen
defaultroute6
# --- Verbindungsmanagement ---
persist # bei Abbruch automatisch neu verbinden
maxfail 0 # unbegrenzt Neuversuche
# --- LCPKeepalive (zuverlässiger Ausfall-Check) ---
lcp-echo-interval 20
lcp-echo-failure 3
# --- Sicherheit / Logging ---
hide-password # Passwort nicht im Log anzeigen
noauth # Auth nur über chap-secrets
# --- Zugangsdaten (nur Username, das Passwort kommt aus /etc/ppp/chap-secrets) ---
user "${user}"

38
bundles/pppoe/items.py Normal file
View file

@ -0,0 +1,38 @@
# PPPoE uplink configuration: render the pppd peer file and CHAP secrets,
# then manage the dial-up service and its traffic-shaping companion.
pppoe_user = node.metadata.get('pppoe/user')

files = {
    '/etc/ppp/peers/isp': {
        'content_type': 'mako',
        'mode': '0644',
        'context': {
            'interface': node.metadata.get('pppoe/interface'),
            'user': pppoe_user,
        },
        'needs': {
            'pkg_apt:pppoe',
        },
    },
    '/etc/ppp/chap-secrets': {
        'content_type': 'mako',
        # holds the CHAP secret, hence root-only
        'mode': '0600',
        'context': {
            'user': pppoe_user,
            'secret': node.metadata.get('pppoe/secret'),
        },
        'needs': {
            'pkg_apt:pppoe',
        },
    },
}

svc_systemd = {
    'pppoe-isp.service': {
        'needs': {
            'file:/etc/ppp/peers/isp',
            'file:/etc/ppp/chap-secrets',
        },
    },
    'qdisc-ppp0.service': {
        'needs': {
            'svc_systemd:pppoe-isp.service',
        },
    },
}

55
bundles/pppoe/metadata.py Normal file
View file

@ -0,0 +1,55 @@
# Default metadata for the PPPoE bundle: required package, kernel modules,
# NAT rule, and the systemd units that dial the link and shape its traffic.
defaults = {
    'apt': {
        'packages': {
            'pppoe': {},
        },
    },
    # kernel modules required for PPP over Ethernet
    'modules-load': {
        'pppoe',
        'pppox',
        'ppp_generic',
    },
    'nftables': {
        'nat': {
            # masquerade everything leaving via the PPPoE interface
            'oifname ppp0 masquerade',
        },
    },
    'systemd': {
        'units': {
            'pppoe-isp.service': {
                'Unit': {
                    'Description': 'PPPoE Internet Connection',
                    'After': 'network.target',
                },
                'Service': {
                    'Type': 'forking',
                    'ExecStart': '/usr/sbin/pppd call isp',
                    'Restart': 'on-failure',
                    'RestartSec': 5,
                },
            },
            # attach a CAKE qdisc to ppp0 once the device exists;
            # BindsTo/PartOf tie its lifetime to the link
            'qdisc-ppp0.service': {
                'Unit': {
                    'Description': 'setup queuing discipline for interface ppp0',
                    'After': {
                        'pppoe-isp.service',
                        'sys-devices-virtual-net-ppp0.device',
                    },
                    'PartOf': 'pppoe-isp.service',
                    'BindsTo': 'sys-devices-virtual-net-ppp0.device',
                },
                'Service': {
                    'Type': 'oneshot',
                    'ExecStart': '/sbin/tc qdisc replace root dev ppp0 cake bandwidth 37Mbit internet besteffort triple-isolate nat egress memlimit 256mb',
                    # - no drops save
                    # - up to 37Mbit no retries with: iperf3 --client 49.12.184.229 -t999 -i5 --bidir
                    #'ExecStart': '/sbin/tc qdisc replace root dev ppp0 cake bandwidth 37Mbit internet besteffort nat egress memlimit 256mb',
                    'RemainAfterExit': 'yes',
                },
                'Install': {
                    'WantedBy': 'multi-user.target',
                },
            }
        },
    },
}

View file

@ -9,7 +9,7 @@ files = {
},
'/etc/apt/apt.conf.d/76pveproxy': {
'content_type': 'any',
'mode': '0444',
'mode': '0644',
},
'/etc/network/interfaces': {
'content_type': 'any',

View file

@ -80,6 +80,7 @@ defaults = {
@metadata_reactor.provides(
'nginx/has_websockets',
'nginx/vhosts',
)
def nginx(metadata):

View file

@ -9,6 +9,7 @@ directories = {
},
'/var/lib/redis': {
'owner': 'redis',
'group': 'redis',
'mode': '0750',
'needs': [
'pkg_apt:redis-server',

View file

@ -7,18 +7,16 @@ $config['enable_installer'] = true;
/* Local configuration for Roundcube Webmail */
$config['db_dsnw'] = '${database['provider']}://${database['user']}:${database['password']}@${database['host']}/${database['name']}';
$config['imap_host'] = 'localhost';
$config['imap_host'] = 'ssl://${imap_host}';
$config['imap_port'] = 993;
$config['smtp_host'] = 'tls://localhost';
$config['smtp_port'] = 587;
$config['smtp_user'] = '%u';
$config['smtp_pass'] = '%p';
#$config['imap_debug'] = true;
#$config['smtp_debug'] = true;
$config['support_url'] = '';
$config['des_key'] = '${des_key}';
$config['product_name'] = '${product_name}';
$config['plugins'] = array(${', '.join(f'"{plugin}"' for plugin in plugins)});
$config['language'] = 'de_DE';
$config['smtp_conn_options'] = array(
'ssl' => array(
'verify_peer' => false,
'verify_peer_name' => false,
),
);

View file

@ -61,6 +61,7 @@ files['/opt/roundcube/config/config.inc.php'] = {
'des_key': node.metadata.get('roundcube/des_key'),
'database': node.metadata.get('roundcube/database'),
'plugins': node.metadata.get('roundcube/plugins'),
'imap_host': node.metadata.get('mailserver/hostname'),
},
'needs': [
'action:chown_roundcube',

View file

@ -0,0 +1,18 @@
initialization:
- reset (hold reset for 5-10 seconds, until user light starts flashing)
- open webinterface under 192.168.88.1
- set password
- vlans need to be configured and an additional ip needs to be assigned to a vlan which is later accessible, preferably through an untagged port
- for example add 10.0.0.62/24 to "home" vlan
- this happens on the first apply
- when vlan filtering gets enabled, the apply freezes and the switch is no longer available under the old ip
- now that filtering is active, the switch is available under its new ip, because you don't speak to the bridge anymore, where the old ip was residing, but to the vlan interface, where the new ip is residing
bw bug:
- f"/interface/bridge/vlan?vlan-ids={vlan_id}&dynamic=false" fails, you need to remove &dynamic=false on create and then add it again
old 6.x Routeros firmware vlan filtering not working:
- update firmware first
- upload to files and just reboot
- didn't work via the web interface, the log said the firmware image was broken
- did work via winbox

119
bundles/routeros/items.py Normal file
View file

@ -0,0 +1,119 @@
# RouterOS switch configuration: basic system settings, VLAN interfaces
# with per-port tagging, IP addresses, and bridge VLAN filtering.
# Apply order matters (see the bundle README): enabling vlan-filtering
# moves management access from the bridge IP to the VLAN-interface IP.
routeros['/ip/dns'] = {
    'servers': '8.8.8.8',
}

routeros['/system/identity'] = {
    'name': node.name,
}

# for service in (
#     'api-ssl', # slow :(
#     'ftp', # we can download files via HTTP
#     'telnet',
#     'www-ssl', # slow :(
#     'winbox',
# ):
#     routeros[f'/ip/service?name={service}'] = {
#         'disabled': True,
#     }

# LOGGING_TOPICS = (
#     'critical',
#     'error',
#     'info',
#     'stp',
#     'warning',
# )
# for topic in LOGGING_TOPICS:
#     routeros[f'/system/logging?action=memory&topics={topic}'] = {}

# routeros['/snmp'] = {
#     'enabled': True,
# }
# routeros['/snmp/community?name=public'] = {
#     'addresses': '0.0.0.0/0',
#     'disabled': False,
#     'read-access': True,
#     'write-access': False,
# }

routeros['/system/clock'] = {
    'time-zone-autodetect': False,
    'time-zone-name': 'UTC',
}

# routeros['/ip/neighbor/discovery-settings'] = {
#     'protocol': 'cdp,lldp,mndp',
# }

routeros['/ip/route?dst-address=0.0.0.0/0'] = {
    'gateway': node.metadata.get('routeros/gateway'),
}

# one VLAN interface plus its bridge port membership per configured VLAN
for vlan_name, vlan_id in node.metadata.get('routeros/vlans').items():
    routeros[f'/interface/vlan?name={vlan_name}'] = {
        'vlan-id': vlan_id,
        'interface': 'bridge',
        'tags': {
            'routeros-vlan',
        },
    }
    routeros[f"/interface/bridge/vlan?vlan-ids={vlan_id}&dynamic=false"] = { # bw bug: remove &dynamic=false on first apply
        'bridge': 'bridge',
        'untagged': sorted(node.metadata.get(f'routeros/vlan_ports/{vlan_name}/untagged')),
        'tagged': sorted(node.metadata.get(f'routeros/vlan_ports/{vlan_name}/tagged')),
        '_comment': vlan_name,
        'tags': {
            'routeros-vlan-ports',
        },
        'needs': {
            # VLAN interfaces must exist before port memberships reference them
            'tag:routeros-vlan',
        },
    }

# create IPs
for ip, ip_conf in node.metadata.get('routeros/ips').items():
    routeros[f'/ip/address?address={ip}'] = {
        'interface': ip_conf['interface'],
        'tags': {
            'routeros-ip',
        },
        'needs': {
            # the target interface may be a VLAN interface — create those first
            'tag:routeros-vlan',
        },
    }

# enable VLAN filtering only after VLANs, port memberships and addresses
# are in place, otherwise management access is lost (see README)
routeros['/interface/bridge?name=bridge'] = {
    'vlan-filtering': True, # ENABLE AFTER PORT VLANS ARE SET UP
    'igmp-snooping': False,
    'priority': node.metadata.get('routeros/bridge_priority'),
    'protocol-mode': 'rstp',
    'needs': {
        'tag:routeros-vlan',
        'tag:routeros-vlan-ports',
        'tag:routeros-ip',
    },
}

# purge unused vlans
routeros['/interface/vlan'] = {
    'purge': {
        'id-by': 'name',
    },
    'needed_by': {
        'tag:routeros-vlan',
    }
}
routeros['/interface/bridge/vlan'] = {
    'purge': {
        'id-by': 'vlan-ids',
        'keep': {
            # entries marked dynamic are created by RouterOS itself — keep them
            'dynamic': True,
        },
    },
    'needed_by': {
        'tag:routeros-vlan',
    }
}

View file

@ -0,0 +1,26 @@
defaults = {}


@metadata_reactor.provides(
    'routeros/vlan_ports',
)
def routeros__(metadata):
    # Derive per-VLAN port assignments from the port -> vlan_group mapping:
    # a port is untagged on a VLAN when its group's 'untagged' entry names
    # that VLAN, and tagged when the VLAN appears in the group's 'tagged' set.
    vlan_ports = {}
    for vlan_name in metadata.get('routeros/vlans').keys():
        untagged_ports = set()
        tagged_ports = set()
        for port_name, port_conf in metadata.get('routeros/ports').items():
            group = port_conf["vlan_group"]
            if metadata.get(f'routeros/vlan_groups/{group}/untagged') == vlan_name:
                untagged_ports.add(port_name)
            if vlan_name in metadata.get(f'routeros/vlan_groups/{group}/tagged'):
                tagged_ports.add(port_name)
        vlan_ports[vlan_name] = {
            'untagged': untagged_ports,
            'tagged': tagged_ports,
        }
    return {
        'routeros': {
            'vlan_ports': vlan_ports,
        },
    }

View file

@ -66,13 +66,7 @@ files = {
],
},
'/etc/ssh/ssh_known_hosts': {
'content': '\n'.join(sorted(
line
for other_node in repo.nodes
if other_node != node
and other_node.has_bundle('ssh')
for line in other_node.metadata.get('ssh/is_known_as')
)) + '\n',
'content': '\n'.join(sorted(node.metadata.get('ssh/known_hosts'))) + '\n',
},
}

View file

@ -5,6 +5,7 @@ defaults = {
'ssh': {
'multiplex_incoming': True,
'is_known_as': set(), # known_hosts for other nodes
'known_hosts': set(), # known_hosts for this node
},
}
@ -86,3 +87,20 @@ def is_known_as(metadata):
),
},
}
@metadata_reactor.provides(
    'ssh/known_hosts',
)
def known_hosts(metadata):
    # Aggregate the 'ssh/is_known_as' lines of every other ssh-enabled node
    # into this node's known_hosts set.
    collected = set()
    for other_node in repo.nodes:
        if other_node == node:
            continue
        if not other_node.has_bundle('ssh'):
            continue
        collected.update(other_node.metadata.get('ssh/is_known_as'))
    return {
        'ssh': {
            'known_hosts': collected
        }
    }

View file

@ -1,6 +1,7 @@
directories = {
'/etc/sudoers.d': {
'purge': True,
'mode': '0750',
#'purge': True, # FIXME: purge after managed sudoers are ready
},
}

View file

@ -10,6 +10,9 @@ defaults = {
'resolvconf': {
'installed': False,
},
'netplan.io': {
'installed': False,
},
},
},
}

View file

@ -1,5 +1,5 @@
defaults = {
'systemd-swap': 2*10**9,
'systemd-swap': 2*(2**30), # 2GiB
'systemd': {
'units': {
'swapfile.swap': {

View file

@ -1,6 +1,6 @@
svc_systemd['cron'] = {
'enabled': False,
'running': False,
'enabled': node.metadata.get('systemd_timers/cron/enabled', False),
'running': node.metadata.get('systemd_timers/cron/enabled', False),
}
files['/usr/lib/nagios/plugins/check_systemd_timer'] = {

View file

@ -26,7 +26,8 @@ def units(metadata):
type = name.split('.')[-1]
if type == 'service':
units.setdefault(name, {}).setdefault('Install', {}).setdefault('WantedBy', {'multi-user.target'})
if not config.get('Install', {}).get('WantedBy', set()):
units.setdefault(name, {}).setdefault('Install', {}).setdefault('WantedBy', {'multi-user.target'})
elif type == 'timer':
units.setdefault(name, {}).setdefault('Install', {}).setdefault('WantedBy', {'timers.target'})
elif type == 'mount':

View file

@ -7,8 +7,6 @@ defaults = {
# needed by crystal plugins:
'libgc-dev': {},
'libevent-dev': {},
# crystal based (procio, pressure_stall):
'libpcre3': {},
},
'sources': {
'influxdata': {
@ -126,3 +124,23 @@ def influxdb(metadata):
},
},
}
# crystal based (procio, pressure_stall):
@metadata_reactor.provides(
    'apt/packages/libpcre2-8-0',
    'apt/packages/libpcre3',
)
def libpcre(metadata):
    # Select the PCRE runtime package for the crystal-based check plugins:
    # Debian 13 and newer get libpcre2-8-0, everything else keeps libpcre3.
    modern_debian = node.os == 'debian' and node.os_version >= (13,)
    package = 'libpcre2-8-0' if modern_debian else 'libpcre3'
    return {
        'apt': {
            'packages': {
                package: {},
            },
        },
    }

View file

@ -4,7 +4,7 @@ defaults = {
'users': {
'root': {
'home': '/root',
'password': repo.vault.password_for(f'{node.name} user root'),
'password': repo.vault.password_for(f'{node.name} user root', length=24),
},
},
}
@ -49,7 +49,7 @@ def user_defaults(metadata):
if not 'shell' in config:
users[name]['shell'] = '/bin/bash'
if not 'privkey' in users[name] and not 'pubkey' in users[name]:
privkey, pubkey = repo.libs.ssh.generate_ed25519_key_pair(
b64decode(str(repo.vault.random_bytes_as_base64_for(f"{name}@{metadata.get('id')}", length=32)))
@ -57,7 +57,7 @@ def user_defaults(metadata):
users[name]['keytype'] = 'ed25519'
users[name]['privkey'] = privkey
users[name]['pubkey'] = pubkey + f' {name}@{node.name}'
return {
'users': users,
}

View file

@ -36,6 +36,7 @@ def nginx(metadata):
'context': {
'php_version': metadata.get('php/version'),
},
'check_path': '/admin',
},
},
},

View file

@ -0,0 +1,186 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQINBGPL0F0BEAC8s6aFGXEkW0xvN5FSZKaM+rp9FX4EhWNfkKi7PaHEpZcjzC6J
gIwSwJP7o9L/LLtLYr68Df9sv+AktdzhY50T4zBQouEl6ps/ZaaiVoTsH8wLOp7g
/qDFJ8kH7quUU9Qh6AmirwmEddKmEZTrabg4OjeU/eJEEBJW8/NDc18lrqKC7S62
hjt+XE7VC+/C/4BLEN0OvNjYfi+2giwVOBAThlAtaryz010g2Nb/zSdjQQCEndQs
wlS4enVwklleLo76S63H60rxbh2WiNCvRAJMm6OytcXsQO5NPLt0wyk9FvXf9r6B
eQG8zabfA8u5pai+/a8CYgMijH+k1LmBT2j5hOIFDQmUE05aNTLNYQz6uy+emXJk
PtIf805D4nFYk1OSN/KZ3xYr+4+FtyfQ5Gj0blSPhsq7fJzoSDA2wTlx4Q6x7abS
txtsY78/LCqkRbSUHRKZq1t5jQ5laOV0D1MrLzQB2NFhTWDRHe6UrDOx/ea5ORBU
MH7iW27DOZkMgeyidBzAdgoHArO+n9/OLdf1TvpgPuchEX9mn1eLX5KTco2F/kTu
nn+Yn8A6LwJtFehE4SWL8+PN1xRp9fv3udDNGHwbOuOIvFcc5wNrDj2nzGAV4rJH
9xpFTjx1cx8JYXVbuwGqVj0OVNz9jc64CYSpCeKrWBi5DQruo9OSVQn8gQARAQAB
iQJOBB8BCgA4FiEEBauQNAwMXnl/RKjIJUzzta7AqPAFAmPL0GEXDIABgOl28UpQ
ikjpyj/pvDciUsoc+WQCBwAACgkQJUzzta7AqPDItxAAnS68NpqYaYvCiFEQIj9Y
zwg9J0o6I8813GzBGF0M+2QLke6ObfBkNx6kj+Fd03992p/fjhHCqJpV0k4AbTEl
WVEBjS78PiuIetNTF4lKO6KPyUIPTt2ykYgDmsbrvBieTsTK41RED0wRw+jbzJzB
Vtc7ZsHSy2Pu4zOnPuD/JmXXds3XXaFDMsJeKW/PbfBWmv5X2xR99nM2Pqjg5PtX
RCwvB6WsHtlKtp5KLKmpQs+qq63Ixe6Kc2O7qArne0M06wdgezhKVX6rVatBd+TE
sa0hS7cjI+I9KzQwKbyARfPQC1gYicip1Edp1+89cA/Sv7OUvcUKDYy5nI4sx43q
rCDj0YFrqBVYeqVzMtwEr50xWWl9UsSJucywVE0PRUznoR01uCBzhSWem33FlAv3
p0h9LGwGkRxLgP/MmdrVc/d7+uCtrBduRRnY3otHcg9Pg8DIFjfxgGCR7faQGlIl
ECxDWHfgBLr6oHCiJaTgSVz2D7qg89nziNLuMe5Yhb/Mf2G8oYk12D8+p5GpYViq
04zKUlah02i6YLPcQE5190w7zWQ0vaYqBYO7Db8vb1hphtmkilxbTXkNoo2uNaWx
dZWK+KUtwElsYX+wHj9f+ec7Cx2pDjfJaImLt/MY+dwSMdzqWbhusIuz8VAl3sXO
n5PLmVFTKN1PRf8G60ZYQNGJAk4EHwEKADgWIQQFq5A0DAxeeX9EqMglTPO1rsCo
8AUCY8vQYRcMgAH7+r21QbXclVvZum7bFs9bsSUlxAIHAAAKCRAlTPO1rsCo8Jic
D/9i4c89S255kb8fBoKV1o60SnV76iVmCmk+iU6uxSKJ30mMY7icJYK3wusN/OZM
G/C7aMtj6ROgyG1z0KJdAS8yl6X63s55xI/XIDPhnb9PVf/Dga4dfW7hwq0z5XJq
TtoZZ81Iy/mDjBe3Lhc7tsESQdXsULfrpiQc/OiCUiLVOZGuceDtfHsYbRD1omtF
l+JCp0nF7LRhzfKII6IqKDqHVbMRzl0qUi42+W67zY81ont1SzfS28DTb+V2CLtD
wiBKfBVXBt6junhpPawip9r6OnSUmFaPYPquEmTtkNk8v0txzNifeDMnsPquFT1L
pY6trIlFtYFuFOMyQiDvuSHLgThvvWhwRICv4VqmAZIcTDSpFNqU5E+Tw24UQgL+
roHbBwnYIl7z///VIvZKZdz1Jk7mZ6pbubfw4Dd9k66h+cdalhT2sCQrLLbX7nrx
8BLyGJgqcUZzWa/phhecaiyrtYq4tS4C0pi0ZQ4xewjr45Fmo9B0lDNoiD5a34cR
ipEq4n07WqMdJrZG9bU5/KFy+qFpshrCi2KkG1HGLOW+pSM4HwvwTxItzm6R4ELL
BKEpYjDi+a+Y251ybMDM7ylXtwgFV8f9M+1fmmjXrZFk6axBbrh5KwQjQ/LBu9XG
7Rsw5WBQ6wpM9/nvbzCz7omE3C0Je9KrBeEsW9I4jlspP4kCTgQfAQoAOBYhBAWr
kDQMDF55f0SoyCVM87WuwKjwBQJjy9BhFwyAAYyCPe0QqoBBY54SEFrOjW4MFKRw
AgcAAAoJECVM87WuwKjwopcQAIiFcdAnN+EY6vd3ZCO+CktlBlpl8JYDgfVHA6jm
xCPafLa5Mo6uxQcU0Qzk7W3YBAHAONfT496Z1nPoR5iyqKf/z/TTjSZ8RqLkWnk0
cBGisr/EDH/cd9qfmlrXfIV6R7rJdlCXkleaStWrL7YCTCYEk6+hnkNL1p1Mrmnk
Kt3DPxzbM0iatubyGwhKTDJShXhCtTm91xbNHBjtXtMM9/AsPCmvb7nW243eAfqV
GPFeMfc/WStapJLttIocJ0OMhYbX9bTPFGzFgk77v7x48EW7sYdIPW+/3Hbk7pHO
C/vqgLc2FlrhthkigcWD9PpBn0M7M+OeELYxTAxbPYj1ZXwRPrdwnb6KeBTBqu1C
zsqHGLB0LWJQOw38bX0FaOGGwGO97hyevzuNZi7ohRjkF5Liq2G4JZHwyhP2Ydii
SwYu7Mhm9iMEd/+D/0FymFalmPxFLK2kJHSm7RI0YJMLvLH3b4w4LXxRn/8XA1Gl
ODeXKLNVBTfglmTZc9o7vLNzTzELcQx22kLeYjXS5j+P1F8Q4ctHbfXIuRJhKZ/v
th0JET0OIX0IU599Ux69Abv1GSh1FLATB83uKIKI77QlMpVyehhZrOxZcxodKdka
LWU7QzKoufrsKrTQRw98yFruyeHivCZQb5J6xZPhUQtYbHCerzinUjqpcJMpp8bo
+sSuiQJOBB8BCgA4FiEEBauQNAwMXnl/RKjIJUzzta7AqPAFAmPL0GEXDIABMJkR
vqlm0GEwUwRXEbTl/xWw/YICBwAACgkQJUzzta7AqPDvcQ/+MyvhivufExXRRIXz
l9YhJavb+kfppcSju1fmzInkyNvYvprc/OrGt15N3F7zAr6spATBBvlQ1O0B6Fjx
kEe8Iaugoi4inhfYDyBTP2lwFyOSGQk0QGsOkGYrEQ5D6GnFMYoRqT1u0xnQ5aiH
cQxEx0uEXqH5f1FPLRebYzyRRj02SOzakZkdQuxhHjRAhQj+qam2Bb4cBLzGiVT1
bU+pkwTMpWmJNst0+Sy7asTLQYQLptyAsXT+ZB0wj2mrc5WsjXWnTxXRNB2r9YHS
8nHW1j+9D108vJlU7dIrEi2uGkvDWoRl4clqPUE+Q4C+oVTgqUDivrbZijeCeDPR
z+1KlvOjoafK8qfskl/4u8hg1ycTD6nccbkSXa0Q2myHtSXerxVWNRCwDc7FvLm1
R6+L4JTPKbRDyLya6YaqMeTTJboj92gpFWXZ0ddaEF9yOJOwMki6K3QtGbIqoCtw
sPZpBCpdSCB+U99pPy+lS0XQ5wdn7RZZSKXk+CC2f5wbfiv6mB1nBbvlztWuNlb5
nOAxAWkUrdCo6q0iiq3ncBolGEFtBaINVxfBpyGKNqi/1qqotaPi5/8mxSgrRvwK
Dvf5Rwq7CGJ5FaoDakwkK/g6OJs9x1/VPkMu3/RgeK+Dot+bfNIKE5Bj4kT7lFl0
nW3x+SVe3zIXZzCsJA4N/efV3keJAk4EHwEKADgWIQQFq5A0DAxeeX9EqMglTPO1
rsCo8AUCY8vQYRcMgAHHT2rJ6TOzBn9S8z+kWexnFbBwXwIHAAAKCRAlTPO1rsCo
8CYhD/93z6kS0rb+br0gSH0eXbvByDjjOarxcLZ/ok07PkinhJUvbbu9ereMsfUa
Y1Inm+jznjd3oz7aIgx+oltt4IMWduPMJ2X5LmYRTCpyVPtEZGVdMowW9FFJIfWM
9OloZkx798GicuDx2qwIAg108xAtPpTFvBJRPYM4n3+I7+Imwl/s7uMdjfUdmvtz
J3p4bKB9OVXT1nOTCfeqtAMZLXmQtSWBxE6VGZzz+c6l93TaSnlabkPlIJRsqrZg
kcpd+Wzy0aUEKQaQOSitOTJ/3DU17QrJM1EQ7Mr79jQfkAQXwhzFj0SDee9H2P07
D/aHENifhbHfltr43lEZtoYZeY06VT+HBut6sWos61hH/4K/2Mr6YexER2DU6wC2
oUF0Z/BXs/FsJn8bxlEOfz0f7k+W8gDGjvESwsKcnagXUpArsD5EXChTNyKhwxx+
8MC9WBacGhziGC1I8xEDEuZF1YuINWusWY4h/Vx3fgTwNQmvnahXA5pFIFAHH3EW
JcX4+Ku0UUpBTz2zn0R1wWLLpmMwgMYFt5GfA86jJCYYnNbKWoC/3SZ5IMyln/QT
DWY3oXAoYHShs621rDjGI/NCFKIkblacmfLh+A7es/T552VRURFXaDHTDoAoJxmY
BiTKJkC9QvkHQUckSFEUC1MB9jczWJMOwiiDinuqTdu8j126b7RSRGViaWFuIFNl
Y3VyaXR5IEFyY2hpdmUgQXV0b21hdGljIFNpZ25pbmcgS2V5ICgxMi9ib29rd29y
bSkgPGZ0cG1hc3RlckBkZWJpYW4ub3JnPokCVAQTAQoAPhYhBAWrkDQMDF55f0So
yCVM87WuwKjwBQJjy9BdAhsDBQkPCZwABQsJCAcDBRUKCQgLBRYCAwEAAh4BAheA
AAoJECVM87WuwKjwT+IP/3oNbYJJuAi576J3aov4+tHleeoDtlhij3CNgkdJvkiv
6rSiKRNxqVbEi5A3+chJ7h0yHoCGYJdi8ciVEvwdbgduQaBrmdIR+Gt180KBWwQl
xSAMIb5+wuATnDoKykTiHy45vHsiXTyZ2IaPwAtcVsih42KOE/M2s27IfJZlQfQP
GDi0Uurzdl8RDQJiRZhNDJDp/MsCaIA8+MY+EIyiRjBf7cGmEBoNiCG+5xIChtD8
oFbragdcnIY39AfjVnAK136utBnEXUkjl9+hGCPVWOzPlnmBYelNTis2w6lwzbkm
FVVNXrKJCToOb0coOngxACBIZVHUEzGOYzTjkLjcsSnxoamFCxc1hVg8aikoai+H
nb/KMSB4/bpx1k9B4GVM8fuizbdKyRGnwi8aCUa2mP+cI43Llc+bpPQpdDNe77xO
9+Wg+Ysnlno+iwcEunVeTXyQ4GqmjCJZhjmiO/oJVID0qgYwsjEC5F7nmRy1zJTf
l3oTWM/I68hJCmSxd0kExDEN52fdGhx+42zsWlMdRwE4/+GL3lrqhUzpX/806Iib
4xP9zx+tKBs9ffmHNl2TlF4e3P2esSKgGaIFMlMomj9IPNeKdAae5mSwHyf7qkXC
g/1YvHM9LhzOb7GL5NtXc+r+tNSdZreX4xOu2Rzp6f/A4eRtj6c2UdxgtoJ7KaTB
iQIzBBABCgAdFiEEuLgLW2I+q2rYd1xFt8XX1jUJR/gFAmPL1EYACgkQt8XX1jUJ
R/gupRAAxnXA+zN9wu9wC7GikElCsVkY9TNk76BsgbZ5aJE2dqWVpB2heplryVUn
BBuw+2CMpgW3FgAOOt0bBDHkknJPSq7rK4CDUsAlL8A+iXFRXfNgGFwCLdmDtblZ
1Q20YMobZ/y3X7fdnVs1M0GXG4LsL6Xkd/SjSl3iQRPH9tntATDqBdmr/3lEItk4
zFtst1nfClQicVdQsBqf9hOF3ByGjrUfL8H/ujMY8KLs6vorSr16Y8v7p3VBAW6v
QIyBYK67GdUN1sGmb/gXG18ptHu8vaS4NH5CmRyfXUI+b9c33vbQacG1FU+TbE3z
XJWgT60shlTZlywSlkWWk6K4NVZfz9ECrDa3BSp+iDUqYZcv4N3zsKw7rXONbfXC
JRdOA+Q5jhepsw49r1opEmDogok27iEk3+Ug7lTucPZVNkA41UWPOeJiKW1xOke/
D2X8fAHvYkCDzEO+Qnu8MgRHX/DoQp1hgqG5umINCYnSjgK6aRCqATZf1OsWCP/m
iuK4O4HUJa0mKUKv8OdjROtJZnOQhlJep/OJwnWBGerpQD43ZWYy9tbPE3narpYW
g/QfY0WOTEFGcBOACEgL9s/5G46KquKBxdP+DY7kaGoLMICb30ESASUaPniUI/Sk
V9LlTcQy2ttEt1k1sqOCsfby1psikLCNqDal9o5ESeo1+wTRMQmJAjMEEAEKAB0W
IQQfiZg+AIH94BjzzJZzpPJ7jdR5NgUCY8vUbAAKCRBzpPJ7jdR5NrxJD/4q+MV8
SZ6BTiPjvolCeY0/3uddWbmc+74VjRukwGXjE6oYU7rcZKWEAM2aTRb5XBUgV7Sr
7DsrpSrZawjwkG2UTziJFQ1Jy3nQw93QrXuhqdrIYjjKosXliI5vT2EGTMfFKD8s
XqDppXaPGFdntitZpAT624XkCDkvbe4NOXohX6bfsxRirM200cjREEgyqkp0XsJo
t8iJVTElyGuOuRlv39V+FUsi8Cd69SGKKmjpdTLcAahrgL0w6Cqo4lCtKuTyczvf
X4qSQmb9aALL9+MsjDcI+zNhmA+6ma5c8S+X39fjTB3q9w+5ZlbURnR6pru9iDbJ
z5XPe8OD49K481yddpYOg6RjaQVKrYGnuCn5b62DHIDhrnGB64aBoM7AzQzkBBdY
HfNjovlAM8NbsoabH0OKkC8wRCVVCZXMby+ilfNVhdUQ5b/3PCpfCv7jkvtPxRCy
sejp/49ueMGol3gb11BOc8Zzqe483cCbObPKH3rfPZ4JxXSq4DF7CfotwWXSu0W9
UzJaDDyyIXj0MHiEzt1lXnbpDJTLn3ge9yvId/Y8Foea7M8maYUtqSAH+IKmj3+F
BUyaa/3iB7/yvb9NT3vEr/Tl83pJUlEc51vovlCjNCxG3v+RVQpDq1H4K0elydiD
NaVDCtxFpx5/lWRrp9eNEsk9szmpCbsNK2xch4kCMwQQAQoAHRYhBKxTDVIPLzJp
9emDE6SESQRKrVxdBQJjy9URAAoJEKSESQRKrVxdAKIP+wf3m7nEqieGM+NFXRX7
hk2c33lCmcI7eiS4E+HBuH7gnIg7XDUnAYuIMScOVNVaVC33enEiVBVaIF0eWmad
OlyZJFS/WRMilLJWBR6VlkEOh2hIQEaqpTsuXlhnTBrThLzdgoCf4+3wa8fTF3Uj
x6edHejhxn+Tll2xOv/JM4pOd/iblYxyla7wh+yrO5tsFUcioBHyI15ceS30qA7/
lc0dA4kY1XQnKASRlkNgGaETFV02hjZjXgg2i2Ksw+534NkoJLZL/Rnf1eRMMqA1
BBwqjuAR3g11Xe/rjLpXd2zdVI5bK+C+3V8autvZo7upzW50QhQn9P68aCXrZjqE
2FgVHxa/czYdy/oDaznYRDhmlEC0YX/zqcsYm4A9LQpnGg2GT/avVNAtKSPH1Ap/
vK2yTOEhMaf54YLuUCUnju0evs5AB2GRpkFM1kHnZxMBnIhUMqbJXZs8TY2fVmOr
49e9OoynOhKH3wJxQoOf50RuQDh4xTiYpCPPLq890OJTrOiObSvFPMhrHvo//1zo
49elCVvtZNFk6IwlX2Tlu4OunHicwROs7yWUnEm8ZwE3PInHHi9UbRp6Tzsdd36n
5mmHfUAK/HdVRfYe0tDMmN5vCdvMNHSd2kU7zrT0tFscCCM5XJiQfOtVm6Rl5jz3
QdeWAjREHBd83ooNaKiqYnUhiQIzBBABCgAdFiEEgOl28UpQikjpyj/pvDciUsoc
+WQFAmPL2NUACgkQvDciUsoc+WT7iQ//e0HZMpvpdpD7HuLfq1mIjW2rxoYELI0s
419FO1jmoJmqR3OtsmYA7U62hCMqhP8HCDqc+cDFDBFdzSgcXLeXIPqEzD0OgkTX
tjY1Q7GthHBszUh8CNbXUWmiDY/mwe31tf7JsvdglJr0lXe2gPo8qKT35ckQyAXE
mKsVKoBya5owndv0cv4j7UueYwLy2ocuKIMKeQr0FoWxThr+P6/CCwq5teiUCWIZ
0hzuxYINOFdUsf7Cm332J+WBnvd1qekzbGkcZMURjbQiJ7H3pvdyrFBl0oHlunGq
fiMgy+2hXShcax/AEzPNEcULzIuwaXypZsHtIkEmQPbIsTMwmeZJmo3eappsGbml
ZSCgu5vOvyGJTlvgm6ssLisC5Y5QsPMZnCh7k1w97J71fp43tuGSkO0SWodz3tCw
+FGD3Z+INueHmNCMom9taDHv3Tqo1jTBufOzZ3sGXSKPayqTEulvtCB5ZJDw9+6H
rx6LKcHnziROyALWiBxfgizW8lk8mbgKp5H9oD0cer8n72jiA0LD5hrt8eTlAPCF
cKwmprr2BSJOGI84RezsfItCr1bMkQ1xLsBIgMYjHRPFdFdICJUsMtyqtBED1y7a
BCxJZr+0bZkjwgk8G8pKYSPVEmRRe35ulSTWybBSSAFd6bixYUj0nnswLw2Lm1Hj
NElx+hnv/0mJAlUEEAEKAD8WIQT7+r21QbXclVvZum7bFs9bsSUlxAUCY8vt8iEa
aHR0cDovL2dwZy5nYW5uZWZmLmRlL3BvbGljeS50eHQACgkQ2xbPW7ElJcRLNBAA
ulagMImbvWUHayliO89kmXBQdok8/9CutzekHOa6+NyjTapABGemuh+p+Y41T6rs
S86IJ/Nvu7uGniLqHUjm9jfjCIw4MGq5mI8qRyNQ9W44ntlvlkvtPEyquF23ofoy
opkBfXZT88omHiOXENwdINLobsMSKjyu1PiIMzQ313fR4GuvCyFdBPwIycuCFbio
1igiLmeNRO3g0V8leFSEh62KWnx95kxdZbS0Vz3LCvHH39wQSEZ/bUyJPM2OOjlz
edHD9wbi4rSvOxHBZmXN2uWZBpIHTtYTF/BfrRFRZNcQhKHO6xUkpG+8Bo3cmy4R
MVt8GPwac/W4qxuKzrONmZnDWO8tgQei9XF/7JeH3FnQtqjCR6aBT4KFcjHaUca+
CHU5AIGWft8ZMVmJ1dphN3dVmb0G2P4s732xrKS1litCRMnJtulnvZsJCQGow+VW
1WYDgtoixgD7ymithet2VTmhWyRnQu2+T+XzzqtYC1sBuqFf4n1BMR3JeOqyna/y
n7C4oV0m+2/feaIBsqGGjDpC6Bn6cGLINdB1PMTwarPLrlXwxVm8w3I7c7sBggYT
2jxfsYmVAgDpFH1Tcz9Z63b12KqSY8P7dGxpPMLwbHQcAsacTRJm04TWUJBBmKTb
iFqP7WsDSxiKfqfK10dfXEvcLLzm8jjnT4b9/vi+M6a5Ag0EY8vQXQEQAODS7H4M
kaix3PJF4A0PzPLtZc1jUdtpdbnuDICQ0urpWRJ2WP5XER1lRs4nGFBnWEvP+49g
rT6G0x4I98nQgWYlij3qdTWgDcY3tMLlaKiitaaHmdychf5VXXXKjfcFAdWW/8/n
ZNBBAJZjgyfvOnt3kG2yNuJoZip10tp1ApQhbsSsxOhidDCz4OH0B9VXLQixi2cx
3uUTbF0bdb/++5/j9Gvx3FEYxZxCU2UP9G/YuBb6k+1cn2MeLq92DlfFZjThyT6Q
0EzWjWYKhI/yO0hU2wmMya5+qXGffQFsfcLm8DQFDCcMSyxF67g7VruapdpivLlH
45N3e3HIyHquIzX63l5m6MSOEmJOyrYYgm7798W/XVDkv7zA4+ZMVpQ3s+DvcfTR
r0ltQ0TqnVe4tUnypzUSlsHFhiotkodaWJyrcGBir8wU5FUK4yEVqiS/lm4kAUtN
k5EF62QcGAnSezfkH/rIm0zWfD3goNib3kceeYJjzV1uZAHF+HLkLTAvCiRoa5FY
EKe8f3VYONZLHngywhvnfHvmie4fQZkHQ/X73zWw0m5sS4T7Un3XGQkjfG8C1+je
MRE7stjCyJJk6+74eA/LRfX3TStNFJeCwPxvScyMQFA/R/Z32L4lz+Xp1fHFTjEs
7xssfbg7QUuM6pZGa/BrwF1z1tz/SdO9VctrABEBAAGJBHIEGAEKACYWIQQFq5A0
DAxeeX9EqMglTPO1rsCo8AUCY8vQXQIbAgUJDwmcAAJACRAlTPO1rsCo8MF0IAQZ
AQoAHRYhBLDKuSZujDkpeYs+7r3m0rkhbseoBQJjy9BdAAoJEL3m0rkhbseoTmMP
/AhFpk9kkt/kiftUBsEbK8AwVeBIaWvAeL7QM72ZGyZkbsk4gKPPY+jZUjEu+eBt
HaFKM6qJIwG0DxTpizIps2pLJZtiHU8NNLbX+Ch8nZFvoKUbO5b0TbG3GNoyRjci
MdIQVRwIfepCQXV1NH315hhZXFZn55a6JH27xbYfuckByAdCQuNF1iNDqDhbdAIm
rIZCsOFTh71sA3Sq5wJl6IsOzUoT2zGGateC6Y0+LtJ+B9sFx7V8PEeCxYQi1NHK
xOvLyeStRnCuFxfCZ0t91g58QPKxk8SpwPPG5BMxuSX9Bacuwv2OpiPnIRzHQyI/
uJ1mjU/FNybhx7rI7RFVTYESFJ7C4H0DmlpUzCxt4bajt3ql5Sqin8IeKZ46f5wA
FdLX84I2I2WT/mNrsQuiUKKkUGpN3USgC3MLvHXbDb19LECeFIuOo5AJjJVkdmXC
3zcTU0Thr7fAofhKdL4x/q1hPTeFggxT1TqbuW2hrcxLXQjZm3KWm7zbsotw09Sp
9j6lI5YHgLuhJhscHTvYANciPMOFmz6wuqjCNvJ5hIyZFzotvjAEJgUvFVyVZr1d
n6RDaQQ+aKMIUfAiPZa3waRPqyAfa33iVJJ5QL1i5ZuBLhQ1oflLpLRjtPRWdIia
n375OPSAU2VpI97SL88jVHqLrjBOwgITXbeQirAfnZIrhW4QALtuyXbjWx9Z+cHe
Hp0CUDJAse6IIPScrf/dtMzzEkxfDWY+OgzSvaiTstRnqLpgiVkm52FlD2AYRgBd
nXXdJqOEgH6SimM+IpGDdboi/syIrn16PtBbEHvu1ypdhEb4YW39aKnpMhbRL6KI
bpWTSbX5haX6JqdZByqhL7D3bYZCUZ7xie1ta68u/8J1Zazy6COj9wdUouNnj7I6
tsaNBGjpoT1RlNL614D9vTxje4ErQwYaMCOs5XcthRaopcIVJwtAwzP/tCLVpSKi
uVqdEq3RhK8EkvXSm1iEH8qWjlASzdVgMFWB3zx2epH/IDHiJkjBuUUONNRDMUsC
R4AcZq27p9DkNw37rOrBQUBeYlmFwItE3nIQ7QRVXtlbm8tVLM56/YmMXae/Mwzh
M9W/TKDtccVwtHs2iFLNka1iXZsN3SmqgfiEEAiwpzrnKvCIS3jsi8GTv9td0erQ
Q5a7LATQwV0DNwqvT2pDp4PRZLH1HGkFVb+yY/XZG0PwYCmBkZUoQDl6P8f58l9C
18w52Cp5D5/oqiqtz0NLY+a61uQbfa2oeYDDEK3NGlXBdEAaQqHarkY8Gf44/ea8
aCsM9iH3DogBJGgIkhs2Face7OmedNkvc7LiRNz/z7Vm62F/mXSBHIMvQ0pwvRiK
bn5U7DwupeFEycZrqQEKsjwFjLxa
=QzR4
-----END PGP PUBLIC KEY BLOCK-----

186
data/apt/keys/debian-13.asc Normal file
View file

@ -0,0 +1,186 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQINBGPL0BUBEADmW5NdOOHwPIJlgPu6JDcKw/NZJPR8lsD3K87ZM18gzyQZJD+w
ns6TSXOsx+BmpouHZgvh3FQADj/hhLjpNSqH5IH0xY7nic9BuSeyKx2WvfG62yxw
XcFkwTxoWpF3tg0cv+kT4VA3MfVj5GebuS4F9Jv01WuGkxUllzdzeAoC70IYNOKV
+Av7hX5cOaCAgvDCQmhVnQ6Nz4fXdPdMHVodlPsKbv8ymVsfvb8UzQ6dl9w1gIu9
4S0FCQeEePSii23jHISYwku/f6huQGxSjAy8yxab0aZshl98c3pGGfOJHntmHwOG
gqV+Gm1hbcBjc6X8ybL2KEr/Lu4xAK3xSQmP+tO6MNxfBTCeo8fXRT95pqj7t3QH
Iu+LbVYrkLQ6St9mdOgUUsAdVYXJ3eh8Y+CfjmBywNRizOGHrEp8JsAcS0+a9yBL
+BYWhS4BL/EeeacRLT9kfzIqS1OD/RL/4Qbi2GLGFsiHaKFUn4xse20ZXq5XtEL6
ltQVIr/iAlBtdSOnge/ZkNvd3SQIyC2QBNAy67QutS8yiaCE2vtr8i5GQOu2fgr1
NJ0VjuwshmgJvbZ2m/9Zq1Yp1iMnPVJtOWcNxTZAWJDN4L5OdoqbaOkqS/+cgLy2
UTsc0A7cxt/2ugOtln/utXsfgb3Qno69yCuSbQmVM1NrwvZVxPIWi7B2gQARAQAB
iQJOBB8BCgA4FiEEuLgLW2I+q2rYd1xFt8XX1jUJR/gFAmPL0BcXDIABgOl28UpQ
ikjpyj/pvDciUsoc+WQCBwAACgkQt8XX1jUJR/jTMRAAt6Mltzz7xk7RGIGaF+ug
0QSoh9n07Y0oxEAb1cPSvo3o5wnxQ6ZYIukr2KTFkXaDh35XpXoA2Z9Uf6wz4h8B
nF8DWhbo+2sSq9au0J16bsLuIHfhzJWXSwyekHOrLiiiSfhjey9eQzgOT8jJsEjy
FzfxtMOTepXX8yQdp4SK3WYdVjAcbwjFGcbh5VqQIsr1+MdlaVchqWP1vm1ADvQF
C87hQjhpMzQoU7WVkJWsqlMuXh95h59h/SndBiHKXHQfs/LAM7M2K/fgS9+EbPWW
fC97/8SqpXheDsvCvueumTyzUCNXFpNGwUUA1qO6GTaMwHjaX/AeCaRMxCQcLdQ0
7b6zc13dqiMAAL1eSQ10TFP9kD2QoyPjF6lh0S5xshHWET5duw71KjYAAOGdv8J3
9DGMvT8OdL8UklIJy7KLjxJOjY21oPCHgx1cQKLONCgOAcQ4ZmzBOP8sWZ7ld8OV
Ke4c/bOqwbRMLNXUwuVJuejwvoypCOxbdlYUnfL633wVMQBM8ilog+2TydStV4AU
CQVsICw4iaXUU+B6gh1euvgvCW13q7pMFJDPbpC+EFC1Fl4RT+CFLE8XG0kXHQ3x
HWo+/b49x3MYv5wS33+NZpfdHEuHKwybfTIVshlPU8rXmrwmVXO9iRmAczjcoeYZ
OTI5EJz20PBi65wAdpAFVBeJAk4EHwEKADgWIQS4uAtbYj6rath3XEW3xdfWNQlH
+AUCY8vQFxcMgAH7+r21QbXclVvZum7bFs9bsSUlxAIHAAAKCRC3xdfWNQlH+KbZ
D/4uoBtdR5LdZGh5sDBjhcDJ+09vhagDh4/lLsiH5/HEmY5M0fwUTvnzV00Bsu3y
u/blyKaX/oram1jBzwucqkIXFx/KF6ErMkHBQi0w7Kqb+nY1s24rD6++VL/ZIA5A
CLoMxD/xWNN0GA3IMa5HquAxejhgpKB1Dm7QcEab2Jk2hnlCFBgmjun1xEqb2IO0
fmfXjREpRBbzvmOTCkEUm8CIikJy7CHmAIVOJnxQZyK5bua05fKZOJQvb7VmmhJw
/1eE5+VU0fMHbZDkVeL0LOAecpPGH3uCEXaf4J0Pu4jXCHqz9UPMNRawNWEcBRTZ
oq5M5GpRkIpPpt8j7jGoQaKM5bUxtsS0+8L56n03J5xWBy+yEQPYnBJs5n61/dcc
aRwqO47TJsADIqg7T5Q+v97+1xXzMc8KkTbtQatWdukNuVrbLNXlLYI/sPChqMtZ
J7yW9Qhz+ljJnBKkYTjG5OLjsInB80cNFOkZMjsj9gQgAagSwqll/IIXry0zKF/Z
A3ARmy7G5vjvqP8HjSWbcqbjdz27/H8Zn/HaGRK5GwoBS/4CyDiuvrq9bS6bk7E4
Ql6Ni2UF7brjEULiYfbMdL0HHaKHuU3rWBCZtFRyVJ3yUKP/UAdxtS8VwbkYBOIp
gS4Y6RwXeQmC9G6crnXR6hsODs5E47hiugf/HkhvyQ6CJokCTgQfAQoAOBYhBLi4
C1tiPqtq2HdcRbfF19Y1CUf4BQJjy9AYFwyAAYyCPe0QqoBBY54SEFrOjW4MFKRw
AgcAAAoJELfF19Y1CUf4uo0P/i+m8SnrFF7IcsppML6dsxOvioUt5dBbXgkSbCUh
dciW583S04mqS8iicMoUSXg+WKXWJ+UaAnfh6yWLcbeYpH8SZ+TX+J3WuLj4ECPe
MYfLGY4eehKIJqnEDfVqtoc8g5w9JxFglZBTZ/PJeyj6I2ovzVG1YH2ZER0cvRvi
tywWBP3edDBa/KPHzBVLaeWuuH28aAGHF2pHtEh+nDfQ/EblDlPUkGclnu79E82g
dl3W0GvcbMXccVIvik9IHPI042me4KJwy7X3qoNGbn3+XditIA+6rb1N+wGDdQkD
s9MvGmoQoxs5iFi5kW/AIdIMHCR+A6MMO4KGQ6E6UDd/DM3iFh2V+gavktk85sIk
Thy378l3JQRidRptifTJjESnyM/NUjN8JMb6peyn0xKyYE6uNK9cZAmbEWGCdZfp
62gPUo6dR7BHe2a1qJokvfSJdjZtczBuWotFs6EQcCuRDqpySzrLYitCNxNqJ0FG
+kryruObVXgr4y+r1C7+CczmGF0m8zp1BuGaT6pbx7X6VqazYSfOkQSk4Wyk89Ry
45RZmg79Mgv1s6NNz4ngW7LYNJgMZXwYHL99UiL47dOFBCIXTqVXURwU+BkVxwqZ
Bq10BWd+qdMPGl8hsA3zi64PJMg0u4YaWs/jasZaWaJI6tv/M1WsfQ3TCZrtT6YE
nhieiQJOBB8BCgA4FiEEuLgLW2I+q2rYd1xFt8XX1jUJR/gFAmPL0BgXDIABMJkR
vqlm0GEwUwRXEbTl/xWw/YICBwAACgkQt8XX1jUJR/ilGw//W+ckV1lt00dA+S2T
L7qaQehp//03GXnC4CRVEWalaoEylcqHlvyUiQc6+r44ZkoLTRSadNWt6EIISFaZ
OiIEDrzzpNUVu/9heQeJeeOzPOFQ0LBNI86xo8e1EmvWMBLDf6NGJZtoG1qBNIyJ
k0x7x51pOGf7h8xlvEDo3F0JNC5/N1FjtdAHdyA8HLQFkePIWHUm+h76lgF3Z5cE
3Myh7XA0NfKe33pgI7CWhbNiF62XhOMAVM6Lrjk+Zp7FWDplSiNu+J3TTjR0sAkp
H5Uf4V3i7zIhlVKKhV+Ktr5ojuj805U1tocrH68bBn4weLDfPzGp4rZ5aMoKqK+n
sTYZzFr6NYBQG/cjs0Mj8g5WDvXLLoJ9aCzhQvPqAzgkle2EQuzb3QSOQdg4Koub
/aQIB0TGjgKYM7WAj/ECoK0hk3w077VL7MeG8O4qSubW1toZ0ZrabWGRtJ6WxTNc
8NqdZHZhZnfDqJQ6YVnpuuvlpAMBZfTIMCQDpgfwbDA3ZmAQuYikB6Jyr28ge5v9
tYdZIIil4P17Jdma/usnVSplGrDZzDqxAM+sOsXejjdAIMnpw9tilIa7y23Cefls
qdzJsAxZimipzSuRU29VJ35dEtMvqxL5cbBVMcl1FQXGIchrWtSDlzy20WuQpitd
PejufO0YcdZCTo83Wze2OFIKmjGJAk4EHwEKADgWIQS4uAtbYj6rath3XEW3xdfW
NQlH+AUCY8vQGBcMgAHHT2rJ6TOzBn9S8z+kWexnFbBwXwIHAAAKCRC3xdfWNQlH
+E2DEADOwCe6UQAojyXmQSLPeRH9wfykeeAqVowt15L3SegF3CGf/WyPeA7o4fwg
60DMub81UtDanTB2s5ayGH/bzLhhDF/XjaotyEox6/J1/zpginVTnYRUs8mJempE
rWuirifsKHzh3VT/pv35rwblHhMdHj2txoZtTHa5MjgeRd3oT+NlbbG6firKCzGC
Vdw6sz478axa8tgwG65GPa/4lRZCfPYd62pA2HLlfFwjgDC5x1cOU6YRHVdX1VJ0
QEr++oOFWNi9grbBZjZpNSN2FFpXsvvA3zzaCGfUVZ5Ti4GKsC/RDbmIZFLQrF8v
1bETSQDWt4F56/njcQMcIOYp0yWBvRKhJUeEHVl3u+tGaMl74f59MZNPmNnY6y2d
aDIRMYJmcjagYcTSpFar6MziRN2vepQ0kVDxXoytmt05kNOLFkPgcKrqweVP7R5m
Vy+//w99drx47TwJeii7/GiuTN3FLc2gn5wmoeur3hksm05Kg99gxr8i1jeKGCGt
WLeA2Kh6deozOsAjyT+4cX4wh7mUO8lOTvRp/WRqqNo3aTdelVxdmKOjtqrukVjL
LaY1LLvlQE9K4jshcQBidr1NmdCl9zV/IZzP329juu4MvK7uyyzHSxXSG5jt0wu4
szIOzpgAqhsTasLQMi5Z1cdfy+NfqlVk/vmmSYSaBlmq2QgnX7RJRGViaWFuIEFy
Y2hpdmUgQXV0b21hdGljIFNpZ25pbmcgS2V5ICgxMi9ib29rd29ybSkgPGZ0cG1h
c3RlckBkZWJpYW4ub3JnPokCVAQTAQoAPhYhBLi4C1tiPqtq2HdcRbfF19Y1CUf4
BQJjy9AVAhsDBQkPCZwABQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJELfF19Y1
CUf461gP/1p6/NzPvYsEfUm6zJYTIDKG1/zGeIC9EsOOluJKDgZYiY6ogYUDhRN9
X83yBMzIQkVF88SOQuT2fZk9KOdOAzdAgc5CB7ivoh/P44HeacxjAb2z8/tJJKW2
O4B3HpyWR+Yn5aymdLJe+ZFsBdfyU7RPlox42o7zZmf1ZQKQSoBZb7X3Eq3lq442
ZewjsjsRiijlTODfp6EEIHYhY8vGhU/lyqpwPkGVfl/G+s43j/MAo5b5TBeG2J9W
tqBYy+aG8cRM2vJoUrMZR0GZvgfbMVun17Bxg7ez4OiYhVblx3lMQv25BnagQTpR
QgV021xuw40cR9POy6+yBwRUYNziGZi31rrvzTzmFw9cxV7lpgjAMwZJifGZClda
DBxYUQR3OeAzn09lRhpOdFXpM+MM5GXgRVPmHhtyn60xLMiy5NCRuMtzmP/OaClR
KL9BjWnOH3NzsjAvc1VtNj0DSVGTtnswDmAQgFZVYYesjpiTNFE7EDTBCT1uYVhI
Mr3fV1US3VIfKEZlJrbB9FAccWqC/oHT/DUvhjnDhC3wRdChlEbfCxqaiHU++gsN
66J9r6ZI95PC4w0X3O1hXJeWtm9d8M0SxmAfJ4eBPVOPyFgOI4OFM8fFFie5MeAk
4BsN0Qyu2hD5g2RCFYIinbfFsSdW2WQVa62uoHfWgwLPwYz+sWjAiQIzBBABCgAd
FiEEH4mYPgCB/eAY88yWc6Tye43UeTYFAmPL1SwACgkQc6Tye43UeTb0HQ/+Pwzn
SBBtEV7eLS6qZpS7kosP5aVagUkcTO8UMxZkUqBhm2yW8V885kSic7rZOeWcd0NF
rVpTGH5LH3hi/a13B1S28v7Wy1AxNdlHJVfH5bRq4aSJmtCNNbbhH92IuzpV/YKc
y3ueFdQ3ssLWWKBVc8UGa+qrAre5DXmmawwMLlZ16G7OC7YyppN2EzFnf1rC8AV3
O1UtpZLNq8MkWAk/65UTDbTMS4f6IM57Z9pemBWsxTBKyAKXduKq8zkdnv8B+RPu
PgyhqJUiJ4RgesuYw4AhKqiO4CYQm5gK9IH+hMN6INUBHOkn26OkyjArZgFw/OS7
rT3BZinqSloWiBPhAg/4wdg+Yj/mGktJ3Uiu0Z//QVZ6/OWRAAMNCbrwZcADt9pE
CRS24y8lbNuicfXB7rw+yX8j1mXlily6kVpPtdAJpkE62cHbMYsMKVkUFBQS9Cn1
Pvo5UqB3i+6Rxx50TKkq5OLf/ZciFw4StZYBRlHzgOiyBZRCi8+ze61gmrzv9Z5a
d6UCz0sYara6MmvQv1No+O/emaaO0N15bKFuztfmuoXmWSh93ek5ZNC8Kjb4hHkl
31C1JGPubGsRaoq8YTeVIFEgYIzzfVgofceDy9oVtjcRYikDAbDYVgvSzeVEi05T
TBRW8Xaj/RxIS99Mxog/6oSND5CzjoJ7DnuT2quJAjMEEAEKAB0WIQQFq5A0DAxe
eX9EqMglTPO1rsCo8AUCY8vUIQAKCRAlTPO1rsCo8O0DD/9NpnkalWr7thu1rh18
aItAF3r6/TOR3yhfz7LCRYWnOx4WudV4x/+W1rhFFxB7EvE51FzOjgoGqC2c2pBp
+UR/+YsUKyCe2iTf4z/ZkxGGgpx23Pz9/bMQtQ7YKB1yD7uXu69SaT1gJVOOziFu
gpV8L7wX11qukTHJU1sMemWgbHVyLJAjXkrDt11KcpvUh1q1CcVMQJdhB6xkPhJB
RHrY1Dxg6qipXN3d7CD8AaD9p4Rc8MJO9F3D63JkmRvBn0Ecvsnxxgo/Zl0nbZSy
MODQZA8yevFqrOmyG8o2rIzvM/fjNiiAniIocyt/syK02LCNs3lpvGDqANkvFvYx
faGG5O5mS6pv6BsRBxzoFZI5z+OXNM8IXw5hgDx577aPbcu6t1tRrWUSr5EfFbN5
rYqUtECB7o100b4aFXOP6Ly62WNQABBkenT/aeUGI5VVg6J53+M9OAUagqSVuoVB
a6/AZtD+WN/iBsRc8jwWjWvb+bmvK/fN5wT7A9P+x87I907bQbT/qowDJet5kR0f
+A9F7zy6RXbQ1MCYL9RmUlKX+an3g7s9ZcQssbKfsvONFtieI2xgdL9pLYZKiwJ2
Q7wF61IaD88Yi5iovtbH8Ewqz5lCSzib8h8JqC5vFAj+KgjhFJXr6dC5DqIp9DvE
iJzogcrlmV61SWjg2K3EIJ9Z6IkCMwQQAQoAHRYhBKxTDVIPLzJp9emDE6SESQRK
rVxdBQJjy9SJAAoJEKSESQRKrVxdzGQP/33qzOrxlAOisutKpi038qrhBegZpWIP
oFE05lSMXQVODVRoqbMU6EaWKEFBbX8H0v+N3h84gIrLRWAaDhdmPviY5vJzYJoq
Wd67GSvzkWZLE7/nMTni1Nz4uMuPgEz/2uGtoX4N8hpDvtq+39YazTj92t1vGjHL
3Wuofv8zEl7AkUvvq4qdfwjj/+p4QSzum5xp0/PlNIbHXyGgpR8R1zJzTInrZ78/
bEubmk5VSiZOlnwVBW7dfg2lHb9EKr1TtQjO62ht/NsIEASTN7sHSDOqG3QMABFZ
/TFf0VNvQdU7K4sgw9NnxkqP+NhOIxu1S3R/ii/RmbwMWabRSQb5ZpAxxM0Y7uuK
X92wWmVFOKfKIqdVisWz/hjPREBCDXuwISr5PzUgk9Jd1+iTIHPu/XXKtYDt8oTy
iX8m/Ea3QtC9r+Il8Zj5AXWVgVjldLPKDVRb8ByhFjuaw5HqovfPiL2ZYcSt7w5Z
GRb8VD2HAqp3B6+2RzOVRRQrp7TwYhw3YGsNggqDdpjv7i4ViZHD2sUbO/1GISaP
PfiISqAoySN2TwCnqMFc6Y+iXlmHe5N44O37LzDg/lVRkEul47ifVVfF868xHzWo
4WGXdZLHq+x0kUNjhrfU3fpbmIAAkrSypo9Pbup6acv7fqrFmLcjv5Ueg9HJiKva
ar11ZIq1jw6ziQIzBBABCgAdFiEEgOl28UpQikjpyj/pvDciUsoc+WQFAmPL2KMA
CgkQvDciUsoc+WQ71A/+LtoZSPhQnpVJPq08M8KNShaUeQEUCh4ZKITWAOm5NXUN
J7833/5plypgmUJUwuXtwkCvVFup+LyZIptbzALDxLkseIY4lau3kEfeT6JvsIS/
SvgjUBPkX6h0i3Lg0Ggfiv+3Nf0+bsGAS7Ti6I0/6gpeA013M08uUdpcJDSu1OtC
CdoWD5KvOAAuU06/Q2L37LOColsC6Z5frg3aBaDmScBJc5C7PSZA4hNOimqv4iZQ
x300KOFH1OhyBRZOd1bW8atQooI/JEhjh1dJdIaOgyjPBXFJ8pYY2Y9Ms0Oa3ppr
XNa0XCYgEcT5rYZEFup29H1+JFjTcYqecwLUycYGH3MnqRdqriZwiHUK0Ui/MpiP
lS2Dkb/2Cz6iWMpJSAtvEetCVgSMpGsTlFgKjcsBN60UmvebmW7zajXOmgFU5cHT
UoGmbNo39iK7fgQH/WcpSCr+bMwrSq6L4AAWIR2Tr6xEbDJQKgh33aEzsgU2OVw+
qJKQL4XicWki0ul/Q94zltobRA86iqxh7+spfYBYCaCMYB5lIlDFfHLW62cim36Y
XrBt+p6VyB3JGevXM4up7bnumFc90YDj0dsh6q55+BA0JPWxPPPAWQe5CiLmd7+h
x5xAJ85+1ztFSz91w4VaQ9jOoEb5IC8uayLyX9GM646umFZCVqrKyHHHjhsh84aJ
AlUEEAEKAD8WIQT7+r21QbXclVvZum7bFs9bsSUlxAUCY8vtKSEaaHR0cDovL2dw
Zy5nYW5uZWZmLmRlL3BvbGljeS50eHQACgkQ2xbPW7ElJcS84Q//eh+yOPIQqTF/
ncxGJpen5pCCMs0dVo9dP9EJ7xc2eSSJ0VhJd9dfpJqTMUqljp/zPeDiRRlhpZjM
SXYg0EMMt2vbZ9g1S9cSbYU7Alogvp6VleK33hDuSoLabHETG78pSpq2YmGCUn47
AyW7zdsWV0lM0kiBhJxuWjl8B+pmXzSJFqm63JPB9zHndLxuNay42UnLsDTi7B26
BNKebQrB5ZioOe/IhpnHoxF8v5sdSIIvYKd/vRE5Za/uYy+2cMmjjLQD6IX/f9yJ
Dc+sqehW4/DgJgU7cq2lBJM+35AuUDI86MqzG/2BwtKnttX8FKy79FIAMAv6Sf3r
QoyOcfSjeSe3FF5DD1ISR/Iyfjo/WZ/my59KADqwEMcwd3QpcQwRIXtDE1LUezWQ
AbWd5caY3d0jZocG4KrDThkokLsl/kMkmbTO8C6oJdVv+g2AD2MHGBRzStDBzNLK
mcuOq2UtlP03ACl5YcYY6AY7Way5Cz8o99l2frgVHf6THscxjRn3cxH4PXbOeOn+
GTyk0PCqcyUBs6Rz/tO2NAgyzQlf/6lD8pIoSFHm/TEequeZZKAiGTodIQLS0a8G
KZpGmVsjtbXSzu78CUdjucsdUbawfXQ4Yy7klV18m9EQjiWrVMBYX8nnkyEvAsfM
4yl9/yOV8Y9Q/NEe+wZjshO1AikB+1W5Ag0EY8vQFQEQAOUiKRLuENTs8bri0Xm8
5N1RIG6Lfoc+h7S3vB+hu2QMLMqybyVXLPsMCCj4iSPrMXuhwzu3w+s3xvRzZ01H
DkYNxUzF00QLTr8F67vyZadysf9gytYFuVJgMRBxRGlke3IxT0LknAIlPX4Dys5P
+6QdOZtkm9H8OEUzGXkkBQGpibYzNGj7IIJOcNci49L4GM/kyznDFnUB8QfHD7pB
j/m8apGGmUjvwPUOgVtFJR7XufclIHkJCeo4l+pppdeQTg8uZ2elWIqENAZ0Cbj6
WL+y2oW/DhlmDuFHkgvf/hKlcTtQMGIH22ZNQKjjeqKoVTnj2JF3gQy8xJQ+9nc/
YZD3XRIDCKtMvs0ZBxwWgoYHY3E8zRhE/yxyquAX/u8BTaIS4O3w5tl1tl6Dv2sI
NjXrb8FTAcwe4tuo5xtJgSrYk4SdbUIoh2Mgn28mw4IavP0HNM3aFQa/Fl6Y/VkG
LICor1UTe3+9dvTAHkjw0LbHuq9geUiuDqR5+hZd+SBGTCdimZfTLC0sXa3dTvF8
NiSxB3yQ//TblgJh4HS37Q4OIMc2UWeZURTlvHYv0fDtIKUCc6hl0Ip3eaGteXgO
VzrU20CecHJtY2wUhckE4lxMhfU9h1wEDsE8GB6umABhUQt6uFm6SyEBaaapoBeb
/xyGhJ5YR1+cFSm+2Z2AbwC3ABEBAAGJBHIEGAEKACYWIQS4uAtbYj6rath3XEW3
xdfWNQlH+AUCY8vQFQIbAgUJDwmcAAJACRC3xdfWNQlH+MF0IAQZAQoAHRYhBEy1
AZAge0dYo/c6eW7Q57gmQ+ExBQJjy9AVAAoJEG7Q57gmQ+Ex4W4QAMeM6oUrpKYD
ABPknMOQpT6iQo/sQlfPxVhiAp1XGzKoR+MxzGHn2W4LJ82RCyXLyKbPdW2yJ2tB
+/ZLOO8bwOp6gbSzOSTb1fCBztIINd75dKm+leGvUlr3Ot2HRyvZDnoqb6MDO3VE
rbnvz3AhtYg4KGMHyDjIvJisjg0ZyAsdSSXEMqHYmUaA+KXL4UbUKQP5K+VdKwqU
yHLIq38azfEIfwYyv3br9IKtBWyjyiHQ9EqzeoJv/pC/ClcktKYdKyZrwZPiIVBb
Lg//hkWIU3MSxsvHfcmra/xxfx3ws0aN5Cs+FbeQkEh4Np5MwQqRQSiHY2bKT0Ip
XHOtOk+h/aCIGmPLIhsnazUbsyy+G/HIgjEkvUYP+7fW6wPewXNJDZjrgfL202Jh
Gyt5aGJOFLEfYmPSFa1LKXamaNgHKC9FtLGOS/fC4T1QkS94WLtq7Igseea3Cm0c
iDn3aA6moCNxUcxG235Ck0MQ4J5kiaGn6sfJ63it0J138CWQEjTt9HvKBZ/w7ynb
rZxK5M4iY+pUjfwLtanKKK+H4HW4gQqVmByaWOntfaRVCWfkAIDISn82W2IpgKRk
UYn6YwLXO5k/hB+6X+D/BSQF4WKs6C5MSLP8o8uBfnaBTDYPi5Hq2YN+jxsD0kij
+0/KrPy+EyO7pQJVdRT1INW4y2JWNwfIJ5oP/RhXmcjs7rZyFL1JUxJ4giENi4Ku
MRu0RcZYywO8y08r/ZNKm0FBZBRJ0elYR5Ca0KdFMFDay9H7AYFcxMjylgMA0G2k
QHFG6En4GY9dZoCXlTEkiB8xChDASlb5xIU9VKGCyojVMLh/ety8a1pAFrj9ygCw
fWZCI4u6lSoM3ENhokJHKaf722B+9eQGZa9LXq5RwcNJ5o8Qpd8zn6sb6Xs9vGK5
jw2xjWbGL70PFqEm895xTMS3P+x8ALaZ9Ktnux76eA0a4edmn8hWa1puSMjOe4Hx
P+YILIGNIELJTYK5+cA/X9IUTOTkeWAzVb8czNjDK/sA3+VZS0fPFbPW4NPs8BMm
y/uB/s5Xuyj+Ypircp8/LyPic+dmHgFRH6+5J+hNGCAin+at1i9sgC0rJhqcL7Ho
77HowuIQQppL6PUPcF8CNM4QNcgVW+53DeBeaXNLq10ZrTKL6O0aK4pez+0hsL00
1KwTBrgaHop5AYuqacWMguD4Qvthqzl/3W5+YdOPMwyzxuniMq04Ns9AHFE9DgxS
0s1mwd/orTk0/IHZpFQ8/0UsG7pmq/tiRP49LV/G4KuDDJvpbMLs6l1b0weFUE/7
kE8TE9mZVGXyjW3m/MGDGEOBsT64HZLsduljYFW5tVTbaVKSKMqSLrhCZxSenzgQ
NlB2T6bKGcYGqL7L
=UUyy
-----END PGP PUBLIC KEY BLOCK-----

View file

@ -10,13 +10,8 @@ server {
ssl_certificate /var/lib/dehydrated/certs/${server_name}/fullchain.pem;
ssl_certificate_key /var/lib/dehydrated/certs/${server_name}/privkey.pem;
location /api/ {
proxy_pass https://127.0.0.1:5665/;
proxy_http_version 1.1;
}
location / {
return 302 /icingaweb2/index.php;
location = / {
return 302 /icingaweb2/;
}
location ~ ^/icingaweb2/index\.php(.*)$ {
@ -33,4 +28,9 @@ server {
index index.php;
try_files $1 $uri $uri/ /icingaweb2/index.php$is_args$args;
}
location /api/ {
proxy_pass https://127.0.0.1:5665/;
proxy_http_version 1.1;
}
}

31
data/mailman/vhost.conf Normal file
View file

@ -0,0 +1,31 @@
upstream mailman3 {
server unix:/run/mailman3-web/uwsgi.sock fail_timeout=0;
}
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name ${server_name};
ssl_certificate /var/lib/dehydrated/certs/${server_name}/fullchain.pem;
ssl_certificate_key /var/lib/dehydrated/certs/${server_name}/privkey.pem;
server_tokens off;
location / {
uwsgi_pass mailman3;
include /etc/nginx/params/uwsgi;
}
location /mailman3/static {
alias /var/lib/mailman3/web/static;
}
location /mailman3/static/favicon.ico {
alias /var/lib/mailman3/web/static/postorius/img/favicon.ico;
}
# return 301 https://$server_name$request_uri;
access_log /var/log/nginx/mailman3/access.log combined;
error_log /var/log/nginx/mailman3/error.log;
}

View file

@ -1,31 +1,21 @@
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name ${server_name};
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name ${server_name};
ssl_certificate /etc/letsencrypt/archive/${server_name}/fullchain1.pem;
ssl_certificate_key /etc/letsencrypt/archive/${server_name}/privkey1.pem;
ssl_certificate /var/lib/dehydrated/certs/${server_name}/fullchain.pem;
ssl_certificate_key /var/lib/dehydrated/certs/${server_name}/privkey.pem;
root /var/www/yourls/htdocs;
root /var/www/yourls/htdocs;
location / {
index index.php index.html index.htm;
try_files $uri $uri/ /yourls-loader.php$is_args$args;
}
location / {
index index.php index.html index.htm;
try_files $uri $uri/ /yourls-loader.php$is_args$args;
}
location ~ \.php$ {
include params/fastcgi;
fastcgi_index index.php;
fastcgi_pass unix:/run/php/php${php_version}-fpm.sock;
}
# temp
location ^~ /.well-known/acme-challenge/ {
alias /var/www/certbot/;
}
}
# FIXME: this is a temporary solution to allow the certbot challenge to work:
# - ssl_certificate
# - ssl_certificate_key
location ~ \.php$ {
include params/fastcgi;
fastcgi_index index.php;
fastcgi_pass unix:/run/php/php${php_version}-fpm.sock;
}
}

View file

@ -1,5 +1,5 @@
{
'bundles': [
'bundles': {
'bind',
],
},
}

View file

@ -1,18 +0,0 @@
{
'supergroups': [
'debian',
],
'bundles': [
'systemd-networkd',
],
'metadata': {
'php': {
'version': '7.4',
},
'postgresql': {
'version': '13',
},
'os_codename': 'bullseye',
},
'os_version': (11,),
}

View file

@ -0,0 +1,26 @@
{
'metadata': {
'apt': {
'sources': {
'debian': {
'components': {
'non-free-firmware',
},
},
'debian-security': {
'components': {
'non-free-firmware',
},
},
},
},
'php': {
'version': '8.2',
},
'postgresql': {
'version': '15',
},
'os_codename': 'bookworm',
},
'os_version': (12,),
}

33
groups/os/debian-13.py Normal file
View file

@ -0,0 +1,33 @@
{
'supergroups': [
'debian',
'debian-13-common',
],
'bundles': [
'systemd-networkd',
],
'metadata': {
'apt': {
'sources': {
'debian': {
'components': {
'non-free-firmware',
},
},
'debian-security': {
'components': {
'non-free-firmware',
},
},
},
},
'php': {
'version': '8.4',
},
'postgresql': {
'version': '17',
},
'os_codename': 'trixie',
},
'os_version': (13,),
}

44
groups/os/routeros.py Normal file
View file

@ -0,0 +1,44 @@
# https://ftp-master.debian.org/keys.html
{
'username': 'admin',
'supergroups': [
'all',
],
'bundles': [
'routeros',
],
'metadata': {
'routeros': {
'gateway': '10.0.0.1',
'bridge_priority': '0x8000',
'ports': {},
'vlans': {
'home': '1',
'iot': '2',
'internet': '3',
'proxmox': '4',
'gast': '9',
'rolf': '51',
},
'vlan_groups': {
'infra': {
'untagged': 'home',
'tagged': {
'iot',
'internet',
'proxmox',
'gast',
'rolf',
},
},
'internet': {
'untagged': 'internet',
'tagged': set(),
},
},
'vlan_ports': {},
},
},
'os': 'routeros',
}

30
hass_get_temp.py Normal file
View file

@ -0,0 +1,30 @@
#! /usr/bin/env python3
import requests
from datetime import datetime, timedelta, timezone
BASE = "https://homeassistant.ckn.li"
TOKEN = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiI1YjY0ZWE5N2FiMzM0NTQ0OGMyNjhmZTIxYzAxZTE1MSIsImlhdCI6MTc1NjAzOTAxNCwiZXhwIjoyMDcxMzk5MDE0fQ.X-sQli-NTpCjeXpn19zf-maPRDldkSeTuhKZua1k8uM"
ENTITY = "sensor.hue_outdoor_motion_sensor_2_temperature"
HEADERS = {
"Authorization": f"Bearer {TOKEN}",
"Content-Type": "application/json",
}
begin = datetime(2025, 7, 1, 0, 0, 0, tzinfo=timezone.utc)
current = begin
now = datetime.now(timezone.utc)
while current < now:
current += timedelta(hours=1)
resp = requests.get(
f"{BASE}/api/history/period/{current.isoformat()}",
params={
"end_time": current.isoformat(),
"filter_entity_id": ENTITY
},
headers=HEADERS,
timeout=15,
)
print(current, resp.json())

View file

@ -3,7 +3,7 @@
'groups': [
'autologin',
'backup-server',
'debian-12',
'debian-13',
'home',
'monitored',
],
@ -18,32 +18,26 @@
'id': '9cf52515-63a1-4659-a8ec-6c3c881727e5',
'network': {
'internal': {
'interface': 'enp0s31f6',
'interface': 'enp1s0f0',
'ipv4': '10.0.0.5/24',
'gateway4': '10.0.0.1',
'mac': '98:b7:85:01:ca:a6',
},
'wakeonlan': {
'interface': 'enp0s31f6',
'ipv4': '10.0.0.6/24',
'mac': '4c:cc:6a:d5:96:f8',
},
},
'backup-server': {
'hostname': 'backups.sublimity.de',
},
# 'smartctl': {
# '/dev/disk/by-id/ata-HGST_HDN726040ALE614_K3GV6TPL': {
# 'apm': 1,
# },
# '/dev/disk/by-id/ata-HGST_HDN726040ALE614_K4KAJXEB': {
# 'apm': 1,
# },
# '/dev/disk/by-id/ata-TOSHIBA_HDWQ140_19VZK0EMFAYG': {
# 'apm': 1,
# },
# },
'ssh': {
# multipling prevents server from sleeping
'multiplex_incoming': False,
},
'wol-sleeper': {
'network': 'internal',
'network': 'wakeonlan',
'waker': 'home.server',
},
'zfs-mirror': {

View file

@ -0,0 +1,19 @@
{
'hostname': '10.0.0.150',
'bundles': [
'bootshorn',
'systemd',
'systemd-timers',
],
'metadata': {
'id': '25c6f3fd-0d32-42c3-aeb3-0147bc3937c7',
'network': {
'internal': {
'ipv4': '10.0.0.150/24',
'mac': 'd6:d8:61:33:f2:05',
},
},
},
}
# rsync -avh --progress -e 'ssh -o "StrictHostKeyChecking no"' 10.0.0.150:/opt/bootshorn/recordings /hdd/bootshorn

View file

@ -1,28 +1,6 @@
{
'dummy': True,
'hostname': '10.0.0.16',
'groups': [
'webserver',
'backup',
'monitored',
'raspberry-pi',
'autologin',
],
'bundles': [
'apt',
'homeassistant-supervised',
'hostname',
'hosts',
'htop',
'users',
'ssh',
'sudo',
'locale',
'zsh',
'zfs',
'systemd',
'systemd-timers',
'systemd-journald',
],
'metadata': {
'id': '3d67964d-1270-4d3c-b93f-9c44219b3d59',
'network': {
@ -33,80 +11,39 @@
'gateway4': '10.0.0.1',
},
},
'apt': {
'sources': {
'debian': {
'urls': {
'https://deb.debian.org/debian',
},
'suites': {
'{codename}',
'{codename}-updates',
},
'components': {
'main',
'contrib',
'non-free',
'non-free-firmware',
},
'key': 'debian-{version}',
},
'debian-security': {
'urls': {
'http://security.debian.org/debian-security',
},
'suites': {
'{codename}-security',
},
'components': {
'main',
'contrib',
'non-free',
'non-free-firmware',
},
'key': 'debian-{version}-security',
'dns': {
'homeassistant.ckn.li': {
'A': {
'10.0.0.16',
},
},
},
'hosts': {
'10.0.10.2': [
'resolver.name',
'secondary.resolver.name',
],
},
'letsencrypt': {
'acme_node': 'htz.mails',
},
'homeassistant': {
'domain': 'homeassistant.ckn.li',
'os_agent_version': '1.6.0',
},
'nameservers': {
'10.0.10.2',
},
'users': {
'ckn': {
'shell': '/usr/bin/zsh',
'authorized_keys': {
'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILMVroYmswD4tLk6iH+2tvQiyaMe42yfONDsPDIdFv6I ckn',
},
},
},
'sudoers': {
'ckn': {'ALL'},
},
'zfs': {
'pools': {
'tank': {
'devices': [
'/var/lib/zfs/tank.img',
],
},
},
},
'os_codename': 'bookworm',
},
'os': 'debian',
'os_version': (12,),
'pip_command': 'pip3',
}
# LETSENCRYPT
# - cant use the letsencrypt addon, because it doesnt suppeort supplying a different zone (which would be acme.sublimity.de)
# Advanced SSH & Web Terminal:
# username: root
# password: ""
# authorized_keys:
# - >-
# ssh-ed25519
# AAAAC3NzaC1lZDI1NTE5AAAAIJT9Spe+BYue7iiutl3rSf6PlU6dthHizyK+ZWnLodrA
# root@home.server
# - >-
# ssh-ed25519
# AAAAC3NzaC1lZDI1NTE5AAAAILMVroYmswD4tLk6iH+2tvQiyaMe42yfONDsPDIdFv6I ckn
# sftp: true
# compatibility_mode: false
# allow_agent_forwarding: false
# allow_remote_port_forwarding: false
# allow_tcp_forwarding: false
# add to /homeassistant/configuration.yaml:
# http:
# http_port: 443 # or use nginx addon
# ssl_certificate: /ssl/fullchain.pem
# ssl_key: /ssl/privkey.pem

View file

@ -1,6 +1,6 @@
{
'dummy': True,
'hostname': '10.0.2.100',
'hostname': '10.0.0.143',
'groups': [
'home',
],
@ -8,13 +8,13 @@
'id': '87879bc1-130f-4fca-a8d2-e1d93a794df4',
'network': {
'internal': {
'ipv4': '10.0.2.100/24',
'ipv4': '10.0.0.143/24',
'mac': '00:17:88:67:e7:f2',
},
},
'dns': {
'hue.ckn.li': {
'A': {'10.0.2.100'},
'A': {'10.0.0.143'},
},
},
},

Some files were not shown because too many files have changed in this diff Show more