Compare commits


No commits in common. "master" and "htz.mails_debian_13_squash" have entirely different histories.

103 changed files with 658 additions and 7379 deletions

.gitignore vendored
View file

@ -2,4 +2,3 @@
.venv
.cache
*.pyc
.bw_debug_history

View file

@ -1,148 +0,0 @@
#!/usr/bin/env python3
from argparse import ArgumentParser
from time import sleep
from bundlewrap.exceptions import RemoteException
from bundlewrap.utils.cmdline import get_target_nodes
from bundlewrap.utils.ui import io
from bundlewrap.repo import Repository
from os.path import realpath, dirname
# parse args
parser = ArgumentParser()
parser.add_argument("targets", nargs="*", default=['bundle:routeros'], help="bw nodes selector")
parser.add_argument("--yes", action="store_true", default=False, help="skip confirmation prompts")
args = parser.parse_args()
def wait_up(node):
sleep(5)
while True:
try:
node.run_routeros('/system/resource/print')
except RemoteException:
sleep(2)
continue
else:
io.debug(f"{node.name}: is up")
sleep(10)
return
def upgrade_switch_os(node):
# get versions for comparison
with io.job(f"{node.name}: checking OS version"):
response = node.run_routeros('/system/package/update/check-for-updates').raw[-1]
installed_os = bw.libs.version.Version(response['installed-version'])
latest_os = bw.libs.version.Version(response['latest-version'])
io.debug(f"{node.name}: installed: {installed_os} >= latest: {latest_os}")
# compare versions
if installed_os >= latest_os:
# os is up to date
io.stdout(f"{node.name}: os up to date ({installed_os})")
else:
# confirm os upgrade
if not args.yes and not io.ask(
f"{node.name}: upgrade os from {installed_os} to {latest_os}?", default=True
):
io.stdout(f"{node.name}: skipped by user")
return
# download os
with io.job(f"{node.name}: downloading OS"):
response = node.run_routeros('/system/package/update/download').raw[-1]
io.debug(f"{node.name}: OS upgrade download response: {response['status']}")
# install and wait for reboot
with io.job(f"{node.name}: upgrading OS"):
try:
response = node.run_routeros('/system/package/update/install').raw[-1]
except RemoteException:
pass
wait_up(node)
# verify new os version
with io.job(f"{node.name}: checking new OS version"):
new_os = bw.libs.version.Version(node.run_routeros('/system/package/update/check-for-updates').raw[-1]['installed-version'])
if new_os == latest_os:
io.stdout(f"{node.name}: OS successfully upgraded from {installed_os} to {new_os}")
else:
raise Exception(f"{node.name}: OS upgrade failed, expected {latest_os}, got {new_os}")
def upgrade_switch_firmware(node):
# get versions for comparison
with io.job(f"{node.name}: checking Firmware version"):
response = node.run_routeros('/system/routerboard/print').raw[-1]
current_firmware = bw.libs.version.Version(response['current-firmware'])
upgrade_firmware = bw.libs.version.Version(response['upgrade-firmware'])
io.debug(f"{node.name}: firmware installed: {current_firmware}, upgrade: {upgrade_firmware}")
# compare versions
if current_firmware >= upgrade_firmware:
# firmware is up to date
io.stdout(f"{node.name}: firmware is up to date ({current_firmware})")
else:
# confirm firmware upgrade
if not args.yes and not io.ask(
f"{node.name}: upgrade firmware from {current_firmware} to {upgrade_firmware}?", default=True
):
io.stdout(f"{node.name}: skipped by user")
return
# upgrade firmware
with io.job(f"{node.name}: upgrading Firmware"):
node.run_routeros('/system/routerboard/upgrade')
# reboot and wait
with io.job(f"{node.name}: rebooting"):
try:
node.run_routeros('/system/reboot')
except RemoteException:
pass
wait_up(node)
# verify firmware version
new_firmware = bw.libs.version.Version(node.run_routeros('/system/routerboard/print').raw[-1]['current-firmware'])
if new_firmware == upgrade_firmware:
io.stdout(f"{node.name}: firmware successfully upgraded from {current_firmware} to {new_firmware}")
else:
raise Exception(f"firmware upgrade failed, expected {upgrade_firmware}, got {new_firmware}")
def upgrade_switch(node):
with io.job(f"{node.name}: checking"):
# check if routeros
if node.os != 'routeros':
io.progress_advance(2)
io.stdout(f"{node.name}: skipped, unsupported os {node.os}")
return
# check switch reachability
try:
node.run_routeros('/system/resource/print')
except RemoteException as error:
io.progress_advance(2)
io.stdout(f"{node.name}: skipped, error {error}")
return
upgrade_switch_os(node)
io.progress_advance(1)
upgrade_switch_firmware(node)
io.progress_advance(1)
with io:
bw = Repository(dirname(dirname(realpath(__file__))))
nodes = get_target_nodes(bw, args.targets)
io.progress_set_total(len(nodes) * 2)
io.stdout(f"upgrading {len(nodes)} switches: {', '.join([node.name for node in sorted(nodes)])}")
for node in sorted(nodes):
upgrade_switch(node)
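For reference, a typical invocation of the upgrade script above might look like this (the path bin/upgrade-routeros and the node name are placeholders for illustration, not taken from the diff):

bin/upgrade-routeros --yes                 # upgrade everything matching the default selector bundle:routeros
bin/upgrade-routeros home.core-switch      # upgrade a single switch, confirming each step interactively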

View file

@ -1,22 +0,0 @@
#!/usr/bin/env python3
from bundlewrap.repo import Repository
from os.path import realpath, dirname
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('node', help='Node to generate passwords for')
args = parser.parse_args()
bw = Repository(dirname(dirname(realpath(__file__))))
node = bw.get_node(args.node)
if node.password:
print(f"password: {node.password}")
for metadata_key in sorted([
'users/root/password',
]):
if value := node.metadata.get(metadata_key, None):
print(f"{metadata_key}: {value}")

View file

@ -3,4 +3,4 @@
from bundlewrap.repo import Repository
from os.path import realpath, dirname
bw = Repository(dirname(dirname(realpath(__file__))))
repo = Repository(dirname(dirname(realpath(__file__))))

View file

@ -1,132 +0,0 @@
#!/usr/bin/env python3
from bundlewrap.repo import Repository
from os.path import realpath, dirname
import json
import os
import subprocess
from dataclasses import dataclass
from typing import Optional, List
bw = Repository(dirname(dirname(realpath(__file__))))
VAULT=bw.vault.decrypt('encrypt$gAAAAABpLgX_xxb5NmNCl3cgHM0JL65GT6PHVXO5gwly7IkmWoEgkCDSuAcSAkNFB8Tb4RdnTdpzVQEUL1XppTKVto_O7_b11GjATiyQYiSfiQ8KZkTKLvk=').value
BW_TAG = "bw"
BUNDLEWRAP_FIELD_LABEL = "bundlewrap node id"
@dataclass
class OpResult:
stdout: str
stderr: str
returncode: int
def main():
for node in bw.nodes_in_group('routeros'):
upsert_node_item(
node_name=node.name,
node_uuid=node.metadata.get('id'),
username=node.username,
password=node.password,
url=f'http://{node.hostname}',
)
def run_op(args):
proc = subprocess.run(
["op", "--vault", VAULT] + args,
env=os.environ.copy(),
capture_output=True,
text=True,
)
if proc.returncode != 0:
raise RuntimeError(
f"op {' '.join(args)} failed with code {proc.returncode}:\n"
f"STDOUT:\n{proc.stdout}\n\nSTDERR:\n{proc.stderr}"
)
return OpResult(stdout=proc.stdout, stderr=proc.stderr, returncode=proc.returncode)
def op_item_list_bw():
out = run_op([
"item", "list",
"--tags", BW_TAG,
"--format", "json",
])
stdout = out.stdout.strip()
return json.loads(stdout) if stdout else []
def op_item_get(item_id):
args = ["item", "get", item_id, "--format", "json"]
return json.loads(run_op(args).stdout)
def op_item_create(title, node_uuid, username, password, url):
print(f"creating {title}")
return json.loads(run_op([
"item", "create",
"--category", "LOGIN",
"--title", title,
"--tags", BW_TAG,
"--url", url,
"--format", "json",
f"username={username}",
f"password={password}",
f"{BUNDLEWRAP_FIELD_LABEL}[text]={node_uuid}",
]).stdout)
def op_item_edit(item_id, title, username, password, url):
print(f"updating {title}")
return json.loads(run_op([
"item", "edit",
item_id,
"--title", title,
"--url", url,
"--format", "json",
f"username={username}",
f"password={password}",
]).stdout)
def find_node_item_id(node_uuid):
for summary in op_item_list_bw():
item_id = summary.get("id")
if not item_id:
continue
item = op_item_get(item_id)
for field in item.get("fields") or []:
label = field.get("label")
value = field.get("value")
if label == BUNDLEWRAP_FIELD_LABEL and value == node_uuid:
return item_id
return None
def upsert_node_item(node_name, node_uuid, username, password, url):
if item_id := find_node_item_id(node_uuid):
return op_item_edit(
item_id=item_id,
title=node_name,
username=username,
password=password,
url=url,
)
else:
return op_item_create(
title=node_name,
node_uuid=node_uuid,
username=username,
password=password,
url=url,
)
if __name__ == "__main__":
main()
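As a quick usage sketch of the functions defined above (the UUID is a made-up placeholder; main() simply runs the upsert for every node in the routeros group):

# check whether a 1Password item already exists for a given bundlewrap node UUID,
# without creating or editing anything
item_id = find_node_item_id("00000000-0000-0000-0000-000000000000")
if item_id is None:
    print("no item yet - upsert_node_item() would create one")
else:
    print(f"existing item {item_id} would be updated via op_item_edit()")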

View file

@ -4,21 +4,20 @@ from bundlewrap.repo import Repository
from os.path import realpath, dirname
from sys import argv
from ipaddress import ip_network, ip_interface
import argparse
if len(argv) != 3:
print(f'usage: {argv[0]} <node> <client>')
exit(1)
# get info from repo
repo = Repository(dirname(dirname(realpath(__file__))))
server_node = repo.get_node('htz.mails')
available_clients = server_node.metadata.get('wireguard/clients').keys()
server_node = repo.get_node(argv[1])
# parse args
parser = argparse.ArgumentParser(description='Generate WireGuard client configuration.')
parser.add_argument('client', choices=available_clients, help='The client name to generate the configuration for.')
args = parser.parse_args()
if argv[2] not in server_node.metadata.get('wireguard/clients'):
print(f'client {argv[2]} not found in: {server_node.metadata.get("wireguard/clients").keys()}')
exit(1)
data = server_node.metadata.get(f'wireguard/clients/{argv[2]}')
# get cert
data = server_node.metadata.get(f'wireguard/clients/{args.client}')
vpn_network = ip_interface(server_node.metadata.get('wireguard/my_ip')).network
allowed_ips = [
vpn_network,
@ -44,15 +43,10 @@ Endpoint = {ip_interface(server_node.metadata.get('network/external/ipv4')).ip}:
PersistentKeepalive = 10
'''
answer = input("print config or qrcode? [Cq]: ").strip().upper()
match answer:
case '' | 'C':
print('>>>>>>>>>>>>>>>')
print(conf)
print('<<<<<<<<<<<<<<<')
case 'Q':
print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
print(conf)
print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
if input("print qrcode? [Yn]: ").upper() in ['', 'Y']:
import pyqrcode
print(pyqrcode.create(conf).terminal(quiet_zone=1))
case _:
print(f'Invalid option "{answer}".')
exit(1)
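A hypothetical invocation of the script above, assuming it is saved as bin/wireguard-client-config and that 'phone' is one of the configured names under wireguard/clients (both are placeholders):

bin/wireguard-client-config htz.mails phone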

View file

@ -13,14 +13,16 @@ defaults = {
},
},
'telegraf': {
'config': {
'inputs': {
'exec': {
'apcupsd': {
repo.libs.hashable.hashable({
'commands': ["sudo /usr/local/share/telegraf/apcupsd"],
'name_override': "apcupsd",
'data_format': "influx",
'interval': '30s',
'flush_interval': '30s',
}),
},
},
},
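repo.libs.hashable.hashable is repo-internal and its implementation is not part of this diff; the calls above only require that it turns a plain dict into something hashable so it can be stored in set-style bundle metadata. A minimal sketch of such a helper, as an assumption rather than the repo's actual code:

def hashable(obj):
    # recursively convert dicts, lists and sets into hashable tuples/frozensets
    if isinstance(obj, dict):
        return tuple(sorted((key, hashable(value)) for key, value in obj.items()))
    if isinstance(obj, (list, tuple)):
        return tuple(hashable(item) for item in obj)
    if isinstance(obj, set):
        return frozenset(hashable(item) for item in obj)
    return obj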

View file

@ -49,13 +49,13 @@ defaults = {
},
},
'telegraf': {
'config': {
'inputs': {
'bind': {
'default': {
'bind': [{
'urls': ['http://localhost:8053/xml/v3'],
'gather_memory_contexts': False,
'gather_views': True,
},
}],
},
},
},

View file

@ -112,11 +112,6 @@ def process_recording(filename):
sample_num += samples_per_block - overlapping_samples
# move to PROCESSED_RECORDINGS_DIR
os.makedirs(PROCESSED_RECORDINGS_DIR, exist_ok=True)
shutil.move(os.path.join(RECORDINGS_DIR, filename), os.path.join(PROCESSED_RECORDINGS_DIR, filename))
# write a spectrogram using the sound from start to end of the event
def write_event(current_event, soundfile, samplerate):

View file

@ -19,7 +19,5 @@ do
-t "3600" \
-c:a flac \
-compression_level 12 \
"recordings/current/$DATE.flac"
mv "recordings/current/$DATE.flac" "recordings/$DATE.flac"
"recordings/$DATE.flac"
done

View file

@ -8,7 +8,7 @@ urllib3.disable_warnings()
import os
HUE_IP = "${hue_ip}" # replace with your bridge IP
HUE_IP = "10.0.0.134" # replace with your bridge IP
HUE_APP_KEY = "${hue_app_key}" # local only
HUE_DEVICE_ID = "31f58786-3242-4e88-b9ce-23f44ba27bbe"
TEMPERATURE_LOG_DIR = "/opt/bootshorn/temperatures"

View file

@ -7,15 +7,11 @@ directories = {
'owner': 'ckn',
'group': 'ckn',
},
'/opt/bootshorn/temperatures': {
'owner': 'ckn',
'group': 'ckn',
},
'/opt/bootshorn/recordings': {
'owner': 'ckn',
'group': 'ckn',
},
'/opt/bootshorn/recordings/current': {
'/opt/bootshorn/temperatures': {
'owner': 'ckn',
'group': 'ckn',
},
@ -38,7 +34,6 @@ files = {
'/opt/bootshorn/temperature': {
'content_type': 'mako',
'context': {
'hue_ip': repo.get_node('home.hue').hostname,
'hue_app_key': repo.vault.decrypt('encrypt$gAAAAABoc2WxZCLbxl-Z4IrSC97CdOeFgBplr9Fp5ujpd0WCCCPNBUY_WquHN86z8hKLq5Y04dwq8TdJW0PMSOSgTFbGgdp_P1q0jOBLEKaW9IIT1YM88h-JYwLf9QGDV_5oEfvnBCtO'),
},
'owner': 'ckn',

View file

@ -27,7 +27,7 @@ def ssh_keys(metadata):
'users': {
'build-agent': {
'authorized_users': {
f'build-server@{other_node.name}': {}
f'build-server@{other_node.name}'
for other_node in repo.nodes
if other_node.has_bundle('build-server')
for architecture in other_node.metadata.get('build-server/architectures').values()

View file

@ -14,7 +14,7 @@ def ssh_keys(metadata):
'users': {
'build-ci': {
'authorized_users': {
f'build-server@{other_node.name}': {}
f'build-server@{other_node.name}'
for other_node in repo.nodes
if other_node.has_bundle('build-server')
},

View file

@ -8,7 +8,6 @@ defaults = {
'sources': {
'crystal': {
# https://software.opensuse.org/download.html?project=devel%3Alanguages%3Acrystal&package=crystal
# curl -fsSL https://download.opensuse.org/repositories/devel:/languages:/crystal/Debian_Testing/Release.key
'urls': {
'http://download.opensuse.org/repositories/devel:/languages:/crystal/Debian_Testing/',
},

View file

@ -57,7 +57,7 @@ def ssh_keys(metadata):
'users': {
'downloads': {
'authorized_users': {
f'build-server@{other_node.name}': {}
f'build-server@{other_node.name}'
for other_node in repo.nodes
if other_node.has_bundle('build-server')
},

View file

@ -127,7 +127,7 @@ for dashboard_id, monitored_node in enumerate(monitored_nodes, start=1):
panel['gridPos']['y'] = (row_id - 1) * panel['gridPos']['h']
if 'display_name' in panel_config:
panel['fieldConfig']['defaults']['displayName'] = panel_config['display_name']
panel['fieldConfig']['defaults']['displayName'] = '${'+panel_config['display_name']+'}'
if panel_config.get('stacked'):
panel['fieldConfig']['defaults']['custom']['stacking']['mode'] = 'normal'
@ -158,14 +158,13 @@ for dashboard_id, monitored_node in enumerate(monitored_nodes, start=1):
host=monitored_node.name,
negative=query_config.get('negative', False),
boolean_to_int=query_config.get('boolean_to_int', False),
over=query_config.get('over', None),
minimum=query_config.get('minimum', None),
filters={
'host': monitored_node.name,
**query_config['filters'],
},
exists=query_config.get('exists', []),
function=query_config.get('function', None),
multiply=query_config.get('multiply', None),
).strip()
})
@ -179,3 +178,4 @@ for dashboard_id, monitored_node in enumerate(monitored_nodes, start=1):
'svc_systemd:grafana-server:restart',
]
}

View file

@ -2,7 +2,7 @@ files = {
'/usr/local/share/telegraf/cpu_frequency': {
'mode': '0755',
'triggers': {
'svc_systemd:telegraf.service:restart',
'svc_systemd:telegraf:restart',
},
},
}

View file

@ -14,18 +14,17 @@ defaults = {
},
},
'telegraf': {
'config': {
'inputs': {
'sensors': {
'default': {
'sensors': {repo.libs.hashable.hashable({
'timeout': '2s',
},
},
})},
'exec': {
'cpu_frequency': {
repo.libs.hashable.hashable({
'commands': ["sudo /usr/local/share/telegraf/cpu_frequency"],
'name_override': "cpu_frequency",
'data_format': "influx",
},
}),
# repo.libs.hashable.hashable({
# 'commands': ["/bin/bash -c 'expr $(cat /sys/class/thermal/thermal_zone0/temp) / 1000'"],
# 'name_override': "cpu_temperature",
@ -35,4 +34,5 @@ defaults = {
},
},
},
},
}

View file

@ -1 +1,58 @@
https://github.com/SirPlease/L4D2-Competitive-Rework/blob/master/Dedicated%20Server%20Install%20Guide/README.md
https://developer.valvesoftware.com/wiki/List_of_L4D2_Cvars
Dead Center c1m1_hotel
Dead Center c1m2_streets
Dead Center c1m3_mall
Dead Center c1m4_atrium
Dark Carnival c2m1_highway
Dark Carnival c2m2_fairgrounds
Dark Carnival c2m3_coaster
Dark Carnival c2m4_barns
Dark Carnival c2m5_concert
Swamp Fever c3m1_plankcountry
Swamp Fever c3m2_swamp
Swamp Fever c3m3_shantytown
Swamp Fever c3m4_plantation
Hard Rain c4m1_milltown_a
Hard Rain c4m2_sugarmill_a
Hard Rain c4m3_sugarmill_b
Hard Rain c4m4_milltown_b
Hard Rain c4m5_milltown_escape
The Parish c5m1_waterfront_sndscape
The Parish c5m1_waterfront
The Parish c5m2_park
The Parish c5m3_cemetery
The Parish c5m4_quarter
The Parish c5m5_bridge
The Passing c6m1_riverbank
The Passing c6m2_bedlam
The Passing c6m3_port
The Sacrifice c7m1_docks
The Sacrifice c7m2_barge
The Sacrifice c7m3_port
No Mercy c8m1_apartment
No Mercy c8m2_subway
No Mercy c8m3_sewers
No Mercy c8m4_interior
No Mercy c8m5_rooftop
Crash Course c9m1_alleys
Crash Course c9m2_lots
Death Toll c10m1_caves
Death Toll c10m2_drainage
Death Toll c10m3_ranchhouse
Death Toll c10m4_mainstreet
Death Toll c10m5_houseboat
Dead Air c11m1_greenhouse
Dead Air c11m2_offices
Dead Air c11m3_garage
Dead Air c11m4_terminal
Dead Air c11m5_runway
Blood Harvest c12m1_hilltop
Blood Harvest c12m2_traintunnel
Blood Harvest c12m3_bridge
Blood Harvest c12m4_barn
Blood Harvest c12m5_cornfield
Cold Stream c13m1_alpinecreek
Cold Stream c13m2_southpinestream
Cold Stream c13m3_memorialbridge
Cold Stream c13m4_cutthroatcreek

View file

@ -1,13 +0,0 @@
#!/bin/bash
set -xeuo pipefail
function steam() {
# for systemd, so it can terminate the process (for other things sudo would have been enough)
export HOME=/opt/l4d2/steam  # must be set before the call so the spawned process inherits it
setpriv --reuid=steam --regid=steam --init-groups "$@" <&0
}
function workshop() {
steam mkdir -p "/opt/l4d2/overlays/${overlay}/left4dead2/addons"
steam /opt/l4d2/scripts/steam-workshop-download --out "/opt/l4d2/overlays/${overlay}/left4dead2/addons" "$@"
}

View file

@ -1,10 +0,0 @@
#!/bin/bash
set -xeuo pipefail
source /opt/l4d2/scripts/helpers
overlay=$(basename "$0")
# https://github.com/SirPlease/L4D2-Competitive-Rework
steam mkdir -p /opt/l4d2/overlays/$overlay/left4dead2
test -d /opt/l4d2/overlays/$overlay/left4dead2/cfg/cfgogl || \
curl -L https://github.com/SirPlease/L4D2-Competitive-Rework/archive/refs/heads/master.tar.gz | steam tar -xz --strip-components=1 -C /opt/l4d2/overlays/$overlay/left4dead2

View file

@ -1,128 +0,0 @@
#!/bin/bash
set -xeuo pipefail
source /opt/l4d2/scripts/helpers
overlay=$(basename "$0")
steam mkdir -p /opt/l4d2/overlays/$overlay/left4dead2/addons
cd /opt/l4d2/overlays/$overlay/left4dead2/addons
# https://l4d2center.com/maps/servers/l4d2center_maps_sync.sh.txt ->
# Exit immediately if a command exits with a non-zero status.
set -e
# Function to print error messages
error_exit() {
echo "Error: $1" >&2
exit 1
}
# Check if the current directory ends with /left4dead2/addons
current_dir=$(pwd)
expected_dir="/left4dead2/addons"
if [[ ! "$current_dir" == *"$expected_dir" ]]; then
error_exit "Script must be run from your L4D2 \"addons\" folder. Current directory: $current_dir"
fi
# Check for required commands
for cmd in curl md5sum 7z; do
if ! command -v "$cmd" >/dev/null 2>&1; then
error_exit "Required command '$cmd' is not installed. Please install it and retry."
fi
done
# URL of the CSV file
CSV_URL="https://l4d2center.com/maps/servers/index.csv"
# Temporary file to store CSV
TEMP_CSV=$(mktemp)
# Ensure temporary file is removed on exit
trap 'rm -f "$TEMP_CSV"' EXIT
echo "Downloading CSV from $CSV_URL..."
curl -sSL -o "$TEMP_CSV" "$CSV_URL" || error_exit "Failed to download CSV."
declare -A map_md5
declare -A map_links
# Read CSV and populate associative arrays
{
# Skip the first line (header)
IFS= read -r header
while IFS=';' read -r Name Size MD5 DownloadLink || [[ $Name ]]; do
# Trim whitespace
Name=$(echo "$Name" | xargs)
MD5=$(echo "$MD5" | xargs)
DownloadLink=$(echo "$DownloadLink" | xargs)
# Populate associative arrays
map_md5["$Name"]="$MD5"
map_links["$Name"]="$DownloadLink"
done
} < "$TEMP_CSV"
# Get list of expected VPK files
expected_vpk=("${!map_md5[@]}")
# Remove VPK files not in expected list or with mismatched MD5
echo "Cleaning up existing VPK files..."
for file in *.vpk; do
# Check if it's a regular file
if [[ -f "$file" ]]; then
if [[ -z "${map_md5["$file"]}" ]]; then
echo "Removing unexpected file: $file"
rm -f "$file"
else
# Calculate MD5
echo "Calculating MD5 for existing file: $file..."
current_md5=$(md5sum "$file" | awk '{print $1}')
expected_md5="${map_md5["$file"]}"
if [[ "$current_md5" != "$expected_md5" ]]; then
echo "MD5 mismatch for $file. Removing."
rm -f "$file"
fi
fi
fi
done
# Download and extract missing or updated VPK files
echo "Processing required VPK files..."
for vpk in "${expected_vpk[@]}"; do
if [[ ! -f "$vpk" ]]; then
echo "Downloading and extracting $vpk..."
download_url="${map_links["$vpk"]}"
if [[ -z "$download_url" ]]; then
echo "No download link found for $vpk. Skipping."
continue
fi
encoded_url=$(echo "$download_url" | sed 's/ /%20/g')
# Download the .7z file to a temporary location
TEMP_7Z=$(mktemp --suffix=.7z)
curl -# -L -o "$TEMP_7Z" "$encoded_url"
# Check if the download was successful
if [[ $? -ne 0 ]]; then
echo "Failed to download $download_url. Skipping."
rm -f "$TEMP_7Z"
continue
fi
# Extract the .7z file
7z x -y "$TEMP_7Z" || { echo "Failed to extract $TEMP_7Z. Skipping."; rm -f "$TEMP_7Z"; continue; }
# Remove the temporary .7z file
rm -f "$TEMP_7Z"
else
echo "$vpk is already up to date."
fi
done
echo "Synchronization complete."

View file

@ -1,25 +0,0 @@
#!/bin/bash
set -xeuo pipefail
source /opt/l4d2/scripts/helpers
overlay=$(basename "$0")
# server config
# https://github.com/SirPlease/L4D2-Competitive-Rework/blob/7ecc3a32a5e2180d6607a40119ff2f3c072502a9/cfg/server.cfg#L58-L69
# https://www.programmersought.com/article/513810199514/
steam mkdir -p /opt/l4d2/overlays/$overlay/left4dead2/cfg
steam cat <<'EOF' > /opt/l4d2/overlays/$overlay/left4dead2/cfg/server.cfg
# https://github.com/SirPlease/L4D2-Competitive-Rework/blob/7ecc3a32a5e2180d6607a40119ff2f3c072502a9/cfg/server.cfg#L58-L69
sv_minrate 100000
sv_maxrate 100000
nb_update_frequency 0.014
net_splitpacket_maxrate 50000
net_maxcleartime 0.0001
fps_max 0
EOF
# install tickrate enabler
steam mkdir -p "/opt/l4d2/overlays/${overlay}/left4dead2/addons"
for file in tickrate_enabler.dll tickrate_enabler.so tickrate_enabler.vdf
do
curl -L "https://github.com/SirPlease/L4D2-Competitive-Rework/raw/refs/heads/master/addons/${file}" -o "/opt/l4d2/overlays/${overlay}/left4dead2/addons/${file}"
done

View file

@ -1,12 +0,0 @@
#!/bin/bash
set -xeuo pipefail
source /opt/l4d2/scripts/helpers
overlay=$(basename "$0")
# Ions Vocalizer
workshop -i 698857882
# admin system
workshop --item 2524204971
steam mkdir -p "/opt/l4d2/overlays/${overlay}/left4dead2/ems/admin system"
steam echo "STEAM_1:0:12376499" > "/opt/l4d2/overlays/${overlay}/left4dead2/ems/admin system/admins.txt"

View file

@ -1,13 +0,0 @@
#!/bin/bash
set -xeuo pipefail
source /opt/l4d2/scripts/helpers
overlay=$(basename "$0")
workshop --collection 121115793 # Back To School
workshop --item 2957035482 # hehe30-part1
workshop --item 2973628334 # hehe30-part2
workshop --item 3013844371 # hehe30-part3
workshop --item 3478461158 # 虚伪黎明(Dawn's Deception)
workshop --item 3478934394 # 虚伪黎明(Dawn's Deception)PART2

View file

@ -1,13 +1,40 @@
// defaults
hostname ${server_name}
hostname "CroneKorkN : ${name}"
sv_contact "admin@sublimity.de"
sv_steamgroup "${','.join(steamgroups)}"
rcon_password "${rcon_password}"
motd_enabled 0
rcon_password ${rcon_password}
sv_steamgroup "38347879"
mp_autoteambalance 0
sv_forcepreload 1
// server specific
% for line in config:
${line}
% endfor
sv_cheats 1
sv_consistency 0
sv_lan 0
sv_allow_lobby_connect_only 0
sv_gametypes "coop,realism,survival,versus,teamversus,scavenge,teamscavenge"
sv_minrate 30000
sv_maxrate 60000
sv_mincmdrate 66
sv_maxcmdrate 101
sv_logsdir "logs-${name}" //Folder in the game directory where server logs will be stored.
log on //Creates a logfile (on | off)
sv_logecho 0 //default 0; Echo log information to the console.
sv_logfile 1 //default 1; Log server information in the log file.
sv_log_onefile 0 //default 0; Log server information to only one file.
sv_logbans 1 //default 0; Log server bans in the server logs.
sv_logflush 0 //default 0; Flush the log files to disk on each write (slow).

View file

@ -1,72 +0,0 @@
#!/bin/bash
set -xeuo pipefail
# -- DEFINE FUNCTIONS AND VARIABLES -- #
function steam() {
# for systemd, so it can terminate the process (for other things sudo would have been enough)
export HOME=/opt/l4d2/steam  # must be set before the call so the spawned process inherits it
setpriv --reuid=steam --regid=steam --init-groups "$@" <&0
}
# -- PREPARE SYSTEM -- #
getent passwd steam >/dev/null || useradd -M -d /opt/l4d2 -s /bin/bash steam
mkdir -p /opt/l4d2 /tmp/dumps
chown steam:steam /opt/l4d2 /tmp/dumps
dpkg --add-architecture i386
apt update
DEBIAN_FRONTEND=noninteractive apt install -y libc6:i386 lib32z1
# workshop downloader
test -f /opt/l4d2/scripts/steam-workshop-download || \
steam wget -4 https://git.sublimity.de/cronekorkn/steam-workshop-downloader/raw/branch/master/steam-workshop-download -P /opt/l4d2/scripts
steam chmod +x /opt/l4d2/scripts/steam-workshop-download
# -- STEAM -- #
steam mkdir -p /opt/l4d2/steam
test -f /opt/l4d2/steam/steamcmd_linux.tar.gz || \
steam wget http://media.steampowered.com/installer/steamcmd_linux.tar.gz -P /opt/l4d2/steam
test -f /opt/l4d2/steam/steamcmd.sh || \
steam tar -xvzf /opt/l4d2/steam/steamcmd_linux.tar.gz -C /opt/l4d2/steam
# fix for: /opt/l4d2/.steam/sdk32/steamclient.so: cannot open shared object file: No such file or directory
steam mkdir -p /opt/l4d2/steam/.steam # needs to be in steam users home dir
readlink /opt/l4d2/steam/.steam/sdk32 | grep -q ^/opt/l4d2/steam/linux32$ || \
steam ln -sf /opt/l4d2/steam/linux32 /opt/l4d2/steam/.steam/sdk32
readlink /opt/l4d2/steam/.steam/sdk64 | grep -q ^/opt/l4d2/steam/linux64$ || \
steam ln -sf /opt/l4d2/steam/linux64 /opt/l4d2/steam/.steam/sdk64
# -- INSTALL -- #
# installing the Windows deps first seems to be a workaround for x64?
steam mkdir -p /opt/l4d2/installation
steam /opt/l4d2/steam/steamcmd.sh \
+force_install_dir /opt/l4d2/installation \
+login anonymous \
+@sSteamCmdForcePlatformType windows \
+app_update 222860 validate \
+quit
steam /opt/l4d2/steam/steamcmd.sh \
+force_install_dir /opt/l4d2/installation \
+login anonymous \
+@sSteamCmdForcePlatformType linux \
+app_update 222860 validate \
+quit
# -- OVERLAYS -- #
for overlay_path in /opt/l4d2/scripts/overlays/*; do
overlay=$(basename "$overlay_path")
steam mkdir -p /opt/l4d2/overlays/$overlay
bash -xeuo pipefail "$overlay_path"
test -f /opt/l4d2/overlays/$overlay/left4dead2/cfg/server.cfg && \
steam cp /opt/l4d2/overlays/$overlay/left4dead2/cfg/server.cfg /opt/l4d2/overlays/$overlay/left4dead2/cfg/server_$overlay.cfg
done
# -- SERVERS -- #
#steam rm -rf /opt/l4d2/servers
steam mkdir -p /opt/l4d2/servers

View file

@ -1,75 +0,0 @@
#!/bin/bash
set -xeuo pipefail
name=""
port=""
configfile=""
overlays=""
arguments=""
while [[ $# -gt 0 ]]; do
case "$1" in
-n|--name)
name="$2"; shift 2
;;
-p|--port)
port="$2"; shift 2
;;
-c|--config)
configfile="$2"; shift 2
;;
-o|--overlay)
overlays="/opt/l4d2/overlays/$2:$overlays"; shift 2
;;
--)
shift
arguments+="$@"
break
;;
*)
echo "ERROR: unknown argument $1"; exit 1
;;
esac
done
[[ -n "${name}" ]] || { echo "ERROR: -n/--name missing"; exit 1; }
[[ -n "${port}" ]] || { echo "ERROR: -p/--port missing"; exit 1; }
# -- HELPER FUNCTIONS -- #
function steam() {
# for systemd, so it can terminate the process
export HOME=/opt/l4d2/steam  # must be set before the call so the spawned process inherits it
setpriv --reuid=steam --regid=steam --init-groups "$@"
}
# -- TIDY UP -- #
mountpoint -q "/opt/l4d2/servers/$name/merged" && umount "/opt/l4d2/servers/$name/merged"
steam rm -rf "/opt/l4d2/servers/$name"
# -- CREATE DIRECTORIES -- #
steam mkdir -p \
"/opt/l4d2/servers/$name" \
"/opt/l4d2/servers/$name/work" \
"/opt/l4d2/servers/$name/upper" \
"/opt/l4d2/servers/$name/merged"
# -- MOUNT OVERLAYFS -- #
mount -t overlay overlay \
-o "lowerdir=$overlays/opt/l4d2/installation,upperdir=/opt/l4d2/servers/$name/upper,workdir=/opt/l4d2/servers/$name/work" \
"/opt/l4d2/servers/$name/merged"
# -- REPLACE SERVER.CFG -- #
if [[ -n "$configfile" ]]; then
cp "$configfile" "/opt/l4d2/servers/$name/merged/left4dead2/cfg/server.cfg"
chown steam:steam "/opt/l4d2/servers/$name/merged/left4dead2/cfg/server.cfg"
fi
# -- RUN L4D2 -- #
steam "/opt/l4d2/servers/$name/merged/srcds_run" -norestart -pidfile "/opt/l4d2/servers/$name/pid" -game left4dead2 -ip 0.0.0.0 -port "$port" +hostname "Crone_$name" +map c1m1_hotel $arguments

View file

@ -1,19 +0,0 @@
#!/bin/bash
set -xeuo pipefail
name=""
while [[ $# -gt 0 ]]; do
case "$1" in
-n|--name)
name="$2"; shift 2
;;
*)
echo "ERROR: unknown argument $1"; exit 1
;;
esac
done
mountpoint -q "/opt/l4d2/servers/$name/merged" && umount "/opt/l4d2/servers/$name/merged"
steam rm -rf "/opt/l4d2/servers/$name"

View file

@ -1,105 +1,122 @@
users = {
'steam': {
'home': '/opt/l4d2/steam',
'shell': '/bin/bash',
},
}
assert node.has_bundle('steam') and node.has_bundle('steam-workshop-download')
directories = {
'/opt/l4d2': {
'owner': 'steam', 'group': 'steam',
},
'/opt/l4d2/steam': {
'owner': 'steam', 'group': 'steam',
},
'/opt/l4d2/configs': {
'owner': 'steam', 'group': 'steam',
'/opt/steam/left4dead2-servers': {
'owner': 'steam',
'group': 'steam',
'mode': '0755',
'purge': True,
},
'/opt/l4d2/scripts': {
'owner': 'steam', 'group': 'steam',
},
'/opt/l4d2/scripts/overlays': {
'owner': 'steam', 'group': 'steam',
# The current ZFS release doesn't support being used as an overlayfs upperdir (support was only added upstream in
# October 2022). Move the upperdir - unused anyway - to another directory, and keep the workdir next to it, since
# both have to be on the same filesystem.
'/opt/steam-zfs-overlay-workarounds': {
'owner': 'steam',
'group': 'steam',
'mode': '0755',
'purge': True,
},
}
files = {
'/opt/l4d2/setup': {
'mode': '755',
'triggers': {
'svc_systemd:left4dead2-initialize.service:restart',
},
},
'/opt/l4d2/start': {
'mode': '755',
'triggers': {
f'svc_systemd:left4dead2-{server_name}.service:restart'
for server_name in node.metadata.get('left4dead2/servers').keys()
},
},
'/opt/l4d2/stop': {
'mode': '755',
'triggers': {
f'svc_systemd:left4dead2-{server_name}.service:restart'
for server_name in node.metadata.get('left4dead2/servers').keys()
},
},
'/opt/l4d2/scripts/helpers': {
'source': 'scripts/helpers',
'mode': '755',
'triggers': {
'svc_systemd:left4dead2-initialize.service:restart',
},
},
# /opt/steam/steam/.steam/sdk32/steamclient.so: cannot open shared object file: No such file or directory
symlinks = {
'/opt/steam/steam/.steam/sdk32': {
'target': '/opt/steam/steam/linux32',
'owner': 'steam',
'group': 'steam',
}
}
for overlay in node.metadata.get('left4dead2/overlays'):
files[f'/opt/l4d2/scripts/overlays/{overlay}'] = {
'source': f'scripts/overlays/{overlay}',
'mode': '755',
'triggers': {
'svc_systemd:left4dead2-initialize.service:restart',
},
#
# SERVERS
#
for name, config in node.metadata.get('left4dead2/servers').items():
#overlay
directories[f'/opt/steam/left4dead2-servers/{name}'] = {
'owner': 'steam',
'group': 'steam',
}
directories[f'/opt/steam-zfs-overlay-workarounds/{name}/upper'] = {
'owner': 'steam',
'group': 'steam',
}
directories[f'/opt/steam-zfs-overlay-workarounds/{name}/workdir'] = {
'owner': 'steam',
'group': 'steam',
}
svc_systemd = {
'left4dead2-initialize.service': {
'enabled': True,
'running': None,
'needs': {
'tag:left4dead2-packages',
'file:/opt/l4d2/setup',
'file:/usr/local/lib/systemd/system/left4dead2-initialize.service',
},
},
}
for server_name, config in node.metadata.get('left4dead2/servers').items():
files[f'/opt/l4d2/configs/{server_name}.cfg'] = {
'source': 'server.cfg',
# conf
files[f'/opt/steam/left4dead2-servers/{name}/left4dead2/cfg/server.cfg'] = {
'content_type': 'mako',
'source': 'server.cfg',
'context': {
'server_name': server_name,
'rcon_password': repo.vault.decrypt('encrypt$gAAAAABpAdZhxwJ47I1AXotuZmBvyZP1ecVTt9IXFkLI28JiVS74LKs9QdgIBz-FC-iXtIHHh_GVGxxKQZprn4UrXZcvZ57kCKxfHBs3cE2JiGnbWE8_mfs=').value,
'config': config.get('config', []),
'name': name,
'steamgroups': node.metadata.get('left4dead2/steamgroups'),
'rcon_password': config['rcon_password'],
},
'owner': 'steam',
'mode': '644',
'triggers': {
f'svc_systemd:left4dead2-{server_name}.service:restart',
},
'group': 'steam',
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
],
}
svc_systemd[f'left4dead2-{server_name}.service'] = {
'enabled': True,
'running': True,
'tags': {
'left4dead2-servers',
},
'needs': {
'svc_systemd:left4dead2-initialize.service',
f'file:/usr/local/lib/systemd/system/left4dead2-{server_name}.service',
},
# service
svc_systemd[f'left4dead2-{name}.service'] = {
'needs': [
f'file:/opt/steam/left4dead2-servers/{name}/left4dead2/cfg/server.cfg',
f'file:/usr/local/lib/systemd/system/left4dead2-{name}.service',
],
}
#
# ADDONS
#
# base
files[f'/opt/steam/left4dead2-servers/{name}/left4dead2/addons/readme.txt'] = {
'content_type': 'any',
'owner': 'steam',
'group': 'steam',
}
directories[f'/opt/steam/left4dead2-servers/{name}/left4dead2/addons'] = {
'owner': 'steam',
'group': 'steam',
'purge': True,
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
],
}
for id in [
*config.get('workshop', []),
*node.metadata.get('left4dead2/workshop'),
]:
files[f'/opt/steam/left4dead2-servers/{name}/left4dead2/addons/{id}.vpk'] = {
'content_type': 'any',
'owner': 'steam',
'group': 'steam',
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
],
}
# admin system
directories[f'/opt/steam/left4dead2-servers/{name}/left4dead2/ems/admin system'] = {
'owner': 'steam',
'group': 'steam',
'mode': '0755',
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
],
}
files[f'/opt/steam/left4dead2-servers/{name}/left4dead2/ems/admin system/admins.txt'] = {
'owner': 'steam',
'group': 'steam',
'mode': '0755',
'content': '\n'.join(sorted(node.metadata.get('left4dead2/admins'))),
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
],
}

View file

@ -1,112 +1,110 @@
from re import match
from os import path, listdir
assert node.has_bundle('steam')
from shlex import quote
defaults = {
'apt': {
'packages': {
'libc6_i386': { # installs libc6:i386
'tags': {'left4dead2-packages'},
},
'lib32z1': {
'tags': {'left4dead2-packages'},
},
'unzip': {
'tags': {'left4dead2-packages'},
},
'p7zip-full': { # l4d2center_maps_sync.sh
'tags': {'left4dead2-packages'},
},
'steam': {
'games': {
'left4dead2': 222860,
},
},
'left4dead2': {
'overlays': set(listdir(path.join(repo.path, 'bundles/left4dead2/files/scripts/overlays'))),
'servers': {
# 'port': 27017,
# 'overlays': ['competitive_rework'],
# 'arguments': ['-tickrate 60'],
# 'config': [
# 'exec server_original.cfg',
# 'sm_forcematch zonemod',
# ],
},
},
'nftables': {
'input': {
'udp dport { 27005, 27020 } accept',
},
},
'systemd': {
'units': {
'left4dead2-initialize.service': {
'Unit': {
'Description': 'initialize left4dead2',
'After': 'network-online.target',
},
'Service': {
'Type': 'oneshot',
'RemainAfterExit': 'yes',
'ExecStart': '/opt/l4d2/setup',
'StandardOutput': 'journal',
'StandardError': 'journal',
},
'Install': {
'WantedBy': {'multi-user.target'},
},
},
},
'servers': {},
'admins': set(),
'workshop': set(),
},
}
@metadata_reactor.provides(
'left4dead2/servers',
)
def rconn_password(metadata):
# only works from localhost!
return {
'left4dead2': {
'servers': {
server: {
'rcon_password': repo.vault.password_for(f'{node.name} left4dead2 {server} rcon', length=24),
}
for server in metadata.get('left4dead2/servers')
},
},
}
@metadata_reactor.provides(
'steam-workshop-download',
'systemd/units',
)
def server_units(metadata):
units = {}
workshop = {}
for name, config in metadata.get('left4dead2/servers').items():
assert match(r'^[A-Za-z0-9_-]+$', name)
assert 27000 <= config["port"] <= 27100
for overlay in config.get('overlays', []):
assert overlay in metadata.get('left4dead2/overlays'), f"unknown overlay {overlay}, known: {metadata.get('left4dead2/overlays')}"
# mount overlay
mountpoint = f'/opt/steam/left4dead2-servers/{name}'
mount_unit_name = mountpoint[1:].replace('-', '\\x2d').replace('/', '-') + '.mount'
units[mount_unit_name] = {
'Unit': {
'Description': f"Mount left4dead2 server {name} overlay",
'Conflicts': {'umount.target'},
'Before': {'umount.target'},
},
'Mount': {
'What': 'overlay',
'Where': mountpoint,
'Type': 'overlay',
'Options': ','.join([
'auto',
'lowerdir=/opt/steam/left4dead2',
f'upperdir=/opt/steam-zfs-overlay-workarounds/{name}/upper',
f'workdir=/opt/steam-zfs-overlay-workarounds/{name}/workdir',
]),
},
'Install': {
'RequiredBy': {
f'left4dead2-{name}.service',
},
},
}
cmd = f'/opt/l4d2/start -n {name} -p {config["port"]}'
if 'config' in config:
cmd += f' -c /opt/l4d2/configs/{name}.cfg'
for overlay in config.get('overlays', []):
cmd += f' -o {overlay}'
if 'arguments' in config:
cmd += ' -- ' + ' '.join(config['arguments'])
# individual workshop
workshop_ids = config.get('workshop', set()) | metadata.get('left4dead2/workshop', set())
if workshop_ids:
workshop[f'left4dead2-{name}'] = {
'ids': workshop_ids,
'path': f'/opt/steam/left4dead2-servers/{name}/left4dead2/addons',
'user': 'steam',
'requires': {
mount_unit_name,
},
'required_by': {
f'left4dead2-{name}.service',
},
}
# left4dead2 server unit
units[f'left4dead2-{name}.service'] = {
'Unit': {
'Description': f'left4dead2 server {name}',
'After': {'left4dead2-initialize.service'},
'Requires': {'left4dead2-initialize.service'},
'After': {'steam-update.service'},
'Requires': {'steam-update.service'},
},
'Service': {
'Type': 'simple',
'ExecStart': cmd,
'ExecStopPost': f'/opt/l4d2/stop -n {name}',
'User': 'steam',
'Group': 'steam',
'WorkingDirectory': f'/opt/steam/left4dead2-servers/{name}',
'ExecStart': f'/opt/steam/left4dead2-servers/{name}/srcds_run -port {config["port"]} +exec server.cfg',
'Restart': 'on-failure',
'Nice': -10,
'CPUWeight': 200,
'IOSchedulingClass': 'best-effort',
'IOSchedulingPriority': 0,
},
'Install': {
'WantedBy': {'multi-user.target'},
},
'triggers': {
f'svc_systemd:left4dead2-{name}.service:restart',
},
}
return {
'steam-workshop-download': workshop,
'systemd': {
'units': units,
},
@ -116,13 +114,14 @@ def server_units(metadata):
@metadata_reactor.provides(
'nftables/input',
)
def nftables(metadata):
ports = sorted(str(config["port"]) for config in metadata.get('left4dead2/servers').values())
def firewall(metadata):
ports = set(str(server['port']) for server in metadata.get('left4dead2/servers').values())
return {
'nftables': {
'input': {
f'ip protocol {{ tcp, udp }} th dport {{ {", ".join(ports)} }} accept'
f"tcp dport {{ {', '.join(sorted(ports))} }} accept",
f"udp dport {{ {', '.join(sorted(ports))} }} accept",
},
},
}
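For reference, the mount_unit_name derivation in server_units() above reproduces systemd's path escaping; a worked example with a hypothetical server name zonemod:

>>> mountpoint = '/opt/steam/left4dead2-servers/zonemod'
>>> mountpoint[1:].replace('-', '\\x2d').replace('/', '-') + '.mount'
'opt-steam-left4dead2\\x2dservers-zonemod.mount'

This matches what systemd-escape --path --suffix=mount /opt/steam/left4dead2-servers/zonemod would produce.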

View file

@ -1,58 +0,0 @@
https://developer.valvesoftware.com/wiki/List_of_L4D2_Cvars
Dead Center c1m1_hotel
Dead Center c1m2_streets
Dead Center c1m3_mall
Dead Center c1m4_atrium
Dark Carnival c2m1_highway
Dark Carnival c2m2_fairgrounds
Dark Carnival c2m3_coaster
Dark Carnival c2m4_barns
Dark Carnival c2m5_concert
Swamp Fever c3m1_plankcountry
Swamp Fever c3m2_swamp
Swamp Fever c3m3_shantytown
Swamp Fever c3m4_plantation
Hard Rain c4m1_milltown_a
Hard Rain c4m2_sugarmill_a
Hard Rain c4m3_sugarmill_b
Hard Rain c4m4_milltown_b
Hard Rain c4m5_milltown_escape
The Parish c5m1_waterfront_sndscape
The Parish c5m1_waterfront
The Parish c5m2_park
The Parish c5m3_cemetery
The Parish c5m4_quarter
The Parish c5m5_bridge
The Passing c6m1_riverbank
The Passing c6m2_bedlam
The Passing c6m3_port
The Sacrifice c7m1_docks
The Sacrifice c7m2_barge
The Sacrifice c7m3_port
No Mercy c8m1_apartment
No Mercy c8m2_subway
No Mercy c8m3_sewers
No Mercy c8m4_interior
No Mercy c8m5_rooftop
Crash Course c9m1_alleys
Crash Course c9m2_lots
Death Toll c10m1_caves
Death Toll c10m2_drainage
Death Toll c10m3_ranchhouse
Death Toll c10m4_mainstreet
Death Toll c10m5_houseboat
Dead Air c11m1_greenhouse
Dead Air c11m2_offices
Dead Air c11m3_garage
Dead Air c11m4_terminal
Dead Air c11m5_runway
Blood Harvest c12m1_hilltop
Blood Harvest c12m2_traintunnel
Blood Harvest c12m3_bridge
Blood Harvest c12m4_barn
Blood Harvest c12m5_cornfield
Cold Stream c13m1_alpinecreek
Cold Stream c13m2_southpinestream
Cold Stream c13m3_memorialbridge
Cold Stream c13m4_cutthroatcreek

View file

@ -1,40 +0,0 @@
hostname "CroneKorkN : ${name}"
sv_contact "admin@sublimity.de"
sv_steamgroup "${','.join(steamgroups)}"
rcon_password "${rcon_password}"
motd_enabled 0
sv_cheats 1
sv_consistency 0
sv_lan 0
sv_allow_lobby_connect_only 0
sv_gametypes "coop,realism,survival,versus,teamversus,scavenge,teamscavenge"
sv_minrate 30000
sv_maxrate 60000
sv_mincmdrate 66
sv_maxcmdrate 101
sv_logsdir "logs-${name}" //Folder in the game directory where server logs will be stored.
log on //Creates a logfile (on | off)
sv_logecho 0 //default 0; Echo log information to the console.
sv_logfile 1 //default 1; Log server information in the log file.
sv_log_onefile 0 //default 0; Log server information to only one file.
sv_logbans 1 //default 0; Log server bans in the server logs.
sv_logflush 0 //default 0; Flush the log files to disk on each write (slow).

View file

@ -1,122 +0,0 @@
assert node.has_bundle('steam') and node.has_bundle('steam-workshop-download')
directories = {
'/opt/steam/left4dead2-servers': {
'owner': 'steam',
'group': 'steam',
'mode': '0755',
'purge': True,
},
# The current ZFS release doesn't support being used as an overlayfs upperdir (support was only added upstream in
# October 2022). Move the upperdir - unused anyway - to another directory, and keep the workdir next to it, since
# both have to be on the same filesystem.
'/opt/steam-zfs-overlay-workarounds': {
'owner': 'steam',
'group': 'steam',
'mode': '0755',
'purge': True,
},
}
# /opt/steam/steam/.steam/sdk32/steamclient.so: cannot open shared object file: No such file or directory
symlinks = {
'/opt/steam/steam/.steam/sdk32': {
'target': '/opt/steam/steam/linux32',
'owner': 'steam',
'group': 'steam',
}
}
#
# SERVERS
#
for name, config in node.metadata.get('left4dead2/servers').items():
#overlay
directories[f'/opt/steam/left4dead2-servers/{name}'] = {
'owner': 'steam',
'group': 'steam',
}
directories[f'/opt/steam-zfs-overlay-workarounds/{name}/upper'] = {
'owner': 'steam',
'group': 'steam',
}
directories[f'/opt/steam-zfs-overlay-workarounds/{name}/workdir'] = {
'owner': 'steam',
'group': 'steam',
}
# conf
files[f'/opt/steam/left4dead2-servers/{name}/left4dead2/cfg/server.cfg'] = {
'content_type': 'mako',
'source': 'server.cfg',
'context': {
'name': name,
'steamgroups': node.metadata.get('left4dead2/steamgroups'),
'rcon_password': config['rcon_password'],
},
'owner': 'steam',
'group': 'steam',
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
],
}
# service
svc_systemd[f'left4dead2-{name}.service'] = {
'needs': [
f'file:/opt/steam/left4dead2-servers/{name}/left4dead2/cfg/server.cfg',
f'file:/usr/local/lib/systemd/system/left4dead2-{name}.service',
],
}
#
# ADDONS
#
# base
files[f'/opt/steam/left4dead2-servers/{name}/left4dead2/addons/readme.txt'] = {
'content_type': 'any',
'owner': 'steam',
'group': 'steam',
}
directories[f'/opt/steam/left4dead2-servers/{name}/left4dead2/addons'] = {
'owner': 'steam',
'group': 'steam',
'purge': True,
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
],
}
for id in [
*config.get('workshop', []),
*node.metadata.get('left4dead2/workshop'),
]:
files[f'/opt/steam/left4dead2-servers/{name}/left4dead2/addons/{id}.vpk'] = {
'content_type': 'any',
'owner': 'steam',
'group': 'steam',
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
],
}
# admin system
directories[f'/opt/steam/left4dead2-servers/{name}/left4dead2/ems/admin system'] = {
'owner': 'steam',
'group': 'steam',
'mode': '0755',
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
],
}
files[f'/opt/steam/left4dead2-servers/{name}/left4dead2/ems/admin system/admins.txt'] = {
'owner': 'steam',
'group': 'steam',
'mode': '0755',
'content': '\n'.join(sorted(node.metadata.get('left4dead2/admins'))),
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
],
}

View file

@ -1,127 +0,0 @@
assert node.has_bundle('steam')
from shlex import quote
defaults = {
'steam': {
'games': {
'left4dead2': 222860,
},
},
'left4dead2': {
'servers': {},
'admins': set(),
'workshop': set(),
},
}
@metadata_reactor.provides(
'left4dead2/servers',
)
def rconn_password(metadata):
# only works from localhost!
return {
'left4dead2': {
'servers': {
server: {
'rcon_password': repo.vault.password_for(f'{node.name} left4dead2 {server} rcon', length=24),
}
for server in metadata.get('left4dead2/servers')
},
},
}
@metadata_reactor.provides(
'steam-workshop-download',
'systemd/units',
)
def server_units(metadata):
units = {}
workshop = {}
for name, config in metadata.get('left4dead2/servers').items():
# mount overlay
mountpoint = f'/opt/steam/left4dead2-servers/{name}'
mount_unit_name = mountpoint[1:].replace('-', '\\x2d').replace('/', '-') + '.mount'
units[mount_unit_name] = {
'Unit': {
'Description': f"Mount left4dead2 server {name} overlay",
'Conflicts': {'umount.target'},
'Before': {'umount.target'},
},
'Mount': {
'What': 'overlay',
'Where': mountpoint,
'Type': 'overlay',
'Options': ','.join([
'auto',
'lowerdir=/opt/steam/left4dead2',
f'upperdir=/opt/steam-zfs-overlay-workarounds/{name}/upper',
f'workdir=/opt/steam-zfs-overlay-workarounds/{name}/workdir',
]),
},
'Install': {
'RequiredBy': {
f'left4dead2-{name}.service',
},
},
}
# individual workshop
workshop_ids = config.get('workshop', set()) | metadata.get('left4dead2/workshop', set())
if workshop_ids:
workshop[f'left4dead2-{name}'] = {
'ids': workshop_ids,
'path': f'/opt/steam/left4dead2-servers/{name}/left4dead2/addons',
'user': 'steam',
'requires': {
mount_unit_name,
},
'required_by': {
f'left4dead2-{name}.service',
},
}
# left4dead2 server unit
units[f'left4dead2-{name}.service'] = {
'Unit': {
'Description': f'left4dead2 server {name}',
'After': {'steam-update.service'},
'Requires': {'steam-update.service'},
},
'Service': {
'User': 'steam',
'Group': 'steam',
'WorkingDirectory': f'/opt/steam/left4dead2-servers/{name}',
'ExecStart': f'/opt/steam/left4dead2-servers/{name}/srcds_run -port {config["port"]} +exec server.cfg',
'Restart': 'on-failure',
},
'Install': {
'WantedBy': {'multi-user.target'},
},
}
return {
'steam-workshop-download': workshop,
'systemd': {
'units': units,
},
}
@metadata_reactor.provides(
'nftables/input',
)
def firewall(metadata):
ports = set(str(server['port']) for server in metadata.get('left4dead2/servers').values())
return {
'nftables': {
'input': {
f"tcp dport {{ {', '.join(sorted(ports))} }} accept",
f"udp dport {{ {', '.join(sorted(ports))} }} accept",
},
},
}

View file

@ -1,97 +0,0 @@
# https://github.com/SirPlease/L4D2-Competitive-Rework/blob/master/Dedicated%20Server%20Install%20Guide/README.md
getent passwd steam >/dev/null || useradd -M -d /opt/l4d2 -s /bin/bash steam
mkdir -p /opt/l4d2 /tmp/dumps
chown steam:steam /opt/l4d2 /tmp/dumps
dpkg --add-architecture i386
apt update
DEBIAN_FRONTEND=noninteractive apt install -y libc6:i386 lib32z1
function steam() { sudo -Hiu steam "$@"; }
# -- STEAM -- #
steam mkdir -p /opt/l4d2/steam
test -f /opt/l4d2/steam/steamcmd_linux.tar.gz || \
steam wget http://media.steampowered.com/installer/steamcmd_linux.tar.gz -P /opt/l4d2/steam
test -f /opt/l4d2/steam/steamcmd.sh || \
steam tar -xvzf /opt/l4d2/steam/steamcmd_linux.tar.gz -C /opt/l4d2/steam
# fix: /opt/l4d2/.steam/sdk32/steamclient.so: cannot open shared object file: No such file or directory
steam mkdir -p /opt/l4d2/steam/.steam
test -f /opt/l4d2/steam/.steam/sdk32/steamclient.so || \
steam ln -s /opt/l4d2/steam/linux32 /opt/l4d2/steam/.steam/sdk32
# -- INSTALL -- #
# installing the Windows deps first seems to be a workaround for x64?
steam mkdir -p /opt/l4d2/installation
steam /opt/l4d2/steam/steamcmd.sh \
+force_install_dir /opt/l4d2/installation \
+login anonymous \
+@sSteamCmdForcePlatformType windows \
+app_update 222860 validate \
+quit
steam /opt/l4d2/steam/steamcmd.sh \
+force_install_dir /opt/l4d2/installation \
+login anonymous \
+@sSteamCmdForcePlatformType linux \
+app_update 222860 validate \
+quit
# -- OVERLAYS -- #
steam mkdir -p /opt/l4d2/overlays
# workshop downloader
steam wget -4 https://git.sublimity.de/cronekorkn/steam-workshop-downloader/raw/branch/master/steam-workshop-download -P /opt/l4d2
steam chmod +x /opt/l4d2/steam-workshop-download
# -- OVERLAY PVE -- #
steam mkdir -p /opt/l4d2/overlays/pve
# admin system
steam mkdir -p /opt/l4d2/overlays/pve/left4dead2/addons
steam /opt/l4d2/steam-workshop-download 2524204971 --out /opt/l4d2/overlays/pve/left4dead2/addons
steam mkdir -p "/opt/l4d2/overlays/pve/left4dead2/ems/admin system"
echo "STEAM_1:0:12376499" | steam tee "/opt/l4d2/overlays/pve/left4dead2/ems/admin system/admins.txt"
# ions vocalizer
steam /opt/l4d2/steam-workshop-download 698857882 --out /opt/l4d2/overlays/pve/left4dead2/addons
# -- OVERLAY ZONEMOD -- #
true
# -- SERVERS -- #
steam mkdir -p /opt/l4d2/servers
# -- SERVER PVE1 -- #
steam mkdir -p \
/opt/l4d2/servers/pve1 \
/opt/l4d2/servers/pve1/work \
/opt/l4d2/servers/pve1/upper \
/opt/l4d2/servers/pve1/merged
mount -t overlay overlay \
-o lowerdir=/opt/l4d2/overlays/pve:/opt/l4d2/installation,upperdir=/opt/l4d2/servers/pve1/upper,workdir=/opt/l4d2/servers/pve1/work \
/opt/l4d2/servers/pve1/merged
# run server
steam cat <<'EOF' > /opt/l4d2/servers/pve1/merged/left4dead2/cfg/server.cfg
hostname "CKNs Server"
motd_enabled 0
sv_steamgroup "38347879"
#sv_steamgroup_exclusive 0
sv_minrate 60000
sv_maxrate 0
net_splitpacket_maxrate 60000
sv_hibernate_when_empty 0
EOF
steam /opt/l4d2/servers/pve1/merged/srcds_run -game left4dead2 -ip 0.0.0.0 -port 27015 +map c1m1_hotel

View file

@ -1,183 +0,0 @@
from shlex import quote
def steam_run(cmd):
return f'su - steam -c {quote(cmd)}'
users = {
'steam': {
'home': '/opt/steam',
},
}
directories = {
'/opt/steam': {
'owner': 'steam',
'group': 'steam',
},
'/opt/steam/.steam': {
'owner': 'steam',
'group': 'steam',
},
'/opt/left4dead2': {
'owner': 'steam',
'group': 'steam',
},
'/opt/left4dead2/left4dead2/ems/admin system': {
'owner': 'steam',
'group': 'steam',
},
'/opt/left4dead2/left4dead2/addons': {
'owner': 'steam',
'group': 'steam',
},
'/tmp/dumps': {
'owner': 'steam',
'group': 'steam',
'mode': '1770',
},
}
symlinks = {
'/opt/steam/.steam/sdk32': {
'target': '/opt/steam/linux32',
'owner': 'steam',
'group': 'steam',
},
}
files = {
'/opt/steam-workshop-download': {
'content_type': 'download',
'source': 'https://git.sublimity.de/cronekorkn/steam-workshop-downloader/raw/branch/master/steam-workshop-download',
'mode': '755',
},
'/opt/left4dead2/left4dead2/ems/admin system/admins.txt': {
'unless': 'test -f /opt/left4dead2/left4dead2/ems/admin system/admins.txt',
'content': 'STEAM_1:0:12376499',
'owner': 'steam',
'group': 'steam',
},
}
actions = {
'dpkg_add_architecture': {
'command': 'dpkg --add-architecture i386',
'unless': 'dpkg --print-foreign-architectures | grep -q i386',
'triggers': [
'action:apt_update',
],
'needed_by': [
'pkg_apt:libc6_i386',
],
},
'download_steam': {
'command': steam_run('wget http://media.steampowered.com/installer/steamcmd_linux.tar.gz -P /opt/steam'),
'unless': steam_run('test -f /opt/steam/steamcmd_linux.tar.gz'),
'needs': {
'pkg_apt:libc6_i386',
'directory:/opt/steam',
}
},
'extract_steamcmd': {
'command': steam_run('tar -xvzf /opt/steam/steamcmd_linux.tar.gz -C /opt/steam'),
'unless': steam_run('test -f /opt/steam/steamcmd.sh'),
'needs': {
'action:download_steam',
}
},
}
for addon_id in [2524204971]:
actions[f'download-left4dead2-addon-{addon_id}'] = {
'command': steam_run(f'/opt/steam-workshop-download {addon_id} --out /opt/left4dead2/left4dead2/addons'),
'unless': steam_run(f'test -f /opt/left4dead2/left4dead2/addons/{addon_id}.vpk'),
'needs': {
'directory:/opt/left4dead2/left4dead2/addons',
},
'needed_by': {
'tag:left4dead2-servers',
},
}
svc_systemd = {
'left4dead2-install.service': {
'enabled': True,
'running': False,
'needs': {
'file:/usr/local/lib/systemd/system/left4dead2-install.service',
},
},
}
for server_name, server_config in node.metadata.get('left4dead2/servers', {}).items():
svc_systemd[f'left4dead2-{server_name}.service'] = {
'enabled': True,
'running': True,
'tags': {
'left4dead2-servers',
},
'needs': {
'svc_systemd:left4dead2-install.service',
f'file:/usr/local/lib/systemd/system/left4dead2-{server_name}.service',
}
}
# # https://github.com/SirPlease/L4D2-Competitive-Rework/blob/master/Dedicated%20Server%20Install%20Guide/README.md
# mkdir /opt/steam /tmp/dumps
# useradd -M -d /opt/steam -s /bin/bash steam
# chown steam:steam /opt/steam /tmp/dumps
# dpkg --add-architecture i386
# apt update
# apt install libc6:i386 lib32z1
# sudo su - steam -s /bin/bash
# #--------
# wget http://media.steampowered.com/installer/steamcmd_linux.tar.gz
# tar -xvzf steamcmd_linux.tar.gz
# # fix: /opt/steam/.steam/sdk32/steamclient.so: cannot open shared object file: No such file or directory
# mkdir /opt/steam/.steam && ln -s /opt/steam/linux32 /opt/steam/.steam/sdk32
# # installing the Windows deps first seems to be a workaround for x64?
# ./steamcmd.sh \
# +force_install_dir /opt/steam/left4dead2 \
# +login anonymous \
# +@sSteamCmdForcePlatformType windows \
# +app_update 222860 validate \
# +quit
# ./steamcmd.sh \
# +force_install_dir /opt/steam/left4dead2 \
# +login anonymous \
# +@sSteamCmdForcePlatformType linux \
# +app_update 222860 validate \
# +quit
# # download admin system
# wget -4 https://git.sublimity.de/cronekorkn/steam-workshop-downloader/raw/branch/master/steam-workshop-download
# chmod +x steam-workshop-download
# ./steam-workshop-download 2524204971 --out /opt/steam/left4dead2/left4dead2/addons
# mkdir -p "/opt/steam/left4dead2/left4dead2/ems/admin system"
# echo "STEAM_1:0:12376499" > "/opt/steam/left4dead2/left4dead2/ems/admin system/admins.txt"
# /opt/steam/left4dead2/srcds_run -game left4dead2 -ip 0.0.0.0 -port 27015 +map c1m1_hotel
# cat <<'EOF' > /opt/steam/left4dead2/left4dead2/cfg/server.cfg
# hostname "CKNs Server"
# motd_enabled 0
# sv_steamgroup "38347879"
# #sv_steamgroup_exclusive 0
# sv_minrate 60000
# sv_maxrate 0
# net_splitpacket_maxrate 60000
# sv_hibernate_when_empty 0
# EOF

View file

@ -1,107 +0,0 @@
from re import match
defaults = {
'apt': {
'packages': {
'libc6_i386': {}, # installs libc6:i386
'lib32z1': {},
'unzip': {},
},
},
'left4dead2': {
'servers': {},
},
'nftables': {
'input': {
'udp dport { 27005, 27020 } accept',
},
},
}
@metadata_reactor.provides(
'nftables/input',
)
def nftables(metadata):
ports = sorted(str(config["port"]) for config in metadata.get('left4dead2/servers', {}).values())
return {
'nftables': {
'input': {
f'ip protocol {{ tcp, udp }} th dport {{ {", ".join(ports)} }} accept'
},
},
}
@metadata_reactor.provides(
'systemd/units',
)
def initial_unit(metadata):
install_command = (
'/opt/steam/steamcmd.sh '
'+force_install_dir /opt/left4dead2 '
'+login anonymous '
'+@sSteamCmdForcePlatformType {platform} '
'+app_update 222860 validate '
'+quit '
)
return {
'systemd': {
'units': {
'left4dead2-install.service': {
'Unit': {
'Description': 'install or update left4dead2',
'After': 'network-online.target',
},
'Service': {
'Type': 'oneshot',
'RemainAfterExit': 'yes',
'User': 'steam',
'Group': 'steam',
'WorkingDirectory': '/opt/steam',
'ExecStartPre': install_command.format(platform='windows'),
'ExecStart': install_command.format(platform='linux'),
},
'Install': {
'WantedBy': {'multi-user.target'},
},
},
},
},
}
@metadata_reactor.provides(
'systemd/units',
)
def server_units(metadata):
units = {}
for name, config in metadata.get('left4dead2/servers').items():
assert match(r'^[A-Za-z0-9_-]+$', name)
units[f'left4dead2-{name}.service'] = {
'Unit': {
'Description': f'left4dead2 server {name}',
'After': {'left4dead2-install.service'},
'Requires': {'left4dead2-install.service'},
},
'Service': {
'User': 'steam',
'Group': 'steam',
'WorkingDirectory': '/opt/left4dead2',
'ExecStart': f'/opt/left4dead2/srcds_run -port {config["port"]} +exec server_{name}.cfg',
'Restart': 'on-failure',
},
'Install': {
'WantedBy': {'multi-user.target'},
},
}
return {
'systemd': {
'units': units,
},
}

View file

@ -82,7 +82,6 @@ def dns(metadata):
'dns': dns,
}
@metadata_reactor.provides(
'letsencrypt/domains',
)

View file

@ -42,7 +42,7 @@ def user(metadata):
'users': {
'sshmon': {
'authorized_users': {
'nagios@' + metadata.get('monitoring/icinga2_node'): {},
'nagios@' + metadata.get('monitoring/icinga2_node'),
}
},
},

View file

@ -25,9 +25,9 @@ defaults = {
},
},
'telegraf': {
'config': {
'inputs': {
'postfix': {
'default': {},
'postfix': [{}],
},
},
},

View file

@ -98,17 +98,17 @@ def zfs(metadata):
@metadata_reactor.provides(
'telegraf/inputs/postgresql/default',
'telegraf/config/inputs/postgresql',
)
def telegraf(metadata):
return {
'telegraf': {
'config': {
'inputs': {
'postgresql': {
'default': {
'postgresql': [{
'address': f'postgres://root:{root_password}@localhost:5432/postgres',
'databases': sorted(list(node.metadata.get('postgresql/databases').keys())),
},
}],
},
},
},

View file

@ -8,14 +8,16 @@ defaults = {
@metadata_reactor.provides(
'telegraf/agent',
'telegraf/config/agent',
)
def telegraf(metadata):
return {
'telegraf': {
'config': {
'agent': {
'flush_interval': '30s',
'interval': '1m',
'interval': '30s',
},
},
},
}

View file

@ -9,11 +9,11 @@ $config['enable_installer'] = true;
$config['db_dsnw'] = '${database['provider']}://${database['user']}:${database['password']}@${database['host']}/${database['name']}';
$config['imap_host'] = 'ssl://${imap_host}';
$config['imap_port'] = 993;
#$config['imap_debug'] = true;
$config['smtp_host'] = 'tls://${imap_host}';
$config['smtp_host'] = 'tls://localhost';
$config['smtp_port'] = 587;
$config['smtp_user'] = '%u';
$config['smtp_pass'] = '%p';
#$config['imap_debug'] = true;
#$config['smtp_debug'] = true;
$config['support_url'] = '';
$config['des_key'] = '${des_key}';

File diff suppressed because it is too large.

View file

@ -1,11 +0,0 @@
files = {
# https://mikrotik.com/download/tools
'/usr/share/snmp/mibs/MIKROTIK-MIB.txt': {
'source': 'mikrotik.mib',
'content_type': 'binary',
'mode': '0644',
'needed_by': {
'svc_systemd:telegraf.service',
},
},
}

View file

@ -1,444 +0,0 @@
input_defaults = {
"agents": [
f"udp://{routeros_node.hostname}:161"
for routeros_node in repo.nodes_in_group("routeros")
],
"agent_host_tag": "agent_host",
"version": 2,
"community": "public",
"max_repetitions": 5, # supposedly less spiky loads
"tags": {
"operating_system": "routeros",
},
}
defaults = {
'apt': {
'packages': {
'snmp': {},
'snmp-mibs-downloader': {},
},
},
"telegraf": {
"processors": {
"enum": {
"mikrotik_host_mapping":{
# - measurements get the switch IP as agent_host tag
# - we define a value mapping ip -> node name
# - agent_host gets translated and written into the host tag
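# - example: a point tagged agent_host=10.0.0.60 ends up with host set to that
#   switch's bundlewrap node name (the actual pairs come from the routeros group)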
"tagpass": {
"operating_system": ["routeros"],
},
"mapping": [
{
"tag": "agent_host",
"dest": "host",
"default": "unknown",
"value_mappings": {
routeros_node.hostname: routeros_node.name
for routeros_node in repo.nodes_in_group("routeros")
},
},
],
},
},
},
"inputs": {
"snmp": {
"mikrotik_switches_fast": {
"interval": "2m",
"collection_jitter": "20s",
**input_defaults,
"table": [
# CPU load (HR-MIB)
{
"name": "mikrotik_cpu",
"oid": "HOST-RESOURCES-MIB::hrProcessorTable",
"field": [
{
"name": "frw_id",
"oid": "HOST-RESOURCES-MIB::hrProcessorFrwID",
"is_tag": True,
},
{
"name": "load",
"oid": "HOST-RESOURCES-MIB::hrProcessorLoad",
},
],
},
# Storage (HR-MIB)
{
"name": "mikrotik_storage",
"oid": "HOST-RESOURCES-MIB::hrStorageTable",
"field": [
{
"name": "index",
"oid": "HOST-RESOURCES-MIB::hrStorageIndex",
"is_tag": True,
},
{
"name": "type",
"oid": "HOST-RESOURCES-MIB::hrStorageType",
"is_tag": True,
},
{
"name": "descr",
"oid": "HOST-RESOURCES-MIB::hrStorageDescr",
"is_tag": True,
},
{
"name": "alloc_unit",
"oid": "HOST-RESOURCES-MIB::hrStorageAllocationUnits",
},
{
"name": "size",
"oid": "HOST-RESOURCES-MIB::hrStorageSize",
},
{
"name": "used",
"oid": "HOST-RESOURCES-MIB::hrStorageUsed",
},
{
"name": "alloc_failures",
"oid": "HOST-RESOURCES-MIB::hrStorageAllocationFailures",
},
],
},
# MikroTik Health (table)
{
"name": "mikrotik_health",
"oid": "MIKROTIK-MIB::mtxrGaugeTable",
"field": [
{
"name": "sensor",
"oid": "MIKROTIK-MIB::mtxrGaugeName",
"is_tag": True,
},
{
"name": "value",
"oid": "MIKROTIK-MIB::mtxrGaugeValue",
},
{
"name": "unit",
"oid": "MIKROTIK-MIB::mtxrGaugeUnit",
"is_tag": True,
},
],
},
],
},
"mikrotik_switches_slow": {
"interval": "7m",
"collection_jitter": "2m",
**input_defaults,
"table": [
# Interface statistics (standard IF-MIB)
{
"name": "mikrotik_interface_generic",
"oid": "IF-MIB::ifTable",
"field": [
# 6: ethernetCsmacd (physical Ethernet port)
# 24: softwareLoopback
# 53: propVirtual (often VLANs on MikroTik)
# 131: tunnel
# 135: l2vlan
# 161: ieee8023adLag (Bonding/LACP)
# 209: bridge
{
"name": "ifType",
"oid": "IF-MIB::ifType",
"is_tag": True,
},
# Labels (optional but recommended)
{
"name": "ifName",
"oid": "IF-MIB::ifName",
"is_tag": True,
},
{
"name": "ifAlias",
"oid": "IF-MIB::ifAlias",
"is_tag": True,
},
# Bytes (64-bit)
{
"name": "in_octets",
"oid": "IF-MIB::ifHCInOctets",
},
{
"name": "out_octets",
"oid": "IF-MIB::ifHCOutOctets",
},
# Packets (64-bit unicast)
{
"name": "in_ucast_pkts",
"oid": "IF-MIB::ifHCInUcastPkts",
},
{
"name": "out_ucast_pkts",
"oid": "IF-MIB::ifHCOutUcastPkts",
},
{
"name": "in_mcast_pkts",
"oid": "IF-MIB::ifHCInMulticastPkts",
},
{
"name": "in_bcast_pkts",
"oid": "IF-MIB::ifHCInBroadcastPkts",
},
{
"name": "out_mcast_pkts",
"oid": "IF-MIB::ifHCOutMulticastPkts",
},
{
"name": "out_bcast_pkts",
"oid": "IF-MIB::ifHCOutBroadcastPkts",
},
# Drops / Errors
{
"name": "in_discards",
"oid": "IF-MIB::ifInDiscards",
},
{
"name": "out_discards",
"oid": "IF-MIB::ifOutDiscards",
},
{
"name": "in_errors",
"oid": "IF-MIB::ifInErrors",
},
{
"name": "out_errors",
"oid": "IF-MIB::ifOutErrors",
},
],
},
# Interface PoE
{
"name": "mikrotik_poe",
"oid": "MIKROTIK-MIB::mtxrPOETable",
"field": [
{
"name": "ifName",
"oid": "IF-MIB::ifName",
"is_tag": True,
},
{
"name": "ifAlias",
"oid": "IF-MIB::ifAlias",
"is_tag": True,
},
{
"name": "ifindex",
"oid": "MIKROTIK-MIB::mtxrPOEInterfaceIndex",
"is_tag": True,
},
{
"name": "status",
"oid": "MIKROTIK-MIB::mtxrPOEStatus",
},
{
"name": "voltage",
"oid": "MIKROTIK-MIB::mtxrPOEVoltage",
},
{
"name": "current",
"oid": "MIKROTIK-MIB::mtxrPOECurrent",
},
{
"name": "power",
"oid": "MIKROTIK-MIB::mtxrPOEPower",
},
],
},
],
},
"mikrotik_switches_very_slow": {
"interval": "20m",
"collection_jitter": "5m",
**input_defaults,
"table": [
# Interface statistics (MikroTik-specific mib)
{
"name": "mikrotik_interface_detailed",
"oid": "MIKROTIK-MIB::mtxrInterfaceStatsTable",
"field": [
# Join key / label (usually identical to IF-MIB ifName)
{
"name": "ifName",
"oid": "MIKROTIK-MIB::mtxrInterfaceStatsName",
"is_tag": True,
},
# join IF-MIB for better labels
{
"name": "ifAlias",
"oid": "IF-MIB::ifAlias",
"is_tag": True,
},
# =========================
# Physical layer (L1/L2)
# =========================
# CRC/FCS errors → very often cabling, connectors, SFPs, signal quality (EMI)
{
"name": "rx_fcs_errors",
"oid": "MIKROTIK-MIB::mtxrInterfaceStatsRxFCSError",
},
# Alignment errors → typically duplex mismatch or PHY problems
{
"name": "rx_align_errors",
"oid": "MIKROTIK-MIB::mtxrInterfaceStatsRxAlignError",
},
# Code errors → PHY encoding errors (signal/SFP/PHY)
{
"name": "rx_code_errors",
"oid": "MIKROTIK-MIB::mtxrInterfaceStatsRxCodeError",
},
# Carrier errors → carrier lost (copper issues, autoneg, PHY instability)
{
"name": "rx_carrier_errors",
"oid": "MIKROTIK-MIB::mtxrInterfaceStatsRxCarrierError",
},
# Jabber → extremely long invalid frames (faulty NIC/PHY, very severe)
{
"name": "rx_jabber",
"oid": "MIKROTIK-MIB::mtxrInterfaceStatsRxJabber",
},
# ==================================
# Length / framing anomalies (diagnostic)
# ==================================
# Frames shorter than minimum (noise, collisions, broken sender)
{
"name": "rx_too_short",
"oid": "MIKROTIK-MIB::mtxrInterfaceStatsRxTooShort",
},
# Frames longer than allowed (MTU mismatch, framing errors)
{
"name": "rx_too_long",
"oid": "MIKROTIK-MIB::mtxrInterfaceStatsRxTooLong",
},
# Fragments (often collision-related or duplex mismatch)
{
"name": "rx_fragment",
"oid": "MIKROTIK-MIB::mtxrInterfaceStatsRxFragment",
},
# Generic length errors
{
"name": "rx_length_errors",
"oid": "MIKROTIK-MIB::mtxrInterfaceStatsRxLengthError",
},
# ==================
# Drops (real packet loss)
# ==================
# RX drops (queue/ASIC/policy/overload) → highly alert-worthy
{
"name": "rx_drop",
"oid": "MIKROTIK-MIB::mtxrInterfaceStatsRxDrop",
},
# TX drops (buffer/queue exhaustion, scheduling, ASIC limits)
{
"name": "tx_drop",
"oid": "MIKROTIK-MIB::mtxrInterfaceStatsTxDrop",
},
# =========================================
# Duplex / collision indicators
# (should be zero on full-duplex links)
# =========================================
# Total collisions (relevant only for half-duplex or misconfigurations)
{
"name": "tx_collisions",
"oid": "MIKROTIK-MIB::mtxrInterfaceStatsTxCollision",
},
# Late collisions → almost always duplex mismatch / bad autoneg
{
"name": "tx_late_collisions",
"oid": "MIKROTIK-MIB::mtxrInterfaceStatsTxLateCollision",
},
# Aggregate collision counter (context)
{
"name": "tx_total_collisions",
"oid": "MIKROTIK-MIB::mtxrInterfaceStatsTxTotalCollision",
},
# Excessive collisions → persistent duplex problems
{
"name": "tx_excessive_collisions",
"oid": "MIKROTIK-MIB::mtxrInterfaceStatsTxExcessiveCollision",
},
# ==================
# Flow control (diagnostic)
# ==================
# Pause frames received (peer throttling you)
{
"name": "rx_pause",
"oid": "MIKROTIK-MIB::mtxrInterfaceStatsRxPause",
},
# Pause frames sent (you throttling the peer)
{
"name": "tx_pause",
"oid": "MIKROTIK-MIB::mtxrInterfaceStatsTxPause",
},
# Pause frames actually honored
{
"name": "tx_pause_honored",
"oid": "MIKROTIK-MIB::mtxrInterfaceStatsTxPauseHonored",
},
# ==========
# Stability
# ==========
# Link-down events (loose cables, bad SFPs, PoE power drops, reboots)
{
"name": "link_downs",
"oid": "MIKROTIK-MIB::mtxrInterfaceStatsLinkDowns",
},
],
},
],
},
},
},
},
}
# @metadata_reactor.provides(
# 'telegraf/processors/enum',
# )
# def tag_important_ports(metadata):
# # We only want a graph for important ports. We deem ports important if they have tagged VLANs configured.
# return {
# "telegraf": {
# "processors": {
# "enum": {
# f"mikrotik_port_mapping_{routeros_node.name}":{
# "tagpass": {
# "agent_host": [routeros_node.hostname],
# },
# "mapping": [
# {
# "tag": "ifName",
# "dest": "is_infra",
# "default": "false",
# "value_mappings": {
# port_name: "true"
# for port_name, port_conf in repo.libs.mikrotik.get_netbox_config_for(routeros_node)['interfaces'].items()
# if port_conf['mode'] == "tagged-all" or port_conf['tagged_vlans']
# if port_conf['type'] != "lag"
# },
# },
# ],
# }
# for routeros_node in repo.nodes_in_group("switches-mikrotik")
# },
# },
# },
# }

View file

@ -27,15 +27,15 @@ routeros['/system/identity'] = {
# for topic in LOGGING_TOPICS:
# routeros[f'/system/logging?action=memory&topics={topic}'] = {}
routeros['/snmp'] = {
'enabled': True,
}
routeros['/snmp/community?name=public'] = {
'addresses': '0.0.0.0/0',
'disabled': False,
'read-access': True,
'write-access': False,
}
# routeros['/snmp'] = {
# 'enabled': True,
# }
# routeros['/snmp/community?name=public'] = {
# 'addresses': '0.0.0.0/0',
# 'disabled': False,
# 'read-access': True,
# 'write-access': False,
# }
routeros['/system/clock'] = {
'time-zone-autodetect': False,
@ -55,7 +55,7 @@ for vlan_name, vlan_id in node.metadata.get('routeros/vlans').items():
'vlan-id': vlan_id,
'interface': 'bridge',
'tags': {
'routeros-vlans',
'routeros-vlan',
},
}
@ -68,33 +68,10 @@ for vlan_name, vlan_id in node.metadata.get('routeros/vlans').items():
'routeros-vlan-ports',
},
'needs': {
'tag:routeros-vlans',
'tag:routeros-vlan',
},
}
for port_name, port_conf in node.metadata.get('routeros/ports').items():
untagged_vlan = node.metadata.get('routeros/vlan_groups')[port_conf.get('vlan_group')]['untagged']
routeros[f'/interface/bridge/port?interface={port_name}'] = {
'disabled': False,
'bridge': 'bridge',
'pvid': node.metadata.get('routeros/vlans')[untagged_vlan],
'tags': {
'routeros-ports'
},
'needs': {
'tag:routeros-vlan-ports',
},
}
routeros[f'/interface?name={port_name}'] = {
'_comment': port_conf.get('description', ''),
}
if comment := port_conf.get('comment', None):
routeros[f'/interface/bridge/port?interface={port_name}']['_comment'] = comment
routeros[f'/interface?name={port_name}']['_comment'] = comment
# create IPs
for ip, ip_conf in node.metadata.get('routeros/ips').items():
routeros[f'/ip/address?address={ip}'] = {
@ -103,8 +80,7 @@ for ip, ip_conf in node.metadata.get('routeros/ips').items():
'routeros-ip',
},
'needs': {
'tag:routeros-vlans',
'tag:routeros-ports'
'tag:routeros-vlan',
},
}
@ -114,8 +90,7 @@ routeros['/interface/bridge?name=bridge'] = {
'priority': node.metadata.get('routeros/bridge_priority'),
'protocol-mode': 'rstp',
'needs': {
'tag:routeros-vlans',
'tag:routeros-ports',
'tag:routeros-vlan',
'tag:routeros-vlan-ports',
'tag:routeros-ip',
},
@ -127,7 +102,7 @@ routeros['/interface/vlan'] = {
'id-by': 'name',
},
'needed_by': {
'tag:routeros-vlans',
'tag:routeros-vlan',
}
}
@ -139,6 +114,6 @@ routeros['/interface/bridge/vlan'] = {
},
},
'needed_by': {
'tag:routeros-vlans',
'tag:routeros-vlan',
}
}

View file

@ -11,22 +11,24 @@ defaults = {
},
'smartctl': {},
'telegraf': {
'config': {
'inputs': {
'exec': {
'smartctl_power_mode': {
h({
'commands': [
f'sudo /usr/local/share/telegraf/smartctl_power_mode',
],
'data_format': 'influx',
'interval': '20s',
},
'smartctl_errors': {
}),
h({
'commands': [
f'sudo /usr/local/share/telegraf/smartctl_errors',
],
'data_format': 'influx',
'interval': '6h',
}
})
},
},
},
},

View file

@ -19,7 +19,7 @@ def users(metadata):
'allow_users': set(
name
for name, conf in metadata.get('users').items()
if conf.get('authorized_keys', []) or conf.get('authorized_users', {})
if conf.get('authorized_keys', []) or conf.get('authorized_users', [])
),
},
}

55
bundles/steam/README.md Normal file
View file

@ -0,0 +1,55 @@
# https://github.com/SirPlease/L4D2-Competitive-Rework/blob/master/Dedicated%20Server%20Install%20Guide/README.md
mkdir /opt/steam /tmp/dumps
useradd -M -d /opt/steam -s /bin/bash steam
chown steam:steam /opt/steam /tmp/dumps
dpkg --add-architecture i386
apt update
apt install libc6:i386 lib32z1
sudo su - steam -s /bin/bash
#--------
wget http://media.steampowered.com/installer/steamcmd_linux.tar.gz
tar -xvzf steamcmd_linux.tar.gz
# fix: /opt/steam/.steam/sdk32/steamclient.so: cannot open shared object file: No such file or directory
mkdir /opt/steam/.steam && ln -s /opt/steam/linux32 /opt/steam/.steam/sdk32
# installing the Windows deps first seems to be a workaround for x64?
./steamcmd.sh \
+force_install_dir /opt/steam/left4dead2 \
+login anonymous \
+@sSteamCmdForcePlatformType windows \
+app_update 222860 validate \
+quit
./steamcmd.sh \
+force_install_dir /opt/steam/left4dead2 \
+login anonymous \
+@sSteamCmdForcePlatformType linux \
+app_update 222860 validate \
+quit
# download admin system
wget -4 https://git.sublimity.de/cronekorkn/steam-workshop-downloader/raw/branch/master/steam-workshop-download
chmod +x steam-workshop-download
./steam-workshop-download 2524204971 --out /opt/steam/left4dead2/left4dead2/addons
mkdir -p "/opt/steam/left4dead2/left4dead2/ems/admin system"
echo "STEAM_1:0:12376499" > "/opt/steam/left4dead2/left4dead2/ems/admin system/admins.txt"
/opt/steam/left4dead2/srcds_run -game left4dead2 -ip 0.0.0.0 -port 27015 +map c1m1_hotel
cat <<'EOF' > /opt/steam/left4dead2/left4dead2/cfg/server.cfg
hostname "CKNs Server"
motd_enabled 0
sv_steamgroup "38347879"
#sv_steamgroup_exclusive 0
sv_minrate 60000
sv_maxrate 0
net_splitpacket_maxrate 60000
sv_hibernate_when_empty 0
EOF

View file

@ -1,8 +1,5 @@
from bundlewrap.utils.dicts import merge_dict
files = {}
svc_systemd = {}
directories = {
'/usr/local/lib/systemd/system': {
'purge': True,
@ -45,9 +42,6 @@ for name, unit in node.metadata.get('systemd/units').items():
else:
raise Exception(f'unknown type {extension}')
for attribute in ['needs', 'needed_by', 'triggers', 'triggered_by']:
if attribute in unit:
dependencies.setdefault(attribute, []).extend(unit.pop(attribute))
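# these four keys are bundlewrap item attributes, not unit-file sections, so they
# are popped off here before the remaining dict is rendered into the unit file below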
files[path] = {
'content': repo.libs.systemd.generate_unitfile(unit),

View file

@ -15,7 +15,7 @@ defaults = {
@metadata_reactor.provides(
'telegraf/inputs/exec',
'telegraf/config/inputs/exec',
)
def telegraf(metadata):
return {
@ -23,11 +23,11 @@ def telegraf(metadata):
'config': {
'inputs': {
'exec': {
'tasmota_charge': {
repo.libs.hashable.hashable({
'commands': ["/usr/local/share/telegraf/tasmota_charge"],
'name_override': "tasmota_charge",
'data_format': "influx",
},
}),
},
},
},

View file

@ -1,46 +1,19 @@
import tomlkit
def inner_dict_to_list(dict_of_dicts):
"""
Example:
{
'cpu': {
'default': {'something': True},
'another': {'something': False},
},
}
becomes
{
'cpu': [
{'something': True},
{'something': False},
],
}
"""
return {
key: [value for _, value in sorted(dicts.items())]
for key, dicts in sorted(dict_of_dicts.items())
}
import json
from bundlewrap.metadata import MetadataJSONEncoder
files = {
"/etc/telegraf/telegraf.conf": {
'owner': 'telegraf',
'group': 'telegraf',
'mode': '0440',
'needs': [
"pkg_apt:telegraf",
'/etc/telegraf/telegraf.conf': {
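# round-trip through JSON with MetadataJSONEncoder, presumably to flatten
# bundlewrap metadata types (sets, Faults) into plain values tomlkit can dump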
'content': tomlkit.dumps(
json.loads(json.dumps(
node.metadata.get('telegraf/config'),
cls=MetadataJSONEncoder,
)),
sort_keys=True,
),
'triggers': [
'svc_systemd:telegraf:restart',
],
'content': tomlkit.dumps({
'agent': node.metadata.get('telegraf/agent'),
'inputs': inner_dict_to_list(node.metadata.get('telegraf/inputs')),
'processors': inner_dict_to_list(node.metadata.get('telegraf/processors')),
'outputs': inner_dict_to_list(node.metadata.get('telegraf/outputs')),
}),
'triggers': {
'svc_systemd:telegraf.service:restart',
},
},
'/usr/local/share/telegraf/procio': {
'content_type': 'download',
@ -54,26 +27,9 @@ files = {
},
}
actions = {
'telegraf-test-config': {
'command': "sudo -u telegraf bash -c 'telegraf config check --config /etc/telegraf/telegraf.conf --strict-env-handling'",
'triggered': True,
svc_systemd['telegraf'] = {
'needs': [
'bundle:sudo',
'file:/etc/telegraf/telegraf.conf',
'pkg_apt:telegraf',
],
},
}
svc_systemd = {
'telegraf.service': {
'needs': ['pkg_apt:telegraf'],
'preceded_by': {
'action:telegraf-test-config',
},
'needs': {
'action:telegraf-test-config',
},
},
}

View file

@ -23,29 +23,26 @@ defaults = {
},
},
'telegraf': {
'config': {
'agent': {
'hostname': node.name,
'collection_jitter': '20s',
'flush_interval': '20s',
'flush_jitter': '5s',
'interval': '2m',
'collection_jitter': '0s',
'flush_interval': '15s',
'flush_jitter': '0s',
'interval': '15s',
'metric_batch_size': 1000,
'metric_buffer_limit': 10000,
'omit_hostname': False,
'round_interval': True,
'skip_processors_after_aggregators': True,
},
'inputs': {
'cpu': {
'default': {
'cpu': {h({
'collect_cpu_time': False,
'percpu': True,
'report_active': False,
'totalcpu': True,
},
},
'disk': {
'default': {
})},
'disk': {h({
'ignore_fs': [
'tmpfs',
'devtmpfs',
@ -55,60 +52,42 @@ defaults = {
'aufs',
'squashfs',
],
}
},
'procstat': {
'default': {
})},
'procstat': {h({
'interval': '60s',
'pattern': '.',
'fieldinclude': [
'cpu_usage',
'memory_rss',
],
},
},
'diskio': {
'default': {
})},
'diskio': {h({
'device_tags': ["ID_PART_ENTRY_NUMBER"],
}
},
'kernel': {
'default': {},
},
'mem': {
'default': {},
},
'processes': {
'default': {},
},
'swap': {
'default': {},
},
'system': {
'default': {},
},
'net': {
'default': {},
},
})},
'kernel': {h({})},
'mem': {h({})},
'processes': {h({})},
'swap': {h({})},
'system': {h({})},
'net': {h({})},
'exec': {
# h({
# 'commands': [
# f'sudo /usr/local/share/telegraf/procio',
# ],
# 'data_format': 'influx',
# 'interval': '20s',
# }),
'pressure_stall': {
h({
'commands': [
f'sudo /usr/local/share/telegraf/procio',
],
'data_format': 'influx',
'interval': '20s',
}),
h({
'commands': [
f'/usr/local/share/telegraf/pressure_stall',
],
'data_format': 'influx',
'interval': '10s',
}),
},
},
},
'processors': {},
'outputs': {},
},
'grafana_rows': {
'cpu',
@ -126,21 +105,21 @@ defaults = {
@metadata_reactor.provides(
'telegraf/outputs/influxdb_v2/default',
'telegraf/config/outputs/influxdb_v2',
)
def influxdb(metadata):
influxdb_metadata = repo.get_node(metadata.get('telegraf/influxdb_node')).metadata.get('influxdb')
return {
'telegraf': {
'config': {
'outputs': {
'influxdb_v2': {
'default': {
'influxdb_v2': [{
'urls': [f"http://{influxdb_metadata['hostname']}:{influxdb_metadata['port']}"],
'token': str(influxdb_metadata['writeonly_token']),
'organization': influxdb_metadata['org'],
'bucket': influxdb_metadata['bucket'],
},
}]
},
},
},

View file

@ -20,15 +20,11 @@ def authorized_users(metadata):
users[name] = {
'authorized_keys': set(),
}
for authorized_user, options in config.get('authorized_users', {}).items():
for authorized_user in config.get('authorized_users', set()):
authorized_user_name, authorized_user_node = authorized_user.split('@')
authorized_user_public_key = repo.get_node(authorized_user_node).metadata.get(f'users/{authorized_user_name}/pubkey')
for command in options.get('commands', []):
users[name]['authorized_keys'].add(f'command="{command}" ' + authorized_user_public_key)
else:
users[name]['authorized_keys'].add(authorized_user_public_key)
users[name]['authorized_keys'].add(
repo.get_node(authorized_user_node).metadata.get(f'users/{authorized_user_name}/pubkey')
)
return {
'users': users,
}

View file

@ -44,7 +44,6 @@ defaults = {
@metadata_reactor.provides(
'wol-sleeper/mac',
'wol-sleeper/wake_command',
)
def wake_command(metadata):
@ -54,8 +53,7 @@ def wake_command(metadata):
return {
'wol-sleeper': {
'mac': mac,
'wake_command': f"ssh -o StrictHostKeyChecking=no wol@{waker_hostname} '/usr/bin/wakeonlan {mac}' && while ! ping {ip} -c1 -W3; do true; done",
'wake_command': f"ssh -o StrictHostKeyChecking=no wol@{waker_hostname} 'wakeonlan {mac} && while ! ping {ip} -c1 -W3; do true; done'",
},
}

View file

@ -6,25 +6,17 @@ defaults = {
},
}
@metadata_reactor.provides(
'users/wol/authorized_users',
'users/wol',
)
def user(metadata):
return {
'users': {
'wol': {
'authorized_users': {
f'root@{ssh_client.name}': {
'commands': {
'/usr/bin/wakeonlan ' + sleeper.metadata.get('wol-sleeper/mac')
for sleeper in repo.nodes
if sleeper.has_bundle('wol-sleeper')
and sleeper.metadata.get('wol-sleeper/waker') == node.name
}
}
for ssh_client in repo.nodes
if ssh_client.dummy == False and ssh_client.has_bundle('ssh')
f'root@{node.name}'
for node in repo.nodes
if node.dummy == False and node.has_bundle('ssh')
},
},
},

View file

@ -59,9 +59,9 @@ defaults = {
},
},
'telegraf': {
'config': {
'inputs': {
'zfs': {
'default': {},
'zfs': [{}],
},
},
},

View file

@ -8,14 +8,14 @@ KHyP5XgRU/pIOyOo3g6+qIkhgynHVYIBuPbFQGEbOuUg7noAwTC9B9pYXSRFq9wk
T/q8rqOBiyO9SWB9gMiem8HNAzUo5TbVp9xPv2pl3mNXwe5te92pjlWdktOsBZuy
TfTgoj3y0HUY48He/z85aJ5j7gX5PU/6arxdABEBAAG0UGRldmVsOmxhbmd1YWdl
czpjcnlzdGFsIE9CUyBQcm9qZWN0IDxkZXZlbDpsYW5ndWFnZXM6Y3J5c3RhbEBi
dWlsZC5vcGVuc3VzZS5vcmc+iQE+BBMBCAAoBQJodLPOAhsDBQkMCLQ6BgsJCAcD
AgYVCAIJCgsEFgIDAQIeAQIXgAAKCRDkVq5yhW0Udi/iB/9pzVWeChRvk7+bC2p3
QXjc+KRmkev7yC3QglBX/17qDG+nW/z1SptFpIUKMllH/xu0GXIWOW/rxshRdKRK
422wnT7KA2AqxArsHfvu0/nGBXAI1DnHuwP0j6xNmmw+uob2nWiUZZNgKydxcGSF
fgRfIJcsHBKweasy9G/Fpdur/BFSBNQ8BP6CnB9qx0Z1LgQ6bQQNY1LKH4EzmiNA
rBowUcuVjUzXUW8rc0Old/ffymH3TBM9xQXnsGVZb5+E6NKpcdt0lnWkrtHQK3RX
ohNmaLwMQe/wMzWN3u/5XshQD8mMQjxEg4QSt2gAEXJdIzI+VgLrGqcfbrk/qhVM
D+c+iEYEExECAAYFAmCKr5QACgkQOzARt2udZSNdFQCgtpRzGoKr9VWnhv+/k4pk
dWlsZC5vcGVuc3VzZS5vcmc+iQE+BBMBCAAoBQJkq9RAAhsDBQkIP9SsBgsJCAcD
AgYVCAIJCgsEFgIDAQIeAQIXgAAKCRDkVq5yhW0UdsH4CACAMuwfsUTlUVmdMBw5
wktrrdwfwN6TiG5tPDjzTcMQNL+RSCh1gNRvaJjNHAy9sAsruGwTyX76K1p942EG
F99DrYd/PMBK4oOWe7HHouYIMrLqZFT38shv/tbyJvUfxqfMHSPQJSFPVGtInn3h
iKtDeIc88Hl+dsmBhWxDdaoHTGKgIcQTLN1OaX6SsT6WuMo7B4kPxHerwFp/n5bO
hqyLLkTY0oxJpZlzCj2tYDytHhjkPnYtcPpQ8LnQpGKogUxYDYZ+o4zYvIcT/J5+
cLx1xpf4fI7ZoE+dpIpAGKzN8MoQQ+fjgSheXar35p+8lOKrvrk7MmbQJlBQO+rM
IHdJiEYEExECAAYFAmCKr5QACgkQOzARt2udZSNdFQCgtpRzGoKr9VWnhv+/k4pk
Cmp9fycAn0pdJ2xIEsqxOjPBFVDh7Sahecuq
=v2my
=yIwD
-----END PGP PUBLIC KEY BLOCK-----

View file

@ -9,16 +9,16 @@ Dts4PNx4Wr9CktHIvbypT4Lk2oJEPWjcCJQHqpPQZXbnclXRlK5Ea0NVpaQdGK+v
JS4HGxFFjSkvTKAZYgwOk93qlpFeDML3TuSgWxuw4NIDitvewudnaWzfl9tDIoVS
Bb16nwJ8bMDzovC/RBE14rRKYtMLmBsRzGYHWd0NnX+FitAS9uURHuFxghv9GFPh
eTaXvc4glM94HBUAEQEAAbQmR3JhZmFuYSBMYWJzIDxlbmdpbmVlcmluZ0BncmFm
YW5hLmNvbT6JAdQEEwEKAD4CGwMFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AWIQS1
Oud7rbYwpoMEYAWWP6J3EEWFRQUCaKhvPQUJB4NP1AAKCRCWP6J3EEWFRUjOC/9Y
dWOWJLJVKzLx8uv5YVzebyw15HevhKahbznJX5fHnE8irjkiPFltVEZ4T37s5afR
GBEJnR1UFd80s7jzwbuoZh/zEB3jN8q50g64AznuzDa0PWKzaY7Tgkssx3+hs6TS
vIwV4z8T7f56lDudeHxHXx+htRnZ3ebKNPCJS7+G12GF6W3C3znpdjgvhVUB0uxd
+42V0fRqk2GLNZeKS9988fi5dYRAy9Ozwced7ByCFjde9FBgUtrH3mG1/ibzLEh0
4k02nYjc8mrH32t4UCWpxQEJ1vZA2vT2HN3/cH/4uyFdyU6OHkMyMbz6lmeXe71d
F5hOB4+/RP6Ndyj7ViRNDbm70NRBaFne/+YOJvmMfJTCh7YbF5qEn1ihGkJJ0ohE
u2IB+EGEhyiDm8SIsj1uMw7n17iIPNtbsU5GgnmLtfguP/WbwKV2UeuxTpiOeYb6
blDwRlh48uHMlA5HBW+487Jktw3iPj1IKhdtAC9CU3xAvzDcseMbgmM6Xj2bSQG5
YW5hLmNvbT6JAdQEEwEKAD4WIQS1Oud7rbYwpoMEYAWWP6J3EEWFRQUCZOeGaQIb
AwUJA8JnAAULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRCWP6J3EEWFRUiADACa
i+xytv2keEFJWjXNnFAx6/obnHRcXOI3w6nH/zL8gNI7YN5jcdQT2NYvKVYTb3fW
GuMsjHWgat5Gq3AtJrOKABpZ6qeYNPk0Axn/dKtOTwXjZ4pKX3bbUYvVfs0fCEZv
B0HHIj2wI9kgMpoTrkj22LE8layZTPOoQ+3/FbLzS8hN3CYZj25mHN7bpZq8EbV3
8FW9EU0HM0tg6CvoxkRiVqAuAC0KnVIZAdhD4dlYKuncq64nMvT1A5wxSYbnE+uf
mnWQQhhS6BOwRqN054yw1FrWNDFsvnOSHmr8dIiriv+aZYvx5JQFJ7oZP3LwdYyg
ocQcAJA8HFTIk3P6uJiIF/zdDzocgdKs+IYDoId0hxX7sGCvqdrsveq8n3m7uQiN
7FvSiV0eXIdV4F7340kc8EKiYwpuYSaZX0UWKLenzlUvD+W4pZCWtoXzPsW7PKUt
q1xdW0+NY+AGLCvSJCc5F4S5kFCObfBAYBbldjwwJFocdq/YOvvWYTPyV7kJeJS5
AY0EZOeGaQEMALNIFUricEIwtZiX7vSDjwxobbqPKqzdek8x3ud0CyYlrbGHy0k+
FDEXstjJQQ1s9rjJSu3sv5wyg9GDAUH3nzO976n/ZZvKPti3p2XU2UFx5gYkaaFV
D56yYxqGY0YU5ft6BG+RUz3iEPg3UBUzt0sCIYnG9+CsDqGOnRYIIa46fu2/H9Vu
@ -27,15 +27,15 @@ D56yYxqGY0YU5ft6BG+RUz3iEPg3UBUzt0sCIYnG9+CsDqGOnRYIIa46fu2/H9Vu
3zht8luFOYpJr2lVzp7n3NwB4zW08RptTzTgFAaW/NH2JjYI+rDvQm4jNs08Dtsp
nm4OQvBA9Df/6qwMEOZ9i10ixqk+55UpQFJ3nf4uKlSUM7bKXXVcD/odq804Y/K4
y3csE059YVIyaPexEvYSYlHE2odJWRg2Q1VehmrOSC8Qps3xpU7dTHXD74ZpaYbr
haViRS5v/lCsiwARAQABiQG8BBgBCgAmAhsMFiEEtTrne622MKaDBGAFlj+idxBF
hUUFAmiobzkFCQeDT9AACgkQlj+idxBFhUVsmQwA0PA/zd7NqtnZ/Z8857gp2Wq2
/e4EX8nRjsW2ZlrZfbU5oMQv9OZZ4z1UjIKEUV+TnCwXEKXTMJomdekQSSayVVx/
u5w+0YM8gRuQGrG8hW0GRR8sHIeuwBFlyQrlwxUwXvDOPDYyieETjaQqMucupIKo
IPm3CjFySvfizvSWUVSWBnGmQfpv6OiGYawvwfewcQHUdLMgWN3lYlzGQJL4+OMm
7XcB8VNTa586Q00fmjDfktHYvGpmhqr3gsd4gS3AjTk0zI65qXBRJkdqVnwUrMUD
8TcxXYNXf90mhR0NWkLmp6kBYiW8+QY6ndMmRVpodg1A87qgMYaZUAAlxCS4XKTU
r+/YMDYOWgLN6i4UeYG/3/hsnAEHm5ITojfh6cLfdlhjohFTnD0IYw3AsNJXRzKB
1g5FTBKLLLIdXgS/3rWV1qjAd3drQVIMCku6HKl/vT4ftrBHeSyV7eLwOYbe3/bw
8VMx+lmMheD8/qJMia1om0iBBRSXRjY//f+Lllqm
=TH3J
haViRS5v/lCsiwARAQABiQG8BBgBCgAmFiEEtTrne622MKaDBGAFlj+idxBFhUUF
AmTnhmkCGwwFCQPCZwAACgkQlj+idxBFhUUNbQv8DCcfi3GbWfvp9pfY0EJuoFJX
LNgci7z7smXq7aqDp2huYQ+MulnPAydjRCVW2fkHItF2Ks6l+2/8t5Xz0eesGxST
xTyR31ARENMXaq78Lq+itZ+usOSDNuwJcEmJM6CceNMLs4uFkX2GRYhchkry7P0C
lkLxUTiB43ooi+CqILtlNxH7kM1O4Ncs6UGZMXf2IiG9s3JDCsYVPkC5QDMOPkTy
2ZriF56uPerlJveF0dC61RZ6RlM3iSJ9Fwvea0Oy4rwkCcs5SHuwoDTFyxiyz0QC
9iqi3fG3iSbLvY9UtJ6X+BtDqdXLAT9Pq527mukPP3LwpEqFVyNQKnGLdLOu2YXc
TWWWseSQkHRzBmjD18KTD74mg4aXxEabyT4snrXpi5+UGLT4KXGV5syQO6Lc0OGw
9O/0qAIU+YW7ojbKv8fr+NB31TGhGYWASjYlN1NvPotRAK6339O0/Rqr9xGgy3AY
SR+ic2Y610IM7xccKuTVAW9UofKQwJZChqae9VVZ
=J9CI
-----END PGP PUBLIC KEY BLOCK-----

View file

@ -1,99 +1,29 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQINBGPIEtQBEADSkkGhaEytmsAzvHtUn/1/wIW5RTp6tHWlEsz3b2iZ3LEpNlfe
EqfUiK88edtEFgmioozHif2ZBRj2pyV2gckPmXna2b0UOefAAibMSTYXwhUQRgw4
DNbecJk6J3HfcsXBVO4jGcR98UCVmpslZkqax1b/q+ju5BGA1PBHZZqGyooVWdv2
5fmJ6ZPdMWKr6lyCVbMKU3Z3zzsWlsqsA1aadNbwsg1vPHemVwGiI1esQFZo2ltS
K37Ar9hJSMreVeU5k0Vrg5rWaQnNEjcpVJQMHapMxTG3RZzZrl6jMVCFKia4JWPk
LBcPL4GP6qlHxLng/lv+6uullddv8dMxFwr8uClyvyoJcTjL78RMFG5+6AqK8v89
Xy2BpQfOWnlBC492+X7wEAZX9zVhRg1cqZKn9l3YkIf1tQnSXu7S4oqLRsc/53rw
QuD2YxyIbDEG5vYBrQouL6cgasRGYpzDak9qEOrtuckWZAZc89VxK3jJ9S5MxLha
t55FNC6rhx0kLu5tK6RvsExp6bomUDfPWOUUoyJsVXqWi7A57nm2zFfLkaFYDXaX
ijgfTsahvkI6BxVJ0QJTEOyx/ymURcelbfDAez6Mx6mDXD4kmsYoa/IXBPPvHwbK
MdDZm5kyB0eyWpubAKvLGESe093xUQq9Sy77R/vZ78CXUvLL/udOfjm+QQARAQAB
mQINBGPIEycBEACpG4qSjhxA6fh4QJVJxFVBvCFt9tVx/hDbKH0Ryy9iilyMeReC
AS1/CZnSv/fhDNKmVPckf6on72z/ODwZcVfMV6DHkxmZ6x/tQrS6CWfKkupsON2H
KS3t4HUivahwHPlWtbfDqsWNwTAsZqklKpJQWY2ADPwurkbCmtYSjsgbLuWe23Pd
nJpLTHtlChM0ntW/l7Le1zYjGPUGoxMJgjg1YG8fi2l/zS0Of8bdQ26ps+WRvrSQ
RKhfAkfIgUiCXxBpDlN1spN73ZlAkaSb+myTfEKyJR55Yt9pHfkDdJh26RVgE1+N
GuLmm6oidaD9lTlNJ9P8wlLzoof3xJXYprgLLz/HmgtawnJ+DxFIXoXNNpUmhORJ
6Hb2Z5IKIyGIwXhQVe2Lw7B8awBNV99zUw517Wuax3RYx7Hwhntz9gFxS4GRxaCo
uLCFQ0AgDCkMHyEHufQo1XdjIB7fz6U551y5GMQw6/rjMnUM9ZI68SQ/FWou2cQf
533PyayvWOYQM4pP7ZmbzyCd393XlMaPWA5dyUOqv7Vcmv0IsAbncX6/KJmZAhKG
qu19xb6rv3ab2RbcU422guK3C/h/URPZJbSjf2w4jUV5UDe2veZg6BEVn7Sk5bW0
ceX8n0GVbPNG7CvRduJPjXNzsz3FzmUS8QFFde3H5gl1T0f6GcfhmKgKEQARAQAB
tDdJbmZsdXhEYXRhIFBhY2thZ2UgU2lnbmluZyBLZXkgPHN1cHBvcnRAaW5mbHV4
ZGF0YS5jb20+iQJOBBMBCgA4FiEEJMl1y6YaAk7htjF4fD1XFZ/C+ScFAmPIEtQC
GwMFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AACgkQfD1XFZ/C+SekJQ/9HPftk2YP
PZgWUVOiFswKORLSp6REycxUFzl8vliHfkglR+FmCGeNJdB+Aw14kKzHXPh1RZ8p
ghlwl4oirXsiqOFGVtHS/4ne1mGpk5bw8R/pGWwrtIUEUtQULRHshUL4T2FcBJwt
RdeJbZAyRKnnw9Ub1CtT02RyQsPCkFJIjQpTyZwRBrk4Z/Br9z12cQLrZXCOxmhw
lWvbC0Bn8EeJAk35xYHHJuK+eJx+lnstxl5c+5qZg+z5X0lXjg22vFwiYvJ7bxjH
cwG8QSDVkUsyqLIsLwz+Y1Rb404Pq9tWg0dN8hdDa6kV4pi0L3rx5PJMZb/ufkR+
9gBUV6dOYWbzmDHhMe89xKUeNBRV4AZ9no8QtB0s5PzUNB2EB1m94R/W+dQGt8ZP
Q9tn1kd+SqqbzOWHgxr7o7BvFfU3wNrc1MwMBTOiYVlCgJFUc2gvOV2Vs09OBRsG
uZEBS0xpoXemAnp54YazKKYqgiyWNZNIboWVzN5YXatXv5jc3pFwYPP2FGy9VEj0
HvZh+GaAs62vrBcNi6aj4LqHeuv7gWEcVrMWeGaQcxGpr+MESh0W1ryS+DaW+g00
6VN7SOhsygcBU2NyxUNjwqZ7/YXjtjaHnC19rHFc5C1Ny2OfTAS+vU+1WSLZ3fih
kpWlWICNP6CppJ663egz8arvDjnQEeHSSxa5Ag0EY8gTJwEQAKkbipKOHEDp+HhA
lUnEVUG8IW321XH+ENsofRHLL2KKXIx5F4IBLX8JmdK/9+EM0qZU9yR/qifvbP84
PBlxV8xXoMeTGZnrH+1CtLoJZ8qS6mw43YcpLe3gdSK9qHAc+Va1t8OqxY3BMCxm
qSUqklBZjYAM/C6uRsKa1hKOyBsu5Z7bc92cmktMe2UKEzSe1b+Xst7XNiMY9Qaj
EwmCODVgbx+LaX/NLQ5/xt1Dbqmz5ZG+tJBEqF8CR8iBSIJfEGkOU3Wyk3vdmUCR
pJv6bJN8QrIlHnli32kd+QN0mHbpFWATX40a4uabqiJ1oP2VOU0n0/zCUvOih/fE
ldimuAsvP8eaC1rCcn4PEUhehc02lSaE5EnodvZnkgojIYjBeFBV7YvDsHxrAE1X
33NTDnXta5rHdFjHsfCGe3P2AXFLgZHFoKi4sIVDQCAMKQwfIQe59CjVd2MgHt/P
pTnnXLkYxDDr+uMydQz1kjrxJD8Vai7ZxB/nfc/JrK9Y5hAzik/tmZvPIJ3f3deU
xo9YDl3JQ6q/tVya/QiwBudxfr8omZkCEoaq7X3Fvqu/dpvZFtxTjbaC4rcL+H9R
E9kltKN/bDiNRXlQN7a95mDoERWftKTltbRx5fyfQZVs80bsK9F24k+Nc3OzPcXO
ZRLxAUV17cfmCXVPR/oZx+GYqAoRABEBAAGJBHIEGAEKACYWIQQkyXXLphoCTuG2
MXh8PVcVn8L5JwUCY8gTJwIbAgUJBaOagAJACRB8PVcVn8L5J8F0IAQZAQoAHRYh
BJ1TnZDTMo3H1sjTudj/jh99+LB+BQJjyBMnAAoJENj/jh99+LB+Ti8QAJLJw0Uq
AGxio0ejT7jYrf56NMIYnIp9VdlHYQQyJP8/WyiQHq0w+mxNy+3RkfUscI5hqhHv
/UWoPAbNiy18qeVsivnGkCwegPVvQyE18j3YHW4TWN6pjirSu/5DMeLUMJcVm6eP
KDDwJF2aF/xBUgF8ctFYxvThwG2FnRiBq3P1pdp2D9FAIPHGtmkVJs+yuO9NonA8
7YDCu0r4buisQhDNpvEJFPXaTb0Jo4Q3Xg6db2IVVdCr1K1VgEE4oG8wLDW8e8u1
hdD3I/pG7DgP40/y3QFleq18Sts0SUemIoOO79h/xHCA9xlIppSs3yNu/5n8M6J7
ar2vvzq34LmR68Wenw9ErmaVZpOdjGlGDWCcqefhFfl6Kvn1H93zVWt+FSyrQrsW
or2OwTrDXyijeCmfqYyN182B3R+E5NajJvSd4X504MPgVaAqKsWrqbMGqpyTPMCg
H/LteOBA9rKm/yZvWqrttHIBiCnlkqbMVC/KqwA1jlbJV24yGJ3byMPe7KvqUoc3
lMlV6duOuFblLWCVAsDUpuFoRe7hrmN6dcjn/vGpZbVMA5mqvkLdLbl+8B+7h5Bt
gyRobmrc+spaikIoyffgAvMCqTWDJGP240xw23CzI42i2A2lNQibr8xTK1XefCJz
z4iitOlixvElDvAdjaB3OXLngZhY95c6+tVydcAP/2DmBeCml5dNDdG+aEaP5ieL
FIZq9ex8gY3GYaoC4x0nZs+o6H4yBzdyKZPk2NoPB4yOKLb2FpOTMYtH4ekUgEYV
CKiyu8n8G48j8anYYFsH2l6K3imkiMUrNL0LqVNRk+gbLh1uRQs96TXBT0bgv0Ed
WBee8rjCpsx3ZIBQX7UsJfKLJFjjMiXPXjWjHDb5RRyyJ/qjWFZ/cdoUpRCJtnSR
bd21ho3uHsFuJgNy3OXYhsvc5xTafdKYQcWvyU9MvNLnLkyVCY2U9sUIL8H4QqcE
AoeUIMT7QjN1uCxx2DaiS5mtgvf6Lzs19FQmxVql9DgD/d2BpI6v4e/A/UPGlP22
ho+gu70J/z1zQGiwcC3J02wofzby4UZjyRT4QaKMA8s+R9L3L4kyejWBTI02lunR
fzhisvu3UKXKnoWDZ0msRrPdMCZFgf6C2DYJa8kK3iqaS2Xjzt2Fert8nT1dp003
wQbxZ7+Takb62meVSUxo5NwKCF3f2PgkgZ+Dbj80Jtp0KEiOpRquUmf1+8bCiGl6
LCfZp8OZLeZ5GhUanyJjy41Kc3yi7FwyUQt4qMI5reAeEvFks9BjUc4O9Ke2JUQn
nzJFOkWza20F9abgR7vpI0XbXeJnlhokw7QU1Kj8BBkwpn13BRgucaJrHnKf4WoN
mkkO7wkTEAhz6IuBGjMguQINBGhwK6ABEACtsIMXIPGPeNXXxnY7Uh+i28Stamec
5WJ8KSQ3CIAl4J/mXujhnwmfDrpB/Y12V5Bg/1DeCjCT1iKBiUT2JbhcTkUQnatZ
XF41ChnbK7LTjwbLTCoOo0OJJzDe0QNZ7CLXSk6Dv0gfBpe7KpZOXLVfGuqhrnrv
Ta1qgz5TeBermbRLUVin7R7oyibqPoHWELNjXDokm8Ub0f3tw3mE6I/D1TIkNTpf
covBR6Ss9/qheh4nJEnl7SmNrQbK1pMdtLKKd/0i5EgUlAw22Ygj6xa/aeUCjcG6
4pBv/xWFtxLyy+WVBIyfM+lo9KRKJ2pld8DAYBxiPCKpQMWpfS1fwHYRvEFn/DvC
JFS/Md0nUHkSooCKkbeNchs93gtCJOyuJ/RBnPKrpIPU5DglBqZxFvw3R7Jihx8n
6F5ZztOzqV1IngObsfs8SBtm3nUrhn6Hg1YkfT3XYDsCugZoMLfpfrwKqmdGBA2J
E27g80Ot32uTy9GN6uBdrQp01n+Coo1EyiH5bY5nMSAOfN2IygzlkEZJXuXPzmHP
wJUg0JCdqxSFkU2JdyCPGbQyoyplbMyd48PlEDHUcoV1CsW2rdIrbhYYFl1lVetI
gLYCrZi+xZpmH8Tby60hk9d5+s5AHLSKLbsuMgmF7oGeXAs7ZUQi07PHaEOjeL0L
jfsK507DwMn9bQARAQABiQRyBBgBCgAmFiEEJMl1y6YaAk7htjF4fD1XFZ/C+ScF
AmhwK6ACGwIFCQaguoACQAkQfD1XFZ/C+SfBdCAEGQEKAB0WIQSsENdEnzQ63O/d
wrbaYcJqBYW9OwUCaHAroAAKCRDaYcJqBYW9O1hEEACQ8/5ThIjjhvS6ZkIzTab4
e5/qj3tUpUFJH82wsf1E55U1bTEPOj82avXTQLWK6f+hn2YABc03j89URBytA99k
T400y/vjWPSNMm8w++VzGMIiPer7soKMZGGFYfsAClkk5VZ3o9bCFy9ROG98q5fr
MpYkXUJmvJvoZVDRdbhW+RE37g0g5i89EtFvF6JSEpzaPs7pqJUsXK02R65lT5qy
YVJxUqVWCpVaazsj37Inh0zeERRlGnmfPCOfbycFv0AQBwEwY0IGdb3/7vmKSKBb
5LRoiv/yO76NJWDbPXB7AqW5DPtBdU/j8x7YrI8GgM/4ADvBkuxSbReKGCxlbOeX
KEf+SBGbPjtY35hRST14TUmOryaFek/f39nYiFByXiPvjcoOfH1dZZOnj+BwW4AP
LSSFjUTBMZbTkvGfOQlThiX7y2H0J4LKXp5j1FGzQGel0Ij+sO09vDQ1iQ/bXvq6
ZGAVVR4HMOLoEC/YI4WbnONZdtpmUUykvtO5zN4lBEseION83kmxPyvVRAtd0ctq
IDIxGdnToaYQCsYOf0lsqk+KdI81xskUgegqEBjGcRq6ShhKNHTJiJVFNeIm+XA7
I5Kl468xtyEeDUsrUJzW9zs7G+XCGEjx5Jq++4hSRu96dfEaS/Kz3O3SWIxmKc5u
Lq3gKEm2O/QZY3sDK4DVNfSoD/98Gb+YCcvnDB+1tBeDiLOp1/2EG2WXkmnkiHBD
0Ifw72miBhwpBmjlBZTdyfjZkvcs5Ga2H7Dfw6EAMJlpV0Wt4z3170W6va81I/Av
mGUPknVHyExez9vqcJWCCQ/xKvrQ25EoTpAzkueEWzuVruTQkc0vuUlOM/gGUbxS
uhbyiH6AVrJ/6QgZw/iSdcTHG50976bSjHpLUQaTiMXglx+wuMb8ECck4cT18FEF
6sle/4MTtLYcm6IJbFk5pVGmJSpTlSCArWqyKG8d8Lqe/XolB2+YJf/JF53ariL8
AHoyCEBUAUnYqEbjSKMUWvIXdIeezvhTk7VVRN3ssdfUBX1/t1R46rz0oiP/9Bol
AKXupdNIwxWCCABDxWVkR6PtWo3mqGhbh3MqXdElpdD84BDahvEU/mIzZkln6Bhr
CnKRBt4mxHVs/jUR5IOBX5foEKJM3GFU+OQ0bi8gzYxfoF6mfO42Km2RvrqHpBJa
OtMEDP2hvCZi+O9WZYNna74u4IFEIpULAeoU3e/7B4DJNOtHTPRmdwpHrZBszrz7
tYTqYmqG1DjiGieki6Byvw0/9XIYoHZBuLu51iHj3WwOeuYj2Gi5boq+YXumW9xQ
gUgyWu8M6XFa4UCzpx50Zay/BWA9LsRxUUpDNCjysg4gDuw9WflUI+LdlrvKFBX9
AHMnjg==
=UiOS
ZGF0YS5jb20+iQJVBBMBCAA/BgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAUJBaOk
/BYhBJ1TnZDTMo3H1sjTudj/jh99+LB+BQJjyB9PAhsDAAoJENj/jh99+LB+klgQ
AKOKdwTyKOr6+mnRrACz5U3EFxfAXXFGan9Ka7Nzgz4K+FOnTtT1gWwqrPPmTKQk
epNUMcelfX1kCA08yCm0nyw2niqxES40W33ergKUj6jlDx7UQYXWsDQGD9IKksa8
MWfZlJ3zlrsGKXA4oa+kfY+vltWDVP8WhLcQzm2LywbKvr3WgY80GZbnRjoekiBK
oMKztQVMJG5yNZBo9B4JrqB3wMpnXZxEtqZcBPsJJdXTFKHsQ7kB9TMNorbUvDNH
ohwsprgMw84vHikEk9jyCypXpYq/E/wvkM0CeIUJ36S2vGvACib7BiY6Xv0BQbM4
rWq2Rrjag1y5vVAF9gJkeo/3rhM6lE1ahDCRq0QcBMVzbxiE+3COIzRPmz14J3Yn
0pkvzlVkNj5UZR8q91ESl+UxkFCP1wzcXgs0dpJWirQIOZ9E2eYv3LcjE68xjW1k
c5q1GOGvJI7aXADxUZ4lFbz+NUb4Ts4HXHc8gV1Gm0vvmIqv2YfAvL5DXbKLdZxh
73CxKvBMmTXIEQ+vQJ3p1ZnUnb+l6DoxEFWg/hXHmE5jY3P6HIVFdliXF5FEs1lr
9snU2Pn1BDL+TBN7SX0QbKqArWA4qyn6eGH8Z1ULoUVBPCjwC9QuInp/9fqifFYo
OM3A51MDGyc/HCVG6jNJEI5h71QGHlPfyQybpjy7rQSe
=YwXc
-----END PGP PUBLIC KEY BLOCK-----

View file

@ -8,21 +8,16 @@ from(bucket: "${bucket}")
|> filter(fn: (r) => exists r["${exist}"]) // WTF
% endfor
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) // aggregate early for best performance
% if over is not None:
|> filter(fn: (r) => r._value > ${over})
% if minimum:
|> filter(fn: (r) => r._value > ${minimum})
% endif
% if function == 'derivative':
|> derivative(nonNegative: true)
% elif function == 'difference':
|> difference(nonNegative: true)
% endif
% if boolean_to_int:
|> map(fn: (r) => ({r with _value: if r._value == true then 1 else 0 }))
% endif
% if negative:
|> map(fn: (r) => ({r with _value: r._value * - 1.0}))
% endif
% if multiply is not None:
|> map(fn: (r) => ({r with _value: r._value * ${multiply}}))
% endif
|> yield(name: "mean")

View file

@ -51,7 +51,7 @@
},
'min': 0,
'soft_max': 3,
'display_name': '${__field.labels.resource}',
'display_name': '__field.labels.resource',
'unit': 'percent',
'tooltip': 'multi',
'legend': {

View file

@ -16,7 +16,7 @@
},
},
'unit': 'Bps',
'display_name': '${__field.labels.name}',
'display_name': '__field.labels.name',
'tooltip': 'multi',
},
'write': {
@ -36,7 +36,7 @@
},
},
'unit': 'Bps',
'display_name': '${__field.labels.name}',
'display_name': '__field.labels.name',
'tooltip': 'multi',
},
}

View file

@ -17,7 +17,7 @@
},
'tooltip': 'multi',
'unit': 'MHz',
'display_name': '${__field.labels.cpu}',
'display_name': '__field.labels.cpu',
'min': 0,
},
# 'temperature': {
@ -57,6 +57,6 @@
},
'tooltip': 'multi',
'unit': 'degrees',
'display_name': '${__field.labels.chip}',
'display_name': '__field.labels.chip',
},
}

View file

@ -13,7 +13,7 @@
},
},
'unit': 'Bps',
'display_name': '${__field.labels.interface}',
'display_name': '__field.labels.interface',
'tooltip': 'multi',
},
'out': {
@ -30,7 +30,7 @@
},
},
'unit': 'Bps',
'display_name': '${__field.labels.interface}',
'display_name': '__field.labels.interface',
'tooltip': 'multi',
},
}

View file

@ -12,7 +12,7 @@
'function': 'max',
},
},
'display_name': '${__field.labels.queue}'
'display_name': '__field.labels.queue'
},
'size': {
'stacked': True,
@ -27,7 +27,7 @@
'function': 'max',
},
},
'display_name': '${__field.labels.queue}'
'display_name': '__field.labels.queue'
},
'age': {
'stacked': True,
@ -42,6 +42,6 @@
'function': 'max',
},
},
'display_name': '${__field.labels.queue}'
'display_name': '__field.labels.queue'
},
}

View file

@ -9,11 +9,11 @@
'cpu_usage',
],
},
'over': 0.2,
'minimum': 0.2,
},
},
'unit': 'percent',
'display_name': '${__field.labels.process_name}',
'display_name': '__field.labels.process_name',
'legend': {
'displayMode': 'table',
'placement': 'right',
@ -32,11 +32,11 @@
'memory_rss',
],
},
'over': 10*(10**6),
'minimum': 10*(10**6),
},
},
'unit': 'bytes',
'display_name': '${__field.labels.process_name}',
'display_name': '__field.labels.process_name',
'legend': {
'displayMode': 'table',
'placement': 'right',

View file

@ -10,11 +10,11 @@
],
},
'function': 'derivative',
'over': 1024,
'minimum': 1024,
},
},
'unit': 'bytes',
'display_name': '${__field.labels.comm}',
'display_name': '__field.labels.comm',
'legend': {
'displayMode': 'table',
'placement': 'right',
@ -34,11 +34,11 @@
],
},
'function': 'derivative',
'over': 1,
'minimum': 1,
},
},
'unit': 'bytes',
'display_name': '${__field.labels.comm}',
'display_name': '__field.labels.comm',
'legend': {
'displayMode': 'table',
'placement': 'right',

View file

@ -1,97 +0,0 @@
{
'critical': {
'stacked': True,
'queries': {
'generic': {
'filters': {
'_measurement': 'mikrotik_interface_generic',
'_field': [
'in_errors',
'out_errors',
],
'operating_system': 'routeros',
},
'function': 'difference',
'over': 0,
},
'mikrotik': {
'filters': {
'_measurement': 'mikrotik_interface_detailed',
'_field': [
'rx_fcs_errors',
'rx_align_errors',
'rx_code_errors',
'rx_carrier_errors',
'rx_jabber',
'rx_fragment',
'rx_length_errors',
'tx_late_collisions',
'tx_excessive_collisions',
'link_downs',
],
'operating_system': 'routeros',
},
'function': 'difference',
'over': 0,
},
},
'min': 0,
'unit': 'cps',
'tooltip': 'multi',
'display_name': '${__field.name} ${__field.labels.ifName} ${__field.labels.ifAlias}',
'legend': {
'displayMode': 'table',
'placement': 'right',
'calcs': [
'max',
],
},
},
'warning': {
'stacked': True,
'queries': {
'generic': {
'filters': {
'_measurement': 'mikrotik_interface_generic',
'_field': [
'in_discards',
'out_discards',
],
'operating_system': 'routeros',
},
'function': 'difference',
'over': 0,
},
'mikrotik': {
'filters': {
'_measurement': 'mikrotik_interface_detailed',
'_field': [
'rx_too_short',
'rx_too_long',
'rx_drop',
'tx_drop',
'rx_pause',
'tx_pause',
'tx_pause_honored',
'tx_collisions',
'tx_total_collisions',
],
'operating_system': 'routeros',
},
'function': 'difference',
'over': 0,
},
},
'min': 0,
'unit': 'cps',
'tooltip': 'multi',
'display_name': '${__field.name} ${__field.labels.ifName} ${__field.labels.ifAlias}',
'legend': {
'displayMode': 'table',
'placement': 'right',
'calcs': [
'max',
],
},
},
}

View file

@ -1,107 +0,0 @@
{
'temperature': {
'stacked': False,
'queries': {
'temp': {
'filters': {
'_measurement': 'mikrotik_health',
'sensor': [
'temperature',
'cpu-temperature',
'switch-temperature',
'board-temperature1',
'sfp-temperature',
],
'_field': [
'value',
],
'operating_system': 'routeros',
},
},
},
'min': 0,
'unit': 'celsius',
'tooltip': 'multi',
'display_name': '${__field.labels.sensor}',
'legend': {
'displayMode': 'hidden',
},
},
'fan': {
'stacked': False,
'queries': {
'temp': {
'filters': {
'_measurement': 'mikrotik_health',
'sensor': [
'fan1-speed',
'fan2-speed',
],
'_field': [
'value',
],
'operating_system': 'routeros',
},
},
},
'min': 0,
'unit': 'rpm',
'tooltip': 'multi',
'display_name': '${__field.labels.sensor}',
'legend': {
'displayMode': 'hidden',
},
},
'psu_current': {
'stacked': False,
'queries': {
'temp': {
'filters': {
'_measurement': 'mikrotik_health',
'sensor': [
'psu1-current',
'psu2-current',
],
'_field': [
'value',
],
'operating_system': 'routeros',
},
'multiply': 0.1,
},
},
'min': 0,
'unit': 'ampere',
'tooltip': 'multi',
'display_name': '${__field.labels.sensor}',
'legend': {
'displayMode': 'hidden',
},
},
'psu_voltage': {
'stacked': False,
'queries': {
'temp': {
'filters': {
'_measurement': 'hw',
'sensor': [
'psu1-voltage',
'psu2-voltage',
],
'_field': [
'value',
],
'operating_system': 'routeros',
},
'multiply': 0.1,
},
},
'min': 0,
'unit': 'volt',
'tooltip': 'multi',
'display_name': '${__field.labels.sensor}',
'legend': {
'displayMode': 'hidden',
},
},
}

View file

@ -1,54 +0,0 @@
{
'in': {
'stacked': True,
'queries': {
'in': {
'filters': {
'_measurement': 'mikrotik_interface_generic',
'_field': [
'in_ucast_pkts',
'in_mcast_pkts',
'in_bcast_pkts',
],
'ifType': [6],
'operating_system': 'routeros',
},
'function': 'derivative',
'over': 0,
},
},
'min': 0,
'unit': 'pps',
'tooltip': 'multi',
'display_name': '${__field.labels.ifName} - ${__field.labels.ifAlias}',
'legend': {
'displayMode': 'hidden',
},
},
'out': {
'stacked': True,
'queries': {
'out': {
'filters': {
'_measurement': 'mikrotik_interface_generic',
'_field': [
'out_ucast_pkts',
'out_mcast_pkts',
'out_bcast_pkts',
],
'ifType': [6],
'operating_system': 'routeros',
},
'function': 'derivative',
'over': 0,
},
},
'min': 0,
'unit': 'pps',
'tooltip': 'multi',
'display_name': '${__field.labels.ifName} - ${__field.labels.ifAlias}',
'legend': {
'displayMode': 'hidden',
},
},
}

View file

@ -1,68 +0,0 @@
{
'power': {
'stacked': True,
'queries': {
'power': {
'filters': {
'_measurement': 'mikrotik_poe',
'_field': ['power'],
'operating_system': 'routeros',
},
'function': 'mean',
'multiply': 0.1,
'over': 0,
},
},
'min': 0,
'unit': 'watt',
'tooltip': 'multi',
'display_name': '${__field.labels.ifName} - ${__field.labels.ifAlias}',
'legend': {
'displayMode': 'hidden',
},
},
'current': {
'stacked': True,
'queries': {
'voltage': {
'filters': {
'_measurement': 'mikrotik_poe',
'_field': ['current'],
'operating_system': 'routeros',
},
'function': 'mean',
'multiply': 0.1,
'over': 0,
},
},
'min': 0,
'unit': 'ampere',
'tooltip': 'multi',
'display_name': '${__field.labels.ifName} - ${__field.labels.ifAlias}',
'legend': {
'displayMode': 'hidden',
},
},
'voltage': {
'stacked': False,
'queries': {
'voltage': {
'filters': {
'_measurement': 'mikrotik_poe',
'_field': ['voltage'],
'operating_system': 'routeros',
},
'function': 'mean',
'multiply': 0.1,
'over': 0,
},
},
'min': 0,
'unit': 'volt',
'tooltip': 'multi',
'display_name': '${__field.labels.ifName} - ${__field.labels.ifAlias}',
'legend': {
'displayMode': 'hidden',
},
},
}

View file

@ -1,46 +0,0 @@
{
'in': {
'stacked': True,
'queries': {
'in': {
'filters': {
'_measurement': 'mikrotik_interface_generic',
'_field': ['in_octets'],
'ifType': [6],
'operating_system': 'routeros',
},
'function': 'derivative',
'over': 0,
},
},
'min': 0,
'unit': 'bps',
'tooltip': 'multi',
'display_name': '${__field.labels.ifName} - ${__field.labels.ifAlias}',
'legend': {
'displayMode': 'hidden',
},
},
'out': {
'stacked': True,
'queries': {
'out': {
'filters': {
'_measurement': 'mikrotik_interface_generic',
'_field': ['out_octets'],
'ifType': [6],
'operating_system': 'routeros',
},
'function': 'derivative',
'over': 0,
},
},
'min': 0,
'unit': 'bps',
'tooltip': 'multi',
'display_name': '${__field.labels.ifName} - ${__field.labels.ifAlias}',
'legend': {
'displayMode': 'hidden',
},
},
}

View file

@ -11,7 +11,7 @@
'function': 'mean',
},
},
'display_name': '${__field.labels.device}',
'display_name': '__field.labels.device',
'min': 0,
'unit': 'celsius',
'tooltip': 'multi',
@ -29,7 +29,7 @@
'function': 'last',
},
},
'display_name': '${__field.labels.device}',
'display_name': '__field.labels.device',
'min': 0,
'tooltip': 'multi',
},
@ -42,7 +42,7 @@
},
},
},
'display_name': '${__field.labels.device} ${__field.name}',
'display_name': '__field.labels.device} ${__field.name',
'min': 0,
'tooltip': 'multi',
'legend': {

View file

@ -1,9 +1 @@
{
'metadata': {
'nftables': {
'input': {
'udp dport 161 accept',
},
},
},
}
{}

View file

@ -9,13 +9,6 @@
'routeros',
],
'metadata': {
'grafana_rows': {
'routeros_errors',
'routeros_throughput',
'routeros_poe',
'routeros_packets',
'routeros_health',
},
'routeros': {
'gateway': '10.0.0.1',
'bridge_priority': '0x8000',
@ -25,15 +18,10 @@
'iot': '2',
'internet': '3',
'proxmox': '4',
'wokeonlan': '5',
'gast': '9',
'rolf': '51',
},
'vlan_groups': {
'home': {
'untagged': 'home',
'tagged': set(),
},
'infra': {
'untagged': 'home',
'tagged': {
@ -42,23 +30,15 @@
'proxmox',
'gast',
'rolf',
'wokeonlan',
},
},
'internet': {
'untagged': 'internet',
'tagged': set(),
},
'wokeonlan': {
'untagged': 'wokeonlan',
'tagged': set(),
},
},
'vlan_ports': {},
},
'telegraf': {
'influxdb_node': 'home.server',
},
},
'os': 'routeros',
}

View file

@ -1,30 +0,0 @@
#! /usr/bin/env python3
import requests
from datetime import datetime, timedelta, timezone
BASE = "https://homeassistant.ckn.li"
TOKEN = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiI1YjY0ZWE5N2FiMzM0NTQ0OGMyNjhmZTIxYzAxZTE1MSIsImlhdCI6MTc1NjAzOTAxNCwiZXhwIjoyMDcxMzk5MDE0fQ.X-sQli-NTpCjeXpn19zf-maPRDldkSeTuhKZua1k8uM"
ENTITY = "sensor.hue_outdoor_motion_sensor_2_temperature"
HEADERS = {
"Authorization": f"Bearer {TOKEN}",
"Content-Type": "application/json",
}
begin = datetime(2025, 7, 1, 0, 0, 0, tzinfo=timezone.utc)
current = begin
now = datetime.now(timezone.utc)
while current < now:
current += timedelta(hours=1)
resp = requests.get(
f"{BASE}/api/history/period/{current.isoformat()}",
params={
"end_time": current.isoformat(),
"filter_entity_id": ENTITY
},
headers=HEADERS,
timeout=15,
)
print(current, resp.json())

View file

@ -1,18 +0,0 @@
from subprocess import check_output
def test_node(repo, node, **kwargs):
for node in repo.nodes_in_group('mailserver'):
domain = node.metadata.get('mailserver/hostname')
expected_ptr_record = f"{domain}."
expected_a_record = node.hostname
# check A record
actual_a_record = check_output(['dig', '+short', 'A', domain, '@9.9.9.9'], text=True).strip()
if actual_a_record != expected_a_record:
raise AssertionError(f"A record for {expected_a_record} on node {node.name} is {actual_a_record}, expected {expected_a_record}")
# check PTR record
actual_ptr_record = check_output(['dig', '+short', '-x', expected_a_record, '@9.9.9.9'], text=True).strip()
if actual_ptr_record != expected_ptr_record:
raise AssertionError(f"PTR record for {expected_a_record} on node {node.name} is {actual_ptr_record}, expected {expected_ptr_record}")

View file

@ -1,14 +0,0 @@
def test_unique_node_ids(repo):
ids = {}
for node in repo.nodes:
if node.metadata.get('id') in ids:
raise ValueError(f"Duplicate node ID found: {node.metadata.get('id')} in node {node.name} and {ids[node.metadata.get('id')]}")
ids[node.metadata.get('id')] = node.name
def apply_start(repo, target, nodes, interactive=False, **kwargs):
test_unique_node_ids(repo)
def test(repo, **kwargs):
test_unique_node_ids(repo)

View file

@ -1,8 +1,5 @@
def wake_on_lan(node):
node.repo.libs.wol.wake(node)
def node_apply_start(repo, node, **kwargs):
wake_on_lan(node)
repo.libs.wol.wake(node)
def node_run_start(repo, node, cmd, **kwargs):
wake_on_lan(node)
repo.libs.wol.wake(node)

View file

@ -1,28 +0,0 @@
from functools import total_ordering
@total_ordering
class Version():
def __init__(self, string):
self._tuple = self.tupelize(string)
def __lt__(self, other):
return self._tuple < self.tupelize(other)
def __eq__(self, other):
return self._tuple == self.tupelize(other)
def __repr__(self):
return f'{type(self).__name__}({repr(self._tuple)})'
def __str__(self):
return '.'.join(str(i) for i in self._tuple)
@staticmethod
def tupelize(version):
if isinstance(version, (int, float, str, Version)):
return tuple(int(i) for i in str(version).split('.'))
elif type(version) == tuple:
return version
else:
raise TypeError(type(version))
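# Minimal usage sketch (illustrative values, not taken from this diff):
assert Version('7.15.2') < Version('7.16')    # compared tuple-wise: (7, 15, 2) < (7, 16)
assert Version('6.49.10') == '6.49.10'        # strings, ints/floats and tuples are accepted too
assert str(Version('7.16')) == '7.16'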

View file

@ -12,7 +12,7 @@
'smartctl',
'wol-sleeper',
'zfs',
#'zfs-mirror',
'zfs-mirror',
],
'metadata': {
'id': '9cf52515-63a1-4659-a8ec-6c3c881727e5',
@ -25,7 +25,7 @@
},
'wakeonlan': {
'interface': 'enp0s31f6',
'ipv4': '10.0.5.5/24',
'ipv4': '10.0.0.6/24',
'mac': '4c:cc:6a:d5:96:f8',
},
},
@ -38,7 +38,7 @@
},
'wol-sleeper': {
'network': 'wakeonlan',
'waker': 'home.router',
'waker': 'home.server',
},
'zfs-mirror': {
'server': 'wb.offsite-backups',

View file

@ -1,5 +1,5 @@
{
'hostname': '10.0.0.150',
'hostname': '10.0.0.162',
'bundles': [
'bootshorn',
'systemd',
@ -7,14 +7,5 @@
],
'metadata': {
'id': '25c6f3fd-0d32-42c3-aeb3-0147bc3937c7',
'network': {
'internal': {
'ipv4': '10.0.0.150/24',
'mac': 'd6:d8:61:33:f2:05',
},
},
},
}
# rsync -avh --progress -e 'ssh -o "StrictHostKeyChecking no"' 10.0.0.150:/opt/bootshorn/recordings /hdd/bootshorn
# rsync -avh --progress -e 'ssh -o "StrictHostKeyChecking no"' 10.0.0.196:/opt/bootshorn/events ~/Downloads/bootshorn-events

View file

@ -33,9 +33,6 @@
# ssh-ed25519
# AAAAC3NzaC1lZDI1NTE5AAAAIJT9Spe+BYue7iiutl3rSf6PlU6dthHizyK+ZWnLodrA
# root@home.server
# - >-
# ssh-ed25519
# AAAAC3NzaC1lZDI1NTE5AAAAILMVroYmswD4tLk6iH+2tvQiyaMe42yfONDsPDIdFv6I ckn
# sftp: true
# compatibility_mode: false
# allow_agent_forwarding: false

View file

@ -25,7 +25,7 @@
'users': {
'root': {
'authorized_users': {
'root@home.server': {},
'root@home.server',
},
},
},

View file

@ -1,6 +1,6 @@
{
'dummy': True,
'hostname': '10.0.0.143',
'hostname': '10.0.2.100',
'groups': [
'home',
],
@ -8,13 +8,13 @@
'id': '87879bc1-130f-4fca-a8d2-e1d93a794df4',
'network': {
'internal': {
'ipv4': '10.0.0.143/24',
'ipv4': '10.0.2.100/24',
'mac': '00:17:88:67:e7:f2',
},
},
'dns': {
'hue.ckn.li': {
'A': {'10.0.0.143'},
'A': {'10.0.2.100'},
},
},
},

View file

@ -12,7 +12,6 @@
'kea-dhcpd',
'wireguard',
'pppoe',
'wol-waker',
],
'metadata': {
'id': '1d6a43e5-858c-42f9-9c40-ab63d61c787c',
@ -43,13 +42,6 @@
'ipv4': '10.0.4.1/24',
'dhcp_server': True,
},
'wakeonlan': {
'type': 'vlan',
'vlan_interface': 'internal',
'id': 5,
'ipv4': '10.0.5.1/24',
'dhcp_server': True,
},
'guest': {
'type': 'vlan',
'vlan_interface': 'internal',

View file

@ -31,8 +31,8 @@
'systemd-swap',
'twitch-clip-download',
'raspberrymatic-cert',
'wol-waker',
'zfs',
'routeros-monitoring',
],
'metadata': {
'id': 'af96709e-b13f-4965-a588-ef2cd476437a',
@ -207,10 +207,6 @@
'hdd/nextcloud/ckn-privat': {
'mountpoint': '/var/lib/nextcloud/ckn-privat/files',
},
'hdd/bootshorn': {
'mountpoint': '/hdd/bootshorn',
'dedup': 'on',
},
},
},
},

View file

@ -1,9 +1,7 @@
{
'hostname': '10.0.0.63',
'username': 'admin',
'password': '!decrypt:encrypt$gAAAAABoYFUx2faf18aV3rzNNuBA-4xZ22LQJ2HinpgsjkoTQS_l2TbmDtiAZI1jt-kWfTZ48d5_UPX-VDmY9qb4Sgn2Iz7Yee3CrB4hl85TyutilukTIP8=',
'groups': [
'home',
'routeros',
],
'metadata': {
@ -17,35 +15,30 @@
'ports': {
'sfp-sfpplus1': {
'vlan_group': 'infra',
'comment': 'home.router',
},
'sfp-sfpplus2': {
'vlan_group': 'infra',
'comment': 'home.server',
},
'sfp-sfpplus3': {
'vlan_group': 'home',
'comment': 'home.backups',
'vlan_group': 'infra',
},
'sfp-sfpplus4': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'sfp-sfpplus5': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'sfp-sfpplus6': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'sfp-sfpplus7': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'sfp-sfpplus8': {
'vlan_group': 'infra',
'comment': 'home.switch-vorratsraum-poe',
},
'ether1': {
'vlan_group': 'infra',
'comment': 'home.switch-rack-poe',
},
},
},

View file

@ -1,9 +1,7 @@
{
'hostname': '10.0.0.64',
'username': 'admin',
'password': '!decrypt:encrypt$gAAAAABob2elR_Sm13u-oG1ff_zOeEsay8PZ0Wgbl810hAZNhvuTYWJuNAJ1oyelC6sy7WsD2CC33oVLeb6m0EtNARtMs-2gKu9KlT7Xat1MvV-iatDKvro=',
'groups': [
'home',
'routeros',
],
'metadata': {
@ -17,43 +15,39 @@
'ports': {
'ether1': {
'vlan_group': 'infra',
'comment': 'home.switch-rack-10g',
},
'ether2': {
'vlan_group': 'home',
'comment': 'unifi-cloudkey',
'vlan_group': 'infra',
},
'ether3': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'ether4': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'ether5': {
'vlan_group': 'wokeonlan',
'comment': 'home.backups',
'vlan_group': 'infra',
},
'ether6': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'ether7': {
'vlan_group': 'home',
'comment': 'usv',
'vlan_group': 'infra',
},
'ether8': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'sfp9': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'sfp10': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'sfp11': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'sfp12': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
},
},

View file

@ -1,9 +1,7 @@
{
'hostname': '10.0.0.60',
'username': 'admin',
'password': '!decrypt:encrypt$gAAAAABoYVzxzO0R_bnW3S3Ggiq2LCCAGaKtXToviGZjgIlH2NpL9ojO8aNlSPPcGTKbn5z5RxSxjOlL161U0Ctdf6Rns2e5I5p5TIcsQ7c9qnAiaV-Hhuw=',
'groups': [
'home',
'routeros',
],
'metadata': {
@ -17,92 +15,87 @@
'ports': {
'sfp-sfpplus1': {
'vlan_group': 'infra',
'comment': 'home.switch-rack-10g',
},
'sfp-sfpplus2': {
'vlan_group': 'infra',
'comment': 'home.switch-wohnzimmer-10g',
},
'sfp-sfpplus3': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'sfp-sfpplus4': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'ether1': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'ether2': {
'vlan_group': 'infra',
'comment': 'switch-gartenhaus-unifi',
},
'ether3': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'ether4': {
'vlan_group': 'internet',
'comment': 'fritzbox',
},
'ether5': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'ether6': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'ether7': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'ether8': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'ether9': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'ether10': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'ether11': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'ether12': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'ether13': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'ether14': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'ether15': {
'vlan_group': 'home',
'comment': 'noctua-fan',
'vlan_group': 'infra',
},
'ether16': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'ether17': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'ether18': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'ether19': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'ether20': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'ether21': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'ether22': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'ether23': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
'ether24': {
'vlan_group': 'home',
'vlan_group': 'infra',
},
},
},

View file

@ -1,9 +1,7 @@
{
'hostname': '10.0.0.62',
'username': 'admin',
'password': '!decrypt:encrypt$gAAAAABoYFSyt2JAsdePXiHim1RdQwbarJedhAOE3XpS2rGMBx-F5eCWRCIyLU2g2ocUDUIDfgH3nBipUCkdcd0Bv4vbK-yqKmGSeSH7YXLYwq3ZWuCDsLM=',
'groups': [
'home',
'routeros',
],
'metadata': {
@ -17,23 +15,18 @@
'ports': {
'ether1': {
'vlan_group': 'infra',
'comment': 'home.switch-vorratsraum-poe',
},
'ether2': {
'vlan_group': 'infra',
'comment': 'wohnzimmer-ap',
},
'ether3': {
'vlan_group': 'home',
'comment': 'gaming-pc',
'vlan_group': 'infra',
},
'ether4': {
'vlan_group': 'infra',
'comment': 'schreibtisch-dock',
},
'ether5': {
'vlan_group': 'infra',
'comment': 'switch-wohnzimmer-unifi',
},
},
},

View file

@ -171,28 +171,6 @@
'version': '1.6.11',
'installer': False,
},
'sysctl': {
'net': {
'ipv4': {
'ip_forward': 1,
'conf': {
'default': {
'forwarding': 1,
},
},
},
'ipv6': {
'conf': {
'all': {
'forwarding': 1,
},
'default': {
'forwarding': 1,
},
},
},
},
},
'vm': {
'cores': 2,
'ram': 4096,

View file

@ -7,7 +7,6 @@
],
'bundles': [
'wireguard',
'left4dead2',
],
'metadata': {
'id': 'd5080b1a-b310-48be-bd5a-02cfcecf0c90',
@ -26,72 +25,6 @@
},
},
},
'left4dead2': {
'servers': {
'vanilla': {
'port': 27015,
'overlays': ['vanilla'],
'config': [
'sv_consistency 0',
],
},
'tick100': {
'port': 27016,
'arguments': ['-tickrate 100'],
'overlays': ['tickrate', 'vanilla', 'workshop_maps'],
'config': [
'exec server_tickrate.cfg',
'sv_minupdaterate 101',
'sv_maxupdaterate 101',
'sv_mincmdrate 101',
'sv_maxcmdrate 101',
'sv_consistency 0',
],
},
'tick100_maps': {
'port': 27017,
'arguments': ['-tickrate 100'],
'overlays': ['tickrate', 'vanilla', 'workshop_maps'],
'config': [
'exec server_tickrate.cfg',
'sv_minupdaterate 101',
'sv_maxupdaterate 101',
'sv_mincmdrate 101',
'sv_maxcmdrate 101',
'sv_consistency 0',
],
},
'vanilla_maps': {
'port': 27018,
'overlays': ['vanilla', 'workshop_maps'],
'config': [
'sv_consistency 0',
],
},
'tick60_maps': {
'port': 27019,
'arguments': ['-tickrate 60'],
'overlays': ['tickrate', 'vanilla', 'workshop_maps'],
'config': [
'exec server_tickrate.cfg',
'sv_minupdaterate 101',
'sv_maxupdaterate 101',
'sv_mincmdrate 101',
'sv_maxcmdrate 101',
'sv_consistency 0',
],
},
'zonemod': {
'port': 27020,
'arguments': ['-tickrate 60'],
'overlays': ['competitive_rework'],
'config': [
'exec server_competitive_rework.cfg',
'sm_forcematch zonemod',
],
},
},
},
'bind': {
'master_node': 'htz.mails',
'hostname': 'secondary.resolver.name',
@ -113,5 +46,11 @@
},
},
},
'nftables': {
'input': {
'tcp dport 27015 accept',
'udp dport { 27005, 27015, 27020 } accept',
},
},
},
}

Some files were not shown because too many files have changed in this diff.