Compare commits


No commits in common. "master" and "gpiod" have entirely different histories.

409 changed files with 1850 additions and 12199 deletions

View file

@ -1,22 +0,0 @@
root = true
[*]
end_of_line = lf
[*.py]
indent_style = space
indent_size = 4
trim_trailing_whitespace = true
insert_final_newline = true
[*.toml]
indent_style = space
indent_size = 2
trim_trailing_whitespace = true
insert_final_newline = true
[*.yaml]
indent_style = space
indent_size = 2
trim_trailing_whitespace = true
insert_final_newline = true

9
.envrc
View file

@ -1,7 +1,8 @@
#!/usr/bin/env bash
PATH_add bin
python3 -m venv .venv
source ./.venv/bin/activate
source_env ~/.local/share/direnv/pyenv
source_env ~/.local/share/direnv/venv
source_env ~/.local/share/direnv/bundlewrap
export BW_GIT_DEPLOY_CACHE="$(realpath ~)/.cache/bw/git_deploy"
mkdir -p "$BW_GIT_DEPLOY_CACHE"
unset PS1

2
.gitignore vendored
View file

@ -1,4 +1,2 @@
.secrets.cfg*
.venv
.cache
*.pyc

1
.python-version Normal file
View file

@ -0,0 +1 @@
3.9.0

View file

@ -1,48 +1,10 @@
# TODO
- don't spam-filter forwarded mails
- gollum wiki
- blog?
- fix dkim not working sometimes
- LDAP
- oauth2/OpenID
- icinga
# Raspberry Pi as soundcard
- gadget mode (see the sketch after this list)
- OTG g_audio
- https://audiosciencereview.com/forum/index.php?threads/raspberry-pi-as-usb-to-i2s-adapter.8567/post-215824
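A minimal sketch of the gadget-mode setup — assumes Raspberry Pi OS on a model with a USB OTG port (e.g. Pi Zero), not the exact steps from the linked thread:
```sh
# switch the OTG controller to device mode
echo 'dtoverlay=dwc2' >> /boot/config.txt
echo 'dwc2' >> /etc/modules
# expose the Pi as a USB Audio Class gadget
echo 'g_audio' >> /etc/modules
reboot
```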
# install bw fork
pip3 install --editable git+file:///Users/mwiegand/Projekte/bundlewrap-fork@main#egg=bundlewrap
# monitor timers
```sh
Timer=backup
Triggers=$(systemctl show ${Timer}.timer --property=Triggers --value)
echo $Triggers
if systemctl is-failed "$Triggers"
then
InvocationID=$(systemctl show "$Triggers" --property=InvocationID --value)
echo $InvocationID
ExitCode=$(systemctl show "$Triggers" -p ExecStartEx --value | sed 's/^{//' | sed 's/}$//' | tr ';' '\n' | xargs -n 1 | grep '^status=' | cut -d '=' -f 2)
echo $ExitCode
journalctl INVOCATION_ID="$InvocationID" --output cat
fi
```
telegraf: execd for daemons
TEST
# git signing
git config --global gpg.format ssh
git config --global commit.gpgsign true
git config user.name CroneKorkN
git config user.email i@ckn.li
git config user.signingkey "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILMVroYmswD4tLk6iH+2tvQiyaMe42yfONDsPDIdFv6I"
- Homeassistant/OpenHAB
- Homematic

View file

@ -1,32 +0,0 @@
#!/usr/bin/env python3
from sys import argv
from os.path import realpath, dirname
from shlex import quote
from bundlewrap.repo import Repository
repo = Repository(dirname(dirname(realpath(__file__))))
if len(argv) == 1:
for node in repo.nodes:
for name in node.metadata.get('left4dead2/servers', {}):
print(name)
exit(0)
server = argv[1]
command = argv[2]
remote_code = """
from rcon.source import Client
with Client('127.0.0.1', {port}, passwd='''{password}''') as client:
response = client.run('''{command}''')
print(response)
"""
for node in repo.nodes:
for name, conf in node.metadata.get('left4dead2/servers', {}).items():
if name == server:
response = node.run('python3 -c ' + quote(remote_code.format(port=conf['port'], password=conf['rcon_password'], command=command)))
print(response.stdout.decode())

View file

@ -1,70 +0,0 @@
#!/usr/bin/env python3
from bundlewrap.repo import Repository
from os.path import realpath, dirname
from ipaddress import ip_interface
repo = Repository(dirname(dirname(realpath(__file__))))
nodes = [
node
for node in sorted(repo.nodes_in_group('debian'))
if not node.dummy
]
print('updating nodes:', sorted(node.name for node in nodes))
# UPDATE
for node in nodes:
print('--------------------------------------')
print('updating', node.name)
print('--------------------------------------')
repo.libs.wol.wake(node)
print(node.run('DEBIAN_FRONTEND=noninteractive apt update').stdout.decode())
print(node.run('DEBIAN_FRONTEND=noninteractive apt list --upgradable').stdout.decode())
if int(node.run('DEBIAN_FRONTEND=noninteractive apt list --upgradable 2> /dev/null | grep upgradable | wc -l').stdout.decode()):
print(node.run('DEBIAN_FRONTEND=noninteractive apt -qy full-upgrade').stdout.decode())
# REBOOT IN ORDER
wireguard_servers = [
node
for node in nodes
if node.has_bundle('wireguard')
and (
ip_interface(node.metadata.get('wireguard/my_ip')).network.prefixlen <
ip_interface(node.metadata.get('wireguard/my_ip')).network.max_prefixlen
)
]
wireguard_s2s = [
node
for node in nodes
if node.has_bundle('wireguard')
and (
ip_interface(node.metadata.get('wireguard/my_ip')).network.prefixlen ==
ip_interface(node.metadata.get('wireguard/my_ip')).network.max_prefixlen
)
]
everything_else = [
node
for node in nodes
if not node.has_bundle('wireguard')
]
print('======================================')
for node in [
*everything_else,
*wireguard_s2s,
*wireguard_servers,
]:
try:
if node.run('test -e /var/run/reboot-required', may_fail=True).return_code == 0:
print('rebooting', node.name)
print(node.run('systemctl reboot').stdout.decode())
else:
print('not rebooting', node.name)
except Exception as e:
print(e)

View file

@ -1,9 +0,0 @@
#!/usr/bin/env python3
from bundlewrap.repo import Repository
from os.path import realpath, dirname
from sys import argv
repo = Repository(dirname(dirname(realpath(__file__))))
repo.libs.wol.wake(repo.get_node(argv[1]))

View file

@ -5,18 +5,10 @@ from os.path import realpath, dirname
from sys import argv
from ipaddress import ip_network, ip_interface
if len(argv) != 3:
print(f'usage: {argv[0]} <node> <client>')
exit(1)
repo = Repository(dirname(dirname(realpath(__file__))))
server_node = repo.get_node(argv[1])
if argv[2] not in server_node.metadata.get('wireguard/clients'):
print(f'client {argv[2]} not found in: {server_node.metadata.get("wireguard/clients").keys()}')
exit(1)
data = server_node.metadata.get(f'wireguard/clients/{argv[2]}')
server_node = repo.get_node('htz.mails')
data = server_node.metadata.get(f'wireguard/clients/{argv[1]}')
vpn_network = ip_interface(server_node.metadata.get('wireguard/my_ip')).network
allowed_ips = [
@ -28,25 +20,17 @@ for peer in server_node.metadata.get('wireguard/s2s').values():
if not ip_network(network).subnet_of(vpn_network):
allowed_ips.append(ip_network(network))
conf = f'''
[Interface]
print(
f'''[Interface]
PrivateKey = {repo.libs.wireguard.privkey(data['peer_id'])}
ListenPort = 51820
Address = {data['peer_ip']}
DNS = 172.30.0.1
DNS = 8.8.8.8
[Peer]
PublicKey = {repo.libs.wireguard.pubkey(server_node.metadata.get('id'))}
PresharedKey = {repo.libs.wireguard.psk(data['peer_id'], server_node.metadata.get('id'))}
AllowedIPs = {', '.join(str(client_route) for client_route in sorted(allowed_ips))}
Endpoint = {ip_interface(server_node.metadata.get('network/external/ipv4')).ip}:51820
PersistentKeepalive = 10
'''
print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
print(conf)
print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
if input("print qrcode? [Yn]: ").upper() in ['', 'Y']:
import pyqrcode
print(pyqrcode.create(conf).terminal(quiet_zone=1))
PersistentKeepalive = 10'''
)

View file

@ -1,10 +0,0 @@
http://www.apcupsd.org/manual/manual.html#power-down-during-shutdown
- onbattery: power lost
- battery drains
- when BATTERYLEVEL or MINUTES threshold is reached, server is shut down and
the ups is issued to cut the power
- when the mains power returns, the ups will reinstate power to the server
- the server will reboot
NOT IMPLEMENTED — see the sketch below
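A hedged sketch of the halt-script hook the linked apcupsd manual section describes for the "cut the power" step (presumably the part marked NOT IMPLEMENTED):
```sh
# near the end of the system halt sequence: if apcupsd wrote the powerfail
# flag file (see PWRFAILDIR), ask the UPS to cut its output power
if [ -f /etc/apcupsd/powerfail ]; then
    /etc/apcupsd/apccontrol killpower
fi
```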

View file

@ -1,343 +0,0 @@
## apcupsd.conf v1.1 ##
#
# "apcupsd" POSIX config file
#
# Note that the apcupsd daemon must be restarted in order for changes to
# this configuration file to become active.
#
#
# ========= General configuration parameters ============
#
# UPSNAME xxx
# Use this to give your UPS a name in log files and such. This
# is particulary useful if you have multiple UPSes. This does not
# set the EEPROM. It should be 8 characters or less.
#UPSNAME
# UPSCABLE <cable>
# Defines the type of cable connecting the UPS to your computer.
#
# Possible generic choices for <cable> are:
# simple, smart, ether, usb
#
# Or a specific cable model number may be used:
# 940-0119A, 940-0127A, 940-0128A, 940-0020B,
# 940-0020C, 940-0023A, 940-0024B, 940-0024C,
# 940-1524C, 940-0024G, 940-0095A, 940-0095B,
# 940-0095C, 940-0625A, M-04-02-2000
#
UPSCABLE usb
# To get apcupsd to work, in addition to defining the cable
# above, you must also define a UPSTYPE, which corresponds to
# the type of UPS you have (see the Description for more details).
# You must also specify a DEVICE, sometimes referred to as a port.
# For USB UPSes, please leave the DEVICE directive blank. For
# other UPS types, you must specify an appropriate port or address.
#
# UPSTYPE DEVICE Description
# apcsmart /dev/tty** Newer serial character device, appropriate for
# SmartUPS models using a serial cable (not USB).
#
# usb <BLANK> Most new UPSes are USB. A blank DEVICE
# setting enables autodetection, which is
# the best choice for most installations.
#
# net hostname:port Network link to a master apcupsd through apcupsd's
# Network Information Server. This is used if the
# UPS powering your computer is connected to a
# different computer for monitoring.
#
# snmp hostname:port:vendor:community
# SNMP network link to an SNMP-enabled UPS device.
# Hostname is the ip address or hostname of the UPS
# on the network. Vendor can be can be "APC" or
# "APC_NOTRAP". "APC_NOTRAP" will disable SNMP trap
# catching; you usually want "APC". Port is usually
# 161. Community is usually "private".
#
# netsnmp hostname:port:vendor:community
# OBSOLETE
# Same as SNMP above but requires use of the
# net-snmp library. Unless you have a specific need
# for this old driver, you should use 'snmp' instead.
#
# dumb /dev/tty** Old serial character device for use with
# simple-signaling UPSes.
#
# pcnet ipaddr:username:passphrase:port
# PowerChute Network Shutdown protocol which can be
# used as an alternative to SNMP with the AP9617
# family of smart slot cards. ipaddr is the IP
# address of the UPS management card. username and
# passphrase are the credentials for which the card
# has been configured. port is the port number on
# which to listen for messages from the UPS, normally
# 3052. If this parameter is empty or missing, the
# default of 3052 will be used.
#
# modbus /dev/tty** Serial device for use with newest SmartUPS models
# supporting the MODBUS protocol.
# modbus <BLANK> Leave the DEVICE setting blank for MODBUS over USB
# or set to the serial number of the UPS to ensure
# that apcupsd binds to that particular unit
# (helpful if you have more than one USB UPS).
#
UPSTYPE usb
#DEVICE /dev/ttyS0
# POLLTIME <int>
# Interval (in seconds) at which apcupsd polls the UPS for status. This
# setting applies both to directly-attached UPSes (UPSTYPE apcsmart, usb,
# dumb) and networked UPSes (UPSTYPE net, snmp). Lowering this setting
# will improve apcupsd's responsiveness to certain events at the cost of
# higher CPU utilization. The default of 60 is appropriate for most
# situations.
#POLLTIME 60
# LOCKFILE <path to lockfile>
# Path for device lock file for UPSes connected via USB or
# serial port. This is the directory into which the lock file
# will be written. The directory must already exist; apcupsd will not create
# it. The actual name of the lock file is computed from DEVICE.
# Not used on Win32.
LOCKFILE /var/lock
# SCRIPTDIR <path to script directory>
# Directory in which apccontrol and event scripts are located.
SCRIPTDIR /etc/apcupsd
# PWRFAILDIR <path to powerfail directory>
# Directory in which to write the powerfail flag file. This file
# is created when apcupsd initiates a system shutdown and is
# checked in the OS halt scripts to determine if a killpower
# (turning off UPS output power) is required.
PWRFAILDIR /etc/apcupsd
# NOLOGINDIR <path to nologin directory>
# Directory in which to write the nologin file. The existence
# of this flag file tells the OS to disallow new logins.
NOLOGINDIR /etc
#
# ======== Configuration parameters used during power failures ==========
#
# The ONBATTERYDELAY is the time in seconds from when a power failure
# is detected until we react to it with an onbattery event.
#
# This means that, apccontrol will be called with the powerout argument
# immediately when a power failure is detected. However, the
# onbattery argument is passed to apccontrol only after the
# ONBATTERYDELAY time. If you don't want to be annoyed by short
# powerfailures, make sure that apccontrol powerout does nothing
# i.e. comment out the wall.
ONBATTERYDELAY 6
#
# Note: BATTERYLEVEL, MINUTES, and TIMEOUT work in conjunction, so
# the first that occurs will cause the initation of a shutdown.
#
# If during a power failure, the remaining battery percentage
# (as reported by the UPS) is below or equal to BATTERYLEVEL,
# apcupsd will initiate a system shutdown.
BATTERYLEVEL 10
# If during a power failure, the remaining runtime in minutes
# (as calculated internally by the UPS) is below or equal to MINUTES,
# apcupsd, will initiate a system shutdown.
MINUTES 5
# If during a power failure, the UPS has run on batteries for TIMEOUT
# many seconds or longer, apcupsd will initiate a system shutdown.
# A value of 0 disables this timer.
#
# Note, if you have a Smart UPS, you will most likely want to disable
# this timer by setting it to zero. That way, you UPS will continue
# on batteries until either the % charge remaing drops to or below BATTERYLEVEL,
# or the remaining battery runtime drops to or below MINUTES. Of course,
# if you are testing, setting this to 60 causes a quick system shutdown
# if you pull the power plug.
# If you have an older dumb UPS, you will want to set this to less than
# the time you know you can run on batteries.
TIMEOUT 0
# Time in seconds between annoying users to signoff prior to
# system shutdown. 0 disables.
ANNOY 300
# Initial delay after power failure before warning users to get
# off the system.
ANNOYDELAY 60
# The condition which determines when users are prevented from
# logging in during a power failure.
# NOLOGON <string> [ disable | timeout | percent | minutes | always ]
NOLOGON disable
# If KILLDELAY is non-zero, apcupsd will continue running after a
# shutdown has been requested, and after the specified time in
# seconds attempt to kill the power. This is for use on systems
# where apcupsd cannot regain control after a shutdown.
# KILLDELAY <seconds> 0 disables
KILLDELAY 0
#
# ==== Configuration statements for Network Information Server ====
#
# NETSERVER [ on | off ] on enables, off disables the network
# information server. If netstatus is on, a network information
# server process will be started for serving the STATUS and
# EVENT data over the network (used by CGI programs).
NETSERVER on
# NISIP <dotted notation ip address>
# IP address on which NIS server will listen for incoming connections.
# This is useful if your server is multi-homed (has more than one
# network interface and IP address). Default value is 0.0.0.0 which
# means any incoming request will be serviced. Alternatively, you can
# configure this setting to any specific IP address of your server and
# NIS will listen for connections only on that interface. Use the
# loopback address (127.0.0.1) to accept connections only from the
# local machine.
NISIP 127.0.0.1
# NISPORT <port> default is 3551 as registered with the IANA
# port to use for sending STATUS and EVENTS data over the network.
# It is not used unless NETSERVER is on. If you change this port,
# you will need to change the corresponding value in the cgi directory
# and rebuild the cgi programs.
NISPORT 3551
# If you want the last few EVENTS to be available over the network
# by the network information server, you must define an EVENTSFILE.
EVENTSFILE /var/log/apcupsd.events
# EVENTSFILEMAX <kilobytes>
# By default, the size of the EVENTSFILE will be not be allowed to exceed
# 10 kilobytes. When the file grows beyond this limit, older EVENTS will
# be removed from the beginning of the file (first in first out). The
# parameter EVENTSFILEMAX can be set to a different kilobyte value, or set
# to zero to allow the EVENTSFILE to grow without limit.
EVENTSFILEMAX 10
#
# ========== Configuration statements used if sharing =============
# a UPS with more than one machine
#
# Remaining items are for ShareUPS (APC expansion card) ONLY
#
# UPSCLASS [ standalone | shareslave | sharemaster ]
# Normally standalone unless you share an UPS using an APC ShareUPS
# card.
UPSCLASS standalone
# UPSMODE [ disable | share ]
# Normally disable unless you share an UPS using an APC ShareUPS card.
UPSMODE disable
#
# ===== Configuration statements to control apcupsd system logging ========
#
# Time interval in seconds between writing the STATUS file; 0 disables
STATTIME 0
# Location of STATUS file (written to only if STATTIME is non-zero)
STATFILE /var/log/apcupsd.status
# LOGSTATS [ on | off ] on enables, off disables
# Note! This generates a lot of output, so if
# you turn this on, be sure that the
# file defined in syslog.conf for LOG_NOTICE is a named pipe.
# You probably do not want this on.
LOGSTATS off
# Time interval in seconds between writing the DATA records to
# the log file. 0 disables.
DATATIME 0
# FACILITY defines the logging facility (class) for logging to syslog.
# If not specified, it defaults to "daemon". This is useful
# if you want to separate the data logged by apcupsd from other
# programs.
#FACILITY DAEMON
#
# ========== Configuration statements used in updating the UPS EPROM =========
#
#
# These statements are used only by apctest when choosing "Set EEPROM with conf
# file values" from the EEPROM menu. THESE STATEMENTS HAVE NO EFFECT ON APCUPSD.
#
# UPS name, max 8 characters
#UPSNAME UPS_IDEN
# Battery date - 8 characters
#BATTDATE mm/dd/yy
# Sensitivity to line voltage quality (H cause faster transfer to batteries)
# SENSITIVITY H M L (default = H)
#SENSITIVITY H
# UPS delay after power return (seconds)
# WAKEUP 000 060 180 300 (default = 0)
#WAKEUP 60
# UPS Grace period after request to power off (seconds)
# SLEEP 020 180 300 600 (default = 20)
#SLEEP 180
# Low line voltage causing transfer to batteries
# The permitted values depend on your model as defined by last letter
# of FIRMWARE or APCMODEL. Some representative values are:
# D 106 103 100 097
# M 177 172 168 182
# A 092 090 088 086
# I 208 204 200 196 (default = 0 => not valid)
#LOTRANSFER 208
# High line voltage causing transfer to batteries
# The permitted values depend on your model as defined by last letter
# of FIRMWARE or APCMODEL. Some representative values are:
# D 127 130 133 136
# M 229 234 239 224
# A 108 110 112 114
# I 253 257 261 265 (default = 0 => not valid)
#HITRANSFER 253
# Battery charge needed to restore power
# RETURNCHARGE 00 15 50 90 (default = 15)
#RETURNCHARGE 15
# Alarm delay
# 0 = zero delay after pwr fail, T = power fail + 30 sec, L = low battery, N = never
# BEEPSTATE 0 T L N (default = 0)
#BEEPSTATE T
# Low battery warning delay in minutes
# LOWBATT 02 05 07 10 (default = 02)
#LOWBATT 2
# UPS Output voltage when running on batteries
# The permitted values depend on your model as defined by last letter
# of FIRMWARE or APCMODEL. Some representative values are:
# D 115
# M 208
# A 100
# I 230 240 220 225 (default = 0 => not valid)
#OUTPUTVOLTS 230
# Self test interval in hours 336=2 weeks, 168=1 week, ON=at power on
# SELFTEST 336 168 ON OFF (default = 336)
#SELFTEST 336

View file

@ -1,10 +0,0 @@
#!/bin/bash
date=$(date --utc +%s%N)
METRICS=$(apcaccess)
for METRIC in TIMELEFT LOADPCT BCHARGE
do
echo "apcupsd $METRIC=$(grep $METRIC <<< $METRICS | cut -d ':' -f 2 | xargs | cut -d ' ' -f 1 ) $date"
done

View file

@ -1,20 +0,0 @@
files = {
'/etc/apcupsd/apcupsd.conf': {
'needs': [
'pkg_apt:apcupsd',
],
},
'/usr/local/share/telegraf/apcupsd': {
'source': 'telegraf_plugin',
'mode': '755',
},
}
svc_systemd = {
'apcupsd': {
'needs': [
'pkg_apt:apcupsd',
'file:/etc/apcupsd/apcupsd.conf',
],
}
}

View file

@ -1,30 +0,0 @@
defaults = {
'apt': {
'packages': {
'apcupsd': {},
},
},
'grafana_rows': {
'ups',
},
'sudoers': {
'telegraf': {
'/usr/local/share/telegraf/apcupsd',
},
},
'telegraf': {
'config': {
'inputs': {
'exec': {
repo.libs.hashable.hashable({
'commands': ["sudo /usr/local/share/telegraf/apcupsd"],
'name_override': "apcupsd",
'data_format': "influx",
'interval': '30s',
'flush_interval': '30s',
}),
},
},
},
},
}

View file

@ -1,6 +1,3 @@
# https://manpages.debian.org/latest/apt/sources.list.5.de.html
# https://repolib.readthedocs.io/en/latest/deb822-format.html
```python
{
'apt': {
@ -8,29 +5,8 @@
'apt-transport-https': {},
},
'sources': {
'debian': {
'types': { # optional, defaults to `{'deb'}``
'deb',
'deb-src',
},
'urls': {
'https://deb.debian.org/debian',
},
'suites': { # at least one
'{codename}',
'{codename}-updates',
'{codename}-backports',
},
'components': { # optional
'main',
'contrib',
'non-free',
},
# key:
# - optional, defaults to source name (`debian` in this example)
# - place key under data/apt/keys/debian-12.{asc|gpg}
'key': 'debian-{version}',
},
# place key under data/apt/keys/packages.cloud.google.com.{asc|gpg}
'deb https://packages.cloud.google.com/apt cloud-sdk main',
},
},
}
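For orientation, the `debian` example above should render to roughly the following deb822 `.sources` file — a sketch assuming Debian 12 "bookworm"; the exact output of `repo.libs.apt.render_source` is not reproduced here:
```
Types: deb deb-src
URIs: https://deb.debian.org/debian
Suites: bookworm bookworm-updates bookworm-backports
Components: main contrib non-free
Signed-By: /etc/apt/keyrings/debian-12.gpg
```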

View file

@ -1,15 +0,0 @@
#!/bin/bash
apt update -qq --silent 2> /dev/null
UPGRADABLE=$(apt list --upgradable -qq 2> /dev/null | cut -d '/' -f 1)
if test "$UPGRADABLE" != ""
then
echo "$(wc -l <<< $UPGRADABLE) package(s) upgradable:"
echo
echo "$UPGRADABLE"
exit 1
else
exit 0
fi

View file

@ -1,66 +1,32 @@
# TODO pin repo: https://superuser.com/a/1595920
from os.path import join
from urllib.parse import urlparse
from glob import glob
from os.path import join, basename
directories = {
'/etc/apt': {
'purge': True,
'triggers': {
'action:apt_update',
},
},
'/etc/apt/apt.conf.d': {
# existence is expected
'purge': True,
'triggers': {
'action:apt_update',
},
},
'/etc/apt/keyrings': {
# https://askubuntu.com/a/1307181
'purge': True,
'triggers': {
'action:apt_update',
},
},
# '/etc/apt/listchanges.conf.d': {
# 'purge': True,
# 'triggers': {
# 'action:apt_update',
# },
# },
'/etc/apt/preferences.d': {
'purge': True,
'triggers': {
'action:apt_update',
},
},
'/etc/apt/sources.list.d': {
'purge': True,
'triggers': {
'action:apt_update',
},
},
'/etc/apt/trusted.gpg.d': {
'purge': True,
'triggers': {
'action:apt_update',
},
},
'/etc/apt/preferences.d': {
'purge': True,
'triggers': {
'action:apt_update',
},
},
}
files = {
'/etc/apt/apt.conf': {
'content': repo.libs.apt.render_apt_conf(node.metadata.get('apt/config')),
'triggers': {
'action:apt_update',
},
},
'/etc/apt/sources.list': {
'content': '# managed by bundlewrap\n',
'triggers': {
'action:apt_update',
},
},
# '/etc/apt/listchanges.conf': {
# 'content': repo.libs.ini.dumps(node.metadata.get('apt/list_changes')),
# },
'/usr/lib/nagios/plugins/check_apt_upgradable': {
'mode': '0755',
'content': '# managed'
},
}
@ -75,22 +41,39 @@ actions = {
},
}
# create sources.lists and respective keyfiles
# group sources by apt server hostname
for name, config in node.metadata.get('apt/sources').items():
# place keyfile
keyfile_destination_path = repo.libs.apt.format_variables(node, config['options']['Signed-By'])
files[keyfile_destination_path] = {
'source': join(repo.path, 'data', 'apt', 'keys', basename(keyfile_destination_path)),
'content_type': 'binary',
hosts = {}
for source_string in node.metadata.get('apt/sources'):
source = repo.libs.apt.AptSource(source_string)
hosts\
.setdefault(source.url.hostname, set())\
.add(source)
# create sources lists and keyfiles
for host, sources in hosts.items():
keyfile = basename(glob(join(repo.path, 'data', 'apt', 'keys', f'{host}.*'))[0])
destination_path = f'/etc/apt/trusted.gpg.d/{keyfile}'
for source in sources:
source.options['signed-by'] = [destination_path]
files[f'/etc/apt/sources.list.d/{host}.list'] = {
'content': '\n'.join(
str(source) for source in sorted(sources)
).format(
release=node.metadata.get('os_release')
),
'triggers': {
'action:apt_update',
},
}
# place sources.list
files[f'/etc/apt/sources.list.d/{name}.sources'] = {
'content': repo.libs.apt.render_source(node, name),
files[destination_path] = {
'source': join(repo.path, 'data', 'apt', 'keys', keyfile),
'content_type': 'binary',
'triggers': {
'action:apt_update',
},
@ -105,7 +88,7 @@ for package, options in node.metadata.get('apt/packages', {}).items():
files[f'/etc/apt/preferences.d/{package}'] = {
'content': '\n'.join([
f"Package: {package}",
f"Pin: release a={node.metadata.get('os_codename')}-backports",
f"Pin: release a={node.metadata.get('os_release')}-backports",
f"Pin-Priority: 900",
]),
'needed_by': [
@ -115,25 +98,3 @@ for package, options in node.metadata.get('apt/packages', {}).items():
'action:apt_update',
},
}
# unattended upgrades
#
# unattended-upgrades.service: delays shutdown if necessary
# apt-daily.timer: performs apt update
# apt-daily-upgrade.timer: performs apt upgrade
svc_systemd['unattended-upgrades.service'] = {
'needs': [
'pkg_apt:unattended-upgrades',
],
}
svc_systemd['apt-daily.timer'] = {
'needs': [
'pkg_apt:unattended-upgrades',
],
}
svc_systemd['apt-daily-upgrade.timer'] = {
'needs': [
'pkg_apt:unattended-upgrades',
],
}

View file

@ -1,177 +1,6 @@
defaults = {
'apt': {
'packages': {
'apt-listchanges': {
'installed': False,
},
},
'config': {
'DPkg': {
'Pre-Install-Pkgs': {
'/usr/sbin/dpkg-preconfigure --apt || true',
},
'Post-Invoke': {
# keep package cache empty
'/bin/rm -f /var/cache/apt/archives/*.deb || true',
},
'Options': {
# https://unix.stackexchange.com/a/642541/357916
'--force-confold',
'--force-confdef',
},
},
'APT': {
'NeverAutoRemove': {
'^firmware-linux.*',
'^linux-firmware$',
'^linux-image-[a-z0-9]*$',
'^linux-image-[a-z0-9]*-[a-z0-9]*$',
},
'VersionedKernelPackages': {
# kernels
'linux-.*',
'kfreebsd-.*',
'gnumach-.*',
# (out-of-tree) modules
'.*-modules',
'.*-kernel',
},
'Never-MarkAuto-Sections': {
'metapackages',
'tasks',
},
'Move-Autobit-Sections': {
'oldlibs',
},
'Update': {
# https://unix.stackexchange.com/a/653377/357916
'Error-Mode': 'any',
},
},
},
'sources': {},
},
'monitoring': {
'services': {
'apt upgradable': {
'vars.command': '/usr/lib/nagios/plugins/check_apt_upgradable',
'vars.sudo': True,
'check_interval': '1h',
},
'current kernel': {
'vars.command': 'ls /boot/vmlinuz-* | sort -V | tail -n 1 | xargs -n1 basename | cut -d "-" -f 2- | grep -q "^$(uname -r)$"',
'check_interval': '1h',
},
'apt reboot-required': {
'vars.command': 'ls /var/run/reboot-required 2> /dev/null && exit 1 || exit 0',
'check_interval': '1h',
},
},
'packages': {},
'sources': set(),
},
}
@metadata_reactor.provides(
'apt/sources',
)
def key(metadata):
return {
'apt': {
'sources': {
source_name: {
'key': source_name,
}
for source_name, source_config in metadata.get('apt/sources').items()
if 'key' not in source_config
},
},
}
@metadata_reactor.provides(
'apt/sources',
)
def signed_by(metadata):
return {
'apt': {
'sources': {
source_name: {
'options': {
'Signed-By': '/etc/apt/keyrings/' + metadata.get(f'apt/sources/{source_name}/key') + '.' + repo.libs.apt.find_keyfile_extension(node, metadata.get(f'apt/sources/{source_name}/key')),
},
}
for source_name in metadata.get('apt/sources')
},
},
}
@metadata_reactor.provides(
'apt/config',
'apt/packages',
)
def unattended_upgrades(metadata):
return {
'apt': {
'config': {
'APT': {
'Periodic': {
'Update-Package-Lists': '1',
'Unattended-Upgrade': '1',
},
},
'Unattended-Upgrade': {
'Origins-Pattern': {
"origin=*",
},
},
},
'packages': {
'unattended-upgrades': {},
},
},
}
# @metadata_reactor.provides(
# 'apt/config',
# 'apt/list_changes',
# )
# def listchanges(metadata):
# return {
# 'apt': {
# 'config': {
# 'DPkg': {
# 'Pre-Install-Pkgs': {
# '/usr/bin/apt-listchanges --apt || test $? -lt 10',
# },
# 'Tools': {
# 'Options': {
# '/usr/bin/apt-listchanges': {
# 'Version': '2',
# 'InfoFD': '20',
# },
# },
# },
# },
# 'Dir': {
# 'Etc': {
# 'apt-listchanges-main': 'listchanges.conf',
# 'apt-listchanges-parts': 'listchanges.conf.d',
# },
# },
# },
# 'list_changes': {
# 'apt': {
# 'frontend': 'pager',
# 'which': 'news',
# 'email_address': 'root',
# 'email_format': 'text',
# 'confirm': 'false',
# 'headers': 'false',
# 'reverse': 'false',
# 'save_seen': '/var/lib/apt/listchanges.db',
# },
# },
# },
# }

View file

@ -1,47 +0,0 @@
#!/usr/bin/env python3
import json
from subprocess import check_output
from datetime import datetime, timedelta
now = datetime.now()
two_days_ago = now - timedelta(days=2)
with open('/etc/backup-freshness-check.json', 'r') as file:
config = json.load(file)
local_datasets = check_output(['zfs', 'list', '-H', '-o', 'name']).decode().splitlines()
errors = set()
for dataset in config['datasets']:
if f'tank/{dataset}' not in local_datasets:
errors.add(f'dataset "{dataset}" not present at all')
continue
snapshots = [
snapshot
for snapshot in check_output(['zfs', 'list', '-H', '-o', 'name', '-t', 'snapshot', f'tank/{dataset}', '-s', 'creation']).decode().splitlines()
if f"@{config['prefix']}" in snapshot
]
if not snapshots:
errors.add(f'dataset "{dataset}" has no backup snapshots')
continue
newest_backup_snapshot = snapshots[-1]
snapshot_datetime = datetime.utcfromtimestamp(
int(check_output(['zfs', 'list', '-p', '-H', '-o', 'creation', '-t', 'snapshot', newest_backup_snapshot]).decode())
)
if snapshot_datetime < two_days_ago:
days_ago = (now - snapshot_datetime).days
errors.add(f'dataset "{dataset}" has not been backed up for {days_ago} days')
continue
if errors:
for error in errors:
print(error)
exit(2)
else:
print(f"all {len(config['datasets'])} datasets have fresh backups.")

View file

@ -1,15 +0,0 @@
from json import dumps
from bundlewrap.metadata import MetadataJSONEncoder
files = {
'/etc/backup-freshness-check.json': {
'content': dumps({
'prefix': node.metadata.get('backup-freshness-check/prefix'),
'datasets': node.metadata.get('backup-freshness-check/datasets'),
}, indent=4, sort_keys=True, cls=MetadataJSONEncoder),
},
'/usr/lib/nagios/plugins/check_backup_freshness': {
'mode': '0755',
},
}

View file

@ -1,37 +0,0 @@
defaults = {
'backup-freshness-check': {
'server': node.name,
'prefix': 'auto-backup_',
'datasets': {},
},
'monitoring': {
'services': {
'backup freshness': {
'vars.command': '/usr/lib/nagios/plugins/check_backup_freshness',
'check_interval': '6h',
'vars.sudo': True,
},
},
},
}
@metadata_reactor.provides(
'backup-freshness-check/datasets'
)
def backup_freshness_check(metadata):
return {
'backup-freshness-check': {
'datasets': {
f"{other_node.metadata.get('id')}/{dataset}"
for other_node in repo.nodes
if not other_node.dummy
and other_node.has_bundle('backup')
and other_node.has_bundle('zfs')
and other_node.metadata.get('backup/server') == metadata.get('backup-freshness-check/server')
for dataset, options in other_node.metadata.get('zfs/datasets').items()
if options.get('backup', True)
and not options.get('mountpoint', None) in [None, 'none']
},
},
}

View file

@ -12,18 +12,8 @@ defaults = {
},
},
'sudoers': {
'backup-receiver': {
'/usr/bin/rsync',
'/sbin/zfs',
},
},
'zfs': {
'datasets': {
'tank': {
'recordsize': "1048576",
},
},
},
'backup-receiver': ['ALL'],
}
}
@ -35,53 +25,30 @@ def zfs(metadata):
for other_node in repo.nodes:
if (
not other_node.dummy and
other_node.has_bundle('backup') and
other_node.metadata.get('backup/server') == node.name
):
id = other_node.metadata.get('id')
base_dataset = f'tank/{id}'
# container
datasets[base_dataset] = {
datasets[f"tank/{other_node.metadata.get('id')}"] = {
'mountpoint': None,
'readonly': 'on',
'compression': 'lz4',
'com.sun:auto-snapshot': 'false',
'backup': False,
}
# for rsync backups
datasets[f'{base_dataset}/fs'] = {
'mountpoint': f"/mnt/backups/{id}",
datasets[f"tank/{other_node.metadata.get('id')}/fs"] = {
'mountpoint': f"/mnt/backups/{other_node.metadata.get('id')}",
'readonly': 'off',
'compression': 'lz4',
'com.sun:auto-snapshot': 'true',
'backup': False,
}
# for zfs send/recv
if other_node.has_bundle('zfs'):
# base datasets for each tank
for pool in other_node.metadata.get('zfs/pools'):
datasets[f'{base_dataset}/{pool}'] = {
'mountpoint': None,
'readonly': 'on',
'compression': 'lz4',
'com.sun:auto-snapshot': 'false',
'backup': False,
}
# actual datasets
for path in other_node.metadata.get('backup/paths'):
for dataset, config in other_node.metadata.get('zfs/datasets').items():
if path == config.get('mountpoint'):
datasets[f'{base_dataset}/{dataset}'] = {
datasets[f"tank/{other_node.metadata.get('id')}/{dataset}"] = {
'mountpoint': None,
'readonly': 'on',
'compression': 'lz4',
'com.sun:auto-snapshot': 'false',
'backup': False,
}
continue
@ -99,7 +66,7 @@ def zfs(metadata):
def dns(metadata):
return {
'dns': {
metadata.get('backup-server/hostname'): repo.libs.ip.get_a_records(metadata),
metadata.get('backup-server/hostname'): repo.libs.dns.get_a_records(metadata),
}
}

View file

@ -1,31 +1,6 @@
#!/bin/bash
set -u
# FIXME: inelegant
% if wol_command:
${wol_command}
% endif
exit=0
failed_paths=""
for path in $(jq -r '.paths | .[]' < /etc/backup/config.json)
do
echo backing up $path
/opt/backup/backup_path "$path"
# set exit to 1 if any backup fails
if [ $? -ne 0 ]
then
echo ERROR: backing up $path failed >&2
exit=5
failed_paths="$failed_paths $path"
fi
done
if [ $exit -ne 0 ]
then
echo "ERROR: failed to backup paths: $failed_paths" >&2
fi
exit $exit

View file

@ -1,15 +1,13 @@
#!/bin/bash
set -exu
path=$1
if zfs list -H -o mountpoint | grep -q "^$path$"
if zfs list -H -o mountpoint | grep -q "$path"
then
/opt/backup/backup_path_via_zfs "$path"
elif test -e "$path"
elif test -d "$path"
then
/opt/backup/backup_path_via_rsync "$path"
/opt/backuo/backup_path_via_rsync "$path"
else
echo "UNKNOWN PATH: $path"
exit 1

View file

@ -5,16 +5,7 @@ set -exu
path=$1
uuid=$(jq -r .client_uuid < /etc/backup/config.json)
server=$(jq -r .server_hostname < /etc/backup/config.json)
ssh="ssh -o ConnectTimeout=5 backup-receiver@$server"
ssh="ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 backup-receiver@$server"
if test -d "$path"
then
postfix="/"
elif test -f "$path"
then
postfix=""
else
exit 1
fi
rsync -av --rsync-path="sudo rsync" "$path$postfix" "backup-receiver@$server:/mnt/backups/$uuid$path$postfix"
rsync -av --rsync-path="sudo rsync" "$path/" "backup-receiver@$server:/mnt/backups/$uuid$path/"
$ssh sudo zfs snap "tank/$uuid/fs@auto-backup_$(date +"%Y-%m-%d_%H:%M:%S")"

View file

@ -1,11 +1,11 @@
#!/bin/bash
set -eu
set -exu
path=$1
uuid=$(jq -r .client_uuid < /etc/backup/config.json)
server=$(jq -r .server_hostname < /etc/backup/config.json)
ssh="ssh -o ConnectTimeout=5 backup-receiver@$server"
ssh="ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 backup-receiver@$server"
source_dataset=$(zfs list -H -o mountpoint,name | grep -P "^$path\t" | cut -d $'\t' -f 2)
target_dataset="tank/$uuid/$source_dataset"
@ -39,26 +39,13 @@ else
echo "INCREMENTAL BACKUP"
last_bookmark=$(zfs list -t bookmark -H -o name | grep "^$source_dataset#$bookmark_prefix" | sort | tail -1 | cut -d '#' -f 2)
[[ -z "$last_bookmark" ]] && echo "ERROR - last_bookmark is empty" && exit 98
$(zfs send -v -L -i "#$last_bookmark" "$source_dataset@$new_bookmark" | $ssh sudo zfs recv "$target_dataset")
$(zfs send -v -i "#$last_bookmark" "$source_dataset@$new_bookmark" | $ssh sudo zfs recv "$target_dataset")
fi
if [[ "$?" == "0" ]]
then
# delete old local bookmarks
for destroyable_bookmark in $(zfs list -t bookmark -H -o name "$source_dataset" | grep "^$source_dataset#$bookmark_prefix")
do
zfs destroy "$destroyable_bookmark"
done
# delete remote snapshots from bookmarks (except the newest, even if not necessary; maybe useful for resuming, though)
for destroyable_snapshot in $($ssh sudo zfs list -t snapshot -H -o name "$target_dataset" | grep "^$target_dataset@$bookmark_prefix" | grep -v "$new_bookmark")
do
$ssh sudo zfs destroy "$destroyable_snapshot"
done
zfs bookmark "$source_dataset@$new_bookmark" "$source_dataset#$new_bookmark"
zfs destroy "$source_dataset@$new_bookmark" # keep snapshots?
zfs destroy "$source_dataset@$new_bookmark"
echo "SUCCESS"
else
zfs destroy "$source_dataset@$new_bookmark"

View file

@ -1,16 +1,9 @@
from json import dumps
backup_node = repo.get_node(node.metadata.get('backup/server'))
directories['/opt/backup'] = {}
files['/opt/backup/backup_all'] = {
'mode': '700',
'content_type': 'mako',
'context': {
'wol_command': backup_node.metadata.get('wol-sleeper/wake_command', False),
},
}
files['/opt/backup/backup_path'] = {
'mode': '700',
@ -27,7 +20,7 @@ directories['/etc/backup'] = {}
files['/etc/backup/config.json'] = {
'content': dumps(
{
'server_hostname': backup_node.metadata.get('backup-server/hostname'),
'server_hostname': repo.get_node(node.metadata.get('backup/server')).metadata.get('backup-server/hostname'),
'client_uuid': node.metadata.get('id'),
'paths': sorted(set(node.metadata.get('backup/paths'))),
},

View file

@ -1,16 +1,8 @@
defaults = {
'apt': {
'packages': {
'jq': {
'needed_by': {
'svc_systemd:backup.timer',
},
},
'rsync': {
'needed_by': {
'svc_systemd:backup.timer',
},
},
'jq': {},
'rsync': {},
},
},
'backup': {
@ -20,11 +12,7 @@ defaults = {
'systemd-timers': {
f'backup': {
'command': '/opt/backup/backup_all',
'when': '1:00',
'persistent': True,
'after': {
'network-online.target',
},
'when': 'daily',
},
},
}

View file

@ -1,69 +0,0 @@
from ipaddress import ip_interface
@metadata_reactor.provides(
'dns',
)
def acme_records(metadata):
domains = set()
for other_node in repo.nodes:
for domain, conf in other_node.metadata.get('letsencrypt/domains', {}).items():
domains.add(domain)
domains.update(conf.get('aliases', []))
return {
'dns': {
f'_acme-challenge.{domain}': {
'CNAME': {f"{domain}.{metadata.get('bind/acme_zone')}."},
}
for domain in domains
}
}
@metadata_reactor.provides(
'bind/acls/acme',
'bind/views/external/keys/acme',
'bind/views/external/zones',
)
def acme_zone(metadata):
allowed_ips = {
*{
str(ip_interface(other_node.metadata.get('network/internal/ipv4')).ip)
for other_node in repo.nodes
if other_node.metadata.get('letsencrypt/domains', {})
},
*{
str(ip_interface(other_node.metadata.get('wireguard/my_ip')).ip)
for other_node in repo.nodes
if other_node.has_bundle('wireguard')
},
}
return {
'bind': {
'acls': {
'acme': {
'key acme',
'!{ !{' + ' '.join(f'{ip};' for ip in sorted(allowed_ips)) + '}; any;}',
},
},
'views': {
'external': {
'keys': {
'acme': {},
},
'zones': {
metadata.get('bind/acme_zone'): {
'allow_update': {
'acme',
},
},
},
},
},
},
}
#https://lists.isc.org/pipermail/bind-users/2006-January/061051.html

View file

@ -4,15 +4,15 @@ def column_width(column, table):
%>\
$TTL 600
@ IN SOA ${hostname}. admin.${hostname}. (
2021111709 ;Serial
2021070821 ;Serial
3600 ;Refresh
200 ;Retry
1209600 ;Expire
900 ;Negative response caching TTL
)
% for record in sorted(records, key=lambda r: (tuple(reversed(r['name'].split('.'))), r['type'], r['value'])):
(${(record['name'] or '@').rjust(column_width('name', records))}) \
% for record in sorted(records, key=lambda r: (r['name'], r['type'], r['value'])):
${(record['name'] or '@').ljust(column_width('name', records))} \
IN \
${record['type'].ljust(column_width('type', records))} \
% if record['type'] == 'TXT':

View file

@ -1,33 +1,14 @@
# KEYS
% for view_name, view_conf in views.items():
% for key_name, key_conf in sorted(view_conf['keys'].items()):
key "${key_name}" {
algorithm hmac-sha512;
secret "${key_conf['token']}";
};
% endfor
% endfor
# ACLS
% for acl_name, acl_content in acls.items():
acl "${acl_name}" {
% for ac in sorted(acl_content, key=lambda e: (not e.startswith('!'), not e.startswith('key'), e)):
${ac};
% endfor
% for view in views:
acl "${view['name']}" {
${' '.join(f'{e};' for e in view['acl'])}
};
% endfor
# VIEWS
% for view in views:
view "${view['name']}" {
match-clients { ${view['name']}; };
% for view_name, view_conf in views.items():
view "${view_name}" {
match-clients {
${view_name};
};
% if view_conf['is_internal']:
% if view['is_internal']:
recursion yes;
% else:
recursion no;
@ -44,22 +25,13 @@ view "${view_name}" {
8.8.8.8;
};
% for zone_name, zone_conf in sorted(view_conf['zones'].items()):
zone "${zone_name}" {
% if type == 'slave' and zone_conf.get('allow_update', []):
type slave;
% for zone in zones:
zone "${zone}" {
type ${type};
% if type == 'slave':
masters { ${master_ip}; };
% else:
type master;
% if zone_conf.get('allow_update', []):
allow-update {
% for allow_update in zone_conf['allow_update']:
${allow_update};
% endfor
};
% endif
% endif
file "/var/lib/bind/${view_name}/${zone_name}";
file "/var/lib/bind/${view['name']}/db.${zone}";
};
% endfor

View file

@ -1,25 +1,25 @@
from ipaddress import ip_address, ip_interface
from datetime import datetime
from hashlib import sha3_512
if node.metadata.get('bind/type') == 'master':
master_node = node
zones = node.metadata.get('bind/zones')
master_ip = None
slave_ips = [
ip_interface(repo.get_node(slave).metadata.get('network/external/ipv4')).ip
for slave in node.metadata.get('bind/slaves')
]
else:
master_node = repo.get_node(node.metadata.get('bind/master_node'))
zones = repo.get_node(node.metadata.get('bind/master_node')).metadata.get('bind/zones')
master_ip = ip_interface(repo.get_node(node.metadata.get('bind/master_node')).metadata.get('network/external/ipv4')).ip
slave_ips = []
directories[f'/var/lib/bind'] = {
'owner': 'bind',
'group': 'bind',
'purge': True,
'needs': [
'pkg_apt:bind9',
],
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
@ -29,7 +29,7 @@ files['/etc/default/bind9'] = {
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
@ -43,16 +43,14 @@ files['/etc/bind/named.conf'] = {
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
files['/etc/bind/named.conf.options'] = {
'content_type': 'mako',
'context': {
'type': node.metadata.get('bind/type'),
'slave_ips': node.metadata.get('bind/slave_ips', []),
'master_ip': node.metadata.get('bind/master_ip', None),
'slave_ips': sorted(slave_ips),
},
'owner': 'root',
'group': 'bind',
@ -63,26 +61,38 @@ files['/etc/bind/named.conf.options'] = {
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
views = [
{
'name': 'internal',
'is_internal': True,
'acl': [
'127.0.0.1',
'10.0.0.0/8',
'169.254.0.0/16',
'172.16.0.0/12',
'192.168.0.0/16',
]
},
{
'name': 'external',
'is_internal': False,
'acl': [
'any',
]
},
]
files['/etc/bind/named.conf.local'] = {
'content_type': 'mako',
'context': {
'type': node.metadata.get('bind/type'),
'master_ip': node.metadata.get('bind/master_ip', None),
'acls': {
**master_node.metadata.get('bind/acls'),
**{
view_name: view_conf['match_clients']
for view_name, view_conf in master_node.metadata.get('bind/views').items()
},
},
'views': dict(sorted(
master_node.metadata.get('bind/views').items(),
key=lambda e: (e[1].get('default', False), e[0]),
)),
'master_ip': master_ip,
'views': views,
'zones': sorted(zones),
},
'owner': 'root',
'group': 'bind',
@ -93,45 +103,72 @@ files['/etc/bind/named.conf.local'] = {
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
for view_name, view_conf in master_node.metadata.get('bind/views').items():
directories[f"/var/lib/bind/{view_name}"] = {
'owner': 'bind',
'group': 'bind',
def record_matches_view(record, records, view):
if record['type'] in ['A', 'AAAA']:
if view == 'external':
# no internal addresses in external view
if ip_address(record['value']).is_private:
return False
elif view == 'internal':
# external addresses in internal view only, if no internal exists
if ip_address(record['value']).is_global:
for other_record in records:
if (
record['name'] == other_record['name'] and
record['type'] == other_record['type'] and
ip_address(other_record['value']).is_private
):
return False
return True
for view in views:
directories[f"/var/lib/bind/{view['name']}"] = {
'purge': True,
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
for zone_name, zone_conf in view_conf['zones'].items():
files[f"/var/lib/bind/{view_name}/{zone_name}"] = {
for zone, record_dicts in zones.items():
records = record_dicts.values()
unique_records = [
dict(record_tuple)
for record_tuple in set(
tuple(record.items()) for record in records
)
]
files[f"/var/lib/bind/{view['name']}/db.{zone}"] = {
'group': 'bind',
'source': 'db',
'content_type': 'mako',
'unless': f"test -f /var/lib/bind/{view_name}/{zone_name}" if zone_conf.get('allow_update', False) else 'false',
'context': {
'view': view['name'],
'serial': datetime.now().strftime('%Y%m%d%H'),
'records': zone_conf['records'],
'records': list(filter(
lambda record: record_matches_view(record, records, view['name']),
unique_records
)),
'hostname': node.metadata.get('bind/hostname'),
'type': node.metadata.get('bind/type'),
},
'owner': 'bind',
'group': 'bind',
'needs': [
f"directory:/var/lib/bind/{view['name']}",
],
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
svc_systemd['bind9'] = {}
actions['named-checkconf'] = {
@ -139,6 +176,5 @@ actions['named-checkconf'] = {
'unless': 'named-checkconf -z',
'needs': [
'svc_systemd:bind9',
'svc_systemd:bind9:reload',
]
}

View file

@ -1,7 +1,6 @@
from ipaddress import ip_interface
from json import dumps
h = repo.libs.hashable.hashable
repo.libs.bind.repo = repo
defaults = {
'apt': {
@ -10,42 +9,8 @@ defaults = {
},
},
'bind': {
'zones': {},
'slaves': {},
'acls': {
'our-nets': {
'127.0.0.1',
'10.0.0.0/8',
'169.254.0.0/16',
'172.16.0.0/12',
'192.168.0.0/16',
}
},
'views': {
'internal': {
'is_internal': True,
'keys': {},
'match_clients': {
'our-nets',
},
'zones': {},
},
'external': {
'default': True,
'is_internal': False,
'keys': {},
'match_clients': {
'any',
},
'zones': {},
},
},
'zones': set(),
},
'nftables': {
'input': {
'tcp dport 53 accept',
'udp dport 53 accept',
},
},
'telegraf': {
'config': {
@ -63,25 +28,11 @@ defaults = {
@metadata_reactor.provides(
'bind/type',
'bind/master_ip',
'bind/slave_ips',
)
def master_slave(metadata):
if metadata.get('bind/master_node', None):
def type(metadata):
return {
'bind': {
'type': 'slave',
'master_ip': str(ip_interface(repo.get_node(metadata.get('bind/master_node')).metadata.get('network/external/ipv4')).ip),
}
}
else:
return {
'bind': {
'type': 'master',
'slave_ips': {
str(ip_interface(repo.get_node(slave).metadata.get('network/external/ipv4')).ip)
for slave in metadata.get('bind/slaves')
}
'type': 'slave' if metadata.get('bind/master_node', None) else 'master',
}
}
@ -92,27 +43,26 @@ def master_slave(metadata):
def dns(metadata):
return {
'dns': {
metadata.get('bind/hostname'): repo.libs.ip.get_a_records(metadata),
metadata.get('bind/hostname'): repo.libs.dns.get_a_records(metadata),
}
}
@metadata_reactor.provides(
'bind/views',
'bind/zones',
)
def collect_records(metadata):
if metadata.get('bind/type') == 'slave':
return {}
views = {}
zones = {}
for view_name, view_conf in metadata.get('bind/views').items():
for other_node in repo.nodes:
for fqdn, records in other_node.metadata.get('dns', {}).items():
matching_zones = sorted(
filter(
lambda potential_zone: fqdn.endswith(potential_zone),
metadata.get('bind/zones')
metadata.get('bind/zones').keys()
),
key=len,
)
@ -125,25 +75,22 @@ def collect_records(metadata):
for type, values in records.items():
for value in values:
if repo.libs.bind.record_matches_view(value, type, name, zone, view_name, metadata):
views\
.setdefault(view_name, {})\
.setdefault('zones', {})\
entry = {'name': name, 'type': type, 'value': value}
zones\
.setdefault(zone, {})\
.setdefault('records', set())\
.add(
h({'name': name, 'type': type, 'value': value})
)
.update({
str(hash(dumps(entry))): entry,
})
return {
'bind': {
'views': views,
'zones': zones,
},
}
@metadata_reactor.provides(
'bind/views',
'bind/zones',
)
def ns_records(metadata):
if metadata.get('bind/type') == 'slave':
@ -158,20 +105,12 @@ def ns_records(metadata):
]
return {
'bind': {
'views': {
view_name: {
'zones': {
zone_name: {
'records': {
zone: {
# FIXME: bw currently cant handle lists of dicts :(
h({'name': '@', 'type': 'NS', 'value': f"{nameserver}."})
str(hash(dumps({'name': '@', 'type': 'NS', 'value': f"{nameserver}."}))): {'name': '@', 'type': 'NS', 'value': f"{nameserver}."}
for nameserver in nameservers
}
}
for zone_name, zone_conf in view_conf['zones'].items()
}
}
for view_name, view_conf in metadata.get('bind/views').items()
} for zone in metadata.get('bind/zones').keys()
},
},
}
@ -193,65 +132,3 @@ def slaves(metadata):
],
},
}
@metadata_reactor.provides(
'bind/views',
)
def generate_keys(metadata):
if metadata.get('bind/type') == 'slave':
return {}
return {
'bind': {
'views': {
view_name: {
'keys': {
key: {
'token':repo.libs.hmac.hmac_sha512(
key,
str(repo.vault.random_bytes_as_base64_for(
f"{metadata.get('id')} bind key {key}",
length=32,
)),
)
}
for key in view_conf['keys']
}
}
for view_name, view_conf in metadata.get('bind/views').items()
}
}
}
@metadata_reactor.provides(
'bind/views',
)
def generate_acl_entries_for_keys(metadata):
if metadata.get('bind/type') == 'slave':
return {}
return {
'bind': {
'views': {
view_name: {
'match_clients': {
# allow keys from this view
*{
f'key {key}'
for key in view_conf['keys']
},
# reject keys from other views
*{
f'! key {key}'
for other_view_name, other_view_conf in metadata.get('bind/views').items()
if other_view_name != view_name
for key in other_view_conf.get('keys', [])
}
}
}
for view_name, view_conf in metadata.get('bind/views').items()
},
},
}

View file

@ -1,38 +0,0 @@
defaults = {
'apt': {
'packages': {
'build-essential': {},
# crystal
'clang': {},
'libssl-dev': {},
'libpcre3-dev': {},
'libgc-dev': {},
'libevent-dev': {},
'zlib1g-dev': {},
},
},
'users': {
'build-agent': {
'home': '/var/lib/build-agent',
},
},
}
@metadata_reactor.provides(
'users/build-agent/authorized_users',
)
def ssh_keys(metadata):
return {
'users': {
'build-agent': {
'authorized_users': {
f'build-server@{other_node.name}'
for other_node in repo.nodes
if other_node.has_bundle('build-server')
for architecture in other_node.metadata.get('build-server/architectures').values()
if architecture['node'] == node.name
},
},
},
}

View file

@ -1,9 +0,0 @@
for project, options in node.metadata.get('build-ci').items():
directories[options['path']] = {
'owner': 'build-ci',
'group': options['group'],
'mode': '770',
'needs': [
'user:build-ci',
],
}

View file

@ -1,29 +0,0 @@
from shlex import quote
defaults = {
'build-ci': {},
}
@metadata_reactor.provides(
'users/build-ci/authorized_users',
'sudoers/build-ci',
)
def ssh_keys(metadata):
return {
'users': {
'build-ci': {
'authorized_users': {
f'build-server@{other_node.name}'
for other_node in repo.nodes
if other_node.has_bundle('build-server')
},
},
},
'sudoers': {
'build-ci': {
f"/usr/bin/chown -R build-ci\\:{quote(ci['group'])} {quote(ci['path'])}"
for ci in metadata.get('build-ci').values()
}
},
}

View file

@ -1,2 +0,0 @@
JSON=$(cat bundles/build-server/example.json)
curl -X POST 'https://build.sublimity.de/crystal?file=procio.cr' -H "Content-Type: application/json" --data-binary @- <<< $JSON

View file

@ -1,169 +0,0 @@
{
"after": "122d7843c7814079e8df4919b0208c95ec7c75e3",
"before": "7a358255247926363ef0ef34111f0bc786a8c6f4",
"commits": [
{
"added": [],
"author": {
"email": "mwiegand@seibert-media.net",
"name": "mwiegand",
"username": ""
},
"committer": {
"email": "mwiegand@seibert-media.net",
"name": "mwiegand",
"username": ""
},
"id": "122d7843c7814079e8df4919b0208c95ec7c75e3",
"message": "wip\n",
"modified": [
"README.md"
],
"removed": [],
"timestamp": "2021-11-16T22:10:05+01:00",
"url": "https://git.sublimity.de/cronekorkn/telegraf-procio/commit/122d7843c7814079e8df4919b0208c95ec7c75e3",
"verification": null
}
],
"compare_url": "https://git.sublimity.de/cronekorkn/telegraf-procio/compare/7a358255247926363ef0ef34111f0bc786a8c6f4...122d7843c7814079e8df4919b0208c95ec7c75e3",
"head_commit": {
"added": [],
"author": {
"email": "mwiegand@seibert-media.net",
"name": "mwiegand",
"username": ""
},
"committer": {
"email": "mwiegand@seibert-media.net",
"name": "mwiegand",
"username": ""
},
"id": "122d7843c7814079e8df4919b0208c95ec7c75e3",
"message": "wip\n",
"modified": [
"README.md"
],
"removed": [],
"timestamp": "2021-11-16T22:10:05+01:00",
"url": "https://git.sublimity.de/cronekorkn/telegraf-procio/commit/122d7843c7814079e8df4919b0208c95ec7c75e3",
"verification": null
},
"pusher": {
"active": false,
"avatar_url": "https://git.sublimity.de/user/avatar/cronekorkn/-1",
"created": "2021-06-13T19:19:25+02:00",
"description": "",
"email": "i@ckn.li",
"followers_count": 0,
"following_count": 0,
"full_name": "",
"id": 1,
"is_admin": false,
"language": "",
"last_login": "0001-01-01T00:00:00Z",
"location": "",
"login": "cronekorkn",
"prohibit_login": false,
"restricted": false,
"starred_repos_count": 0,
"username": "cronekorkn",
"visibility": "public",
"website": ""
},
"ref": "refs/heads/master",
"repository": {
"allow_merge_commits": true,
"allow_rebase": true,
"allow_rebase_explicit": true,
"allow_squash_merge": true,
"archived": false,
"avatar_url": "",
"clone_url": "https://git.sublimity.de/cronekorkn/telegraf-procio.git",
"created_at": "2021-11-05T18:46:04+01:00",
"default_branch": "master",
"default_merge_style": "merge",
"description": "",
"empty": false,
"fork": false,
"forks_count": 0,
"full_name": "cronekorkn/telegraf-procio",
"has_issues": true,
"has_projects": true,
"has_pull_requests": true,
"has_wiki": true,
"html_url": "https://git.sublimity.de/cronekorkn/telegraf-procio",
"id": 5,
"ignore_whitespace_conflicts": false,
"internal": false,
"internal_tracker": {
"allow_only_contributors_to_track_time": true,
"enable_issue_dependencies": true,
"enable_time_tracker": true
},
"mirror": false,
"mirror_interval": "",
"name": "telegraf-procio",
"open_issues_count": 0,
"open_pr_counter": 0,
"original_url": "",
"owner": {
"active": false,
"avatar_url": "https://git.sublimity.de/user/avatar/cronekorkn/-1",
"created": "2021-06-13T19:19:25+02:00",
"description": "",
"email": "i@ckn.li",
"followers_count": 0,
"following_count": 0,
"full_name": "",
"id": 1,
"is_admin": false,
"language": "",
"last_login": "0001-01-01T00:00:00Z",
"location": "",
"login": "cronekorkn",
"prohibit_login": false,
"restricted": false,
"starred_repos_count": 0,
"username": "cronekorkn",
"visibility": "public",
"website": ""
},
"parent": null,
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"release_counter": 0,
"size": 28,
"ssh_url": "git@git.sublimity.de:cronekorkn/telegraf-procio.git",
"stars_count": 0,
"template": false,
"updated_at": "2021-11-16T21:41:40+01:00",
"watchers_count": 1,
"website": ""
},
"sender": {
"active": false,
"avatar_url": "https://git.sublimity.de/user/avatar/cronekorkn/-1",
"created": "2021-06-13T19:19:25+02:00",
"description": "",
"email": "i@ckn.li",
"followers_count": 0,
"following_count": 0,
"full_name": "",
"id": 1,
"is_admin": false,
"language": "",
"last_login": "0001-01-01T00:00:00Z",
"location": "",
"login": "cronekorkn",
"prohibit_login": false,
"restricted": false,
"starred_repos_count": 0,
"username": "cronekorkn",
"visibility": "public",
"website": ""
}
}

View file

@ -1,31 +0,0 @@
#!/bin/bash
set -xu
CONFIG_PATH=${config_path}
JSON="$1"
REPO_NAME=$(jq -r .repository.name <<< $JSON)
CLONE_URL=$(jq -r .repository.clone_url <<< $JSON)
REPO_BRANCH=$(jq -r .ref <<< $JSON | cut -d'/' -f3)
SSH_OPTIONS='-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
for INTEGRATION in "$(cat $CONFIG_PATH | jq -r '.ci | values[]')"
do
[[ $(jq -r '.repo' <<< $INTEGRATION) = "$REPO_NAME" ]] || continue
[[ $(jq -r '.branch' <<< $INTEGRATION) = "$REPO_BRANCH" ]] || continue
HOSTNAME=$(jq -r '.hostname' <<< $INTEGRATION)
DEST_PATH=$(jq -r '.path' <<< $INTEGRATION)
DEST_GROUP=$(jq -r '.group' <<< $INTEGRATION)
[[ -z "$HOSTNAME" ]] || [[ -z "$DEST_PATH" ]] || [[ -z "$DEST_GROUP" ]] && exit 5
cd ~
rm -rf "$REPO_NAME"
git clone "$CLONE_URL" "$REPO_NAME"
ssh $SSH_OPTIONS "build-ci@$HOSTNAME" "find \"$DEST_PATH\" -mindepth 1 -delete"
scp -r $SSH_OPTIONS "$REPO_NAME"/* "build-ci@$HOSTNAME:$DEST_PATH"
ssh $SSH_OPTIONS "build-ci@$HOSTNAME" "sudo chown -R build-ci:$DEST_GROUP $(printf "%q" "$DEST_PATH")"
done

View file

@ -1,32 +0,0 @@
#!/bin/bash
set -exu
DOWNLOAD_SERVER="${download_server}"
CONFIG=$(cat ${config_path})
JSON="$1"
ARGS="$2"
REPO_NAME=$(jq -r .repository.name <<< $JSON)
CLONE_URL=$(jq -r .repository.clone_url <<< $JSON)
BUILD_FILE=$(jq -r .file <<< $ARGS)
DATE=$(date --utc +%s)
cd ~
rm -rf "$REPO_NAME"
git clone "$CLONE_URL"
cd "$REPO_NAME"
shards install
for ARCH in $(jq -r '.architectures | keys[]' <<< $CONFIG)
do
TARGET=$(jq -r .architectures.$ARCH.target <<< $CONFIG)
IP=$(jq -r .architectures.$ARCH.ip <<< $CONFIG)
BUILD_CMD=$(crystal build "$BUILD_FILE" --cross-compile --target="$TARGET" --release -o "$REPO_NAME")
scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "$REPO_NAME.o" "build-agent@$IP:~"
ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "build-agent@$IP" $BUILD_CMD
scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "build-agent@$IP:~/$REPO_NAME" .
ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "downloads@$DOWNLOAD_SERVER" mkdir -p "~/$REPO_NAME"
scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "$REPO_NAME" "downloads@$DOWNLOAD_SERVER:~/$REPO_NAME/$REPO_NAME-$ARCH-$DATE"
ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "downloads@$DOWNLOAD_SERVER" ln -sf "$REPO_NAME-$ARCH-$DATE" "~/$REPO_NAME/$REPO_NAME-$ARCH-latest"
done

View file

@ -1,32 +0,0 @@
import json
from bundlewrap.metadata import MetadataJSONEncoder
directories = {
'/opt/build-server/strategies': {
'owner': 'build-server',
},
}
files = {
'/etc/build-server.json': {
'owner': 'build-server',
'content': json.dumps(node.metadata.get('build-server'), indent=4, sort_keys=True, cls=MetadataJSONEncoder)
},
'/opt/build-server/strategies/crystal': {
'content_type': 'mako',
'owner': 'build-server',
'mode': '0777', # FIXME
'context': {
'config_path': '/etc/build-server.json',
'download_server': node.metadata.get('build-server/download_server_ip'),
},
},
'/opt/build-server/strategies/ci': {
'content_type': 'mako',
'owner': 'build-server',
'mode': '0777', # FIXME
'context': {
'config_path': '/etc/build-server.json',
},
},
}

View file

@ -1,78 +0,0 @@
from ipaddress import ip_interface
defaults = {
'flask': {
'build-server' : {
'git_url': "https://git.sublimity.de/cronekorkn/build-server.git",
'port': 4000,
'app_module': 'build_server',
'user': 'build-server',
'group': 'build-server',
'timeout': 900,
'env': {
'CONFIG': '/etc/build-server.json',
'STRATEGIES_DIR': '/opt/build-server/strategies',
},
},
},
'users': {
'build-server': {
'home': '/var/lib/build-server',
},
},
}
@metadata_reactor.provides(
'build-server',
)
def agent_conf(metadata):
download_server = repo.get_node(metadata.get('build-server/download_server'))
return {
'build-server': {
'architectures': {
architecture: {
'ip': str(ip_interface(repo.get_node(conf['node']).metadata.get('network/internal/ipv4')).ip),
}
for architecture, conf in metadata.get('build-server/architectures').items()
},
'download_server_ip': str(ip_interface(download_server.metadata.get('network/internal/ipv4')).ip),
},
}
@metadata_reactor.provides(
'build-server',
)
def ci(metadata):
return {
'build-server': {
'ci': {
f'{repo}@{other_node.name}': {
'hostname': other_node.metadata.get('hostname'),
'repo': repo,
**options,
}
for other_node in repo.nodes
if other_node.has_bundle('build-ci')
for repo, options in other_node.metadata.get('build-ci').items()
},
},
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('build-server/hostname'): {
'content': 'nginx/proxy_pass.conf',
'context': {
'target': 'http://127.0.0.1:4000',
},
'check_path': '/status',
},
},
},
}

View file

@ -1,20 +0,0 @@
debian_version = min([node.os_version, (11,)])[0] # FIXME
defaults = {
'apt': {
'packages': {
'crystal': {},
},
'sources': {
'crystal': {
# https://software.opensuse.org/download.html?project=devel%3Alanguages%3Acrystal&package=crystal
'urls': {
'http://download.opensuse.org/repositories/devel:/languages:/crystal/Debian_Testing/',
},
'suites': {
'/',
},
},
},
},
}

View file

@ -33,14 +33,3 @@ for name, conf in node.metadata.get('dm-crypt').items():
for pool, pool_conf in node.metadata.get('zfs/pools').items():
if f'/dev/mapper/{name}' in pool_conf['devices']:
actions[f'dm-crypt_open_{name}']['needed_by'].add(f'zfs_pool:{pool}')
actions[f'zpool_import_{name}'] = {
'command': f"zpool import -d /dev/mapper/{name} {pool}",
'unless': f"zpool status {pool}",
'needs': {
f"action:dm-crypt_open_{name}",
},
'needed_by': {
f"zfs_pool:{pool}",
},
}

View file

@ -1,12 +1,9 @@
DOVECOT
=======
rescan index
------------
https://doc.dovecot.org/configuration_manual/fts/#rescan
rescan index: https://doc.dovecot.org/configuration_manual/fts/#rescan
```
doveadm fts rescan -u 'i@ckn.li'
doveadm index -u 'i@ckn.li' -q '*'
sudo -u vmail doveadm fts rescan -u 'test@mail2.sublimity.de'
sudo -u vmail doveadm index -u 'test@mail2.sublimity.de' -q '*'
```

View file

@ -66,7 +66,8 @@ xmlunzip() {
trap "rm -rf $path $tempdir" 0 1 2 3 14 15
cd $tempdir || exit 1
unzip -q "$path" 2>/dev/null || exit 0
find . -name "$name" -print0 | xargs -0 cat | /usr/lib/dovecot/xml2text
find . -name "$name" -print0 | xargs -0 cat |
$libexec_dir/xml2text
}
wait_timeout() {

View file

@ -2,13 +2,6 @@ connect = host=${host} dbname=${name} user=${user} password=${password}
driver = pgsql
default_pass_scheme = ARGON2ID
user_query = SELECT '/var/vmail/%u' AS home, 'vmail' AS uid, 'vmail' AS gid
iterate_query = SELECT CONCAT(users.name, '@', domains.name) AS user \
FROM users \
LEFT JOIN domains ON users.domain_id = domains.id \
WHERE redirect IS NULL
password_query = SELECT CONCAT(users.name, '@', domains.name) AS user, password\
FROM users \
LEFT JOIN domains ON users.domain_id = domains.id \

View file

@ -6,7 +6,7 @@ ssl_cert = </var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')
ssl_key = </var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')}/privkey.pem
ssl_dh = </etc/dovecot/dhparam.pem
ssl_client_ca_dir = /etc/ssl/certs
mail_location = maildir:${node.metadata.get('mailserver/maildir')}/%u:INDEX=${node.metadata.get('mailserver/maildir')}/index/%u
mail_location = maildir:~
mail_plugins = fts fts_xapian
namespace inbox {
@ -34,10 +34,9 @@ passdb {
driver = sql
args = /etc/dovecot/dovecot-sql.conf
}
# use sql for userdb too, to enable iterate_query
userdb {
driver = sql
args = /etc/dovecot/dovecot-sql.conf
driver = static
args = uid=vmail gid=vmail home=/var/vmail/%u
}
service auth {

View file

@ -0,0 +1 @@
www-data ALL=(ALL) NOPASSWD: /usr/bin/doveadm pw -s ARGON2ID

View file

@ -20,10 +20,6 @@ directories = {
'owner': 'vmail',
'group': 'vmail',
},
'/var/vmail/index': {
'owner': 'vmail',
'group': 'vmail',
},
'/var/vmail/sieve': {
'owner': 'vmail',
'group': 'vmail',

View file

@ -13,26 +13,15 @@ defaults = {
'catdoc': {}, # catdoc, catppt, xls2csv
},
},
'dovecot': {
'database': {
'dbname': 'mailserver',
'dbuser': 'mailserver',
},
},
'letsencrypt': {
'reload_after': {
'dovecot',
},
},
'nftables': {
'input': {
'tcp dport {143, 993, 4190} accept',
},
},
'systemd-timers': {
'dovecot-optimize-index': {
'command': '/usr/bin/doveadm fts optimize -A',
'when': 'daily',
'dovecot': {
'database': {
'dbname': 'mailserver',
'dbuser': 'mailserver',
},
},
}

View file

@ -1,66 +0,0 @@
defaults = {
'users': {
'downloads': {
'home': '/var/lib/downloads',
'needs': {
'zfs_dataset:tank/downloads'
},
},
},
'zfs': {
'datasets': {
'tank/downloads': {
'mountpoint': '/var/lib/downloads',
},
},
},
}
@metadata_reactor.provides(
'systemd-mount'
)
def mount_certs(metadata):
return {
'systemd-mount': {
'/var/lib/downloads_nginx': {
'source': '/var/lib/downloads',
'user': 'www-data',
},
},
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('download-server/hostname'): {
'content': 'nginx/directory_listing.conf',
'context': {
'directory': '/var/lib/downloads_nginx',
},
},
},
},
}
@metadata_reactor.provides(
'users/downloads/authorized_users',
)
def ssh_keys(metadata):
return {
'users': {
'downloads': {
'authorized_users': {
f'build-server@{other_node.name}'
for other_node in repo.nodes
if other_node.has_bundle('build-server')
},
},
},
}

View file

@ -1,54 +0,0 @@
# Flask
This bundle can deploy one or more Flask applications per node.
```python
'flask': {
'myapp': {
'app_module': "myapp",
'apt_dependencies': [
"libffi-dev",
"libssl-dev",
],
'env': {
'APP_SECRETS': "/opt/client_secrets.json",
},
'json_config': {
'this json': 'is_visible',
'inside': 'your template.cfg',
},
'git_url': "ssh://git@bitbucket.apps.seibert-media.net:7999/smedia/myapp.git",
'git_branch': "master",
'deployment_triggers': ["action:do-a-thing"],
},
},
```
The git repo containing the application has to follow a few conventions:
* requirements-frozen.txt (preferred) or requirements.txt
* minimal setup.py to allow for installation with pip
The `app` instance has to exist in the module defined by `app_module`.
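For illustration, a minimal `setup.py` along these lines is usually enough for the pip install step (the package name here is a placeholder, not part of this bundle):
```python
# setup.py -- just enough metadata for `pip install -e .`
from setuptools import setup, find_packages

setup(
    name="myapp",        # placeholder, match your actual package name
    version="0.1.0",
    packages=find_packages(),
)
```
A bare `app = Flask(__name__)` in the module named by `app_module` satisfies the last requirement.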
It is also very advisable to enable logging in your app (otherwise HTTP 500s won't be logged):
```python
import logging
if not app.debug:
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
app.logger.addHandler(stream_handler)
```
If you specify `json_config`, then `/opt/${app}/config.json` will be
created. The environment variable `$APP_CONFIG` will point to that exact
path. You can use it in your app to load your config:
```python
app.config.from_json(environ['APP_CONFIG'])
```
If `json_config` is *not* specified, you *can* put a static file in
`data/flask/files/cfg/$app_name`.

View file

@ -1,10 +0,0 @@
<%
from json import dumps
from bundlewrap.metadata import MetadataJSONEncoder
%>
${dumps(
json_config,
cls=MetadataJSONEncoder,
indent=4,
sort_keys=True,
)}

View file

@ -1,14 +0,0 @@
[Unit]
Description=flask application ${name}
After=network.target
[Service]
% for key, value in env.items():
Environment=${key}=${value}
% endfor
User=${user}
Group=${group}
ExecStart=/opt/${name}/venv/bin/gunicorn -w ${workers} -b ${host}:${port} ${app_module}:app
[Install]
WantedBy=multi-user.target

View file

@ -1,119 +0,0 @@
for name, conf in node.metadata.get('flask').items():
for dep in conf.get('apt_dependencies', []):
pkg_apt[dep] = {
'needed_by': {
f'svc_systemd:{name}',
},
}
directories[f'/opt/{name}'] = {
'owner': conf['user'],
'group': conf['group'],
}
directories[f'/opt/{name}/src'] = {}
git_deploy[f'/opt/{name}/src'] = {
'repo': conf['git_url'],
'rev': conf.get('git_branch', 'master'),
'triggers': [
f'action:flask_{name}_pip_install_deps',
*conf.get('deployment_triggers', []),
],
}
# CONFIG
env = conf.get('env', {})
if conf.get('json_config', {}):
env['APP_CONFIG'] = f'/opt/{name}/config.json'
files[env['APP_CONFIG']] = {
'source': 'flask.cfg',
'context': {
'json_config': conf.get('json_config', {}),
},
}
if 'APP_CONFIG' in env:
files[env['APP_CONFIG']].update({
'content_type': 'mako',
'group': 'www-data',
'needed_by': [
f'svc_systemd:{name}',
],
'triggers': [
f'svc_systemd:{name}:restart',
],
})
# secrets
if 'secrets.json' in conf:
env['APP_SECRETS'] = f'/opt/{name}/secrets.json'
files[env['APP_SECRETS']] = {
'content': conf['secrets.json'],
'mode': '0600',
'owner': conf.get('user', 'www-data'),
'group': conf.get('group', 'www-data'),
'needed_by': [
f'svc_systemd:{name}',
],
}
# VENV
actions[f'flask_{name}_create_virtualenv'] = {
'cascade_skip': False,
'command': f'python3 -m venv /opt/{name}/venv',
'unless': f'test -d /opt/{name}/venv',
'needs': [
f'directory:/opt/{name}',
'pkg_apt:python3-venv',
],
'triggers': [
f'action:flask_{name}_pip_install_deps',
],
}
actions[f'flask_{name}_pip_install_deps'] = {
'cascade_skip': False,
'command': f'/opt/{name}/venv/bin/pip3 install -r /opt/{name}/src/requirements-frozen.txt || /opt/{name}/venv/bin/pip3 install -r /opt/{name}/src/requirements.txt',
'triggered': True, # TODO: https://stackoverflow.com/questions/16294819/check-if-my-python-has-all-required-packages
'needs': [
f'git_deploy:/opt/{name}/src',
'pkg_apt:python3-pip',
],
'triggers': [
f'action:flask_{name}_pip_install_gunicorn',
],
}
actions[f'flask_{name}_pip_install_gunicorn'] = {
'command': f'/opt/{name}/venv/bin/pip3 install -U gunicorn',
'triggered': True,
'cascade_skip': False,
'needs': [
f'action:flask_{name}_create_virtualenv',
],
'triggers': [
f'action:flask_{name}_pip_install',
],
}
actions[f'flask_{name}_pip_install'] = {
'command': f'/opt/{name}/venv/bin/pip3 install -e /opt/{name}/src',
'triggered': True,
'cascade_skip': False,
'triggers': [
f'svc_systemd:{name}:restart',
],
}
# UNIT
svc_systemd[name] = {
'needs': [
f'action:flask_{name}_pip_install',
f'file:/usr/local/lib/systemd/system/{name}.service',
],
}

View file

@ -1,61 +0,0 @@
defaults = {
'apt': {
'packages': {
'python3-pip': {},
'python3-dev': {},
'python3-venv': {},
},
},
'flask': {},
}
@metadata_reactor.provides(
'flask',
)
def app_defaults(metadata):
return {
'flask': {
name: {
'user': 'root',
'group': 'root',
'workers': 8,
'timeout': 30,
**conf,
}
for name, conf in metadata.get('flask').items()
}
}
@metadata_reactor.provides(
'systemd/units',
)
def units(metadata):
return {
'systemd': {
'units': {
f'{name}.service': {
'Unit': {
'Description': name,
'After': 'network.target',
},
'Service': {
'Environment': {
f'{k}={v}'
for k, v in conf.get('env', {}).items()
},
'User': conf['user'],
'Group': conf['group'],
'ExecStart': f"/opt/{name}/venv/bin/gunicorn -w {conf['workers']} -b 127.0.0.1:{conf['port']} --timeout {conf['timeout']} {conf['app_module']}:app"
},
'Install': {
'WantedBy': {
'multi-user.target'
}
},
}
for name, conf in metadata.get('flask').items()
}
}
}

View file

@ -1,23 +0,0 @@
Postgres password workaround: set it manually:
```
root@freescout /ro psql freescout
psql (15.6 (Debian 15.6-0+deb12u1))
Type "help" for help.
freescout=# \password freescout
Enter new password for user "freescout":
Enter it again:
freescout=#
\q
```
# problems
# check if /opt/freescout/.env has been reset
# check `psql -h localhost -d freescout -U freescout -W` with the password from .env
# chown -R www-data:www-data /opt/freescout
# sudo su - www-data -c 'php /opt/freescout/artisan freescout:clear-cache' -s /bin/bash
# javascript acting up? `sudo su - www-data -c 'php /opt/freescout/artisan storage:link' -s /bin/bash`
# user images missing? restore them from the backup: `/opt/freescout/.zfs/snapshot/zfs-auto-snap_hourly-2024-11-22-1700/storage/app/public/users` `./customers`

View file

@ -1,66 +0,0 @@
# https://github.com/freescout-helpdesk/freescout/wiki/Installation-Guide
run_as = repo.libs.tools.run_as
php_version = node.metadata.get('php/version')
directories = {
'/opt/freescout': {
'owner': 'www-data',
'group': 'www-data',
# chown -R www-data:www-data /opt/freescout
},
}
actions = {
# 'clone_freescout': {
# 'command': run_as('www-data', 'git clone https://github.com/freescout-helpdesk/freescout.git /opt/freescout'),
# 'unless': 'test -e /opt/freescout/.git',
# 'needs': [
# 'pkg_apt:git',
# 'directory:/opt/freescout',
# ],
# },
# 'pull_freescout': {
# 'command': run_as('www-data', 'git -C /opt/freescout fetch origin dist && git -C /opt/freescout reset --hard origin/dist && git -C /opt/freescout clean -f'),
# 'unless': run_as('www-data', 'git -C /opt/freescout fetch origin && git -C /opt/freescout status -uno | grep -q "Your branch is up to date"'),
# 'needs': [
# 'action:clone_freescout',
# ],
# 'triggers': [
# 'action:freescout_artisan_update',
# f'svc_systemd:php{php_version}-fpm.service:restart',
# ],
# },
# 'freescout_artisan_update': {
# 'command': run_as('www-data', 'php /opt/freescout/artisan freescout:after-app-update'),
# 'triggered': True,
# 'needs': [
# f'svc_systemd:php{php_version}-fpm.service:restart',
# 'action:pull_freescout',
# ],
# },
}
# svc_systemd = {
# f'freescout-cron.service': {},
# }
# files = {
# '/opt/freescout/.env': {
# # https://github.com/freescout-helpdesk/freescout/blob/dist/.env.example
# # Every time you are making changes in .env file, in order changes to take an effect you need to run:
# # ´sudo su - www-data -c 'php /opt/freescout/artisan freescout:clear-cache' -s /bin/bash´
# 'owner': 'www-data',
# 'content': '\n'.join(
# f'{k}={v}' for k, v in
# sorted(node.metadata.get('freescout/env').items())
# ) + '\n',
# 'needs': [
# 'directory:/opt/freescout',
# 'action:clone_freescout',
# ],
# },
# }
#sudo su - www-data -s /bin/bash -c 'php /opt/freescout/artisan freescout:create-user --role admin --firstName M --lastName W --email freescout@freibrief.net --password gyh.jzv2bnf6hvc.HKG --no-interaction'
#sudo su - www-data -s /bin/bash -c 'php /opt/freescout/artisan freescout:create-user --role admin --firstName M --lastName W --email freescout@freibrief.net --password gyh.jzv2bnf6hvc.HKG --no-interaction'

View file

@ -1,121 +0,0 @@
from base64 import b64decode
# hash: SCRAM-SHA-256$4096:tQNfqQi7seqNDwJdHqCHbg==$r3ibECluHJaY6VRwpvPqrtCjgrEK7lAkgtUO8/tllTU=:+eeo4M0L2SowfyHFxT2FRqGzezve4ZOEocSIo11DATA=
database_password = repo.vault.password_for(f'{node.name} postgresql freescout').value
defaults = {
'apt': {
'packages': {
'git': {},
'php': {},
'php-pgsql': {},
'php-fpm': {},
'php-mbstring': {},
'php-xml': {},
'php-imap': {},
'php-zip': {},
'php-gd': {},
'php-curl': {},
'php-intl': {},
},
},
'freescout': {
'env': {
'APP_TIMEZONE': 'Europe/Berlin',
'DB_CONNECTION': 'pgsql',
'DB_HOST': '127.0.0.1',
'DB_PORT': '5432',
'DB_DATABASE': 'freescout',
'DB_USERNAME': 'freescout',
'DB_PASSWORD': database_password,
'APP_KEY': 'base64:' + repo.vault.random_bytes_as_base64_for(f'{node.name} freescout APP_KEY', length=32).value
},
},
'php': {
'php.ini': {
'cgi': {
'fix_pathinfo': '0',
},
},
},
'postgresql': {
'roles': {
'freescout': {
'password_hash': repo.libs.postgres.generate_scram_sha_256(
database_password,
b64decode(repo.vault.random_bytes_as_base64_for(f'{node.name} postgres freescout', length=16).value.encode()),
),
},
},
'databases': {
'freescout': {
'owner': 'freescout',
},
},
},
# 'systemd': {
# 'units': {
# f'freescout-cron.service': {
# 'Unit': {
# 'Description': 'Freescout Cron',
# 'After': 'network.target',
# },
# 'Service': {
# 'User': 'www-data',
# 'Nice': 10,
# 'ExecStart': f"/usr/bin/php /opt/freescout/artisan schedule:run"
# },
# 'Install': {
# 'WantedBy': {
# 'multi-user.target'
# }
# },
# }
# },
# },
'systemd-timers': {
'freescout-cron': {
'command': '/usr/bin/php /opt/freescout/artisan schedule:run',
'when': '*-*-* *:*:00',
'RuntimeMaxSec': '180',
'user': 'www-data',
},
},
'zfs': {
'datasets': {
'tank/freescout': {
'mountpoint': '/opt/freescout',
},
},
},
}
@metadata_reactor.provides(
'freescout/env/APP_URL',
)
def freescout(metadata):
return {
'freescout': {
'env': {
'APP_URL': 'https://' + metadata.get('freescout/domain') + '/',
},
},
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('freescout/domain'): {
'content': 'freescout/vhost.conf',
},
},
},
}

View file

@ -8,15 +8,7 @@ defaults = {
'python3-crcmod': {},
},
'sources': {
'google-cloud': {
'url': 'https://packages.cloud.google.com/apt/',
'suites': {
'cloud-sdk',
},
'components': {
'main',
},
},
'deb https://packages.cloud.google.com/apt cloud-sdk main',
},
},
}

View file

@ -1,4 +1,3 @@
[DEFAULT]
APP_NAME = ckn-gitea
RUN_USER = git
RUN_MODE = prod
@ -14,24 +13,40 @@ MEMBERS_PAGING_NUM = 100
[server]
PROTOCOL = http
SSH_DOMAIN = ${domain}
DOMAIN = ${domain}
HTTP_ADDR = 0.0.0.0
HTTP_PORT = 3500
ROOT_URL = https://${domain}/
DISABLE_SSH = true
SSH_PORT = 22
LFS_START_SERVER = true
LFS_CONTENT_PATH = /var/lib/gitea/data/lfs
LFS_JWT_SECRET = ${lfs_secret_key}
OFFLINE_MODE = true
START_SSH_SERVER = false
DISABLE_ROUTER_LOG = true
LANDING_PAGE = explore
[database]
DB_TYPE = postgres
HOST = ${database.get('host')}:${database.get('port')}
NAME = ${database.get('database')}
USER = ${database.get('username')}
PASSWD = ${database.get('password')}
SSL_MODE = disable
LOG_SQL = false
[admin]
DEFAULT_EMAIL_NOTIFICATIONS = onmention
DISABLE_REGULAR_ORG_CREATION = true
[security]
INTERNAL_TOKEN = ${internal_token}
INSTALL_LOCK = true
SECRET_KEY = ${security_secret_key}
LOGIN_REMEMBER_DAYS = 30
DISABLE_GIT_HOOKS = ${str(not enable_git_hooks).lower()}
[openid]
ENABLE_OPENID_SIGNIN = false
@ -47,6 +62,12 @@ REQUIRE_SIGNIN_VIEW = false
DEFAULT_KEEP_EMAIL_PRIVATE = true
DEFAULT_ALLOW_CREATE_ORGANIZATION = false
DEFAULT_ENABLE_TIMETRACKING = true
NO_REPLY_ADDRESS = noreply.${domain}
[mailer]
ENABLED = true
MAILER_TYPE = sendmail
FROM = "${app_name}" <noreply@${domain}>
[session]
PROVIDER = file
@ -59,17 +80,9 @@ ENABLE_FEDERATED_AVATAR = false
MODE = console
LEVEL = warn
[oauth2]
JWT_SECRET = ${oauth_secret_key}
[other]
SHOW_FOOTER_BRANDING = true
SHOW_FOOTER_TEMPLATE_LOAD_TIME = false
[webhook]
ALLOWED_HOST_LIST = *
DELIVER_TIMEOUT = 600
[indexer]
REPO_INDEXER_ENABLED = true
MAX_FILE_SIZE = 10240000
[queue.issue_indexer]
LENGTH = 20

View file

@ -1,15 +1,8 @@
from os.path import join
from bundlewrap.utils.dicts import merge_dict
version = node.metadata.get('gitea/version')
assert not version.startswith('v')
arch = node.metadata.get('system/architecture')
version = node.metadata.get('gitea/version')
downloads['/usr/local/bin/gitea'] = {
# https://forgejo.org/releases/
'url': f'https://codeberg.org/forgejo/forgejo/releases/download/v{version}/forgejo-{version}-linux-{arch}',
'sha256_url': '{url}.sha256',
'url': f'https://dl.gitea.io/gitea/{version}/gitea-{version}-linux-amd64',
'sha256': node.metadata.get('gitea/sha256'),
'triggers': {
'svc_systemd:gitea:restart',
},
@ -18,6 +11,10 @@ downloads['/usr/local/bin/gitea'] = {
},
}
users['git'] = {
'home': '/home/git',
}
directories['/var/lib/gitea'] = {
'owner': 'git',
'mode': '0700',
@ -41,14 +38,8 @@ actions = {
}
files['/etc/gitea/app.ini'] = {
'content': repo.libs.ini.dumps(
merge_dict(
repo.libs.ini.parse(open(join(repo.path, 'bundles', 'gitea', 'files', 'app.ini')).read()),
node.metadata.get('gitea/conf'),
),
),
'content_type': 'mako',
'owner': 'git',
'mode': '0600',
'context': node.metadata['gitea'],
'triggers': {
'svc_systemd:gitea:restart',

View file

@ -1,30 +1,19 @@
database_password = repo.vault.password_for(f'{node.name} postgresql gitea').value
database_password = repo.vault.password_for(f'{node.name} postgresql gitea')
defaults = {
'apt': {
'packages': {
'git': {
'needed_by': {
'svc_systemd:gitea',
}
},
},
},
'gitea': {
'conf': {
'DEFAULT': {
'WORK_PATH': '/var/lib/gitea',
},
'database': {
'DB_TYPE': 'postgres',
'HOST': 'localhost:5432',
'NAME': 'gitea',
'USER': 'gitea',
'PASSWD': database_password,
'SSL_MODE': 'disable',
'LOG_SQL': 'false',
},
'host': 'localhost',
'port': '5432',
'username': 'gitea',
'password': database_password,
'database': 'gitea',
},
'app_name': 'Gitea',
'lfs_secret_key': repo.vault.password_for(f'{node.name} gitea lfs_secret_key', length=43),
'security_secret_key': repo.vault.password_for(f'{node.name} gitea security_secret_key'),
'oauth_secret_key': repo.vault.password_for(f'{node.name} gitea oauth_secret_key', length=43),
'internal_token': repo.vault.password_for(f'{node.name} gitea internal_token'),
},
'postgresql': {
'roles': {
@ -43,7 +32,8 @@ defaults = {
'gitea.service': {
'Unit': {
'Description': 'gitea',
'After': {'syslog.target', 'network.target'},
'After': 'syslog.target',
'After': 'network.target',
'Requires': 'postgresql.service',
},
'Service': {
@ -62,11 +52,6 @@ defaults = {
},
},
},
'users': {
'git': {
'home': '/home/git',
},
},
'zfs': {
'datasets': {
'tank/gitea': {
@ -77,36 +62,6 @@ defaults = {
}
@metadata_reactor.provides(
'gitea/conf',
)
def conf(metadata):
domain = metadata.get('gitea/domain')
return {
'gitea': {
'conf': {
'server': {
'SSH_DOMAIN': domain,
'DOMAIN': domain,
'ROOT_URL': f'https://{domain}/',
'LFS_JWT_SECRET': repo.vault.password_for(f'{node.name} gitea lfs_secret_key', length=43),
},
'security': {
'INTERNAL_TOKEN': repo.vault.password_for(f'{node.name} gitea internal_token'),
'SECRET_KEY': repo.vault.password_for(f'{node.name} gitea security_secret_key'),
},
'service': {
'NO_REPLY_ADDRESS': f'noreply.{domain}',
},
'oauth2': {
'JWT_SECRET': repo.vault.password_for(f'{node.name} gitea oauth_secret_key', length=43),
},
},
},
}
@metadata_reactor.provides(
'nginx/vhosts',
)
@ -118,7 +73,7 @@ def nginx(metadata):
'content': 'nginx/proxy_pass.conf',
'context': {
'target': 'http://127.0.0.1:3500',
},
}
},
},
},

View file

@ -18,17 +18,16 @@ admin_password = node.metadata.get('grafana/config/security/admin_password')
port = node.metadata.get('grafana/config/server/http_port')
actions['reset_grafana_admin_password'] = {
'command': f"grafana-cli admin reset-admin-password {quote(admin_password)}",
'unless': f"sleep 5 && curl http://admin:{quote(admin_password)}@localhost:{port}/api/org --fail",
'unless': f"curl http://admin:{quote(admin_password)}@localhost:{port}/api/org",
'needs': [
'svc_systemd:grafana-server',
],
}
directories = {
'/etc/grafana': {},
'/etc/grafana': {
},
'/etc/grafana/provisioning': {
'owner': 'grafana',
'group': 'grafana',
},
'/etc/grafana/provisioning/datasources': {
'purge': True,
@ -36,25 +35,13 @@ directories = {
'/etc/grafana/provisioning/dashboards': {
'purge': True,
},
'/var/lib/grafana': {
'owner': 'grafana',
'group': 'grafana',
},
'/var/lib/grafana/dashboards': {
'owner': 'grafana',
'group': 'grafana',
'purge': True,
'triggers': [
'svc_systemd:grafana-server:restart',
],
},
'/var/lib/grafana': {},
'/var/lib/grafana/dashboards': {},
}
files = {
'/etc/grafana/grafana.ini': {
'content': repo.libs.ini.dumps(node.metadata.get('grafana/config')),
'owner': 'grafana',
'group': 'grafana',
'triggers': [
'svc_systemd:grafana-server:restart',
],
@ -64,8 +51,6 @@ files = {
'apiVersion': 1,
'datasources': list(node.metadata.get('grafana/datasources').values()),
}),
'owner': 'grafana',
'group': 'grafana',
'triggers': [
'svc_systemd:grafana-server:restart',
],
@ -82,8 +67,6 @@ files = {
},
}],
}),
'owner': 'grafana',
'group': 'grafana',
'triggers': [
'svc_systemd:grafana-server:restart',
],
@ -139,16 +122,12 @@ for dashboard_id, monitored_node in enumerate(monitored_nodes, start=1):
panel['fieldConfig']['defaults']['min'] = panel_config['min']
if 'max' in panel_config:
panel['fieldConfig']['defaults']['max'] = panel_config['max']
if 'soft_max' in panel_config:
panel['fieldConfig']['defaults']['custom']['axisSoftMax'] = panel_config['soft_max']
if 'legend' in panel_config:
panel['options']['legend'].update(panel_config['legend'])
if 'tooltip' in panel_config:
panel['options']['tooltip']['mode'] = panel_config['tooltip']
if panel_config['tooltip'] == 'multi':
panel['options']['tooltip']['sort'] = 'desc'
for query_name, query_config in panel_config['queries'].items():
panel['targets'].append({
@ -157,13 +136,11 @@ for dashboard_id, monitored_node in enumerate(monitored_nodes, start=1):
bucket=bucket,
host=monitored_node.name,
negative=query_config.get('negative', False),
boolean_to_int=query_config.get('boolean_to_int', False),
minimum=query_config.get('minimum', None),
resolution=query_config.get('resolution', 1) * 4,
filters={
'host': monitored_node.name,
**query_config['filters'],
},
exists=query_config.get('exists', []),
function=query_config.get('function', None),
).strip()
})
@ -172,8 +149,6 @@ for dashboard_id, monitored_node in enumerate(monitored_nodes, start=1):
files[f'/var/lib/grafana/dashboards/{monitored_node.name}.json'] = {
'content': json.dumps(dashboard, indent=4),
'owner': 'grafana',
'group': 'grafana',
'triggers': [
'svc_systemd:grafana-server:restart',
]

View file

@ -8,19 +8,8 @@ defaults = {
'grafana': {},
},
'sources': {
'grafana': {
'urls': {
'https://packages.grafana.com/oss/deb',
'deb https://packages.grafana.com/oss/deb stable main',
},
'suites': {
'stable',
},
'components': {
'main',
},
},
},
},
'grafana': {
'config': {
@ -66,20 +55,6 @@ defaults = {
}
@metadata_reactor.provides(
'grafana/config/server/domain',
)
def domain(metadata):
return {
'grafana': {
'config': {
'server': {
'domain': metadata.get('grafana/hostname'),
},
},
},
}
@metadata_reactor.provides(
'grafana/datasources',
)
@ -127,7 +102,7 @@ def datasource_key_to_name(metadata):
def dns(metadata):
return {
'dns': {
metadata.get('grafana/hostname'): repo.libs.ip.get_a_records(metadata),
metadata.get('grafana/hostname'): repo.libs.dns.get_a_records(metadata),
}
}
@ -140,7 +115,10 @@ def nginx(metadata):
'nginx': {
'vhosts': {
metadata.get('grafana/hostname'): {
'content': 'grafana/vhost.conf',
'content': 'nginx/proxy_pass.conf',
'context': {
'target': 'http://127.0.0.1:8300',
}
},
},
},

View file

@ -1,5 +0,0 @@
GRUB_DEFAULT=0
GRUB_TIMEOUT=1
GRUB_DISTRIBUTOR=`lsb_release -i -s 2> /dev/null || echo Debian`
GRUB_CMDLINE_LINUX_DEFAULT="${' '.join(kernel_params)}"
GRUB_CMDLINE_LINUX=""

View file

@ -1,20 +0,0 @@
files = {
'/etc/default/grub': {
'content_type': 'mako',
'context': {
'timeout': node.metadata.get('grub/timeout'),
'kernel_params': node.metadata.get('grub/kernel_params'),
},
'mode': '0644',
'triggers': {
'action:update-grub',
},
}
}
actions = {
'update-grub': {
'command': 'update-grub',
'triggered': True,
},
}

View file

@ -1,6 +0,0 @@
defaults = {
'grub': {
'timeout': 1,
'kernel_params': set(),
},
}

View file

@ -1,10 +0,0 @@
#!/bin/bash
date=$(date --utc +%s%N)
for cpu in $(cat /sys/devices/system/cpu/cpu0/cpufreq/affected_cpus)
do
# echo "cpu_frequency,cpu=$cpu min=$(expr $(cat /sys/devices/system/cpu/cpu$cpu/cpufreq/scaling_min_freq) / 1000) $date"
echo "cpu_frequency,cpu=$cpu current=$(expr $(cat /sys/devices/system/cpu/cpu$cpu/cpufreq/scaling_cur_freq) / 1000) $date"
# echo "cpu_frequency,cpu=$cpu max=$(expr $(cat /sys/devices/system/cpu/cpu$cpu/cpufreq/scaling_max_freq) / 1000) $date"
done

View file

@ -1,8 +0,0 @@
files = {
'/usr/local/share/telegraf/cpu_frequency': {
'mode': '0755',
'triggers': {
'svc_systemd:telegraf:restart',
},
},
}

View file

@ -1,38 +0,0 @@
defaults = {
'apt': {
'packages': {
'lm-sensors': {},
'console-data': {}, # keyboard layout: de
},
},
'grafana_rows': {
'health',
},
'sudoers': {
'telegraf': {
'/usr/local/share/telegraf/cpu_frequency',
},
},
'telegraf': {
'config': {
'inputs': {
'sensors': {repo.libs.hashable.hashable({
'timeout': '2s',
})},
'exec': {
repo.libs.hashable.hashable({
'commands': ["sudo /usr/local/share/telegraf/cpu_frequency"],
'name_override': "cpu_frequency",
'data_format': "influx",
}),
# repo.libs.hashable.hashable({
# 'commands': ["/bin/bash -c 'expr $(cat /sys/class/thermal/thermal_zone0/temp) / 1000'"],
# 'name_override': "cpu_temperature",
# 'data_format': "value",
# 'data_type': "integer",
# }),
},
},
},
},
}

View file

@ -1,23 +0,0 @@
https://github.com/home-assistant/supervised-installer?tab=readme-ov-file
https://github.com/home-assistant/os-agent/tree/main?tab=readme-ov-file#using-home-assistant-supervised-on-debian
https://docs.docker.com/engine/install/debian/
https://www.home-assistant.io/installation/linux#install-home-assistant-supervised
https://github.com/home-assistant/supervised-installer
https://github.com/home-assistant/architecture/blob/master/adr/0014-home-assistant-supervised.md
DATA_SHARE=/usr/share/hassio dpkg --force-confdef --force-confold -i homeassistant-supervised.deb
fresh Debian install
install Home Assistant
check that it works
then apply bundlewrap on top
https://www.home-assistant.io/integrations/http/#ssl_certificate
`wget "$(curl -L https://api.github.com/repos/home-assistant/supervised-installer/releases/latest | jq -r '.assets[0].browser_download_url')" -O homeassistant-supervised.deb && dpkg -i homeassistant-supervised.deb`
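Rough sketch of the manual sequence implied by the notes above (the OS Agent version is a placeholder; aarch64 assumed, take the actual value from the linked releases page):
```
# OS Agent first (URL pattern as used by the bundle)
OS_AGENT_VERSION=1.6.0  # placeholder, check the os-agent releases page
wget -O /tmp/os-agent.deb "https://github.com/home-assistant/os-agent/releases/download/${OS_AGENT_VERSION}/os-agent_${OS_AGENT_VERSION}_linux_aarch64.deb"
dpkg -i /tmp/os-agent.deb

# then the supervised installer, with its data share on the ZFS-backed path
wget "$(curl -L https://api.github.com/repos/home-assistant/supervised-installer/releases/latest | jq -r '.assets[0].browser_download_url')" -O homeassistant-supervised.deb
DATA_SHARE=/usr/share/hassio dpkg --force-confdef --force-confold -i homeassistant-supervised.deb
```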

View file

@ -1,30 +0,0 @@
from shlex import quote
version = node.metadata.get('homeassistant/os_agent_version')
directories = {
'/usr/share/hassio': {},
}
actions = {
'install_os_agent': {
'command': ' && '.join([
f'wget -O /tmp/os-agent.deb https://github.com/home-assistant/os-agent/releases/download/{quote(version)}/os-agent_{quote(version)}_linux_aarch64.deb',
'DEBIAN_FRONTEND=noninteractive dpkg -i /tmp/os-agent.deb',
]),
'unless': f'test "$(apt -qq list os-agent | cut -d" " -f2)" = "{quote(version)}"',
'needs': {
'pkg_apt:',
'zfs_dataset:tank/homeassistant',
},
},
'install_homeassistant_supervised': {
'command': 'wget -O /tmp/homeassistant-supervised.deb https://github.com/home-assistant/supervised-installer/releases/latest/download/homeassistant-supervised.deb && apt install /tmp/homeassistant-supervised.deb',
'unless': 'apt -qq list homeassistant-supervised | grep -q "installed"',
'needs': {
'action:install_os_agent',
},
},
}

View file

@ -1,65 +0,0 @@
defaults = {
'apt': {
'packages': {
# homeassistant-supervised
'apparmor': {},
'bluez': {},
'cifs-utils': {},
'curl': {},
'dbus': {},
'jq': {},
'libglib2.0-bin': {},
'lsb-release': {},
'network-manager': {},
'nfs-common': {},
'systemd-journal-remote': {},
'systemd-resolved': {},
'udisks2': {},
'wget': {},
# docker
'docker-ce': {},
'docker-ce-cli': {},
'containerd.io': {},
'docker-buildx-plugin': {},
'docker-compose-plugin': {},
},
'sources': {
# docker: https://docs.docker.com/engine/install/debian/#install-using-the-repository
'docker': {
'urls': {
'https://download.docker.com/linux/debian',
},
'suites': {
'{codename}',
},
'components': {
'stable',
},
},
},
},
'zfs': {
'datasets': {
'tank/homeassistant': {
'mountpoint': '/usr/share/hassio',
'needed_by': {
'directory:/usr/share/hassio',
},
},
},
},
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('homeassistant/domain'): {
'content': 'homeassistant/vhost.conf',
},
},
},
}

View file

@ -0,0 +1,20 @@
users = {
'homeassistant': {
'home': '/var/lib/homeassistant',
},
}
directories = {
'/var/lib/homeassistant': {
'owner': 'homeassistant',
},
'/var/lib/homeassistant/config': {
'owner': 'homeassistant',
},
'/var/lib/homeassistant/venv': {
'owner': 'homeassistant',
},
}
# https://wiki.instar.com/de/Software/Linux/Home_Assistant/

View file

@ -0,0 +1,9 @@
defaults = {
'apt': {
'packages': {
'python3-dev': {},
'python3-pip': {},
'python3-venv': {},
},
},
}

View file

@ -1,4 +1,4 @@
files[node.metadata.get('hostname_file')] = {
files['/etc/hostname'] = {
'content': node.metadata.get('hostname'),
'triggers': [
'action:update_hostname',
@ -6,6 +6,6 @@ files[node.metadata.get('hostname_file')] = {
}
actions["update_hostname"] = {
"command": f"hostname -F {node.metadata.get('hostname_file')}",
"command": "hostname -F /etc/hostname",
'triggered': True,
}

View file

@ -8,21 +8,12 @@ defaults = {
}
@metadata_reactor.provides(
'hostname_file',
)
def hostname_file(metadata):
return {
'hostname_file': node.metadata.get('hostname_file', '/etc/hostname'),
}
@metadata_reactor.provides(
'dns',
)
def dns(metadata):
return {
'dns': {
metadata.get('hostname'): repo.libs.ip.get_a_records(metadata),
metadata.get('hostname'): repo.libs.dns.get_a_records(metadata, external=False),
},
}

View file

@ -1,39 +0,0 @@
# Beware! This file is rewritten by htop when settings are changed in the interface.
# The parser is also very primitive, and not human-friendly.
fields=0 48 17 18 38 39 40 2 46 47 109 110 49 1
sort_key=46
sort_direction=-1
tree_sort_key=0
tree_sort_direction=1
hide_kernel_threads=0
hide_userland_threads=0
shadow_other_users=0
show_thread_names=0
show_program_path=1
highlight_base_name=0
highlight_megabytes=1
highlight_threads=1
highlight_changes=0
highlight_changes_delay_secs=5
find_comm_in_cmdline=1
strip_exe_from_cmdline=1
show_merged_command=0
tree_view=0
tree_view_always_by_pid=0
header_margin=1
detailed_cpu_time=0
cpu_count_from_one=1
show_cpu_usage=0
show_cpu_frequency=1
show_cpu_temperature=0
degree_fahrenheit=0
update_process_names=0
account_guest_in_cpu_meter=0
color_scheme=0
enable_mouse=1
delay=20
left_meters=Hostname Tasks DiskIO NetworkIO Blank CPU Memory Swap Blank LeftCPUs${cpus_per_row}
left_meter_modes=2 2 2 2 2 1 1 1 2 1
right_meters=CPU Blank PressureStallCPUSome PressureStallMemorySome PressureStallIOSome Blank RightCPUs${cpus_per_row}
right_meter_modes=3 2 1 1 1 2 1
hide_function_bar=0

View file

@ -1,8 +0,0 @@
files = {
'/etc/htoprc.global': {
'content_type': 'mako',
'context': {
'cpus_per_row': 4 if node.metadata.get('vm/threads', node.metadata.get('vm/cores', 1)) > 8 else 2,
},
},
}

View file

@ -1,7 +0,0 @@
defaults = {
'apt': {
'packages': {
'htop': {},
},
},
}

View file

@ -1,36 +0,0 @@
#!/bin/sh
UNKNOWN=3
if [ -z "$SSHMON_COMMAND" ]
then
echo 'check_by_sshmon: Env SSHMON_COMMAND missing' >&2
exit $UNKNOWN
elif [ -z "$SSHMON_HOST" ]
then
echo 'check_by_sshmon: Env SSHMON_HOST missing' >&2
exit $UNKNOWN
fi
if [ -z "$SSHMON_SUDO" ]
then
PREFIX=""
else
PREFIX="sudo "
fi
ssh sshmon@"$SSHMON_HOST" "$PREFIX$SSHMON_COMMAND"
exitcode=$?
if [ "$exitcode" = 124 ]
then
echo 'check_by_sshmon: Timeout while running check remotely' >&2
exit $UNKNOWN
elif [ "$exitcode" = 255 ]
then
echo 'check_by_sshmon: SSH error' >&2
exit $UNKNOWN
else
exit $exitcode
fi

View file

@ -1,10 +0,0 @@
% for name, conf in sorted(users.items()):
object ApiUser "${name}" {
password = "${conf['password']}"
permissions = [
% for permission in conf['permissions']:
"${permission}",
% endfor
]
}
% endfor

View file

@ -1 +0,0 @@
object IcingaApplication "app" { }

View file

@ -1,198 +0,0 @@
/* Command objects */
/* Notification Commands
*
* Please check the documentation for all required and
* optional parameters.
*/
object CheckCommand "sshmon" {
import "ipv4-or-ipv6"
command = [ "/usr/lib/nagios/plugins/check_by_sshmon" ]
env.SSHMON_COMMAND = "$command$"
env.SSHMON_HOST = "$address$"
env.SSHMON_SUDO = "$sudo$"
}
object NotificationCommand "mail-host-notification" {
command = [ ConfigDir + "/scripts/mail-host-notification.sh" ]
arguments += {
"-4" = "$notification_address$"
"-6" = "$notification_address6$"
"-b" = "$notification_author$"
"-c" = "$notification_comment$"
"-d" = {
required = true
value = "$notification_date$"
}
"-f" = {
value = "$notification_from$"
description = "Set from address. Requires GNU mailutils (Debian/Ubuntu) or mailx (RHEL/SUSE)"
}
"-i" = "$notification_icingaweb2url$"
"-l" = {
required = true
value = "$notification_hostname$"
}
"-n" = {
required = true
value = "$notification_hostdisplayname$"
}
"-o" = {
required = true
value = "$notification_hostoutput$"
}
"-r" = {
required = true
value = "$notification_useremail$"
}
"-s" = {
required = true
value = "$notification_hoststate$"
}
"-t" = {
required = true
value = "$notification_type$"
}
"-v" = "$notification_logtosyslog$"
}
vars += {
notification_address = "$address$"
notification_address6 = "$address6$"
notification_author = "$notification.author$"
notification_comment = "$notification.comment$"
notification_type = "$notification.type$"
notification_date = "$icinga.long_date_time$"
notification_hostname = "$host.name$"
notification_hostdisplayname = "$host.display_name$"
notification_hostoutput = "$host.output$"
notification_hoststate = "$host.state$"
notification_useremail = "$user.email$"
}
}
object NotificationCommand "mail-service-notification" {
command = [ ConfigDir + "/scripts/mail-service-notification.sh" ]
arguments += {
"-4" = "$notification_address$"
"-6" = "$notification_address6$"
"-b" = "$notification_author$"
"-c" = "$notification_comment$"
"-d" = {
required = true
value = "$notification_date$"
}
"-e" = {
required = true
value = "$notification_servicename$"
}
"-f" = {
value = "$notification_from$"
description = "Set from address. Requires GNU mailutils (Debian/Ubuntu) or mailx (RHEL/SUSE)"
}
"-i" = "$notification_icingaweb2url$"
"-l" = {
required = true
value = "$notification_hostname$"
}
"-n" = {
required = true
value = "$notification_hostdisplayname$"
}
"-o" = {
required = true
value = "$notification_serviceoutput$"
}
"-r" = {
required = true
value = "$notification_useremail$"
}
"-s" = {
required = true
value = "$notification_servicestate$"
}
"-t" = {
required = true
value = "$notification_type$"
}
"-u" = {
required = true
value = "$notification_servicedisplayname$"
}
"-v" = "$notification_logtosyslog$"
}
vars += {
notification_address = "$address$"
notification_address6 = "$address6$"
notification_author = "$notification.author$"
notification_comment = "$notification.comment$"
notification_type = "$notification.type$"
notification_date = "$icinga.long_date_time$"
notification_hostname = "$host.name$"
notification_hostdisplayname = "$host.display_name$"
notification_servicename = "$service.name$"
notification_serviceoutput = "$service.output$"
notification_servicestate = "$service.state$"
notification_useremail = "$user.email$"
notification_servicedisplayname = "$service.display_name$"
}
}
/*
* If you prefer to use the notification scripts with environment
* variables instead of command line parameters, you can use
* the following commands. They have been updated from < 2.7
* to support the new notification scripts and should help
* with an upgrade.
* Remove the comment blocks and comment the notification commands above.
*/
/*
object NotificationCommand "mail-host-notification" {
command = [ ConfigDir + "/scripts/mail-host-notification.sh" ]
env = {
NOTIFICATIONTYPE = "$notification.type$"
HOSTDISPLAYNAME = "$host.display_name$"
HOSTNAME = "$host.name$"
HOSTADDRESS = "$address$"
HOSTSTATE = "$host.state$"
LONGDATETIME = "$icinga.long_date_time$"
HOSTOUTPUT = "$host.output$"
NOTIFICATIONAUTHORNAME = "$notification.author$"
NOTIFICATIONCOMMENT = "$notification.comment$"
HOSTDISPLAYNAME = "$host.display_name$"
USEREMAIL = "$user.email$"
}
}
object NotificationCommand "mail-service-notification" {
command = [ ConfigDir + "/scripts/mail-service-notification.sh" ]
env = {
NOTIFICATIONTYPE = "$notification.type$"
SERVICENAME = "$service.name$"
HOSTNAME = "$host.name$"
HOSTDISPLAYNAME = "$host.display_name$"
HOSTADDRESS = "$address$"
SERVICESTATE = "$service.state$"
LONGDATETIME = "$icinga.long_date_time$"
SERVICEOUTPUT = "$service.output$"
NOTIFICATIONAUTHORNAME = "$notification.author$"
NOTIFICATIONCOMMENT = "$notification.comment$"
HOSTDISPLAYNAME = "$host.display_name$"
SERVICEDISPLAYNAME = "$service.display_name$"
USEREMAIL = "$user.email$"
}
}
*/

View file

@ -1,37 +0,0 @@
/**
* Host group examples.
*/
object HostGroup "linux-servers" {
display_name = "Linux Servers"
assign where host.vars.os == "Linux"
}
object HostGroup "windows-servers" {
display_name = "Windows Servers"
assign where host.vars.os == "Windows"
}
/**
* Service group examples.
*/
object ServiceGroup "ping" {
display_name = "Ping Checks"
assign where match("ping*", service.name)
}
object ServiceGroup "http" {
display_name = "HTTP Checks"
assign where match("http*", service.check_command)
}
object ServiceGroup "disk" {
display_name = "Disk Checks"
assign where match("disk*", service.check_command)
}

View file

@ -1,33 +0,0 @@
/**
* The example notification apply rules.
*
* Only applied if host/service objects have
* the custom variable `notification` defined
* and containing `mail` as key.
*
* Check `hosts.conf` for an example.
*/
apply Notification "mail-icingaadmin" to Host {
import "mail-host-notification"
user_groups = host.vars.notification.mail.groups
users = host.vars.notification.mail.users
assign where host.vars.notification.mail
}
apply Notification "mail-icingaadmin" to Service {
import "mail-service-notification"
user_groups = host.vars.notification.mail.groups
users = host.vars.notification.mail.users
assign where host.vars.notification.mail
}

View file

@ -1,15 +0,0 @@
template Host "generic-host" {
max_check_attempts = 3
check_interval = 1m
retry_interval = 30s
check_command = "hostalive"
}
template Service "generic-service" {
max_check_attempts = 5
check_interval = 1m
retry_interval = 30s
}
template User "generic-user" {}

View file

@ -1,34 +0,0 @@
/**
* Sample timeperiods for Icinga 2.
* Check the documentation for details.
*/
object TimePeriod "24x7" {
display_name = "Icinga 2 24x7 TimePeriod"
ranges = {
"monday" = "00:00-24:00"
"tuesday" = "00:00-24:00"
"wednesday" = "00:00-24:00"
"thursday" = "00:00-24:00"
"friday" = "00:00-24:00"
"saturday" = "00:00-24:00"
"sunday" = "00:00-24:00"
}
}
object TimePeriod "9to5" {
display_name = "Icinga 2 9to5 TimePeriod"
ranges = {
"monday" = "09:00-17:00"
"tuesday" = "09:00-17:00"
"wednesday" = "09:00-17:00"
"thursday" = "09:00-17:00"
"friday" = "09:00-17:00"
}
}
object TimePeriod "never" {
display_name = "Icinga 2 never TimePeriod"
ranges = {
}
}

View file

@ -1,6 +0,0 @@
const PluginDir = "/usr/lib/nagios/plugins"
const ManubulonPluginDir = "/usr/lib/nagios/plugins"
const PluginContribDir = "/usr/lib/nagios/plugins"
const NodeName = "${hostname}"
const ZoneName = NodeName
const TicketSalt = ""

View file

@ -1 +0,0 @@
object ApiListener "api" {}

View file

@ -1 +0,0 @@
object CheckerComponent "checker" { }

View file

@ -1,8 +0,0 @@
library "db_ido_pgsql"
object IdoPgsqlConnection "ido-pgsql" {
user = "icinga2",
password = "${db_password}",
host = "localhost",
database = "icinga2"
}

View file

@ -1,7 +0,0 @@
/**
* The JournaldLogger type writes log information to the systemd journal.
*/
object JournaldLogger "journald" {
severity = "warning"
}

View file

@ -1,4 +0,0 @@
/**
* This file is required for the initial apt install.
* The JournaldLogger type writes log information to the systemd journal.
*/

View file

@ -1 +0,0 @@
object NotificationComponent "notification" { }

View file

@ -1,39 +0,0 @@
<%!
from bundlewrap.utils import Fault
def render_value(key, value):
if isinstance(value, Fault):
return render_value(key, value.value)
elif isinstance(value, type(None)):
return '""'
elif isinstance(value, bool):
return 'true' if value else 'false'
elif isinstance(value, int):
return str(value)
elif isinstance(value, str):
if key.endswith('_interval'):
return value
else:
escaped_value = value.replace('$', '$$').replace('"', '\\"')
return f'"{escaped_value}"'
elif isinstance(value, (list, set)):
return '[' + ', '.join(render_value(key, e) for e in sorted(value)) + ']'
else:
raise Exception(f"cant process type '{type(value)}' of value '{value}'")
%>
object Host "${host_name}" {
import "generic-host"
% for key, value in sorted(host_settings.items()):
${key} = ${render_value(key, value)}
% endfor
}
% for service_name, service_config in sorted(services.items()):
object Service "${service_name}" {
import "generic-service"
% for key, value in sorted(service_config.items()):
${key} = ${render_value(key, value)}
% endfor
}
% endfor

View file

@ -1,10 +0,0 @@
include "constants.conf"
include "zones.conf"
include <itl>
include <plugins>
include <plugins-contrib>
include "features-enabled/*.conf"
include_recursive "conf.d"
include "hosts.d/*.conf"

Some files were not shown because too many files have changed in this diff