Compare commits
16 commits
master
...
whatsbroke
Author | SHA1 | Date | |
---|---|---|---|
![]() |
ad460b45e7 | ||
![]() |
936dfbadde | ||
![]() |
5340ec0c05 | ||
![]() |
4a845d4956 | ||
![]() |
611c263df5 | ||
![]() |
a962ce32a2 | ||
![]() |
48b904fede | ||
![]() |
f2e4e21c1f | ||
![]() |
581d7c5371 | ||
![]() |
30c759f8bf | ||
![]() |
1f93d3888e | ||
![]() |
c410be5d72 | ||
![]() |
0628fd8ff8 | ||
![]() |
a7ab955a1a | ||
![]() |
a051b4af17 | ||
![]() |
f41ba0b934 |
464 changed files with 48 additions and 19339 deletions
|
@ -1,22 +0,0 @@
|
|||
root = true
|
||||
|
||||
[*]
|
||||
end_of_line = lf
|
||||
|
||||
[*.py]
|
||||
indent_style = space
|
||||
indent_size = 4
|
||||
trim_trailing_whitespace = true
|
||||
insert_final_newline = true
|
||||
|
||||
[*.toml]
|
||||
indent_style = space
|
||||
indent_size = 2
|
||||
trim_trailing_whitespace = true
|
||||
insert_final_newline = true
|
||||
|
||||
[*.yaml]
|
||||
indent_style = space
|
||||
indent_size = 2
|
||||
trim_trailing_whitespace = true
|
||||
insert_final_newline = true
|
9
.envrc
9
.envrc
|
@ -1,7 +1,8 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
PATH_add bin
|
||||
python3 -m venv .venv
|
||||
source ./.venv/bin/activate
|
||||
|
||||
source_env ~/.local/share/direnv/pyenv
|
||||
source_env ~/.local/share/direnv/venv
|
||||
source_env ~/.local/share/direnv/bundlewrap
|
||||
export BW_GIT_DEPLOY_CACHE="$(realpath ~)/.cache/bw/git_deploy"
|
||||
mkdir -p "$BW_GIT_DEPLOY_CACHE"
|
||||
unset PS1
|
||||
|
|
2
.gitignore
vendored
2
.gitignore
vendored
|
@ -1,4 +1,2 @@
|
|||
.secrets.cfg*
|
||||
.venv
|
||||
.cache
|
||||
*.pyc
|
||||
|
|
1
.python-version
Normal file
1
.python-version
Normal file
|
@ -0,0 +1 @@
|
|||
3.9.0
|
48
README.md
48
README.md
|
@ -1,48 +0,0 @@
|
|||
# TODO
|
||||
|
||||
- dont spamfilter forwarded mails
|
||||
- gollum wiki
|
||||
- blog?
|
||||
- fix dkim not working sometimes
|
||||
- LDAP
|
||||
- oauth2/OpenID
|
||||
- icinga
|
||||
|
||||
Raspberry pi as soundcard
|
||||
- gadget mode
|
||||
- OTG g_audio
|
||||
- https://audiosciencereview.com/forum/index.php?threads/raspberry-pi-as-usb-to-i2s-adapter.8567/post-215824
|
||||
|
||||
# install bw fork
|
||||
|
||||
pip3 install --editable git+file:///Users/mwiegand/Projekte/bundlewrap-fork@main#egg=bundlewrap
|
||||
|
||||
# monitor timers
|
||||
|
||||
```sh
|
||||
Timer=backup
|
||||
|
||||
Triggers=$(systemctl show ${Timer}.timer --property=Triggers --value)
|
||||
echo $Triggers
|
||||
if systemctl is-failed "$Triggers"
|
||||
then
|
||||
InvocationID=$(systemctl show "$Triggers" --property=InvocationID --value)
|
||||
echo $InvocationID
|
||||
ExitCode=$(systemctl show "$Triggers" -p ExecStartEx --value | sed 's/^{//' | sed 's/}$//' | tr ';' '\n' | xargs -n 1 | grep '^status=' | cut -d '=' -f 2)
|
||||
echo $ExitCode
|
||||
journalctl INVOCATION_ID="$InvocationID" --output cat
|
||||
fi
|
||||
```
|
||||
|
||||
telegraf: execd for daemons
|
||||
|
||||
TEST
|
||||
|
||||
# git signing
|
||||
|
||||
git config --global gpg.format ssh
|
||||
git config --global commit.gpgsign true
|
||||
|
||||
git config user.name CroneKorkN
|
||||
git config user.email i@ckn.li
|
||||
git config user.signingkey "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILMVroYmswD4tLk6iH+2tvQiyaMe42yfONDsPDIdFv6I"
|
32
bin/rcon
32
bin/rcon
|
@ -1,32 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
from sys import argv
|
||||
from os.path import realpath, dirname
|
||||
from shlex import quote
|
||||
from bundlewrap.repo import Repository
|
||||
|
||||
repo = Repository(dirname(dirname(realpath(__file__))))
|
||||
|
||||
if len(argv) == 1:
|
||||
for node in repo.nodes:
|
||||
for name in node.metadata.get('left4dead2/servers', {}):
|
||||
print(name)
|
||||
exit(0)
|
||||
|
||||
server = argv[1]
|
||||
command = argv[2]
|
||||
|
||||
remote_code = """
|
||||
from rcon.source import Client
|
||||
|
||||
with Client('127.0.0.1', {port}, passwd='''{password}''') as client:
|
||||
response = client.run('''{command}''')
|
||||
|
||||
print(response)
|
||||
"""
|
||||
|
||||
for node in repo.nodes:
|
||||
for name, conf in node.metadata.get('left4dead2/servers', {}).items():
|
||||
if name == server:
|
||||
response = node.run('python3 -c ' + quote(remote_code.format(port=conf['port'], password=conf['rcon_password'], command=command)))
|
||||
print(response.stdout.decode())
|
|
@ -1,6 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
from bundlewrap.repo import Repository
|
||||
from os.path import realpath, dirname
|
||||
|
||||
repo = Repository(dirname(dirname(realpath(__file__))))
|
|
@ -1,70 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
from bundlewrap.repo import Repository
|
||||
from os.path import realpath, dirname
|
||||
from ipaddress import ip_interface
|
||||
|
||||
repo = Repository(dirname(dirname(realpath(__file__))))
|
||||
nodes = [
|
||||
node
|
||||
for node in sorted(repo.nodes_in_group('debian'))
|
||||
if not node.dummy
|
||||
]
|
||||
|
||||
print('updating nodes:', sorted(node.name for node in nodes))
|
||||
|
||||
# UPDATE
|
||||
|
||||
for node in nodes:
|
||||
print('--------------------------------------')
|
||||
print('updating', node.name)
|
||||
print('--------------------------------------')
|
||||
repo.libs.wol.wake(node)
|
||||
print(node.run('DEBIAN_FRONTEND=noninteractive apt update').stdout.decode())
|
||||
print(node.run('DEBIAN_FRONTEND=noninteractive apt list --upgradable').stdout.decode())
|
||||
if int(node.run('DEBIAN_FRONTEND=noninteractive apt list --upgradable 2> /dev/null | grep upgradable | wc -l').stdout.decode()):
|
||||
print(node.run('DEBIAN_FRONTEND=noninteractive apt -qy full-upgrade').stdout.decode())
|
||||
|
||||
# REBOOT IN ORDER
|
||||
|
||||
wireguard_servers = [
|
||||
node
|
||||
for node in nodes
|
||||
if node.has_bundle('wireguard')
|
||||
and (
|
||||
ip_interface(node.metadata.get('wireguard/my_ip')).network.prefixlen <
|
||||
ip_interface(node.metadata.get('wireguard/my_ip')).network.max_prefixlen
|
||||
)
|
||||
]
|
||||
|
||||
wireguard_s2s = [
|
||||
node
|
||||
for node in nodes
|
||||
if node.has_bundle('wireguard')
|
||||
and (
|
||||
ip_interface(node.metadata.get('wireguard/my_ip')).network.prefixlen ==
|
||||
ip_interface(node.metadata.get('wireguard/my_ip')).network.max_prefixlen
|
||||
)
|
||||
]
|
||||
|
||||
everything_else = [
|
||||
node
|
||||
for node in nodes
|
||||
if not node.has_bundle('wireguard')
|
||||
]
|
||||
|
||||
print('======================================')
|
||||
|
||||
for node in [
|
||||
*everything_else,
|
||||
*wireguard_s2s,
|
||||
*wireguard_servers,
|
||||
]:
|
||||
try:
|
||||
if node.run('test -e /var/run/reboot-required', may_fail=True).return_code == 0:
|
||||
print('rebooting', node.name)
|
||||
print(node.run('systemctl reboot').stdout.decode())
|
||||
else:
|
||||
print('not rebooting', node.name)
|
||||
except Exception as e:
|
||||
print(e)
|
9
bin/wake
9
bin/wake
|
@ -1,9 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
from bundlewrap.repo import Repository
|
||||
from os.path import realpath, dirname
|
||||
from sys import argv
|
||||
|
||||
repo = Repository(dirname(dirname(realpath(__file__))))
|
||||
|
||||
repo.libs.wol.wake(repo.get_node(argv[1]))
|
|
@ -1,52 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
from bundlewrap.repo import Repository
|
||||
from os.path import realpath, dirname
|
||||
from sys import argv
|
||||
from ipaddress import ip_network, ip_interface
|
||||
|
||||
if len(argv) != 3:
|
||||
print(f'usage: {argv[0]} <node> <client>')
|
||||
exit(1)
|
||||
|
||||
repo = Repository(dirname(dirname(realpath(__file__))))
|
||||
server_node = repo.get_node(argv[1])
|
||||
|
||||
if argv[2] not in server_node.metadata.get('wireguard/clients'):
|
||||
print(f'client {argv[2]} not found in: {server_node.metadata.get("wireguard/clients").keys()}')
|
||||
exit(1)
|
||||
|
||||
data = server_node.metadata.get(f'wireguard/clients/{argv[2]}')
|
||||
|
||||
vpn_network = ip_interface(server_node.metadata.get('wireguard/my_ip')).network
|
||||
allowed_ips = [
|
||||
vpn_network,
|
||||
ip_interface(server_node.metadata.get('network/internal/ipv4')).network,
|
||||
]
|
||||
for peer in server_node.metadata.get('wireguard/s2s').values():
|
||||
for network in peer['allowed_ips']:
|
||||
if not ip_network(network).subnet_of(vpn_network):
|
||||
allowed_ips.append(ip_network(network))
|
||||
|
||||
conf = f'''
|
||||
[Interface]
|
||||
PrivateKey = {repo.libs.wireguard.privkey(data['peer_id'])}
|
||||
ListenPort = 51820
|
||||
Address = {data['peer_ip']}
|
||||
DNS = 172.30.0.1
|
||||
|
||||
[Peer]
|
||||
PublicKey = {repo.libs.wireguard.pubkey(server_node.metadata.get('id'))}
|
||||
PresharedKey = {repo.libs.wireguard.psk(data['peer_id'], server_node.metadata.get('id'))}
|
||||
AllowedIPs = {', '.join(str(client_route) for client_route in sorted(allowed_ips))}
|
||||
Endpoint = {ip_interface(server_node.metadata.get('network/external/ipv4')).ip}:51820
|
||||
PersistentKeepalive = 10
|
||||
'''
|
||||
|
||||
print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
|
||||
print(conf)
|
||||
print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
|
||||
|
||||
if input("print qrcode? [Yn]: ").upper() in ['', 'Y']:
|
||||
import pyqrcode
|
||||
print(pyqrcode.create(conf).terminal(quiet_zone=1))
|
|
@ -1,10 +0,0 @@
|
|||
http://www.apcupsd.org/manual/manual.html#power-down-during-shutdown
|
||||
|
||||
- onbattery: power lost
|
||||
- battery drains
|
||||
- when BATTERYLEVEL or MINUTES threshold is reached, server is shut down and
|
||||
the ups is issued to cut the power
|
||||
- when the mains power returns, the ups will reinstate power to the server
|
||||
- the server will reboot
|
||||
|
||||
NOT IMPLEMENTED
|
|
@ -1,343 +0,0 @@
|
|||
## apcupsd.conf v1.1 ##
|
||||
#
|
||||
# "apcupsd" POSIX config file
|
||||
|
||||
#
|
||||
# Note that the apcupsd daemon must be restarted in order for changes to
|
||||
# this configuration file to become active.
|
||||
#
|
||||
|
||||
#
|
||||
# ========= General configuration parameters ============
|
||||
#
|
||||
|
||||
# UPSNAME xxx
|
||||
# Use this to give your UPS a name in log files and such. This
|
||||
# is particulary useful if you have multiple UPSes. This does not
|
||||
# set the EEPROM. It should be 8 characters or less.
|
||||
#UPSNAME
|
||||
|
||||
# UPSCABLE <cable>
|
||||
# Defines the type of cable connecting the UPS to your computer.
|
||||
#
|
||||
# Possible generic choices for <cable> are:
|
||||
# simple, smart, ether, usb
|
||||
#
|
||||
# Or a specific cable model number may be used:
|
||||
# 940-0119A, 940-0127A, 940-0128A, 940-0020B,
|
||||
# 940-0020C, 940-0023A, 940-0024B, 940-0024C,
|
||||
# 940-1524C, 940-0024G, 940-0095A, 940-0095B,
|
||||
# 940-0095C, 940-0625A, M-04-02-2000
|
||||
#
|
||||
UPSCABLE usb
|
||||
|
||||
# To get apcupsd to work, in addition to defining the cable
|
||||
# above, you must also define a UPSTYPE, which corresponds to
|
||||
# the type of UPS you have (see the Description for more details).
|
||||
# You must also specify a DEVICE, sometimes referred to as a port.
|
||||
# For USB UPSes, please leave the DEVICE directive blank. For
|
||||
# other UPS types, you must specify an appropriate port or address.
|
||||
#
|
||||
# UPSTYPE DEVICE Description
|
||||
# apcsmart /dev/tty** Newer serial character device, appropriate for
|
||||
# SmartUPS models using a serial cable (not USB).
|
||||
#
|
||||
# usb <BLANK> Most new UPSes are USB. A blank DEVICE
|
||||
# setting enables autodetection, which is
|
||||
# the best choice for most installations.
|
||||
#
|
||||
# net hostname:port Network link to a master apcupsd through apcupsd's
|
||||
# Network Information Server. This is used if the
|
||||
# UPS powering your computer is connected to a
|
||||
# different computer for monitoring.
|
||||
#
|
||||
# snmp hostname:port:vendor:community
|
||||
# SNMP network link to an SNMP-enabled UPS device.
|
||||
# Hostname is the ip address or hostname of the UPS
|
||||
# on the network. Vendor can be can be "APC" or
|
||||
# "APC_NOTRAP". "APC_NOTRAP" will disable SNMP trap
|
||||
# catching; you usually want "APC". Port is usually
|
||||
# 161. Community is usually "private".
|
||||
#
|
||||
# netsnmp hostname:port:vendor:community
|
||||
# OBSOLETE
|
||||
# Same as SNMP above but requires use of the
|
||||
# net-snmp library. Unless you have a specific need
|
||||
# for this old driver, you should use 'snmp' instead.
|
||||
#
|
||||
# dumb /dev/tty** Old serial character device for use with
|
||||
# simple-signaling UPSes.
|
||||
#
|
||||
# pcnet ipaddr:username:passphrase:port
|
||||
# PowerChute Network Shutdown protocol which can be
|
||||
# used as an alternative to SNMP with the AP9617
|
||||
# family of smart slot cards. ipaddr is the IP
|
||||
# address of the UPS management card. username and
|
||||
# passphrase are the credentials for which the card
|
||||
# has been configured. port is the port number on
|
||||
# which to listen for messages from the UPS, normally
|
||||
# 3052. If this parameter is empty or missing, the
|
||||
# default of 3052 will be used.
|
||||
#
|
||||
# modbus /dev/tty** Serial device for use with newest SmartUPS models
|
||||
# supporting the MODBUS protocol.
|
||||
# modbus <BLANK> Leave the DEVICE setting blank for MODBUS over USB
|
||||
# or set to the serial number of the UPS to ensure
|
||||
# that apcupsd binds to that particular unit
|
||||
# (helpful if you have more than one USB UPS).
|
||||
#
|
||||
UPSTYPE usb
|
||||
#DEVICE /dev/ttyS0
|
||||
|
||||
# POLLTIME <int>
|
||||
# Interval (in seconds) at which apcupsd polls the UPS for status. This
|
||||
# setting applies both to directly-attached UPSes (UPSTYPE apcsmart, usb,
|
||||
# dumb) and networked UPSes (UPSTYPE net, snmp). Lowering this setting
|
||||
# will improve apcupsd's responsiveness to certain events at the cost of
|
||||
# higher CPU utilization. The default of 60 is appropriate for most
|
||||
# situations.
|
||||
#POLLTIME 60
|
||||
|
||||
# LOCKFILE <path to lockfile>
|
||||
# Path for device lock file for UPSes connected via USB or
|
||||
# serial port. This is the directory into which the lock file
|
||||
# will be written. The directory must already exist; apcupsd will not create
|
||||
# it. The actual name of the lock file is computed from DEVICE.
|
||||
# Not used on Win32.
|
||||
LOCKFILE /var/lock
|
||||
|
||||
# SCRIPTDIR <path to script directory>
|
||||
# Directory in which apccontrol and event scripts are located.
|
||||
SCRIPTDIR /etc/apcupsd
|
||||
|
||||
# PWRFAILDIR <path to powerfail directory>
|
||||
# Directory in which to write the powerfail flag file. This file
|
||||
# is created when apcupsd initiates a system shutdown and is
|
||||
# checked in the OS halt scripts to determine if a killpower
|
||||
# (turning off UPS output power) is required.
|
||||
PWRFAILDIR /etc/apcupsd
|
||||
|
||||
# NOLOGINDIR <path to nologin directory>
|
||||
# Directory in which to write the nologin file. The existence
|
||||
# of this flag file tells the OS to disallow new logins.
|
||||
NOLOGINDIR /etc
|
||||
|
||||
|
||||
#
|
||||
# ======== Configuration parameters used during power failures ==========
|
||||
#
|
||||
|
||||
# The ONBATTERYDELAY is the time in seconds from when a power failure
|
||||
# is detected until we react to it with an onbattery event.
|
||||
#
|
||||
# This means that, apccontrol will be called with the powerout argument
|
||||
# immediately when a power failure is detected. However, the
|
||||
# onbattery argument is passed to apccontrol only after the
|
||||
# ONBATTERYDELAY time. If you don't want to be annoyed by short
|
||||
# powerfailures, make sure that apccontrol powerout does nothing
|
||||
# i.e. comment out the wall.
|
||||
ONBATTERYDELAY 6
|
||||
|
||||
#
|
||||
# Note: BATTERYLEVEL, MINUTES, and TIMEOUT work in conjunction, so
|
||||
# the first that occurs will cause the initation of a shutdown.
|
||||
#
|
||||
|
||||
# If during a power failure, the remaining battery percentage
|
||||
# (as reported by the UPS) is below or equal to BATTERYLEVEL,
|
||||
# apcupsd will initiate a system shutdown.
|
||||
BATTERYLEVEL 10
|
||||
|
||||
# If during a power failure, the remaining runtime in minutes
|
||||
# (as calculated internally by the UPS) is below or equal to MINUTES,
|
||||
# apcupsd, will initiate a system shutdown.
|
||||
MINUTES 5
|
||||
|
||||
# If during a power failure, the UPS has run on batteries for TIMEOUT
|
||||
# many seconds or longer, apcupsd will initiate a system shutdown.
|
||||
# A value of 0 disables this timer.
|
||||
#
|
||||
# Note, if you have a Smart UPS, you will most likely want to disable
|
||||
# this timer by setting it to zero. That way, you UPS will continue
|
||||
# on batteries until either the % charge remaing drops to or below BATTERYLEVEL,
|
||||
# or the remaining battery runtime drops to or below MINUTES. Of course,
|
||||
# if you are testing, setting this to 60 causes a quick system shutdown
|
||||
# if you pull the power plug.
|
||||
# If you have an older dumb UPS, you will want to set this to less than
|
||||
# the time you know you can run on batteries.
|
||||
TIMEOUT 0
|
||||
|
||||
# Time in seconds between annoying users to signoff prior to
|
||||
# system shutdown. 0 disables.
|
||||
ANNOY 300
|
||||
|
||||
# Initial delay after power failure before warning users to get
|
||||
# off the system.
|
||||
ANNOYDELAY 60
|
||||
|
||||
# The condition which determines when users are prevented from
|
||||
# logging in during a power failure.
|
||||
# NOLOGON <string> [ disable | timeout | percent | minutes | always ]
|
||||
NOLOGON disable
|
||||
|
||||
# If KILLDELAY is non-zero, apcupsd will continue running after a
|
||||
# shutdown has been requested, and after the specified time in
|
||||
# seconds attempt to kill the power. This is for use on systems
|
||||
# where apcupsd cannot regain control after a shutdown.
|
||||
# KILLDELAY <seconds> 0 disables
|
||||
KILLDELAY 0
|
||||
|
||||
#
|
||||
# ==== Configuration statements for Network Information Server ====
|
||||
#
|
||||
|
||||
# NETSERVER [ on | off ] on enables, off disables the network
|
||||
# information server. If netstatus is on, a network information
|
||||
# server process will be started for serving the STATUS and
|
||||
# EVENT data over the network (used by CGI programs).
|
||||
NETSERVER on
|
||||
|
||||
# NISIP <dotted notation ip address>
|
||||
# IP address on which NIS server will listen for incoming connections.
|
||||
# This is useful if your server is multi-homed (has more than one
|
||||
# network interface and IP address). Default value is 0.0.0.0 which
|
||||
# means any incoming request will be serviced. Alternatively, you can
|
||||
# configure this setting to any specific IP address of your server and
|
||||
# NIS will listen for connections only on that interface. Use the
|
||||
# loopback address (127.0.0.1) to accept connections only from the
|
||||
# local machine.
|
||||
NISIP 127.0.0.1
|
||||
|
||||
# NISPORT <port> default is 3551 as registered with the IANA
|
||||
# port to use for sending STATUS and EVENTS data over the network.
|
||||
# It is not used unless NETSERVER is on. If you change this port,
|
||||
# you will need to change the corresponding value in the cgi directory
|
||||
# and rebuild the cgi programs.
|
||||
NISPORT 3551
|
||||
|
||||
# If you want the last few EVENTS to be available over the network
|
||||
# by the network information server, you must define an EVENTSFILE.
|
||||
EVENTSFILE /var/log/apcupsd.events
|
||||
|
||||
# EVENTSFILEMAX <kilobytes>
|
||||
# By default, the size of the EVENTSFILE will be not be allowed to exceed
|
||||
# 10 kilobytes. When the file grows beyond this limit, older EVENTS will
|
||||
# be removed from the beginning of the file (first in first out). The
|
||||
# parameter EVENTSFILEMAX can be set to a different kilobyte value, or set
|
||||
# to zero to allow the EVENTSFILE to grow without limit.
|
||||
EVENTSFILEMAX 10
|
||||
|
||||
#
|
||||
# ========== Configuration statements used if sharing =============
|
||||
# a UPS with more than one machine
|
||||
|
||||
#
|
||||
# Remaining items are for ShareUPS (APC expansion card) ONLY
|
||||
#
|
||||
|
||||
# UPSCLASS [ standalone | shareslave | sharemaster ]
|
||||
# Normally standalone unless you share an UPS using an APC ShareUPS
|
||||
# card.
|
||||
UPSCLASS standalone
|
||||
|
||||
# UPSMODE [ disable | share ]
|
||||
# Normally disable unless you share an UPS using an APC ShareUPS card.
|
||||
UPSMODE disable
|
||||
|
||||
#
|
||||
# ===== Configuration statements to control apcupsd system logging ========
|
||||
#
|
||||
|
||||
# Time interval in seconds between writing the STATUS file; 0 disables
|
||||
STATTIME 0
|
||||
|
||||
# Location of STATUS file (written to only if STATTIME is non-zero)
|
||||
STATFILE /var/log/apcupsd.status
|
||||
|
||||
# LOGSTATS [ on | off ] on enables, off disables
|
||||
# Note! This generates a lot of output, so if
|
||||
# you turn this on, be sure that the
|
||||
# file defined in syslog.conf for LOG_NOTICE is a named pipe.
|
||||
# You probably do not want this on.
|
||||
LOGSTATS off
|
||||
|
||||
# Time interval in seconds between writing the DATA records to
|
||||
# the log file. 0 disables.
|
||||
DATATIME 0
|
||||
|
||||
# FACILITY defines the logging facility (class) for logging to syslog.
|
||||
# If not specified, it defaults to "daemon". This is useful
|
||||
# if you want to separate the data logged by apcupsd from other
|
||||
# programs.
|
||||
#FACILITY DAEMON
|
||||
|
||||
#
|
||||
# ========== Configuration statements used in updating the UPS EPROM =========
|
||||
#
|
||||
|
||||
#
|
||||
# These statements are used only by apctest when choosing "Set EEPROM with conf
|
||||
# file values" from the EEPROM menu. THESE STATEMENTS HAVE NO EFFECT ON APCUPSD.
|
||||
#
|
||||
|
||||
# UPS name, max 8 characters
|
||||
#UPSNAME UPS_IDEN
|
||||
|
||||
# Battery date - 8 characters
|
||||
#BATTDATE mm/dd/yy
|
||||
|
||||
# Sensitivity to line voltage quality (H cause faster transfer to batteries)
|
||||
# SENSITIVITY H M L (default = H)
|
||||
#SENSITIVITY H
|
||||
|
||||
# UPS delay after power return (seconds)
|
||||
# WAKEUP 000 060 180 300 (default = 0)
|
||||
#WAKEUP 60
|
||||
|
||||
# UPS Grace period after request to power off (seconds)
|
||||
# SLEEP 020 180 300 600 (default = 20)
|
||||
#SLEEP 180
|
||||
|
||||
# Low line voltage causing transfer to batteries
|
||||
# The permitted values depend on your model as defined by last letter
|
||||
# of FIRMWARE or APCMODEL. Some representative values are:
|
||||
# D 106 103 100 097
|
||||
# M 177 172 168 182
|
||||
# A 092 090 088 086
|
||||
# I 208 204 200 196 (default = 0 => not valid)
|
||||
#LOTRANSFER 208
|
||||
|
||||
# High line voltage causing transfer to batteries
|
||||
# The permitted values depend on your model as defined by last letter
|
||||
# of FIRMWARE or APCMODEL. Some representative values are:
|
||||
# D 127 130 133 136
|
||||
# M 229 234 239 224
|
||||
# A 108 110 112 114
|
||||
# I 253 257 261 265 (default = 0 => not valid)
|
||||
#HITRANSFER 253
|
||||
|
||||
# Battery charge needed to restore power
|
||||
# RETURNCHARGE 00 15 50 90 (default = 15)
|
||||
#RETURNCHARGE 15
|
||||
|
||||
# Alarm delay
|
||||
# 0 = zero delay after pwr fail, T = power fail + 30 sec, L = low battery, N = never
|
||||
# BEEPSTATE 0 T L N (default = 0)
|
||||
#BEEPSTATE T
|
||||
|
||||
# Low battery warning delay in minutes
|
||||
# LOWBATT 02 05 07 10 (default = 02)
|
||||
#LOWBATT 2
|
||||
|
||||
# UPS Output voltage when running on batteries
|
||||
# The permitted values depend on your model as defined by last letter
|
||||
# of FIRMWARE or APCMODEL. Some representative values are:
|
||||
# D 115
|
||||
# M 208
|
||||
# A 100
|
||||
# I 230 240 220 225 (default = 0 => not valid)
|
||||
#OUTPUTVOLTS 230
|
||||
|
||||
# Self test interval in hours 336=2 weeks, 168=1 week, ON=at power on
|
||||
# SELFTEST 336 168 ON OFF (default = 336)
|
||||
#SELFTEST 336
|
|
@ -1,10 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
date=$(date --utc +%s%N)
|
||||
|
||||
METRICS=$(apcaccess)
|
||||
|
||||
for METRIC in TIMELEFT LOADPCT BCHARGE
|
||||
do
|
||||
echo "apcupsd $METRIC=$(grep $METRIC <<< $METRICS | cut -d ':' -f 2 | xargs | cut -d ' ' -f 1 ) $date"
|
||||
done
|
|
@ -1,20 +0,0 @@
|
|||
files = {
|
||||
'/etc/apcupsd/apcupsd.conf': {
|
||||
'needs': [
|
||||
'pkg_apt:apcupsd',
|
||||
],
|
||||
},
|
||||
'/usr/local/share/telegraf/apcupsd': {
|
||||
'source': 'telegraf_plugin',
|
||||
'mode': '755',
|
||||
},
|
||||
}
|
||||
|
||||
svc_systemd = {
|
||||
'apcupsd': {
|
||||
'needs': [
|
||||
'pkg_apt:apcupsd',
|
||||
'file:/etc/apcupsd/apcupsd.conf',
|
||||
],
|
||||
}
|
||||
}
|
|
@ -1,30 +0,0 @@
|
|||
defaults = {
|
||||
'apt': {
|
||||
'packages': {
|
||||
'apcupsd': {},
|
||||
},
|
||||
},
|
||||
'grafana_rows': {
|
||||
'ups',
|
||||
},
|
||||
'sudoers': {
|
||||
'telegraf': {
|
||||
'/usr/local/share/telegraf/apcupsd',
|
||||
},
|
||||
},
|
||||
'telegraf': {
|
||||
'config': {
|
||||
'inputs': {
|
||||
'exec': {
|
||||
repo.libs.hashable.hashable({
|
||||
'commands': ["sudo /usr/local/share/telegraf/apcupsd"],
|
||||
'name_override': "apcupsd",
|
||||
'data_format': "influx",
|
||||
'interval': '30s',
|
||||
'flush_interval': '30s',
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
|
@ -1,37 +0,0 @@
|
|||
# https://manpages.debian.org/latest/apt/sources.list.5.de.html
|
||||
# https://repolib.readthedocs.io/en/latest/deb822-format.html
|
||||
|
||||
```python
|
||||
{
|
||||
'apt': {
|
||||
'packages': {
|
||||
'apt-transport-https': {},
|
||||
},
|
||||
'sources': {
|
||||
'debian': {
|
||||
'types': { # optional, defaults to `{'deb'}``
|
||||
'deb',
|
||||
'deb-src',
|
||||
},
|
||||
'urls': {
|
||||
'https://deb.debian.org/debian',
|
||||
},
|
||||
'suites': { # at least one
|
||||
'{codename}',
|
||||
'{codename}-updates',
|
||||
'{codename}-backports',
|
||||
},
|
||||
'components': { # optional
|
||||
'main',
|
||||
'contrib',
|
||||
'non-frese',
|
||||
},
|
||||
# key:
|
||||
# - optional, defaults to source name (`debian` in this example)
|
||||
# - place key under data/apt/keys/debian-12.{asc|gpg}
|
||||
'key': 'debian-{version}',
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
```
|
|
@ -1,15 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
apt update -qq --silent 2> /dev/null
|
||||
|
||||
UPGRADABLE=$(apt list --upgradable -qq 2> /dev/null | cut -d '/' -f 1)
|
||||
|
||||
if test "$UPGRADABLE" != ""
|
||||
then
|
||||
echo "$(wc -l <<< $UPGRADABLE) package(s) upgradable:"
|
||||
echo
|
||||
echo "$UPGRADABLE"
|
||||
exit 1
|
||||
else
|
||||
exit 0
|
||||
fi
|
|
@ -1,139 +0,0 @@
|
|||
# TODO pin repo: https://superuser.com/a/1595920
|
||||
|
||||
from os.path import join, basename
|
||||
|
||||
directories = {
|
||||
'/etc/apt': {
|
||||
'purge': True,
|
||||
'triggers': {
|
||||
'action:apt_update',
|
||||
},
|
||||
},
|
||||
'/etc/apt/apt.conf.d': {
|
||||
# existance is expected
|
||||
'purge': True,
|
||||
'triggers': {
|
||||
'action:apt_update',
|
||||
},
|
||||
},
|
||||
'/etc/apt/keyrings': {
|
||||
# https://askubuntu.com/a/1307181
|
||||
'purge': True,
|
||||
'triggers': {
|
||||
'action:apt_update',
|
||||
},
|
||||
},
|
||||
# '/etc/apt/listchanges.conf.d': {
|
||||
# 'purge': True,
|
||||
# 'triggers': {
|
||||
# 'action:apt_update',
|
||||
# },
|
||||
# },
|
||||
'/etc/apt/preferences.d': {
|
||||
'purge': True,
|
||||
'triggers': {
|
||||
'action:apt_update',
|
||||
},
|
||||
},
|
||||
'/etc/apt/sources.list.d': {
|
||||
'purge': True,
|
||||
'triggers': {
|
||||
'action:apt_update',
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
files = {
|
||||
'/etc/apt/apt.conf': {
|
||||
'content': repo.libs.apt.render_apt_conf(node.metadata.get('apt/config')),
|
||||
'triggers': {
|
||||
'action:apt_update',
|
||||
},
|
||||
},
|
||||
'/etc/apt/sources.list': {
|
||||
'content': '# managed by bundlewrap\n',
|
||||
'triggers': {
|
||||
'action:apt_update',
|
||||
},
|
||||
},
|
||||
# '/etc/apt/listchanges.conf': {
|
||||
# 'content': repo.libs.ini.dumps(node.metadata.get('apt/list_changes')),
|
||||
# },
|
||||
'/usr/lib/nagios/plugins/check_apt_upgradable': {
|
||||
'mode': '0755',
|
||||
},
|
||||
}
|
||||
|
||||
actions = {
|
||||
'apt_update': {
|
||||
'command': 'apt-get update',
|
||||
'needed_by': {
|
||||
'pkg_apt:',
|
||||
},
|
||||
'triggered': True,
|
||||
'cascade_skip': False,
|
||||
},
|
||||
}
|
||||
|
||||
# create sources.lists and respective keyfiles
|
||||
|
||||
for name, config in node.metadata.get('apt/sources').items():
|
||||
# place keyfile
|
||||
keyfile_destination_path = repo.libs.apt.format_variables(node, config['options']['Signed-By'])
|
||||
files[keyfile_destination_path] = {
|
||||
'source': join(repo.path, 'data', 'apt', 'keys', basename(keyfile_destination_path)),
|
||||
'content_type': 'binary',
|
||||
'triggers': {
|
||||
'action:apt_update',
|
||||
},
|
||||
}
|
||||
|
||||
# place sources.list
|
||||
files[f'/etc/apt/sources.list.d/{name}.sources'] = {
|
||||
'content': repo.libs.apt.render_source(node, name),
|
||||
'triggers': {
|
||||
'action:apt_update',
|
||||
},
|
||||
}
|
||||
|
||||
# create backport pinnings
|
||||
|
||||
for package, options in node.metadata.get('apt/packages', {}).items():
|
||||
pkg_apt[package] = options
|
||||
|
||||
if pkg_apt[package].pop('backports', False):
|
||||
files[f'/etc/apt/preferences.d/{package}'] = {
|
||||
'content': '\n'.join([
|
||||
f"Package: {package}",
|
||||
f"Pin: release a={node.metadata.get('os_codename')}-backports",
|
||||
f"Pin-Priority: 900",
|
||||
]),
|
||||
'needed_by': [
|
||||
f'pkg_apt:{package}',
|
||||
],
|
||||
'triggers': {
|
||||
'action:apt_update',
|
||||
},
|
||||
}
|
||||
|
||||
# unattended upgrades
|
||||
#
|
||||
# unattended-upgrades.service: delays shutdown if necessary
|
||||
# apt-daily.timer: performs apt update
|
||||
# apt-daily-upgrade.timer: performs apt upgrade
|
||||
|
||||
svc_systemd['unattended-upgrades.service'] = {
|
||||
'needs': [
|
||||
'pkg_apt:unattended-upgrades',
|
||||
],
|
||||
}
|
||||
svc_systemd['apt-daily.timer'] = {
|
||||
'needs': [
|
||||
'pkg_apt:unattended-upgrades',
|
||||
],
|
||||
}
|
||||
svc_systemd['apt-daily-upgrade.timer'] = {
|
||||
'needs': [
|
||||
'pkg_apt:unattended-upgrades',
|
||||
],
|
||||
}
|
|
@ -1,177 +0,0 @@
|
|||
defaults = {
|
||||
'apt': {
|
||||
'packages': {
|
||||
'apt-listchanges': {
|
||||
'installed': False,
|
||||
},
|
||||
},
|
||||
'config': {
|
||||
'DPkg': {
|
||||
'Pre-Install-Pkgs': {
|
||||
'/usr/sbin/dpkg-preconfigure --apt || true',
|
||||
},
|
||||
'Post-Invoke': {
|
||||
# keep package cache empty
|
||||
'/bin/rm -f /var/cache/apt/archives/*.deb || true',
|
||||
},
|
||||
'Options': {
|
||||
# https://unix.stackexchange.com/a/642541/357916
|
||||
'--force-confold',
|
||||
'--force-confdef',
|
||||
},
|
||||
},
|
||||
'APT': {
|
||||
'NeverAutoRemove': {
|
||||
'^firmware-linux.*',
|
||||
'^linux-firmware$',
|
||||
'^linux-image-[a-z0-9]*$',
|
||||
'^linux-image-[a-z0-9]*-[a-z0-9]*$',
|
||||
},
|
||||
'VersionedKernelPackages': {
|
||||
# kernels
|
||||
'linux-.*',
|
||||
'kfreebsd-.*',
|
||||
'gnumach-.*',
|
||||
# (out-of-tree) modules
|
||||
'.*-modules',
|
||||
'.*-kernel',
|
||||
},
|
||||
'Never-MarkAuto-Sections': {
|
||||
'metapackages',
|
||||
'tasks',
|
||||
},
|
||||
'Move-Autobit-Sections': {
|
||||
'oldlibs',
|
||||
},
|
||||
'Update': {
|
||||
# https://unix.stackexchange.com/a/653377/357916
|
||||
'Error-Mode': 'any',
|
||||
},
|
||||
},
|
||||
},
|
||||
'sources': {},
|
||||
},
|
||||
'monitoring': {
|
||||
'services': {
|
||||
'apt upgradable': {
|
||||
'vars.command': '/usr/lib/nagios/plugins/check_apt_upgradable',
|
||||
'vars.sudo': True,
|
||||
'check_interval': '1h',
|
||||
},
|
||||
'current kernel': {
|
||||
'vars.command': 'ls /boot/vmlinuz-* | sort -V | tail -n 1 | xargs -n1 basename | cut -d "-" -f 2- | grep -q "^$(uname -r)$"',
|
||||
'check_interval': '1h',
|
||||
},
|
||||
'apt reboot-required': {
|
||||
'vars.command': 'ls /var/run/reboot-required 2> /dev/null && exit 1 || exit 0',
|
||||
'check_interval': '1h',
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'apt/sources',
|
||||
)
|
||||
def key(metadata):
|
||||
return {
|
||||
'apt': {
|
||||
'sources': {
|
||||
source_name: {
|
||||
'key': source_name,
|
||||
}
|
||||
for source_name, source_config in metadata.get('apt/sources').items()
|
||||
if 'key' not in source_config
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'apt/sources',
|
||||
)
|
||||
def signed_by(metadata):
|
||||
return {
|
||||
'apt': {
|
||||
'sources': {
|
||||
source_name: {
|
||||
'options': {
|
||||
'Signed-By': '/etc/apt/keyrings/' + metadata.get(f'apt/sources/{source_name}/key') + '.' + repo.libs.apt.find_keyfile_extension(node, metadata.get(f'apt/sources/{source_name}/key')),
|
||||
},
|
||||
}
|
||||
for source_name in metadata.get('apt/sources')
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'apt/config',
|
||||
'apt/packages',
|
||||
)
|
||||
def unattended_upgrades(metadata):
|
||||
return {
|
||||
'apt': {
|
||||
'config': {
|
||||
'APT': {
|
||||
'Periodic': {
|
||||
'Update-Package-Lists': '1',
|
||||
'Unattended-Upgrade': '1',
|
||||
},
|
||||
},
|
||||
'Unattended-Upgrade': {
|
||||
'Origins-Pattern': {
|
||||
"origin=*",
|
||||
},
|
||||
},
|
||||
},
|
||||
'packages': {
|
||||
'unattended-upgrades': {},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
# @metadata_reactor.provides(
|
||||
# 'apt/config',
|
||||
# 'apt/list_changes',
|
||||
# )
|
||||
# def listchanges(metadata):
|
||||
# return {
|
||||
# 'apt': {
|
||||
# 'config': {
|
||||
# 'DPkg': {
|
||||
# 'Pre-Install-Pkgs': {
|
||||
# '/usr/bin/apt-listchanges --apt || test $? -lt 10',
|
||||
# },
|
||||
# 'Tools': {
|
||||
# 'Options': {
|
||||
# '/usr/bin/apt-listchanges': {
|
||||
# 'Version': '2',
|
||||
# 'InfoFD': '20',
|
||||
# },
|
||||
# },
|
||||
# },
|
||||
# },
|
||||
# 'Dir': {
|
||||
# 'Etc': {
|
||||
# 'apt-listchanges-main': 'listchanges.conf',
|
||||
# 'apt-listchanges-parts': 'listchanges.conf.d',
|
||||
# },
|
||||
# },
|
||||
# },
|
||||
# 'list_changes': {
|
||||
# 'apt': {
|
||||
# 'frontend': 'pager',
|
||||
# 'which': 'news',
|
||||
# 'email_address': 'root',
|
||||
# 'email_format': 'text',
|
||||
# 'confirm': 'false',
|
||||
# 'headers': 'false',
|
||||
# 'reverse': 'false',
|
||||
# 'save_seen': '/var/lib/apt/listchanges.db',
|
||||
# },
|
||||
# },
|
||||
# },
|
||||
# }
|
|
@ -1,12 +0,0 @@
|
|||
```
|
||||
defaults = {
|
||||
'archive': {
|
||||
'/var/important': {
|
||||
'exclude': [
|
||||
'\.cache/',
|
||||
'\.log$',
|
||||
],
|
||||
},
|
||||
},
|
||||
}
|
||||
```
|
|
@ -1,29 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
if [[ "$1" == 'perform' ]]
|
||||
then
|
||||
echo 'NON-DRY RUN'
|
||||
DRY=''
|
||||
else
|
||||
echo 'DRY RUN'
|
||||
DRY='-n'
|
||||
fi
|
||||
|
||||
% for path, options in paths.items():
|
||||
# ${path}
|
||||
gsutil ${'\\'}
|
||||
-m ${'\\'}
|
||||
-o 'GSUtil:parallel_process_count=${processes}' ${'\\'}
|
||||
-o 'GSUtil:parallel_thread_count=${threads}' ${'\\'}
|
||||
rsync ${'\\'}
|
||||
$DRY ${'\\'}
|
||||
-r ${'\\'}
|
||||
-d ${'\\'}
|
||||
-e ${'\\'}
|
||||
% if options.get('exclude'):
|
||||
-x '${'|'.join(options['exclude'])}' ${'\\'}
|
||||
% endif
|
||||
'${options['encrypted_path']}' ${'\\'}
|
||||
'gs://${bucket}/${node_id}${path}' ${'\\'}
|
||||
2>&1 | logger -st gsutil
|
||||
% endfor
|
|
@ -1,10 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
FILENAME=$1
|
||||
TMPFILE=$(mktemp /tmp/archive_file.XXXXXXXXXX)
|
||||
BUCKET=$(cat /etc/gcloud/gcloud.json | jq -r .bucket)
|
||||
NODE=$(cat /etc/archive/archive.json | jq -r .node_id)
|
||||
MASTERKEY=$(cat /etc/gocryptfs/masterkey)
|
||||
|
||||
gsutil cat "gs://$BUCKET/$NODE$FILENAME" > "$TMPFILE"
|
||||
/opt/gocryptfs-inspect/gocryptfs.py --aessiv --config=/etc/gocryptfs/gocryptfs.conf --masterkey="$MASTERKEY" "$TMPFILE"
|
|
@ -1,15 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
FILENAME=$1
|
||||
|
||||
ARCHIVE=$(/opt/archive/get_file "$FILENAME" | sha256sum)
|
||||
ORIGINAL=$(cat "$FILENAME" | sha256sum)
|
||||
|
||||
if [[ "$ARCHIVE" == "$ORIGINAL" ]]
|
||||
then
|
||||
echo "OK"
|
||||
exit 0
|
||||
else
|
||||
echo "ERROR"
|
||||
exit 1
|
||||
fi
|
|
@ -1,43 +0,0 @@
|
|||
assert node.has_bundle('gcloud')
|
||||
assert node.has_bundle('gocryptfs')
|
||||
assert node.has_bundle('gocryptfs-inspect')
|
||||
assert node.has_bundle('systemd')
|
||||
|
||||
from json import dumps
|
||||
|
||||
directories['/opt/archive'] = {}
|
||||
directories['/etc/archive'] = {}
|
||||
|
||||
files['/etc/archive/archive.json'] = {
|
||||
'content': dumps(
|
||||
{
|
||||
'node_id': node.metadata.get('id'),
|
||||
**node.metadata.get('archive'),
|
||||
},
|
||||
indent=4,
|
||||
sort_keys=True
|
||||
),
|
||||
}
|
||||
|
||||
files['/opt/archive/archive'] = {
|
||||
'content_type': 'mako',
|
||||
'mode': '700',
|
||||
'context': {
|
||||
'node_id': node.metadata.get('id'),
|
||||
'paths': node.metadata.get('archive/paths'),
|
||||
'bucket': node.metadata.get('gcloud/bucket'),
|
||||
'processes': 4,
|
||||
'threads': 4,
|
||||
},
|
||||
'needs': [
|
||||
'bundle:gcloud',
|
||||
],
|
||||
}
|
||||
|
||||
files['/opt/archive/get_file'] = {
|
||||
'mode': '700',
|
||||
}
|
||||
|
||||
files['/opt/archive/validate_file'] = {
|
||||
'mode': '700',
|
||||
}
|
|
@ -1,45 +0,0 @@
|
|||
defaults = {
|
||||
'apt': {
|
||||
'packages': {
|
||||
'jq': {},
|
||||
},
|
||||
},
|
||||
'archive': {
|
||||
'paths': {},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'archive/paths',
|
||||
)
|
||||
def paths(metadata):
|
||||
return {
|
||||
'archive': {
|
||||
'paths': {
|
||||
path: {
|
||||
'encrypted_path': f'/mnt/archive.enc{path}',
|
||||
'exclude': [
|
||||
'^\..*',
|
||||
'/\..*',
|
||||
],
|
||||
} for path in metadata.get('archive/paths')
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'gocryptfs/paths',
|
||||
)
|
||||
def gocryptfs(metadata):
|
||||
return {
|
||||
'gocryptfs': {
|
||||
'paths': {
|
||||
path: {
|
||||
'mountpoint': options['encrypted_path'],
|
||||
'reverse': True,
|
||||
} for path, options in metadata.get('archive/paths').items()
|
||||
},
|
||||
}
|
||||
}
|
|
@ -1,47 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
import json
|
||||
from subprocess import check_output
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
|
||||
now = datetime.now()
|
||||
two_days_ago = now - timedelta(days=2)
|
||||
|
||||
with open('/etc/backup-freshness-check.json', 'r') as file:
|
||||
config = json.load(file)
|
||||
|
||||
local_datasets = check_output(['zfs', 'list', '-H', '-o', 'name']).decode().splitlines()
|
||||
errors = set()
|
||||
|
||||
for dataset in config['datasets']:
|
||||
if f'tank/{dataset}' not in local_datasets:
|
||||
errors.add(f'dataset "{dataset}" not present at all')
|
||||
continue
|
||||
|
||||
snapshots = [
|
||||
snapshot
|
||||
for snapshot in check_output(['zfs', 'list', '-H', '-o', 'name', '-t', 'snapshot', f'tank/{dataset}', '-s', 'creation']).decode().splitlines()
|
||||
if f"@{config['prefix']}" in snapshot
|
||||
]
|
||||
|
||||
if not snapshots:
|
||||
errors.add(f'dataset "{dataset}" has no backup snapshots')
|
||||
continue
|
||||
|
||||
newest_backup_snapshot = snapshots[-1]
|
||||
snapshot_datetime = datetime.utcfromtimestamp(
|
||||
int(check_output(['zfs', 'list', '-p', '-H', '-o', 'creation', '-t', 'snapshot', newest_backup_snapshot]).decode())
|
||||
)
|
||||
|
||||
if snapshot_datetime < two_days_ago:
|
||||
days_ago = (now - snapshot_datetime).days
|
||||
errors.add(f'dataset "{dataset}" has not been backed up for {days_ago} days')
|
||||
continue
|
||||
|
||||
if errors:
|
||||
for error in errors:
|
||||
print(error)
|
||||
exit(2)
|
||||
else:
|
||||
print(f"all {len(config['datasets'])} datasets have fresh backups.")
|
|
@ -1,15 +0,0 @@
|
|||
from json import dumps
|
||||
from bundlewrap.metadata import MetadataJSONEncoder
|
||||
|
||||
|
||||
files = {
|
||||
'/etc/backup-freshness-check.json': {
|
||||
'content': dumps({
|
||||
'prefix': node.metadata.get('backup-freshness-check/prefix'),
|
||||
'datasets': node.metadata.get('backup-freshness-check/datasets'),
|
||||
}, indent=4, sort_keys=True, cls=MetadataJSONEncoder),
|
||||
},
|
||||
'/usr/lib/nagios/plugins/check_backup_freshness': {
|
||||
'mode': '0755',
|
||||
},
|
||||
}
|
|
@ -1,37 +0,0 @@
|
|||
defaults = {
|
||||
'backup-freshness-check': {
|
||||
'server': node.name,
|
||||
'prefix': 'auto-backup_',
|
||||
'datasets': {},
|
||||
},
|
||||
'monitoring': {
|
||||
'services': {
|
||||
'backup freshness': {
|
||||
'vars.command': '/usr/lib/nagios/plugins/check_backup_freshness',
|
||||
'check_interval': '6h',
|
||||
'vars.sudo': True,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'backup-freshness-check/datasets'
|
||||
)
|
||||
def backup_freshness_check(metadata):
|
||||
return {
|
||||
'backup-freshness-check': {
|
||||
'datasets': {
|
||||
f"{other_node.metadata.get('id')}/{dataset}"
|
||||
for other_node in repo.nodes
|
||||
if not other_node.dummy
|
||||
and other_node.has_bundle('backup')
|
||||
and other_node.has_bundle('zfs')
|
||||
and other_node.metadata.get('backup/server') == metadata.get('backup-freshness-check/server')
|
||||
for dataset, options in other_node.metadata.get('zfs/datasets').items()
|
||||
if options.get('backup', True)
|
||||
and not options.get('mountpoint', None) in [None, 'none']
|
||||
},
|
||||
},
|
||||
}
|
|
@ -1,3 +0,0 @@
|
|||
!/bin/bash
|
||||
|
||||
zfs send tank/nextcloud@test1 | ssh backup-receiver@10.0.0.5 sudo zfs recv tank/nextcloud
|
|
@ -1,122 +1,7 @@
|
|||
from ipaddress import ip_interface
|
||||
|
||||
defaults = {
|
||||
'apt': {
|
||||
'packages': {
|
||||
'rsync': {},
|
||||
},
|
||||
},
|
||||
'users': {
|
||||
'backup-receiver': {
|
||||
'authorized_keys': set(),
|
||||
},
|
||||
},
|
||||
'sudoers': {
|
||||
'backup-receiver': {
|
||||
'/usr/bin/rsync',
|
||||
'/sbin/zfs',
|
||||
},
|
||||
},
|
||||
'zfs': {
|
||||
'datasets': {
|
||||
'tank': {
|
||||
'recordsize': "1048576",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'zfs/datasets'
|
||||
)
|
||||
def zfs(metadata):
|
||||
datasets = {}
|
||||
|
||||
for other_node in repo.nodes:
|
||||
if (
|
||||
not other_node.dummy and
|
||||
other_node.has_bundle('backup') and
|
||||
other_node.metadata.get('backup/server') == node.name
|
||||
):
|
||||
id = other_node.metadata.get('id')
|
||||
base_dataset = f'tank/{id}'
|
||||
|
||||
# container
|
||||
datasets[base_dataset] = {
|
||||
'mountpoint': None,
|
||||
'readonly': 'on',
|
||||
'compression': 'lz4',
|
||||
'com.sun:auto-snapshot': 'false',
|
||||
'backup': False,
|
||||
}
|
||||
|
||||
# for rsync backups
|
||||
datasets[f'{base_dataset}/fs'] = {
|
||||
'mountpoint': f"/mnt/backups/{id}",
|
||||
'readonly': 'off',
|
||||
'compression': 'lz4',
|
||||
'com.sun:auto-snapshot': 'true',
|
||||
'backup': False,
|
||||
}
|
||||
|
||||
# for zfs send/recv
|
||||
if other_node.has_bundle('zfs'):
|
||||
|
||||
# base datasets for each tank
|
||||
for pool in other_node.metadata.get('zfs/pools'):
|
||||
datasets[f'{base_dataset}/{pool}'] = {
|
||||
'mountpoint': None,
|
||||
'readonly': 'on',
|
||||
'compression': 'lz4',
|
||||
'com.sun:auto-snapshot': 'false',
|
||||
'backup': False,
|
||||
}
|
||||
|
||||
# actual datasets
|
||||
for path in other_node.metadata.get('backup/paths'):
|
||||
for dataset, config in other_node.metadata.get('zfs/datasets').items():
|
||||
if path == config.get('mountpoint'):
|
||||
datasets[f'{base_dataset}/{dataset}'] = {
|
||||
'mountpoint': None,
|
||||
'readonly': 'on',
|
||||
'compression': 'lz4',
|
||||
'com.sun:auto-snapshot': 'false',
|
||||
'backup': False,
|
||||
}
|
||||
continue
|
||||
|
||||
return {
|
||||
'zfs': {
|
||||
'datasets': datasets,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'dns',
|
||||
)
|
||||
def dns(metadata):
|
||||
return {
|
||||
'dns': {
|
||||
metadata.get('backup-server/hostname'): repo.libs.ip.get_a_records(metadata),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'users/backup-receiver/authorized_keys'
|
||||
)
|
||||
@metadata_reactor
|
||||
def backup_authorized_keys(metadata):
|
||||
return {
|
||||
'users': {
|
||||
'backup-receiver': {
|
||||
'authorized_keys': {
|
||||
other_node.metadata.get('users/root/pubkey')
|
||||
for other_node in repo.nodes
|
||||
if other_node.has_bundle('backup')
|
||||
and other_node.metadata.get('backup/server') == node.name
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for other_node in repo.nodes:
|
||||
if other_node.metadata.get('backup/server') == node.name:
|
||||
other_node.metadata.get('users/root/pubkey')
|
||||
|
||||
return {}
|
||||
|
|
|
@ -1,31 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -u
|
||||
|
||||
# FIXME: inelegant
|
||||
% if wol_command:
|
||||
${wol_command}
|
||||
% endif
|
||||
|
||||
exit=0
|
||||
failed_paths=""
|
||||
|
||||
for path in $(jq -r '.paths | .[]' < /etc/backup/config.json)
|
||||
do
|
||||
echo backing up $path
|
||||
/opt/backup/backup_path "$path"
|
||||
# set exit to 1 if any backup fails
|
||||
if [ $? -ne 0 ]
|
||||
then
|
||||
echo ERROR: backing up $path failed >&2
|
||||
exit=5
|
||||
failed_paths="$failed_paths $path"
|
||||
fi
|
||||
done
|
||||
|
||||
if [ $exit -ne 0 ]
|
||||
then
|
||||
echo "ERROR: failed to backup paths: $failed_paths" >&2
|
||||
fi
|
||||
|
||||
exit $exit
|
|
@ -1,16 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -exu
|
||||
|
||||
path=$1
|
||||
|
||||
if zfs list -H -o mountpoint | grep -q "^$path$"
|
||||
then
|
||||
/opt/backup/backup_path_via_zfs "$path"
|
||||
elif test -e "$path"
|
||||
then
|
||||
/opt/backup/backup_path_via_rsync "$path"
|
||||
else
|
||||
echo "UNKNOWN PATH: $path"
|
||||
exit 1
|
||||
fi
|
|
@ -1,20 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -exu
|
||||
|
||||
path=$1
|
||||
uuid=$(jq -r .client_uuid < /etc/backup/config.json)
|
||||
server=$(jq -r .server_hostname < /etc/backup/config.json)
|
||||
ssh="ssh -o ConnectTimeout=5 backup-receiver@$server"
|
||||
|
||||
if test -d "$path"
|
||||
then
|
||||
postfix="/"
|
||||
elif test -f "$path"
|
||||
then
|
||||
postfix=""
|
||||
else
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rsync -av --rsync-path="sudo rsync" "$path$postfix" "backup-receiver@$server:/mnt/backups/$uuid$path$postfix"
|
|
@ -1,67 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -eu
|
||||
|
||||
path=$1
|
||||
uuid=$(jq -r .client_uuid < /etc/backup/config.json)
|
||||
server=$(jq -r .server_hostname < /etc/backup/config.json)
|
||||
ssh="ssh -o ConnectTimeout=5 backup-receiver@$server"
|
||||
|
||||
source_dataset=$(zfs list -H -o mountpoint,name | grep -P "^$path\t" | cut -d $'\t' -f 2)
|
||||
target_dataset="tank/$uuid/$source_dataset"
|
||||
target_dataset_parent=$(echo $target_dataset | rev | cut -d / -f 2- | rev)
|
||||
bookmark_prefix="auto-backup_"
|
||||
new_bookmark="$bookmark_prefix$(date +"%Y-%m-%d_%H:%M:%S")"
|
||||
|
||||
for var in path uuid server ssh source_dataset target_dataset target_dataset_parent new_bookmark
|
||||
do
|
||||
[[ -z "${!var}" ]] && echo "ERROR - $var is empty" && exit 96
|
||||
done
|
||||
|
||||
$ssh true || (echo "ERROR - cant ssh connect to $server" && exit 97)
|
||||
|
||||
echo "BACKUP ZFS DATASET - PATH: $path, SERVER: $server, UUID: $uuid, SOURCE_DATASET: $source_dataset, TARGET_DATASET: $target_dataset"
|
||||
|
||||
if ! $ssh sudo zfs list -t filesystem -H -o name | grep -q "^$target_dataset_parent$"
|
||||
then
|
||||
echo "CREATING PARENT DATASET..."
|
||||
$ssh sudo zfs create -p -o mountpoint=none "$target_dataset_parent"
|
||||
fi
|
||||
|
||||
zfs snap "$source_dataset@$new_bookmark"
|
||||
|
||||
if zfs list -t bookmark -H -o name | grep "^$source_dataset#$bookmark_prefix" | wc -l | grep -q "^0$"
|
||||
then
|
||||
echo "INITIAL BACKUP"
|
||||
# do in subshell, otherwise ctr+c will lead to 0 exitcode
|
||||
$(zfs send -v "$source_dataset@$new_bookmark" | $ssh sudo zfs recv -F "$target_dataset")
|
||||
else
|
||||
echo "INCREMENTAL BACKUP"
|
||||
last_bookmark=$(zfs list -t bookmark -H -o name | grep "^$source_dataset#$bookmark_prefix" | sort | tail -1 | cut -d '#' -f 2)
|
||||
[[ -z "$last_bookmark" ]] && echo "ERROR - last_bookmark is empty" && exit 98
|
||||
$(zfs send -v -L -i "#$last_bookmark" "$source_dataset@$new_bookmark" | $ssh sudo zfs recv "$target_dataset")
|
||||
fi
|
||||
|
||||
if [[ "$?" == "0" ]]
|
||||
then
|
||||
|
||||
# delete old local bookmarks
|
||||
for destroyable_bookmark in $(zfs list -t bookmark -H -o name "$source_dataset" | grep "^$source_dataset#$bookmark_prefix")
|
||||
do
|
||||
zfs destroy "$destroyable_bookmark"
|
||||
done
|
||||
|
||||
# delete remote snapshots from bookmarks (except newest, even of not necessary; maybe for resuming tho)
|
||||
for destroyable_snapshot in $($ssh sudo zfs list -t snapshot -H -o name "$target_dataset" | grep "^$target_dataset@$bookmark_prefix" | grep -v "$new_bookmark")
|
||||
do
|
||||
$ssh sudo zfs destroy "$destroyable_snapshot"
|
||||
done
|
||||
|
||||
zfs bookmark "$source_dataset@$new_bookmark" "$source_dataset#$new_bookmark"
|
||||
zfs destroy "$source_dataset@$new_bookmark" # keep snapshots?
|
||||
echo "SUCCESS"
|
||||
else
|
||||
zfs destroy "$source_dataset@$new_bookmark"
|
||||
echo "ERROR"
|
||||
exit 99
|
||||
fi
|
|
@ -1,37 +0,0 @@
|
|||
from json import dumps
|
||||
|
||||
|
||||
backup_node = repo.get_node(node.metadata.get('backup/server'))
|
||||
|
||||
directories['/opt/backup'] = {}
|
||||
|
||||
files['/opt/backup/backup_all'] = {
|
||||
'mode': '700',
|
||||
'content_type': 'mako',
|
||||
'context': {
|
||||
'wol_command': backup_node.metadata.get('wol-sleeper/wake_command', False),
|
||||
},
|
||||
}
|
||||
files['/opt/backup/backup_path'] = {
|
||||
'mode': '700',
|
||||
}
|
||||
files['/opt/backup/backup_path_via_zfs'] = {
|
||||
'mode': '700',
|
||||
}
|
||||
files['/opt/backup/backup_path_via_rsync'] = {
|
||||
'mode': '700',
|
||||
}
|
||||
|
||||
directories['/etc/backup'] = {}
|
||||
|
||||
files['/etc/backup/config.json'] = {
|
||||
'content': dumps(
|
||||
{
|
||||
'server_hostname': backup_node.metadata.get('backup-server/hostname'),
|
||||
'client_uuid': node.metadata.get('id'),
|
||||
'paths': sorted(set(node.metadata.get('backup/paths'))),
|
||||
},
|
||||
indent=4,
|
||||
sort_keys=True
|
||||
),
|
||||
}
|
|
@ -1,30 +0,0 @@
|
|||
defaults = {
|
||||
'apt': {
|
||||
'packages': {
|
||||
'jq': {
|
||||
'needed_by': {
|
||||
'svc_systemd:backup.timer',
|
||||
},
|
||||
},
|
||||
'rsync': {
|
||||
'needed_by': {
|
||||
'svc_systemd:backup.timer',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
'backup': {
|
||||
'server': None,
|
||||
'paths': set(),
|
||||
},
|
||||
'systemd-timers': {
|
||||
f'backup': {
|
||||
'command': '/opt/backup/backup_all',
|
||||
'when': '1:00',
|
||||
'persistent': True,
|
||||
'after': {
|
||||
'network-online.target',
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
|
@ -1,69 +0,0 @@
|
|||
from ipaddress import ip_interface
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'dns',
|
||||
)
|
||||
def acme_records(metadata):
|
||||
domains = set()
|
||||
|
||||
for other_node in repo.nodes:
|
||||
for domain, conf in other_node.metadata.get('letsencrypt/domains', {}).items():
|
||||
domains.add(domain)
|
||||
domains.update(conf.get('aliases', []))
|
||||
|
||||
return {
|
||||
'dns': {
|
||||
f'_acme-challenge.{domain}': {
|
||||
'CNAME': {f"{domain}.{metadata.get('bind/acme_zone')}."},
|
||||
}
|
||||
for domain in domains
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'bind/acls/acme',
|
||||
'bind/views/external/keys/acme',
|
||||
'bind/views/external/zones',
|
||||
)
|
||||
def acme_zone(metadata):
|
||||
allowed_ips = {
|
||||
*{
|
||||
str(ip_interface(other_node.metadata.get('network/internal/ipv4')).ip)
|
||||
for other_node in repo.nodes
|
||||
if other_node.metadata.get('letsencrypt/domains', {})
|
||||
},
|
||||
*{
|
||||
str(ip_interface(other_node.metadata.get('wireguard/my_ip')).ip)
|
||||
for other_node in repo.nodes
|
||||
if other_node.has_bundle('wireguard')
|
||||
},
|
||||
}
|
||||
|
||||
return {
|
||||
'bind': {
|
||||
'acls': {
|
||||
'acme': {
|
||||
'key acme',
|
||||
'!{ !{' + ' '.join(f'{ip};' for ip in sorted(allowed_ips)) + '}; any;}',
|
||||
},
|
||||
},
|
||||
'views': {
|
||||
'external': {
|
||||
'keys': {
|
||||
'acme': {},
|
||||
},
|
||||
'zones': {
|
||||
metadata.get('bind/acme_zone'): {
|
||||
'allow_update': {
|
||||
'acme',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
#https://lists.isc.org/pipermail/bind-users/2006-January/061051.html
|
|
@ -1,23 +0,0 @@
|
|||
<%!
|
||||
def column_width(column, table):
|
||||
return max(map(lambda row: len(row[column]), table)) if table else 0
|
||||
%>\
|
||||
$TTL 600
|
||||
@ IN SOA ${hostname}. admin.${hostname}. (
|
||||
2021111709 ;Serial
|
||||
3600 ;Refresh
|
||||
200 ;Retry
|
||||
1209600 ;Expire
|
||||
900 ;Negative response caching TTL
|
||||
)
|
||||
|
||||
% for record in sorted(records, key=lambda r: (tuple(reversed(r['name'].split('.'))), r['type'], r['value'])):
|
||||
(${(record['name'] or '@').rjust(column_width('name', records))}) \
|
||||
IN \
|
||||
${record['type'].ljust(column_width('type', records))} \
|
||||
% if record['type'] == 'TXT':
|
||||
(${' '.join('"'+record['value'][i:i+255]+'"' for i in range(0, len(record['value']), 255))})
|
||||
% else:
|
||||
${record['value']}
|
||||
% endif
|
||||
% endfor
|
|
@ -1,2 +0,0 @@
|
|||
RESOLVCONF=no
|
||||
OPTIONS="-u bind"
|
|
@ -1,6 +0,0 @@
|
|||
statistics-channels {
|
||||
inet 127.0.0.1 port 8053;
|
||||
};
|
||||
|
||||
include "/etc/bind/named.conf.options";
|
||||
include "/etc/bind/named.conf.local";
|
|
@ -1,70 +0,0 @@
|
|||
# KEYS
|
||||
|
||||
% for view_name, view_conf in views.items():
|
||||
% for key_name, key_conf in sorted(view_conf['keys'].items()):
|
||||
key "${key_name}" {
|
||||
algorithm hmac-sha512;
|
||||
secret "${key_conf['token']}";
|
||||
};
|
||||
% endfor
|
||||
% endfor
|
||||
|
||||
# ACLS
|
||||
|
||||
% for acl_name, acl_content in acls.items():
|
||||
acl "${acl_name}" {
|
||||
% for ac in sorted(acl_content, key=lambda e: (not e.startswith('!'), not e.startswith('key'), e)):
|
||||
${ac};
|
||||
% endfor
|
||||
};
|
||||
% endfor
|
||||
|
||||
# VIEWS
|
||||
|
||||
% for view_name, view_conf in views.items():
|
||||
view "${view_name}" {
|
||||
match-clients {
|
||||
${view_name};
|
||||
};
|
||||
|
||||
% if view_conf['is_internal']:
|
||||
recursion yes;
|
||||
% else:
|
||||
recursion no;
|
||||
rate-limit {
|
||||
responses-per-second 2;
|
||||
window 25;
|
||||
};
|
||||
% endif
|
||||
|
||||
forward only;
|
||||
forwarders {
|
||||
1.1.1.1;
|
||||
9.9.9.9;
|
||||
8.8.8.8;
|
||||
};
|
||||
|
||||
% for zone_name, zone_conf in sorted(view_conf['zones'].items()):
|
||||
zone "${zone_name}" {
|
||||
% if type == 'slave' and zone_conf.get('allow_update', []):
|
||||
type slave;
|
||||
masters { ${master_ip}; };
|
||||
% else:
|
||||
type master;
|
||||
% if zone_conf.get('allow_update', []):
|
||||
allow-update {
|
||||
% for allow_update in zone_conf['allow_update']:
|
||||
${allow_update};
|
||||
% endfor
|
||||
};
|
||||
% endif
|
||||
% endif
|
||||
file "/var/lib/bind/${view_name}/${zone_name}";
|
||||
};
|
||||
% endfor
|
||||
|
||||
include "/etc/bind/named.conf.default-zones";
|
||||
include "/etc/bind/zones.rfc1918";
|
||||
};
|
||||
|
||||
% endfor
|
|
@ -1,16 +0,0 @@
|
|||
options {
|
||||
directory "/var/cache/bind";
|
||||
dnssec-validation auto;
|
||||
|
||||
listen-on-v6 { any; };
|
||||
allow-query { any; };
|
||||
|
||||
max-cache-size 30%;
|
||||
querylog yes;
|
||||
|
||||
% if type == 'master':
|
||||
notify yes;
|
||||
also-notify { ${' '.join([f'{ip};' for ip in slave_ips])} };
|
||||
allow-transfer { ${' '.join([f'{ip};' for ip in slave_ips])} };
|
||||
% endif
|
||||
};
|
|
@ -1,144 +0,0 @@
|
|||
from ipaddress import ip_address, ip_interface
|
||||
from datetime import datetime
|
||||
from hashlib import sha3_512
|
||||
|
||||
|
||||
if node.metadata.get('bind/type') == 'master':
|
||||
master_node = node
|
||||
else:
|
||||
master_node = repo.get_node(node.metadata.get('bind/master_node'))
|
||||
|
||||
directories[f'/var/lib/bind'] = {
|
||||
'owner': 'bind',
|
||||
'group': 'bind',
|
||||
'purge': True,
|
||||
'needs': [
|
||||
'pkg_apt:bind9',
|
||||
],
|
||||
'needed_by': [
|
||||
'svc_systemd:bind9',
|
||||
],
|
||||
'triggers': [
|
||||
'svc_systemd:bind9:reload',
|
||||
],
|
||||
}
|
||||
|
||||
files['/etc/default/bind9'] = {
|
||||
'source': 'defaults',
|
||||
'needed_by': [
|
||||
'svc_systemd:bind9',
|
||||
],
|
||||
'triggers': [
|
||||
'svc_systemd:bind9:reload',
|
||||
],
|
||||
}
|
||||
|
||||
files['/etc/bind/named.conf'] = {
|
||||
'owner': 'root',
|
||||
'group': 'bind',
|
||||
'needs': [
|
||||
'pkg_apt:bind9',
|
||||
],
|
||||
'needed_by': [
|
||||
'svc_systemd:bind9',
|
||||
],
|
||||
'triggers': [
|
||||
'svc_systemd:bind9:reload',
|
||||
],
|
||||
}
|
||||
|
||||
files['/etc/bind/named.conf.options'] = {
|
||||
'content_type': 'mako',
|
||||
'context': {
|
||||
'type': node.metadata.get('bind/type'),
|
||||
'slave_ips': node.metadata.get('bind/slave_ips', []),
|
||||
'master_ip': node.metadata.get('bind/master_ip', None),
|
||||
},
|
||||
'owner': 'root',
|
||||
'group': 'bind',
|
||||
'needs': [
|
||||
'pkg_apt:bind9',
|
||||
],
|
||||
'needed_by': [
|
||||
'svc_systemd:bind9',
|
||||
],
|
||||
'triggers': [
|
||||
'svc_systemd:bind9:reload',
|
||||
],
|
||||
}
|
||||
|
||||
files['/etc/bind/named.conf.local'] = {
|
||||
'content_type': 'mako',
|
||||
'context': {
|
||||
'type': node.metadata.get('bind/type'),
|
||||
'master_ip': node.metadata.get('bind/master_ip', None),
|
||||
'acls': {
|
||||
**master_node.metadata.get('bind/acls'),
|
||||
**{
|
||||
view_name: view_conf['match_clients']
|
||||
for view_name, view_conf in master_node.metadata.get('bind/views').items()
|
||||
},
|
||||
},
|
||||
'views': dict(sorted(
|
||||
master_node.metadata.get('bind/views').items(),
|
||||
key=lambda e: (e[1].get('default', False), e[0]),
|
||||
)),
|
||||
},
|
||||
'owner': 'root',
|
||||
'group': 'bind',
|
||||
'needs': [
|
||||
'pkg_apt:bind9',
|
||||
],
|
||||
'needed_by': [
|
||||
'svc_systemd:bind9',
|
||||
],
|
||||
'triggers': [
|
||||
'svc_systemd:bind9:reload',
|
||||
],
|
||||
}
|
||||
|
||||
for view_name, view_conf in master_node.metadata.get('bind/views').items():
|
||||
directories[f"/var/lib/bind/{view_name}"] = {
|
||||
'owner': 'bind',
|
||||
'group': 'bind',
|
||||
'purge': True,
|
||||
'needed_by': [
|
||||
'svc_systemd:bind9',
|
||||
],
|
||||
'triggers': [
|
||||
'svc_systemd:bind9:reload',
|
||||
],
|
||||
}
|
||||
|
||||
for zone_name, zone_conf in view_conf['zones'].items():
|
||||
files[f"/var/lib/bind/{view_name}/{zone_name}"] = {
|
||||
'source': 'db',
|
||||
'content_type': 'mako',
|
||||
'unless': f"test -f /var/lib/bind/{view_name}/{zone_name}" if zone_conf.get('allow_update', False) else 'false',
|
||||
'context': {
|
||||
'serial': datetime.now().strftime('%Y%m%d%H'),
|
||||
'records': zone_conf['records'],
|
||||
'hostname': node.metadata.get('bind/hostname'),
|
||||
'type': node.metadata.get('bind/type'),
|
||||
},
|
||||
'owner': 'bind',
|
||||
'group': 'bind',
|
||||
'needed_by': [
|
||||
'svc_systemd:bind9',
|
||||
],
|
||||
'triggers': [
|
||||
'svc_systemd:bind9:reload',
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
svc_systemd['bind9'] = {}
|
||||
|
||||
actions['named-checkconf'] = {
|
||||
'command': 'named-checkconf -z',
|
||||
'unless': 'named-checkconf -z',
|
||||
'needs': [
|
||||
'svc_systemd:bind9',
|
||||
'svc_systemd:bind9:reload',
|
||||
]
|
||||
}
|
|
@ -1,257 +0,0 @@
|
|||
from ipaddress import ip_interface
|
||||
from json import dumps
|
||||
h = repo.libs.hashable.hashable
|
||||
repo.libs.bind.repo = repo
|
||||
|
||||
defaults = {
|
||||
'apt': {
|
||||
'packages': {
|
||||
'bind9': {},
|
||||
},
|
||||
},
|
||||
'bind': {
|
||||
'slaves': {},
|
||||
'acls': {
|
||||
'our-nets': {
|
||||
'127.0.0.1',
|
||||
'10.0.0.0/8',
|
||||
'169.254.0.0/16',
|
||||
'172.16.0.0/12',
|
||||
'192.168.0.0/16',
|
||||
}
|
||||
},
|
||||
'views': {
|
||||
'internal': {
|
||||
'is_internal': True,
|
||||
'keys': {},
|
||||
'match_clients': {
|
||||
'our-nets',
|
||||
},
|
||||
'zones': {},
|
||||
},
|
||||
'external': {
|
||||
'default': True,
|
||||
'is_internal': False,
|
||||
'keys': {},
|
||||
'match_clients': {
|
||||
'any',
|
||||
},
|
||||
'zones': {},
|
||||
},
|
||||
},
|
||||
'zones': set(),
|
||||
},
|
||||
'nftables': {
|
||||
'input': {
|
||||
'tcp dport 53 accept',
|
||||
'udp dport 53 accept',
|
||||
},
|
||||
},
|
||||
'telegraf': {
|
||||
'config': {
|
||||
'inputs': {
|
||||
'bind': [{
|
||||
'urls': ['http://localhost:8053/xml/v3'],
|
||||
'gather_memory_contexts': False,
|
||||
'gather_views': True,
|
||||
}],
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'bind/type',
|
||||
'bind/master_ip',
|
||||
'bind/slave_ips',
|
||||
)
|
||||
def master_slave(metadata):
|
||||
if metadata.get('bind/master_node', None):
|
||||
return {
|
||||
'bind': {
|
||||
'type': 'slave',
|
||||
'master_ip': str(ip_interface(repo.get_node(metadata.get('bind/master_node')).metadata.get('network/external/ipv4')).ip),
|
||||
}
|
||||
}
|
||||
else:
|
||||
return {
|
||||
'bind': {
|
||||
'type': 'master',
|
||||
'slave_ips': {
|
||||
str(ip_interface(repo.get_node(slave).metadata.get('network/external/ipv4')).ip)
|
||||
for slave in metadata.get('bind/slaves')
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'dns',
|
||||
)
|
||||
def dns(metadata):
|
||||
return {
|
||||
'dns': {
|
||||
metadata.get('bind/hostname'): repo.libs.ip.get_a_records(metadata),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'bind/views',
|
||||
)
|
||||
def collect_records(metadata):
|
||||
if metadata.get('bind/type') == 'slave':
|
||||
return {}
|
||||
|
||||
views = {}
|
||||
|
||||
for view_name, view_conf in metadata.get('bind/views').items():
|
||||
for other_node in repo.nodes:
|
||||
for fqdn, records in other_node.metadata.get('dns', {}).items():
|
||||
matching_zones = sorted(
|
||||
filter(
|
||||
lambda potential_zone: fqdn.endswith(potential_zone),
|
||||
metadata.get('bind/zones')
|
||||
),
|
||||
key=len,
|
||||
)
|
||||
if matching_zones:
|
||||
zone = matching_zones[-1]
|
||||
else:
|
||||
continue
|
||||
|
||||
name = fqdn[0:-len(zone) - 1]
|
||||
|
||||
for type, values in records.items():
|
||||
for value in values:
|
||||
if repo.libs.bind.record_matches_view(value, type, name, zone, view_name, metadata):
|
||||
views\
|
||||
.setdefault(view_name, {})\
|
||||
.setdefault('zones', {})\
|
||||
.setdefault(zone, {})\
|
||||
.setdefault('records', set())\
|
||||
.add(
|
||||
h({'name': name, 'type': type, 'value': value})
|
||||
)
|
||||
|
||||
return {
|
||||
'bind': {
|
||||
'views': views,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'bind/views',
|
||||
)
|
||||
def ns_records(metadata):
|
||||
if metadata.get('bind/type') == 'slave':
|
||||
return {}
|
||||
|
||||
nameservers = [
|
||||
node.metadata.get('bind/hostname'),
|
||||
*[
|
||||
repo.get_node(slave).metadata.get('bind/hostname')
|
||||
for slave in node.metadata.get('bind/slaves')
|
||||
]
|
||||
]
|
||||
return {
|
||||
'bind': {
|
||||
'views': {
|
||||
view_name: {
|
||||
'zones': {
|
||||
zone_name: {
|
||||
'records': {
|
||||
# FIXME: bw currently cant handle lists of dicts :(
|
||||
h({'name': '@', 'type': 'NS', 'value': f"{nameserver}."})
|
||||
for nameserver in nameservers
|
||||
}
|
||||
}
|
||||
for zone_name, zone_conf in view_conf['zones'].items()
|
||||
}
|
||||
}
|
||||
for view_name, view_conf in metadata.get('bind/views').items()
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
    'bind/slaves',
)
def slaves(metadata):
    """Collect the names of all bind nodes that declare this node as
    their master."""
    if metadata.get('bind/type') == 'slave':
        return {}

    slave_names = []
    for candidate in repo.nodes:
        if not candidate.has_bundle('bind'):
            continue
        if candidate.metadata.get('bind/master_node', None) != node.name:
            continue
        slave_names.append(candidate.name)

    return {
        'bind': {
            'slaves': slave_names,
        },
    }
|
||||
|
||||
|
||||
@metadata_reactor.provides(
    'bind/views',
)
def generate_keys(metadata):
    """Derive a TSIG token for every key name declared in any view.

    The token is an HMAC-SHA512 over vault-generated random bytes keyed
    on node id and key name, so it is deterministic per node/key without
    being stored in plaintext metadata.
    """
    if metadata.get('bind/type') == 'slave':
        return {}

    return {
        'bind': {
            'views': {
                view_name: {
                    'keys': {
                        key: {
                            'token':repo.libs.hmac.hmac_sha512(
                                key,
                                # str() resolves the vault Fault to its value
                                str(repo.vault.random_bytes_as_base64_for(
                                    f"{metadata.get('id')} bind key {key}",
                                    length=32,
                                )),
                            )
                        }
                        for key in view_conf['keys']
                    }
                }
                for view_name, view_conf in metadata.get('bind/views').items()
            }
        }
    }
|
||||
|
||||
|
||||
@metadata_reactor.provides(
    'bind/views',
)
def generate_acl_entries_for_keys(metadata):
    """Build each view's match_clients ACL from the configured TSIG keys.

    Clients presenting one of the view's own keys are accepted; keys
    belonging to any other view are explicitly rejected.
    """
    if metadata.get('bind/type') == 'slave':
        return {}

    all_views = metadata.get('bind/views')
    acl_views = {}
    for view_name, view_conf in all_views.items():
        entries = set()
        # allow keys from this view
        for key in view_conf['keys']:
            entries.add(f'key {key}')
        # reject keys from other views
        for other_view_name, other_view_conf in all_views.items():
            if other_view_name == view_name:
                continue
            for key in other_view_conf.get('keys', []):
                entries.add(f'! key {key}')
        acl_views[view_name] = {'match_clients': entries}

    return {
        'bind': {
            'views': acl_views,
        },
    }
|
|
@ -1,38 +0,0 @@
|
|||
# Build agent: packages needed to link cross-compiled crystal binaries,
# plus the unprivileged user the build server logs in as.
defaults = {
    'apt': {
        'packages': {
            'build-essential': {},
            # crystal
            'clang': {},
            'libssl-dev': {},
            'libpcre3-dev': {},
            'libgc-dev': {},
            'libevent-dev': {},
            'zlib1g-dev': {},
        },
    },
    'users': {
        'build-agent': {
            'home': '/var/lib/build-agent',
        },
    },
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
    'users/build-agent/authorized_users',
)
def ssh_keys(metadata):
    """Authorize the build-server user of every node that has an
    architecture assigned to this build agent."""
    authorized = set()
    for other_node in repo.nodes:
        if not other_node.has_bundle('build-server'):
            continue
        for architecture in other_node.metadata.get('build-server/architectures').values():
            if architecture['node'] == node.name:
                authorized.add(f'build-server@{other_node.name}')

    return {
        'users': {
            'build-agent': {
                'authorized_users': authorized,
            },
        },
    }
|
|
@ -1,9 +0,0 @@
|
|||
# Create each configured CI deployment directory, owned by the build-ci
# user and writable by the project's group.
# ('directories' is this bundle's bw items dict, defined by the framework.)
for project, options in node.metadata.get('build-ci').items():
    directories[options['path']] = {
        'owner': 'build-ci',
        'group': options['group'],
        'mode': '770',
        'needs': [
            'user:build-ci',
        ],
    }
|
|
@ -1,29 +0,0 @@
|
|||
from shlex import quote
|
||||
|
||||
|
||||
# No CI integrations by default; nodes add theirs under 'build-ci'.
defaults = {
    'build-ci': {},
}
|
||||
|
||||
@metadata_reactor.provides(
    'users/build-ci/authorized_users',
    'sudoers/build-ci',
)
def ssh_keys(metadata):
    """Authorize all build servers to deploy via the build-ci user.

    Also grants build-ci passwordless sudo for exactly the chown commands
    needed to hand deployed files over to each project's target group.
    """
    return {
        'users': {
            'build-ci': {
                'authorized_users': {
                    f'build-server@{other_node.name}'
                    for other_node in repo.nodes
                    if other_node.has_bundle('build-server')
                },
            },
        },
        'sudoers': {
            'build-ci': {
                # '\:' keeps the colon literal for the sudoers parser;
                # shlex.quote guards against shell metacharacters in
                # group and path values
                f"/usr/bin/chown -R build-ci\\:{quote(ci['group'])} {quote(ci['path'])}"
                for ci in metadata.get('build-ci').values()
            }
        },
    }
|
|
@ -1,2 +0,0 @@
|
|||
# Manual smoke test: POST the example Gitea webhook payload to the build
# server's crystal strategy endpoint.
JSON=$(cat bundles/build-server/example.json)
curl -X POST 'https://build.sublimity.de/crystal?file=procio.cr' -H "Content-Type: application/json" --data-binary @- <<< $JSON
|
|
@ -1,169 +0,0 @@
|
|||
{
|
||||
"after": "122d7843c7814079e8df4919b0208c95ec7c75e3",
|
||||
"before": "7a358255247926363ef0ef34111f0bc786a8c6f4",
|
||||
"commits": [
|
||||
{
|
||||
"added": [],
|
||||
"author": {
|
||||
"email": "mwiegand@seibert-media.net",
|
||||
"name": "mwiegand",
|
||||
"username": ""
|
||||
},
|
||||
"committer": {
|
||||
"email": "mwiegand@seibert-media.net",
|
||||
"name": "mwiegand",
|
||||
"username": ""
|
||||
},
|
||||
"id": "122d7843c7814079e8df4919b0208c95ec7c75e3",
|
||||
"message": "wip\n",
|
||||
"modified": [
|
||||
"README.md"
|
||||
],
|
||||
"removed": [],
|
||||
"timestamp": "2021-11-16T22:10:05+01:00",
|
||||
"url": "https://git.sublimity.de/cronekorkn/telegraf-procio/commit/122d7843c7814079e8df4919b0208c95ec7c75e3",
|
||||
"verification": null
|
||||
}
|
||||
],
|
||||
"compare_url": "https://git.sublimity.de/cronekorkn/telegraf-procio/compare/7a358255247926363ef0ef34111f0bc786a8c6f4...122d7843c7814079e8df4919b0208c95ec7c75e3",
|
||||
"head_commit": {
|
||||
"added": [],
|
||||
"author": {
|
||||
"email": "mwiegand@seibert-media.net",
|
||||
"name": "mwiegand",
|
||||
"username": ""
|
||||
},
|
||||
"committer": {
|
||||
"email": "mwiegand@seibert-media.net",
|
||||
"name": "mwiegand",
|
||||
"username": ""
|
||||
},
|
||||
"id": "122d7843c7814079e8df4919b0208c95ec7c75e3",
|
||||
"message": "wip\n",
|
||||
"modified": [
|
||||
"README.md"
|
||||
],
|
||||
"removed": [],
|
||||
"timestamp": "2021-11-16T22:10:05+01:00",
|
||||
"url": "https://git.sublimity.de/cronekorkn/telegraf-procio/commit/122d7843c7814079e8df4919b0208c95ec7c75e3",
|
||||
"verification": null
|
||||
},
|
||||
"pusher": {
|
||||
"active": false,
|
||||
"avatar_url": "https://git.sublimity.de/user/avatar/cronekorkn/-1",
|
||||
"created": "2021-06-13T19:19:25+02:00",
|
||||
"description": "",
|
||||
"email": "i@ckn.li",
|
||||
"followers_count": 0,
|
||||
"following_count": 0,
|
||||
"full_name": "",
|
||||
"id": 1,
|
||||
"is_admin": false,
|
||||
"language": "",
|
||||
"last_login": "0001-01-01T00:00:00Z",
|
||||
"location": "",
|
||||
"login": "cronekorkn",
|
||||
"prohibit_login": false,
|
||||
"restricted": false,
|
||||
"starred_repos_count": 0,
|
||||
"username": "cronekorkn",
|
||||
"visibility": "public",
|
||||
"website": ""
|
||||
},
|
||||
"ref": "refs/heads/master",
|
||||
"repository": {
|
||||
"allow_merge_commits": true,
|
||||
"allow_rebase": true,
|
||||
"allow_rebase_explicit": true,
|
||||
"allow_squash_merge": true,
|
||||
"archived": false,
|
||||
"avatar_url": "",
|
||||
"clone_url": "https://git.sublimity.de/cronekorkn/telegraf-procio.git",
|
||||
"created_at": "2021-11-05T18:46:04+01:00",
|
||||
"default_branch": "master",
|
||||
"default_merge_style": "merge",
|
||||
"description": "",
|
||||
"empty": false,
|
||||
"fork": false,
|
||||
"forks_count": 0,
|
||||
"full_name": "cronekorkn/telegraf-procio",
|
||||
"has_issues": true,
|
||||
"has_projects": true,
|
||||
"has_pull_requests": true,
|
||||
"has_wiki": true,
|
||||
"html_url": "https://git.sublimity.de/cronekorkn/telegraf-procio",
|
||||
"id": 5,
|
||||
"ignore_whitespace_conflicts": false,
|
||||
"internal": false,
|
||||
"internal_tracker": {
|
||||
"allow_only_contributors_to_track_time": true,
|
||||
"enable_issue_dependencies": true,
|
||||
"enable_time_tracker": true
|
||||
},
|
||||
"mirror": false,
|
||||
"mirror_interval": "",
|
||||
"name": "telegraf-procio",
|
||||
"open_issues_count": 0,
|
||||
"open_pr_counter": 0,
|
||||
"original_url": "",
|
||||
"owner": {
|
||||
"active": false,
|
||||
"avatar_url": "https://git.sublimity.de/user/avatar/cronekorkn/-1",
|
||||
"created": "2021-06-13T19:19:25+02:00",
|
||||
"description": "",
|
||||
"email": "i@ckn.li",
|
||||
"followers_count": 0,
|
||||
"following_count": 0,
|
||||
"full_name": "",
|
||||
"id": 1,
|
||||
"is_admin": false,
|
||||
"language": "",
|
||||
"last_login": "0001-01-01T00:00:00Z",
|
||||
"location": "",
|
||||
"login": "cronekorkn",
|
||||
"prohibit_login": false,
|
||||
"restricted": false,
|
||||
"starred_repos_count": 0,
|
||||
"username": "cronekorkn",
|
||||
"visibility": "public",
|
||||
"website": ""
|
||||
},
|
||||
"parent": null,
|
||||
"permissions": {
|
||||
"admin": true,
|
||||
"pull": true,
|
||||
"push": true
|
||||
},
|
||||
"private": false,
|
||||
"release_counter": 0,
|
||||
"size": 28,
|
||||
"ssh_url": "git@git.sublimity.de:cronekorkn/telegraf-procio.git",
|
||||
"stars_count": 0,
|
||||
"template": false,
|
||||
"updated_at": "2021-11-16T21:41:40+01:00",
|
||||
"watchers_count": 1,
|
||||
"website": ""
|
||||
},
|
||||
"sender": {
|
||||
"active": false,
|
||||
"avatar_url": "https://git.sublimity.de/user/avatar/cronekorkn/-1",
|
||||
"created": "2021-06-13T19:19:25+02:00",
|
||||
"description": "",
|
||||
"email": "i@ckn.li",
|
||||
"followers_count": 0,
|
||||
"following_count": 0,
|
||||
"full_name": "",
|
||||
"id": 1,
|
||||
"is_admin": false,
|
||||
"language": "",
|
||||
"last_login": "0001-01-01T00:00:00Z",
|
||||
"location": "",
|
||||
"login": "cronekorkn",
|
||||
"prohibit_login": false,
|
||||
"restricted": false,
|
||||
"starred_repos_count": 0,
|
||||
"username": "cronekorkn",
|
||||
"visibility": "public",
|
||||
"website": ""
|
||||
}
|
||||
}
|
|
@ -1,31 +0,0 @@
|
|||
#!/bin/bash

# Deploy strategy "ci": called by the build server with the Gitea push
# webhook payload as $1. For every configured CI integration matching the
# pushed repository and branch, clone the repo and mirror it to the
# target host via the build-ci user.
#
# ${config_path} is substituted by mako when this file is deployed.

set -xu


CONFIG_PATH=${config_path}
JSON="$1"
REPO_NAME=$(jq -r .repository.name <<< $JSON)
CLONE_URL=$(jq -r .repository.clone_url <<< $JSON)
REPO_BRANCH=$(jq -r .ref <<< $JSON | cut -d'/' -f3)  # refs/heads/<branch>
SSH_OPTIONS='-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'

# FIX: the previous `for INTEGRATION in "$(... jq -r ...)"` collapsed the
# entire (pretty-printed) jq output into a single loop iteration, so only
# a mangled blob was ever processed. Emit one compact JSON object per line
# and read them one at a time; process substitution keeps the loop in the
# main shell so `exit 5` still aborts the whole script.
while read -r INTEGRATION
do
    [[ $(jq -r '.repo' <<< $INTEGRATION) = "$REPO_NAME" ]] || continue
    [[ $(jq -r '.branch' <<< $INTEGRATION) = "$REPO_BRANCH" ]] || continue

    HOSTNAME=$(jq -r '.hostname' <<< $INTEGRATION)
    DEST_PATH=$(jq -r '.path' <<< $INTEGRATION)
    DEST_GROUP=$(jq -r '.group' <<< $INTEGRATION)

    # refuse to run with an incomplete integration definition
    [[ -z "$HOSTNAME" ]] || [[ -z "$DEST_PATH" ]] || [[ -z "$DEST_GROUP" ]] && exit 5

    # fresh checkout
    cd ~
    rm -rf "$REPO_NAME"
    git clone "$CLONE_URL" "$REPO_NAME"

    # wipe the target directory, copy the checkout, then fix ownership
    ssh $SSH_OPTIONS "build-ci@$HOSTNAME" "find \"$DEST_PATH\" -mindepth 1 -delete"
    scp -r $SSH_OPTIONS "$REPO_NAME"/* "build-ci@$HOSTNAME:$DEST_PATH"
    ssh $SSH_OPTIONS "build-ci@$HOSTNAME" "sudo chown -R build-ci:$DEST_GROUP $(printf "%q" "$DEST_PATH")"
done < <(jq -c '.ci | values[]' "$CONFIG_PATH")
|
|
@ -1,32 +0,0 @@
|
|||
#!/bin/bash

# Build strategy "crystal": clone the pushed repository, cross-compile it
# for every configured architecture, link on the matching build agent and
# publish the resulting binaries on the download server.
#
# $1: Gitea webhook JSON payload; $2: extra args JSON ({"file": ...}).
# ${download_server} and ${config_path} are substituted by mako on deploy.

set -exu

DOWNLOAD_SERVER="${download_server}"
CONFIG=$(cat ${config_path})
JSON="$1"
ARGS="$2"
REPO_NAME=$(jq -r .repository.name <<< $JSON)
CLONE_URL=$(jq -r .repository.clone_url <<< $JSON)
BUILD_FILE=$(jq -r .file <<< $ARGS)
# build timestamp used to version the published artifacts
DATE=$(date --utc +%s)

# fresh checkout with crystal dependencies
cd ~
rm -rf "$REPO_NAME"
git clone "$CLONE_URL"
cd "$REPO_NAME"
shards install

for ARCH in $(jq -r '.architectures | keys[]' <<< $CONFIG)
do
    TARGET=$(jq -r .architectures.$ARCH.target <<< $CONFIG)
    IP=$(jq -r .architectures.$ARCH.ip <<< $CONFIG)
    # --cross-compile emits an object file locally and prints the link
    # command that must be run on the target architecture
    BUILD_CMD=$(crystal build "$BUILD_FILE" --cross-compile --target="$TARGET" --release -o "$REPO_NAME")

    # link on the build agent, then fetch the finished binary back
    scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "$REPO_NAME.o" "build-agent@$IP:~"
    ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "build-agent@$IP" $BUILD_CMD
    scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "build-agent@$IP:~/$REPO_NAME" .
    # publish as <name>-<arch>-<timestamp> and update the -latest symlink
    ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "downloads@$DOWNLOAD_SERVER" mkdir -p "~/$REPO_NAME"
    scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "$REPO_NAME" "downloads@$DOWNLOAD_SERVER:~/$REPO_NAME/$REPO_NAME-$ARCH-$DATE"
    ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "downloads@$DOWNLOAD_SERVER" ln -sf "$REPO_NAME-$ARCH-$DATE" "~/$REPO_NAME/$REPO_NAME-$ARCH-latest"
done
|
|
@ -1,32 +0,0 @@
|
|||
import json
from bundlewrap.metadata import MetadataJSONEncoder

directories = {
    # deploy strategy scripts live here (see 'files' below)
    '/opt/build-server/strategies': {
        'owner': 'build-server',
    },
}

files = {
    # complete build-server metadata, consumed by the strategy scripts
    # via jq at runtime
    '/etc/build-server.json': {
        'owner': 'build-server',
        # MetadataJSONEncoder handles bundlewrap-specific metadata types
        # (e.g. sets) that plain json.dumps would reject
        'content': json.dumps(node.metadata.get('build-server'), indent=4, sort_keys=True, cls=MetadataJSONEncoder)
    },
    '/opt/build-server/strategies/crystal': {
        'content_type': 'mako',
        'owner': 'build-server',
        'mode': '0777', # FIXME
        'context': {
            'config_path': '/etc/build-server.json',
            'download_server': node.metadata.get('build-server/download_server_ip'),
        },
    },
    '/opt/build-server/strategies/ci': {
        'content_type': 'mako',
        'owner': 'build-server',
        'mode': '0777', # FIXME
        'context': {
            'config_path': '/etc/build-server.json',
        },
    },
}
|
|
@ -1,78 +0,0 @@
|
|||
from ipaddress import ip_interface
|
||||
|
||||
defaults = {
|
||||
'flask': {
|
||||
'build-server' : {
|
||||
'git_url': "https://git.sublimity.de/cronekorkn/build-server.git",
|
||||
'port': 4000,
|
||||
'app_module': 'build_server',
|
||||
'user': 'build-server',
|
||||
'group': 'build-server',
|
||||
'timeout': 900,
|
||||
'env': {
|
||||
'CONFIG': '/etc/build-server.json',
|
||||
'STRATEGIES_DIR': '/opt/build-server/strategies',
|
||||
},
|
||||
},
|
||||
},
|
||||
'users': {
|
||||
'build-server': {
|
||||
'home': '/var/lib/build-server',
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
    'build-server',
)
def agent_conf(metadata):
    """Resolve build agent and download server node names to internal IPs.

    The strategy scripts only see /etc/build-server.json, so node names
    are translated to concrete addresses here.
    """
    download_server = repo.get_node(metadata.get('build-server/download_server'))
    return {
        'build-server': {
            'architectures': {
                architecture: {
                    # ip_interface strips the /prefix from the stored value
                    'ip': str(ip_interface(repo.get_node(conf['node']).metadata.get('network/internal/ipv4')).ip),
                }
                for architecture, conf in metadata.get('build-server/architectures').items()
            },
            'download_server_ip': str(ip_interface(download_server.metadata.get('network/internal/ipv4')).ip),
        },
    }
|
||||
|
||||
@metadata_reactor.provides(
    'build-server',
)
def ci(metadata):
    """Aggregate all CI integrations declared by build-ci nodes.

    Each integration is keyed '<repo>@<node>' and carries the target
    hostname plus the node's own options (path, group, branch, ...).
    """
    return {
        'build-server': {
            'ci': {
                # FIX: the comprehension variable was previously named
                # 'repo', shadowing the global bundlewrap repo object that
                # the first for-clause itself reads — renamed to repo_name.
                f'{repo_name}@{other_node.name}': {
                    'hostname': other_node.metadata.get('hostname'),
                    'repo': repo_name,
                    **options,
                }
                for other_node in repo.nodes
                if other_node.has_bundle('build-ci')
                for repo_name, options in other_node.metadata.get('build-ci').items()
            },
        },
    }
|
||||
|
||||
@metadata_reactor.provides(
    'nginx/vhosts',
)
def nginx(metadata):
    """Expose the build server through an nginx reverse-proxy vhost."""
    hostname = metadata.get('build-server/hostname')
    vhost = {
        'content': 'nginx/proxy_pass.conf',
        'context': {
            'target': 'http://127.0.0.1:4000',
        },
        'check_path': '/status',
    }
    return {
        'nginx': {
            'vhosts': {
                hostname: vhost,
            },
        },
    }
|
|
@ -1,20 +0,0 @@
|
|||
# NOTE(review): debian_version is computed but not referenced below (the
# apt URL is pinned to Debian_Testing) — presumably a leftover from an
# earlier per-release URL; verify before removing.
debian_version = min([node.os_version, (11,)])[0] # FIXME

defaults = {
    'apt': {
        'packages': {
            'crystal': {},
        },
        'sources': {
            'crystal': {
                # https://software.opensuse.org/download.html?project=devel%3Alanguages%3Acrystal&package=crystal
                'urls': {
                    'http://download.opensuse.org/repositories/devel:/languages:/crystal/Debian_Testing/',
                },
                # flat repository layout: suite is '/'
                'suites': {
                    '/',
                },
            },
        },
    },
}
|
|
@ -1,24 +0,0 @@
|
|||
dm-crypt
|
||||
========
|
||||
|
||||
Create encrypted block devices using `dm-crypt` on GNU/Linux. Unlocking
|
||||
these devices will be done on runs of `bw apply`.
|
||||
|
||||
Metadata
|
||||
--------
|
||||
|
||||
'dm-crypt': {
|
||||
'encrypted-devices': {
|
||||
'foobar': {
|
||||
'device': '/dev/sdb',
|
||||
# either
|
||||
'salt': 'muWWU7dr+5Wtk+56OLdqUNZccnzXPUTJprMSMxkstR8=',
|
||||
# or
|
||||
'password': vault.decrypt('passphrase'),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
This will encrypt `/dev/sdb` using the specified passphrase. When the
|
||||
device is going to be unlocked, it will be available as
|
||||
`/dev/mapper/foobar`.
|
|
@ -1,46 +0,0 @@
|
|||
# For every configured dm-crypt device: format once (destructive, guarded
# by the LUKS-header check), verify the stored passphrase, then open the
# /dev/mapper/<name> mapping.
for name, conf in node.metadata.get('dm-crypt').items():
    actions[f'dm-crypt_format_{name}'] = {
        'command': f"cryptsetup --batch-mode luksFormat --cipher aes-xts-plain64 --key-size 512 '{conf['device']}'",
        'data_stdin': conf['password'],
        # only format devices that don't already carry a LUKS header
        'unless': f"blkid -t TYPE=crypto_LUKS '{conf['device']}'",
        'comment': f"WARNING: This DESTROYS the contents of the device: '{conf['device']}'",
        'needs': {
            'pkg_apt:cryptsetup',
        },
    }

    # fail the apply early if the stored passphrase can't unlock the device
    # ('command': 'false' only runs when the 'unless' test-passphrase fails)
    actions[f'dm-crypt_test_{name}'] = {
        'command': 'false',
        'unless': f"! cryptsetup --batch-mode luksOpen --test-passphrase '{conf['device']}'",
        'data_stdin': conf['password'],
        'needs': {
            f"action:dm-crypt_format_{name}",
        },
    }

    actions[f'dm-crypt_open_{name}'] = {
        'command': f"cryptsetup --batch-mode luksOpen '{conf['device']}' '{name}'",
        'data_stdin': conf['password'],
        'unless': f"test -e /dev/mapper/{name}",
        'comment': f"Unlocks the device '{conf['device']}' and makes it available in: '/dev/mapper/{name}'",
        'needs': {
            f"action:dm-crypt_test_{name}",
        },
        # filled below for zfs pools living on this mapping
        'needed_by': set(),
    }

    if node.has_bundle('zfs'):
        # pools on top of this mapping must only be touched after the
        # device is unlocked; import them if they aren't known yet
        for pool, pool_conf in node.metadata.get('zfs/pools').items():
            if f'/dev/mapper/{name}' in pool_conf['devices']:
                actions[f'dm-crypt_open_{name}']['needed_by'].add(f'zfs_pool:{pool}')

                actions[f'zpool_import_{name}'] = {
                    'command': f"zpool import -d /dev/mapper/{name} {pool}",
                    'unless': f"zpool status {pool}",
                    'needs': {
                        f"action:dm-crypt_open_{name}",
                    },
                    'needed_by': {
                        f"zfs_pool:{pool}",
                    },
                }
|
|
@ -1,22 +0,0 @@
|
|||
defaults = {
|
||||
'apt': {
|
||||
'packages': {
|
||||
'cryptsetup': {},
|
||||
},
|
||||
},
|
||||
'dm-crypt': {},
|
||||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
    'dm-crypt',
)
def password_from_salt(metadata):
    """Derive a deterministic unlock passphrase for every dm-crypt device.

    The passphrase comes from the repo vault, keyed on node id and device
    name, so it is stable across applies without being stored in metadata.
    """
    return {
        'dm-crypt': {
            name: {
                'password': repo.vault.password_for(f"dm-crypt/{metadata.get('id')}/{name}"),
            }
            # values are unused here — only the device names matter
            for name in metadata.get('dm-crypt')
        }
    }
|
|
@ -1,12 +0,0 @@
|
|||
DOVECOT
|
||||
=======
|
||||
|
||||
rescan index
|
||||
------------
|
||||
|
||||
https://doc.dovecot.org/configuration_manual/fts/#rescan
|
||||
|
||||
```
|
||||
doveadm fts rescan -u 'i@ckn.li'
|
||||
doveadm index -u 'i@ckn.li' -q '*'
|
||||
```
|
|
@ -1,104 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
# Example attachment decoder script. The attachment comes from stdin, and
|
||||
# the script is expected to output UTF-8 data to stdout. (If the output isn't
|
||||
# UTF-8, everything except valid UTF-8 sequences are dropped from it.)
|
||||
|
||||
# The attachment decoding is enabled by setting:
|
||||
#
|
||||
# plugin {
|
||||
# fts_decoder = decode2text
|
||||
# }
|
||||
# service decode2text {
|
||||
# executable = script /usr/local/libexec/dovecot/decode2text.sh
|
||||
# user = dovecot
|
||||
# unix_listener decode2text {
|
||||
# mode = 0666
|
||||
# }
|
||||
# }
|
||||
|
||||
libexec_dir=`dirname $0`
|
||||
content_type=$1
|
||||
|
||||
# The second parameter is the format's filename extension, which is used when
|
||||
# found from a filename of application/octet-stream. You can also add more
|
||||
# extensions by giving more parameters.
|
||||
formats='application/pdf pdf
|
||||
application/x-pdf pdf
|
||||
application/msword doc
|
||||
application/mspowerpoint ppt
|
||||
application/vnd.ms-powerpoint ppt
|
||||
application/ms-excel xls
|
||||
application/x-msexcel xls
|
||||
application/vnd.ms-excel xls
|
||||
application/vnd.openxmlformats-officedocument.wordprocessingml.document docx
|
||||
application/vnd.openxmlformats-officedocument.spreadsheetml.sheet xlsx
|
||||
application/vnd.openxmlformats-officedocument.presentationml.presentation pptx
|
||||
application/vnd.oasis.opendocument.text odt
|
||||
application/vnd.oasis.opendocument.spreadsheet ods
|
||||
application/vnd.oasis.opendocument.presentation odp
|
||||
'
|
||||
|
||||
if [ "$content_type" = "" ]; then
|
||||
echo "$formats"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
fmt=`echo "$formats" | grep -w "^$content_type" | cut -d ' ' -f 2`
|
||||
if [ "$fmt" = "" ]; then
|
||||
echo "Content-Type: $content_type not supported" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# most decoders can't handle stdin directly, so write the attachment
|
||||
# to a temp file
|
||||
path=`mktemp`
|
||||
trap "rm -f $path" 0 1 2 3 14 15
|
||||
cat > $path
|
||||
|
||||
xmlunzip() {
|
||||
name=$1
|
||||
|
||||
tempdir=`mktemp -d`
|
||||
if [ "$tempdir" = "" ]; then
|
||||
exit 1
|
||||
fi
|
||||
trap "rm -rf $path $tempdir" 0 1 2 3 14 15
|
||||
cd $tempdir || exit 1
|
||||
unzip -q "$path" 2>/dev/null || exit 0
|
||||
find . -name "$name" -print0 | xargs -0 cat | /usr/lib/dovecot/xml2text
|
||||
}
|
||||
|
||||
wait_timeout() {
|
||||
childpid=$!
|
||||
trap "kill -9 $childpid; rm -f $path" 1 2 3 14 15
|
||||
wait $childpid
|
||||
}
|
||||
|
||||
LANG=en_US.UTF-8
|
||||
export LANG
|
||||
if [ $fmt = "pdf" ]; then
|
||||
/usr/bin/pdftotext $path - 2>/dev/null&
|
||||
wait_timeout 2>/dev/null
|
||||
elif [ $fmt = "doc" ]; then
|
||||
(/usr/bin/catdoc $path; true) 2>/dev/null&
|
||||
wait_timeout 2>/dev/null
|
||||
elif [ $fmt = "ppt" ]; then
|
||||
(/usr/bin/catppt $path; true) 2>/dev/null&
|
||||
wait_timeout 2>/dev/null
|
||||
elif [ $fmt = "xls" ]; then
|
||||
(/usr/bin/xls2csv $path; true) 2>/dev/null&
|
||||
wait_timeout 2>/dev/null
|
||||
elif [ $fmt = "odt" -o $fmt = "ods" -o $fmt = "odp" ]; then
|
||||
xmlunzip "content.xml"
|
||||
elif [ $fmt = "docx" ]; then
|
||||
xmlunzip "document.xml"
|
||||
elif [ $fmt = "xlsx" ]; then
|
||||
xmlunzip "sharedStrings.xml"
|
||||
elif [ $fmt = "pptx" ]; then
|
||||
xmlunzip "slide*.xml"
|
||||
else
|
||||
echo "Buggy decoder script: $fmt not handled" >&2
|
||||
exit 1
|
||||
fi
|
||||
exit 0
|
|
@ -1,17 +0,0 @@
|
|||
# Dovecot SQL backend; the ${...} placeholders are filled in by mako from
# the 'mailserver/database' metadata when this file is deployed.
connect = host=${host} dbname=${name} user=${user} password=${password}
driver = pgsql
default_pass_scheme = ARGON2ID

# every mailbox lives under /var/vmail/<user> and is owned by vmail
user_query = SELECT '/var/vmail/%u' AS home, 'vmail' AS uid, 'vmail' AS gid

# rows with a redirect are aliases, not real mailboxes
iterate_query = SELECT CONCAT(users.name, '@', domains.name) AS user \
FROM users \
LEFT JOIN domains ON users.domain_id = domains.id \
WHERE redirect IS NULL

password_query = SELECT CONCAT(users.name, '@', domains.name) AS user, password \
FROM users \
LEFT JOIN domains ON users.domain_id = domains.id \
WHERE redirect IS NULL \
AND users.name = SPLIT_PART('%u', '@', 1) \
AND domains.name = SPLIT_PART('%u', '@', 2)
|
|
@ -1,135 +0,0 @@
|
|||
protocols = imap lmtp sieve
|
||||
auth_mechanisms = plain login
|
||||
mail_privileged_group = mail
|
||||
ssl = required
|
||||
ssl_cert = </var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')}/fullchain.pem
|
||||
ssl_key = </var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')}/privkey.pem
|
||||
ssl_dh = </etc/dovecot/dhparam.pem
|
||||
ssl_client_ca_dir = /etc/ssl/certs
|
||||
mail_location = maildir:${node.metadata.get('mailserver/maildir')}/%u:INDEX=${node.metadata.get('mailserver/maildir')}/index/%u
|
||||
mail_plugins = fts fts_xapian
|
||||
|
||||
namespace inbox {
|
||||
inbox = yes
|
||||
separator = .
|
||||
mailbox Drafts {
|
||||
auto = subscribe
|
||||
special_use = \Drafts
|
||||
}
|
||||
mailbox Junk {
|
||||
auto = create
|
||||
special_use = \Junk
|
||||
}
|
||||
mailbox Trash {
|
||||
auto = subscribe
|
||||
special_use = \Trash
|
||||
}
|
||||
mailbox Sent {
|
||||
auto = subscribe
|
||||
special_use = \Sent
|
||||
}
|
||||
}
|
||||
|
||||
passdb {
|
||||
driver = sql
|
||||
args = /etc/dovecot/dovecot-sql.conf
|
||||
}
|
||||
# use sql for userdb too, to enable iterate_query
|
||||
userdb {
|
||||
driver = sql
|
||||
args = /etc/dovecot/dovecot-sql.conf
|
||||
}
|
||||
|
||||
service auth {
|
||||
unix_listener /var/spool/postfix/private/auth {
|
||||
mode = 0660
|
||||
user = postfix
|
||||
group = postfix
|
||||
}
|
||||
}
|
||||
service lmtp {
|
||||
unix_listener /var/spool/postfix/private/dovecot-lmtp {
|
||||
mode = 0600
|
||||
user = postfix
|
||||
group = postfix
|
||||
}
|
||||
}
|
||||
service stats {
|
||||
unix_listener stats-reader {
|
||||
user = vmail
|
||||
group = vmail
|
||||
mode = 0660
|
||||
}
|
||||
unix_listener stats-writer {
|
||||
user = vmail
|
||||
group = vmail
|
||||
mode = 0660
|
||||
}
|
||||
}
|
||||
service managesieve-login {
|
||||
inet_listener sieve {
|
||||
}
|
||||
process_min_avail = 0
|
||||
service_count = 1
|
||||
vsz_limit = 64 M
|
||||
}
|
||||
service managesieve {
|
||||
process_limit = 100
|
||||
}
|
||||
|
||||
protocol imap {
|
||||
mail_plugins = $mail_plugins imap_sieve
|
||||
mail_max_userip_connections = 50
|
||||
imap_idle_notify_interval = 29 mins
|
||||
}
|
||||
protocol lmtp {
|
||||
mail_plugins = $mail_plugins sieve
|
||||
}
|
||||
protocol sieve {
|
||||
plugin {
|
||||
sieve = /var/vmail/sieve/%u.sieve
|
||||
sieve_storage = /var/vmail/sieve/%u/
|
||||
}
|
||||
}
|
||||
|
||||
# fulltext search
|
||||
plugin {
|
||||
fts = xapian
|
||||
fts_xapian = partial=3 full=20 verbose=0
|
||||
fts_autoindex = yes
|
||||
fts_enforced = yes
|
||||
# Index attachements
|
||||
fts_decoder = decode2text
|
||||
}
|
||||
service indexer-worker {
|
||||
vsz_limit = ${indexer_ram}
|
||||
}
|
||||
service decode2text {
|
||||
executable = script /usr/local/libexec/dovecot/decode2text.sh
|
||||
user = dovecot
|
||||
unix_listener decode2text {
|
||||
mode = 0666
|
||||
}
|
||||
}
|
||||
|
||||
# spam filter
|
||||
plugin {
|
||||
sieve_plugins = sieve_imapsieve sieve_extprograms
|
||||
sieve_dir = /var/vmail/sieve/%u/
|
||||
sieve = /var/vmail/sieve/%u.sieve
|
||||
sieve_pipe_bin_dir = /var/vmail/sieve/bin
|
||||
sieve_extensions = +vnd.dovecot.pipe
|
||||
|
||||
sieve_after = /var/vmail/sieve/global/spam-to-folder.sieve
|
||||
|
||||
# From elsewhere to Spam folder
|
||||
imapsieve_mailbox1_name = Junk
|
||||
imapsieve_mailbox1_causes = COPY
|
||||
imapsieve_mailbox1_before = file:/var/vmail/sieve/global/learn-spam.sieve
|
||||
|
||||
# From Spam folder to elsewhere
|
||||
imapsieve_mailbox2_name = *
|
||||
imapsieve_mailbox2_from = Junk
|
||||
imapsieve_mailbox2_causes = COPY
|
||||
imapsieve_mailbox2_before = file:/var/vmail/sieve/global/learn-ham.sieve
|
||||
}
|
|
@ -1,2 +0,0 @@
|
|||
#!/bin/sh
# Train rspamd with a ham message piped in by sieve (learn-ham.sieve).
exec /usr/bin/rspamc learn_ham
|
|
@ -1,7 +0,0 @@
|
|||
require ["vnd.dovecot.pipe", "copy", "imapsieve", "variables"];

# mails moved out of Junk are ham — except plain deletions to Trash
if string "${mailbox}" "Trash" {
    stop;
}

pipe :copy "learn-ham.sh";
|
|
@ -1,2 +0,0 @@
|
|||
#!/bin/sh
# Train rspamd with a spam message piped in by sieve (learn-spam.sieve).
exec /usr/bin/rspamc learn_spam
|
|
@ -1,3 +0,0 @@
|
|||
require ["vnd.dovecot.pipe", "copy", "imapsieve"];

# any mail copied into the Junk folder trains the spam filter
pipe :copy "learn-spam.sh";
|
|
@ -1,6 +0,0 @@
|
|||
require ["fileinto", "mailbox"];

# the spam filter tags detected spam with "X-Spam: Yes" — file it into
# Junk, creating the mailbox if it doesn't exist yet
if header :contains "X-Spam" "Yes" {
    fileinto :create "Junk";
    stop;
}
|
|
@ -1,145 +0,0 @@
|
|||
assert node.has_bundle('mailserver')

# mailbox owner; all mail, index and sieve data lives under its home
users['vmail'] = {
    'home': '/var/vmail',
}

directories = {
    '/etc/dovecot': {
        'purge': True,
    },
    '/etc/dovecot/conf.d': {
        'purge': True,
        # purging must not race the packages that ship files in here
        'needs': [
            'pkg_apt:dovecot-sieve',
            'pkg_apt:dovecot-managesieved',
        ]
    },
    '/etc/dovecot/ssl': {},
    '/var/vmail': {
        'owner': 'vmail',
        'group': 'vmail',
    },
    '/var/vmail/index': {
        'owner': 'vmail',
        'group': 'vmail',
    },
    '/var/vmail/sieve': {
        'owner': 'vmail',
        'group': 'vmail',
    },
    '/var/vmail/sieve/global': {
        'owner': 'vmail',
        'group': 'vmail',
    },
    '/var/vmail/sieve/bin': {
        'owner': 'vmail',
        'group': 'vmail',
    },
}

files = {
    '/etc/dovecot/dovecot.conf': {
        'content_type': 'mako',
        'context': {
            'admin_email': node.metadata.get('mailserver/admin_email'),
            'indexer_ram': node.metadata.get('dovecot/indexer_ram'),
        },
        'needs': {
            'pkg_apt:'
        },
        'triggers': {
            'svc_systemd:dovecot:restart',
        },
    },
    # FIX: this entry was previously declared twice with identical content;
    # the duplicate dict key silently overwrote the first and was removed.
    '/etc/dovecot/dovecot-sql.conf': {
        'content_type': 'mako',
        'context': node.metadata.get('mailserver/database'),
        'needs': {
            'pkg_apt:'
        },
        'triggers': {
            'svc_systemd:dovecot:restart',
        },
    },
    '/etc/dovecot/dhparam.pem': {
        # generated by the action below; content itself is not managed
        'content_type': 'any',
    },
    '/var/vmail/sieve/global/spam-to-folder.sieve': {
        'owner': 'vmail',
        'group': 'vmail',
        'triggers': {
            'svc_systemd:dovecot:restart',
        },
    },
    '/var/vmail/sieve/global/learn-ham.sieve': {
        'owner': 'vmail',
        'group': 'vmail',
        'triggers': {
            'svc_systemd:dovecot:restart',
        },
    },
    '/var/vmail/sieve/bin/learn-ham.sh': {
        'owner': 'vmail',
        'group': 'vmail',
        'mode': '550',
    },
    '/var/vmail/sieve/global/learn-spam.sieve': {
        'owner': 'vmail',
        'group': 'vmail',
        'triggers': {
            'svc_systemd:dovecot:restart',
        },
    },
    # /usr/local/libexec/dovecot?
    # /usr/lib/dovecot/sieve-pipe?
    '/var/vmail/sieve/bin/learn-spam.sh': {
        'owner': 'vmail',
        'group': 'vmail',
        'mode': '550',
    },
}

actions = {
    'dovecot_generate_dhparam': {
        'command': 'openssl dhparam -out /etc/dovecot/dhparam.pem 2048',
        'unless': 'test -f /etc/dovecot/dhparam.pem',
        'cascade_skip': False,
        'needs': {
            'pkg_apt:',
            'directory:/etc/dovecot/ssl',
        },
        'triggers': {
            'svc_systemd:dovecot:restart',
        },
    },
}

svc_systemd = {
    'dovecot': {
        'needs': {
            'action:letsencrypt_update_certificates',
            'action:dovecot_generate_dhparam',
            'file:/etc/dovecot/dovecot.conf',
            'file:/etc/dovecot/dovecot-sql.conf',
        },
    },
}

# fulltext search

directories['/usr/local/libexec/dovecot'] = {}
files['/usr/local/libexec/dovecot/decode2text.sh'] = {
    'owner': 'dovecot',
    'mode': '500',
}
|
|
@ -1,48 +0,0 @@
|
|||
defaults = {
|
||||
'apt': {
|
||||
'packages': {
|
||||
'dovecot-imapd': {},
|
||||
'dovecot-pgsql': {},
|
||||
'dovecot-lmtpd': {},
|
||||
# spam filtering
|
||||
'dovecot-sieve': {},
|
||||
'dovecot-managesieved': {},
|
||||
# fulltext search
|
||||
'dovecot-fts-xapian': {}, # buster-backports
|
||||
'poppler-utils': {}, # pdftotext
|
||||
'catdoc': {}, # catdoc, catppt, xls2csv
|
||||
},
|
||||
},
|
||||
'dovecot': {
|
||||
'database': {
|
||||
'dbname': 'mailserver',
|
||||
'dbuser': 'mailserver',
|
||||
},
|
||||
},
|
||||
'letsencrypt': {
|
||||
'reload_after': {
|
||||
'dovecot',
|
||||
},
|
||||
},
|
||||
'nftables': {
|
||||
'input': {
|
||||
'tcp dport {143, 993, 4190} accept',
|
||||
},
|
||||
},
|
||||
'systemd-timers': {
|
||||
'dovecot-optimize-index': {
|
||||
'command': '/usr/bin/doveadm fts optimize -A',
|
||||
'when': 'daily',
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@metadata_reactor.provides(
    'dovecot/indexer_ram',
)
def indexer_ram(metadata):
    """Grant the dovecot indexer half of the VM's RAM (in megabytes)."""
    half_ram_mb = metadata.get('vm/ram') // 2
    return {
        'dovecot': {
            'indexer_ram': f'{half_ram_mb}M',
        },
    }
|
|
@ -1,66 +0,0 @@
|
|||
# downloads user lives on its own ZFS dataset; user creation must wait for
# the dataset so the home directory lands on the right filesystem.
defaults = {
    'users': {
        'downloads': {
            'home': '/var/lib/downloads',
            'needs': {
                'zfs_dataset:tank/downloads'
            },
        },
    },
    'zfs': {
        'datasets': {
            'tank/downloads': {
                'mountpoint': '/var/lib/downloads',
            },
        },
    },
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
    'systemd-mount'
)
def mount_certs(metadata):
    """Bind-mount the downloads dir to a path nginx may serve as www-data.

    NOTE(review): the function name looks copy-pasted — this mounts the
    downloads directory, not certificates. Renaming would be cosmetic only.
    """
    return {
        'systemd-mount': {
            '/var/lib/downloads_nginx': {
                'source': '/var/lib/downloads',
                'user': 'www-data',
            },
        },
    }
|
||||
|
||||
|
||||
@metadata_reactor.provides(
    'nginx/vhosts',
)
def nginx(metadata):
    """Serve the downloads directory as a plain nginx directory listing."""
    vhost = {
        'content': 'nginx/directory_listing.conf',
        'context': {
            'directory': '/var/lib/downloads_nginx',
        },
    }
    return {
        'nginx': {
            'vhosts': {
                metadata.get('download-server/hostname'): vhost,
            },
        },
    }
|
||||
|
||||
|
||||
@metadata_reactor.provides(
    'users/downloads/authorized_users',
)
def ssh_keys(metadata):
    """Authorize every build-server node in the repo to push via SSH."""
    builders = {
        f'build-server@{peer.name}'
        for peer in repo.nodes
        if peer.has_bundle('build-server')
    }
    return {
        'users': {
            'downloads': {
                'authorized_users': builders,
            },
        },
    }
|
|
@ -1,54 +0,0 @@
|
|||
# Flask
|
||||
|
||||
This bundle can deploy one or more Flask applications per node.
|
||||
|
||||
```python
|
||||
'flask': {
|
||||
'myapp': {
|
||||
'app_module': "myapp",
|
||||
'apt_dependencies': [
|
||||
"libffi-dev",
|
||||
"libssl-dev",
|
||||
],
|
||||
'env': {
|
||||
'APP_SECRETS': "/opt/client_secrets.json",
|
||||
},
|
||||
'json_config': {
|
||||
'this json': 'is_visible',
|
||||
'inside': 'your template.cfg',
|
||||
},
|
||||
'git_url': "ssh://git@bitbucket.apps.seibert-media.net:7999/smedia/myapp.git",
|
||||
'git_branch': "master",
|
||||
'deployment_triggers': ["action:do-a-thing"],
|
||||
},
|
||||
},
|
||||
```
|
||||
|
||||
The git repo containing the application has to obey some conventions:
|
||||
|
||||
* requirements-frozen.txt (preferred) or requirements.txt
|
||||
* minimal setup.py to allow for installation with pip
|
||||
|
||||
The `app` instance has to exist in the module defined by `app_module`.
|
||||
|
||||
It is also very advisable to enable logging in your app (otherwise HTTP 500s won't be logged):
|
||||
|
||||
```python
|
||||
import logging
|
||||
|
||||
if not app.debug:
|
||||
stream_handler = logging.StreamHandler()
|
||||
stream_handler.setLevel(logging.INFO)
|
||||
app.logger.addHandler(stream_handler)
|
||||
```
|
||||
|
||||
If you specify `json_config`, then `/opt/${app}/config.json` will be
|
||||
created. The environment variable `$APP_CONFIG` will point to the exact
|
||||
name. You can use it in your app to load your config:
|
||||
|
||||
```python
|
||||
app.config.from_json(environ['APP_CONFIG'])
|
||||
```
|
||||
|
||||
If `json_config` is *not* specified, you *can* put a static file in
|
||||
`data/flask/files/cfg/$app_name`.
|
|
@ -1,10 +0,0 @@
|
|||
<%
|
||||
from json import dumps
|
||||
from bundlewrap.metadata import MetadataJSONEncoder
|
||||
%>
|
||||
${dumps(
|
||||
json_config,
|
||||
cls=MetadataJSONEncoder,
|
||||
indent=4,
|
||||
sort_keys=True,
|
||||
)}
|
|
@ -1,14 +0,0 @@
|
|||
[Unit]
|
||||
Description=flask application ${name}
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
% for key, value in env.items():
|
||||
Environment=${key}=${value}
|
||||
% endfor
|
||||
User=${user}
|
||||
Group=${group}
|
||||
ExecStart=/opt/${name}/venv/bin/gunicorn -w ${workers} -b ${host}:${port} ${app_module}:app
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
|
@ -1,119 +0,0 @@
|
|||
# Deploy each configured Flask app: apt deps, git checkout, config/secrets
# files, a dedicated venv and a gunicorn systemd service.
for name, conf in node.metadata.get('flask').items():
    # extra system packages this app needs before its service may start
    for dep in conf.get('apt_dependencies', []):
        pkg_apt[dep] = {
            'needed_by': {
                f'svc_systemd:{name}',
            },
        }

    directories[f'/opt/{name}'] = {
        'owner': conf['user'],
        'group': conf['group'],
    }
    directories[f'/opt/{name}/src'] = {}

    git_deploy[f'/opt/{name}/src'] = {
        'repo': conf['git_url'],
        'rev': conf.get('git_branch', 'master'),
        'triggers': [
            f'action:flask_{name}_pip_install_deps',
            *conf.get('deployment_triggers', []),
        ],
    }

    # CONFIG

    env = conf.get('env', {})

    # render json_config into /opt/<app>/config.json, exposed via $APP_CONFIG
    if conf.get('json_config', {}):
        env['APP_CONFIG'] = f'/opt/{name}/config.json'
        files[env['APP_CONFIG']] = {
            'source': 'flask.cfg',
            'context': {
                'json_config': conf.get('json_config', {}),
            },
        }

    # applies both to the generated config above and to a caller-provided one
    if 'APP_CONFIG' in env:
        files[env['APP_CONFIG']].update({
            'content_type': 'mako',
            'group': 'www-data',
            'needed_by': [
                f'svc_systemd:{name}',
            ],
            'triggers': [
                f'svc_systemd:{name}:restart',
            ],
        })

    # secrets

    if 'secrets.json' in conf:
        env['APP_SECRETS'] = f'/opt/{name}/secrets.json'
        files[env['APP_SECRETS']] = {
            'content': conf['secrets.json'],
            'mode': '0600',
            'owner': conf.get('user', 'www-data'),
            'group': conf.get('group', 'www-data'),
            'needed_by': [
                f'svc_systemd:{name}',
            ],
        }

    # VENV

    actions[f'flask_{name}_create_virtualenv'] = {
        'cascade_skip': False,
        'command': f'python3 -m venv /opt/{name}/venv',
        'unless': f'test -d /opt/{name}/venv',
        'needs': [
            f'directory:/opt/{name}',
            'pkg_apt:python3-venv',
        ],
        'triggers': [
            f'action:flask_{name}_pip_install_deps',
        ],
    }

    # prefer pinned requirements, fall back to the loose ones
    actions[f'flask_{name}_pip_install_deps'] = {
        'cascade_skip': False,
        'command': f'/opt/{name}/venv/bin/pip3 install -r /opt/{name}/src/requirements-frozen.txt || /opt/{name}/venv/bin/pip3 install -r /opt/{name}/src/requirements.txt',
        'triggered': True,  # TODO: https://stackoverflow.com/questions/16294819/check-if-my-python-has-all-required-packages
        'needs': [
            f'git_deploy:/opt/{name}/src',
            'pkg_apt:python3-pip',
        ],
        'triggers': [
            f'action:flask_{name}_pip_install_gunicorn',
        ],
    }

    actions[f'flask_{name}_pip_install_gunicorn'] = {
        'command': f'/opt/{name}/venv/bin/pip3 install -U gunicorn',
        'triggered': True,
        'cascade_skip': False,
        'needs': [
            f'action:flask_{name}_create_virtualenv',
        ],
        'triggers': [
            f'action:flask_{name}_pip_install',
        ],
    }

    actions[f'flask_{name}_pip_install'] = {
        'command': f'/opt/{name}/venv/bin/pip3 install -e /opt/{name}/src',
        'triggered': True,
        'cascade_skip': False,
        'triggers': [
            f'svc_systemd:{name}:restart',
        ],
    }

    # UNIT

    svc_systemd[name] = {
        'needs': [
            f'action:flask_{name}_pip_install',
            f'file:/usr/local/lib/systemd/system/{name}.service',
        ],
    }
|
|
@ -1,61 +0,0 @@
|
|||
# Baseline for the flask bundle: Python toolchain plus an (initially empty)
# per-app config namespace.
defaults = {
    'apt': {
        'packages': {
            'python3-pip': {},
            'python3-dev': {},
            'python3-venv': {},
        },
    },
    'flask': {},
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
    'flask',
)
def app_defaults(metadata):
    """Fill per-app defaults (user/group/workers/timeout); explicit config wins."""
    return {
        'flask': {
            app_name: {
                'user': 'root',
                'group': 'root',
                'workers': 8,
                'timeout': 30,
                **app_conf,  # caller-provided values override the defaults above
            }
            for app_name, app_conf in metadata.get('flask').items()
        }
    }
||||
|
||||
|
||||
@metadata_reactor.provides(
    'systemd/units',
)
def units(metadata):
    """Generate one gunicorn systemd service per configured Flask app."""
    generated = {}
    for name, conf in metadata.get('flask').items():
        exec_start = (
            f"/opt/{name}/venv/bin/gunicorn -w {conf['workers']}"
            f" -b 127.0.0.1:{conf['port']} --timeout {conf['timeout']}"
            f" {conf['app_module']}:app"
        )
        generated[f'{name}.service'] = {
            'Unit': {
                'Description': name,
                'After': 'network.target',
            },
            'Service': {
                'Environment': {
                    f'{k}={v}'
                    for k, v in conf.get('env', {}).items()
                },
                'User': conf['user'],
                'Group': conf['group'],
                'ExecStart': exec_start,
            },
            'Install': {
                'WantedBy': {
                    'multi-user.target'
                }
            },
        }
    return {
        'systemd': {
            'units': generated,
        }
    }
|
@ -1,23 +0,0 @@
|
|||
Pg Pass workaround: set manually:
|
||||
|
||||
```
|
||||
root@freescout /ro psql freescout
|
||||
psql (15.6 (Debian 15.6-0+deb12u1))
|
||||
Type "help" for help.
|
||||
|
||||
freescout=# \password freescout
|
||||
Enter new password for user "freescout":
|
||||
Enter it again:
|
||||
freescout=#
|
||||
\q
|
||||
```
|
||||
|
||||
|
||||
# problems
|
||||
|
||||
# check if /opt/freescout/.env has been reset
|
||||
# check `psql -h localhost -d freescout -U freescout -W` with the password from .env
|
||||
# chown -R www-data:www-data /opt/freescout
|
||||
# sudo su - www-data -c 'php /opt/freescout/artisan freescout:clear-cache' -s /bin/bash
|
||||
# javascript funny? `sudo su - www-data -c 'php /opt/freescout/artisan storage:link' -s /bin/bash`
|
||||
# user avatars missing? restore them from the backup: `/opt/freescout/.zfs/snapshot/zfs-auto-snap_hourly-2024-11-22-1700/storage/app/public/users` `./customers`
|
|
@ -1,66 +0,0 @@
|
|||
# https://github.com/freescout-helpdesk/freescout/wiki/Installation-Guide
run_as = repo.libs.tools.run_as
php_version = node.metadata.get('php/version')


directories = {
    '/opt/freescout': {
        'owner': 'www-data',
        'group': 'www-data',
        # chown -R www-data:www-data /opt/freescout
    },
}

# Automated clone/update is currently disabled; kept for reference until the
# manual workflow below is replaced.
actions = {
    # 'clone_freescout': {
    #     'command': run_as('www-data', 'git clone https://github.com/freescout-helpdesk/freescout.git /opt/freescout'),
    #     'unless': 'test -e /opt/freescout/.git',
    #     'needs': [
    #         'pkg_apt:git',
    #         'directory:/opt/freescout',
    #     ],
    # },
    # 'pull_freescout': {
    #     'command': run_as('www-data', 'git -C /opt/freescout fetch origin dist && git -C /opt/freescout reset --hard origin/dist && git -C /opt/freescout clean -f'),
    #     'unless': run_as('www-data', 'git -C /opt/freescout fetch origin && git -C /opt/freescout status -uno | grep -q "Your branch is up to date"'),
    #     'needs': [
    #         'action:clone_freescout',
    #     ],
    #     'triggers': [
    #         'action:freescout_artisan_update',
    #         f'svc_systemd:php{php_version}-fpm.service:restart',
    #     ],
    # },
    # 'freescout_artisan_update': {
    #     'command': run_as('www-data', 'php /opt/freescout/artisan freescout:after-app-update'),
    #     'triggered': True,
    #     'needs': [
    #         f'svc_systemd:php{php_version}-fpm.service:restart',
    #         'action:pull_freescout',
    #     ],
    # },
}

# svc_systemd = {
#     f'freescout-cron.service': {},
# }

# files = {
#     '/opt/freescout/.env': {
#         # https://github.com/freescout-helpdesk/freescout/blob/dist/.env.example
#         # Every time you are making changes in .env file, in order changes to take an effect you need to run:
#         # ´sudo su - www-data -c 'php /opt/freescout/artisan freescout:clear-cache' -s /bin/bash´
#         'owner': 'www-data',
#         'content': '\n'.join(
#             f'{k}={v}' for k, v in
#             sorted(node.metadata.get('freescout/env').items())
#         ) + '\n',
#         'needs': [
#             'directory:/opt/freescout',
#             'action:clone_freescout',
#         ],
#     },
# }

# NOTE(review): the commands below contain a plaintext admin credential
# committed to the repo — rotate that password and drop it from here.
#sudo su - www-data -s /bin/bash -c 'php /opt/freescout/artisan freescout:create-user --role admin --firstName M --lastName W --email freescout@freibrief.net --password gyh.jzv2bnf6hvc.HKG --no-interaction'
#sudo su - www-data -s /bin/bash -c 'php /opt/freescout/artisan freescout:create-user --role admin --firstName M --lastName W --email freescout@freibrief.net --password gyh.jzv2bnf6hvc.HKG --no-interaction'
|
|
@ -1,121 +0,0 @@
|
|||
from base64 import b64decode

# hash: SCRAM-SHA-256$4096:tQNfqQi7seqNDwJdHqCHbg==$r3ibECluHJaY6VRwpvPqrtCjgrEK7lAkgtUO8/tllTU=:+eeo4M0L2SowfyHFxT2FRqGzezve4ZOEocSIo11DATA=
database_password = repo.vault.password_for(f'{node.name} postgresql freescout').value

defaults = {
    'apt': {
        'packages': {
            'git': {},
            'php': {},
            'php-pgsql': {},
            'php-fpm': {},
            'php-mbstring': {},
            'php-xml': {},
            'php-imap': {},
            'php-zip': {},
            'php-gd': {},
            'php-curl': {},
            'php-intl': {},
        },
    },
    'freescout': {
        # https://github.com/freescout-helpdesk/freescout/blob/dist/.env.example
        'env': {
            'APP_TIMEZONE': 'Europe/Berlin',
            'DB_CONNECTION': 'pgsql',
            'DB_HOST': '127.0.0.1',
            'DB_PORT': '5432',
            'DB_DATABASE': 'freescout',
            'DB_USERNAME': 'freescout',
            'DB_PASSWORD': database_password,
            'APP_KEY': 'base64:' + repo.vault.random_bytes_as_base64_for(f'{node.name} freescout APP_KEY', length=32).value
        },
    },
    'php': {
        'php.ini': {
            'cgi': {
                'fix_pathinfo': '0',
            },
        },
    },
    'postgresql': {
        'roles': {
            'freescout': {
                'password_hash': repo.libs.postgres.generate_scram_sha_256(
                    database_password,
                    b64decode(repo.vault.random_bytes_as_base64_for(f'{node.name} postgres freescout', length=16).value.encode()),
                ),
            },
        },
        'databases': {
            'freescout': {
                'owner': 'freescout',
            },
        },
    },
    # freescout-cron used to be a dedicated systemd service; it is now
    # expressed as the systemd-timers entry below.
    'systemd-timers': {
        'freescout-cron': {
            'command': '/usr/bin/php /opt/freescout/artisan schedule:run',
            'when': '*-*-* *:*:00',
            'RuntimeMaxSec': '180',
            'user': 'www-data',
        },
    },
    'zfs': {
        'datasets': {
            'tank/freescout': {
                'mountpoint': '/opt/freescout',
            },
        },
    },
}
||||
|
||||
|
||||
|
||||
|
||||
@metadata_reactor.provides(
    'freescout/env/APP_URL',
)
def freescout(metadata):
    """Derive APP_URL from the configured domain."""
    app_url = 'https://' + metadata.get('freescout/domain') + '/'
    return {
        'freescout': {
            'env': {
                'APP_URL': app_url,
            },
        },
    }
|
||||
|
||||
@metadata_reactor.provides(
    'nginx/vhosts',
)
def nginx(metadata):
    """Register the freescout vhost for the configured domain."""
    return {
        'nginx': {
            'vhosts': {
                metadata.get('freescout/domain'): {
                    'content': 'freescout/vhost.conf',
                },
            },
        },
    }
|
|
@ -1,12 +0,0 @@
|
|||
```
|
||||
gcloud projects add-iam-policy-binding sublimity-182017 --member 'serviceAccount:backup@sublimity-182017.iam.gserviceaccount.com' --role 'roles/storage.objectViewer'
|
||||
gcloud projects add-iam-policy-binding sublimity-182017 --member 'serviceAccount:backup@sublimity-182017.iam.gserviceaccount.com' --role 'roles/storage.objectCreator'
|
||||
gcloud projects add-iam-policy-binding sublimity-182017 --member 'serviceAccount:backup@sublimity-182017.iam.gserviceaccount.com' --role 'roles/storage.objectAdmin'
|
||||
gsutil -o "GSUtil:parallel_process_count=3" -o GSUtil:parallel_thread_count=4 -m rsync -r -d -e /var/vmail gs://sublimity-backup/mailserver
|
||||
gsutil config
|
||||
gsutil versioning set on gs://sublimity-backup
|
||||
|
||||
|
||||
|
||||
gcsfuse --key-file /root/.config/gcloud/service_account.json sublimity-backup gcsfuse
|
||||
```
|
|
@ -1,43 +0,0 @@
|
|||
from os.path import join
from json import dumps

# Install the service-account credentials and activate them with gcloud.
service_account = node.metadata.get('gcloud/service_account')
project = node.metadata.get('gcloud/project')

directories['/etc/gcloud'] = {
    'purge': True,
}

# full gcloud metadata, for humans debugging the node
files['/etc/gcloud/gcloud.json'] = {
    'content': dumps(
        node.metadata.get('gcloud'),
        indent=4,
        sort_keys=True
    ),
}

files['/etc/gcloud/service_account.json'] = {
    'content': repo.vault.decrypt_file(
        join(repo.path, 'data', 'gcloud', 'service_accounts', f'{service_account}@{project}.json.enc')
    ),
    'mode': '500',
    'needs': [
        'pkg_apt:google-cloud-sdk',
    ],
}

actions['gcloud_activate_service_account'] = {
    'command': 'gcloud auth activate-service-account --key-file /etc/gcloud/service_account.json',
    # rf-string: the regex needs a literal backslash before '*' — a plain
    # f-string made this an invalid escape sequence (SyntaxWarning).
    'unless': rf"gcloud auth list | grep -q '^\*[[:space:]]*{service_account}@{project}.iam.gserviceaccount.com'",
    'needs': [
        'file:/etc/gcloud/service_account.json'
    ],
}

actions['gcloud_select_project'] = {
    'command': f"gcloud config set project '{project}'",
    'unless': f"gcloud config get-value project | grep -q '^{project}$'",
    'needs': [
        'action:gcloud_activate_service_account'
    ],
}
|
|
@ -1,22 +0,0 @@
|
|||
# Google Cloud SDK from Google's own apt repository, plus python3-crcmod for
# fast gsutil checksums.
defaults = {
    'apt': {
        'packages': {
            'apt-transport-https': {},
            'ca-certificates': {},
            'gnupg': {},
            'google-cloud-sdk': {},
            'python3-crcmod': {},
        },
        'sources': {
            'google-cloud': {
                'url': 'https://packages.cloud.google.com/apt/',
                'suites': {
                    'cloud-sdk',
                },
                'components': {
                    'main',
                },
            },
        },
    },
}
|
|
@ -1,75 +0,0 @@
|
|||
[DEFAULT]
|
||||
APP_NAME = ckn-gitea
|
||||
RUN_USER = git
|
||||
RUN_MODE = prod
|
||||
|
||||
[repository]
|
||||
ROOT = /var/lib/gitea/repositories
|
||||
MAX_CREATION_LIMIT = 0
|
||||
DEFAULT_BRANCH = main
|
||||
|
||||
[ui]
|
||||
ISSUE_PAGING_NUM = 50
|
||||
MEMBERS_PAGING_NUM = 100
|
||||
|
||||
[server]
|
||||
PROTOCOL = http
|
||||
HTTP_ADDR = 0.0.0.0
|
||||
HTTP_PORT = 3500
|
||||
DISABLE_SSH = true
|
||||
SSH_PORT = 22
|
||||
LFS_START_SERVER = true
|
||||
LFS_CONTENT_PATH = /var/lib/gitea/data/lfs
|
||||
OFFLINE_MODE = true
|
||||
START_SSH_SERVER = false
|
||||
DISABLE_ROUTER_LOG = true
|
||||
LANDING_PAGE = explore
|
||||
|
||||
[admin]
|
||||
DEFAULT_EMAIL_NOTIFICATIONS = onmention
|
||||
DISABLE_REGULAR_ORG_CREATION = true
|
||||
|
||||
[security]
|
||||
INSTALL_LOCK = true
|
||||
LOGIN_REMEMBER_DAYS = 30
|
||||
|
||||
[openid]
|
||||
ENABLE_OPENID_SIGNIN = false
|
||||
ENABLE_OPENID_SIGNUP = false
|
||||
|
||||
[service]
|
||||
REGISTER_EMAIL_CONFIRM = true
|
||||
ENABLE_NOTIFY_MAIL = true
|
||||
DISABLE_REGISTRATION = false
|
||||
ALLOW_ONLY_EXTERNAL_REGISTRATION = false
|
||||
ENABLE_CAPTCHA = false
|
||||
REQUIRE_SIGNIN_VIEW = false
|
||||
DEFAULT_KEEP_EMAIL_PRIVATE = true
|
||||
DEFAULT_ALLOW_CREATE_ORGANIZATION = false
|
||||
DEFAULT_ENABLE_TIMETRACKING = true
|
||||
|
||||
[session]
|
||||
PROVIDER = file
|
||||
|
||||
[picture]
|
||||
DISABLE_GRAVATAR = true
|
||||
ENABLE_FEDERATED_AVATAR = false
|
||||
|
||||
[log]
|
||||
MODE = console
|
||||
LEVEL = warn
|
||||
|
||||
[other]
|
||||
SHOW_FOOTER_BRANDING = true
|
||||
SHOW_FOOTER_TEMPLATE_LOAD_TIME = false
|
||||
|
||||
[webhook]
|
||||
ALLOWED_HOST_LIST = *
|
||||
DELIVER_TIMEOUT = 600
|
||||
|
||||
[indexer]
|
||||
REPO_INDEXER_ENABLED = true
|
||||
MAX_FILE_SIZE = 10240000
|
||||
|
||||
[queue.issue_indexer]
|
||||
LENGTH = 20
|
|
@ -1,64 +0,0 @@
|
|||
from os.path import join
from bundlewrap.utils.dicts import merge_dict


# Forgejo (gitea fork) is deployed as a single static binary.
version = node.metadata.get('gitea/version')
assert not version.startswith('v')  # metadata stores the bare version number
arch = node.metadata.get('system/architecture')

downloads['/usr/local/bin/gitea'] = {
    # https://forgejo.org/releases/
    'url': f'https://codeberg.org/forgejo/forgejo/releases/download/v{version}/forgejo-{version}-linux-{arch}',
    'sha256_url': '{url}.sha256',
    'triggers': {
        'svc_systemd:gitea:restart',
    },
    # the running binary must be stopped before it can be replaced
    'preceded_by': {
        'action:stop_gitea',
    },
}

directories['/var/lib/gitea'] = {
    'owner': 'git',
    'mode': '0700',
    'triggers': {
        'svc_systemd:gitea:restart',
    },
}

actions = {
    'chmod_gitea': {
        'command': 'chmod a+x /usr/local/bin/gitea',
        'unless': 'test -x /usr/local/bin/gitea',
        'needs': {
            'download:/usr/local/bin/gitea',
        },
    },
    'stop_gitea': {
        'command': 'systemctl stop gitea',
        'triggered': True,
    },
}

# static app.ini template merged with node-level overrides
files['/etc/gitea/app.ini'] = {
    'content': repo.libs.ini.dumps(
        merge_dict(
            repo.libs.ini.parse(open(join(repo.path, 'bundles', 'gitea', 'files', 'app.ini')).read()),
            node.metadata.get('gitea/conf'),
        ),
    ),
    'owner': 'git',
    'mode': '0600',
    'context': node.metadata['gitea'],
    'triggers': {
        'svc_systemd:gitea:restart',
    },
}

svc_systemd['gitea'] = {
    'needs': [
        'action:chmod_gitea',
        'download:/usr/local/bin/gitea',
        'file:/etc/gitea/app.ini',
    ],
}
|
@ -1,125 +0,0 @@
|
|||
database_password = repo.vault.password_for(f'{node.name} postgresql gitea').value

defaults = {
    'apt': {
        'packages': {
            'git': {
                'needed_by': {
                    'svc_systemd:gitea',
                }
            },
        },
    },
    'gitea': {
        'conf': {
            'DEFAULT': {
                'WORK_PATH': '/var/lib/gitea',
            },
            'database': {
                'DB_TYPE': 'postgres',
                'HOST': 'localhost:5432',
                'NAME': 'gitea',
                'USER': 'gitea',
                'PASSWD': database_password,
                'SSL_MODE': 'disable',
                'LOG_SQL': 'false',
            },
        },
    },
    'postgresql': {
        'roles': {
            'gitea': {
                'password': database_password,
            },
        },
        'databases': {
            'gitea': {
                'owner': 'gitea',
            },
        },
    },
    'systemd': {
        'units': {
            'gitea.service': {
                'Unit': {
                    'Description': 'gitea',
                    'After': {'syslog.target', 'network.target'},
                    'Requires': 'postgresql.service',
                },
                'Service': {
                    'RestartSec': '2s',
                    'Type': 'simple',
                    'User': 'git',
                    'Group': 'git',
                    'WorkingDirectory': '/var/lib/gitea/',
                    'ExecStart': '/usr/local/bin/gitea web -c /etc/gitea/app.ini',
                    'Restart': 'always',
                    'Environment': 'USER=git HOME=/home/git GITEA_WORK_DIR=/var/lib/gitea',
                },
                'Install': {
                    'WantedBy': {'multi-user.target'},
                },
            },
        },
    },
    'users': {
        'git': {
            'home': '/home/git',
        },
    },
    'zfs': {
        'datasets': {
            'tank/gitea': {
                'mountpoint': '/var/lib/gitea',
            },
        },
    },
}
||||
|
||||
|
||||
@metadata_reactor.provides(
    'gitea/conf',
)
def conf(metadata):
    """Derive domain-dependent app.ini settings and per-node secrets."""
    domain = metadata.get('gitea/domain')

    return {
        'gitea': {
            'conf': {
                'server': {
                    'SSH_DOMAIN': domain,
                    'DOMAIN': domain,
                    'ROOT_URL': f'https://{domain}/',
                    'LFS_JWT_SECRET': repo.vault.password_for(f'{node.name} gitea lfs_secret_key', length=43),
                },
                'security': {
                    'INTERNAL_TOKEN': repo.vault.password_for(f'{node.name} gitea internal_token'),
                    'SECRET_KEY': repo.vault.password_for(f'{node.name} gitea security_secret_key'),
                },
                'service': {
                    'NO_REPLY_ADDRESS': f'noreply.{domain}',
                },
                'oauth2': {
                    'JWT_SECRET': repo.vault.password_for(f'{node.name} gitea oauth_secret_key', length=43),
                },
            },
        },
    }
|
||||
|
||||
|
||||
@metadata_reactor.provides(
    'nginx/vhosts',
)
def nginx(metadata):
    """Reverse-proxy the gitea domain to the local HTTP port."""
    return {
        'nginx': {
            'vhosts': {
                metadata.get('gitea/domain'): {
                    'content': 'nginx/proxy_pass.conf',
                    'context': {
                        'target': 'http://127.0.0.1:3500',
                    },
                },
            },
        },
    }
|
|
@ -1,6 +0,0 @@
|
|||
# Check out gocryptfs-inspect at a pinned revision.
directories['/opt/gocryptfs-inspect'] = {}

git_deploy['/opt/gocryptfs-inspect'] = {
    'repo': 'https://github.com/slackner/gocryptfs-inspect.git',
    'rev': 'ecd296c8f014bf18f5889e3cb9cb64807ff6b9c4',  # pinned commit
}
|
|
@ -1,7 +0,0 @@
|
|||
# gocryptfs-inspect needs pycryptodome to decode gocryptfs volumes.
defaults = {
    'apt': {
        'packages': {
            'python3-pycryptodome': {},
        },
    },
}
|
|
@ -1,43 +0,0 @@
|
|||
from json import dumps

directories['/etc/gocryptfs'] = {
    'purge': True,
}

files['/etc/gocryptfs/masterkey'] = {
    'content': node.metadata.get('gocryptfs/masterkey'),
    'mode': '500',
}

# Static gocryptfs config; the salt comes from metadata, everything else is
# fixed so all nodes produce identical ciphertext layouts.
files['/etc/gocryptfs/gocryptfs.conf'] = {
    'content': dumps({
        'Version': 2,
        'Creator': 'gocryptfs 1.6.1',
        'ScryptObject': {
            'Salt': node.metadata.get('gocryptfs/salt'),
            'N': 65536,
            'R': 8,
            'P': 1,
            'KeyLen': 32,
        },
        'FeatureFlags': [
            'GCMIV128',
            'HKDF',
            'PlaintextNames',
            'AESSIV',
        ]
    }, indent=4, sort_keys=True)
}

# Mountpoints are plain directories owned by the mount; stop the service
# before (re)creating them so the fuse mount is not shadowed.
for path, options in node.metadata.get('gocryptfs/paths').items():
    directories[options['mountpoint']] = {
        'owner': None,
        'group': None,
        'mode': None,
        'preceded_by': [
            f'svc_systemd:gocryptfs-{options["id"]}:stop',
        ],
        'needed_by': [
            f'svc_systemd:gocryptfs-{options["id"]}',
        ],
    }
|
@ -1,103 +0,0 @@
|
|||
from hashlib import sha3_256
from base64 import b64decode, b64encode
from binascii import hexlify
from uuid import UUID

# gocryptfs itself plus fuse (mounting) and socat (ctlsock access).
defaults = {
    'apt': {
        'packages': {
            'gocryptfs': {},
            'fuse': {},
            'socat': {},
        },
    },
    'gocryptfs': {
        'paths': {},
    },
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
    'gocryptfs',
)
def config(metadata):
    """Derive the node's gocryptfs masterkey and scrypt salt.

    The masterkey is vault-derived random bytes rendered as hex; the salt is
    deterministically derived from the node id so it is stable across runs.
    """
    raw_key = b64decode(
        str(repo.vault.random_bytes_as_base64_for(metadata.get('id'), length=32))
    )
    salt = sha3_256(UUID(metadata.get('id')).bytes).digest()
    return {
        'gocryptfs': {
            'masterkey': hexlify(raw_key).decode(),
            'salt': b64encode(salt).decode(),
        },
    }
|
||||
|
||||
|
||||
@metadata_reactor.provides(
    'gocryptfs',
)
def paths(metadata):
    """Assign each configured path a stable 8-byte hex id derived from its name."""
    # dict comprehension over keys only — the original iterated .items() but
    # never used the values, and built the dict via a manual accumulator
    return {
        'gocryptfs': {
            'paths': {
                path: {
                    'id': hexlify(sha3_256(path.encode()).digest()[:8]).decode(),
                }
                for path in metadata.get('gocryptfs/paths')
            },
        },
    }
|
||||
|
||||
|
||||
|
||||
@metadata_reactor.provides(
    'systemd/services',
)
def systemd(metadata):
    """Generate one gocryptfs reverse-mount service per configured path."""
    services = {}

    for path, options in metadata.get('gocryptfs/paths').items():
        unit_name = f'gocryptfs-{options["id"]}'
        services[unit_name] = {
            'content': {
                'Unit': {
                    'Description': f'gocryptfs@{path} ({options["id"]})',
                    'After': {
                        'filesystem.target',
                        'zfs.target',
                    },
                },
                'Service': {
                    'RuntimeDirectory': 'gocryptfs',
                    'Environment': {
                        'MASTERKEY': metadata.get('gocryptfs/masterkey'),
                        'SOCKET': f'/var/run/gocryptfs/{options["id"]}',
                        'PLAIN': path,
                        'CIPHER': options["mountpoint"]
                    },
                    'ExecStart': [
                        '/usr/bin/gocryptfs -fg -plaintextnames -reverse -masterkey $MASTERKEY -ctlsock $SOCKET $PLAIN $CIPHER',
                    ],
                    # make sure the fuse mount is gone when the unit stops
                    'ExecStopPost': [
                        '/usr/bin/umount $CIPHER'
                    ],
                },
            },
            'needs': [
                'pkg_apt:gocryptfs',
                'pkg_apt:fuse',
                'pkg_apt:socat',
                'file:/etc/gocryptfs/masterkey',
                'file:/etc/gocryptfs/gocryptfs.conf',
            ],
            'triggers': [
                f'svc_systemd:{unit_name}:restart',
            ],
        }

    return {
        'systemd': {
            'services': services,
        },
    }
|
|
@ -1,61 +0,0 @@
|
|||
from shlex import quote

# dedicated system user that owns both the gollum checkout and the wiki data
users = {
    'gollum': {
        'home': '/var/lib/gollum',
    }
}
|
||||
|
||||
# /opt/gollum holds the application, /var/lib/gollum the wiki content.
directories = {
    '/opt/gollum': {
        'owner': 'gollum',
    },
    '/opt/gollum/.bundle': {
        'owner': 'gollum',
    },
    '/var/lib/gollum': {
        'owner': 'gollum',
    },
}

# keep gems inside the checkout instead of system-wide
files = {
    '/opt/gollum/.bundle/config': {
        'content': 'BUNDLE_PATH: ".bundle/gems"',
    }
}
|
||||
|
||||
# Application at a pinned tag; wiki content is only cloned once, never reset.
git_deploy = {
    '/opt/gollum': {
        'repo': 'https://github.com/gollum/gollum.git',
        'rev': f"v{node.metadata.get('gollum/version')}",
    },
    '/var/lib/gollum': {
        'repo': node.metadata.get('gollum/wiki'),
        'rev': 'main',
        'unless': 'test -e /var/lib/gollum/.git',
    },
}
|
||||
|
||||
def run(cmd):
    """Return a shell command that runs *cmd* as the gollum user in /opt/gollum."""
    # dropped a pointless f-prefix on the literal prefix string
    return "su gollum -c " + quote(f"cd /opt/gollum && {cmd}")
|
||||
|
||||
# Install bundler into the gollum user's gem dir, then the app's gems.
actions = {
    'gollum_install_bundler': {
        'command': run("gem install bundler --user"),
        'unless': run("test -e $(ruby -e 'puts Gem.user_dir')/bin/bundle"),
        'needs': [
            'file:/opt/gollum/.bundle/config',
        ],
    },
    'gollum_bundle_install': {
        'command': run("$(ruby -e 'puts Gem.user_dir')/bin/bundle install"),
        'unless': run("$(ruby -e 'puts Gem.user_dir')/bin/bundle check"),
        'needs': [
            'git_deploy:/opt/gollum',
            'action:gollum_install_bundler',
        ],
    },
}

# TODO: AUTH
#https://github.com/bjoernalbers/gollum-auth
|
|
@ -1,49 +0,0 @@
|
|||
# Default metadata for the gollum bundle: build deps for its native gems and
# the skeleton of the gollum systemd service.
defaults = {
    'apt': {
        'packages': {
            'libgit2-dev': {},
            'libssl-dev': {},
            'cmake': {},
        },
    },
    'systemd': {
        'units': {
            'gollum.service': {
                'Unit': {
                    'Description': 'gollum',
                    # BUG FIX: the original dict had two 'After' keys, so
                    # 'syslog.target' was silently dropped. A set carries both
                    # (same convention as the gitea bundle's unit metadata).
                    'After': {'syslog.target', 'network.target'},
                    # NOTE(review): gollum does not obviously use postgres —
                    # this Requires looks copy-pasted from gitea; confirm.
                    'Requires': 'postgresql.service',
                },
                'Service': {
                    'User': 'gollum',
                    'Group': 'gollum',
                    'WorkingDirectory': '/opt/gollum',
                    # NOTE(review): ExecStart is a placeholder ('true') — the
                    # service currently does nothing when started.
                    'ExecStart': 'true',
                    'Restart': 'always',
                },
                'Install': {
                    'WantedBy': {'multi-user.target'},
                },
            },
        },
    },
}
||||
|
||||
|
||||
@metadata_reactor.provides(
    'nginx/vhosts',
)
def nginx(metadata):
    """Reverse-proxy the gollum domain to the local app port."""
    return {
        'nginx': {
            'vhosts': {
                metadata.get('gollum/domain'): {
                    'content': 'nginx/proxy_pass.conf',
                    'context': {
                        'target': 'http://127.0.0.1:3600',
                    }
                },
            },
        },
    }
|
|
@ -1,14 +0,0 @@
|
|||
# metadata
|
||||
|
||||
```python
|
||||
{
|
||||
'hostname': 'example.com',
|
||||
'influxdb_node': 'htz.influx',
|
||||
}
|
||||
```
|
||||
|
||||
# links
|
||||
|
||||
|
||||
https://github.com/grafana/influxdb-flux-datasource/issues/42
|
||||
https://community.grafana.com/t/no-alias-by-when-using-flux/15575/6
|
|
@ -1,181 +0,0 @@
|
|||
# Grafana needs redis (sessions) and postgresql (storage) on the same node.
assert node.has_bundle('redis')
assert node.has_bundle('postgresql')

from mako.template import Template
from shlex import quote
from copy import deepcopy
from itertools import count
import yaml
import json
|
||||
svc_systemd['grafana-server'] = {
|
||||
'needs': [
|
||||
'pkg_apt:grafana',
|
||||
],
|
||||
}
|
||||
|
||||
admin_password = node.metadata.get('grafana/config/security/admin_password')
|
||||
port = node.metadata.get('grafana/config/server/http_port')
|
||||
actions['reset_grafana_admin_password'] = {
|
||||
'command': f"grafana-cli admin reset-admin-password {quote(admin_password)}",
|
||||
'unless': f"sleep 5 && curl http://admin:{quote(admin_password)}@localhost:{port}/api/org --fail",
|
||||
'needs': [
|
||||
'svc_systemd:grafana-server',
|
||||
],
|
||||
}
|
||||
|
||||
directories = {
|
||||
'/etc/grafana': {},
|
||||
'/etc/grafana/provisioning': {
|
||||
'owner': 'grafana',
|
||||
'group': 'grafana',
|
||||
},
|
||||
'/etc/grafana/provisioning/datasources': {
|
||||
'purge': True,
|
||||
},
|
||||
'/etc/grafana/provisioning/dashboards': {
|
||||
'purge': True,
|
||||
},
|
||||
'/var/lib/grafana': {
|
||||
'owner': 'grafana',
|
||||
'group': 'grafana',
|
||||
},
|
||||
'/var/lib/grafana/dashboards': {
|
||||
'owner': 'grafana',
|
||||
'group': 'grafana',
|
||||
'purge': True,
|
||||
'triggers': [
|
||||
'svc_systemd:grafana-server:restart',
|
||||
],
|
||||
},
|
||||
}
|
||||
|
||||
files = {
|
||||
'/etc/grafana/grafana.ini': {
|
||||
'content': repo.libs.ini.dumps(node.metadata.get('grafana/config')),
|
||||
'owner': 'grafana',
|
||||
'group': 'grafana',
|
||||
'triggers': [
|
||||
'svc_systemd:grafana-server:restart',
|
||||
],
|
||||
},
|
||||
'/etc/grafana/provisioning/datasources/managed.yaml': {
|
||||
'content': yaml.dump({
|
||||
'apiVersion': 1,
|
||||
'datasources': list(node.metadata.get('grafana/datasources').values()),
|
||||
}),
|
||||
'owner': 'grafana',
|
||||
'group': 'grafana',
|
||||
'triggers': [
|
||||
'svc_systemd:grafana-server:restart',
|
||||
],
|
||||
},
|
||||
'/etc/grafana/provisioning/dashboards/managed.yaml': {
|
||||
'content': yaml.dump({
|
||||
'apiVersion': 1,
|
||||
'providers': [{
|
||||
'name': 'Default',
|
||||
'folder': 'Generated',
|
||||
'type': 'file',
|
||||
'options': {
|
||||
'path': '/var/lib/grafana/dashboards',
|
||||
},
|
||||
}],
|
||||
}),
|
||||
'owner': 'grafana',
|
||||
'group': 'grafana',
|
||||
'triggers': [
|
||||
'svc_systemd:grafana-server:restart',
|
||||
],
|
||||
},
|
||||
}
|
||||
|
||||
# DASHBOARDS
|
||||
|
||||
with open(repo.path.join([f'data/grafana/dashboard.py'])) as file:
|
||||
dashboard_template = eval(file.read())
|
||||
with open(repo.path.join([f'data/grafana/panel.py'])) as file:
|
||||
panel_template = eval(file.read())
|
||||
with open(repo.path.join([f'data/grafana/flux.mako'])) as file:
|
||||
flux_template = Template(file.read())
|
||||
|
||||
bucket = repo.get_node(node.metadata.get('grafana/influxdb_node')).metadata.get('influxdb/bucket')
|
||||
|
||||
monitored_nodes = [
|
||||
other_node
|
||||
for other_node in repo.nodes
|
||||
if other_node.metadata.get('telegraf/influxdb_node', None) == node.metadata.get('grafana/influxdb_node')
|
||||
]
|
||||
|
||||
for dashboard_id, monitored_node in enumerate(monitored_nodes, start=1):
|
||||
dashboard = deepcopy(dashboard_template)
|
||||
dashboard['id'] = dashboard_id
|
||||
dashboard['title'] = monitored_node.name
|
||||
dashboard['uid'] = monitored_node.metadata.get('id')
|
||||
panel_id = count(start=1)
|
||||
|
||||
for row_id, row_name in enumerate(sorted(monitored_node.metadata.get('grafana_rows')), start=1):
|
||||
with open(repo.path.join([f'data/grafana/rows/{row_name}.py'])) as file:
|
||||
row = eval(file.read())
|
||||
|
||||
for panel_in_row, (panel_name, panel_config) in enumerate(row.items()):
|
||||
panel = deepcopy(panel_template)
|
||||
panel['id'] = next(panel_id)
|
||||
panel['title'] = f'{row_name} {panel_name}'
|
||||
panel['gridPos']['w'] = 24 // len(row)
|
||||
panel['gridPos']['x'] = (24 // len(row)) * panel_in_row
|
||||
panel['gridPos']['y'] = (row_id - 1) * panel['gridPos']['h']
|
||||
|
||||
if 'display_name' in panel_config:
|
||||
panel['fieldConfig']['defaults']['displayName'] = '${'+panel_config['display_name']+'}'
|
||||
|
||||
if panel_config.get('stacked'):
|
||||
panel['fieldConfig']['defaults']['custom']['stacking']['mode'] = 'normal'
|
||||
|
||||
if 'unit' in panel_config:
|
||||
panel['fieldConfig']['defaults']['unit'] = panel_config['unit']
|
||||
|
||||
if 'min' in panel_config:
|
||||
panel['fieldConfig']['defaults']['min'] = panel_config['min']
|
||||
if 'max' in panel_config:
|
||||
panel['fieldConfig']['defaults']['max'] = panel_config['max']
|
||||
if 'soft_max' in panel_config:
|
||||
panel['fieldConfig']['defaults']['custom']['axisSoftMax'] = panel_config['soft_max']
|
||||
|
||||
if 'legend' in panel_config:
|
||||
panel['options']['legend'].update(panel_config['legend'])
|
||||
|
||||
if 'tooltip' in panel_config:
|
||||
panel['options']['tooltip']['mode'] = panel_config['tooltip']
|
||||
if panel_config['tooltip'] == 'multi':
|
||||
panel['options']['tooltip']['sort'] = 'desc'
|
||||
|
||||
for query_name, query_config in panel_config['queries'].items():
|
||||
panel['targets'].append({
|
||||
'refId': query_name,
|
||||
'query': flux_template.render(
|
||||
bucket=bucket,
|
||||
host=monitored_node.name,
|
||||
negative=query_config.get('negative', False),
|
||||
boolean_to_int=query_config.get('boolean_to_int', False),
|
||||
minimum=query_config.get('minimum', None),
|
||||
filters={
|
||||
'host': monitored_node.name,
|
||||
**query_config['filters'],
|
||||
},
|
||||
exists=query_config.get('exists', []),
|
||||
function=query_config.get('function', None),
|
||||
).strip()
|
||||
})
|
||||
|
||||
dashboard['panels'].append(panel)
|
||||
|
||||
files[f'/var/lib/grafana/dashboards/{monitored_node.name}.json'] = {
|
||||
'content': json.dumps(dashboard, indent=4),
|
||||
'owner': 'grafana',
|
||||
'group': 'grafana',
|
||||
'triggers': [
|
||||
'svc_systemd:grafana-server:restart',
|
||||
]
|
||||
}
|
||||
|
|
@ -1,147 +0,0 @@
|
|||
from mako.template import Template
|
||||
|
||||
postgres_password = repo.vault.password_for(f'{node.name} postgres role grafana')
|
||||
|
||||
defaults = {
|
||||
'apt': {
|
||||
'packages': {
|
||||
'grafana': {},
|
||||
},
|
||||
'sources': {
|
||||
'grafana': {
|
||||
'urls': {
|
||||
'https://packages.grafana.com/oss/deb',
|
||||
},
|
||||
'suites': {
|
||||
'stable',
|
||||
},
|
||||
'components': {
|
||||
'main',
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
},
|
||||
'grafana': {
|
||||
'config': {
|
||||
'server': {
|
||||
'http_port': 8300,
|
||||
},
|
||||
'database': {
|
||||
'url': f'postgres://grafana:{postgres_password}@localhost:5432/grafana',
|
||||
},
|
||||
'remote_cache': {
|
||||
'type': 'redis',
|
||||
'connstr': 'addr=127.0.0.1:6379',
|
||||
},
|
||||
'security': {
|
||||
'admin_user': 'admin',
|
||||
'admin_password': str(repo.vault.password_for(f'{node.name} grafana admin')),
|
||||
},
|
||||
'users': {
|
||||
'allow_signup': False,
|
||||
},
|
||||
},
|
||||
'datasources': {},
|
||||
},
|
||||
'postgresql': {
|
||||
'databases': {
|
||||
'grafana': {
|
||||
'owner': 'grafana',
|
||||
},
|
||||
},
|
||||
'roles': {
|
||||
'grafana': {
|
||||
'password': postgres_password,
|
||||
},
|
||||
},
|
||||
},
|
||||
'zfs': {
|
||||
'datasets': {
|
||||
'tank/grafana': {
|
||||
'mountpoint': '/var/lib/grafana'
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'grafana/config/server/domain',
|
||||
)
|
||||
def domain(metadata):
|
||||
return {
|
||||
'grafana': {
|
||||
'config': {
|
||||
'server': {
|
||||
'domain': metadata.get('grafana/hostname'),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'grafana/datasources',
|
||||
)
|
||||
def influxdb2(metadata):
|
||||
influxdb_metadata = repo.get_node(metadata.get('grafana/influxdb_node')).metadata.get('influxdb')
|
||||
|
||||
return {
|
||||
'grafana': {
|
||||
'datasources': {
|
||||
f"influxdb@{influxdb_metadata['hostname']}": {
|
||||
'type': 'influxdb',
|
||||
'url': f"http://{influxdb_metadata['hostname']}:{influxdb_metadata['port']}",
|
||||
'jsonData': {
|
||||
'version': 'Flux',
|
||||
'organization': influxdb_metadata['org'],
|
||||
'defaultBucket': influxdb_metadata['bucket'],
|
||||
},
|
||||
'secureJsonData': {
|
||||
'token': str(influxdb_metadata['readonly_token']),
|
||||
},
|
||||
'editable': False,
|
||||
'isDefault': True,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'grafana/datasources',
|
||||
)
|
||||
def datasource_key_to_name(metadata):
|
||||
return {
|
||||
'grafana': {
|
||||
'datasources': {
|
||||
name: {'name': name} for name in metadata.get('grafana/datasources').keys()
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'dns',
|
||||
)
|
||||
def dns(metadata):
|
||||
return {
|
||||
'dns': {
|
||||
metadata.get('grafana/hostname'): repo.libs.ip.get_a_records(metadata),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'nginx/vhosts',
|
||||
)
|
||||
def nginx(metadata):
|
||||
return {
|
||||
'nginx': {
|
||||
'vhosts': {
|
||||
metadata.get('grafana/hostname'): {
|
||||
'content': 'grafana/vhost.conf',
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
|
@ -1,5 +0,0 @@
|
|||
GRUB_DEFAULT=0
|
||||
GRUB_TIMEOUT=1
|
||||
GRUB_DISTRIBUTOR=`lsb_release -i -s 2> /dev/null || echo Debian`
|
||||
GRUB_CMDLINE_LINUX_DEFAULT="${' '.join(kernel_params)}"
|
||||
GRUB_CMDLINE_LINUX=""
|
|
@ -1,20 +0,0 @@
|
|||
files = {
|
||||
'/etc/default/grub': {
|
||||
'content_type': 'mako',
|
||||
'context': {
|
||||
'timeout': node.metadata.get('grub/timeout'),
|
||||
'kernel_params': node.metadata.get('grub/kernel_params'),
|
||||
},
|
||||
'mode': '0644',
|
||||
'triggers': {
|
||||
'action:update-grub',
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
actions = {
|
||||
'update-grub': {
|
||||
'command': 'update-grub',
|
||||
'triggered': True,
|
||||
},
|
||||
}
|
|
@ -1,6 +0,0 @@
|
|||
defaults = {
|
||||
'grub': {
|
||||
'timeout': 1,
|
||||
'kernel_params': set(),
|
||||
},
|
||||
}
|
|
@ -1,10 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
date=$(date --utc +%s%N)
|
||||
|
||||
for cpu in $(cat /sys/devices/system/cpu/cpu0/cpufreq/affected_cpus)
|
||||
do
|
||||
# echo "cpu_frequency,cpu=$cpu min=$(expr $(cat /sys/devices/system/cpu/cpu$cpu/cpufreq/scaling_min_freq) / 1000) $date"
|
||||
echo "cpu_frequency,cpu=$cpu current=$(expr $(cat /sys/devices/system/cpu/cpu$cpu/cpufreq/scaling_cur_freq) / 1000) $date"
|
||||
# echo "cpu_frequency,cpu=$cpu max=$(expr $(cat /sys/devices/system/cpu/cpu$cpu/cpufreq/scaling_max_freq) / 1000) $date"
|
||||
done
|
|
@ -1,8 +0,0 @@
|
|||
files = {
|
||||
'/usr/local/share/telegraf/cpu_frequency': {
|
||||
'mode': '0755',
|
||||
'triggers': {
|
||||
'svc_systemd:telegraf:restart',
|
||||
},
|
||||
},
|
||||
}
|
|
@ -1,38 +0,0 @@
|
|||
defaults = {
|
||||
'apt': {
|
||||
'packages': {
|
||||
'lm-sensors': {},
|
||||
'console-data': {}, # leykeys de
|
||||
},
|
||||
},
|
||||
'grafana_rows': {
|
||||
'health',
|
||||
},
|
||||
'sudoers': {
|
||||
'telegraf': {
|
||||
'/usr/local/share/telegraf/cpu_frequency',
|
||||
},
|
||||
},
|
||||
'telegraf': {
|
||||
'config': {
|
||||
'inputs': {
|
||||
'sensors': {repo.libs.hashable.hashable({
|
||||
'timeout': '2s',
|
||||
})},
|
||||
'exec': {
|
||||
repo.libs.hashable.hashable({
|
||||
'commands': ["sudo /usr/local/share/telegraf/cpu_frequency"],
|
||||
'name_override': "cpu_frequency",
|
||||
'data_format': "influx",
|
||||
}),
|
||||
# repo.libs.hashable.hashable({
|
||||
# 'commands': ["/bin/bash -c 'expr $(cat /sys/class/thermal/thermal_zone0/temp) / 1000'"],
|
||||
# 'name_override': "cpu_temperature",
|
||||
# 'data_format': "value",
|
||||
# 'data_type': "integer",
|
||||
# }),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
|
@ -1,27 +0,0 @@
|
|||
from ipaddress import ip_network, ip_interface
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'systemd/units',
|
||||
)
|
||||
def network(metadata):
|
||||
interface = ip_interface(metadata.get('network/internal/ipv4'))
|
||||
network = ip_interface(f'{interface.ip}/24').network
|
||||
gateway = network[1]
|
||||
|
||||
return {
|
||||
'systemd': {
|
||||
'units': {
|
||||
'internal.network': {
|
||||
f'Route#hetzner_gateway': {
|
||||
'Destination': str(gateway),
|
||||
'Scope': 'link',
|
||||
},
|
||||
f'Route#hetzner_network': {
|
||||
'Destination': str(network),
|
||||
'Gateway': str(gateway),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue