Compare commits

..

9 commits

SHA1        Message  Date
cdf79c2bd8  wip  2023-08-01 17:13:41 +02:00
13e52027cf  wip  2023-08-01 17:13:41 +02:00
96c2df1c09  wip  2023-08-01 17:13:41 +02:00
cadb32cffd  wip  2023-08-01 17:13:41 +02:00
8b296ba6db  wip  2023-08-01 17:13:41 +02:00
b1ea126c2a  wip  2023-08-01 17:13:41 +02:00
1bf795c262  wip  2023-08-01 17:13:41 +02:00
3546633969  wip  2023-08-01 17:13:41 +02:00
f8f500718b  wip  2023-08-01 17:13:41 +02:00
182 changed files with 833 additions and 4180 deletions

2
.envrc

@@ -1,7 +1,5 @@
 #!/usr/bin/env bash

-PATH_add bin
-
 source_env ~/.local/share/direnv/pyenv
 source_env ~/.local/share/direnv/venv
 source_env ~/.local/share/direnv/bundlewrap


@@ -37,12 +37,3 @@ fi
 telegraf: execd for daemons
 TEST
-
-# git signing
-git config --global gpg.format ssh
-git config --global commit.gpgsign true
-git config user.name CroneKorkN
-git config user.email i@ckn.li
-git config user.signingkey "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILMVroYmswD4tLk6iH+2tvQiyaMe42yfONDsPDIdFv6I"

195
bin/dnssec Executable file

@ -0,0 +1,195 @@
#!/usr/bin/env python3
# https://medium.com/iocscan/how-dnssec-works-9c652257be0
# https://de.wikipedia.org/wiki/RRSIG_Resource_Record
# https://metebalci.com/blog/a-minimum-complete-tutorial-of-dnssec/
# https://bind9.readthedocs.io/en/latest/dnssec-guide.html
from sys import argv
from os.path import realpath, dirname
from bundlewrap.repo import Repository
from base64 import b64decode, urlsafe_b64encode
from cryptography.utils import int_to_bytes
from cryptography.hazmat.primitives import serialization as crypto_serialization
from struct import pack, unpack
from hashlib import sha1, sha256
from json import dumps
from cache_to_disk import cache_to_disk
def long_to_base64(n):
return urlsafe_b64encode(int_to_bytes(n, None)).decode()
zone = argv[1]
repo = Repository(dirname(dirname(realpath(__file__))))
flags = 256
protocol = 3
algorithm = 8
algorithm_name = 'RSASHA256'
# ZSK/KSK DNSKEY
#
# https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/#cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateNumbers
# https://crypto.stackexchange.com/a/21104
def generate_signing_key_pair(zone, salt):
privkey = repo.libs.rsa.generate_deterministic_rsa_private_key(
b64decode(str(repo.vault.random_bytes_as_base64_for(f'dnssec {salt} ' + zone)))
)
public_exponent = privkey.private_numbers().public_numbers.e
modulo = privkey.private_numbers().public_numbers.n
private_exponent = privkey.private_numbers().d
prime1 = privkey.private_numbers().p
prime2 = privkey.private_numbers().q
exponent1 = privkey.private_numbers().dmp1
exponent2 = privkey.private_numbers().dmq1
coefficient = privkey.private_numbers().iqmp
dnskey = ''.join(privkey.public_key().public_bytes(
crypto_serialization.Encoding.PEM,
crypto_serialization.PublicFormat.SubjectPublicKeyInfo
).decode().split('\n')[1:-2])
return {
'dnskey': dnskey,
'dnskey_record': f'{zone}. IN DNSKEY {flags} {protocol} {algorithm} {dnskey}',
'privkey': privkey,
'privkey_file': {
'Private-key-format': 'v1.3',
'Algorithm': f'{algorithm} ({algorithm_name})',
'Modulus': long_to_base64(modulo),
'PublicExponent': long_to_base64(public_exponent),
'PrivateExponent': long_to_base64(private_exponent),
'Prime1': long_to_base64(prime1),
'Prime2': long_to_base64(prime2),
'Exponent1': long_to_base64(exponent1),
'Exponent2': long_to_base64(exponent2),
'Coefficient': long_to_base64(coefficient),
'Created': 20230428110109,
'Publish': 20230428110109,
'Activate': 20230428110109,
},
}
# DS
#
# https://gist.github.com/wido/4c6288b2f5ba6d16fce37dca3fc2cb4a#file-dnskey_to_dsrecord-py-L40
def _calc_ds(zone, flags, protocol, algorithm, dnskey):
if zone.endswith('.') is False:
zone += '.'
signature = bytes()
for i in zone.split('.'):
signature += pack('B', len(i)) + i.encode()
signature += pack('!HBB', int(flags), int(protocol), int(algorithm))
signature += b64decode(dnskey)
return {
'sha1': sha1(signature).hexdigest().upper(),
'sha256': sha256(signature).hexdigest().upper(),
}
def _calc_keyid(flags, protocol, algorithm, dnskey):
st = pack('!HBB', int(flags), int(protocol), int(algorithm))
st += b64decode(dnskey)
cnt = 0
for idx in range(len(st)):
s = unpack('B', st[idx:idx+1])[0]
if (idx % 2) == 0:
cnt += s << 8
else:
cnt += s
return ((cnt & 0xFFFF) + (cnt >> 16)) & 0xFFFF
def dnskey_to_ds(zone, flags, protocol, algorithm, dnskey):
keyid = _calc_keyid(flags, protocol, algorithm, dnskey)
ds = _calc_ds(zone, flags, protocol, algorithm, dnskey)
return [
f"{zone}. IN DS {str(keyid)} {str(algorithm)} 1 {ds['sha1'].lower()}",
f"{zone}. IN DS {str(keyid)} {str(algorithm)} 2 {ds['sha256'].lower()}",
]
# Result
#@cache_to_disk(30)
def generate_dnssec_for_zone(zone):
zsk_data = generate_signing_key_pair(zone, salt='zsk')
ksk_data = generate_signing_key_pair(zone, salt='ksk')
ds_records = dnskey_to_ds(zone, flags, protocol, algorithm, ksk_data['dnskey'])
return {
'zsk_data': zsk_data,
'ksk_data': ksk_data,
'ds_records': ds_records,
}
print(
generate_dnssec_for_zone(zone),
)
# #########################
# from dns import rrset, rdatatype, rdata
# from dns.rdataclass import IN
# from dns.dnssec import sign, make_dnskey
# from dns.name import Name
# from dns.rdtypes.IN.A import A
# data = generate_dnssec_for_zone(zone)
# zone_name = Name(f'{zone}.'.split('.'))
# assert zone_name.is_absolute()
# # rrset = rrset.from_text_list(
# # name=Name(['test']).derelativize(zone_name),
# # origin=zone_name,
# # relativize=False,
# # ttl=60,
# # rdclass=IN,
# # rdtype=rdatatype.from_text('A'),
# # text_rdatas=[
# # '100.2.3.4',
# # '10.0.0.55',
# # ],
# # )
# rrset = rrset.from_rdata_list(
# name=Name(['test']).derelativize(zone_name),
# ttl=60,
# rdatas=[
# rdata.from_text(
# rdclass=IN,
# rdtype=rdatatype.from_text('A'),
# origin=zone_name,
# tok='1.2.3.4',
# relativize=False,
# ),
# A(IN, rdatatype.from_text('A'), '10.20.30.40')
# ],
# )
# # for e in rrset:
# # print(e.is_absolute())
# dnskey = make_dnskey(
# public_key=data['zsk_data']['privkey'].public_key(),
# algorithm=algorithm,
# flags=flags,
# protocol=protocol,
# )
# sign(
# rrset=rrset,
# private_key=data['zsk_data']['privkey'],
# signer=Name(f'{zone}.'),
# dnskey=dnskey,
# lifetime=99999,
# )
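
The DS records produced above embed a key tag from `_calc_keyid`, which is the RFC 4034 Appendix B checksum over the same DNSKEY RDATA bytes that `_calc_ds` digests. A standalone sketch of that computation; the base64 key material is a made-up placeholder:

```python
from base64 import b64decode
from struct import pack

def keytag(flags: int, protocol: int, algorithm: int, dnskey_b64: str) -> int:
    # RFC 4034 Appendix B: sum the bytes of the DNSKEY RDATA, weighting
    # even-indexed bytes by 256, then fold the carry back into 16 bits
    rdata = pack('!HBB', flags, protocol, algorithm) + b64decode(dnskey_b64)
    acc = sum(b << 8 if i % 2 == 0 else b for i, b in enumerate(rdata))
    return ((acc & 0xFFFF) + (acc >> 16)) & 0xFFFF

print(keytag(256, 3, 8, 'AwEAAcFcGsaxxdgiuuGmCkVI'))
```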

47
bin/test Executable file

@ -0,0 +1,47 @@
import dns.dnssec
import dns.rdata
import dns.rdataclass
import dns.rdatatype
import dns.zone
from cryptography.hazmat.primitives.asymmetric import rsa

# Define the zone name; record names below are relative to this origin
zone_name = 'example.com.'

# Define the DNSKEY algorithm and key size (8 = RSASHA256)
algorithm = 8
key_size = 2048

# Generate the signing key; dns.dnssec.make_dnskey() only wraps an
# existing public key in a DNSKEY rdata, it does not generate key material
private_key = rsa.generate_private_key(public_exponent=65537, key_size=key_size)
dnskey = dns.dnssec.make_dnskey(private_key.public_key(), algorithm)

# Create the zone
zone = dns.zone.Zone(origin=zone_name)

# Helper: Rdataset.add() expects an Rdata object plus a TTL
def add_record(name, rdtype, text, ttl=3600):
    rdataset = zone.get_rdataset(name, rdtype=rdtype, create=True)
    rdataset.add(dns.rdata.from_text(dns.rdataclass.IN, rdtype, text), ttl=ttl)

# Add A, TXT and MX records to the zone
add_record('www', dns.rdatatype.A, '192.0.2.1')
add_record('@', dns.rdatatype.TXT, '"Hello, world!"')
add_record('@', dns.rdatatype.MX, '10 mail.example.com.')

# Add the DNSKEY record at the zone apex
zone.get_rdataset('@', rdtype=dns.rdatatype.DNSKEY, create=True).add(dnskey, ttl=86400)

# Sign the zone with the DNSSEC key pair (dns.dnssec.sign_zone() requires
# dnspython >= 2.3; keys is a list of (private_key, DNSKEY rdata) pairs)
dns.dnssec.sign_zone(zone, keys=[(private_key, dnskey)])

# Print the resulting zone with the RRSIG records
print(zone.to_text())


@@ -10,6 +10,7 @@ nodes = [
     for node in sorted(repo.nodes_in_group('debian'))
     if not node.dummy
 ]
+reboot_nodes = []

 print('updating nodes:', sorted(node.name for node in nodes))

@@ -23,13 +24,14 @@ for node in nodes:
     print(node.run('DEBIAN_FRONTEND=noninteractive apt update').stdout.decode())
     print(node.run('DEBIAN_FRONTEND=noninteractive apt list --upgradable').stdout.decode())
     if int(node.run('DEBIAN_FRONTEND=noninteractive apt list --upgradable 2> /dev/null | grep upgradable | wc -l').stdout.decode()):
-        print(node.run('DEBIAN_FRONTEND=noninteractive apt -qy full-upgrade').stdout.decode())
+        print(node.run('DEBIAN_FRONTEND=noninteractive apt -y dist-upgrade').stdout.decode())
+        reboot_nodes.append(node)

 # REBOOT IN ORDER

 wireguard_servers = [
     node
-    for node in nodes
+    for node in reboot_nodes
     if node.has_bundle('wireguard')
     and (
         ip_interface(node.metadata.get('wireguard/my_ip')).network.prefixlen <

@@ -39,7 +41,7 @@ wireguard_servers = [
 wireguard_s2s = [
     node
-    for node in nodes
+    for node in reboot_nodes
     if node.has_bundle('wireguard')
     and (
         ip_interface(node.metadata.get('wireguard/my_ip')).network.prefixlen ==

@@ -49,7 +51,7 @@ wireguard_s2s = [
 everything_else = [
     node
-    for node in nodes
+    for node in reboot_nodes
     if not node.has_bundle('wireguard')
 ]

@@ -60,11 +62,8 @@ for node in [
     *wireguard_s2s,
     *wireguard_servers,
 ]:
+    print('rebooting', node.name)
     try:
-        if node.run('test -e /var/run/reboot-required', may_fail=True).return_code == 0:
-            print('rebooting', node.name)
-            print(node.run('systemctl reboot').stdout.decode())
-        else:
-            print('not rebooting', node.name)
+        print(node.run('systemctl reboot').stdout.decode())
     except Exception as e:
         print(e)
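
The wireguard buckets above appear to key off the interface's network size: a node whose `wireguard/my_ip` covers a whole subnet (smaller prefixlen) is treated as a server and rebooted last, while /31-style site-to-site links land in `wireguard_s2s`. The comparison in isolation, with made-up addresses:

```python
from ipaddress import ip_interface

print(ip_interface('10.0.0.1/24').network.prefixlen)  # 24 -> hub/server bucket
print(ip_interface('10.0.1.1/31').network.prefixlen)  # 31 -> site-to-site bucket
```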


@@ -5,17 +5,9 @@ from os.path import realpath, dirname
 from sys import argv
 from ipaddress import ip_network, ip_interface

-if len(argv) != 3:
-    print(f'usage: {argv[0]} <node> <client>')
-    exit(1)
-
 repo = Repository(dirname(dirname(realpath(__file__))))
 server_node = repo.get_node(argv[1])

-if argv[2] not in server_node.metadata.get('wireguard/clients'):
-    print(f'client {argv[2]} not found in: {server_node.metadata.get("wireguard/clients").keys()}')
-    exit(1)
-
 data = server_node.metadata.get(f'wireguard/clients/{argv[2]}')
 vpn_network = ip_interface(server_node.metadata.get('wireguard/my_ip')).network

@@ -28,7 +20,9 @@ for peer in server_node.metadata.get('wireguard/s2s').values():
     if not ip_network(network).subnet_of(vpn_network):
         allowed_ips.append(ip_network(network))

-conf = f'''
+conf = \
+f'''>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
 [Interface]
 PrivateKey = {repo.libs.wireguard.privkey(data['peer_id'])}
 ListenPort = 51820

@@ -41,12 +35,11 @@ PresharedKey = {repo.libs.wireguard.psk(data['peer_id'], server_node.metadata.ge
 AllowedIPs = {', '.join(str(client_route) for client_route in sorted(allowed_ips))}
 Endpoint = {ip_interface(server_node.metadata.get('network/external/ipv4')).ip}:51820
 PersistentKeepalive = 10
-'''
+<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'''

-print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
 print(conf)
-print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')

-if input("print qrcode? [Yn]: ").upper() in ['', 'Y']:
+if input("print qrcode? [yN]: ").upper() == 'Y':
     import pyqrcode
     print(pyqrcode.create(conf).terminal(quiet_zone=1))


@@ -13,9 +13,6 @@
         'deb',
         'deb-src',
     },
-    'options': {  # optional
-        'aarch': 'amd64',
-    },
     'urls': {
         'https://deb.debian.org/debian',
     },


@@ -23,12 +23,12 @@ directories = {
             'action:apt_update',
         },
     },
-    # '/etc/apt/listchanges.conf.d': {
-    #     'purge': True,
-    #     'triggers': {
-    #         'action:apt_update',
-    #     },
-    # },
+    '/etc/apt/listchanges.conf.d': {
+        'purge': True,
+        'triggers': {
+            'action:apt_update',
+        },
+    },
     '/etc/apt/preferences.d': {
         'purge': True,
         'triggers': {

@@ -50,24 +50,17 @@ files = {
             'action:apt_update',
         },
     },
-    '/etc/apt/sources.list': {
-        'content': '# managed by bundlewrap\n',
-        'triggers': {
-            'action:apt_update',
-        },
-    },
-    # '/etc/apt/listchanges.conf': {
-    #     'content': repo.libs.ini.dumps(node.metadata.get('apt/list_changes')),
-    # },
+    '/etc/apt/listchanges.conf': {
+        'content': repo.libs.ini.dumps(node.metadata.get('apt/list_changes')),
+    },
     '/usr/lib/nagios/plugins/check_apt_upgradable': {
         'mode': '0755',
     },
-    # /etc/kernel/postinst.d/apt-auto-removal
 }

 actions = {
     'apt_update': {
-        'command': 'apt-get update',
+        'command': 'apt-get update -o APT::Update::Error-Mode=any',
         'needed_by': {
             'pkg_apt:',
         },


@@ -1,24 +1,13 @@
 defaults = {
     'apt': {
-        'packages': {
-            'apt-listchanges': {
-                'installed': False,
-            },
-        },
         'config': {
             'DPkg': {
                 'Pre-Install-Pkgs': {
                     '/usr/sbin/dpkg-preconfigure --apt || true',
                 },
                 'Post-Invoke': {
-                    # keep package cache empty
                     '/bin/rm -f /var/cache/apt/archives/*.deb || true',
                 },
-                'Options': {
-                    # https://unix.stackexchange.com/a/642541/357916
-                    '--force-confold',
-                    '--force-confdef',
-                },
             },
             'APT': {
                 'NeverAutoRemove': {

@@ -40,13 +29,7 @@ defaults = {
                     'metapackages',
                     'tasks',
                 },
-                'Move-Autobit-Sections': {
-                    'oldlibs',
-                },
-                'Update': {
-                    # https://unix.stackexchange.com/a/653377/357916
-                    'Error-Mode': 'any',
-                },
+                'Move-Autobit-Sections': 'oldlibs',
             },
         },
         'sources': {},

@@ -133,45 +116,45 @@ def unattended_upgrades(metadata):
     }

-# @metadata_reactor.provides(
-#     'apt/config',
-#     'apt/list_changes',
-# )
-# def listchanges(metadata):
-#     return {
-#         'apt': {
-#             'config': {
-#                 'DPkg': {
-#                     'Pre-Install-Pkgs': {
-#                         '/usr/bin/apt-listchanges --apt || test $? -lt 10',
-#                     },
-#                     'Tools': {
-#                         'Options': {
-#                             '/usr/bin/apt-listchanges': {
-#                                 'Version': '2',
-#                                 'InfoFD': '20',
-#                             },
-#                         },
-#                     },
-#                 },
-#                 'Dir': {
-#                     'Etc': {
-#                         'apt-listchanges-main': 'listchanges.conf',
-#                         'apt-listchanges-parts': 'listchanges.conf.d',
-#                     },
-#                 },
-#             },
-#             'list_changes': {
-#                 'apt': {
-#                     'frontend': 'pager',
-#                     'which': 'news',
-#                     'email_address': 'root',
-#                     'email_format': 'text',
-#                     'confirm': 'false',
-#                     'headers': 'false',
-#                     'reverse': 'false',
-#                     'save_seen': '/var/lib/apt/listchanges.db',
-#                 },
-#             },
-#         },
-#     }
+@metadata_reactor.provides(
+    'apt/config',
+    'apt/list_changes',
+)
+def listchanges(metadata):
+    return {
+        'apt': {
+            'config': {
+                'DPkg': {
+                    'Pre-Install-Pkgs': {
+                        '/usr/bin/apt-listchanges --apt || test $? -lt 10',
+                    },
+                },
+                'Tools': {
+                    'Options': {
+                        '/usr/bin/apt-listchanges': {
+                            'Version': '2',
+                            'InfoFD': '20',
+                        },
+                    },
+                },
+                'Dir': {
+                    'Etc': {
+                        'apt-listchanges-main': 'listchanges.conf',
+                        'apt-listchanges-parts': 'listchanges.conf.d',
+                    },
+                },
+            },
+            'list_changes': {
+                'apt': {
+                    'frontend': 'pager',
+                    'which': 'news',
+                    'email_address': 'root',
+                    'email_format': 'text',
+                    'confirm': 'false',
+                    'headers': 'false',
+                    'reverse': 'false',
+                    'save_seen': '/var/lib/apt/listchanges.db',
+                },
+            },
+        },
+    }


@@ -36,7 +36,7 @@ for dataset in config['datasets']:
     if snapshot_datetime < two_days_ago:
         days_ago = (now - snapshot_datetime).days
-        errors.add(f'dataset "{dataset}" has not been backed up for {days_ago} days')
+        errors.add(f'dataset "{dataset}" has no backups sind {days_ago} days')
         continue

 if errors:


@@ -25,8 +25,7 @@ def backup_freshness_check(metadata):
         'datasets': {
             f"{other_node.metadata.get('id')}/{dataset}"
             for other_node in repo.nodes
-            if not other_node.dummy
-            and other_node.has_bundle('backup')
+            if other_node.has_bundle('backup')
             and other_node.has_bundle('zfs')
             and other_node.metadata.get('backup/server') == metadata.get('backup-freshness-check/server')
             for dataset, options in other_node.metadata.get('zfs/datasets').items()


@@ -35,7 +35,6 @@ def zfs(metadata):
     for other_node in repo.nodes:
         if (
-            not other_node.dummy and
             other_node.has_bundle('backup') and
             other_node.metadata.get('backup/server') == node.name
         ):


@@ -1,31 +1,13 @@
 #!/bin/bash

-set -u
+set -exu

 # FIXME: inelegant
 % if wol_command:
 ${wol_command}
 % endif

-exit=0
-failed_paths=""
-
 for path in $(jq -r '.paths | .[]' < /etc/backup/config.json)
 do
-    echo backing up $path
     /opt/backup/backup_path "$path"
-
-    # set exit to 1 if any backup fails
-    if [ $? -ne 0 ]
-    then
-        echo ERROR: backing up $path failed >&2
-        exit=5
-        failed_paths="$failed_paths $path"
-    fi
 done
-
-if [ $exit -ne 0 ]
-then
-    echo "ERROR: failed to backup paths: $failed_paths" >&2
-fi
-
-exit $exit


@@ -1,6 +1,6 @@
 #!/bin/bash

-set -eu
+set -exu

 path=$1
 uuid=$(jq -r .client_uuid < /etc/backup/config.json)

29
bundles/bind/README.md Normal file

@ -0,0 +1,29 @@
## DNSSEC
https://wiki.debian.org/DNSSEC%20Howto%20for%20BIND%209.9+#The_signing_part
https://blog.apnic.net/2021/11/02/dnssec-provisioning-automation-with-cds-cdnskey-in-the-real-world/
https://gist.github.com/wido/4c6288b2f5ba6d16fce37dca3fc2cb4a
```python
import dns.dnssec
algorithm = dns.dnssec.RSASHA256
```
```python
from cryptography.hazmat.primitives.asymmetric import rsa
pk = rsa.generate_private_key(key_size=2048, public_exponent=65537)
```
## Nomenclature

### parent

DS:
    digest of the child zone's KSK, published in the parent zone

### sub

DNSKEY:
    the public key (ZSK and KSK), published in the zone itself

ZSK/KSK:
    zone-signing key / key-signing key
https://www.cloudflare.com/de-de/dns/dnssec/how-dnssec-works/
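
To see what a resolver actually returns for a signed zone's keys and delegation, dnspython can query both record types directly (`example.com` is a stand-in for a real zone):

```python
import dns.resolver

for rdtype in ('DNSKEY', 'DS'):
    try:
        for rdata in dns.resolver.resolve('example.com', rdtype):
            print(rdtype, rdata)
    except dns.resolver.NoAnswer:
        print(rdtype, '-- no records')
```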


@@ -10,7 +10,7 @@ options {
 % if type == 'master':
     notify yes;
-    also-notify { ${' '.join(sorted(f'{ip};' for ip in slave_ips))} };
-    allow-transfer { ${' '.join(sorted(f'{ip};' for ip in slave_ips))} };
+    also-notify { ${' '.join([f'{ip};' for ip in slave_ips])} };
+    allow-transfer { ${' '.join([f'{ip};' for ip in slave_ips])} };
 % endif
 };


@@ -19,7 +19,7 @@ directories[f'/var/lib/bind'] = {
         'svc_systemd:bind9',
     ],
     'triggers': [
-        'svc_systemd:bind9:reload',
+        'svc_systemd:bind9:restart',
     ],
 }

@@ -29,7 +29,7 @@ files['/etc/default/bind9'] = {
         'svc_systemd:bind9',
     ],
     'triggers': [
-        'svc_systemd:bind9:reload',
+        'svc_systemd:bind9:restart',
     ],
 }

@@ -43,7 +43,7 @@ files['/etc/bind/named.conf'] = {
         'svc_systemd:bind9',
     ],
     'triggers': [
-        'svc_systemd:bind9:reload',
+        'svc_systemd:bind9:restart',
     ],
 }

@@ -63,7 +63,7 @@ files['/etc/bind/named.conf.options'] = {
         'svc_systemd:bind9',
     ],
     'triggers': [
-        'svc_systemd:bind9:reload',
+        'svc_systemd:bind9:restart',
     ],
 }

@@ -93,7 +93,7 @@ files['/etc/bind/named.conf.local'] = {
         'svc_systemd:bind9',
     ],
     'triggers': [
-        'svc_systemd:bind9:reload',
+        'svc_systemd:bind9:restart',
     ],
 }

@@ -106,7 +106,7 @@ for view_name, view_conf in master_node.metadata.get('bind/views').items():
         'svc_systemd:bind9',
     ],
     'triggers': [
-        'svc_systemd:bind9:reload',
+        'svc_systemd:bind9:restart',
     ],
 }

@@ -127,7 +127,7 @@ for view_name, view_conf in master_node.metadata.get('bind/views').items():
         'svc_systemd:bind9',
     ],
     'triggers': [
-        'svc_systemd:bind9:reload',
+        'svc_systemd:bind9:restart',
     ],
 }

@@ -139,6 +139,6 @@ actions['named-checkconf'] = {
     'unless': 'named-checkconf -z',
     'needs': [
         'svc_systemd:bind9',
-        'svc_systemd:bind9:reload',
+        'svc_systemd:bind9:restart',
     ]
 }


@@ -3,7 +3,6 @@ from json import dumps
 h = repo.libs.hashable.hashable
 repo.libs.bind.repo = repo
-
 defaults = {
     'apt': {
         'packages': {


@ -1,160 +0,0 @@
#!/usr/bin/env python3
import os
import datetime
import numpy as np
import matplotlib.pyplot as plt
import soundfile as sf
from scipy.fft import rfft, rfftfreq
import shutil
import traceback
RECORDINGS_DIR = "recordings"
PROCESSED_RECORDINGS_DIR = "recordings/processed"
DETECTIONS_DIR = "events"
DETECT_FREQUENCY = 211 # Hz
DETECT_FREQUENCY_TOLERANCE = 2 # Hz
ADJACENCY_FACTOR = 2 # area to look for the frequency (e.g. 2 means 100Hz to 400Hz for 200Hz detection)
BLOCK_SECONDS = 3 # seconds (longer means more frequency resolution, but less time resolution)
DETECTION_DISTANCE_SECONDS = 30 # seconds (minimum time between detections)
BLOCK_OVERLAP_FACTOR = 0.9 # overlap between blocks (0.2 means 20% overlap)
MIN_SIGNAL_QUALITY = 1000.0 # maximum noise level (relative DB) to consider a detection valid
PLOT_PADDING_START_SECONDS = 2 # seconds (padding before and after the event in the plot)
PLOT_PADDING_END_SECONDS = 3 # seconds (padding before and after the event in the plot)
DETECTION_DISTANCE_BLOCKS = DETECTION_DISTANCE_SECONDS // BLOCK_SECONDS # number of blocks to skip after a detection
DETECT_FREQUENCY_FROM = DETECT_FREQUENCY - DETECT_FREQUENCY_TOLERANCE # Hz
DETECT_FREQUENCY_TO = DETECT_FREQUENCY + DETECT_FREQUENCY_TOLERANCE # Hz
def process_recording(filename):
print('processing', filename)
# get ISO 8601 nanosecond recording date from filename
date_string_from_filename = os.path.splitext(filename)[0]
recording_date = datetime.datetime.strptime(date_string_from_filename, "%Y-%m-%d_%H-%M-%S.%f%z")
# get data and metadata from recording
path = os.path.join(RECORDINGS_DIR, filename)
soundfile = sf.SoundFile(path)
samplerate = soundfile.samplerate
samples_per_block = int(BLOCK_SECONDS * samplerate)
overlapping_samples = int(samples_per_block * BLOCK_OVERLAP_FACTOR)
sample_num = 0
current_event = None
while sample_num < len(soundfile):
soundfile.seek(sample_num)
block = soundfile.read(frames=samples_per_block, dtype='float32', always_2d=False)
if len(block) == 0:
break
# calculate FFT
labels = rfftfreq(len(block), d=1/samplerate)
complex_amplitudes = rfft(block)
amplitudes = np.abs(complex_amplitudes)
# get the frequency with the highest amplitude within the search range
search_amplitudes = amplitudes[(labels >= DETECT_FREQUENCY_FROM/ADJACENCY_FACTOR) & (labels <= DETECT_FREQUENCY_TO*ADJACENCY_FACTOR)]
search_labels = labels[(labels >= DETECT_FREQUENCY_FROM/ADJACENCY_FACTOR) & (labels <= DETECT_FREQUENCY_TO*ADJACENCY_FACTOR)]
max_amplitude = max(search_amplitudes)
max_amplitude_index = np.argmax(search_amplitudes)
max_freq = search_labels[max_amplitude_index]
max_freq_detected = DETECT_FREQUENCY_FROM <= max_freq <= DETECT_FREQUENCY_TO
# calculate signal quality
adjacent_amplitudes = amplitudes[(labels < DETECT_FREQUENCY_FROM) | (labels > DETECT_FREQUENCY_TO)]
signal_quality = max_amplitude/np.mean(adjacent_amplitudes)
good_signal_quality = signal_quality > MIN_SIGNAL_QUALITY
# conclude detection
if (
max_freq_detected and
good_signal_quality
):
block_date = recording_date + datetime.timedelta(seconds=sample_num / samplerate)
# detecting an event
if not current_event:
current_event = {
'start_at': block_date,
'end_at': block_date,
'start_sample': sample_num,
'end_sample': sample_num + samples_per_block,
'start_freq': max_freq,
'end_freq': max_freq,
'max_amplitude': max_amplitude,
}
else:
current_event.update({
'end_at': block_date,
'end_freq': max_freq,
'end_sample': sample_num + samples_per_block,
'max_amplitude': max(max_amplitude, current_event['max_amplitude']),
})
print(f'- {block_date.strftime('%Y-%m-%d %H:%M:%S')}: {max_amplitude:.1f}rDB @ {max_freq:.1f}Hz (signal {signal_quality:.3f}x)')
else:
# not detecting an event
if current_event:
duration = (current_event['end_at'] - current_event['start_at']).total_seconds()
current_event['duration'] = duration
print(f'🔊 {current_event['start_at'].strftime('%Y-%m-%d %H:%M:%S')} ({duration:.1f}s): {current_event['start_freq']:.1f}Hz->{current_event['end_freq']:.1f}Hz @{current_event['max_amplitude']:.0f}rDB')
# read full audio clip again for writing
write_event(current_event=current_event, soundfile=soundfile, samplerate=samplerate)
current_event = None
sample_num += DETECTION_DISTANCE_BLOCKS * samples_per_block
sample_num += samples_per_block - overlapping_samples
# write a spectrogram using the sound from start to end of the event
def write_event(current_event, soundfile, samplerate):
# date and filename
event_date = current_event['start_at'] - datetime.timedelta(seconds=PLOT_PADDING_START_SECONDS)
filename_prefix = event_date.strftime('%Y-%m-%d_%H-%M-%S.%f%z')
# event clip
event_start_sample = current_event['start_sample'] - samplerate * PLOT_PADDING_START_SECONDS
event_end_sample = current_event['end_sample'] + samplerate * PLOT_PADDING_END_SECONDS
total_samples = event_end_sample - event_start_sample
soundfile.seek(event_start_sample)
event_clip = soundfile.read(frames=total_samples, dtype='float32', always_2d=False)
# write flac
flac_path = os.path.join(DETECTIONS_DIR, f"{filename_prefix}.flac")
sf.write(flac_path, event_clip, samplerate, format='FLAC')
# write spectrogram
plt.figure(figsize=(8, 6))
plt.specgram(event_clip, Fs=samplerate, NFFT=samplerate, noverlap=samplerate//2, cmap='inferno', vmin=-100, vmax=-10)
plt.title(f"Bootshorn @{event_date.strftime('%Y-%m-%d %H:%M:%S%z')}")
plt.xlabel(f"Time {current_event['duration']:.1f}s")
plt.ylabel(f"Frequency {current_event['start_freq']:.1f}Hz -> {current_event['end_freq']:.1f}Hz")
plt.colorbar(label="Intensity (rDB)")
plt.ylim(50, 1000)
plt.savefig(os.path.join(DETECTIONS_DIR, f"{filename_prefix}.png"))
plt.close()
def main():
os.makedirs(RECORDINGS_DIR, exist_ok=True)
os.makedirs(PROCESSED_RECORDINGS_DIR, exist_ok=True)
for filename in sorted(os.listdir(RECORDINGS_DIR)):
if filename.endswith(".flac"):
try:
process_recording(filename)
except Exception as e:
print(f"Error processing {filename}: {e}")
# print stacktrace
traceback.print_exc()
if __name__ == "__main__":
main()
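
The detection core above is an `rfft` peak search: transform a block, find the strongest bin inside the target band, and compare it against the mean of the neighbouring bins. The same idea in isolation, on a synthetic 211 Hz tone:

```python
import numpy as np
from scipy.fft import rfft, rfftfreq

samplerate = 96000
t = np.arange(3 * samplerate) / samplerate           # one 3-second block
block = np.sin(2 * np.pi * 211 * t)                  # synthetic 211 Hz tone

labels = rfftfreq(len(block), d=1/samplerate)        # frequency of each FFT bin
amplitudes = np.abs(rfft(block))
print(f'dominant frequency: {labels[np.argmax(amplitudes)]:.1f} Hz')  # ~211.0
```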


@ -1,23 +0,0 @@
#!/bin/sh
mkdir -p recordings
while true
do
# get date in ISO 8601 format with nanoseconds
PROGRAMM=$(test $(uname) = "Darwin" && echo "gdate" || echo "date")
DATE=$($PROGRAMM "+%Y-%m-%d_%H-%M-%S.%6N%z")
# record audio using ffmpeg
ffmpeg \
-y \
-f pulse \
-i "alsa_input.usb-HANMUS_USB_AUDIO_24BIT_2I2O_1612310-00.analog-stereo" \
-ac 1 \
-ar 96000 \
-sample_fmt s32 \
-t "3600" \
-c:a flac \
-compression_level 12 \
"recordings/$DATE.flac"
done
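
The recorder stamps each file with an ISO-8601-style start time that the process script later parses back with `strptime`; a quick round-trip check using the same format string:

```python
from datetime import datetime, timezone

fmt = '%Y-%m-%d_%H-%M-%S.%f%z'                  # matches the filenames above
name = datetime.now(timezone.utc).strftime(fmt)
print(name, '->', datetime.strptime(name, fmt).isoformat())
```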


@ -1,47 +0,0 @@
# nano /etc/selinux/config
# SELINUX=disabled
# reboot
directories = {
'/opt/bootshorn': {
'owner': 'ckn',
'group': 'ckn',
},
'/opt/bootshorn/recordings': {
'owner': 'ckn',
'group': 'ckn',
},
'/opt/bootshorn/recordings': {
'owner': 'ckn',
'group': 'ckn',
},
'/opt/bootshorn/recordings/processed': {
'owner': 'ckn',
'group': 'ckn',
},
'/opt/bootshorn/events': {
'owner': 'ckn',
'group': 'ckn',
},
}
files = {
'/opt/bootshorn/record': {
'owner': 'ckn',
'group': 'ckn',
'mode': '755',
},
'/opt/bootshorn/process': {
'owner': 'ckn',
'group': 'ckn',
'mode': '755',
},
}
svc_systemd = {
'bootshorn-record.service': {
'needs': {
'file:/opt/bootshorn/record',
},
},
}


@ -1,37 +0,0 @@
defaults = {
'systemd': {
'units': {
'bootshorn-record.service': {
'Unit': {
'Description': 'Bootshorn Recorder',
'After': 'network.target',
},
'Service': {
'User': 'ckn',
'Group': 'ckn',
'Type': 'simple',
'WorkingDirectory': '/opt/bootshorn',
'ExecStart': '/opt/bootshorn/record',
'Restart': 'always',
'RestartSec': 5,
'Environment': {
"XDG_RUNTIME_DIR": "/run/user/1000",
"PULSE_SERVER": "unix:/run/user/1000/pulse/native",
},
},
},
},
},
'systemd-timers': {
'bootshorn-process': {
'command': '/opt/bootshorn/process',
'when': 'minutely',
'working_dir': '/opt/bootshorn',
'user': 'ckn',
'group': 'ckn',
'after': {
'bootshorn-process.service',
},
},
},
}


@@ -1,10 +1,6 @@
 from shlex import quote

-defaults = {
-    'build-ci': {},
-}
-

 @metadata_reactor.provides(
     'users/build-ci/authorized_users',
     'sudoers/build-ci',

@@ -22,7 +18,7 @@ def ssh_keys(metadata):
         },
         'sudoers': {
             'build-ci': {
-                f"/usr/bin/chown -R build-ci\\:{quote(ci['group'])} {quote(ci['path'])}"
+                f"/usr/bin/chown -R build-ci\:{quote(ci['group'])} {quote(ci['path'])}"
                 for ci in metadata.get('build-ci').values()
             }
         },


@@ -9,7 +9,7 @@ defaults = {
     'crystal': {
         # https://software.opensuse.org/download.html?project=devel%3Alanguages%3Acrystal&package=crystal
         'urls': {
-            'http://download.opensuse.org/repositories/devel:/languages:/crystal/Debian_Testing/',
+            'https://download.opensuse.org/repositories/devel:/languages:/crystal/Debian_Testing/',
         },
         'suites': {
             '/',


@@ -6,7 +6,7 @@ ssl_cert = </var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')
 ssl_key = </var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')}/privkey.pem
 ssl_dh = </etc/dovecot/dhparam.pem
 ssl_client_ca_dir = /etc/ssl/certs

-mail_location = maildir:${node.metadata.get('mailserver/maildir')}/%u:INDEX=${node.metadata.get('mailserver/maildir')}/index/%u
+mail_location = maildir:~

 mail_plugins = fts fts_xapian
 namespace inbox {


@@ -20,10 +20,6 @@ directories = {
         'owner': 'vmail',
         'group': 'vmail',
     },
-    '/var/vmail/index': {
-        'owner': 'vmail',
-        'group': 'vmail',
-    },
     '/var/vmail/sieve': {
         'owner': 'vmail',
         'group': 'vmail',


@ -0,0 +1,6 @@
# directories = {
# '/var/lib/downloads': {
# 'owner': 'downloads',
# 'group': 'www-data',
# }
# }


@ -1,23 +0,0 @@
Pg Pass workaround: set manually:
```
root@freescout /ro psql freescout
psql (15.6 (Debian 15.6-0+deb12u1))
Type "help" for help.
freescout=# \password freescout
Enter new password for user "freescout":
Enter it again:
freescout=#
\q
```
# problems
# check if /opt/freescout/.env has been reset
# check `psql -h localhost -d freescout -U freescout -W` with the pw from .env
# chown -R www-data:www-data /opt/freescout
# sudo su - www-data -c 'php /opt/freescout/artisan freescout:clear-cache' -s /bin/bash
# javascript funny? `sudo su - www-data -c 'php /opt/freescout/artisan storage:link' -s /bin/bash`
# user images gone? restore from the backup: `/opt/freescout/.zfs/snapshot/zfs-auto-snap_hourly-2024-11-22-1700/storage/app/public/users` `./customers`


@ -1,66 +0,0 @@
# https://github.com/freescout-helpdesk/freescout/wiki/Installation-Guide
run_as = repo.libs.tools.run_as
php_version = node.metadata.get('php/version')
directories = {
'/opt/freescout': {
'owner': 'www-data',
'group': 'www-data',
# chown -R www-data:www-data /opt/freescout
},
}
actions = {
# 'clone_freescout': {
# 'command': run_as('www-data', 'git clone https://github.com/freescout-helpdesk/freescout.git /opt/freescout'),
# 'unless': 'test -e /opt/freescout/.git',
# 'needs': [
# 'pkg_apt:git',
# 'directory:/opt/freescout',
# ],
# },
# 'pull_freescout': {
# 'command': run_as('www-data', 'git -C /opt/freescout fetch origin dist && git -C /opt/freescout reset --hard origin/dist && git -C /opt/freescout clean -f'),
# 'unless': run_as('www-data', 'git -C /opt/freescout fetch origin && git -C /opt/freescout status -uno | grep -q "Your branch is up to date"'),
# 'needs': [
# 'action:clone_freescout',
# ],
# 'triggers': [
# 'action:freescout_artisan_update',
# f'svc_systemd:php{php_version}-fpm.service:restart',
# ],
# },
# 'freescout_artisan_update': {
# 'command': run_as('www-data', 'php /opt/freescout/artisan freescout:after-app-update'),
# 'triggered': True,
# 'needs': [
# f'svc_systemd:php{php_version}-fpm.service:restart',
# 'action:pull_freescout',
# ],
# },
}
# svc_systemd = {
# f'freescout-cron.service': {},
# }
# files = {
# '/opt/freescout/.env': {
# # https://github.com/freescout-helpdesk/freescout/blob/dist/.env.example
# # Every time you are making changes in .env file, in order changes to take an effect you need to run:
# # ´sudo su - www-data -c 'php /opt/freescout/artisan freescout:clear-cache' -s /bin/bash´
# 'owner': 'www-data',
# 'content': '\n'.join(
# f'{k}={v}' for k, v in
# sorted(node.metadata.get('freescout/env').items())
# ) + '\n',
# 'needs': [
# 'directory:/opt/freescout',
# 'action:clone_freescout',
# ],
# },
# }
#sudo su - www-data -s /bin/bash -c 'php /opt/freescout/artisan freescout:create-user --role admin --firstName M --lastName W --email freescout@freibrief.net --password gyh.jzv2bnf6hvc.HKG --no-interaction'
#sudo su - www-data -s /bin/bash -c 'php /opt/freescout/artisan freescout:create-user --role admin --firstName M --lastName W --email freescout@freibrief.net --password gyh.jzv2bnf6hvc.HKG --no-interaction'


@ -1,121 +0,0 @@
from base64 import b64decode
# hash: SCRAM-SHA-256$4096:tQNfqQi7seqNDwJdHqCHbg==$r3ibECluHJaY6VRwpvPqrtCjgrEK7lAkgtUO8/tllTU=:+eeo4M0L2SowfyHFxT2FRqGzezve4ZOEocSIo11DATA=
database_password = repo.vault.password_for(f'{node.name} postgresql freescout').value
defaults = {
'apt': {
'packages': {
'git': {},
'php': {},
'php-pgsql': {},
'php-fpm': {},
'php-mbstring': {},
'php-xml': {},
'php-imap': {},
'php-zip': {},
'php-gd': {},
'php-curl': {},
'php-intl': {},
},
},
'freescout': {
'env': {
'APP_TIMEZONE': 'Europe/Berlin',
'DB_CONNECTION': 'pgsql',
'DB_HOST': '127.0.0.1',
'DB_PORT': '5432',
'DB_DATABASE': 'freescout',
'DB_USERNAME': 'freescout',
'DB_PASSWORD': database_password,
'APP_KEY': 'base64:' + repo.vault.random_bytes_as_base64_for(f'{node.name} freescout APP_KEY', length=32).value
},
},
'php': {
'php.ini': {
'cgi': {
'fix_pathinfo': '0',
},
},
},
'postgresql': {
'roles': {
'freescout': {
'password_hash': repo.libs.postgres.generate_scram_sha_256(
database_password,
b64decode(repo.vault.random_bytes_as_base64_for(f'{node.name} postgres freescout', length=16).value.encode()),
),
},
},
'databases': {
'freescout': {
'owner': 'freescout',
},
},
},
# 'systemd': {
# 'units': {
# f'freescout-cron.service': {
# 'Unit': {
# 'Description': 'Freescout Cron',
# 'After': 'network.target',
# },
# 'Service': {
# 'User': 'www-data',
# 'Nice': 10,
# 'ExecStart': f"/usr/bin/php /opt/freescout/artisan schedule:run"
# },
# 'Install': {
# 'WantedBy': {
# 'multi-user.target'
# }
# },
# }
# },
# },
'systemd-timers': {
'freescout-cron': {
'command': '/usr/bin/php /opt/freescout/artisan schedule:run',
'when': '*-*-* *:*:00',
'RuntimeMaxSec': '180',
'user': 'www-data',
},
},
'zfs': {
'datasets': {
'tank/freescout': {
'mountpoint': '/opt/freescout',
},
},
},
}
@metadata_reactor.provides(
'freescout/env/APP_URL',
)
def freescout(metadata):
return {
'freescout': {
'env': {
'APP_URL': 'https://' + metadata.get('freescout/domain') + '/',
},
},
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('freescout/domain'): {
'content': 'freescout/vhost.conf',
},
},
},
}
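
The `password_hash` above comes from `repo.libs.postgres.generate_scram_sha_256`, whose output format matches the sample hash in the comment at the top of the file. Assuming the lib implements the standard SCRAM-SHA-256 verifier construction (RFC 5802/RFC 7677), it amounts to this standalone sketch:

```python
import hmac
from base64 import b64encode
from hashlib import pbkdf2_hmac, sha256
from os import urandom

def generate_scram_sha_256(password: str, salt: bytes = b'', iterations: int = 4096) -> str:
    # PostgreSQL verifier: SCRAM-SHA-256$<iter>:<salt>$<StoredKey>:<ServerKey>
    salt = salt or urandom(16)
    salted = pbkdf2_hmac('sha256', password.encode(), salt, iterations)
    stored_key = sha256(hmac.new(salted, b'Client Key', sha256).digest()).digest()
    server_key = hmac.new(salted, b'Server Key', sha256).digest()
    return 'SCRAM-SHA-256${}:{}${}:{}'.format(
        iterations,
        b64encode(salt).decode(),
        b64encode(stored_key).decode(),
        b64encode(server_key).decode(),
    )

print(generate_scram_sha_256('example-password'))
```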


@@ -40,7 +40,7 @@ ENABLE_OPENID_SIGNUP = false
 [service]
 REGISTER_EMAIL_CONFIRM = true
 ENABLE_NOTIFY_MAIL = true
-DISABLE_REGISTRATION = true
+DISABLE_REGISTRATION = false
 ALLOW_ONLY_EXTERNAL_REGISTRATION = false
 ENABLE_CAPTCHA = false
 REQUIRE_SIGNIN_VIEW = false


@@ -2,13 +2,10 @@ from os.path import join
 from bundlewrap.utils.dicts import merge_dict

-version = node.metadata.get('gitea/version')
-assert not version.startswith('v')
-arch = node.metadata.get('system/architecture')
+version = version=node.metadata.get('gitea/version')

 downloads['/usr/local/bin/gitea'] = {
-    # https://forgejo.org/releases/
-    'url': f'https://codeberg.org/forgejo/forgejo/releases/download/v{version}/forgejo-{version}-linux-{arch}',
+    'url': f'https://dl.gitea.io/gitea/{version}/gitea-{version}-linux-amd64',
     'sha256_url': '{url}.sha256',
     'triggers': {
         'svc_systemd:gitea:restart',

@@ -48,7 +45,6 @@ files['/etc/gitea/app.ini'] = {
         ),
     ),
     'owner': 'git',
-    'mode': '0600',
     'context': node.metadata['gitea'],
     'triggers': {
         'svc_systemd:gitea:restart',


@@ -11,20 +11,7 @@ defaults = {
         },
     },
     'gitea': {
-        'conf': {
-            'DEFAULT': {
-                'WORK_PATH': '/var/lib/gitea',
-            },
-            'database': {
-                'DB_TYPE': 'postgres',
-                'HOST': 'localhost:5432',
-                'NAME': 'gitea',
-                'USER': 'gitea',
-                'PASSWD': database_password,
-                'SSL_MODE': 'disable',
-                'LOG_SQL': 'false',
-            },
-        },
+        'conf': {},
     },
     'postgresql': {
         'roles': {

@@ -96,6 +83,15 @@ def conf(metadata):
             'INTERNAL_TOKEN': repo.vault.password_for(f'{node.name} gitea internal_token'),
             'SECRET_KEY': repo.vault.password_for(f'{node.name} gitea security_secret_key'),
         },
+        'database': {
+            'DB_TYPE': 'postgres',
+            'HOST': 'localhost:5432',
+            'NAME': 'gitea',
+            'USER': 'gitea',
+            'PASSWD': database_password,
+            'SSL_MODE': 'disable',
+            'LOG_SQL': 'false',
+        },
         'service': {
             'NO_REPLY_ADDRESS': f'noreply.{domain}',
         },

@@ -118,7 +114,7 @@ def nginx(metadata):
                 'content': 'nginx/proxy_pass.conf',
                 'context': {
                     'target': 'http://127.0.0.1:3500',
-                },
+                }
             },
         },
     },


@@ -26,20 +26,14 @@ actions['reset_grafana_admin_password'] = {
 directories = {
     '/etc/grafana': {},
-    '/etc/grafana/provisioning': {
-        'owner': 'grafana',
-        'group': 'grafana',
-    },
+    '/etc/grafana/provisioning': {},
     '/etc/grafana/provisioning/datasources': {
         'purge': True,
     },
     '/etc/grafana/provisioning/dashboards': {
         'purge': True,
     },
-    '/var/lib/grafana': {
-        'owner': 'grafana',
-        'group': 'grafana',
-    },
+    '/var/lib/grafana': {},
     '/var/lib/grafana/dashboards': {
         'owner': 'grafana',
         'group': 'grafana',

@@ -53,8 +47,6 @@ directories = {
 files = {
     '/etc/grafana/grafana.ini': {
         'content': repo.libs.ini.dumps(node.metadata.get('grafana/config')),
-        'owner': 'grafana',
-        'group': 'grafana',
         'triggers': [
             'svc_systemd:grafana-server:restart',
         ],

@@ -64,8 +56,6 @@ files = {
             'apiVersion': 1,
             'datasources': list(node.metadata.get('grafana/datasources').values()),
         }),
-        'owner': 'grafana',
-        'group': 'grafana',
         'triggers': [
             'svc_systemd:grafana-server:restart',
         ],

@@ -82,8 +72,6 @@ files = {
                 },
             }],
         }),
-        'owner': 'grafana',
-        'group': 'grafana',
         'triggers': [
             'svc_systemd:grafana-server:restart',
         ],

@@ -172,8 +160,6 @@ for dashboard_id, monitored_node in enumerate(monitored_nodes, start=1):
     files[f'/var/lib/grafana/dashboards/{monitored_node.name}.json'] = {
         'content': json.dumps(dashboard, indent=4),
-        'owner': 'grafana',
-        'group': 'grafana',
         'triggers': [
             'svc_systemd:grafana-server:restart',
         ]


@@ -26,15 +26,9 @@ defaults = {
         'config': {
             'server': {
                 'http_port': 8300,
-                'http_addr': '127.0.0.1',
-                'enable_gzip': True,
             },
             'database': {
-                'type': 'postgres',
-                'host': '127.0.0.1:5432',
-                'name': 'grafana',
-                'user': 'grafana',
-                'password': postgres_password,
+                'url': f'postgres://grafana:{postgres_password}@localhost:5432/grafana',
             },
             'remote_cache': {
                 'type': 'redis',

@@ -139,13 +133,11 @@ def dns(metadata):

 @metadata_reactor.provides(
-    'nginx/has_websockets',
     'nginx/vhosts',
 )
 def nginx(metadata):
     return {
         'nginx': {
-            'has_websockets': True,
             'vhosts': {
                 metadata.get('grafana/hostname'): {
                     'content': 'grafana/vhost.conf',


@ -1,23 +0,0 @@
https://github.com/home-assistant/supervised-installer?tab=readme-ov-file
https://github.com/home-assistant/os-agent/tree/main?tab=readme-ov-file#using-home-assistant-supervised-on-debian
https://docs.docker.com/engine/install/debian/
https://www.home-assistant.io/installation/linux#install-home-assistant-supervised
https://github.com/home-assistant/supervised-installer
https://github.com/home-assistant/architecture/blob/master/adr/0014-home-assistant-supervised.md
DATA_SHARE=/usr/share/hassio dpkg --force-confdef --force-confold -i homeassistant-supervised.deb
fresh debian
install ha
check that it works
then apply bundlewrap on top
https://www.home-assistant.io/integrations/http/#ssl_certificate
`wget "$(curl -L https://api.github.com/repos/home-assistant/supervised-installer/releases/latest | jq -r '.assets[0].browser_download_url')" -O homeassistant-supervised.deb && dpkg -i homeassistant-supervised.deb`


@ -1,30 +0,0 @@
from shlex import quote
version = node.metadata.get('homeassistant/os_agent_version')
directories = {
'/usr/share/hassio': {},
}
actions = {
'install_os_agent': {
'command': ' && '.join([
f'wget -O /tmp/os-agent.deb https://github.com/home-assistant/os-agent/releases/download/{quote(version)}/os-agent_{quote(version)}_linux_aarch64.deb',
'DEBIAN_FRONTEND=noninteractive dpkg -i /tmp/os-agent.deb',
]),
'unless': f'test "$(apt -qq list os-agent | cut -d" " -f2)" = "{quote(version)}"',
'needs': {
'pkg_apt:',
'zfs_dataset:tank/homeassistant',
},
},
'install_homeassistant_supervised': {
'command': 'wget -O /tmp/homeassistant-supervised.deb https://github.com/home-assistant/supervised-installer/releases/latest/download/homeassistant-supervised.deb && apt install /tmp/homeassistant-supervised.deb',
'unless': 'apt -qq list homeassistant-supervised | grep -q "installed"',
'needs': {
'action:install_os_agent',
},
},
}


@ -1,65 +0,0 @@
defaults = {
'apt': {
'packages': {
# homeassistant-supervised
'apparmor': {},
'bluez': {},
'cifs-utils': {},
'curl': {},
'dbus': {},
'jq': {},
'libglib2.0-bin': {},
'lsb-release': {},
'network-manager': {},
'nfs-common': {},
'systemd-journal-remote': {},
'systemd-resolved': {},
'udisks2': {},
'wget': {},
# docker
'docker-ce': {},
'docker-ce-cli': {},
'containerd.io': {},
'docker-buildx-plugin': {},
'docker-compose-plugin': {},
},
'sources': {
# docker: https://docs.docker.com/engine/install/debian/#install-using-the-repository
'docker': {
'urls': {
'https://download.docker.com/linux/debian',
},
'suites': {
'{codename}',
},
'components': {
'stable',
},
},
},
},
'zfs': {
'datasets': {
'tank/homeassistant': {
'mountpoint': '/usr/share/hassio',
'needed_by': {
'directory:/usr/share/hassio',
},
},
},
},
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('homeassistant/domain'): {
'content': 'homeassistant/vhost.conf',
},
},
},
}


@ -0,0 +1,20 @@
users = {
'homeassistant': {
'home': '/var/lib/homeassistant',
},
}
directories = {
'/var/lib/homeassistant': {
'owner': 'homeassistant',
},
'/var/lib/homeassistant/config': {
'owner': 'homeassistant',
},
'/var/lib/homeassistant/venv': {
'owner': 'homeassistant',
},
}
# https://wiki.instar.com/de/Software/Linux/Home_Assistant/


@ -0,0 +1,20 @@
defaults = {
'apt': {
'packages': {
'python3': {},
'python3-dev': {},
'python3-pip': {},
'python3-venv': {},
'libffi-dev': {},
'libssl-dev': {},
'libjpeg-dev': {},
'zlib1g-dev': {},
'autoconf': {},
'build-essential': {},
'libopenjp2-7': {},
'libtiff5': {},
'libturbojpeg0-dev': {},
'tzdata': {},
},
},
}


@@ -13,9 +13,9 @@ apply Notification "mail-icingaadmin" to Host {
   user_groups = host.vars.notification.mail.groups
   users = host.vars.notification.mail.users

+  //interval = 2h
+  //vars.notification_logtosyslog = true

   assign where host.vars.notification.mail
 }

@@ -25,9 +25,9 @@ apply Notification "mail-icingaadmin" to Service {
   user_groups = host.vars.notification.mail.groups
   users = host.vars.notification.mail.users

+  //interval = 2h
+  //vars.notification_logtosyslog = true

   assign where host.vars.notification.mail
 }


@@ -269,7 +269,7 @@ svc_systemd = {
     'icinga2.service': {
         'needs': [
             'pkg_apt:icinga2-ido-pgsql',
-            'svc_systemd:postgresql.service',
+            'svc_systemd:postgresql',
         ],
     },
 }
} }


@@ -11,7 +11,7 @@ defaults = {
             'php-imagick': {},
             'php-pgsql': {},
             'icingaweb2': {},
-            #'icingaweb2-module-monitoring': {},  # ?
+            'icingaweb2-module-monitoring': {},
         },
         'sources': {
             'icinga': {


@ -1,3 +0,0 @@
# svc_systemd = {
# 'ifupdown.service': {},
# }


@ -1,21 +0,0 @@
from json import dumps
from bundlewrap.metadata import MetadataJSONEncoder
files = {
'/etc/kea/kea-dhcp4.conf': {
'content': dumps(node.metadata.get('kea'), indent=4, sort_keys=True, cls=MetadataJSONEncoder),
'triggers': [
'svc_systemd:kea-dhcp4-server:restart',
],
},
}
svc_systemd = {
'kea-dhcp4-server': {
'needs': [
'pkg_apt:kea-dhcp4-server',
'file:/etc/kea/kea-dhcp4.conf',
'svc_systemd:systemd-networkd.service:restart',
],
},
}


@ -1,96 +0,0 @@
from ipaddress import ip_interface, ip_network
hashable = repo.libs.hashable.hashable
defaults = {
'apt': {
'packages': {
'kea-dhcp4-server': {},
},
},
'kea': {
'Dhcp4': {
'interfaces-config': {
'interfaces': set(),
},
'lease-database': {
'type': 'memfile',
'lfc-interval': 3600
},
'subnet4': set(),
'loggers': set([
hashable({
'name': 'kea-dhcp4',
'output_options': [
{
'output': 'syslog',
}
],
'severity': 'INFO',
}),
]),
},
},
}
@metadata_reactor.provides(
'kea/Dhcp4/interfaces-config/interfaces',
'kea/Dhcp4/subnet4',
)
def subnets(metadata):
subnet4 = set()
interfaces = set()
reservations = set(
hashable({
'hw-address': network_conf['mac'],
'ip-address': str(ip_interface(network_conf['ipv4']).ip),
})
for other_node in repo.nodes
for network_conf in other_node.metadata.get('network', {}).values()
if 'mac' in network_conf
)
for network_name, network_conf in metadata.get('network').items():
dhcp_server_config = network_conf.get('dhcp_server_config', None)
if dhcp_server_config:
_network = ip_network(dhcp_server_config['subnet'])
subnet4.add(hashable({
'subnet': dhcp_server_config['subnet'],
'pools': [
{
'pool': f'{dhcp_server_config['pool_from']} - {dhcp_server_config['pool_to']}',
},
],
'option-data': [
{
'name': 'routers',
'data': dhcp_server_config['router'],
},
{
'name': 'domain-name-servers',
'data': '10.0.0.1',
},
],
'reservations': set(
reservation
for reservation in reservations
if ip_interface(reservation['ip-address']).ip in _network
),
}))
interfaces.add(network_conf.get('interface', network_name))
return {
'kea': {
'Dhcp4': {
'interfaces-config': {
'interfaces': interfaces,
},
'subnet4': subnet4,
},
},
}
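
The `reservations` filter above only keeps reservations whose address falls inside the subnet currently being emitted; the `ipaddress` behaviour it relies on, shown with made-up addresses:

```python
from ipaddress import ip_interface, ip_network

subnet = ip_network('10.2.0.0/24')                 # hypothetical dhcp_server_config['subnet']
print(ip_interface('10.2.0.10/32').ip in subnet)   # True  -> kept for this subnet4
print(ip_interface('10.3.0.10/32').ip in subnet)   # False -> filtered out
```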


@@ -1,36 +1,36 @@
 hostname "CroneKorkN : ${name}"
 sv_contact "admin@sublimity.de"

-// assign serevr to steam group
 sv_steamgroup "${','.join(steamgroups)}"

 rcon_password "${rcon_password}"

-// no annoying message of the day
 motd_enabled 0

-// enable cheats
 sv_cheats 1

-// allow inconsistent files on clients (weapon mods for example)
 sv_consistency 0

-// connect from internet
 sv_lan 0

-// join game at any point
 sv_allow_lobby_connect_only 0

-// allowed modes
 sv_gametypes "coop,realism,survival,versus,teamversus,scavenge,teamscavenge"

-// network
 sv_minrate 30000
 sv_maxrate 60000
 sv_mincmdrate 66
 sv_maxcmdrate 101

-// logging
 sv_logsdir "logs-${name}" //Folder in the game directory where server logs will be stored.
 log on //Creates a logfile (on | off)
 sv_logecho 0 //default 0; Echo log information to the console.


@@ -56,7 +56,6 @@ for domain in node.metadata.get('letsencrypt/domains').keys():
         'unless': f'/etc/dehydrated/letsencrypt-ensure-some-certificate {domain} true',
         'needs': {
             'file:/etc/dehydrated/letsencrypt-ensure-some-certificate',
-            'pkg_apt:dehydrated',
         },
         'needed_by': {
             'svc_systemd:nginx',


@ -1,41 +0,0 @@
from shlex import quote
def generate_sysctl_key_value_pairs_from_json(json_data, parents=[]):
if isinstance(json_data, dict):
for key, value in json_data.items():
yield from generate_sysctl_key_value_pairs_from_json(value, [*parents, key])
elif isinstance(json_data, list):
raise ValueError(f"List not supported: '{json_data}'")
else:
# If it's a leaf node, yield the path
yield (parents, json_data)
key_value_pairs = generate_sysctl_key_value_pairs_from_json(node.metadata.get('sysctl'))
files= {
'/etc/sysctl.conf': {
'content': '\n'.join(
sorted(
f"{'.'.join(path)}={value}"
for path, value in key_value_pairs
),
),
'triggers': [
'svc_systemd:systemd-sysctl.service:restart',
],
},
}
svc_systemd = {
'systemd-sysctl.service': {},
}
for path, value in key_value_pairs:
actions[f'reload_sysctl.conf_{path}'] = {
'command': f"sysctl --values {'.'.join(path)} | grep -q {quote('^'+value+'$')}",
'needs': [
f'action:systemd-sysctl.service',
f'action:systemd-sysctl.service:restart',
],
}
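
The generator walks the nested `sysctl` metadata dict and yields `(path, value)` pairs for every leaf, which the file content then renders as `a.b.c=value` lines. A self-contained sketch of the same traversal:

```python
def flatten(tree, parents=()):
    # yield (path, value) for every leaf of a nested dict
    if isinstance(tree, dict):
        for key, value in tree.items():
            yield from flatten(value, (*parents, key))
    else:
        yield parents, tree

pairs = flatten({'net': {'ipv4': {'ip_forward': 1}}, 'vm': {'swappiness': 10}})
print('\n'.join(f"{'.'.join(path)}={value}" for path, value in sorted(pairs)))
# net.ipv4.ip_forward=1
# vm.swappiness=10
```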


@ -1,3 +0,0 @@
defaults = {
'sysctl': {},
}


@@ -20,19 +20,18 @@ files = {
 }

 actions = {
-    'systemd-locale': {
-        'command': f'localectl set-locale LANG="{default_locale}"',
-        'unless': f'localectl | grep -Fi "system locale" | grep -Fi "{default_locale}"',
-        'triggers': {
-            'action:locale-gen',
-        },
-    },
     'locale-gen': {
         'command': 'locale-gen',
         'triggered': True,
         'needs': {
             'pkg_apt:locales',
-            'action:systemd-locale',
+        },
+    },
+    'systemd-locale': {
+        'command': f'localectl set-locale LANG="{default_locale}"',
+        'unless': f'localectl | grep -Fi "system locale" | grep -Fi "{default_locale}"',
+        'preceded_by': {
+            'action:locale-gen',
         },
     },
 }


@@ -2,5 +2,5 @@
 cd "$OLDPWD"

-export BW_ITEM_WORKERS=$(expr "$(sysctl -n hw.logicalcpu)" '*' 12 '/' 10)
+export BW_ITEM_WORKERS=$(expr "$(nproc)" '*' 15 '/' 10)
 export BW_NODE_WORKERS=$(expr 320 '/' "$BW_ITEM_WORKERS")
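
With `nproc` reporting 8, for example, the new expression yields `8 * 15 / 10 = 12` item workers (expr does integer division), and `320 / 12 = 26` node workers.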


@@ -2,5 +2,7 @@
 cd "$OLDPWD"

-PATH_add "/opt/homebrew/opt/gnu-sed/libexec/gnubin"
-PATH_add "/opt/homebrew/opt/grep/libexec/gnubin"
+GNU_PATH="$HOME/.local/gnu_bin"
+mkdir -p "$GNU_PATH"
+test -f "$GNU_PATH/sed" || ln -s "$(which gsed)" "$GNU_PATH/sed"
+PATH_add "$GNU_PATH"


@@ -10,7 +10,6 @@ password required pam_deny.so
 session required pam_permit.so
 EOT

-sudo xcodebuild -license accept
 xcode-select --install

 git -C ~/.zsh/oh-my-zsh pull

@@ -18,7 +17,7 @@ git -C ~/.zsh/oh-my-zsh pull
 brew upgrade
 brew upgrade --cask --greedy

-pyenv install --skip-existing
+pyenv install --keep-existing
 sudo softwareupdate -ia --verbose

@@ -42,5 +41,3 @@ fi
 sudo systemsetup -setremotelogin on # enable ssh

 pip install --upgrade pip
-
-# https://sysadmin-journal.com/apache-directory-studio-on-the-apple-m1/


@@ -5,5 +5,5 @@ cd "$OLDPWD"
 if test -f .venv/bin/python && test "$(realpath .venv/bin/python)" != "$(realpath "$(pyenv which python)")"
 then
     echo "rebuilding venv für new python version"
-    rm -rf .venv .pip_upgrade_timestamp
+    rm -rf .venv
 fi


@@ -3,7 +3,7 @@
 cd "$OLDPWD"

 python3 -m venv .venv
-source .venv/bin/activate
+source ./.venv/bin/activate
 PATH_add .venv/bin

 NOW=$(date +%s)

@@ -19,9 +19,5 @@ if test "$DELTA" -gt 86400
 then
     python3 -m pip --require-virtualenv install pip wheel --upgrade
     python3 -m pip --require-virtualenv install -r requirements.txt --upgrade
-    if test -e optional-requirements.txt
-    then
-        python3 -m pip --require-virtualenv install -r optional-requirements.txt --upgrade
-    fi
     date +%s > .pip_upgrade_timestamp
 fi


@ -1,9 +1,6 @@
export PATH=~/.bin:$PATH export PATH=~/.bin:$PATH
export PATH=~/.cargo/bin:$PATH
export ZSH=~/.zsh/oh-my-zsh export ZSH=~/.zsh/oh-my-zsh
export ZSH_HOSTNAME='sm' ZSH_THEME="ckn"
ZSH_THEME="bw"
HIST_STAMPS="yyyy/mm/dd" HIST_STAMPS="yyyy/mm/dd"
plugins=( plugins=(
zsh-autosuggestions zsh-autosuggestions
@ -13,6 +10,13 @@ source $ZSH/oh-my-zsh.sh
ulimit -S -n 24000 ulimit -S -n 24000
sshn() {
ssh "$(tr '.' ' ' <<< "$1" | tac -s ' ' | xargs | tr ' ' '.').smhss.de"
}
pingn() {
ping "$(tr '.' ' ' <<< "$1" | tac -s ' ' | xargs | tr ' ' '.').smhss.de"
}
antivir() { antivir() {
printf 'scanning for viruses' && sleep 1 && printf '.' && sleep 1 && printf '.' && sleep 1 && printf '.' && printf 'scanning for viruses' && sleep 1 && printf '.' && sleep 1 && printf '.' && sleep 1 && printf '.' &&
sleep 1 && echo '\nyour computer is safe!' sleep 1 && echo '\nyour computer is safe!'
@ -22,12 +26,3 @@ eval "$(rbenv init -)"
eval "$(pyenv init -)" eval "$(pyenv init -)"
eval "$(direnv hook zsh)" eval "$(direnv hook zsh)"
eval "$(op completion zsh)"; compdef _op op eval "$(op completion zsh)"; compdef _op op
# //S/M
sshn() {
ssh "$(tr '.' ' ' <<< "$1" | tac -s ' ' | xargs | tr ' ' '.').smhss.de"
}
pingn() {
ping "$(tr '.' ' ' <<< "$1" | tac -s ' ' | xargs | tr ' ' '.').smhss.de"
}
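The `sshn`/`pingn` helpers reverse the dot-separated segments of their argument and append the internal domain; a usage sketch (hostnames are illustrative):

```sh
sshn web.home     # equivalent to: ssh home.web.smhss.de
pingn web.home    # equivalent to: ping home.web.smhss.de
```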

View file

@ -1,12 +1,3 @@
# brew install
actions['brew_install'] = {
'command': '/opt/homebrew/bin/brew install ' + ' '.join(node.metadata.get('brew')),
'unless': f"""PKGS=$(/opt/homebrew/bin/brew leaves); for p in {' '.join(node.metadata.get('brew'))}; do grep -q "$p" <<< $PKGS || exit 9; done"""
}
# bw init
directories['/Users/mwiegand/.config/bundlewrap/lock'] = {} directories['/Users/mwiegand/.config/bundlewrap/lock'] = {}
# home # home
@ -22,12 +13,6 @@ files['/Users/mwiegand/.bin/macbook-update'] = {
'mode': '755', 'mode': '755',
} }
with open(f'{repo.path}/bundles/zsh/files/bw.zsh-theme') as f:
files['/Users/mwiegand/.zsh/oh-my-zsh/themes/bw.zsh-theme'] = {
'content': f.read(),
'mode': '0644',
}
# direnv # direnv
directories['/Users/mwiegand/.local/share/direnv'] = {} directories['/Users/mwiegand/.local/share/direnv'] = {}
@ -36,7 +21,6 @@ files['/Users/mwiegand/.local/share/direnv/pyenv'] = {}
files['/Users/mwiegand/.local/share/direnv/venv'] = {} files['/Users/mwiegand/.local/share/direnv/venv'] = {}
files['/Users/mwiegand/.local/share/direnv/bundlewrap'] = {} files['/Users/mwiegand/.local/share/direnv/bundlewrap'] = {}
################## ##################
for element in [*files.values(), *directories.values()]: for element in [*files.values(), *directories.values()]:

View file

@ -1,3 +1 @@
defaults = { defaults = {}
'brew': {},
}

View file

@ -1,22 +0,0 @@
# This is the mailman extension configuration file to enable HyperKitty as an
# archiver. Remember to add the following lines in the mailman.cfg file:
#
# [archiver.hyperkitty]
# class: mailman_hyperkitty.Archiver
# enable: yes
# configuration: /etc/mailman3/mailman-hyperkitty.cfg
#
[general]
# This is your HyperKitty installation, preferably on the localhost. This
# address will be used by Mailman to forward incoming emails to HyperKitty
# for archiving. It does not need to be publicly available, in fact it's
# better if it is not.
# However, if your Mailman installation is accessed via HTTPS, the URL needs
# to match your SSL certificate (e.g. https://lists.example.com/hyperkitty).
base_url: http://${hostname}/mailman3/hyperkitty/
# The shared api_key, must be identical except for quoting to the value of
# MAILMAN_ARCHIVER_KEY in HyperKitty's settings.
api_key: ${archiver_key}

View file

@ -1,190 +0,0 @@
ACCOUNT_EMAIL_VERIFICATION='none'
# This file is imported by the Mailman Suite. It is used to override
# the default settings from /usr/share/mailman3-web/settings.py.
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '${secret_key}'
ADMINS = (
('Mailman Suite Admin', 'root@localhost'),
)
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.8/ref/settings/#allowed-hosts
# Set to '*' by default in the Debian package to allow all hostnames. Mailman3
# is meant to run behind a webserver reverse proxy anyway.
ALLOWED_HOSTS = [
'${hostname}',
]
# Mailman API credentials
MAILMAN_REST_API_URL = 'http://localhost:8001'
MAILMAN_REST_API_USER = 'restadmin'
MAILMAN_REST_API_PASS = '${api_password}'
MAILMAN_ARCHIVER_KEY = '${archiver_key}'
MAILMAN_ARCHIVER_FROM = ('127.0.0.1', '::1')
# Application definition
INSTALLED_APPS = (
'hyperkitty',
'postorius',
'django_mailman3',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'django_gravatar',
'compressor',
'haystack',
'django_extensions',
'django_q',
'allauth',
'allauth.account',
'allauth.socialaccount',
'django_mailman3.lib.auth.fedora',
#'allauth.socialaccount.providers.openid',
#'allauth.socialaccount.providers.github',
#'allauth.socialaccount.providers.gitlab',
#'allauth.socialaccount.providers.google',
#'allauth.socialaccount.providers.facebook',
#'allauth.socialaccount.providers.twitter',
#'allauth.socialaccount.providers.stackexchange',
)
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
# Use 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
#'ENGINE': 'django.db.backends.sqlite3',
'ENGINE': 'django.db.backends.postgresql_psycopg2',
#'ENGINE': 'django.db.backends.mysql',
# DB name or path to database file if using sqlite3.
#'NAME': '/var/lib/mailman3/web/mailman3web.db',
'NAME': 'mailman',
# The following settings are not used with sqlite3:
'USER': 'mailman',
'PASSWORD': '${db_password}',
# HOST: empty for localhost through domain sockets or '127.0.0.1' for
# localhost through TCP.
'HOST': '127.0.0.1',
# PORT: set to empty string for default.
'PORT': '5432',
# OPTIONS: Extra parameters to use when connecting to the database.
'OPTIONS': {
# Set sql_mode to 'STRICT_TRANS_TABLES' for MySQL. See
# https://docs.djangoproject.com/en/1.11/ref/
# databases/#setting-sql-mode
#'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
},
}
}
# If you're behind a proxy, use the X-Forwarded-Host header
# See https://docs.djangoproject.com/en/1.8/ref/settings/#use-x-forwarded-host
USE_X_FORWARDED_HOST = True
# And if your proxy does your SSL encoding for you, set SECURE_PROXY_SSL_HEADER
# https://docs.djangoproject.com/en/1.8/ref/settings/#secure-proxy-ssl-header
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_SCHEME', 'https')
# Other security settings
# SECURE_SSL_REDIRECT = True
# If you set SECURE_SSL_REDIRECT to True, make sure the SECURE_REDIRECT_EXEMPT
# contains at least this line:
# SECURE_REDIRECT_EXEMPT = [
# "archives/api/mailman/.*", # Request from Mailman.
# ]
# SESSION_COOKIE_SECURE = True
# SECURE_CONTENT_TYPE_NOSNIFF = True
# SECURE_BROWSER_XSS_FILTER = True
# CSRF_COOKIE_SECURE = True
# CSRF_COOKIE_HTTPONLY = True
# X_FRAME_OPTIONS = 'DENY'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Set default domain for email addresses.
EMAILNAME = 'localhost.local'
# If you enable internal authentication, this is the address that the emails
# will appear to be coming from. Make sure you set a valid domain name,
# otherwise the emails may get rejected.
# https://docs.djangoproject.com/en/1.8/ref/settings/#default-from-email
# DEFAULT_FROM_EMAIL = "mailing-lists@you-domain.org"
DEFAULT_FROM_EMAIL = 'postorius@{}'.format(EMAILNAME)
# If you enable email reporting for error messages, this is where those emails
# will appear to be coming from. Make sure you set a valid domain name,
# otherwise the emails may get rejected.
# https://docs.djangoproject.com/en/1.8/ref/settings/#std:setting-SERVER_EMAIL
# SERVER_EMAIL = 'root@your-domain.org'
SERVER_EMAIL = 'root@{}'.format(EMAILNAME)
# Django Allauth
ACCOUNT_DEFAULT_HTTP_PROTOCOL = "https"
#
# Social auth
#
SOCIALACCOUNT_PROVIDERS = {
#'openid': {
# 'SERVERS': [
# dict(id='yahoo',
# name='Yahoo',
# openid_url='http://me.yahoo.com'),
# ],
#},
#'google': {
# 'SCOPE': ['profile', 'email'],
# 'AUTH_PARAMS': {'access_type': 'online'},
#},
#'facebook': {
# 'METHOD': 'oauth2',
# 'SCOPE': ['email'],
# 'FIELDS': [
# 'email',
# 'name',
# 'first_name',
# 'last_name',
# 'locale',
# 'timezone',
# ],
# 'VERSION': 'v2.4',
#},
}
# On a production setup, setting COMPRESS_OFFLINE to True will bring a
# significant performance improvement, as CSS files will not need to be
# recompiled on each requests. It means running an additional "compress"
# management command after each code upgrade.
# http://django-compressor.readthedocs.io/en/latest/usage/#offline-compression
COMPRESS_OFFLINE = True
POSTORIUS_TEMPLATE_BASE_URL = 'http://${hostname}/mailman3/'

View file

@ -1,277 +0,0 @@
# Copyright (C) 2008-2017 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
# This file contains the Debian configuration for mailman. It uses ini-style
# formats under the lazr.config regime to define all system configuration
# options. See <https://launchpad.net/lazr.config> for details.
[mailman]
# This address is the "site owner" address. Certain messages which must be
# delivered to a human, but which can't be delivered to a list owner (e.g. a
# bounce from a list owner), will be sent to this address. It should point to
# a human.
site_owner: ${site_owner_email}
# This is the local-part of an email address used in the From field whenever a
# message comes from some entity to which there is no natural reply recipient.
# Mailman will append '@' and the host name of the list involved. This
# address must not bounce and it must not point to a Mailman process.
noreply_address: noreply
# The default language for this server.
default_language: de
# Membership tests for posting purposes are usually performed by looking at a
# set of headers, passing the test if any of their values match a member of
# the list. Headers are checked in the order given in this variable. The
# value From_ means to use the envelope sender. Field names are case
# insensitive. This is a space separated list of headers.
sender_headers: from from_ reply-to sender
# Mail command processor will ignore mail command lines after designated max.
email_commands_max_lines: 10
# Default length of time a pending request is live before it is evicted from
# the pending database.
pending_request_life: 3d
# How long should files be saved before they are evicted from the cache?
cache_life: 7d
# A callable to run with no arguments early in the initialization process.
# This runs before database initialization.
pre_hook:
# A callable to run with no arguments late in the initialization process.
# This runs after adapters are initialized.
post_hook:
# Which paths.* file system layout to use.
# You should not change this variable.
layout: debian
# Can MIME filtered messages be preserved by list owners?
filtered_messages_are_preservable: no
# How should text/html parts be converted to text/plain when the mailing list
# is set to convert HTML to plaintext? This names a command to be called,
# where the substitution variable $filename is filled in by Mailman, and
# contains the path to the temporary file that the command should read from.
# The command should print the converted text to stdout.
html_to_plain_text_command: /usr/bin/lynx -dump $filename
# Specify what characters are allowed in list names. Characters outside of
# the class [-_.+=!$*{}~0-9a-z] matched case insensitively are never allowed,
# but this specifies a subset as the only allowable characters. This must be
# a valid character class regexp or the effect on list creation is
# unpredictable.
listname_chars: [-_.0-9a-z]
[shell]
# `mailman shell` (also `withlist`) gives you an interactive prompt that you
# can use to interact with an initialized and configured Mailman system. Use
# --help for more information. This section allows you to configure certain
# aspects of this interactive shell.
# Customize the interpreter prompt.
prompt: >>>
# Banner to show on startup.
banner: Welcome to the GNU Mailman shell
# Use IPython as the shell, which must be found on the system. Valid values
# are `no`, `yes`, and `debug` where the latter is equivalent to `yes` except
# that any import errors will be displayed to stderr.
use_ipython: no
# Set this to allow for command line history if readline is available. This
# can be as simple as $var_dir/history.py to put the file in the var directory.
history_file:
[paths.debian]
# Important directories for Mailman operation. These are defined here so that
# different layouts can be supported. For example, a developer layout would
# be different from a FHS layout. Most paths are based off the var_dir, and
# often just setting that will do the right thing for all the other paths.
# You might also have to set spool_dir though.
#
# Substitutions are allowed, but must be of the form $var where 'var' names a
# configuration variable in the paths.* section. Substitutions are expanded
# recursively until no more $-variables are present. Beware of infinite
# expansion loops!
#
# This is the root of the directory structure that Mailman will use to store
# its run-time data.
var_dir: /var/lib/mailman3
# This is where the Mailman queue files directories will be created.
queue_dir: $var_dir/queue
# This is the directory containing the Mailman 'runner' and 'master' commands
# if set to the string '$argv', it will be taken as the directory containing
# the 'mailman' command.
bin_dir: /usr/lib/mailman3/bin
# All list-specific data.
list_data_dir: $var_dir/lists
# Directory where log files go.
log_dir: /var/log/mailman3
# Directory for system-wide locks.
lock_dir: $var_dir/locks
# Directory for system-wide data.
data_dir: $var_dir/data
# Cache files.
cache_dir: $var_dir/cache
# Directory for configuration files and such.
etc_dir: /etc/mailman3
# Directory containing Mailman plugins.
ext_dir: $var_dir/ext
# Directory where the default IMessageStore puts its messages.
messages_dir: $var_dir/messages
# Directory for archive backends to store their messages in. Archivers should
# create a subdirectory in here to store their files.
archive_dir: $var_dir/archives
# Root directory for site-specific template override files.
template_dir: $var_dir/templates
# There are also a number of paths to specific file locations that can be
# defined. For these, the directory containing the file must already exist,
# or be one of the directories created by Mailman as per above.
#
# This is where PID file for the master runner is stored.
pid_file: /run/mailman3/master.pid
# Lock file.
lock_file: $lock_dir/master.lck
[database]
# The class implementing the IDatabase.
class: mailman.database.sqlite.SQLiteDatabase
#class: mailman.database.mysql.MySQLDatabase
#class: mailman.database.postgresql.PostgreSQLDatabase
# Use this to set the Storm database engine URL. You generally have one
# primary database connection for all of Mailman. List data and most rosters
# will store their data in this database, although external rosters may access
# other databases in their own way. This string supports standard
# 'configuration' substitutions.
url: sqlite:///$DATA_DIR/mailman.db
#url: mysql+pymysql://mailman3:mmpass@localhost/mailman3?charset=utf8&use_unicode=1
#url: postgresql://mailman3:mmpass@localhost/mailman3
debug: no
[logging.debian]
# This defines various log settings. The options available are:
#
# - level -- Overrides the default level; this may be any of the
# standard Python logging levels, case insensitive.
# - format -- Overrides the default format string
# - datefmt -- Overrides the default date format string
# - path -- Overrides the default logger path. This may be a relative
# path name, in which case it is relative to Mailman's LOG_DIR,
# or it may be an absolute path name. You cannot change the
# handler class that will be used.
# - propagate -- Boolean specifying whether to propagate log message from this
# logger to the root "mailman" logger. You cannot override
# settings for the root logger.
#
# In this section, you can define defaults for all loggers, which will be
# prefixed by 'mailman.'. Use subsections to override settings for specific
# loggers. The names of the available loggers are:
#
# - archiver -- All archiver output
# - bounce -- All bounce processing logs go here
# - config -- Configuration issues
# - database -- Database logging (SQLAlchemy and Alembic)
# - debug -- Only used for development
# - error -- All exceptions go to this log
# - fromusenet -- Information related to the Usenet to Mailman gateway
# - http -- Internal wsgi-based web interface
# - locks -- Lock state changes
# - mischief -- Various types of hostile activity
# - runner -- Runner process start/stops
# - smtp -- Successful SMTP activity
# - smtp-failure -- Unsuccessful SMTP activity
# - subscribe -- Information about leaves/joins
# - vette -- Message vetting information
format: %(asctime)s (%(process)d) %(message)s
datefmt: %b %d %H:%M:%S %Y
propagate: no
level: info
path: mailman.log
[webservice]
# The hostname at which admin web service resources are exposed.
hostname: localhost
# The port at which the admin web service resources are exposed.
port: 8001
# Whether or not requests to the web service are secured through SSL.
use_https: no
# Whether or not to show tracebacks in an HTTP response for a request that
# raised an exception.
show_tracebacks: yes
# The API version number for the current (highest) API.
api_version: 3.1
# The administrative username.
admin_user: restadmin
# The administrative password.
admin_pass: ${api_password}
[mta]
# The class defining the interface to the incoming mail transport agent.
#incoming: mailman.mta.exim4.LMTP
incoming: mailman.mta.postfix.LMTP
# The callable implementing delivery to the outgoing mail transport agent.
# This must accept three arguments, the mailing list, the message, and the
# message metadata dictionary.
outgoing: mailman.mta.deliver.deliver
# How to connect to the outgoing MTA. If smtp_user and smtp_pass is given,
# then Mailman will attempt to log into the MTA when making a new connection.
# smtp_host: smtp.ionos.de
# smtp_port: 587
# smtp_user: ${smtp_user}
# smtp_pass: ${smtp_password}
# smtp_secure_mode: starttls
smtp_host: 127.0.0.1
smtp_port: 25
smtp_user:
smtp_pass:
# Where the LMTP server listens for connections. Use 127.0.0.1 instead of
# localhost for Postfix integration, because Postfix only consults DNS
# (e.g. not /etc/hosts).
lmtp_host: 127.0.0.1
lmtp_port: 8024
# Where can we find the mail server specific configuration file? The path can
# be either a file system path or a Python import path. If the value starts
# with python: then it is a Python import path, otherwise it is a file system
# path. File system paths must be absolute since no guarantees are made about
# the current working directory. Python paths should not include the trailing
# .cfg, which the file must end with.
#configuration: python:mailman.config.exim4
configuration: python:mailman.config.postfix

View file

@ -1,52 +0,0 @@
# See /usr/share/postfix/main.cf.dist for a commented, more complete version
# Debian specific: Specifying a file name will cause the first
# line of that file to be used as the name. The Debian default
# is /etc/mailname.
#myorigin = /etc/mailname
smtpd_banner = $myhostname ESMTP $mail_name (Debian/GNU)
biff = no
# appending .domain is the MUA's job.
append_dot_mydomain = no
# Uncomment the next line to generate "delayed mail" warnings
#delay_warning_time = 4h
readme_directory = no
# See http://www.postfix.org/COMPATIBILITY_README.html -- default to 3.6 on
# fresh installs.
compatibility_level = 3.6
# TLS parameters
smtpd_tls_cert_file=/etc/ssl/certs/ssl-cert-snakeoil.pem
smtpd_tls_key_file=/etc/ssl/private/ssl-cert-snakeoil.key
smtpd_tls_security_level=may
smtp_tls_CApath=/etc/ssl/certs
smtp_tls_security_level=may
smtp_tls_session_cache_database = <%text>btree:${data_directory}/smtp_scache</%text>
smtpd_relay_restrictions = permit_mynetworks permit_sasl_authenticated defer_unauth_destination
myhostname = ${hostname}
alias_maps = hash:/etc/aliases
alias_database = hash:/etc/aliases
mydestination = $myhostname, localhost, localhost.localdomain, ${hostname}
relayhost =
mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128
mailbox_size_limit = 0
recipient_delimiter = +
inet_interfaces = all
inet_protocols = all
unknown_local_recipient_reject_code = 550
owner_request_special = no
transport_maps =
hash:/var/lib/mailman3/data/postfix_lmtp
local_recipient_maps =
hash:/var/lib/mailman3/data/postfix_lmtp
relay_domains =
hash:/var/lib/mailman3/data/postfix_domains

View file

@ -1,50 +0,0 @@
[uwsgi]
# Socket on which uwsgi will be listening.
uwsgi-socket = /run/mailman3-web/uwsgi.sock
#Enable threading for python
enable-threads = true
# Move to the directory where the django files are.
chdir = /usr/share/mailman3-web
# Use the wsgi file provided with the django project.
wsgi-file = wsgi.py
# Setup default number of processes and threads per process.
master = true
process = 2
threads = 2
# Drop privileges and don't run as root.
uid = www-data
gid = www-data
plugins = python3
# Setup the django_q related worker processes.
attach-daemon = python3 manage.py qcluster
# Setup hyperkitty's cron jobs.
#unique-cron = -1 -1 -1 -1 -1 ./manage.py runjobs minutely
#unique-cron = -15 -1 -1 -1 -1 ./manage.py runjobs quarter_hourly
#unique-cron = 0 -1 -1 -1 -1 ./manage.py runjobs hourly
#unique-cron = 0 0 -1 -1 -1 ./manage.py runjobs daily
#unique-cron = 0 0 1 -1 -1 ./manage.py runjobs monthly
#unique-cron = 0 0 -1 -1 0 ./manage.py runjobs weekly
#unique-cron = 0 0 1 1 -1 ./manage.py runjobs yearly
# Setup the request log.
#req-logger = file:/var/log/mailman3/web/mailman-web.log
# Log cron separately.
#logger = cron file:/var/log/mailman3/web/mailman-web-cron.log
#log-route = cron uwsgi-cron
# Log qcluster commands separately.
#logger = qcluster file:/var/log/mailman3/web/mailman-web-qcluster.log
#log-route = qcluster uwsgi-daemons
# Catch-all log for everything not routed above.
#logger = file:/var/log/mailman3/web/mailman-web-error.log
logto = /var/log/mailman3/web/mailman-web.log

View file

@ -1,104 +0,0 @@
directories = {
'/var/lib/mailman3': {
'owner': 'list',
'group': 'list',
'needs': {
'zfs_dataset:tank/mailman',
'pkg_apt:mailman3-full',
},
'needed_by': {
'svc_systemd:mailman3.service',
'svc_systemd:mailman3-web.service',
},
},
}
files = {
'/etc/postfix/main.cf': {
'source': 'postfix.cf',
'content_type': 'mako',
'mode': '0644',
'context': {
'hostname': node.metadata.get('mailman/hostname'),
},
'needs': {
'pkg_apt:postfix',
},
'triggers': {
'svc_systemd:postfix.service:restart',
},
},
'/etc/mailman3/mailman.cfg': {
'content_type': 'mako',
'owner': 'root',
'group': 'list',
'mode': '0640',
'context': node.metadata.get('mailman'),
'needs': {
'pkg_apt:mailman3-full',
},
'triggers': {
'svc_systemd:mailman3.service:restart',
'svc_systemd:mailman3-web.service:restart',
},
},
'/etc/mailman3/mailman-web.py': {
'content_type': 'mako',
'owner': 'root',
'group': 'www-data',
'mode': '0640',
'context': node.metadata.get('mailman'),
'needs': {
'pkg_apt:mailman3-full',
},
'triggers': {
'svc_systemd:mailman3.service:restart',
'svc_systemd:mailman3-web.service:restart',
},
},
'/etc/mailman3/mailman-hyperkitty.cfg': {
'content_type': 'mako',
'owner': 'root',
'group': 'list',
'mode': '0640',
'context': node.metadata.get('mailman'),
'needs': {
'pkg_apt:mailman3-full',
},
'triggers': {
'svc_systemd:mailman3.service:restart',
'svc_systemd:mailman3-web.service:restart',
},
},
'/etc/mailman3/uwsgi.ini': {
'content_type': 'text',
'owner': 'root',
'group': 'root',
'mode': '0644',
'needs': {
'pkg_apt:mailman3-full',
},
'triggers': {
'svc_systemd:mailman3.service:restart',
'svc_systemd:mailman3-web.service:restart',
},
},
}
svc_systemd = {
'postfix.service': {
'needs': {
'pkg_apt:postfix',
},
},
'mailman3.service': {
'needs': {
'pkg_apt:mailman3-full',
},
},
'mailman3-web.service': {
'needs': {
'pkg_apt:mailman3-full',
},
},
}

View file

@ -1,116 +0,0 @@
import base64
def derive_mailadmin_secret(metadata, salt):
node_id = metadata.get('id')
raw = base64.b64decode(
repo.vault.random_bytes_as_base64_for(f'{node_id}_{salt}', length=32).value
)
return base64.urlsafe_b64encode(raw).rstrip(b'=').decode('ascii')
defaults = {
'apt': {
'packages': {
'mailman3-full': {
'needs': {
'postgres_db:mailman',
'postgres_role:mailman',
'zfs_dataset:tank/mailman',
}
},
'postfix': {},
'python3-psycopg2': {
'needed_by': {
'pkg_apt:mailman3-full',
},
},
'apache2': {
'installed': False,
'needs': {
'pkg_apt:mailman3-full',
},
},
},
},
'zfs': {
'datasets': {
'tank/mailman': {
'mountpoint': '/var/lib/mailman3',
},
},
},
}
@metadata_reactor.provides(
'postgresql',
'mailman',
)
def postgresql(metadata):
node_id = metadata.get('id')
db_password = repo.vault.password_for(f'{node_id} database mailman')
return {
'postgresql': {
'databases': {
'mailman': {
'owner': 'mailman',
},
},
'roles': {
'mailman': {
'password': db_password,
},
},
},
'mailman': {
'db_password': db_password,
},
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('mailman/hostname'): {
'content': 'mailman/vhost.conf',
},
},
},
}
@metadata_reactor.provides(
'mailman/secret_key',
)
def secret_key(metadata):
import base64
node_id = metadata.get('id')
raw = base64.b64decode(
repo.vault.random_bytes_as_base64_for(f'{node_id}_mailman_secret_key', length=32).value
)
secret_key = base64.urlsafe_b64encode(raw).rstrip(b'=').decode('ascii')
return {
'mailman': {
'secret_key': secret_key,
},
}
@metadata_reactor.provides(
'mailman',
)
def secrets(metadata):
return {
'mailman': {
'web_secret': derive_mailadmin_secret(metadata, 'secret_key'),
'api_password': derive_mailadmin_secret(metadata, 'api_password'),
'archiver_key': derive_mailadmin_secret(metadata, 'archiver_key'),
},
}

View file

@ -1,6 +1,6 @@
<?php <?php
// https://raw.githubusercontent.com/Radiergummi/autodiscover/master/autodiscover/autodiscover.php
/******************************** /********************************
* Autodiscover responder * Autodiscover responder
@ -8,45 +8,45 @@
* This PHP script is intended to respond to any request to http(s)://mydomain.com/autodiscover/autodiscover.xml. * This PHP script is intended to respond to any request to http(s)://mydomain.com/autodiscover/autodiscover.xml.
* If configured properly, it will send a spec-compliant autodiscover XML response, pointing mail clients to the * If configured properly, it will send a spec-compliant autodiscover XML response, pointing mail clients to the
* appropriate mail services. * appropriate mail services.
* If you use MAPI or ActiveSync, stick with the Autodiscover service your mail server provides for you. But if * If you use MAPI or ActiveSync, stick with the Autodiscover service your mail server provides for you. But if
* you use POP/IMAP servers, this will provide autoconfiguration to Outlook, Apple Mail and mobile devices. * you use POP/IMAP servers, this will provide autoconfiguration to Outlook, Apple Mail and mobile devices.
* *
* To work properly, you'll need to set the service (sub)domains below in the settings section to the correct * To work properly, you'll need to set the service (sub)domains below in the settings section to the correct
* domain names, adjust ports and SSL. * domain names, adjust ports and SSL.
*/ */
//get raw POST data so we can extract the email address
$request = file_get_contents("php://input"); $request = file_get_contents("php://input");
// optional debug log
# file_put_contents( 'request.log', $request, FILE_APPEND ); # file_put_contents( 'request.log', $request, FILE_APPEND );
// retrieve email address from client request
preg_match( "/\<EMailAddress\>(.*?)\<\/EMailAddress\>/", $request, $email ); preg_match( "/\<EMailAddress\>(.*?)\<\/EMailAddress\>/", $request, $email );
// check for invalid mail, to prevent XSS
if (filter_var($email[1], FILTER_VALIDATE_EMAIL) === false) { if (filter_var($email[1], FILTER_VALIDATE_EMAIL) === false) {
throw new Exception('Invalid E-Mail provided'); throw new Exception('Invalid E-Mail provided');
} }
// get domain from email address
$domain = substr( strrchr( $email[1], "@" ), 1 ); $domain = substr( strrchr( $email[1], "@" ), 1 );
/************************************** /**************************************
* Port and server settings below * * Port and server settings below *
**************************************/ **************************************/
// IMAP settings
$imapServer = 'imap.' . $domain; // imap.example.com $imapServer = 'imap.' . $domain; // imap.example.com
$imapPort = 993; $imapPort = 993;
$imapSSL = true; $imapSSL = true;
// SMTP settings
$smtpServer = 'smtp.' . $domain; // smtp.example.com $smtpServer = 'smtp.' . $domain; // smtp.example.com
$smtpPort = 587; $smtpPort = 587;
$smtpSSL = true; $smtpSSL = true;
//set Content-Type
header( 'Content-Type: application/xml' ); header( 'Content-Type: application/xml' );
?> ?>
<?php echo '<?xml version="1.0" encoding="utf-8" ?>'; ?> <?php echo '<?xml version="1.0" encoding="utf-8" ?>'; ?>

View file

@ -33,12 +33,6 @@ defaults = {
'mountpoint': '/var/vmail', 'mountpoint': '/var/vmail',
'compression': 'on', 'compression': 'on',
}, },
'tank/vmail/index': {
'mountpoint': '/var/vmail/index',
'compression': 'on',
'com.sun:auto-snapshot': 'false',
'backup': False,
},
}, },
}, },
} }

View file

@ -1 +0,0 @@
https://mariadb.com/kb/en/systemd/#configuring-mariadb-to-write-the-error-log-to-syslog

View file

@ -1,87 +0,0 @@
from shlex import quote
def mariadb(sql, **kwargs):
kwargs_string = ''.join(f" --{k} {v}" for k, v in kwargs.items())
return f"mariadb{kwargs_string} -Bsr --execute {quote(sql)}"
directories = {
'/var/lib/mysql': {
'owner': 'mysql',
'group': 'mysql',
'needs': [
'zfs_dataset:tank/mariadb',
'pkg_apt:mariadb-server',
'pkg_apt:mariadb-client',
],
},
}
files = {
'/etc/mysql/conf.d/override.conf': {
'content': repo.libs.ini.dumps(node.metadata.get('mariadb/conf')),
'content_type': 'text',
},
}
svc_systemd = {
'mariadb.service': {
'needs': [
'pkg_apt:mariadb-server',
'pkg_apt:mariadb-client',
],
},
}
actions = {
'mariadb_sec_remove_anonymous_users': {
'command': mariadb("DELETE FROM mysql.global_priv WHERE User=''"),
'unless': mariadb("SELECT count(0) FROM mysql.global_priv WHERE User = ''") + " | grep -q '^0$'",
'needs': [
'svc_systemd:mariadb.service',
],
'triggers': [
'svc_systemd:mariadb.service:restart',
],
},
'mariadb_sec_remove_remote_root': {
'command': mariadb("DELETE FROM mysql.global_priv WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1')"),
'unless': mariadb("SELECT count(0) FROM mysql.global_priv WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1')") + " | grep -q '^0$'",
'needs': [
'svc_systemd:mariadb.service',
],
'triggers': [
'svc_systemd:mariadb.service:restart',
],
},
}
for db, conf in node.metadata.get('mariadb/databases', {}).items():
actions[f'mariadb_create_database_{db}'] = {
'command': mariadb(f"CREATE DATABASE {db}"),
'unless': mariadb(f"SHOW DATABASES LIKE '{db}'") + f" | grep -q '^{db}$'",
'needs': [
'svc_systemd:mariadb.service',
],
}
actions[f'mariadb_user_{db}_create'] = {
'command': mariadb(f"CREATE USER {db}"),
'unless': mariadb(f"SELECT User FROM mysql.user WHERE User = '{db}'") + f" | grep -q '^{db}$'",
'needs': [
f'action:mariadb_create_database_{db}',
],
}
pw = conf['password']
actions[f'mariadb_user_{db}_password'] = {
'command': mariadb(f"SET PASSWORD FOR {db} = PASSWORD('{conf['password']}')"),
'unless': f'echo {quote(pw)} | mariadb -u {db} -e quit -p',
'needs': [
f'action:mariadb_user_{db}_create',
],
}
actions[f'mariadb_grant_privileges_to_{db}'] = {
'command': mariadb(f"GRANT ALL PRIVILEGES ON {db}.* TO '{db}'", database=db),
'unless': mariadb(f"SHOW GRANTS FOR {db}") + f" | grep -q '^GRANT ALL PRIVILEGES ON `{db}`.* TO `{db}`@`%`'",
'needs': [
f'action:mariadb_user_{db}_create',
],
}
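The `mariadb()` helper at the top of this bundle renders keyword arguments as CLI flags before the fixed `-Bsr --execute` part; a hedged sketch of what one call produces (the database name is illustrative):

```sh
# mariadb("SHOW DATABASES", database='foo') renders to:
mariadb --database foo -Bsr --execute 'SHOW DATABASES'
```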

View file

@ -1,45 +0,0 @@
defaults = {
'apt': {
'packages': {
'mariadb-server': {
'needs': {
'zfs_dataset:tank/mariadb',
},
},
'mariadb-client': {
'needs': {
'zfs_dataset:tank/mariadb',
},
},
},
},
'mariadb': {
'databases': {},
'conf': {
# https://www.reddit.com/r/zfs/comments/u1xklc/mariadbmysql_database_settings_for_zfs
'mysqld': {
'skip-innodb_doublewrite': None,
'innodb_flush_method': 'fsync',
'innodb_doublewrite': '0',
'innodb_use_atomic_writes': '0',
'innodb_use_native_aio': '0',
'innodb_read_io_threads': '10',
'innodb_write_io_threads': '10',
'innodb_buffer_pool_size': '26G',
'innodb_flush_log_at_trx_commit': '1',
'innodb_log_file_size': '1G',
'innodb_flush_neighbors': '0',
'innodb_fast_shutdown': '2',
},
},
},
'zfs': {
'datasets': {
'tank/mariadb': {
'mountpoint': '/var/lib/mysql',
'recordsize': '16384',
'atime': 'off',
},
},
},
}

View file

@ -1,19 +0,0 @@
for network_name, network_conf in node.metadata.get('network').items():
if 'qdisc' in network_conf:
svc_systemd[f'qdisc-{network_name}.service'] = {
'enabled': True,
'running': None,
'needs': {
f'file:/usr/local/lib/systemd/system/qdisc-{network_name}.service',
},
}
actions[f'qdisc-{network_name}.service_restart_workaround'] = {
'command': 'true',
'triggered': True,
'triggered_by': {
f'file:/usr/local/lib/systemd/system/qdisc-{network_name}.service',
},
'triggers': {
f'svc_systemd:qdisc-{network_name}.service:restart',
},
}

View file

@ -5,137 +5,41 @@ defaults = {
} }
@metadata_reactor.provides(
'network',
)
def dhcp(metadata):
networks = {}
for network_name, network_conf in metadata.get('network').items():
_interface = ip_interface(network_conf['ipv4'])
_ip = _interface.ip
_network = _interface.network
_hosts = list(_network.hosts())
if network_conf.get('dhcp_server', False):
networks[network_name] = {
'dhcp_server_config': {
'subnet': str(_network),
'pool_from': str(_hosts[len(_hosts)//2]),
'pool_to': str(_hosts[-3]),
'router': str(_ip),
'domain-name-servers': str(_ip),
}
}
return {
'network': networks,
}
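The pool arithmetic in the removed `dhcp` reactor hands the upper half of the subnet to DHCP and reserves the last two usable addresses; a sketch for an illustrative 192.168.0.1/24 interface:

```sh
python3 -c '
from ipaddress import ip_interface
hosts = list(ip_interface("192.168.0.1/24").network.hosts())
print("pool_from:", hosts[len(hosts)//2])  # 192.168.0.128
print("pool_to:  ", hosts[-3])             # 192.168.0.252
'
```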
@metadata_reactor.provides( @metadata_reactor.provides(
'systemd/units', 'systemd/units',
) )
def units(metadata): def units(metadata):
if node.has_bundle('systemd-networkd'): units = {}
units = {}
for network_name, network_conf in metadata.get('network').items(): for type, network in metadata.get('network').items():
interface_type = network_conf.get('type', None) units[f'{type}.network'] = {
'Match': {
# network 'Name': network['interface'],
units[f'{network_name}.network'] = {
'Match': {
'Name': network_name if interface_type == 'vlan' else network_conf['interface'],
},
'Network': {
'DHCP': network_conf.get('dhcp', 'no'),
'IPv6AcceptRA': network_conf.get('dhcp', 'no'),
'VLAN': set(
other_network_name
for other_network_name, other_network_conf in metadata.get('network', {}).items()
if other_network_conf.get('type') == 'vlan' and other_network_conf['vlan_interface'] == network_name
)
}
}
# type
if interface_type:
units[f'{network_name}.network']['Match']['Type'] = interface_type
# ips
for i in [4, 6]:
if network_conf.get(f'ipv{i}', None):
units[f'{network_name}.network'].update({
f'Address#ipv{i}': {
'Address': network_conf[f'ipv{i}'],
},
})
if f'gateway{i}' in network_conf:
units[f'{network_name}.network'].update({
f'Route#ipv{i}': {
'Gateway': network_conf[f'gateway{i}'],
'GatewayOnlink': 'yes',
}
})
# as vlan
if interface_type == 'vlan':
units[f"{network_name}.netdev"] = {
'NetDev': {
'Name': network_name,
'Kind': 'vlan',
},
'VLAN': {
'Id': network_conf['id'],
}
}
# cake WIP
# if 'cake' in network_conf:
# units[f'{network_name}.network']['CAKE'] = network_conf['cake']
return {
'systemd': {
'units': units,
}
}
else:
return {}
@metadata_reactor.provides(
'systemd/units',
)
def queuing_disciplines(metadata):
if node.has_bundle('systemd-networkd'):
return {
'systemd': {
'units': {
f'qdisc-{network_name}.service': {
'Unit': {
'Description': f'setup queuing discipline for interface {network_name}',
'Wants': 'network.target',
'After': 'network.target',
'BindsTo': 'network.target',
},
'Service': {
'Type': 'oneshot',
'ExecStart': f'/sbin/tc qdisc replace root dev {network_name} {network_conf["qdisc"]}',
'RemainAfterExit': 'yes',
},
'Install': {
'WantedBy': 'network-online.target',
},
}
for network_name, network_conf in metadata.get('network').items()
if 'qdisc' in network_conf
},
}, },
'Network': {
'DHCP': network.get('dhcp', 'no'),
'IPv6AcceptRA': network.get('dhcp', 'no'),
}
} }
else:
return {} for i in [4, 6]:
if network.get(f'ipv{i}', None):
units[f'{type}.network'].update({
f'Address#ipv{i}': {
'Address': network[f'ipv{i}'],
},
})
if f'gateway{i}' in network:
units[f'{type}.network'].update({
f'Route#ipv{i}': {
'Gateway': network[f'gateway{i}'],
'GatewayOnlink': 'yes',
}
})
return {
'systemd': {
'units': units,
}
}

View file

@ -29,8 +29,8 @@ defaults = {
'exclude': [ 'exclude': [
'^appdata_', '^appdata_',
'^updater-', '^updater-',
'^nextcloud\\.log', '^nextcloud\.log',
'^updater\\.log', '^updater\.log',
'^[^/]+/cache', '^[^/]+/cache',
'^[^/]+/files_versions', '^[^/]+/files_versions',
'^[^/]+/files_trashbin', '^[^/]+/files_trashbin',
@ -123,9 +123,9 @@ def config(metadata):
], ],
'cache_path': '/var/lib/nextcloud/.cache', 'cache_path': '/var/lib/nextcloud/.cache',
'upgrade.disable-web': True, 'upgrade.disable-web': True,
'memcache.local': '\\OC\\Memcache\\Redis', 'memcache.local': '\OC\Memcache\Redis',
'memcache.locking': '\\OC\\Memcache\\Redis', 'memcache.locking': '\OC\Memcache\Redis',
'memcache.distributed': '\\OC\\Memcache\\Redis', 'memcache.distributed': '\OC\Memcache\Redis',
'redis': { 'redis': {
'host': '/var/run/redis/nextcloud.sock' 'host': '/var/run/redis/nextcloud.sock'
}, },
@ -142,7 +142,6 @@ def config(metadata):
'versions_retention_obligation': 'auto, 90', 'versions_retention_obligation': 'auto, 90',
'simpleSignUpLink.shown': False, 'simpleSignUpLink.shown': False,
'allow_local_remote_servers': True, # FIXME? 'allow_local_remote_servers': True, # FIXME?
'maintenance_window_start': 1, # https://docs.nextcloud.com/server/29/admin_manual/configuration_server/background_jobs_configuration.html#maintenance-window-start
}, },
}, },
} }

View file

@ -8,5 +8,4 @@ examples
```sh ```sh
nft add rule inet filter input tcp dport 5201 accept nft add rule inet filter input tcp dport 5201 accept
nft add rule inet filter input udp dport 5201 accept
``` ```
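Rules added with `nft add rule` exist only in the running ruleset; since `/etc/nftables.conf` is templated by this bundle, such additions are for testing only. A quick way to confirm a rule landed:

```sh
nft list chain inet filter input   # shows the live input chain
```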

View file

@ -2,23 +2,6 @@
flush ruleset flush ruleset
% if nat:
table ip nat {
# NAT
chain postrouting {
type nat hook postrouting priority 100
policy accept
# rules
% for rule in sorted(nat):
${rule}
% endfor
}
}
% endif
table inet filter { table inet filter {
# INPUT # INPUT

View file

@ -6,7 +6,6 @@ files = {
'input': node.metadata.get('nftables/input'), 'input': node.metadata.get('nftables/input'),
'forward': node.metadata.get('nftables/forward'), 'forward': node.metadata.get('nftables/forward'),
'output': node.metadata.get('nftables/output'), 'output': node.metadata.get('nftables/output'),
'nat': node.metadata.get('nftables/nat'),
}, },
'triggers': [ 'triggers': [
'svc_systemd:nftables.service:reload', 'svc_systemd:nftables.service:reload',

View file

@ -8,8 +8,7 @@ defaults = {
'input': { 'input': {
'tcp dport 22 accept', 'tcp dport 22 accept',
}, },
'forward': set(), 'forward': {},
'nat': set(), 'output': {},
'output': set(),
}, },
} }

View file

@ -1,6 +1,6 @@
pid /var/run/nginx.pid; pid /var/run/nginx.pid;
user www-data; user www-data;
worker_processes ${worker_processes}; worker_processes 10;
% for module in sorted(modules): % for module in sorted(modules):
load_module modules/ngx_${module}_module.so; load_module modules/ngx_${module}_module.so;
@ -21,9 +21,6 @@ http {
server_names_hash_bucket_size 128; server_names_hash_bucket_size 128;
tcp_nopush on; tcp_nopush on;
client_max_body_size 32G; client_max_body_size 32G;
ssl_dhparam "/etc/ssl/certs/dhparam.pem";
# don't show nginx version
server_tokens off;
% if node.has_bundle('php'): % if node.has_bundle('php'):
upstream php-handler { upstream php-handler {
@ -31,13 +28,5 @@ http {
} }
% endif % endif
include /etc/nginx/sites/*;
% if has_websockets:
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
% endif
include /etc/nginx/sites-enabled/*;
} }

View file

@ -9,7 +9,7 @@ directories = {
'svc_systemd:nginx:restart', 'svc_systemd:nginx:restart',
}, },
}, },
'/etc/nginx/sites-available': { '/etc/nginx/sites': {
'purge': True, 'purge': True,
'triggers': { 'triggers': {
'svc_systemd:nginx:restart', 'svc_systemd:nginx:restart',
@ -32,8 +32,6 @@ files = {
'content_type': 'mako', 'content_type': 'mako',
'context': { 'context': {
'modules': node.metadata.get('nginx/modules'), 'modules': node.metadata.get('nginx/modules'),
'worker_processes': node.metadata.get('vm/cores'),
'has_websockets': node.metadata.get('nginx/has_websockets'),
}, },
'triggers': { 'triggers': {
'svc_systemd:nginx:restart', 'svc_systemd:nginx:restart',
@ -76,15 +74,9 @@ files = {
}, },
} }
symlinks = {
'/etc/nginx/sites-enabled': {
'target': '/etc/nginx/sites-available',
},
}
actions = { actions = {
'nginx-generate-dhparam': { 'nginx-generate-dhparam': {
'command': 'openssl dhparam -dsaparam -out /etc/ssl/certs/dhparam.pem 4096', 'command': 'openssl dhparam -out /etc/ssl/certs/dhparam.pem 2048',
'unless': 'test -f /etc/ssl/certs/dhparam.pem', 'unless': 'test -f /etc/ssl/certs/dhparam.pem',
}, },
} }
@ -100,7 +92,7 @@ svc_systemd = {
for name, config in node.metadata.get('nginx/vhosts').items(): for name, config in node.metadata.get('nginx/vhosts').items():
files[f'/etc/nginx/sites-available/{name}'] = { files[f'/etc/nginx/sites/{name}'] = {
'content': Template(filename=join(repo.path, 'data', config['content'])).render( 'content': Template(filename=join(repo.path, 'data', config['content'])).render(
server_name=name, server_name=name,
**config.get('context', {}), **config.get('context', {}),
@ -116,6 +108,6 @@ for name, config in node.metadata.get('nginx/vhosts').items():
} }
if name in node.metadata.get('letsencrypt/domains'): if name in node.metadata.get('letsencrypt/domains'):
files[f'/etc/nginx/sites-available/{name}']['needs'].append( files[f'/etc/nginx/sites/{name}']['needs'].append(
f'action:letsencrypt_ensure-some-certificate_{name}', f'action:letsencrypt_ensure-some-certificate_{name}',
) )

View file

@ -18,7 +18,6 @@ defaults = {
'nginx': { 'nginx': {
'vhosts': {}, 'vhosts': {},
'modules': set(), 'modules': set(),
'has_websockets': False,
}, },
'systemd': { 'systemd': {
'units': { 'units': {
@ -74,6 +73,7 @@ def dns(metadata):
@metadata_reactor.provides( @metadata_reactor.provides(
'letsencrypt/domains', 'letsencrypt/domains',
'letsencrypt/reload_after',
) )
def letsencrypt(metadata): def letsencrypt(metadata):
return { return {
@ -96,7 +96,7 @@ def monitoring(metadata):
'monitoring': { 'monitoring': {
'services': { 'services': {
hostname: { hostname: {
'vars.command': f"/usr/bin/curl -X GET -L --fail --no-progress-meter -o /dev/null {vhost.get('check_protocol', 'https')}://{quote(hostname + vhost.get('check_path', '/'))}", 'vars.command': f"/usr/bin/curl -X GET -L --fail --no-progress-meter -o /dev/null {quote(hostname + vhost.get('check_path', ''))}",
} }
for hostname, vhost in metadata.get('nginx/vhosts').items() for hostname, vhost in metadata.get('nginx/vhosts').items()
}, },

View file

@ -1,7 +1,5 @@
from os.path import join, exists
from re import sub from re import sub
from cryptography.hazmat.primitives import serialization as crypto_serialization from cryptography.hazmat.primitives import serialization as crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from base64 import b64decode from base64 import b64decode

View file

@ -1,3 +1,9 @@
from os.path import join
import json
from bundlewrap.utils.dicts import merge_dict
version = node.metadata.get('php/version') version = node.metadata.get('php/version')
files = { files = {
@ -15,7 +21,7 @@ files = {
f'pkg_apt:php{version}-fpm', f'pkg_apt:php{version}-fpm',
}, },
'triggers': { 'triggers': {
f'svc_systemd:php{version}-fpm.service:restart', f'svc_systemd:php{version}-fpm:restart',
}, },
}, },
f'/etc/php/{version}/fpm/pool.d/www.conf': { f'/etc/php/{version}/fpm/pool.d/www.conf': {
@ -27,13 +33,13 @@ files = {
f'pkg_apt:php{version}-fpm', f'pkg_apt:php{version}-fpm',
}, },
'triggers': { 'triggers': {
f'svc_systemd:php{version}-fpm.service:restart', f'svc_systemd:php{version}-fpm:restart',
}, },
}, },
} }
svc_systemd = { svc_systemd = {
f'php{version}-fpm.service': { f'php{version}-fpm': {
'needs': { 'needs': {
'pkg_apt:', 'pkg_apt:',
f'file:/etc/php/{version}/fpm/php.ini', f'file:/etc/php/{version}/fpm/php.ini',

View file

@ -113,7 +113,7 @@ def php_ini(metadata):
'opcache.revalidate_freq': '60', 'opcache.revalidate_freq': '60',
}, },
} }
return { return {
'php': { 'php': {
'php.ini': { 'php.ini': {
@ -145,7 +145,7 @@ def www_conf(metadata):
'pm': 'dynamic', 'pm': 'dynamic',
'pm.max_children': int(threads*2), 'pm.max_children': int(threads*2),
'pm.start_servers': int(threads), 'pm.start_servers': int(threads),
'pm.min_spare_servers': max([1, int(threads/2)]), 'pm.min_spare_servers': int(threads/2),
'pm.max_spare_servers': int(threads), 'pm.max_spare_servers': int(threads),
'pm.max_requests': int(threads*32), 'pm.max_requests': int(threads*32),
}, },

View file

@ -44,9 +44,7 @@ smtpd_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1
smtpd_restriction_classes = mua_sender_restrictions, mua_client_restrictions, mua_helo_restrictions smtpd_restriction_classes = mua_sender_restrictions, mua_client_restrictions, mua_helo_restrictions
mua_client_restrictions = permit_sasl_authenticated, reject mua_client_restrictions = permit_sasl_authenticated, reject
mua_sender_restrictions = permit_sasl_authenticated, reject mua_sender_restrictions = permit_sasl_authenticated, reject
## MS Outlook, incompatible with reject_non_fqdn_hostname and/or reject_invalid_hostname mua_helo_restrictions = permit_mynetworks, reject_non_fqdn_hostname, reject_invalid_hostname, permit
## https://unix.stackexchange.com/a/91753/357916
mua_helo_restrictions = permit_mynetworks, permit
smtpd_milters = inet:localhost:8891 inet:127.0.0.1:11332 smtpd_milters = inet:localhost:8891 inet:127.0.0.1:11332
non_smtpd_milters = inet:localhost:8891 inet:127.0.0.1:11332 non_smtpd_milters = inet:localhost:8891 inet:127.0.0.1:11332

View file

@ -86,8 +86,6 @@ if node.has_bundle('telegraf'):
'needs': [ 'needs': [
'pkg_apt:acl', 'pkg_apt:acl',
'svc_systemd:postfix', 'svc_systemd:postfix',
'svc_systemd:postfix:reload',
'svc_systemd:postfix:restart',
], ],
} }
actions['postfix_setfacl_default_telegraf'] = { actions['postfix_setfacl_default_telegraf'] = {
@ -96,7 +94,5 @@ if node.has_bundle('telegraf'):
'needs': [ 'needs': [
'pkg_apt:acl', 'pkg_apt:acl',
'svc_systemd:postfix', 'svc_systemd:postfix',
'svc_systemd:postfix:reload',
'svc_systemd:postfix:restart',
], ],
} }

View file

@ -1,22 +0,0 @@
# DO NOT DISABLE!
# If you change this first entry you will need to make sure that the
# database superuser can access the database using some other method.
# Noninteractive access to all databases is required during automatic
# maintenance (custom daily cronjobs, replication, and similar tasks).
#
# Database administrative login by Unix domain socket
local all postgres peer
# TYPE DATABASE USER ADDRESS METHOD
# "local" is for Unix domain socket connections only
local all all peer
# IPv4 local connections:
host all all 127.0.0.1/32 ${node.metadata.get('postgresql/password_algorithm', 'md5')}
# IPv6 local connections:
host all all ::1/128 ${node.metadata.get('postgresql/password_algorithm', 'md5')}
# Allow replication connections from localhost, by a user with the
# replication privilege.
local replication all peer
host replication all 127.0.0.1/32 ${node.metadata.get('postgresql/password_algorithm', 'md5')}
host replication all ::1/128 ${node.metadata.get('postgresql/password_algorithm', 'md5')}

View file

@ -12,27 +12,12 @@ directories = {
'zfs_dataset:tank/postgresql', 'zfs_dataset:tank/postgresql',
], ],
'needed_by': [ 'needed_by': [
'svc_systemd:postgresql.service', 'svc_systemd:postgresql',
], ],
} }
} }
files = { files = {
f"/etc/postgresql/{version}/main/pg_hba.conf": {
'content_type': 'mako',
'mode': '0640',
'owner': 'postgres',
'group': 'postgres',
'needs': [
'pkg_apt:postgresql',
],
'needed_by': [
'svc_systemd:postgresql.service',
],
'triggers': [
'svc_systemd:postgresql.service:restart',
],
},
f"/etc/postgresql/{version}/main/conf.d/managed.conf": { f"/etc/postgresql/{version}/main/conf.d/managed.conf": {
'content': '\n'.join( 'content': '\n'.join(
f'{key} = {value}' f'{key} = {value}'
@ -40,19 +25,16 @@ files = {
) + '\n', ) + '\n',
'owner': 'postgres', 'owner': 'postgres',
'group': 'postgres', 'group': 'postgres',
'needs': [
'pkg_apt:postgresql',
],
'needed_by': [ 'needed_by': [
'svc_systemd:postgresql.service', 'svc_systemd:postgresql',
], ],
'triggers': [ 'triggers': [
'svc_systemd:postgresql.service:restart', 'svc_systemd:postgresql:restart',
], ],
}, },
} }
svc_systemd['postgresql.service'] = { svc_systemd['postgresql'] = {
'needs': [ 'needs': [
'pkg_apt:postgresql', 'pkg_apt:postgresql',
], ],
@ -61,13 +43,13 @@ svc_systemd['postgresql.service'] = {
for user, config in node.metadata.get('postgresql/roles').items(): for user, config in node.metadata.get('postgresql/roles').items():
postgres_roles[user] = merge_dict(config, { postgres_roles[user] = merge_dict(config, {
'needs': [ 'needs': [
'svc_systemd:postgresql.service', 'svc_systemd:postgresql',
], ],
}) })
for database, config in node.metadata.get('postgresql/databases').items(): for database, config in node.metadata.get('postgresql/databases').items():
postgres_dbs[database] = merge_dict(config, { postgres_dbs[database] = merge_dict(config, {
'needs': [ 'needs': [
'svc_systemd:postgresql.service', 'svc_systemd:postgresql',
], ],
}) })

View file

@ -6,11 +6,7 @@ root_password = repo.vault.password_for(f'{node.name} postgresql root')
defaults = { defaults = {
'apt': { 'apt': {
'packages': { 'packages': {
'postgresql': { 'postgresql': {},
'needs': {
'zfs_dataset:tank/postgresql',
},
},
}, },
}, },
'backup': { 'backup': {
@ -58,25 +54,6 @@ def conf(metadata):
} }
@metadata_reactor.provides(
'apt/config/APT/NeverAutoRemove',
)
def apt(metadata):
return {
'apt': {
'config': {
'APT': {
'NeverAutoRemove': {
# https://github.com/credativ/postgresql-common/blob/master/pg_updateaptconfig#L17-L21
f"^postgresql.*-{metadata.get('postgresql/version')}",
},
},
},
},
}
@metadata_reactor.provides( @metadata_reactor.provides(
'zfs/datasets', 'zfs/datasets',
) )

View file

@ -1,36 +0,0 @@
# Fritzbox
Internet > Access data ("Zugangsdaten")
Internet provider
- more Internet providers
- other Internet provider
- Name: "My PPPOE" (do not leave empty)
Connection
(x) Connected to a DSL line
Access data
(x) No
Connection settings
[x] Use VLAN for the Internet connection
VLAN ID: 7
PBit: 0
DSL ATM settings
VPI: 1
VCI: 32
Encapsulation
(x) Routed Bridge Encapsulation
[x] Obtain IP address automatically via DHCP
DHCP hostname: fritz.box
PPPoE passthrough
[x] Connected network devices may additionally establish their own Internet connection (not recommended)
[ ] Check Internet access after "Apply"
-> Afterwards, "Internet provider" must show the chosen name instead of "more Internet providers", e.g. "My PPPOE"

View file

@ -1,3 +0,0 @@
# Secrets for authentication using CHAP
# client server secret IP addresses
"${user}" * "${secret}" *

View file

@ -1,10 +0,0 @@
linkname ppp0
noauth
defaultroute
replacedefaultroute
persist
maxfail 0
lcp-echo-interval 20
lcp-echo-failure 3
plugin rp-pppoe.so ${interface}
user "${user}"

View file

@ -1,42 +0,0 @@
files = {
'/etc/modules-load.d/pppoe.conf': {
'content': 'pppoe\npppox\nppp_generic',
'mode': '0644',
},
'/etc/ppp/peers/isp': {
'content_type': 'mako',
'mode': '0644',
'context': {
'interface': node.metadata.get('pppoe/interface'),
'user': node.metadata.get('pppoe/user'),
},
'needs': {
'pkg_apt:pppoe',
},
},
'/etc/ppp/chap-secrets': {
'content_type': 'mako',
'mode': '0600',
'context': {
'user': node.metadata.get('pppoe/user'),
'secret': node.metadata.get('pppoe/secret'),
},
'needs': {
'pkg_apt:pppoe',
},
},
}
svc_systemd = {
'pppoe-isp.service': {
'needs': {
'file:/etc/ppp/peers/isp',
'file:/etc/ppp/chap-secrets',
},
},
'qdisc-ppp0.service': {
'needs': {
'svc_systemd:pppoe-isp.service',
},
},
}

View file

@ -1,43 +0,0 @@
defaults = {
'apt': {
'packages': {
'pppoe': {},
},
},
'nftables': {
'nat': {
'oifname ppp0 masquerade',
},
},
'systemd': {
'units': {
'pppoe-isp.service': {
'Unit': {
'Description': 'PPPoE Internet Connection',
'After': 'network.target',
},
'Service': {
'Type': 'forking',
'ExecStart': '/usr/sbin/pppd call isp',
'Restart': 'on-failure',
'RestartSec': 5,
},
},
'qdisc-ppp0.service': {
'Unit': {
'Description': 'setup queuing discipline for interface ppp0',
'After': 'sys-devices-virtual-net-ppp0.device',
'BindsTo': 'sys-devices-virtual-net-ppp0.device',
},
'Service': {
'Type': 'oneshot',
'ExecStart': '/sbin/tc qdisc replace root dev ppp0 cake bandwidth 30Mbit rtt 50ms diffserv4 nat egress',
'RemainAfterExit': 'yes',
},
'Install': {
'WantedBy': 'network-online.target',
},
}
},
},
}
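Whether the cake discipline from `qdisc-ppp0.service` actually took effect can be checked at runtime (tc from iproute2; output shape may vary by kernel):

```sh
tc qdisc show dev ppp0   # expect: qdisc cake ... bandwidth 30Mbit diffserv4 nat egress
```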

View file

@ -1,21 +0,0 @@
files = {
'/etc/apt/apt.conf.d/10pveapthook': {
'content_type': 'any',
'mode': '0644',
},
'/etc/apt/apt.conf.d/76pveconf': {
'content_type': 'any',
'mode': '0444',
},
'/etc/apt/apt.conf.d/76pveproxy': {
'content_type': 'any',
'mode': '0644',
},
'/etc/network/interfaces': {
'content_type': 'any',
},
}
symlinks['/etc/ssh/ssh_host_rsa_key.pub'] = {
'target': '/etc/ssh/ssh_host_managed_key.pub',
}

View file

@ -1,100 +0,0 @@
defaults = {
'apt': {
'packages': {
'linux-image-amd64': {
'installed': False,
},
'proxmox-default-kernel': {},
# after reboot
'proxmox-ve': {},
'postfix': {},
'open-iscsi': {},
'chrony': {},
'os-prober': {
'installed': False,
},
'dnsmasq-base': {},
},
'sources': {
'proxmox-ve': {
'options': {
'aarch': 'amd64',
},
'urls': {
'http://download.proxmox.com/debian/pve',
},
'suites': {
'{codename}',
},
'components': {
'pve-no-subscription',
},
'key': 'proxmox-ve-{codename}',
},
},
},
# 'nftables': {
# 'input': {
# 'tcp dport 8006 accept',
# },
# },
'zfs': {
'datasets': {
'tank/proxmox-ve': {
'mountpoint': '/var/lib/proxmox-ve',
},
}
}
}
# @metadata_reactor.provides(
# 'systemd',
# )
# def bridge(metadata):
# return {
# 'systemd': {
# 'units': {
# # f'internal.network': {
# # 'Network': {
# # 'Bridge': 'br0',
# # },
# # },
# 'br0.netdev': {
# 'NetDev': {
# 'Name': 'br0',
# 'Kind': 'bridge'
# },
# },
# 'br0.network': {
# 'Match': {
# 'Name': 'br0',
# },
# 'Network': {
# 'Unmanaged': 'yes'
# },
# },
# },
# },
# }
@metadata_reactor.provides(
'nginx/has_websockets',
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'has_websockets': True,
'vhosts': {
metadata.get('proxmox-ve/domain'): {
'content': 'nginx/proxy_pass.conf',
'context': {
'target': 'https://localhost:8006',
'websockets': True,
}
},
},
},
}

View file

@ -1,25 +0,0 @@
from shlex import quote
directories = {
'/opt/pyenv': {},
'/opt/pyenv/install': {},
}
git_deploy = {
'/opt/pyenv/install': {
'repo': 'https://github.com/pyenv/pyenv.git',
'rev': 'master',
'needs': {
'directory:/opt/pyenv/install',
},
},
}
for version in node.metadata.get('pyenv/versions'):
actions[f'pyenv_install_{version}'] = {
'command': f'PYENV_ROOT=/opt/pyenv /opt/pyenv/install/bin/pyenv install {quote(version)}',
'unless': f'PYENV_ROOT=/opt/pyenv /opt/pyenv/install/bin/pyenv versions --bare | grep -Fxq {quote(version)}',
'needs': {
'git_deploy:/opt/pyenv/install',
},
}

View file

@ -1,23 +0,0 @@
defaults = {
'apt': {
'packages': {
'build-essential': {},
'libssl-dev': {},
'zlib1g-dev': {},
'libbz2-dev': {},
'libreadline-dev': {},
'libsqlite3-dev': {},
'curl': {},
'libncurses-dev': {},
'xz-utils': {},
'tk-dev': {},
'libxml2-dev': {},
'libxmlsec1-dev': {},
'libffi-dev': {},
'liblzma-dev': {},
},
},
'pyenv': {
'versions': set(),
},
}

View file

@ -1,3 +0,0 @@
- Homematic > Settings > Control panel > Security > SSH > active & set password
- ssh to node > `ssh-copy-id -o StrictHostKeyChecking=no root@{homematic}`
- Homematic > Settings > Control panel > Security > Automatic forwarding to HTTPS > active

View file

@ -1,3 +1,6 @@
from shlex import quote
@metadata_reactor.provides( @metadata_reactor.provides(
'letsencrypt/domains', 'letsencrypt/domains',
) )
@ -17,6 +20,8 @@ def letsencrypt(metadata):
'systemd-timers/raspberrymatic-cert', 'systemd-timers/raspberrymatic-cert',
) )
def systemd_timers(metadata): def systemd_timers(metadata):
domain = metadata.get('raspberrymatic-cert/domain')
return { return {
'systemd-timers': { 'systemd-timers': {
'raspberrymatic-cert': { 'raspberrymatic-cert': {

Some files were not shown because too many files have changed in this diff.