Compare commits

..

3 commits

SHA1        Message  Date
4567b66c60  wip      2023-08-01 19:03:08 +02:00
5b0b2e77ce  wip      2023-08-01 18:58:37 +02:00
2256f8b384  wip      2023-08-01 18:08:54 +02:00
165 changed files with 818 additions and 3633 deletions

.envrc
View file

@ -1,7 +1,5 @@
#!/usr/bin/env bash
PATH_add bin
source_env ~/.local/share/direnv/pyenv
source_env ~/.local/share/direnv/venv
source_env ~/.local/share/direnv/bundlewrap

View file

@ -37,12 +37,3 @@ fi
telegraf: execd for daemons
TEST
# git signing
git config --global gpg.format ssh
git config --global commit.gpgsign true
git config user.name CroneKorkN
git config user.email i@ckn.li
git config user.signingkey "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILMVroYmswD4tLk6iH+2tvQiyaMe42yfONDsPDIdFv6I"

View file

@ -10,6 +10,7 @@ nodes = [
for node in sorted(repo.nodes_in_group('debian'))
if not node.dummy
]
reboot_nodes = []
print('updating nodes:', sorted(node.name for node in nodes))
@ -23,13 +24,14 @@ for node in nodes:
print(node.run('DEBIAN_FRONTEND=noninteractive apt update').stdout.decode())
print(node.run('DEBIAN_FRONTEND=noninteractive apt list --upgradable').stdout.decode())
if int(node.run('DEBIAN_FRONTEND=noninteractive apt list --upgradable 2> /dev/null | grep upgradable | wc -l').stdout.decode()):
print(node.run('DEBIAN_FRONTEND=noninteractive apt -qy full-upgrade').stdout.decode())
print(node.run('DEBIAN_FRONTEND=noninteractive apt -y dist-upgrade').stdout.decode())
reboot_nodes.append(node)
# REBOOT IN ORDER
wireguard_servers = [
node
for node in nodes
for node in reboot_nodes
if node.has_bundle('wireguard')
and (
ip_interface(node.metadata.get('wireguard/my_ip')).network.prefixlen <
@ -39,7 +41,7 @@ wireguard_servers = [
wireguard_s2s = [
node
for node in nodes
for node in reboot_nodes
if node.has_bundle('wireguard')
and (
ip_interface(node.metadata.get('wireguard/my_ip')).network.prefixlen ==
@ -49,7 +51,7 @@ wireguard_s2s = [
everything_else = [
node
for node in nodes
for node in reboot_nodes
if not node.has_bundle('wireguard')
]
@ -60,11 +62,8 @@ for node in [
*wireguard_s2s,
*wireguard_servers,
]:
print('rebooting', node.name)
try:
if node.run('test -e /var/run/reboot-required', may_fail=True).return_code == 0:
print('rebooting', node.name)
print(node.run('systemctl reboot').stdout.decode())
else:
print('not rebooting', node.name)
print(node.run('systemctl reboot').stdout.decode())
except Exception as e:
print(e)

View file

@ -5,17 +5,9 @@ from os.path import realpath, dirname
from sys import argv
from ipaddress import ip_network, ip_interface
if len(argv) != 3:
print(f'usage: {argv[0]} <node> <client>')
exit(1)
repo = Repository(dirname(dirname(realpath(__file__))))
server_node = repo.get_node(argv[1])
if argv[2] not in server_node.metadata.get('wireguard/clients'):
print(f'client {argv[2]} not found in: {server_node.metadata.get("wireguard/clients").keys()}')
exit(1)
data = server_node.metadata.get(f'wireguard/clients/{argv[2]}')
vpn_network = ip_interface(server_node.metadata.get('wireguard/my_ip')).network
@ -28,7 +20,9 @@ for peer in server_node.metadata.get('wireguard/s2s').values():
if not ip_network(network).subnet_of(vpn_network):
allowed_ips.append(ip_network(network))
conf = f'''
conf = \
f'''>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
[Interface]
PrivateKey = {repo.libs.wireguard.privkey(data['peer_id'])}
ListenPort = 51820
@ -41,12 +35,11 @@ PresharedKey = {repo.libs.wireguard.psk(data['peer_id'], server_node.metadata.ge
AllowedIPs = {', '.join(str(client_route) for client_route in sorted(allowed_ips))}
Endpoint = {ip_interface(server_node.metadata.get('network/external/ipv4')).ip}:51820
PersistentKeepalive = 10
'''
print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'''
print(conf)
print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
if input("print qrcode? [Yn]: ").upper() in ['', 'Y']:
if input("print qrcode? [yN]: ").upper() == 'Y':
import pyqrcode
print(pyqrcode.create(conf).terminal(quiet_zone=1))

View file

@ -13,9 +13,6 @@
'deb',
'deb-src',
},
'options': { # optional
'aarch': 'amd64',
},
'urls': {
'https://deb.debian.org/debian',
},

View file

@ -23,12 +23,12 @@ directories = {
'action:apt_update',
},
},
# '/etc/apt/listchanges.conf.d': {
# 'purge': True,
# 'triggers': {
# 'action:apt_update',
# },
# },
'/etc/apt/listchanges.conf.d': {
'purge': True,
'triggers': {
'action:apt_update',
},
},
'/etc/apt/preferences.d': {
'purge': True,
'triggers': {
@ -50,24 +50,17 @@ files = {
'action:apt_update',
},
},
'/etc/apt/sources.list': {
'content': '# managed by bundlewrap\n',
'triggers': {
'action:apt_update',
},
'/etc/apt/listchanges.conf': {
'content': repo.libs.ini.dumps(node.metadata.get('apt/list_changes')),
},
# '/etc/apt/listchanges.conf': {
# 'content': repo.libs.ini.dumps(node.metadata.get('apt/list_changes')),
# },
'/usr/lib/nagios/plugins/check_apt_upgradable': {
'mode': '0755',
},
# /etc/kernel/postinst.d/apt-auto-removal
}
actions = {
'apt_update': {
'command': 'apt-get update',
'command': 'apt-get update -o APT::Update::Error-Mode=any',
'needed_by': {
'pkg_apt:',
},

View file

@ -1,24 +1,13 @@
defaults = {
'apt': {
'packages': {
'apt-listchanges': {
'installed': False,
},
},
'config': {
'DPkg': {
'Pre-Install-Pkgs': {
'/usr/sbin/dpkg-preconfigure --apt || true',
},
'Post-Invoke': {
# keep package cache empty
'/bin/rm -f /var/cache/apt/archives/*.deb || true',
},
'Options': {
# https://unix.stackexchange.com/a/642541/357916
'--force-confold',
'--force-confdef',
},
},
'APT': {
'NeverAutoRemove': {
@ -40,13 +29,7 @@ defaults = {
'metapackages',
'tasks',
},
'Move-Autobit-Sections': {
'oldlibs',
},
'Update': {
# https://unix.stackexchange.com/a/653377/357916
'Error-Mode': 'any',
},
'Move-Autobit-Sections': 'oldlibs',
},
},
'sources': {},
@ -133,45 +116,45 @@ def unattended_upgrades(metadata):
}
# @metadata_reactor.provides(
# 'apt/config',
# 'apt/list_changes',
# )
# def listchanges(metadata):
# return {
# 'apt': {
# 'config': {
# 'DPkg': {
# 'Pre-Install-Pkgs': {
# '/usr/bin/apt-listchanges --apt || test $? -lt 10',
# },
# 'Tools': {
# 'Options': {
# '/usr/bin/apt-listchanges': {
# 'Version': '2',
# 'InfoFD': '20',
# },
# },
# },
# },
# 'Dir': {
# 'Etc': {
# 'apt-listchanges-main': 'listchanges.conf',
# 'apt-listchanges-parts': 'listchanges.conf.d',
# },
# },
# },
# 'list_changes': {
# 'apt': {
# 'frontend': 'pager',
# 'which': 'news',
# 'email_address': 'root',
# 'email_format': 'text',
# 'confirm': 'false',
# 'headers': 'false',
# 'reverse': 'false',
# 'save_seen': '/var/lib/apt/listchanges.db',
# },
# },
# },
# }
@metadata_reactor.provides(
'apt/config',
'apt/list_changes',
)
def listchanges(metadata):
return {
'apt': {
'config': {
'DPkg': {
'Pre-Install-Pkgs': {
'/usr/bin/apt-listchanges --apt || test $? -lt 10',
},
},
'Tools': {
'Options': {
'/usr/bin/apt-listchanges': {
'Version': '2',
'InfoFD': '20',
},
},
},
'Dir': {
'Etc': {
'apt-listchanges-main': 'listchanges.conf',
'apt-listchanges-parts': 'listchanges.conf.d',
},
},
},
'list_changes': {
'apt': {
'frontend': 'pager',
'which': 'news',
'email_address': 'root',
'email_format': 'text',
'confirm': 'false',
'headers': 'false',
'reverse': 'false',
'save_seen': '/var/lib/apt/listchanges.db',
},
},
},
}
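
For reference, the `list_changes` block above is rendered into an INI-style /etc/apt/listchanges.conf via `repo.libs.ini.dumps`; a minimal standalone sketch of the resulting shape, using `configparser` as a stand-in for that helper:

```python
# sketch only: configparser stands in for repo.libs.ini.dumps
from configparser import ConfigParser
from io import StringIO

list_changes = {
    'apt': {
        'frontend': 'pager',
        'which': 'news',
        'email_address': 'root',
        'email_format': 'text',
        'confirm': 'false',
        'headers': 'false',
        'reverse': 'false',
        'save_seen': '/var/lib/apt/listchanges.db',
    },
}

parser = ConfigParser()
parser.read_dict(list_changes)
out = StringIO()
parser.write(out)
print(out.getvalue())  # [apt] section with the key=value pairs above
```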

View file

@ -36,7 +36,7 @@ for dataset in config['datasets']:
if snapshot_datetime < two_days_ago:
days_ago = (now - snapshot_datetime).days
errors.add(f'dataset "{dataset}" has not been backed up for {days_ago} days')
errors.add(f'dataset "{dataset}" has no backups since {days_ago} days')
continue
if errors:

View file

@ -25,8 +25,7 @@ def backup_freshness_check(metadata):
'datasets': {
f"{other_node.metadata.get('id')}/{dataset}"
for other_node in repo.nodes
if not other_node.dummy
and other_node.has_bundle('backup')
if other_node.has_bundle('backup')
and other_node.has_bundle('zfs')
and other_node.metadata.get('backup/server') == metadata.get('backup-freshness-check/server')
for dataset, options in other_node.metadata.get('zfs/datasets').items()

View file

@ -35,7 +35,6 @@ def zfs(metadata):
for other_node in repo.nodes:
if (
not other_node.dummy and
other_node.has_bundle('backup') and
other_node.metadata.get('backup/server') == node.name
):

View file

@ -1,31 +1,13 @@
#!/bin/bash
set -u
set -exu
# FIXME: inelegant
% if wol_command:
${wol_command}
% endif
exit=0
failed_paths=""
for path in $(jq -r '.paths | .[]' < /etc/backup/config.json)
do
echo backing up $path
/opt/backup/backup_path "$path"
# set exit to 1 if any backup fails
if [ $? -ne 0 ]
then
echo ERROR: backing up $path failed >&2
exit=5
failed_paths="$failed_paths $path"
fi
done
if [ $exit -ne 0 ]
then
echo "ERROR: failed to backup paths: $failed_paths" >&2
fi
exit $exit

View file

@ -1,6 +1,6 @@
#!/bin/bash
set -eu
set -exu
path=$1
uuid=$(jq -r .client_uuid < /etc/backup/config.json)

bundles/bind/README.md Normal file
View file

@ -0,0 +1,29 @@
## DNSSEC
https://wiki.debian.org/DNSSEC%20Howto%20for%20BIND%209.9+#The_signing_part
https://blog.apnic.net/2021/11/02/dnssec-provisioning-automation-with-cds-cdnskey-in-the-real-world/
https://gist.github.com/wido/4c6288b2f5ba6d16fce37dca3fc2cb4a
```python
import dns.dnssec
algorithm = dns.dnssec.RSASHA256
```
```python
import cryptography
pk = cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key(key_size=2048, public_exponent=65537)
```
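A minimal sketch tying the two snippets above together — generating an RSA key pair and deriving the DNSKEY and DS records — assuming dnspython ≥ 2.3 (for `make_dnskey`/`make_ds`) and the `cryptography` package; the zone name is illustrative:
```python
import dns.dnssec
import dns.name
from cryptography.hazmat.primitives.asymmetric import rsa

# key pair for the zone (2048-bit RSA, as in the snippet above)
private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)

# DNSKEY rdata for the public key (flags 257 = KSK, 256 = ZSK)
dnskey = dns.dnssec.make_dnskey(
    public_key=private_key.public_key(),
    algorithm=dns.dnssec.RSASHA256,
    flags=257,
)

# DS record the parent zone publishes for this KSK
ds = dns.dnssec.make_ds(dns.name.from_text('example.com.'), dnskey, 'SHA256')

print(dnskey.to_text())
print(ds.to_text())
```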
## Nomenclature
### parent
DNSKEY:
    the public key
DS:
    digest of the KSK's DNSKEY, published in the parent zone
### sub
ZSK/KSK:
    zone-signing key / key-signing key
    https://www.cloudflare.com/de-de/dns/dnssec/how-dnssec-works/

View file

@ -0,0 +1,2 @@
; ${type} ${key_id}
${record}

View file

@ -0,0 +1,13 @@
Private-key-format: ${data['Private-key-format']}
Algorithm: ${data['Algorithm']}
Modulus: ${data['Modulus']}
PublicExponent: ${data['PublicExponent']}
PrivateExponent: ${data['PrivateExponent']}
Prime1: ${data['Prime1']}
Prime2: ${data['Prime2']}
Exponent1: ${data['Exponent1']}
Exponent2: ${data['Exponent2']}
Coefficient: ${data['Coefficient']}
Created: ${data['Created']}
Publish: ${data['Publish']}
Activate: ${data['Activate']}

View file

@ -10,7 +10,7 @@ options {
% if type == 'master':
notify yes;
also-notify { ${' '.join(sorted(f'{ip};' for ip in slave_ips))} };
allow-transfer { ${' '.join(sorted(f'{ip};' for ip in slave_ips))} };
also-notify { ${' '.join([f'{ip};' for ip in slave_ips])} };
allow-transfer { ${' '.join([f'{ip};' for ip in slave_ips])} };
% endif
};

View file

@ -19,7 +19,21 @@ directories[f'/var/lib/bind'] = {
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
directories[f'/var/cache/bind/keys'] = {
'group': 'bind',
'purge': True,
'needs': [
'pkg_apt:bind9',
],
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:restart',
],
}
@ -29,7 +43,7 @@ files['/etc/default/bind9'] = {
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
@ -43,7 +57,7 @@ files['/etc/bind/named.conf'] = {
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
@ -63,7 +77,7 @@ files['/etc/bind/named.conf.options'] = {
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
@ -93,7 +107,7 @@ files['/etc/bind/named.conf.local'] = {
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
@ -106,7 +120,7 @@ for view_name, view_conf in master_node.metadata.get('bind/views').items():
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
@ -127,7 +141,50 @@ for view_name, view_conf in master_node.metadata.get('bind/views').items():
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
for zone_name, zone_conf in master_node.metadata.get('bind/zones').items():
for sk in ('zsk_data', 'ksk_data'):
data = zone_conf['dnssec'][sk]
files[f"/var/cache/bind/keys/K{zone_name}.+008+{data['key_id']}.private"] = {
'content_type': 'mako',
'source': 'dnssec.private',
'context': {
'data': data['privkey_file'],
},
'group': 'bind',
'needs': [
'pkg_apt:bind9',
],
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:restart',
],
}
files[f"/var/cache/bind/keys/K{zone_name}.+008+{data['key_id']}.key"] = {
'content_type': 'mako',
'source': 'dnssec.key',
'context': {
'type': sk,
'key_id': data['key_id'],
'record': data['dnskey_record'],
},
'group': 'bind',
'needs': [
'pkg_apt:bind9',
],
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:restart',
],
}
@ -139,6 +196,6 @@ actions['named-checkconf'] = {
'unless': 'named-checkconf -z',
'needs': [
'svc_systemd:bind9',
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
]
}

View file

@ -3,7 +3,6 @@ from json import dumps
h = repo.libs.hashable.hashable
repo.libs.bind.repo = repo
defaults = {
'apt': {
'packages': {
@ -40,7 +39,7 @@ defaults = {
'zones': {},
},
},
'zones': set(),
'zones': {},
},
'nftables': {
'input': {
@ -62,6 +61,22 @@ defaults = {
}
@metadata_reactor.provides(
'bind/zones',
)
def dnssec(metadata):
return {
'bind': {
'zones': {
zone: {
'dnssec': repo.libs.dnssec.generate_dnssec_for_zone(zone, node),
}
for zone in metadata.get('bind/zones')
},
},
}
@metadata_reactor.provides(
'bind/type',
'bind/master_ip',

View file

@ -1,10 +1,6 @@
from shlex import quote
defaults = {
'build-ci': {},
}
@metadata_reactor.provides(
'users/build-ci/authorized_users',
'sudoers/build-ci',
@ -22,7 +18,7 @@ def ssh_keys(metadata):
},
'sudoers': {
'build-ci': {
f"/usr/bin/chown -R build-ci\\:{quote(ci['group'])} {quote(ci['path'])}"
f"/usr/bin/chown -R build-ci\:{quote(ci['group'])} {quote(ci['path'])}"
for ci in metadata.get('build-ci').values()
}
},

View file

@ -9,7 +9,7 @@ defaults = {
'crystal': {
# https://software.opensuse.org/download.html?project=devel%3Alanguages%3Acrystal&package=crystal
'urls': {
'http://download.opensuse.org/repositories/devel:/languages:/crystal/Debian_Testing/',
'https://download.opensuse.org/repositories/devel:/languages:/crystal/Debian_Testing/',
},
'suites': {
'/',

View file

@ -6,7 +6,7 @@ ssl_cert = </var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')
ssl_key = </var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')}/privkey.pem
ssl_dh = </etc/dovecot/dhparam.pem
ssl_client_ca_dir = /etc/ssl/certs
mail_location = maildir:${node.metadata.get('mailserver/maildir')}/%u:INDEX=${node.metadata.get('mailserver/maildir')}/index/%u
mail_location = maildir:~
mail_plugins = fts fts_xapian
namespace inbox {

View file

@ -20,10 +20,6 @@ directories = {
'owner': 'vmail',
'group': 'vmail',
},
'/var/vmail/index': {
'owner': 'vmail',
'group': 'vmail',
},
'/var/vmail/sieve': {
'owner': 'vmail',
'group': 'vmail',

View file

@ -0,0 +1,6 @@
# directories = {
# '/var/lib/downloads': {
# 'owner': 'downloads',
# 'group': 'www-data',
# }
# }

View file

@ -1,23 +0,0 @@
Postgres password workaround: set it manually:
```
root@freescout /ro psql freescout
psql (15.6 (Debian 15.6-0+deb12u1))
Type "help" for help.
freescout=# \password freescout
Enter new password for user "freescout":
Enter it again:
freescout=#
\q
```
# problems
# check if /opt/freescout/.env has been reset
# check `psql -h localhost -d freescout -U freescout -W` with the password from .env
# chown -R www-data:www-data /opt/freescout
# sudo su - www-data -c 'php /opt/freescout/artisan freescout:clear-cache' -s /bin/bash
# JavaScript acting up? `sudo su - www-data -c 'php /opt/freescout/artisan storage:link' -s /bin/bash`
# user images gone? restore them from the backup: `/opt/freescout/.zfs/snapshot/zfs-auto-snap_hourly-2024-11-22-1700/storage/app/public/users` `./customers`

View file

@ -1,66 +0,0 @@
# https://github.com/freescout-helpdesk/freescout/wiki/Installation-Guide
run_as = repo.libs.tools.run_as
php_version = node.metadata.get('php/version')
directories = {
'/opt/freescout': {
'owner': 'www-data',
'group': 'www-data',
# chown -R www-data:www-data /opt/freescout
},
}
actions = {
# 'clone_freescout': {
# 'command': run_as('www-data', 'git clone https://github.com/freescout-helpdesk/freescout.git /opt/freescout'),
# 'unless': 'test -e /opt/freescout/.git',
# 'needs': [
# 'pkg_apt:git',
# 'directory:/opt/freescout',
# ],
# },
# 'pull_freescout': {
# 'command': run_as('www-data', 'git -C /opt/freescout fetch origin dist && git -C /opt/freescout reset --hard origin/dist && git -C /opt/freescout clean -f'),
# 'unless': run_as('www-data', 'git -C /opt/freescout fetch origin && git -C /opt/freescout status -uno | grep -q "Your branch is up to date"'),
# 'needs': [
# 'action:clone_freescout',
# ],
# 'triggers': [
# 'action:freescout_artisan_update',
# f'svc_systemd:php{php_version}-fpm.service:restart',
# ],
# },
# 'freescout_artisan_update': {
# 'command': run_as('www-data', 'php /opt/freescout/artisan freescout:after-app-update'),
# 'triggered': True,
# 'needs': [
# f'svc_systemd:php{php_version}-fpm.service:restart',
# 'action:pull_freescout',
# ],
# },
}
# svc_systemd = {
# f'freescout-cron.service': {},
# }
# files = {
# '/opt/freescout/.env': {
# # https://github.com/freescout-helpdesk/freescout/blob/dist/.env.example
# # Every time you are making changes in .env file, in order changes to take an effect you need to run:
# # ´sudo su - www-data -c 'php /opt/freescout/artisan freescout:clear-cache' -s /bin/bash´
# 'owner': 'www-data',
# 'content': '\n'.join(
# f'{k}={v}' for k, v in
# sorted(node.metadata.get('freescout/env').items())
# ) + '\n',
# 'needs': [
# 'directory:/opt/freescout',
# 'action:clone_freescout',
# ],
# },
# }
#sudo su - www-data -s /bin/bash -c 'php /opt/freescout/artisan freescout:create-user --role admin --firstName M --lastName W --email freescout@freibrief.net --password gyh.jzv2bnf6hvc.HKG --no-interaction'
#sudo su - www-data -s /bin/bash -c 'php /opt/freescout/artisan freescout:create-user --role admin --firstName M --lastName W --email freescout@freibrief.net --password gyh.jzv2bnf6hvc.HKG --no-interaction'

View file

@ -1,121 +0,0 @@
from base64 import b64decode
# hash: SCRAM-SHA-256$4096:tQNfqQi7seqNDwJdHqCHbg==$r3ibECluHJaY6VRwpvPqrtCjgrEK7lAkgtUO8/tllTU=:+eeo4M0L2SowfyHFxT2FRqGzezve4ZOEocSIo11DATA=
database_password = repo.vault.password_for(f'{node.name} postgresql freescout').value
defaults = {
'apt': {
'packages': {
'git': {},
'php': {},
'php-pgsql': {},
'php-fpm': {},
'php-mbstring': {},
'php-xml': {},
'php-imap': {},
'php-zip': {},
'php-gd': {},
'php-curl': {},
'php-intl': {},
},
},
'freescout': {
'env': {
'APP_TIMEZONE': 'Europe/Berlin',
'DB_CONNECTION': 'pgsql',
'DB_HOST': '127.0.0.1',
'DB_PORT': '5432',
'DB_DATABASE': 'freescout',
'DB_USERNAME': 'freescout',
'DB_PASSWORD': database_password,
'APP_KEY': 'base64:' + repo.vault.random_bytes_as_base64_for(f'{node.name} freescout APP_KEY', length=32).value
},
},
'php': {
'php.ini': {
'cgi': {
'fix_pathinfo': '0',
},
},
},
'postgresql': {
'roles': {
'freescout': {
'password_hash': repo.libs.postgres.generate_scram_sha_256(
database_password,
b64decode(repo.vault.random_bytes_as_base64_for(f'{node.name} postgres freescout', length=16).value.encode()),
),
},
},
'databases': {
'freescout': {
'owner': 'freescout',
},
},
},
# 'systemd': {
# 'units': {
# f'freescout-cron.service': {
# 'Unit': {
# 'Description': 'Freescout Cron',
# 'After': 'network.target',
# },
# 'Service': {
# 'User': 'www-data',
# 'Nice': 10,
# 'ExecStart': f"/usr/bin/php /opt/freescout/artisan schedule:run"
# },
# 'Install': {
# 'WantedBy': {
# 'multi-user.target'
# }
# },
# }
# },
# },
'systemd-timers': {
'freescout-cron': {
'command': '/usr/bin/php /opt/freescout/artisan schedule:run',
'when': '*-*-* *:*:00',
'RuntimeMaxSec': '180',
'user': 'www-data',
},
},
'zfs': {
'datasets': {
'tank/freescout': {
'mountpoint': '/opt/freescout',
},
},
},
}
@metadata_reactor.provides(
'freescout/env/APP_URL',
)
def freescout(metadata):
return {
'freescout': {
'env': {
'APP_URL': 'https://' + metadata.get('freescout/domain') + '/',
},
},
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('freescout/domain'): {
'content': 'freescout/vhost.conf',
},
},
},
}
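
The `password_hash` above is a PostgreSQL SCRAM-SHA-256 verifier (the comment at the top of the file shows one). A standalone sketch of how such a verifier is derived, as a stand-in for `repo.libs.postgres.generate_scram_sha_256` (4096 iterations is PostgreSQL's default):

```python
import hashlib
import hmac
import os
from base64 import b64encode

def scram_sha_256(password: str, salt: bytes, iterations: int = 4096) -> str:
    # RFC 7677: SaltedPassword, ClientKey, StoredKey, ServerKey
    salted = hashlib.pbkdf2_hmac('sha256', password.encode(), salt, iterations)
    client_key = hmac.new(salted, b'Client Key', hashlib.sha256).digest()
    stored_key = hashlib.sha256(client_key).digest()
    server_key = hmac.new(salted, b'Server Key', hashlib.sha256).digest()
    # PostgreSQL stores: SCRAM-SHA-256$<iter>:<salt>$<StoredKey>:<ServerKey>
    return 'SCRAM-SHA-256${}:{}${}:{}'.format(
        iterations,
        b64encode(salt).decode(),
        b64encode(stored_key).decode(),
        b64encode(server_key).decode(),
    )

print(scram_sha_256('secret', os.urandom(16)))  # demo with a random salt
```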

View file

@ -40,7 +40,7 @@ ENABLE_OPENID_SIGNUP = false
[service]
REGISTER_EMAIL_CONFIRM = true
ENABLE_NOTIFY_MAIL = true
DISABLE_REGISTRATION = true
DISABLE_REGISTRATION = false
ALLOW_ONLY_EXTERNAL_REGISTRATION = false
ENABLE_CAPTCHA = false
REQUIRE_SIGNIN_VIEW = false

View file

@ -2,13 +2,10 @@ from os.path import join
from bundlewrap.utils.dicts import merge_dict
version = node.metadata.get('gitea/version')
assert not version.startswith('v')
arch = node.metadata.get('system/architecture')
version = version=node.metadata.get('gitea/version')
downloads['/usr/local/bin/gitea'] = {
# https://forgejo.org/releases/
'url': f'https://codeberg.org/forgejo/forgejo/releases/download/v{version}/forgejo-{version}-linux-{arch}',
'url': f'https://dl.gitea.io/gitea/{version}/gitea-{version}-linux-amd64',
'sha256_url': '{url}.sha256',
'triggers': {
'svc_systemd:gitea:restart',
@ -48,7 +45,6 @@ files['/etc/gitea/app.ini'] = {
),
),
'owner': 'git',
'mode': '0600',
'context': node.metadata['gitea'],
'triggers': {
'svc_systemd:gitea:restart',

View file

@ -11,20 +11,7 @@ defaults = {
},
},
'gitea': {
'conf': {
'DEFAULT': {
'WORK_PATH': '/var/lib/gitea',
},
'database': {
'DB_TYPE': 'postgres',
'HOST': 'localhost:5432',
'NAME': 'gitea',
'USER': 'gitea',
'PASSWD': database_password,
'SSL_MODE': 'disable',
'LOG_SQL': 'false',
},
},
'conf': {},
},
'postgresql': {
'roles': {
@ -96,6 +83,15 @@ def conf(metadata):
'INTERNAL_TOKEN': repo.vault.password_for(f'{node.name} gitea internal_token'),
'SECRET_KEY': repo.vault.password_for(f'{node.name} gitea security_secret_key'),
},
'database': {
'DB_TYPE': 'postgres',
'HOST': 'localhost:5432',
'NAME': 'gitea',
'USER': 'gitea',
'PASSWD': database_password,
'SSL_MODE': 'disable',
'LOG_SQL': 'false',
},
'service': {
'NO_REPLY_ADDRESS': f'noreply.{domain}',
},
@ -118,7 +114,7 @@ def nginx(metadata):
'content': 'nginx/proxy_pass.conf',
'context': {
'target': 'http://127.0.0.1:3500',
},
}
},
},
},

View file

@ -26,20 +26,14 @@ actions['reset_grafana_admin_password'] = {
directories = {
'/etc/grafana': {},
'/etc/grafana/provisioning': {
'owner': 'grafana',
'group': 'grafana',
},
'/etc/grafana/provisioning': {},
'/etc/grafana/provisioning/datasources': {
'purge': True,
},
'/etc/grafana/provisioning/dashboards': {
'purge': True,
},
'/var/lib/grafana': {
'owner': 'grafana',
'group': 'grafana',
},
'/var/lib/grafana': {},
'/var/lib/grafana/dashboards': {
'owner': 'grafana',
'group': 'grafana',
@ -53,8 +47,6 @@ directories = {
files = {
'/etc/grafana/grafana.ini': {
'content': repo.libs.ini.dumps(node.metadata.get('grafana/config')),
'owner': 'grafana',
'group': 'grafana',
'triggers': [
'svc_systemd:grafana-server:restart',
],
@ -64,8 +56,6 @@ files = {
'apiVersion': 1,
'datasources': list(node.metadata.get('grafana/datasources').values()),
}),
'owner': 'grafana',
'group': 'grafana',
'triggers': [
'svc_systemd:grafana-server:restart',
],
@ -82,8 +72,6 @@ files = {
},
}],
}),
'owner': 'grafana',
'group': 'grafana',
'triggers': [
'svc_systemd:grafana-server:restart',
],
@ -172,8 +160,6 @@ for dashboard_id, monitored_node in enumerate(monitored_nodes, start=1):
files[f'/var/lib/grafana/dashboards/{monitored_node.name}.json'] = {
'content': json.dumps(dashboard, indent=4),
'owner': 'grafana',
'group': 'grafana',
'triggers': [
'svc_systemd:grafana-server:restart',
]

View file

@ -26,15 +26,9 @@ defaults = {
'config': {
'server': {
'http_port': 8300,
'http_addr': '127.0.0.1',
'enable_gzip': True,
},
'database': {
'type': 'postgres',
'host': '127.0.0.1:5432',
'name': 'grafana',
'user': 'grafana',
'password': postgres_password,
'url': f'postgres://grafana:{postgres_password}@localhost:5432/grafana',
},
'remote_cache': {
'type': 'redis',
@ -69,9 +63,6 @@ defaults = {
},
},
},
'nginx': {
'has_websockets': True,
},
}
@ -147,7 +138,6 @@ def dns(metadata):
def nginx(metadata):
return {
'nginx': {
'has_websockets': True,
'vhosts': {
metadata.get('grafana/hostname'): {
'content': 'grafana/vhost.conf',

View file

@ -1,23 +0,0 @@
https://github.com/home-assistant/supervised-installer?tab=readme-ov-file
https://github.com/home-assistant/os-agent/tree/main?tab=readme-ov-file#using-home-assistant-supervised-on-debian
https://docs.docker.com/engine/install/debian/
https://www.home-assistant.io/installation/linux#install-home-assistant-supervised
https://github.com/home-assistant/supervised-installer
https://github.com/home-assistant/architecture/blob/master/adr/0014-home-assistant-supervised.md
DATA_SHARE=/usr/share/hassio dpkg --force-confdef --force-confold -i homeassistant-supervised.deb
fresh Debian install
install Home Assistant
check that it works
then apply bundlewrap on top
https://www.home-assistant.io/integrations/http/#ssl_certificate
`wget "$(curl -L https://api.github.com/repos/home-assistant/supervised-installer/releases/latest | jq -r '.assets[0].browser_download_url')" -O homeassistant-supervised.deb && dpkg -i homeassistant-supervised.deb`

View file

@ -1,30 +0,0 @@
from shlex import quote
version = node.metadata.get('homeassistant/os_agent_version')
directories = {
'/usr/share/hassio': {},
}
actions = {
'install_os_agent': {
'command': ' && '.join([
f'wget -O /tmp/os-agent.deb https://github.com/home-assistant/os-agent/releases/download/{quote(version)}/os-agent_{quote(version)}_linux_aarch64.deb',
'DEBIAN_FRONTEND=noninteractive dpkg -i /tmp/os-agent.deb',
]),
'unless': f'test "$(apt -qq list os-agent | cut -d" " -f2)" = "{quote(version)}"',
'needs': {
'pkg_apt:',
'zfs_dataset:tank/homeassistant',
},
},
'install_homeassistant_supervised': {
'command': 'wget -O /tmp/homeassistant-supervised.deb https://github.com/home-assistant/supervised-installer/releases/latest/download/homeassistant-supervised.deb && apt install /tmp/homeassistant-supervised.deb',
'unless': 'apt -qq list homeassistant-supervised | grep -q "installed"',
'needs': {
'action:install_os_agent',
},
},
}

View file

@ -1,65 +0,0 @@
defaults = {
'apt': {
'packages': {
# homeassistant-supervised
'apparmor': {},
'bluez': {},
'cifs-utils': {},
'curl': {},
'dbus': {},
'jq': {},
'libglib2.0-bin': {},
'lsb-release': {},
'network-manager': {},
'nfs-common': {},
'systemd-journal-remote': {},
'systemd-resolved': {},
'udisks2': {},
'wget': {},
# docker
'docker-ce': {},
'docker-ce-cli': {},
'containerd.io': {},
'docker-buildx-plugin': {},
'docker-compose-plugin': {},
},
'sources': {
# docker: https://docs.docker.com/engine/install/debian/#install-using-the-repository
'docker': {
'urls': {
'https://download.docker.com/linux/debian',
},
'suites': {
'{codename}',
},
'components': {
'stable',
},
},
},
},
'zfs': {
'datasets': {
'tank/homeassistant': {
'mountpoint': '/usr/share/hassio',
'needed_by': {
'directory:/usr/share/hassio',
},
},
},
},
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('homeassistant/domain'): {
'content': 'homeassistant/vhost.conf',
},
},
},
}

View file

@ -0,0 +1,20 @@
users = {
'homeassistant': {
'home': '/var/lib/homeassistant',
},
}
directories = {
'/var/lib/homeassistant': {
'owner': 'homeassistant',
},
'/var/lib/homeassistant/config': {
'owner': 'homeassistant',
},
'/var/lib/homeassistant/venv': {
'owner': 'homeassistant',
},
}
# https://wiki.instar.com/de/Software/Linux/Home_Assistant/

View file

@ -0,0 +1,20 @@
defaults = {
'apt': {
'packages': {
'python3': {},
'python3-dev': {},
'python3-pip': {},
'python3-venv': {},
'libffi-dev': {},
'libssl-dev': {},
'libjpeg-dev': {},
'zlib1g-dev': {},
'autoconf': {},
'build-essential': {},
'libopenjp2-7': {},
'libtiff5': {},
'libturbojpeg0-dev': {},
'tzdata': {},
},
},
}

View file

@ -13,9 +13,9 @@ apply Notification "mail-icingaadmin" to Host {
user_groups = host.vars.notification.mail.groups
users = host.vars.notification.mail.users
//interval = 2h
//vars.notification_logtosyslog = true
assign where host.vars.notification.mail
}
@ -25,9 +25,9 @@ apply Notification "mail-icingaadmin" to Service {
user_groups = host.vars.notification.mail.groups
users = host.vars.notification.mail.users
//interval = 2h
//vars.notification_logtosyslog = true
assign where host.vars.notification.mail
}

View file

@ -269,7 +269,7 @@ svc_systemd = {
'icinga2.service': {
'needs': [
'pkg_apt:icinga2-ido-pgsql',
'svc_systemd:postgresql.service',
'svc_systemd:postgresql',
],
},
}

View file

@ -11,7 +11,7 @@ defaults = {
'php-imagick': {},
'php-pgsql': {},
'icingaweb2': {},
#'icingaweb2-module-monitoring': {}, # ?
'icingaweb2-module-monitoring': {},
},
'sources': {
'icinga': {

View file

@ -1,3 +0,0 @@
# svc_systemd = {
# 'ifupdown.service': {},
# }

View file

@ -1,21 +0,0 @@
from json import dumps
from bundlewrap.metadata import MetadataJSONEncoder
files = {
'/etc/kea/kea-dhcp4.conf': {
'content': dumps(node.metadata.get('kea'), indent=4, sort_keys=True, cls=MetadataJSONEncoder),
'triggers': [
'svc_systemd:kea-dhcp4-server:restart',
],
},
}
svc_systemd = {
'kea-dhcp4-server': {
'needs': [
'pkg_apt:kea-dhcp4-server',
'file:/etc/kea/kea-dhcp4.conf',
'svc_systemd:systemd-networkd.service:restart',
],
},
}

View file

@ -1,96 +0,0 @@
from ipaddress import ip_interface, ip_network
hashable = repo.libs.hashable.hashable
defaults = {
'apt': {
'packages': {
'kea-dhcp4-server': {},
},
},
'kea': {
'Dhcp4': {
'interfaces-config': {
'interfaces': set(),
},
'lease-database': {
'type': 'memfile',
'lfc-interval': 3600
},
'subnet4': set(),
'loggers': set([
hashable({
'name': 'kea-dhcp4',
'output_options': [
{
'output': 'syslog',
}
],
'severity': 'INFO',
}),
]),
},
},
}
@metadata_reactor.provides(
'kea/Dhcp4/interfaces-config/interfaces',
'kea/Dhcp4/subnet4',
)
def subnets(metadata):
subnet4 = set()
interfaces = set()
reservations = set(
hashable({
'hw-address': network_conf['mac'],
'ip-address': str(ip_interface(network_conf['ipv4']).ip),
})
for other_node in repo.nodes
for network_conf in other_node.metadata.get('network', {}).values()
if 'mac' in network_conf
)
for network_name, network_conf in metadata.get('network').items():
dhcp_server_config = network_conf.get('dhcp_server_config', None)
if dhcp_server_config:
_network = ip_network(dhcp_server_config['subnet'])
subnet4.add(hashable({
'subnet': dhcp_server_config['subnet'],
'pools': [
{
'pool': f'{dhcp_server_config['pool_from']} - {dhcp_server_config['pool_to']}',
},
],
'option-data': [
{
'name': 'routers',
'data': dhcp_server_config['router'],
},
{
'name': 'domain-name-servers',
'data': '10.0.0.1',
},
],
'reservations': set(
reservation
for reservation in reservations
if ip_interface(reservation['ip-address']).ip in _network
),
}))
interfaces.add(network_conf.get('interface', network_name))
return {
'kea': {
'Dhcp4': {
'interfaces-config': {
'interfaces': interfaces,
},
'subnet4': subnet4,
},
},
}
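
For reference, a standalone sketch of what the reactor above produces for a single network — hypothetical addresses, without the bundlewrap plumbing (`hashable`, `repo.nodes`):

```python
from ipaddress import ip_interface, ip_network

dhcp_server_config = {  # hypothetical network metadata
    'subnet': '10.0.0.0/24',
    'pool_from': '10.0.0.100',
    'pool_to': '10.0.0.200',
    'router': '10.0.0.1',
}
reservations = [
    {'hw-address': 'aa:bb:cc:dd:ee:ff', 'ip-address': '10.0.0.10'},
]

_network = ip_network(dhcp_server_config['subnet'])
subnet = {
    'subnet': dhcp_server_config['subnet'],
    'pools': [
        {'pool': f"{dhcp_server_config['pool_from']} - {dhcp_server_config['pool_to']}"},
    ],
    'option-data': [
        {'name': 'routers', 'data': dhcp_server_config['router']},
        {'name': 'domain-name-servers', 'data': '10.0.0.1'},
    ],
    # only keep reservations whose address falls into this subnet
    'reservations': [
        r for r in reservations
        if ip_interface(r['ip-address']).ip in _network
    ],
}
print(subnet)
```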

View file

@ -1,36 +1,36 @@
hostname "CroneKorkN : ${name}"
sv_contact "admin@sublimity.de"
// assign server to steam group
sv_steamgroup "${','.join(steamgroups)}"
rcon_password "${rcon_password}"
// no annoying message of the day
motd_enabled 0
// enable cheats
sv_cheats 1
// allow inconsistent files on clients (weapon mods for example)
sv_consistency 0
// connect from internet
sv_lan 0
// join game at any point
sv_allow_lobby_connect_only 0
// allowed modes
sv_gametypes "coop,realism,survival,versus,teamversus,scavenge,teamscavenge"
// network
sv_minrate 30000
sv_maxrate 60000
sv_mincmdrate 66
sv_maxcmdrate 101
// logging
sv_logsdir "logs-${name}" //Folder in the game directory where server logs will be stored.
log on //Creates a logfile (on | off)
sv_logecho 0 //default 0; Echo log information to the console.

View file

@ -56,7 +56,6 @@ for domain in node.metadata.get('letsencrypt/domains').keys():
'unless': f'/etc/dehydrated/letsencrypt-ensure-some-certificate {domain} true',
'needs': {
'file:/etc/dehydrated/letsencrypt-ensure-some-certificate',
'pkg_apt:dehydrated',
},
'needed_by': {
'svc_systemd:nginx',

View file

@ -1,41 +0,0 @@
from shlex import quote
def generate_sysctl_key_value_pairs_from_json(json_data, parents=[]):
if isinstance(json_data, dict):
for key, value in json_data.items():
yield from generate_sysctl_key_value_pairs_from_json(value, [*parents, key])
elif isinstance(json_data, list):
raise ValueError(f"List not supported: '{json_data}'")
else:
# If it's a leaf node, yield the path
yield (parents, json_data)
key_value_pairs = generate_sysctl_key_value_pairs_from_json(node.metadata.get('sysctl'))
files= {
'/etc/sysctl.conf': {
'content': '\n'.join(
sorted(
f"{'.'.join(path)}={value}"
for path, value in key_value_pairs
),
),
'triggers': [
'svc_systemd:systemd-sysctl.service:restart',
],
},
}
svc_systemd = {
'systemd-sysctl.service': {},
}
for path, value in key_value_pairs:
actions[f'reload_sysctl.conf_{path}'] = {
'command': f"sysctl --values {'.'.join(path)} | grep -q {quote('^'+value+'$')}",
'needs': [
f'action:systemd-sysctl.service',
f'action:systemd-sysctl.service:restart',
],
}
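
For reference, a standalone sketch of the flattening above: nested `sysctl` metadata becomes dotted key=value lines for /etc/sysctl.conf (the metadata values are hypothetical):

```python
def flatten(data, parents=[]):
    # recurse into dicts, yield (path, value) at the leaves
    if isinstance(data, dict):
        for key, value in data.items():
            yield from flatten(value, [*parents, key])
    else:
        yield (parents, data)

metadata = {  # hypothetical node.metadata.get('sysctl')
    'net': {'ipv4': {'ip_forward': 1}},
    'vm': {'swappiness': 10},
}

print('\n'.join(sorted(
    f"{'.'.join(path)}={value}"
    for path, value in flatten(metadata)
)))
# net.ipv4.ip_forward=1
# vm.swappiness=10
```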

View file

@ -1,3 +0,0 @@
defaults = {
'sysctl': {},
}

View file

@ -20,19 +20,18 @@ files = {
}
actions = {
'systemd-locale': {
'command': f'localectl set-locale LANG="{default_locale}"',
'unless': f'localectl | grep -Fi "system locale" | grep -Fi "{default_locale}"',
'triggers': {
'action:locale-gen',
},
},
'locale-gen': {
'command': 'locale-gen',
'triggered': True,
'needs': {
'pkg_apt:locales',
'action:systemd-locale',
},
},
'systemd-locale': {
'command': f'localectl set-locale LANG="{default_locale}"',
'unless': f'localectl | grep -Fi "system locale" | grep -Fi "{default_locale}"',
'preceded_by': {
'action:locale-gen',
},
},
}

View file

@ -2,5 +2,5 @@
cd "$OLDPWD"
export BW_ITEM_WORKERS=$(expr "$(sysctl -n hw.logicalcpu)" '*' 12 '/' 10)
export BW_ITEM_WORKERS=$(expr "$(nproc)" '*' 15 '/' 10)
export BW_NODE_WORKERS=$(expr 320 '/' "$BW_ITEM_WORKERS")

View file

@ -2,5 +2,7 @@
cd "$OLDPWD"
PATH_add "/opt/homebrew/opt/gnu-sed/libexec/gnubin"
PATH_add "/opt/homebrew/opt/grep/libexec/gnubin"
GNU_PATH="$HOME/.local/gnu_bin"
mkdir -p "$GNU_PATH"
test -f "$GNU_PATH/sed" || ln -s "$(which gsed)" "$GNU_PATH/sed"
PATH_add "$GNU_PATH"

View file

@ -10,7 +10,6 @@ password required pam_deny.so
session required pam_permit.so
EOT
sudo xcodebuild -license accept
xcode-select --install
git -C ~/.zsh/oh-my-zsh pull
@ -18,7 +17,7 @@ git -C ~/.zsh/oh-my-zsh pull
brew upgrade
brew upgrade --cask --greedy
pyenv install --skip-existing
pyenv install --keep-existing
sudo softwareupdate -ia --verbose
@ -42,5 +41,3 @@ fi
sudo systemsetup -setremotelogin on # enable ssh
pip install --upgrade pip
# https://sysadmin-journal.com/apache-directory-studio-on-the-apple-m1/

View file

@ -5,5 +5,5 @@ cd "$OLDPWD"
if test -f .venv/bin/python && test "$(realpath .venv/bin/python)" != "$(realpath "$(pyenv which python)")"
then
echo "rebuilding venv für new python version"
rm -rf .venv .pip_upgrade_timestamp
rm -rf .venv
fi

View file

@ -3,7 +3,7 @@
cd "$OLDPWD"
python3 -m venv .venv
source .venv/bin/activate
source ./.venv/bin/activate
PATH_add .venv/bin
NOW=$(date +%s)
@ -19,9 +19,5 @@ if test "$DELTA" -gt 86400
then
python3 -m pip --require-virtualenv install pip wheel --upgrade
python3 -m pip --require-virtualenv install -r requirements.txt --upgrade
if test -e optional-requirements.txt
then
python3 -m pip --require-virtualenv install -r optional-requirements.txt --upgrade
fi
date +%s > .pip_upgrade_timestamp
fi

View file

@ -1,9 +1,6 @@
export PATH=~/.bin:$PATH
export PATH=~/.cargo/bin:$PATH
export ZSH=~/.zsh/oh-my-zsh
export ZSH_HOSTNAME='sm'
ZSH_THEME="bw"
ZSH_THEME="ckn"
HIST_STAMPS="yyyy/mm/dd"
plugins=(
zsh-autosuggestions
@ -13,6 +10,13 @@ source $ZSH/oh-my-zsh.sh
ulimit -S -n 24000
sshn() {
ssh "$(tr '.' ' ' <<< "$1" | tac -s ' ' | xargs | tr ' ' '.').smhss.de"
}
pingn() {
ping "$(tr '.' ' ' <<< "$1" | tac -s ' ' | xargs | tr ' ' '.').smhss.de"
}
antivir() {
printf 'scanning for viruses' && sleep 1 && printf '.' && sleep 1 && printf '.' && sleep 1 && printf '.' &&
sleep 1 && echo '\nyour computer is safe!'
@ -22,12 +26,3 @@ eval "$(rbenv init -)"
eval "$(pyenv init -)"
eval "$(direnv hook zsh)"
eval "$(op completion zsh)"; compdef _op op
# //S/M
sshn() {
ssh "$(tr '.' ' ' <<< "$1" | tac -s ' ' | xargs | tr ' ' '.').smhss.de"
}
pingn() {
ping "$(tr '.' ' ' <<< "$1" | tac -s ' ' | xargs | tr ' ' '.').smhss.de"
}

View file

@ -1,12 +1,3 @@
# brew install
actions['brew_install'] = {
'command': '/opt/homebrew/bin/brew install ' + ' '.join(node.metadata.get('brew')),
'unless': f"""PKGS=$(/opt/homebrew/bin/brew leaves); for p in {' '.join(node.metadata.get('brew'))}; do grep -q "$p" <<< $PKGS || exit 9; done"""
}
# bw init
directories['/Users/mwiegand/.config/bundlewrap/lock'] = {}
# home
@ -22,12 +13,6 @@ files['/Users/mwiegand/.bin/macbook-update'] = {
'mode': '755',
}
with open(f'{repo.path}/bundles/zsh/files/bw.zsh-theme') as f:
files['/Users/mwiegand/.zsh/oh-my-zsh/themes/bw.zsh-theme'] = {
'content': f.read(),
'mode': '0644',
}
# direnv
directories['/Users/mwiegand/.local/share/direnv'] = {}
@ -36,7 +21,6 @@ files['/Users/mwiegand/.local/share/direnv/pyenv'] = {}
files['/Users/mwiegand/.local/share/direnv/venv'] = {}
files['/Users/mwiegand/.local/share/direnv/bundlewrap'] = {}
##################
for element in [*files.values(), *directories.values()]:

View file

@ -1,3 +1 @@
defaults = {
'brew': {},
}
defaults = {}

View file

@ -1,22 +0,0 @@
# This is the mailman extension configuration file to enable HyperKitty as an
# archiver. Remember to add the following lines in the mailman.cfg file:
#
# [archiver.hyperkitty]
# class: mailman_hyperkitty.Archiver
# enable: yes
# configuration: /etc/mailman3/mailman-hyperkitty.cfg
#
[general]
# This is your HyperKitty installation, preferably on the localhost. This
# address will be used by Mailman to forward incoming emails to HyperKitty
# for archiving. It does not need to be publicly available, in fact it's
# better if it is not.
# However, if your Mailman installation is accessed via HTTPS, the URL needs
# to match your SSL certificate (e.g. https://lists.example.com/hyperkitty).
base_url: http://${hostname}/mailman3/hyperkitty/
# The shared api_key, must be identical except for quoting to the value of
# MAILMAN_ARCHIVER_KEY in HyperKitty's settings.
api_key: ${archiver_key}

View file

@ -1,190 +0,0 @@
ACCOUNT_EMAIL_VERIFICATION='none'
# This file is imported by the Mailman Suite. It is used to override
# the default settings from /usr/share/mailman3-web/settings.py.
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '${secret_key}'
ADMINS = (
('Mailman Suite Admin', 'root@localhost'),
)
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.8/ref/settings/#allowed-hosts
# Set to '*' per default in the Debian package to allow all hostnames. Mailman3
# is meant to run behind a webserver reverse proxy anyway.
ALLOWED_HOSTS = [
'${hostname}',
]
# Mailman API credentials
MAILMAN_REST_API_URL = 'http://localhost:8001'
MAILMAN_REST_API_USER = 'restadmin'
MAILMAN_REST_API_PASS = '${api_password}'
MAILMAN_ARCHIVER_KEY = '${archiver_key}'
MAILMAN_ARCHIVER_FROM = ('127.0.0.1', '::1')
# Application definition
INSTALLED_APPS = (
'hyperkitty',
'postorius',
'django_mailman3',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'django_gravatar',
'compressor',
'haystack',
'django_extensions',
'django_q',
'allauth',
'allauth.account',
'allauth.socialaccount',
'django_mailman3.lib.auth.fedora',
#'allauth.socialaccount.providers.openid',
#'allauth.socialaccount.providers.github',
#'allauth.socialaccount.providers.gitlab',
#'allauth.socialaccount.providers.google',
#'allauth.socialaccount.providers.facebook',
#'allauth.socialaccount.providers.twitter',
#'allauth.socialaccount.providers.stackexchange',
)
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
# Use 'sqlite3', 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
#'ENGINE': 'django.db.backends.sqlite3',
'ENGINE': 'django.db.backends.postgresql_psycopg2',
#'ENGINE': 'django.db.backends.mysql',
# DB name or path to database file if using sqlite3.
#'NAME': '/var/lib/mailman3/web/mailman3web.db',
'NAME': 'mailman',
# The following settings are not used with sqlite3:
'USER': 'mailman',
'PASSWORD': '${db_password}',
# HOST: empty for localhost through domain sockets or '127.0.0.1' for
# localhost through TCP.
'HOST': '127.0.0.1',
# PORT: set to empty string for default.
'PORT': '5432',
# OPTIONS: Extra parameters to use when connecting to the database.
'OPTIONS': {
# Set sql_mode to 'STRICT_TRANS_TABLES' for MySQL. See
# https://docs.djangoproject.com/en/1.11/ref/
# databases/#setting-sql-mode
#'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
},
}
}
# If you're behind a proxy, use the X-Forwarded-Host header
# See https://docs.djangoproject.com/en/1.8/ref/settings/#use-x-forwarded-host
USE_X_FORWARDED_HOST = True
# And if your proxy does your SSL encoding for you, set SECURE_PROXY_SSL_HEADER
# https://docs.djangoproject.com/en/1.8/ref/settings/#secure-proxy-ssl-header
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_SCHEME', 'https')
# Other security settings
# SECURE_SSL_REDIRECT = True
# If you set SECURE_SSL_REDIRECT to True, make sure the SECURE_REDIRECT_EXEMPT
# contains at least this line:
# SECURE_REDIRECT_EXEMPT = [
# "archives/api/mailman/.*", # Request from Mailman.
# ]
# SESSION_COOKIE_SECURE = True
# SECURE_CONTENT_TYPE_NOSNIFF = True
# SECURE_BROWSER_XSS_FILTER = True
# CSRF_COOKIE_SECURE = True
# CSRF_COOKIE_HTTPONLY = True
# X_FRAME_OPTIONS = 'DENY'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Set default domain for email addresses.
EMAILNAME = 'localhost.local'
# If you enable internal authentication, this is the address that the emails
# will appear to be coming from. Make sure you set a valid domain name,
# otherwise the emails may get rejected.
# https://docs.djangoproject.com/en/1.8/ref/settings/#default-from-email
# DEFAULT_FROM_EMAIL = "mailing-lists@you-domain.org"
DEFAULT_FROM_EMAIL = 'postorius@{}'.format(EMAILNAME)
# If you enable email reporting for error messages, this is where those emails
# will appear to be coming from. Make sure you set a valid domain name,
# otherwise the emails may get rejected.
# https://docs.djangoproject.com/en/1.8/ref/settings/#std:setting-SERVER_EMAIL
# SERVER_EMAIL = 'root@your-domain.org'
SERVER_EMAIL = 'root@{}'.format(EMAILNAME)
# Django Allauth
ACCOUNT_DEFAULT_HTTP_PROTOCOL = "https"
#
# Social auth
#
SOCIALACCOUNT_PROVIDERS = {
#'openid': {
# 'SERVERS': [
# dict(id='yahoo',
# name='Yahoo',
# openid_url='http://me.yahoo.com'),
# ],
#},
#'google': {
# 'SCOPE': ['profile', 'email'],
# 'AUTH_PARAMS': {'access_type': 'online'},
#},
#'facebook': {
# 'METHOD': 'oauth2',
# 'SCOPE': ['email'],
# 'FIELDS': [
# 'email',
# 'name',
# 'first_name',
# 'last_name',
# 'locale',
# 'timezone',
# ],
# 'VERSION': 'v2.4',
#},
}
# On a production setup, setting COMPRESS_OFFLINE to True will bring a
# significant performance improvement, as CSS files will not need to be
# recompiled on each requests. It means running an additional "compress"
# management command after each code upgrade.
# http://django-compressor.readthedocs.io/en/latest/usage/#offline-compression
COMPRESS_OFFLINE = True
POSTORIUS_TEMPLATE_BASE_URL = 'http://${hostname}/mailman3/'

View file

@ -1,277 +0,0 @@
# Copyright (C) 2008-2017 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
# This file contains the Debian configuration for mailman. It uses ini-style
# formats under the lazr.config regime to define all system configuration
# options. See <https://launchpad.net/lazr.config> for details.
[mailman]
# This address is the "site owner" address. Certain messages which must be
# delivered to a human, but which can't be delivered to a list owner (e.g. a
# bounce from a list owner), will be sent to this address. It should point to
# a human.
site_owner: ${site_owner_email}
# This is the local-part of an email address used in the From field whenever a
# message comes from some entity to which there is no natural reply recipient.
# Mailman will append '@' and the host name of the list involved. This
# address must not bounce and it must not point to a Mailman process.
noreply_address: noreply
# The default language for this server.
default_language: de
# Membership tests for posting purposes are usually performed by looking at a
# set of headers, passing the test if any of their values match a member of
# the list. Headers are checked in the order given in this variable. The
# value From_ means to use the envelope sender. Field names are case
# insensitive. This is a space separate list of headers.
sender_headers: from from_ reply-to sender
# Mail command processor will ignore mail command lines after designated max.
email_commands_max_lines: 10
# Default length of time a pending request is live before it is evicted from
# the pending database.
pending_request_life: 3d
# How long should files be saved before they are evicted from the cache?
cache_life: 7d
# A callable to run with no arguments early in the initialization process.
# This runs before database initialization.
pre_hook:
# A callable to run with no arguments late in the initialization process.
# This runs after adapters are initialized.
post_hook:
# Which paths.* file system layout to use.
# You should not change this variable.
layout: debian
# Can MIME filtered messages be preserved by list owners?
filtered_messages_are_preservable: no
# How should text/html parts be converted to text/plain when the mailing list
# is set to convert HTML to plaintext? This names a command to be called,
# where the substitution variable $filename is filled in by Mailman, and
# contains the path to the temporary file that the command should read from.
# The command should print the converted text to stdout.
html_to_plain_text_command: /usr/bin/lynx -dump $filename
# Specify what characters are allowed in list names. Characters outside of
# the class [-_.+=!$*{}~0-9a-z] matched case insensitively are never allowed,
# but this specifies a subset as the only allowable characters. This must be
# a valid character class regexp or the effect on list creation is
# unpredictable.
listname_chars: [-_.0-9a-z]
[shell]
# `mailman shell` (also `withlist`) gives you an interactive prompt that you
# can use to interact with an initialized and configured Mailman system. Use
# --help for more information. This section allows you to configure certain
# aspects of this interactive shell.
# Customize the interpreter prompt.
prompt: >>>
# Banner to show on startup.
banner: Welcome to the GNU Mailman shell
# Use IPython as the shell, which must be found on the system. Valid values
# are `no`, `yes`, and `debug` where the latter is equivalent to `yes` except
# that any import errors will be displayed to stderr.
use_ipython: no
# Set this to allow for command line history if readline is available. This
# can be as simple as $var_dir/history.py to put the file in the var directory.
history_file:
[paths.debian]
# Important directories for Mailman operation. These are defined here so that
# different layouts can be supported. For example, a developer layout would
# be different from a FHS layout. Most paths are based off the var_dir, and
# often just setting that will do the right thing for all the other paths.
# You might also have to set spool_dir though.
#
# Substitutions are allowed, but must be of the form $var where 'var' names a
# configuration variable in the paths.* section. Substitutions are expanded
# recursively until no more $-variables are present. Beware of infinite
# expansion loops!
#
# This is the root of the directory structure that Mailman will use to store
# its run-time data.
var_dir: /var/lib/mailman3
# This is where the Mailman queue files directories will be created.
queue_dir: $var_dir/queue
# This is the directory containing the Mailman 'runner' and 'master' commands
# if set to the string '$argv', it will be taken as the directory containing
# the 'mailman' command.
bin_dir: /usr/lib/mailman3/bin
# All list-specific data.
list_data_dir: $var_dir/lists
# Directory where log files go.
log_dir: /var/log/mailman3
# Directory for system-wide locks.
lock_dir: $var_dir/locks
# Directory for system-wide data.
data_dir: $var_dir/data
# Cache files.
cache_dir: $var_dir/cache
# Directory for configuration files and such.
etc_dir: /etc/mailman3
# Directory containing Mailman plugins.
ext_dir: $var_dir/ext
# Directory where the default IMessageStore puts its messages.
messages_dir: $var_dir/messages
# Directory for archive backends to store their messages in. Archivers should
# create a subdirectory in here to store their files.
archive_dir: $var_dir/archives
# Root directory for site-specific template override files.
template_dir: $var_dir/templates
# There are also a number of paths to specific file locations that can be
# defined. For these, the directory containing the file must already exist,
# or be one of the directories created by Mailman as per above.
#
# This is where PID file for the master runner is stored.
pid_file: /run/mailman3/master.pid
# Lock file.
lock_file: $lock_dir/master.lck
[database]
# The class implementing the IDatabase.
class: mailman.database.sqlite.SQLiteDatabase
#class: mailman.database.mysql.MySQLDatabase
#class: mailman.database.postgresql.PostgreSQLDatabase
# Use this to set the Storm database engine URL. You generally have one
# primary database connection for all of Mailman. List data and most rosters
# will store their data in this database, although external rosters may access
# other databases in their own way. This string supports standard
# 'configuration' substitutions.
url: sqlite:///$DATA_DIR/mailman.db
#url: mysql+pymysql://mailman3:mmpass@localhost/mailman3?charset=utf8&use_unicode=1
#url: postgresql://mailman3:mmpass@localhost/mailman3
debug: no
[logging.debian]
# This defines various log settings. The options available are:
#
# - level -- Overrides the default level; this may be any of the
# standard Python logging levels, case insensitive.
# - format -- Overrides the default format string
# - datefmt -- Overrides the default date format string
# - path -- Overrides the default logger path. This may be a relative
# path name, in which case it is relative to Mailman's LOG_DIR,
# or it may be an absolute path name. You cannot change the
# handler class that will be used.
# - propagate -- Boolean specifying whether to propagate log message from this
# logger to the root "mailman" logger. You cannot override
# settings for the root logger.
#
# In this section, you can define defaults for all loggers, which will be
# prefixed by 'mailman.'. Use subsections to override settings for specific
# loggers. The names of the available loggers are:
#
# - archiver -- All archiver output
# - bounce -- All bounce processing logs go here
# - config -- Configuration issues
# - database -- Database logging (SQLAlchemy and Alembic)
# - debug -- Only used for development
# - error -- All exceptions go to this log
# - fromusenet -- Information related to the Usenet to Mailman gateway
# - http -- Internal wsgi-based web interface
# - locks -- Lock state changes
# - mischief -- Various types of hostile activity
# - runner -- Runner process start/stops
# - smtp -- Successful SMTP activity
# - smtp-failure -- Unsuccessful SMTP activity
# - subscribe -- Information about leaves/joins
# - vette -- Message vetting information
format: %(asctime)s (%(process)d) %(message)s
datefmt: %b %d %H:%M:%S %Y
propagate: no
level: info
path: mailman.log
[webservice]
# The hostname at which admin web service resources are exposed.
hostname: localhost
# The port at which the admin web service resources are exposed.
port: 8001
# Whether or not requests to the web service are secured through SSL.
use_https: no
# Whether or not to show tracebacks in an HTTP response for a request that
# raised an exception.
show_tracebacks: yes
# The API version number for the current (highest) API.
api_version: 3.1
# The administrative username.
admin_user: restadmin
# The administrative password.
admin_pass: ${api_password}
[mta]
# The class defining the interface to the incoming mail transport agent.
#incoming: mailman.mta.exim4.LMTP
incoming: mailman.mta.postfix.LMTP
# The callable implementing delivery to the outgoing mail transport agent.
# This must accept three arguments, the mailing list, the message, and the
# message metadata dictionary.
outgoing: mailman.mta.deliver.deliver
# How to connect to the outgoing MTA. If smtp_user and smtp_pass is given,
# then Mailman will attempt to log into the MTA when making a new connection.
# smtp_host: smtp.ionos.de
# smtp_port: 587
# smtp_user: ${smtp_user}
# smtp_pass: ${smtp_password}
# smtp_secure_mode: starttls
smtp_host: 127.0.0.1
smtp_port: 25
smtp_user:
smtp_pass:
# Where the LMTP server listens for connections. Use 127.0.0.1 instead of
# localhost for Postfix integration, because Postfix only consults DNS
# (e.g. not /etc/hosts).
lmtp_host: 127.0.0.1
lmtp_port: 8024
# Where can we find the mail server specific configuration file? The path can
# be either a file system path or a Python import path. If the value starts
# with python: then it is a Python import path, otherwise it is a file system
# path. File system paths must be absolute since no guarantees are made about
# the current working directory. Python paths should not include the trailing
# .cfg, which the file must end with.
#configuration: python:mailman.config.exim4
configuration: python:mailman.config.postfix

View file

@ -1,52 +0,0 @@
# See /usr/share/postfix/main.cf.dist for a commented, more complete version
# Debian specific: Specifying a file name will cause the first
# line of that file to be used as the name. The Debian default
# is /etc/mailname.
#myorigin = /etc/mailname
smtpd_banner = $myhostname ESMTP $mail_name (Debian/GNU)
biff = no
# appending .domain is the MUA's job.
append_dot_mydomain = no
# Uncomment the next line to generate "delayed mail" warnings
#delay_warning_time = 4h
readme_directory = no
# See http://www.postfix.org/COMPATIBILITY_README.html -- default to 3.6 on
# fresh installs.
compatibility_level = 3.6
# TLS parameters
smtpd_tls_cert_file=/etc/ssl/certs/ssl-cert-snakeoil.pem
smtpd_tls_key_file=/etc/ssl/private/ssl-cert-snakeoil.key
smtpd_tls_security_level=may
smtp_tls_CApath=/etc/ssl/certs
smtp_tls_security_level=may
smtp_tls_session_cache_database = <%text>btree:${data_directory}/smtp_scache</%text>
smtpd_relay_restrictions = permit_mynetworks permit_sasl_authenticated defer_unauth_destination
myhostname = ${hostname}
alias_maps = hash:/etc/aliases
alias_database = hash:/etc/aliases
mydestination = $myhostname, localhost, localhost.localdomain, ${hostname}
relayhost =
mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128
mailbox_size_limit = 0
recipient_delimiter = +
inet_interfaces = all
inet_protocols = all
unknown_local_recipient_reject_code = 550
owner_request_special = no
transport_maps =
hash:/var/lib/mailman3/data/postfix_lmtp
local_recipient_maps =
hash:/var/lib/mailman3/data/postfix_lmtp
relay_domains =
hash:/var/lib/mailman3/data/postfix_domains

View file

@ -1,50 +0,0 @@
[uwsgi]
# Port on which uwsgi will be listening.
uwsgi-socket = /run/mailman3-web/uwsgi.sock
#Enable threading for python
enable-threads = true
# Move to the directory where the django files are.
chdir = /usr/share/mailman3-web
# Use the wsgi file provided with the django project.
wsgi-file = wsgi.py
# Setup default number of processes and threads per process.
master = true
process = 2
threads = 2
# Drop privileges and don't run as root.
uid = www-data
gid = www-data
plugins = python3
# Setup the django_q related worker processes.
attach-daemon = python3 manage.py qcluster
# Setup hyperkitty's cron jobs.
#unique-cron = -1 -1 -1 -1 -1 ./manage.py runjobs minutely
#unique-cron = -15 -1 -1 -1 -1 ./manage.py runjobs quarter_hourly
#unique-cron = 0 -1 -1 -1 -1 ./manage.py runjobs hourly
#unique-cron = 0 0 -1 -1 -1 ./manage.py runjobs daily
#unique-cron = 0 0 1 -1 -1 ./manage.py runjobs monthly
#unique-cron = 0 0 -1 -1 0 ./manage.py runjobs weekly
#unique-cron = 0 0 1 1 -1 ./manage.py runjobs yearly
# Setup the request log.
#req-logger = file:/var/log/mailman3/web/mailman-web.log
# Log cron separately.
#logger = cron file:/var/log/mailman3/web/mailman-web-cron.log
#log-route = cron uwsgi-cron
# Log qcluster commands separately.
#logger = qcluster file:/var/log/mailman3/web/mailman-web-qcluster.log
#log-route = qcluster uwsgi-daemons
# Last logger; it logs everything else.
#logger = file:/var/log/mailman3/web/mailman-web-error.log
logto = /var/log/mailman3/web/mailman-web.log

View file

@ -1,104 +0,0 @@
directories = {
'/var/lib/mailman3': {
'owner': 'list',
'group': 'list',
'needs': {
'zfs_dataset:tank/mailman',
'pkg_apt:mailman3-full',
},
'needed_by': {
'svc_systemd:mailman3.service',
'svc_systemd:mailman3-web.service',
},
},
}
files = {
'/etc/postfix/main.cf': {
'source': 'postfix.cf',
'content_type': 'mako',
'mode': '0644',
'context': {
'hostname': node.metadata.get('mailman/hostname'),
},
'needs': {
'pkg_apt:postfix',
},
'triggers': {
'svc_systemd:postfix.service:restart',
},
},
'/etc/mailman3/mailman.cfg': {
'content_type': 'mako',
'owner': 'root',
'group': 'list',
'mode': '0640',
'context': node.metadata.get('mailman'),
'needs': {
'pkg_apt:mailman3-full',
},
'triggers': {
'svc_systemd:mailman3.service:restart',
'svc_systemd:mailman3-web.service:restart',
},
},
'/etc/mailman3/mailman-web.py': {
'content_type': 'mako',
'owner': 'root',
'group': 'www-data',
'mode': '0640',
'context': node.metadata.get('mailman'),
'needs': {
'pkg_apt:mailman3-full',
},
'triggers': {
'svc_systemd:mailman3.service:restart',
'svc_systemd:mailman3-web.service:restart',
},
},
'/etc/mailman3/mailman-hyperkitty.cfg': {
'content_type': 'mako',
'owner': 'root',
'group': 'list',
'mode': '0640',
'context': node.metadata.get('mailman'),
'needs': {
'pkg_apt:mailman3-full',
},
'triggers': {
'svc_systemd:mailman3.service:restart',
'svc_systemd:mailman3-web.service:restart',
},
},
'/etc/mailman3/uwsgi.ini': {
'content_type': 'text',
'owner': 'root',
'group': 'root',
'mode': '0644',
'needs': {
'pkg_apt:mailman3-full',
},
'triggers': {
'svc_systemd:mailman3.service:restart',
'svc_systemd:mailman3-web.service:restart',
},
},
}
svc_systemd = {
'postfix.service': {
'needs': {
'pkg_apt:postfix',
},
},
'mailman3.service': {
'needs': {
'pkg_apt:mailman3-full',
},
},
'mailman3-web.service': {
'needs': {
'pkg_apt:mailman3-full',
},
},
}

View file

@ -1,116 +0,0 @@
import base64
def derive_mailadmin_secret(metadata, salt):
node_id = metadata.get('id')
raw = base64.b64decode(
repo.vault.random_bytes_as_base64_for(f'{node_id}_{salt}', length=32).value
)
return base64.urlsafe_b64encode(raw).rstrip(b'=').decode('ascii')
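Aside: the helper above just re-encodes 32 deterministic vault bytes as unpadded URL-safe base64. A minimal, standard-library sketch of that transformation, with the vault call replaced by a hypothetical placeholder value:

import base64

vault_value = base64.b64encode(b'\x00' * 32).decode()  # stand-in for repo.vault.random_bytes_as_base64_for(...).value
raw = base64.b64decode(vault_value)                     # back to 32 raw bytes
secret = base64.urlsafe_b64encode(raw).rstrip(b'=').decode('ascii')
print(len(secret))                                      # 43, i.e. a 43-character URL-safe token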
defaults = {
'apt': {
'packages': {
'mailman3-full': {
'needs': {
'postgres_db:mailman',
'postgres_role:mailman',
'zfs_dataset:tank/mailman',
}
},
'postfix': {},
'python3-psycopg2': {
'needed_by': {
'pkg_apt:mailman3-full',
},
},
'apache2': {
'installed': False,
'needs': {
'pkg_apt:mailman3-full',
},
},
},
},
'zfs': {
'datasets': {
'tank/mailman': {
'mountpoint': '/var/lib/mailman3',
},
},
},
}
@metadata_reactor.provides(
'postgresql',
'mailman',
)
def postgresql(metadata):
node_id = metadata.get('id')
db_password = repo.vault.password_for(f'{node_id} database mailman')
return {
'postgresql': {
'databases': {
'mailman': {
'owner': 'mailman',
},
},
'roles': {
'mailman': {
'password': db_password,
},
},
},
'mailman': {
'db_password': db_password,
},
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('mailman/hostname'): {
'content': 'mailman/vhost.conf',
},
},
},
}
@metadata_reactor.provides(
'mailman/secret_key',
)
def secret_key(metadata):
import base64
node_id = metadata.get('id')
raw = base64.b64decode(
repo.vault.random_bytes_as_base64_for(f'{node_id}_mailman_secret_key', length=32).value
)
secret_key = base64.urlsafe_b64encode(raw).rstrip(b'=').decode('ascii')
return {
'mailman': {
'secret_key': secret_key,
},
}
@metadata_reactor.provides(
'mailman',
)
def secrets(metadata):
return {
'mailman': {
'web_secret': derive_mailadmin_secret(metadata, 'secret_key'),
'api_password': derive_mailadmin_secret(metadata, 'api_password'),
'archiver_key': derive_mailadmin_secret(metadata, 'archiver_key'),
},
}

View file

@ -1,6 +1,6 @@
<?php
// https://raw.githubusercontent.com/Radiergummi/autodiscover/master/autodiscover/autodiscover.php
/********************************
* Autodiscover responder
@ -8,45 +8,45 @@
* This PHP script is intended to respond to any request to http(s)://mydomain.com/autodiscover/autodiscover.xml.
* If configured properly, it will send a spec-compliant autodiscover XML response, pointing mail clients to the
* appropriate mail services.
* If you use MAPI or ActiveSync, stick with the Autodiscover service your mail server provides for you. But if
* If you use MAPI or ActiveSync, stick with the Autodiscover service your mail server provides for you. But if
* you use POP/IMAP servers, this will provide autoconfiguration to Outlook, Apple Mail and mobile devices.
*
* To work properly, you'll need to set the service (sub)domains below in the settings section to the correct
* To work properly, you'll need to set the service (sub)domains below in the settings section to the correct
* domain names, adjust ports and SSL.
*/
//get raw POST data so we can extract the email address
$request = file_get_contents("php://input");
// optional debug log
# file_put_contents( 'request.log', $request, FILE_APPEND );
// retrieve email address from client request
preg_match( "/\<EMailAddress\>(.*?)\<\/EMailAddress\>/", $request, $email );
// check for invalid mail, to prevent XSS
if (filter_var($email[1], FILTER_VALIDATE_EMAIL) === false) {
throw new Exception('Invalid E-Mail provided');
}
// get domain from email address
$domain = substr( strrchr( $email[1], "@" ), 1 );
/**************************************
* Port and server settings below *
**************************************/
// IMAP settings
$imapServer = 'imap.' . $domain; // imap.example.com
$imapPort = 993;
$imapSSL = true;
// SMTP settings
$smtpServer = 'smtp.' . $domain; // smtp.example.com
$smtpPort = 587;
$smtpSSL = true;
//set Content-Type
header( 'Content-Type: application/xml' );
?>
<?php echo '<?xml version="1.0" encoding="utf-8" ?>'; ?>

View file

@ -33,12 +33,6 @@ defaults = {
'mountpoint': '/var/vmail',
'compression': 'on',
},
'tank/vmail/index': {
'mountpoint': '/var/vmail/index',
'compression': 'on',
'com.sun:auto-snapshot': 'false',
'backup': False,
},
},
},
}

View file

@ -1 +0,0 @@
https://mariadb.com/kb/en/systemd/#configuring-mariadb-to-write-the-error-log-to-syslog

View file

@ -1,87 +0,0 @@
from shlex import quote
def mariadb(sql, **kwargs):
kwargs_string = ''.join(f" --{k} {v}" for k, v in kwargs.items())
return f"mariadb{kwargs_string} -Bsr --execute {quote(sql)}"
directories = {
'/var/lib/mysql': {
'owner': 'mysql',
'group': 'mysql',
'needs': [
'zfs_dataset:tank/mariadb',
'pkg_apt:mariadb-server',
'pkg_apt:mariadb-client',
],
},
}
files = {
'/etc/mysql/conf.d/override.conf': {
'content': repo.libs.ini.dumps(node.metadata.get('mariadb/conf')),
'content_type': 'text',
},
}
svc_systemd = {
'mariadb.service': {
'needs': [
'pkg_apt:mariadb-server',
'pkg_apt:mariadb-client',
],
},
}
actions = {
'mariadb_sec_remove_anonymous_users': {
'command': mariadb("DELETE FROM mysql.global_priv WHERE User=''"),
'unless': mariadb("SELECT count(0) FROM mysql.global_priv WHERE User = ''") + " | grep -q '^0$'",
'needs': [
'svc_systemd:mariadb.service',
],
'triggers': [
'svc_systemd:mariadb.service:restart',
],
},
'mariadb_sec_remove_remote_root': {
'command': mariadb("DELETE FROM mysql.global_priv WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1')"),
'unless': mariadb("SELECT count(0) FROM mysql.global_priv WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1')") + " | grep -q '^0$'",
'needs': [
'svc_systemd:mariadb.service',
],
'triggers': [
'svc_systemd:mariadb.service:restart',
],
},
}
for db, conf in node.metadata.get('mariadb/databases', {}).items():
actions[f'mariadb_create_database_{db}'] = {
'command': mariadb(f"CREATE DATABASE {db}"),
'unless': mariadb(f"SHOW DATABASES LIKE '{db}'") + f" | grep -q '^{db}$'",
'needs': [
'svc_systemd:mariadb.service',
],
}
actions[f'mariadb_user_{db}_create'] = {
'command': mariadb(f"CREATE USER {db}"),
'unless': mariadb(f"SELECT User FROM mysql.user WHERE User = '{db}'") + f" | grep -q '^{db}$'",
'needs': [
f'action:mariadb_create_database_{db}',
],
}
pw = conf['password']
actions[f'mariadb_user_{db}_password'] = {
'command': mariadb(f"SET PASSWORD FOR {db} = PASSWORD('{conf['password']}')"),
'unless': f'echo {quote(pw)} | mariadb -u {db} -e quit -p',
'needs': [
f'action:mariadb_user_{db}_create',
],
}
actions[f'mariadb_grant_privileges_to_{db}'] = {
'command': mariadb(f"GRANT ALL PRIVILEGES ON {db}.* TO '{db}'", database=db),
'unless': mariadb(f"SHOW GRANTS FOR {db}") + f" | grep -q '^GRANT ALL PRIVILEGES ON `{db}`.* TO `{db}`@`%`'",
'needs': [
f'action:mariadb_user_{db}_create',
],
}

View file

@ -1,45 +0,0 @@
defaults = {
'apt': {
'packages': {
'mariadb-server': {
'needs': {
'zfs_dataset:tank/mariadb',
},
},
'mariadb-client': {
'needs': {
'zfs_dataset:tank/mariadb',
},
},
},
},
'mariadb': {
'databases': {},
'conf': {
# https://www.reddit.com/r/zfs/comments/u1xklc/mariadbmysql_database_settings_for_zfs
'mysqld': {
'skip-innodb_doublewrite': None,
'innodb_flush_method': 'fsync',
'innodb_doublewrite': '0',
'innodb_use_atomic_writes': '0',
'innodb_use_native_aio': '0',
'innodb_read_io_threads': '10',
'innodb_write_io_threads': '10',
'innodb_buffer_pool_size': '26G',
'innodb_flush_log_at_trx_commit': '1',
'innodb_log_file_size': '1G',
'innodb_flush_neighbors': '0',
'innodb_fast_shutdown': '2',
},
},
},
'zfs': {
'datasets': {
'tank/mariadb': {
'mountpoint': '/var/lib/mysql',
'recordsize': '16384',
'atime': 'off',
},
},
},
}

View file

@ -5,99 +5,41 @@ defaults = {
}
@metadata_reactor.provides(
'network',
)
def dhcp(metadata):
networks = {}
for network_name, network_conf in metadata.get('network').items():
_interface = ip_interface(network_conf['ipv4'])
_ip = _interface.ip
_network = _interface.network
_hosts = list(_network.hosts())
if network_conf.get('dhcp_server', False):
networks[network_name] = {
'dhcp_server_config': {
'subnet': str(_network),
'pool_from': str(_hosts[len(_hosts)//2]),
'pool_to': str(_hosts[-3]),
'router': str(_ip),
'domain-name-servers': str(_ip),
}
}
return {
'network': networks,
}
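To make the pool arithmetic in the dhcp reactor concrete, a worked example with a hypothetical interface address (not taken from the repo):

from ipaddress import ip_interface

_interface = ip_interface('10.0.0.1/24')
_hosts = list(_interface.network.hosts())     # 10.0.0.1 ... 10.0.0.254
print(_hosts[len(_hosts) // 2], _hosts[-3])   # 10.0.0.128 10.0.0.252

The pool therefore covers the upper half of the subnet and leaves the last two host addresses (.253 and .254) unused, while the interface IP doubles as router and DNS server.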
@metadata_reactor.provides(
'systemd/units',
)
def units(metadata):
if node.has_bundle('systemd-networkd'):
units = {}
units = {}
for network_name, network_conf in metadata.get('network').items():
interface_type = network_conf.get('type', None)
# network
units[f'{network_name}.network'] = {
'Match': {
'Name': network_name if interface_type == 'vlan' else network_conf['interface'],
},
'Network': {
'DHCP': network_conf.get('dhcp', 'no'),
'IPv6AcceptRA': network_conf.get('dhcp', 'no'),
'VLAN': set(
other_network_name
for other_network_name, other_network_conf in metadata.get('network', {}).items()
if other_network_conf.get('type') == 'vlan' and other_network_conf['vlan_interface'] == network_name
)
}
}
# type
if interface_type:
units[f'{network_name}.network']['Match']['Type'] = interface_type
# ips
for i in [4, 6]:
if network_conf.get(f'ipv{i}', None):
units[f'{network_name}.network'].update({
f'Address#ipv{i}': {
'Address': network_conf[f'ipv{i}'],
},
})
if f'gateway{i}' in network_conf:
units[f'{network_name}.network'].update({
f'Route#ipv{i}': {
'Gateway': network_conf[f'gateway{i}'],
'GatewayOnlink': 'yes',
}
})
# as vlan
if interface_type == 'vlan':
units[f"{network_name}.netdev"] = {
'NetDev': {
'Name': network_name,
'Kind': 'vlan',
},
'VLAN': {
'Id': network_conf['id'],
}
}
return {
'systemd': {
'units': units,
for type, network in metadata.get('network').items():
units[f'{type}.network'] = {
'Match': {
'Name': network['interface'],
},
'Network': {
'DHCP': network.get('dhcp', 'no'),
'IPv6AcceptRA': network.get('dhcp', 'no'),
}
}
else:
return {}
for i in [4, 6]:
if network.get(f'ipv{i}', None):
units[f'{type}.network'].update({
f'Address#ipv{i}': {
'Address': network[f'ipv{i}'],
},
})
if f'gateway{i}' in network:
units[f'{type}.network'].update({
f'Route#ipv{i}': {
'Gateway': network[f'gateway{i}'],
'GatewayOnlink': 'yes',
}
})
return {
'systemd': {
'units': units,
}
}
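A sketch of what the shorter units reactor in this hunk produces for one hypothetical metadata entry (interface name and addresses are illustrative):

network = {'interface': 'eth0', 'ipv4': '10.0.0.2/24', 'gateway4': '10.0.0.1'}

unit = {
    'Match': {'Name': network['interface']},
    'Network': {'DHCP': network.get('dhcp', 'no'), 'IPv6AcceptRA': network.get('dhcp', 'no')},
}
for i in (4, 6):
    if network.get(f'ipv{i}'):
        unit[f'Address#ipv{i}'] = {'Address': network[f'ipv{i}']}
    if f'gateway{i}' in network:
        unit[f'Route#ipv{i}'] = {'Gateway': network[f'gateway{i}'], 'GatewayOnlink': 'yes'}

print(unit)
# {'Match': {'Name': 'eth0'}, 'Network': {'DHCP': 'no', 'IPv6AcceptRA': 'no'},
#  'Address#ipv4': {'Address': '10.0.0.2/24'}, 'Route#ipv4': {'Gateway': '10.0.0.1', 'GatewayOnlink': 'yes'}}

The systemd bundle (see its items.py hunk further down) then writes such a dict to /etc/systemd/network/<name>.network and restarts systemd-networkd.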

View file

@ -29,8 +29,8 @@ defaults = {
'exclude': [
'^appdata_',
'^updater-',
'^nextcloud\\.log',
'^updater\\.log',
'^nextcloud\.log',
'^updater\.log',
'^[^/]+/cache',
'^[^/]+/files_versions',
'^[^/]+/files_trashbin',
@ -123,9 +123,9 @@ def config(metadata):
],
'cache_path': '/var/lib/nextcloud/.cache',
'upgrade.disable-web': True,
'memcache.local': '\\OC\\Memcache\\Redis',
'memcache.locking': '\\OC\\Memcache\\Redis',
'memcache.distributed': '\\OC\\Memcache\\Redis',
'memcache.local': '\OC\Memcache\Redis',
'memcache.locking': '\OC\Memcache\Redis',
'memcache.distributed': '\OC\Memcache\Redis',
'redis': {
'host': '/var/run/redis/nextcloud.sock'
},
@ -142,7 +142,6 @@ def config(metadata):
'versions_retention_obligation': 'auto, 90',
'simpleSignUpLink.shown': False,
'allow_local_remote_servers': True, # FIXME?
'maintenance_window_start': 1, # https://docs.nextcloud.com/server/29/admin_manual/configuration_server/background_jobs_configuration.html#maintenance-window-start
},
},
}

View file

@ -1,6 +1,6 @@
pid /var/run/nginx.pid;
user www-data;
worker_processes ${worker_processes};
worker_processes 10;
% for module in sorted(modules):
load_module modules/ngx_${module}_module.so;
@ -21,9 +21,6 @@ http {
server_names_hash_bucket_size 128;
tcp_nopush on;
client_max_body_size 32G;
ssl_dhparam "/etc/ssl/certs/dhparam.pem";
# don't show nginx version
server_tokens off;
% if node.has_bundle('php'):
upstream php-handler {
@ -31,13 +28,5 @@ http {
}
% endif
% if has_websockets:
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
% endif
include /etc/nginx/sites-enabled/*;
include /etc/nginx/sites/*;
}

View file

@ -9,7 +9,7 @@ directories = {
'svc_systemd:nginx:restart',
},
},
'/etc/nginx/sites-available': {
'/etc/nginx/sites': {
'purge': True,
'triggers': {
'svc_systemd:nginx:restart',
@ -32,8 +32,6 @@ files = {
'content_type': 'mako',
'context': {
'modules': node.metadata.get('nginx/modules'),
'worker_processes': node.metadata.get('vm/cores'),
'has_websockets': node.metadata.get('nginx/has_websockets'),
},
'triggers': {
'svc_systemd:nginx:restart',
@ -76,15 +74,9 @@ files = {
},
}
symlinks = {
'/etc/nginx/sites-enabled': {
'target': '/etc/nginx/sites-available',
},
}
actions = {
'nginx-generate-dhparam': {
'command': 'openssl dhparam -dsaparam -out /etc/ssl/certs/dhparam.pem 4096',
'command': 'openssl dhparam -out /etc/ssl/certs/dhparam.pem 2048',
'unless': 'test -f /etc/ssl/certs/dhparam.pem',
},
}
@ -100,7 +92,7 @@ svc_systemd = {
for name, config in node.metadata.get('nginx/vhosts').items():
files[f'/etc/nginx/sites-available/{name}'] = {
files[f'/etc/nginx/sites/{name}'] = {
'content': Template(filename=join(repo.path, 'data', config['content'])).render(
server_name=name,
**config.get('context', {}),
@ -116,6 +108,6 @@ for name, config in node.metadata.get('nginx/vhosts').items():
}
if name in node.metadata.get('letsencrypt/domains'):
files[f'/etc/nginx/sites-available/{name}']['needs'].append(
files[f'/etc/nginx/sites/{name}']['needs'].append(
f'action:letsencrypt_ensure-some-certificate_{name}',
)
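Side note on the Template call above: each vhost file is a Mako template from data/, rendered with server_name plus the per-vhost context. A tiny standalone sketch with an inline template (template text, hostname and target value layout are illustrative, not the repo's actual nginx templates):

from mako.template import Template

vhost_template = (
    'server {\n'
    '    server_name ${server_name};\n'
    '    location / {\n'
    '        proxy_pass ${target};\n'
    '    }\n'
    '}\n'
)
print(Template(text=vhost_template).render(
    server_name='pve.example.com',
    target='https://localhost:8006',
))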

View file

@ -18,7 +18,6 @@ defaults = {
'nginx': {
'vhosts': {},
'modules': set(),
'has_websockets': False,
},
'systemd': {
'units': {
@ -74,6 +73,7 @@ def dns(metadata):
@metadata_reactor.provides(
'letsencrypt/domains',
'letsencrypt/reload_after',
)
def letsencrypt(metadata):
return {
@ -96,7 +96,7 @@ def monitoring(metadata):
'monitoring': {
'services': {
hostname: {
'vars.command': f"/usr/bin/curl -X GET -L --fail --no-progress-meter -o /dev/null {vhost.get('check_protocol', 'https')}://{quote(hostname + vhost.get('check_path', '/'))}",
'vars.command': f"/usr/bin/curl -X GET -L --fail --no-progress-meter -o /dev/null {quote(hostname + vhost.get('check_path', ''))}",
}
for hostname, vhost in metadata.get('nginx/vhosts').items()
},
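For reference, the check command generated by the variant without check_protocol (the second of the two command lines above), using a hypothetical vhost; note that the hostname is handed to curl without a scheme:

from shlex import quote

hostname, vhost = 'cloud.example.com', {'check_path': '/login'}
print(f"/usr/bin/curl -X GET -L --fail --no-progress-meter -o /dev/null "
      f"{quote(hostname + vhost.get('check_path', ''))}")
# /usr/bin/curl -X GET -L --fail --no-progress-meter -o /dev/null cloud.example.com/login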

View file

@ -1,3 +1,9 @@
from os.path import join
import json
from bundlewrap.utils.dicts import merge_dict
version = node.metadata.get('php/version')
files = {
@ -15,7 +21,7 @@ files = {
f'pkg_apt:php{version}-fpm',
},
'triggers': {
f'svc_systemd:php{version}-fpm.service:restart',
f'svc_systemd:php{version}-fpm:restart',
},
},
f'/etc/php/{version}/fpm/pool.d/www.conf': {
@ -27,13 +33,13 @@ files = {
f'pkg_apt:php{version}-fpm',
},
'triggers': {
f'svc_systemd:php{version}-fpm.service:restart',
f'svc_systemd:php{version}-fpm:restart',
},
},
}
svc_systemd = {
f'php{version}-fpm.service': {
f'php{version}-fpm': {
'needs': {
'pkg_apt:',
f'file:/etc/php/{version}/fpm/php.ini',

View file

@ -113,7 +113,7 @@ def php_ini(metadata):
'opcache.revalidate_freq': '60',
},
}
return {
'php': {
'php.ini': {
@ -145,7 +145,7 @@ def www_conf(metadata):
'pm': 'dynamic',
'pm.max_children': int(threads*2),
'pm.start_servers': int(threads),
'pm.min_spare_servers': max([1, int(threads/2)]),
'pm.min_spare_servers': int(threads/2),
'pm.max_spare_servers': int(threads),
'pm.max_requests': int(threads*32),
},
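Worked numbers for the pool sizing above, assuming a hypothetical node with 8 threads; the max([1, ...]) variant of pm.min_spare_servers only differs on a single-thread node, where int(threads/2) would otherwise be 0:

threads = 8
print({
    'pm.max_children':      int(threads * 2),    # 16
    'pm.start_servers':     int(threads),        # 8
    'pm.min_spare_servers': int(threads / 2),    # 4
    'pm.max_spare_servers': int(threads),        # 8
    'pm.max_requests':      int(threads * 32),   # 256
})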

View file

@ -44,9 +44,7 @@ smtpd_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1
smtpd_restriction_classes = mua_sender_restrictions, mua_client_restrictions, mua_helo_restrictions
mua_client_restrictions = permit_sasl_authenticated, reject
mua_sender_restrictions = permit_sasl_authenticated, reject
## MS Outlook, incompatible with reject_non_fqdn_hostname and/or reject_invalid_hostname
## https://unix.stackexchange.com/a/91753/357916
mua_helo_restrictions = permit_mynetworks, permit
mua_helo_restrictions = permit_mynetworks, reject_non_fqdn_hostname, reject_invalid_hostname, permit
smtpd_milters = inet:localhost:8891 inet:127.0.0.1:11332
non_smtpd_milters = inet:localhost:8891 inet:127.0.0.1:11332

View file

@ -86,8 +86,6 @@ if node.has_bundle('telegraf'):
'needs': [
'pkg_apt:acl',
'svc_systemd:postfix',
'svc_systemd:postfix:reload',
'svc_systemd:postfix:restart',
],
}
actions['postfix_setfacl_default_telegraf'] = {
@ -96,7 +94,5 @@ if node.has_bundle('telegraf'):
'needs': [
'pkg_apt:acl',
'svc_systemd:postfix',
'svc_systemd:postfix:reload',
'svc_systemd:postfix:restart',
],
}

View file

@ -1,22 +0,0 @@
# DO NOT DISABLE!
# If you change this first entry you will need to make sure that the
# database superuser can access the database using some other method.
# Noninteractive access to all databases is required during automatic
# maintenance (custom daily cronjobs, replication, and similar tasks).
#
# Database administrative login by Unix domain socket
local all postgres peer
# TYPE DATABASE USER ADDRESS METHOD
# "local" is for Unix domain socket connections only
local all all peer
# IPv4 local connections:
host all all 127.0.0.1/32 ${node.metadata.get('postgresql/password_algorithm', 'md5')}
# IPv6 local connections:
host all all ::1/128 ${node.metadata.get('postgresql/password_algorithm', 'md5')}
# Allow replication connections from localhost, by a user with the
# replication privilege.
local replication all peer
host replication all 127.0.0.1/32 ${node.metadata.get('postgresql/password_algorithm', 'md5')}
host replication all ::1/128 ${node.metadata.get('postgresql/password_algorithm', 'md5')}

View file

@ -12,27 +12,12 @@ directories = {
'zfs_dataset:tank/postgresql',
],
'needed_by': [
'svc_systemd:postgresql.service',
'svc_systemd:postgresql',
],
}
}
files = {
f"/etc/postgresql/{version}/main/pg_hba.conf": {
'content_type': 'mako',
'mode': '0640',
'owner': 'postgres',
'group': 'postgres',
'needs': [
'pkg_apt:postgresql',
],
'needed_by': [
'svc_systemd:postgresql.service',
],
'triggers': [
'svc_systemd:postgresql.service:restart',
],
},
f"/etc/postgresql/{version}/main/conf.d/managed.conf": {
'content': '\n'.join(
f'{key} = {value}'
@ -40,19 +25,16 @@ files = {
) + '\n',
'owner': 'postgres',
'group': 'postgres',
'needs': [
'pkg_apt:postgresql',
],
'needed_by': [
'svc_systemd:postgresql.service',
'svc_systemd:postgresql',
],
'triggers': [
'svc_systemd:postgresql.service:restart',
'svc_systemd:postgresql:restart',
],
},
}
svc_systemd['postgresql.service'] = {
svc_systemd['postgresql'] = {
'needs': [
'pkg_apt:postgresql',
],
@ -61,13 +43,13 @@ svc_systemd['postgresql.service'] = {
for user, config in node.metadata.get('postgresql/roles').items():
postgres_roles[user] = merge_dict(config, {
'needs': [
'svc_systemd:postgresql.service',
'svc_systemd:postgresql',
],
})
for database, config in node.metadata.get('postgresql/databases').items():
postgres_dbs[database] = merge_dict(config, {
'needs': [
'svc_systemd:postgresql.service',
'svc_systemd:postgresql',
],
})
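merge_dict is used here to layer the generated 'needs' dependency on top of whatever role or database config the metadata already carries. A rough stand-in for that call (not BundleWrap's actual implementation; the password value is a placeholder):

def merge_needs(config, extra):
    merged = dict(config)                     # shallow copy of the metadata config
    merged['needs'] = list(merged.get('needs', [])) + list(extra['needs'])
    return merged

print(merge_needs({'password': '<from vault>'}, {'needs': ['svc_systemd:postgresql']}))
# {'password': '<from vault>', 'needs': ['svc_systemd:postgresql']}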

View file

@ -6,11 +6,7 @@ root_password = repo.vault.password_for(f'{node.name} postgresql root')
defaults = {
'apt': {
'packages': {
'postgresql': {
'needs': {
'zfs_dataset:tank/postgresql',
},
},
'postgresql': {},
},
},
'backup': {
@ -58,25 +54,6 @@ def conf(metadata):
}
@metadata_reactor.provides(
'apt/config/APT/NeverAutoRemove',
)
def apt(metadata):
return {
'apt': {
'config': {
'APT': {
'NeverAutoRemove': {
# https://github.com/credativ/postgresql-common/blob/master/pg_updateaptconfig#L17-L21
f"^postgresql.*-{metadata.get('postgresql/version')}",
},
},
},
},
}
@metadata_reactor.provides(
'zfs/datasets',
)

View file

@ -1,21 +0,0 @@
files = {
'/etc/apt/apt.conf.d/10pveapthook': {
'content_type': 'any',
'mode': '0644',
},
'/etc/apt/apt.conf.d/76pveconf': {
'content_type': 'any',
'mode': '0444',
},
'/etc/apt/apt.conf.d/76pveproxy': {
'content_type': 'any',
'mode': '0644',
},
'/etc/network/interfaces': {
'content_type': 'any',
},
}
symlinks['/etc/ssh/ssh_host_rsa_key.pub'] = {
'target': '/etc/ssh/ssh_host_managed_key.pub',
}

View file

@ -1,100 +0,0 @@
defaults = {
'apt': {
'packages': {
'linux-image-amd64': {
'installed': False,
},
'proxmox-default-kernel': {},
# after reboot
'proxmox-ve': {},
'postfix': {},
'open-iscsi': {},
'chrony': {},
'os-prober': {
'installed': False,
},
'dnsmasq-base': {},
},
'sources': {
'proxmox-ve': {
'options': {
'aarch': 'amd64',
},
'urls': {
'http://download.proxmox.com/debian/pve',
},
'suites': {
'{codename}',
},
'components': {
'pve-no-subscription',
},
'key': 'proxmox-ve-{codename}',
},
},
},
# 'nftables': {
# 'input': {
# 'tcp dport 8006 accept',
# },
# },
'zfs': {
'datasets': {
'tank/proxmox-ve': {
'mountpoint': '/var/lib/proxmox-ve',
},
}
}
}
# @metadata_reactor.provides(
# 'systemd',
# )
# def bridge(metadata):
# return {
# 'systemd': {
# 'units': {
# # f'internal.network': {
# # 'Network': {
# # 'Bridge': 'br0',
# # },
# # },
# 'br0.netdev': {
# 'NetDev': {
# 'Name': 'br0',
# 'Kind': 'bridge'
# },
# },
# 'br0.network': {
# 'Match': {
# 'Name': 'br0',
# },
# 'Network': {
# 'Unmanaged': 'yes'
# },
# },
# },
# },
# }
@metadata_reactor.provides(
'nginx/has_websockets',
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'has_websockets': True,
'vhosts': {
metadata.get('proxmox-ve/domain'): {
'content': 'nginx/proxy_pass.conf',
'context': {
'target': 'https://localhost:8006',
'websockets': True,
}
},
},
},
}

View file

@ -1,25 +0,0 @@
from shlex import quote
directories = {
'/opt/pyenv': {},
'/opt/pyenv/install': {},
}
git_deploy = {
'/opt/pyenv/install': {
'repo': 'https://github.com/pyenv/pyenv.git',
'rev': 'master',
'needs': {
'directory:/opt/pyenv/install',
},
},
}
for version in node.metadata.get('pyenv/versions'):
actions[f'pyenv_install_{version}'] = {
'command': f'PYENV_ROOT=/opt/pyenv /opt/pyenv/install/bin/pyenv install {quote(version)}',
'unless': f'PYENV_ROOT=/opt/pyenv /opt/pyenv/install/bin/pyenv versions --bare | grep -Fxq {quote(version)}',
'needs': {
'git_deploy:/opt/pyenv/install',
},
}
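For a hypothetical version entry such as '3.11.9', the loop above boils down to this command/unless pair; grep -Fxq makes the check an exact whole-line match against the pyenv versions output:

from shlex import quote

version = '3.11.9'
print(f'PYENV_ROOT=/opt/pyenv /opt/pyenv/install/bin/pyenv install {quote(version)}')
print(f'PYENV_ROOT=/opt/pyenv /opt/pyenv/install/bin/pyenv versions --bare | grep -Fxq {quote(version)}')
# PYENV_ROOT=/opt/pyenv /opt/pyenv/install/bin/pyenv install 3.11.9
# PYENV_ROOT=/opt/pyenv /opt/pyenv/install/bin/pyenv versions --bare | grep -Fxq 3.11.9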

View file

@ -1,23 +0,0 @@
defaults = {
'apt': {
'packages': {
'build-essential': {},
'libssl-dev': {},
'zlib1g-dev': {},
'libbz2-dev': {},
'libreadline-dev': {},
'libsqlite3-dev': {},
'curl': {},
'libncurses-dev': {},
'xz-utils': {},
'tk-dev': {},
'libxml2-dev': {},
'libxmlsec1-dev': {},
'libffi-dev': {},
'liblzma-dev': {},
},
},
'pyenv': {
'versions': set(),
},
}

View file

@ -1,3 +0,0 @@
- Homematic > Settings > Control panel > Security > SSH > active & set password
- ssh to node > `ssh-copy-id -o StrictHostKeyChecking=no root@{homematic}`
- Homematic > Settings > Control panel > Security > Automatic forwarding to HTTPS > active

View file

@ -1,3 +1,6 @@
from shlex import quote
@metadata_reactor.provides(
'letsencrypt/domains',
)
@ -17,6 +20,8 @@ def letsencrypt(metadata):
'systemd-timers/raspberrymatic-cert',
)
def systemd_timers(metadata):
domain = metadata.get('raspberrymatic-cert/domain')
return {
'systemd-timers': {
'raspberrymatic-cert': {

View file

@ -1,15 +1,12 @@
directories = {
'/etc/redis': {
'purge': True,
'owner': 'redis',
'mode': '2770',
'needs': [
'pkg_apt:redis-server',
],
},
'/var/lib/redis': {
'owner': 'redis',
'mode': '0750',
'needs': [
'pkg_apt:redis-server',
],
@ -48,7 +45,7 @@ for name, conf in node.metadata.get('redis').items():
f'svc_systemd:redis-{name}:restart'
],
}
svc_systemd[f'redis-{name}'] = {
'needs': [
'svc_systemd:redis',

View file

@ -6,16 +6,80 @@ $config['enable_installer'] = true;
/* Local configuration for Roundcube Webmail */
// ----------------------------------
// SQL DATABASE
// ----------------------------------
// Database connection string (DSN) for read+write operations
// Format (compatible with PEAR MDB2): db_provider://user:password@host/database
// Currently supported db_providers: mysql, pgsql, sqlite, mssql or sqlsrv
// For examples see http://pear.php.net/manual/en/package.database.mdb2.intro-dsn.php
// NOTE: for SQLite use absolute path: 'sqlite:////full/path/to/sqlite.db?mode=0646'
$config['db_dsnw'] = '${database['provider']}://${database['user']}:${database['password']}@${database['host']}/${database['name']}';
$config['imap_host'] = 'localhost';
$config['smtp_host'] = 'tls://localhost';
// ----------------------------------
// IMAP
// ----------------------------------
// The mail host chosen to perform the log-in.
// Leave blank to show a textbox at login, give a list of hosts
// to display a pulldown menu or set one host as string.
// To use SSL/TLS connection, enter hostname with prefix ssl:// or tls://
// Supported replacement variables:
// %n - hostname ($_SERVER['SERVER_NAME'])
// %t - hostname without the first part
// %d - domain (http hostname $_SERVER['HTTP_HOST'] without the first part)
// %s - domain name after the '@' from e-mail address provided at login screen
// For example %n = mail.domain.tld, %t = domain.tld
// WARNING: After hostname change update of mail_host column in users table is
// required to match old user data records with the new host.
$config['default_host'] = 'localhost';
// ----------------------------------
// SMTP
// ----------------------------------
// SMTP server host (for sending mails).
// To use SSL/TLS connection, enter hostname with prefix ssl:// or tls://
// If left blank, the PHP mail() function is used
// Supported replacement variables:
// %h - user's IMAP hostname
// %n - hostname ($_SERVER['SERVER_NAME'])
// %t - hostname without the first part
// %d - domain (http hostname $_SERVER['HTTP_HOST'] without the first part)
// %z - IMAP domain (IMAP hostname without the first part)
// For example %n = mail.domain.tld, %t = domain.tld
$config['smtp_server'] = 'tls://localhost';
// SMTP username (if required) if you use %u as the username Roundcube
// will use the current username for login
$config['smtp_user'] = '%u';
// SMTP password (if required) if you use %p as the password Roundcube
// will use the current user's password for login
$config['smtp_pass'] = '%p';
// provide a URL where a user can get support for this Roundcube installation
// PLEASE DO NOT LINK TO THE ROUNDCUBE.NET WEBSITE HERE!
$config['support_url'] = '';
// this key is used to encrypt the users imap password which is stored
// in the session record (and the client cookie if remember password is enabled).
// please provide a string of exactly 24 chars.
$config['des_key'] = '${des_key}';
// Name your service. This is displayed on the login screen and in the window title
$config['product_name'] = '${product_name}';
// ----------------------------------
// PLUGINS
// ----------------------------------
// List of active plugins (in plugins/ directory)
$config['plugins'] = array(${', '.join(f'"{plugin}"' for plugin in plugins)});
// the default locale setting (leave empty for auto-detection)
// RFC1766 formatted language name like en_US, de_DE, de_CH, fr_FR, pt_BR
$config['language'] = 'de_DE';
// https://serverfault.com/a/991304
$config['smtp_conn_options'] = array(
'ssl' => array(
'verify_peer' => false,

View file

@ -14,4 +14,4 @@ $config['password_dovecotpw'] = '/usr/bin/sudo /usr/bin/doveadm pw';
$config['password_dovecotpw_method'] = 'ARGON2ID';
$config['password_dovecotpw_with_method'] = true;
$config['password_db_dsn'] = 'pgsql://mailserver:${mailserver_db_password}@localhost/mailserver';
$config['password_query'] = "UPDATE users SET password = %P FROM domains WHERE domains.id = users.domain_id AND domains.name = %d AND users.name = %l";
$config['password_query'] = "UPDATE users SET password=%D FROM domains WHERE domains.id = domain_id AND domains.name = %d AND users.name = %l";

View file

@ -1,8 +1,7 @@
assert node.has_bundle('php')
assert node.has_bundle('mailserver')
roundcube_version = node.metadata.get('roundcube/version')
php_version = node.metadata.get('php/version')
version = node.metadata.get('roundcube/version')
directories = {
'/opt/roundcube': {
@ -23,9 +22,9 @@ directories = {
}
files[f'/tmp/roundcube-{roundcube_version}.tar.gz'] = {
files[f'/tmp/roundcube-{version}.tar.gz'] = {
'content_type': 'download',
'source': f'https://github.com/roundcube/roundcubemail/releases/download/{roundcube_version}/roundcubemail-{roundcube_version}-complete.tar.gz',
'source': f'https://github.com/roundcube/roundcubemail/releases/download/{version}/roundcubemail-{version}-complete.tar.gz',
'triggered': True,
}
actions['delete_roundcube'] = {
@ -33,18 +32,18 @@ actions['delete_roundcube'] = {
'triggered': True,
}
actions['extract_roundcube'] = {
'command': f'tar xfvz /tmp/roundcube-{roundcube_version}.tar.gz --strip 1 -C /opt/roundcube',
'unless': f'grep -q "Version {roundcube_version}" /opt/roundcube/index.php',
'command': f'tar xfvz /tmp/roundcube-{version}.tar.gz --strip 1 -C /opt/roundcube',
'unless': f'grep -q "Version {version}" /opt/roundcube/index.php',
'preceded_by': [
'action:delete_roundcube',
f'file:/tmp/roundcube-{roundcube_version}.tar.gz',
f'file:/tmp/roundcube-{version}.tar.gz',
],
'needs': [
'directory:/opt/roundcube',
],
'triggers': [
'action:chown_roundcube',
'action:composer_lock_reset',
'action:composer_install',
],
}
actions['chown_roundcube'] = {
@ -65,9 +64,6 @@ files['/opt/roundcube/config/config.inc.php'] = {
'needs': [
'action:chown_roundcube',
],
'triggers': [
f'svc_systemd:php{php_version}-fpm.service:restart',
],
}
files['/opt/roundcube/plugins/password/config.inc.php'] = {
'source': 'password.config.inc.php',
@ -79,16 +75,7 @@ files['/opt/roundcube/plugins/password/config.inc.php'] = {
'action:chown_roundcube',
],
}
actions['composer_lock_reset'] = {
'command': 'rm /opt/roundcube/composer.lock',
'triggered': True,
'needs': [
'action:chown_roundcube',
],
'triggers': [
'action:composer_install',
],
}
actions['composer_install'] = {
'command': "cp /opt/roundcube/composer.json-dist /opt/roundcube/composer.json && su www-data -s /bin/bash -c '/usr/bin/composer -d /opt/roundcube install'",
'triggered': True,

View file

@ -1,8 +0,0 @@
- reset (hold reset for 5-10 seconds, until user light starts flashing)
- open the web interface at 192.168.88.1
- set a password
- vlans need to be configured and an additional ip needs to be assigned to a vlan that is later accessible, preferably via an untagged port
- for example, add 10.0.0.62/24 to the "home" vlan
- this happens on the first apply
- when vlan filtering gets enabled, the apply freezes and the switch is no longer reachable under the old ip
- once filtering is active, the switch is reachable under its new ip, because you no longer talk to the bridge (where the old ip resided) but to the vlan interface (where the new ip resides)

View file

@ -1,122 +0,0 @@
routeros['/ip/dns'] = {
'servers': '8.8.8.8',
}
routeros['/system/identity'] = {
'name': node.name,
}
# for service in (
# 'api-ssl', # slow :(
# 'ftp', # we can download files via HTTP
# 'telnet',
# 'www-ssl', # slow :(
# 'winbox',
# ):
# routeros[f'/ip/service?name={service}'] = {
# 'disabled': True,
# }
# LOGGING_TOPICS = (
# 'critical',
# 'error',
# 'info',
# 'stp',
# 'warning',
# )
# for topic in LOGGING_TOPICS:
# routeros[f'/system/logging?action=memory&topics={topic}'] = {}
# routeros['/snmp'] = {
# 'enabled': True,
# }
# routeros['/snmp/community?name=public'] = {
# 'addresses': '0.0.0.0/0',
# 'disabled': False,
# 'read-access': True,
# 'write-access': False,
# }
# routeros['/system/clock'] = {
# 'time-zone-autodetect': False,
# 'time-zone-name': 'UTC',
# }
# routeros['/ip/neighbor/discovery-settings'] = {
# 'protocol': 'cdp,lldp,mndp',
# }
# routeros['/ip/route?dst-address=0.0.0.0/0'] = {
# 'gateway': node.metadata.get('routeros/gateway'),
# }
for vlan_name, vlan_id in node.metadata.get('routeros/vlans').items():
routeros[f'/interface/vlan?name={vlan_name}'] = {
'vlan-id': vlan_id,
'interface': 'bridge',
'tags': {
'routeros-vlan',
},
'needs': {
#'routeros:/interface/bridge?name=bridge',
},
}
routeros[f"/interface/bridge/vlan?vlan-ids={vlan_id}&dynamic=false"] = {
'bridge': 'bridge',
'untagged': sorted(node.metadata.get(f'routeros/vlan_ports/{vlan_name}/untagged')),
'tagged': sorted(node.metadata.get(f'routeros/vlan_ports/{vlan_name}/tagged')),
'_comment': vlan_name,
'tags': {
'routeros-vlan-ports',
},
'needs': {
'tag:routeros-vlan',
},
}
# create IPs
for ip, ip_conf in node.metadata.get('routeros/ips').items():
routeros[f'/ip/address?address={ip}'] = {
'interface': ip_conf['interface'],
'tags': {
'routeros-ip',
},
'needs': {
'tag:routeros-vlan',
},
}
routeros['/interface/bridge?name=bridge'] = {
'vlan-filtering': True, # ENABLE AFTER PORT VLANS ARE SET UP
'igmp-snooping': False,
'priority': node.metadata.get('routeros/bridge_priority'),
'protocol-mode': 'rstp',
'needs': {
'tag:routeros-vlan',
'tag:routeros-vlan-ports',
'tag:routeros-ip',
},
}
# purge unused vlans
routeros['/interface/vlan'] = {
'purge': {
'id-by': 'name',
},
'needed_by': {
'tag:routeros-vlan',
}
}
routeros['/interface/bridge/vlan'] = {
'purge': {
'id-by': 'vlan-ids',
'keep': {
'dynamic': True,
},
},
'needed_by': {
'tag:routeros-vlan',
}
}

View file

@ -1,26 +0,0 @@
defaults = {}
@metadata_reactor.provides(
'routeros/vlan_ports',
)
def routeros__(metadata):
return {
'routeros': {
'vlan_ports': {
vlan_name: {
'untagged': {
port_name
for port_name, port_conf in metadata.get('routeros/ports').items()
if vlan_name == metadata.get(f'routeros/vlan_groups/{port_conf["vlan_group"]}/untagged')
},
'tagged': {
port_name
for port_name, port_conf in metadata.get('routeros/ports').items()
if vlan_name in metadata.get(f'routeros/vlan_groups/{port_conf["vlan_group"]}/tagged')
},
}
for vlan_name in metadata.get('routeros/vlans').keys()
},
},
}
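A worked example of what this reactor computes, with hypothetical ports, vlan groups and vlans (all names are illustrative):

vlans = {'home': 1, 'iot': 20}
vlan_groups = {
    'access': {'untagged': 'home', 'tagged': set()},
    'trunk':  {'untagged': 'home', 'tagged': {'iot'}},
}
ports = {'ether1': {'vlan_group': 'access'}, 'ether2': {'vlan_group': 'trunk'}}

vlan_ports = {
    vlan_name: {
        'untagged': {p for p, c in ports.items() if vlan_name == vlan_groups[c['vlan_group']]['untagged']},
        'tagged':   {p for p, c in ports.items() if vlan_name in vlan_groups[c['vlan_group']]['tagged']},
    }
    for vlan_name in vlans
}
print(vlan_ports)  # set ordering may vary
# {'home': {'untagged': {'ether1', 'ether2'}, 'tagged': set()},
#  'iot': {'untagged': set(), 'tagged': {'ether2'}}}

The routeros items.py shown above then feeds these sets into the /interface/bridge/vlan entries as the untagged and tagged port lists.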

View file

@ -21,4 +21,3 @@ ClientAliveInterval 30
ClientAliveCountMax 5
AcceptEnv LANG
Subsystem sftp /usr/lib/openssh/sftp-server
HostKey /etc/ssh/ssh_host_managed_key

View file

@ -51,14 +51,14 @@ files = {
],
'skip': dont_touch_sshd,
},
'/etc/ssh/ssh_host_managed_key': {
'/etc/ssh/ssh_host_ed25519_key': {
'content': node.metadata.get('ssh/host_key/private') + '\n',
'mode': '0600',
'triggers': [
'svc_systemd:ssh:restart'
],
},
'/etc/ssh/ssh_host_managed_key.pub': {
'/etc/ssh/ssh_host_ed25519_key.pub': {
'content': node.metadata.get('ssh/host_key/public') + '\n',
'mode': '0644',
'triggers': [

View file

@ -34,19 +34,18 @@ defaults = {
)
def systemd_timer(metadata):
return {
# steam python login is broken: https://github.com/ValvePython/steam/issues/442
# 'systemd-timers': {
# f'steam-chat-logger': {
# 'command': '/opt/steam_chat_logger/steam_chat_logger.py',
# 'when': 'hourly',
# 'user': 'steam_chat_logger',
# 'env': {
# 'DB_NAME': 'steam_chat_logger',
# 'DB_USER': 'steam_chat_logger',
# 'DB_PASSWORD': metadata.get('postgresql/roles/steam_chat_logger/password'),
# **metadata.get('steam_chat_logger'),
# },
# 'working_dir': '/var/lib/steam_chat_logger',
# },
# },
'systemd-timers': {
f'steam-chat-logger': {
'command': '/opt/steam_chat_logger/steam_chat_logger.py',
'when': 'hourly',
'user': 'steam_chat_logger',
'env': {
'DB_NAME': 'steam_chat_logger',
'DB_USER': 'steam_chat_logger',
'DB_PASSWORD': metadata.get('postgresql/roles/steam_chat_logger/password'),
**metadata.get('steam_chat_logger'),
},
'working_dir': '/var/lib/steam_chat_logger',
},
},
}

View file

@ -1,7 +1,7 @@
files = {
'/etc/systemd/journald.conf.d/managed.conf': {
'content': repo.libs.systemd.generate_unitfile({
'Journal': node.metadata.get('systemd-journald'),
'Jorunal': node.metadata.get('systemd-journald'),
}),
'triggers': {
'svc_systemd:systemd-journald:restart',

View file

@ -1,10 +1,3 @@
<%
nameservers = (
node.metadata.get('overwrite_nameservers', []) or
node.metadata.get('nameservers', [])
)
%>\
\
% for nameserver in nameservers:
% for nameserver in sorted(node.metadata.get('nameservers')):
nameserver ${nameserver}
% endfor
% endfor

View file

@ -19,5 +19,5 @@ directories = {
}
svc_systemd = {
'systemd-networkd.service': {},
'systemd-networkd': {},
}

View file

@ -42,8 +42,6 @@ def systemd(metadata):
units[f'{name}.service']['Service']['SuccessExitStatus'] = config['success_exit_status']
if config.get('kill_mode'):
units[f'{name}.service']['Service']['KillMode'] = config['kill_mode']
if config.get('RuntimeMaxSec'):
units[f'{name}.service']['Service']['RuntimeMaxSec'] = config['RuntimeMaxSec']
services[f'{name}.timer'] = {}

View file

@ -24,10 +24,10 @@ for name, unit in node.metadata.get('systemd/units').items():
path = f'/etc/systemd/network/{name}'
dependencies = {
'needed_by': [
'svc_systemd:systemd-networkd.service',
'svc_systemd:systemd-networkd',
],
'triggers': [
'svc_systemd:systemd-networkd.service:restart',
'svc_systemd:systemd-networkd:restart',
],
}
elif extension in ['timer', 'service', 'mount', 'swap', 'target']:

Some files were not shown because too many files have changed in this diff.