Compare commits: master...rufbereits (5 commits)

Author | SHA1 | Date
---|---|---
 | c0277fa8b9 | 
 | d95a8e6d59 | 
 | e9c64ec089 | 
 | ed0295c4f7 | 
 | 1910398b60 | 

159 changed files with 617 additions and 3438 deletions
@@ -37,12 +37,3 @@ fi
telegraf: execd for daemons

TEST

# git signing

git config --global gpg.format ssh
git config --global commit.gpgsign true

git config user.name CroneKorkN
git config user.email i@ckn.li
git config user.signingkey "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILMVroYmswD4tLk6iH+2tvQiyaMe42yfONDsPDIdFv6I"
@@ -23,7 +23,7 @@ for node in nodes:
print(node.run('DEBIAN_FRONTEND=noninteractive apt update').stdout.decode())
print(node.run('DEBIAN_FRONTEND=noninteractive apt list --upgradable').stdout.decode())
if int(node.run('DEBIAN_FRONTEND=noninteractive apt list --upgradable 2> /dev/null | grep upgradable | wc -l').stdout.decode()):
print(node.run('DEBIAN_FRONTEND=noninteractive apt -qy full-upgrade').stdout.decode())
print(node.run('DEBIAN_FRONTEND=noninteractive apt -y dist-upgrade').stdout.decode())

# REBOOT IN ORDER
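The upgradable-package check above simply counts matching lines from apt; an equivalent single-host sketch of that test (illustrative only, not part of the change):

```
# non-zero output means at least one package is upgradable
apt list --upgradable 2>/dev/null | grep -c upgradable
```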
@@ -61,10 +61,8 @@ for node in [
*wireguard_servers,
]:
try:
if node.run('test -e /var/run/reboot-required', may_fail=True).return_code == 0:
if node.run('test -e /var/run/reboot-required').return_code == 0:
print('rebooting', node.name)
print(node.run('systemctl reboot').stdout.decode())
else:
print('not rebooting', node.name)
except Exception as e:
print(e)
@@ -5,17 +5,9 @@ from os.path import realpath, dirname
from sys import argv
from ipaddress import ip_network, ip_interface

if len(argv) != 3:
print(f'usage: {argv[0]} <node> <client>')
exit(1)

repo = Repository(dirname(dirname(realpath(__file__))))

server_node = repo.get_node(argv[1])

if argv[2] not in server_node.metadata.get('wireguard/clients'):
print(f'client {argv[2]} not found in: {server_node.metadata.get("wireguard/clients").keys()}')
exit(1)

data = server_node.metadata.get(f'wireguard/clients/{argv[2]}')

vpn_network = ip_interface(server_node.metadata.get('wireguard/my_ip')).network
@@ -28,7 +20,9 @@ for peer in server_node.metadata.get('wireguard/s2s').values():
if not ip_network(network).subnet_of(vpn_network):
allowed_ips.append(ip_network(network))

conf = f'''
conf = \
f'''>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

[Interface]
PrivateKey = {repo.libs.wireguard.privkey(data['peer_id'])}
ListenPort = 51820
@@ -41,12 +35,11 @@ PresharedKey = {repo.libs.wireguard.psk(data['peer_id'], server_node.metadata.ge
AllowedIPs = {', '.join(str(client_route) for client_route in sorted(allowed_ips))}
Endpoint = {ip_interface(server_node.metadata.get('network/external/ipv4')).ip}:51820
PersistentKeepalive = 10
'''

print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'''

print(conf)
print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')

if input("print qrcode? [Yn]: ").upper() in ['', 'Y']:
if input("print qrcode? [yN]: ").upper() == 'Y':
import pyqrcode
print(pyqrcode.create(conf).terminal(quiet_zone=1))
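As a standalone check of the QR rendering path used above, a minimal sketch (assumes the pyqrcode package is installed; the payload is a placeholder, not a real config):

```
python3 -c "import pyqrcode; print(pyqrcode.create('wg-config-placeholder').terminal(quiet_zone=1))"
```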
@@ -13,9 +13,6 @@
'deb',
'deb-src',
},
'options': { # optional
'aarch': 'amd64',
},
'urls': {
'https://deb.debian.org/debian',
},
@@ -62,7 +62,6 @@ files = {
'/usr/lib/nagios/plugins/check_apt_upgradable': {
'mode': '0755',
},
# /etc/kernel/postinst.d/apt-auto-removal
}

actions = {
@@ -1,31 +1,13 @@
#!/bin/bash

set -u
set -exu

# FIXME: inelegant
% if wol_command:
${wol_command}
% endif

exit=0
failed_paths=""

for path in $(jq -r '.paths | .[]' < /etc/backup/config.json)
do
echo backing up $path
/opt/backup/backup_path "$path"
# set exit to 1 if any backup fails
if [ $? -ne 0 ]
then
echo ERROR: backing up $path failed >&2
exit=5
failed_paths="$failed_paths $path"
fi
done

if [ $exit -ne 0 ]
then
echo "ERROR: failed to backup paths: $failed_paths" >&2
fi

exit $exit
@@ -1,6 +1,6 @@
#!/bin/bash

set -eu
set -exu

path=$1
uuid=$(jq -r .client_uuid < /etc/backup/config.json)
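For orientation, the jq filters used in these two scripts (`.paths | .[]` and `.client_uuid`) imply a config file shaped roughly as below; the keys come from the scripts, the values are purely illustrative:

```
# hypothetical /etc/backup/config.json contents piped through the same filter
echo '{"client_uuid": "1234-abcd", "paths": ["/etc", "/home"]}' | jq -r '.paths | .[]'
# prints one path per line: /etc, then /home
```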
@@ -10,7 +10,7 @@ options {

% if type == 'master':
notify yes;
also-notify { ${' '.join(sorted(f'{ip};' for ip in slave_ips))} };
allow-transfer { ${' '.join(sorted(f'{ip};' for ip in slave_ips))} };
also-notify { ${' '.join([f'{ip};' for ip in slave_ips])} };
allow-transfer { ${' '.join([f'{ip};' for ip in slave_ips])} };
% endif
};
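The only functional difference between the two also-notify/allow-transfer variants above is that one renders the IP list in a deterministic, sorted order; a quick illustration with hypothetical IPs:

```
python3 -c "ips = ['10.0.0.2;', '10.0.0.1;']; print(' '.join(sorted(ips))); print(' '.join(ips))"
# first line is sorted (10.0.0.1; 10.0.0.2;), second keeps insertion order
```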
@@ -19,7 +19,7 @@ directories[f'/var/lib/bind'] = {
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
@@ -29,7 +29,7 @@ files['/etc/default/bind9'] = {
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
@@ -43,7 +43,7 @@ files['/etc/bind/named.conf'] = {
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
@@ -63,7 +63,7 @@ files['/etc/bind/named.conf.options'] = {
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
@@ -93,7 +93,7 @@ files['/etc/bind/named.conf.local'] = {
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
@@ -106,7 +106,7 @@ for view_name, view_conf in master_node.metadata.get('bind/views').items():
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
@@ -127,7 +127,7 @@ for view_name, view_conf in master_node.metadata.get('bind/views').items():
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
],
}
@@ -139,6 +139,6 @@ actions['named-checkconf'] = {
'unless': 'named-checkconf -z',
'needs': [
'svc_systemd:bind9',
'svc_systemd:bind9:reload',
'svc_systemd:bind9:restart',
]
}
@@ -3,7 +3,6 @@ from json import dumps
h = repo.libs.hashable.hashable
repo.libs.bind.repo = repo


defaults = {
'apt': {
'packages': {
@@ -1,10 +1,6 @@
from shlex import quote


defaults = {
'build-ci': {},
}

@metadata_reactor.provides(
'users/build-ci/authorized_users',
'sudoers/build-ci',
@@ -22,7 +18,7 @@ def ssh_keys(metadata):
},
'sudoers': {
'build-ci': {
f"/usr/bin/chown -R build-ci\\:{quote(ci['group'])} {quote(ci['path'])}"
f"/usr/bin/chown -R build-ci\:{quote(ci['group'])} {quote(ci['path'])}"
for ci in metadata.get('build-ci').values()
}
},
@@ -9,7 +9,7 @@ defaults = {
'crystal': {
# https://software.opensuse.org/download.html?project=devel%3Alanguages%3Acrystal&package=crystal
'urls': {
'http://download.opensuse.org/repositories/devel:/languages:/crystal/Debian_Testing/',
'https://download.opensuse.org/repositories/devel:/languages:/crystal/Debian_Testing/',
},
'suites': {
'/',
@@ -6,7 +6,7 @@ ssl_cert = </var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')
ssl_key = </var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')}/privkey.pem
ssl_dh = </etc/dovecot/dhparam.pem
ssl_client_ca_dir = /etc/ssl/certs
mail_location = maildir:${node.metadata.get('mailserver/maildir')}/%u:INDEX=${node.metadata.get('mailserver/maildir')}/index/%u
mail_location = maildir:~
mail_plugins = fts fts_xapian

namespace inbox {
@@ -20,10 +20,6 @@ directories = {
'owner': 'vmail',
'group': 'vmail',
},
'/var/vmail/index': {
'owner': 'vmail',
'group': 'vmail',
},
'/var/vmail/sieve': {
'owner': 'vmail',
'group': 'vmail',
@@ -1,23 +0,0 @@
Pg Pass workaround: set manually:

```
root@freescout /ro psql freescout
psql (15.6 (Debian 15.6-0+deb12u1))
Type "help" for help.

freescout=# \password freescout
Enter new password for user "freescout":
Enter it again:
freescout=#
\q
```


# problems

# check if /opt/freescout/.env has been reset
# check `psql -h localhost -d freescout -U freescout -W` with the pw from .env
# chown -R www-data:www-data /opt/freescout
# sudo su - www-data -c 'php /opt/freescout/artisan freescout:clear-cache' -s /bin/bash
# javascript funny? `sudo su - www-data -c 'php /opt/freescout/artisan storage:link' -s /bin/bash`
# user images gone? restore them from the backup: `/opt/freescout/.zfs/snapshot/zfs-auto-snap_hourly-2024-11-22-1700/storage/app/public/users` `./customers`
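For reference, the interactive \password step above can also be done non-interactively; a minimal sketch, assuming peer-authenticated access as the postgres superuser and a placeholder password:

```
sudo -u postgres psql -d freescout -c "ALTER ROLE freescout WITH PASSWORD 'NEWPASSWORD';"
```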
@@ -1,66 +0,0 @@
# https://github.com/freescout-helpdesk/freescout/wiki/Installation-Guide
run_as = repo.libs.tools.run_as
php_version = node.metadata.get('php/version')


directories = {
'/opt/freescout': {
'owner': 'www-data',
'group': 'www-data',
# chown -R www-data:www-data /opt/freescout
},
}

actions = {
# 'clone_freescout': {
# 'command': run_as('www-data', 'git clone https://github.com/freescout-helpdesk/freescout.git /opt/freescout'),
# 'unless': 'test -e /opt/freescout/.git',
# 'needs': [
# 'pkg_apt:git',
# 'directory:/opt/freescout',
# ],
# },
# 'pull_freescout': {
# 'command': run_as('www-data', 'git -C /opt/freescout fetch origin dist && git -C /opt/freescout reset --hard origin/dist && git -C /opt/freescout clean -f'),
# 'unless': run_as('www-data', 'git -C /opt/freescout fetch origin && git -C /opt/freescout status -uno | grep -q "Your branch is up to date"'),
# 'needs': [
# 'action:clone_freescout',
# ],
# 'triggers': [
# 'action:freescout_artisan_update',
# f'svc_systemd:php{php_version}-fpm.service:restart',
# ],
# },
# 'freescout_artisan_update': {
# 'command': run_as('www-data', 'php /opt/freescout/artisan freescout:after-app-update'),
# 'triggered': True,
# 'needs': [
# f'svc_systemd:php{php_version}-fpm.service:restart',
# 'action:pull_freescout',
# ],
# },
}

# svc_systemd = {
# f'freescout-cron.service': {},
# }

# files = {
# '/opt/freescout/.env': {
# # https://github.com/freescout-helpdesk/freescout/blob/dist/.env.example
# # Every time you are making changes in .env file, in order changes to take an effect you need to run:
# # ´sudo su - www-data -c 'php /opt/freescout/artisan freescout:clear-cache' -s /bin/bash´
# 'owner': 'www-data',
# 'content': '\n'.join(
# f'{k}={v}' for k, v in
# sorted(node.metadata.get('freescout/env').items())
# ) + '\n',
# 'needs': [
# 'directory:/opt/freescout',
# 'action:clone_freescout',
# ],
# },
# }

#sudo su - www-data -s /bin/bash -c 'php /opt/freescout/artisan freescout:create-user --role admin --firstName M --lastName W --email freescout@freibrief.net --password gyh.jzv2bnf6hvc.HKG --no-interaction'
#sudo su - www-data -s /bin/bash -c 'php /opt/freescout/artisan freescout:create-user --role admin --firstName M --lastName W --email freescout@freibrief.net --password gyh.jzv2bnf6hvc.HKG --no-interaction'
@@ -1,121 +0,0 @@
from base64 import b64decode

# hash: SCRAM-SHA-256$4096:tQNfqQi7seqNDwJdHqCHbg==$r3ibECluHJaY6VRwpvPqrtCjgrEK7lAkgtUO8/tllTU=:+eeo4M0L2SowfyHFxT2FRqGzezve4ZOEocSIo11DATA=
database_password = repo.vault.password_for(f'{node.name} postgresql freescout').value

defaults = {
'apt': {
'packages': {
'git': {},
'php': {},
'php-pgsql': {},
'php-fpm': {},
'php-mbstring': {},
'php-xml': {},
'php-imap': {},
'php-zip': {},
'php-gd': {},
'php-curl': {},
'php-intl': {},
},
},
'freescout': {
'env': {
'APP_TIMEZONE': 'Europe/Berlin',
'DB_CONNECTION': 'pgsql',
'DB_HOST': '127.0.0.1',
'DB_PORT': '5432',
'DB_DATABASE': 'freescout',
'DB_USERNAME': 'freescout',
'DB_PASSWORD': database_password,
'APP_KEY': 'base64:' + repo.vault.random_bytes_as_base64_for(f'{node.name} freescout APP_KEY', length=32).value
},
},
'php': {
'php.ini': {
'cgi': {
'fix_pathinfo': '0',
},
},
},
'postgresql': {
'roles': {
'freescout': {
'password_hash': repo.libs.postgres.generate_scram_sha_256(
database_password,
b64decode(repo.vault.random_bytes_as_base64_for(f'{node.name} postgres freescout', length=16).value.encode()),
),
},
},
'databases': {
'freescout': {
'owner': 'freescout',
},
},
},
# 'systemd': {
# 'units': {
# f'freescout-cron.service': {
# 'Unit': {
# 'Description': 'Freescout Cron',
# 'After': 'network.target',
# },
# 'Service': {
# 'User': 'www-data',
# 'Nice': 10,
# 'ExecStart': f"/usr/bin/php /opt/freescout/artisan schedule:run"
# },
# 'Install': {
# 'WantedBy': {
# 'multi-user.target'
# }
# },
# }
# },
# },
'systemd-timers': {
'freescout-cron': {
'command': '/usr/bin/php /opt/freescout/artisan schedule:run',
'when': '*-*-* *:*:00',
'RuntimeMaxSec': '180',
'user': 'www-data',
},
},
'zfs': {
'datasets': {
'tank/freescout': {
'mountpoint': '/opt/freescout',
},
},
},
}




@metadata_reactor.provides(
'freescout/env/APP_URL',
)
def freescout(metadata):
return {
'freescout': {
'env': {
'APP_URL': 'https://' + metadata.get('freescout/domain') + '/',
},
},
}


@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('freescout/domain'): {
'content': 'freescout/vhost.conf',
},
},
},
}
@@ -40,7 +40,7 @@ ENABLE_OPENID_SIGNUP = false
[service]
REGISTER_EMAIL_CONFIRM = true
ENABLE_NOTIFY_MAIL = true
DISABLE_REGISTRATION = true
DISABLE_REGISTRATION = false
ALLOW_ONLY_EXTERNAL_REGISTRATION = false
ENABLE_CAPTCHA = false
REQUIRE_SIGNIN_VIEW = false
@@ -118,7 +118,7 @@ def nginx(metadata):
'content': 'nginx/proxy_pass.conf',
'context': {
'target': 'http://127.0.0.1:3500',
},
}
},
},
},
@@ -26,20 +26,14 @@ actions['reset_grafana_admin_password'] = {

directories = {
'/etc/grafana': {},
'/etc/grafana/provisioning': {
'owner': 'grafana',
'group': 'grafana',
},
'/etc/grafana/provisioning': {},
'/etc/grafana/provisioning/datasources': {
'purge': True,
},
'/etc/grafana/provisioning/dashboards': {
'purge': True,
},
'/var/lib/grafana': {
'owner': 'grafana',
'group': 'grafana',
},
'/var/lib/grafana': {},
'/var/lib/grafana/dashboards': {
'owner': 'grafana',
'group': 'grafana',
@@ -53,8 +47,6 @@ directories = {
files = {
'/etc/grafana/grafana.ini': {
'content': repo.libs.ini.dumps(node.metadata.get('grafana/config')),
'owner': 'grafana',
'group': 'grafana',
'triggers': [
'svc_systemd:grafana-server:restart',
],
@@ -64,8 +56,6 @@ files = {
'apiVersion': 1,
'datasources': list(node.metadata.get('grafana/datasources').values()),
}),
'owner': 'grafana',
'group': 'grafana',
'triggers': [
'svc_systemd:grafana-server:restart',
],
@@ -82,8 +72,6 @@ files = {
},
}],
}),
'owner': 'grafana',
'group': 'grafana',
'triggers': [
'svc_systemd:grafana-server:restart',
],
@@ -26,15 +26,9 @@ defaults = {
'config': {
'server': {
'http_port': 8300,
'http_addr': '127.0.0.1',
'enable_gzip': True,
},
'database': {
'type': 'postgres',
'host': '127.0.0.1:5432',
'name': 'grafana',
'user': 'grafana',
'password': postgres_password,
'url': f'postgres://grafana:{postgres_password}@localhost:5432/grafana',
},
'remote_cache': {
'type': 'redis',
@@ -69,9 +63,6 @@ defaults = {
},
},
},
'nginx': {
'has_websockets': True,
},
}

@@ -147,7 +138,6 @@ def dns(metadata):
def nginx(metadata):
return {
'nginx': {
'has_websockets': True,
'vhosts': {
metadata.get('grafana/hostname'): {
'content': 'grafana/vhost.conf',
@@ -1,23 +0,0 @@
https://github.com/home-assistant/supervised-installer?tab=readme-ov-file
https://github.com/home-assistant/os-agent/tree/main?tab=readme-ov-file#using-home-assistant-supervised-on-debian
https://docs.docker.com/engine/install/debian/




https://www.home-assistant.io/installation/linux#install-home-assistant-supervised
https://github.com/home-assistant/supervised-installer
https://github.com/home-assistant/architecture/blob/master/adr/0014-home-assistant-supervised.md

DATA_SHARE=/usr/share/hassio dpkg --force-confdef --force-confold -i homeassistant-supervised.deb

fresh Debian
install HA
check that it works
then apply bw on top of it


https://www.home-assistant.io/integrations/http/#ssl_certificate

`wget "$(curl -L https://api.github.com/repos/home-assistant/supervised-installer/releases/latest | jq -r '.assets[0].browser_download_url')" -O homeassistant-supervised.deb && dpkg -i homeassistant-supervised.deb`
@@ -1,30 +0,0 @@
from shlex import quote


version = node.metadata.get('homeassistant/os_agent_version')

directories = {
'/usr/share/hassio': {},
}

actions = {
'install_os_agent': {
'command': ' && '.join([
f'wget -O /tmp/os-agent.deb https://github.com/home-assistant/os-agent/releases/download/{quote(version)}/os-agent_{quote(version)}_linux_aarch64.deb',
'DEBIAN_FRONTEND=noninteractive dpkg -i /tmp/os-agent.deb',
]),
'unless': f'test "$(apt -qq list os-agent | cut -d" " -f2)" = "{quote(version)}"',
'needs': {
'pkg_apt:',
'zfs_dataset:tank/homeassistant',
},
},
'install_homeassistant_supervised': {
'command': 'wget -O /tmp/homeassistant-supervised.deb https://github.com/home-assistant/supervised-installer/releases/latest/download/homeassistant-supervised.deb && apt install /tmp/homeassistant-supervised.deb',
'unless': 'apt -qq list homeassistant-supervised | grep -q "installed"',
'needs': {
'action:install_os_agent',
},
},
}
@@ -1,65 +0,0 @@
defaults = {
'apt': {
'packages': {
# homeassistant-supervised
'apparmor': {},
'bluez': {},
'cifs-utils': {},
'curl': {},
'dbus': {},
'jq': {},
'libglib2.0-bin': {},
'lsb-release': {},
'network-manager': {},
'nfs-common': {},
'systemd-journal-remote': {},
'systemd-resolved': {},
'udisks2': {},
'wget': {},
# docker
'docker-ce': {},
'docker-ce-cli': {},
'containerd.io': {},
'docker-buildx-plugin': {},
'docker-compose-plugin': {},
},
'sources': {
# docker: https://docs.docker.com/engine/install/debian/#install-using-the-repository
'docker': {
'urls': {
'https://download.docker.com/linux/debian',
},
'suites': {
'{codename}',
},
'components': {
'stable',
},
},
},
},
'zfs': {
'datasets': {
'tank/homeassistant': {
'mountpoint': '/usr/share/hassio',
'needed_by': {
'directory:/usr/share/hassio',
},
},
},
},
}

@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('homeassistant/domain'): {
'content': 'homeassistant/vhost.conf',
},
},
},
}
bundles/homeassistant/items.py (new file, 20 lines)
@@ -0,0 +1,20 @@
users = {
'homeassistant': {
'home': '/var/lib/homeassistant',
},
}

directories = {
'/var/lib/homeassistant': {
'owner': 'homeassistant',
},
'/var/lib/homeassistant/config': {
'owner': 'homeassistant',
},
'/var/lib/homeassistant/venv': {
'owner': 'homeassistant',
},
}


# https://wiki.instar.com/de/Software/Linux/Home_Assistant/
bundles/homeassistant/metadata.py (new file, 20 lines)
@@ -0,0 +1,20 @@
defaults = {
'apt': {
'packages': {
'python3': {},
'python3-dev': {},
'python3-pip': {},
'python3-venv': {},
'libffi-dev': {},
'libssl-dev': {},
'libjpeg-dev': {},
'zlib1g-dev': {},
'autoconf': {},
'build-essential': {},
'libopenjp2-7': {},
'libtiff5': {},
'libturbojpeg0-dev': {},
'tzdata': {},
},
},
}
@@ -13,9 +13,9 @@ apply Notification "mail-icingaadmin" to Host {
user_groups = host.vars.notification.mail.groups
users = host.vars.notification.mail.users

//interval = 2h



//vars.notification_logtosyslog = true

assign where host.vars.notification.mail
}
@@ -25,9 +25,9 @@ apply Notification "mail-icingaadmin" to Service {
user_groups = host.vars.notification.mail.groups
users = host.vars.notification.mail.users

//interval = 2h



//vars.notification_logtosyslog = true

assign where host.vars.notification.mail
}
@@ -269,7 +269,7 @@ svc_systemd = {
'icinga2.service': {
'needs': [
'pkg_apt:icinga2-ido-pgsql',
'svc_systemd:postgresql.service',
'svc_systemd:postgresql',
],
},
}
@@ -11,7 +11,7 @@ defaults = {
'php-imagick': {},
'php-pgsql': {},
'icingaweb2': {},
#'icingaweb2-module-monitoring': {}, # ?
'icingaweb2-module-monitoring': {},
},
'sources': {
'icinga': {
@@ -1,3 +0,0 @@
# svc_systemd = {
# 'ifupdown.service': {},
# }
@@ -1,21 +0,0 @@
from json import dumps
from bundlewrap.metadata import MetadataJSONEncoder

files = {
'/etc/kea/kea-dhcp4.conf': {
'content': dumps(node.metadata.get('kea'), indent=4, sort_keys=True, cls=MetadataJSONEncoder),
'triggers': [
'svc_systemd:kea-dhcp4-server:restart',
],
},
}

svc_systemd = {
'kea-dhcp4-server': {
'needs': [
'pkg_apt:kea-dhcp4-server',
'file:/etc/kea/kea-dhcp4.conf',
'svc_systemd:systemd-networkd.service:restart',
],
},
}
@@ -1,96 +0,0 @@
from ipaddress import ip_interface, ip_network

hashable = repo.libs.hashable.hashable


defaults = {
'apt': {
'packages': {
'kea-dhcp4-server': {},
},
},
'kea': {
'Dhcp4': {
'interfaces-config': {
'interfaces': set(),
},
'lease-database': {
'type': 'memfile',
'lfc-interval': 3600
},
'subnet4': set(),
'loggers': set([
hashable({
'name': 'kea-dhcp4',
'output_options': [
{
'output': 'syslog',
}
],
'severity': 'INFO',
}),
]),
},
},
}


@metadata_reactor.provides(
'kea/Dhcp4/interfaces-config/interfaces',
'kea/Dhcp4/subnet4',
)
def subnets(metadata):
subnet4 = set()
interfaces = set()
reservations = set(
hashable({
'hw-address': network_conf['mac'],
'ip-address': str(ip_interface(network_conf['ipv4']).ip),
})
for other_node in repo.nodes
for network_conf in other_node.metadata.get('network', {}).values()
if 'mac' in network_conf
)

for network_name, network_conf in metadata.get('network').items():
dhcp_server_config = network_conf.get('dhcp_server_config', None)

if dhcp_server_config:
_network = ip_network(dhcp_server_config['subnet'])

subnet4.add(hashable({
'subnet': dhcp_server_config['subnet'],
'pools': [
{
'pool': f'{dhcp_server_config['pool_from']} - {dhcp_server_config['pool_to']}',
},
],
'option-data': [
{
'name': 'routers',
'data': dhcp_server_config['router'],
},
{
'name': 'domain-name-servers',
'data': '10.0.0.1',
},
],
'reservations': set(
reservation
for reservation in reservations
if ip_interface(reservation['ip-address']).ip in _network
),
}))

interfaces.add(network_conf.get('interface', network_name))

return {
'kea': {
'Dhcp4': {
'interfaces-config': {
'interfaces': interfaces,
},
'subnet4': subnet4,
},
},
}
@@ -1,36 +1,36 @@
hostname "CroneKorkN : ${name}"
sv_contact "admin@sublimity.de"


// assign server to steam group
sv_steamgroup "${','.join(steamgroups)}"

rcon_password "${rcon_password}"


// no annoying message of the day
motd_enabled 0


// enable cheats
sv_cheats 1


// allow inconsistent files on clients (weapon mods for example)
sv_consistency 0


// connect from internet
sv_lan 0


// join game at any point
sv_allow_lobby_connect_only 0


// allowed modes
sv_gametypes "coop,realism,survival,versus,teamversus,scavenge,teamscavenge"


// network
sv_minrate 30000
sv_maxrate 60000
sv_mincmdrate 66
sv_maxcmdrate 101


// logging
sv_logsdir "logs-${name}" //Folder in the game directory where server logs will be stored.
log on //Creates a logfile (on | off)
sv_logecho 0 //default 0; Echo log information to the console.
@@ -56,7 +56,6 @@ for domain in node.metadata.get('letsencrypt/domains').keys():
'unless': f'/etc/dehydrated/letsencrypt-ensure-some-certificate {domain} true',
'needs': {
'file:/etc/dehydrated/letsencrypt-ensure-some-certificate',
'pkg_apt:dehydrated',
},
'needed_by': {
'svc_systemd:nginx',
@@ -1,41 +0,0 @@
from shlex import quote

def generate_sysctl_key_value_pairs_from_json(json_data, parents=[]):
if isinstance(json_data, dict):
for key, value in json_data.items():
yield from generate_sysctl_key_value_pairs_from_json(value, [*parents, key])
elif isinstance(json_data, list):
raise ValueError(f"List not supported: '{json_data}'")
else:
# If it's a leaf node, yield the path
yield (parents, json_data)

key_value_pairs = generate_sysctl_key_value_pairs_from_json(node.metadata.get('sysctl'))


files= {
'/etc/sysctl.conf': {
'content': '\n'.join(
sorted(
f"{'.'.join(path)}={value}"
for path, value in key_value_pairs
),
),
'triggers': [
'svc_systemd:systemd-sysctl.service:restart',
],
},
}

svc_systemd = {
'systemd-sysctl.service': {},
}

for path, value in key_value_pairs:
actions[f'reload_sysctl.conf_{path}'] = {
'command': f"sysctl --values {'.'.join(path)} | grep -q {quote('^'+value+'$')}",
'needs': [
f'action:systemd-sysctl.service',
f'action:systemd-sysctl.service:restart',
],
}
@@ -1,3 +0,0 @@
defaults = {
'sysctl': {},
}
@@ -20,19 +20,18 @@ files = {
}

actions = {
'systemd-locale': {
'command': f'localectl set-locale LANG="{default_locale}"',
'unless': f'localectl | grep -Fi "system locale" | grep -Fi "{default_locale}"',
'triggers': {
'action:locale-gen',
},
},
'locale-gen': {
'command': 'locale-gen',
'triggered': True,
'needs': {
'pkg_apt:locales',
'action:systemd-locale',
},
},
'systemd-locale': {
'command': f'localectl set-locale LANG="{default_locale}"',
'unless': f'localectl | grep -Fi "system locale" | grep -Fi "{default_locale}"',
'preceded_by': {
'action:locale-gen',
},
},
}
@@ -2,5 +2,5 @@

cd "$OLDPWD"

export BW_ITEM_WORKERS=$(expr "$(sysctl -n hw.logicalcpu)" '*' 12 '/' 10)
export BW_ITEM_WORKERS=$(expr "$(nproc)" '*' 12 '/' 10)
export BW_NODE_WORKERS=$(expr 320 '/' "$BW_ITEM_WORKERS")
bundles/macbook/files/freshclam.conf (new file, 1 line)
@@ -0,0 +1 @@
DatabaseMirror database.clamav.net
@@ -2,5 +2,7 @@

cd "$OLDPWD"

PATH_add "/opt/homebrew/opt/gnu-sed/libexec/gnubin"
PATH_add "/opt/homebrew/opt/grep/libexec/gnubin"
GNU_PATH="$HOME/.local/gnu_bin"
mkdir -p "$GNU_PATH"
test -f "$GNU_PATH/sed" || ln -s "$(which gsed)" "$GNU_PATH/sed"
PATH_add "$GNU_PATH"
@@ -18,7 +18,7 @@ git -C ~/.zsh/oh-my-zsh pull
brew upgrade
brew upgrade --cask --greedy

pyenv install --skip-existing
pyenv install --keep-existing

sudo softwareupdate -ia --verbose
@@ -5,5 +5,5 @@ cd "$OLDPWD"
if test -f .venv/bin/python && test "$(realpath .venv/bin/python)" != "$(realpath "$(pyenv which python)")"
then
echo "rebuilding venv for new python version"
rm -rf .venv .pip_upgrade_timestamp
rm -rf .venv
fi
@@ -19,9 +19,5 @@ if test "$DELTA" -gt 86400
then
python3 -m pip --require-virtualenv install pip wheel --upgrade
python3 -m pip --require-virtualenv install -r requirements.txt --upgrade
if test -e optional-requirements.txt
then
python3 -m pip --require-virtualenv install -r optional-requirements.txt --upgrade
fi
date +%s > .pip_upgrade_timestamp
fi
@@ -1,9 +1,6 @@
export PATH=~/.bin:$PATH
export PATH=~/.cargo/bin:$PATH

export ZSH=~/.zsh/oh-my-zsh
export ZSH_HOSTNAME='sm'
ZSH_THEME="bw"
ZSH_THEME="ckn"
HIST_STAMPS="yyyy/mm/dd"
plugins=(
zsh-autosuggestions
@@ -13,6 +10,13 @@ source $ZSH/oh-my-zsh.sh

ulimit -S -n 24000

sshn() {
ssh "$(tr '.' ' ' <<< "$1" | tac -s ' ' | xargs | tr ' ' '.').smhss.de"
}
pingn() {
ping "$(tr '.' ' ' <<< "$1" | tac -s ' ' | xargs | tr ' ' '.').smhss.de"
}

antivir() {
printf 'scanning for viruses' && sleep 1 && printf '.' && sleep 1 && printf '.' && sleep 1 && printf '.' &&
sleep 1 && echo '\nyour computer is safe!'
@@ -22,12 +26,3 @@ eval "$(rbenv init -)"
eval "$(pyenv init -)"
eval "$(direnv hook zsh)"
eval "$(op completion zsh)"; compdef _op op

# //S/M

sshn() {
ssh "$(tr '.' ' ' <<< "$1" | tac -s ' ' | xargs | tr ' ' '.').smhss.de"
}
pingn() {
ping "$(tr '.' ' ' <<< "$1" | tac -s ' ' | xargs | tr ' ' '.').smhss.de"
}
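For clarity, the sshn/pingn helpers reverse the dot-separated node name before appending the domain; a quick check of the transformation with a hypothetical node name:

```
echo "$(tr '.' ' ' <<< 'web.home' | tac -s ' ' | xargs | tr ' ' '.').smhss.de"
# -> home.web.smhss.de
```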
@@ -22,12 +22,6 @@ files['/Users/mwiegand/.bin/macbook-update'] = {
'mode': '755',
}

with open(f'{repo.path}/bundles/zsh/files/bw.zsh-theme') as f:
files['/Users/mwiegand/.zsh/oh-my-zsh/themes/bw.zsh-theme'] = {
'content': f.read(),
'mode': '0644',
}

# direnv

directories['/Users/mwiegand/.local/share/direnv'] = {}
@@ -37,6 +31,15 @@ files['/Users/mwiegand/.local/share/direnv/venv'] = {}
files['/Users/mwiegand/.local/share/direnv/bundlewrap'] = {}


# clamav

files['/opt/homebrew/etc/clamav/freshclam.conf'] = {
'group': 'admin',
}
# run me baby one more time:
# freshclam && clamscan --infected --recursive --exclude-dir ~/Library/Mail ~


##################

for element in [*files.values(), *directories.values()]:
@@ -1,3 +1,5 @@
defaults = {
'brew': {},
'brew': {
'clamav',
},
}
@ -1,22 +0,0 @@
|
|||
# This is the mailman extension configuration file to enable HyperKitty as an
|
||||
# archiver. Remember to add the following lines in the mailman.cfg file:
|
||||
#
|
||||
# [archiver.hyperkitty]
|
||||
# class: mailman_hyperkitty.Archiver
|
||||
# enable: yes
|
||||
# configuration: /etc/mailman3/mailman-hyperkitty.cfg
|
||||
#
|
||||
|
||||
[general]
|
||||
|
||||
# This is your HyperKitty installation, preferably on the localhost. This
|
||||
# address will be used by Mailman to forward incoming emails to HyperKitty
|
||||
# for archiving. It does not need to be publicly available, in fact it's
|
||||
# better if it is not.
|
||||
# However, if your Mailman installation is accessed via HTTPS, the URL needs
|
||||
# to match your SSL certificate (e.g. https://lists.example.com/hyperkitty).
|
||||
base_url: http://${hostname}/mailman3/hyperkitty/
|
||||
|
||||
# The shared api_key, must be identical except for quoting to the value of
|
||||
# MAILMAN_ARCHIVER_KEY in HyperKitty's settings.
|
||||
api_key: ${archiver_key}
|
|
@ -1,190 +0,0 @@
|
|||
ACCOUNT_EMAIL_VERIFICATION='none'
|
||||
|
||||
# This file is imported by the Mailman Suite. It is used to override
|
||||
# the default settings from /usr/share/mailman3-web/settings.py.
|
||||
|
||||
# SECURITY WARNING: keep the secret key used in production secret!
|
||||
SECRET_KEY = '${secret_key}'
|
||||
|
||||
ADMINS = (
|
||||
('Mailman Suite Admin', 'root@localhost'),
|
||||
)
|
||||
|
||||
# Hosts/domain names that are valid for this site; required if DEBUG is False
|
||||
# See https://docs.djangoproject.com/en/1.8/ref/settings/#allowed-hosts
|
||||
# Set to '*' per default in the Deian package to allow all hostnames. Mailman3
|
||||
# is meant to run behind a webserver reverse proxy anyway.
|
||||
ALLOWED_HOSTS = [
|
||||
'${hostname}',
|
||||
]
|
||||
|
||||
# Mailman API credentials
|
||||
MAILMAN_REST_API_URL = 'http://localhost:8001'
|
||||
MAILMAN_REST_API_USER = 'restadmin'
|
||||
MAILMAN_REST_API_PASS = '${api_password}'
|
||||
MAILMAN_ARCHIVER_KEY = '${archiver_key}'
|
||||
MAILMAN_ARCHIVER_FROM = ('127.0.0.1', '::1')
|
||||
|
||||
# Application definition
|
||||
|
||||
INSTALLED_APPS = (
|
||||
'hyperkitty',
|
||||
'postorius',
|
||||
'django_mailman3',
|
||||
# Uncomment the next line to enable the admin:
|
||||
'django.contrib.admin',
|
||||
# Uncomment the next line to enable admin documentation:
|
||||
# 'django.contrib.admindocs',
|
||||
'django.contrib.auth',
|
||||
'django.contrib.contenttypes',
|
||||
'django.contrib.sessions',
|
||||
'django.contrib.sites',
|
||||
'django.contrib.messages',
|
||||
'django.contrib.staticfiles',
|
||||
'rest_framework',
|
||||
'django_gravatar',
|
||||
'compressor',
|
||||
'haystack',
|
||||
'django_extensions',
|
||||
'django_q',
|
||||
'allauth',
|
||||
'allauth.account',
|
||||
'allauth.socialaccount',
|
||||
'django_mailman3.lib.auth.fedora',
|
||||
#'allauth.socialaccount.providers.openid',
|
||||
#'allauth.socialaccount.providers.github',
|
||||
#'allauth.socialaccount.providers.gitlab',
|
||||
#'allauth.socialaccount.providers.google',
|
||||
#'allauth.socialaccount.providers.facebook',
|
||||
#'allauth.socialaccount.providers.twitter',
|
||||
#'allauth.socialaccount.providers.stackexchange',
|
||||
)
|
||||
|
||||
|
||||
# Database
|
||||
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
|
||||
|
||||
DATABASES = {
|
||||
'default': {
|
||||
# Use 'sqlite3', 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
|
||||
#'ENGINE': 'django.db.backends.sqlite3',
|
||||
'ENGINE': 'django.db.backends.postgresql_psycopg2',
|
||||
#'ENGINE': 'django.db.backends.mysql',
|
||||
# DB name or path to database file if using sqlite3.
|
||||
#'NAME': '/var/lib/mailman3/web/mailman3web.db',
|
||||
'NAME': 'mailman',
|
||||
# The following settings are not used with sqlite3:
|
||||
'USER': 'mailman',
|
||||
'PASSWORD': '${db_password}',
|
||||
# HOST: empty for localhost through domain sockets or '127.0.0.1' for
|
||||
# localhost through TCP.
|
||||
'HOST': '127.0.0.1',
|
||||
# PORT: set to empty string for default.
|
||||
'PORT': '5432',
|
||||
# OPTIONS: Extra parameters to use when connecting to the database.
|
||||
'OPTIONS': {
|
||||
# Set sql_mode to 'STRICT_TRANS_TABLES' for MySQL. See
|
||||
# https://docs.djangoproject.com/en/1.11/ref/
|
||||
# databases/#setting-sql-mode
|
||||
#'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# If you're behind a proxy, use the X-Forwarded-Host header
|
||||
# See https://docs.djangoproject.com/en/1.8/ref/settings/#use-x-forwarded-host
|
||||
USE_X_FORWARDED_HOST = True
|
||||
|
||||
# And if your proxy does your SSL encoding for you, set SECURE_PROXY_SSL_HEADER
|
||||
# https://docs.djangoproject.com/en/1.8/ref/settings/#secure-proxy-ssl-header
|
||||
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
|
||||
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_SCHEME', 'https')
|
||||
|
||||
# Other security settings
|
||||
# SECURE_SSL_REDIRECT = True
|
||||
# If you set SECURE_SSL_REDIRECT to True, make sure the SECURE_REDIRECT_EXEMPT
|
||||
# contains at least this line:
|
||||
# SECURE_REDIRECT_EXEMPT = [
|
||||
# "archives/api/mailman/.*", # Request from Mailman.
|
||||
# ]
|
||||
# SESSION_COOKIE_SECURE = True
|
||||
# SECURE_CONTENT_TYPE_NOSNIFF = True
|
||||
# SECURE_BROWSER_XSS_FILTER = True
|
||||
# CSRF_COOKIE_SECURE = True
|
||||
# CSRF_COOKIE_HTTPONLY = True
|
||||
# X_FRAME_OPTIONS = 'DENY'
|
||||
|
||||
|
||||
# Internationalization
|
||||
# https://docs.djangoproject.com/en/1.8/topics/i18n/
|
||||
|
||||
LANGUAGE_CODE = 'en-us'
|
||||
|
||||
TIME_ZONE = 'UTC'
|
||||
|
||||
USE_I18N = True
|
||||
USE_L10N = True
|
||||
USE_TZ = True
|
||||
|
||||
|
||||
# Set default domain for email addresses.
|
||||
EMAILNAME = 'localhost.local'
|
||||
|
||||
# If you enable internal authentication, this is the address that the emails
|
||||
# will appear to be coming from. Make sure you set a valid domain name,
|
||||
# otherwise the emails may get rejected.
|
||||
# https://docs.djangoproject.com/en/1.8/ref/settings/#default-from-email
|
||||
# DEFAULT_FROM_EMAIL = "mailing-lists@you-domain.org"
|
||||
DEFAULT_FROM_EMAIL = 'postorius@{}'.format(EMAILNAME)
|
||||
|
||||
# If you enable email reporting for error messages, this is where those emails
|
||||
# will appear to be coming from. Make sure you set a valid domain name,
|
||||
# otherwise the emails may get rejected.
|
||||
# https://docs.djangoproject.com/en/1.8/ref/settings/#std:setting-SERVER_EMAIL
|
||||
# SERVER_EMAIL = 'root@your-domain.org'
|
||||
SERVER_EMAIL = 'root@{}'.format(EMAILNAME)
|
||||
|
||||
|
||||
# Django Allauth
|
||||
ACCOUNT_DEFAULT_HTTP_PROTOCOL = "https"
|
||||
|
||||
|
||||
#
|
||||
# Social auth
|
||||
#
|
||||
SOCIALACCOUNT_PROVIDERS = {
|
||||
#'openid': {
|
||||
# 'SERVERS': [
|
||||
# dict(id='yahoo',
|
||||
# name='Yahoo',
|
||||
# openid_url='http://me.yahoo.com'),
|
||||
# ],
|
||||
#},
|
||||
#'google': {
|
||||
# 'SCOPE': ['profile', 'email'],
|
||||
# 'AUTH_PARAMS': {'access_type': 'online'},
|
||||
#},
|
||||
#'facebook': {
|
||||
# 'METHOD': 'oauth2',
|
||||
# 'SCOPE': ['email'],
|
||||
# 'FIELDS': [
|
||||
# 'email',
|
||||
# 'name',
|
||||
# 'first_name',
|
||||
# 'last_name',
|
||||
# 'locale',
|
||||
# 'timezone',
|
||||
# ],
|
||||
# 'VERSION': 'v2.4',
|
||||
#},
|
||||
}
|
||||
|
||||
# On a production setup, setting COMPRESS_OFFLINE to True will bring a
|
||||
# significant performance improvement, as CSS files will not need to be
|
||||
# recompiled on each requests. It means running an additional "compress"
|
||||
# management command after each code upgrade.
|
||||
# http://django-compressor.readthedocs.io/en/latest/usage/#offline-compression
|
||||
COMPRESS_OFFLINE = True
|
||||
|
||||
POSTORIUS_TEMPLATE_BASE_URL = 'http://${hostname}/mailman3/'
|
|
@ -1,277 +0,0 @@
|
|||
# Copyright (C) 2008-2017 by the Free Software Foundation, Inc.
|
||||
#
|
||||
# This file is part of GNU Mailman.
|
||||
#
|
||||
# GNU Mailman is free software: you can redistribute it and/or modify it under
|
||||
# the terms of the GNU General Public License as published by the Free
|
||||
# Software Foundation, either version 3 of the License, or (at your option)
|
||||
# any later version.
|
||||
#
|
||||
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
|
||||
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
# more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along with
|
||||
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# This file contains the Debian configuration for mailman. It uses ini-style
|
||||
# formats under the lazr.config regime to define all system configuration
|
||||
# options. See <https://launchpad.net/lazr.config> for details.
|
||||
|
||||
|
||||
[mailman]
|
||||
# This address is the "site owner" address. Certain messages which must be
|
||||
# delivered to a human, but which can't be delivered to a list owner (e.g. a
|
||||
# bounce from a list owner), will be sent to this address. It should point to
|
||||
# a human.
|
||||
site_owner: ${site_owner_email}
|
||||
|
||||
# This is the local-part of an email address used in the From field whenever a
|
||||
# message comes from some entity to which there is no natural reply recipient.
|
||||
# Mailman will append '@' and the host name of the list involved. This
|
||||
# address must not bounce and it must not point to a Mailman process.
|
||||
noreply_address: noreply
|
||||
|
||||
# The default language for this server.
|
||||
default_language: de
|
||||
|
||||
# Membership tests for posting purposes are usually performed by looking at a
|
||||
# set of headers, passing the test if any of their values match a member of
|
||||
# the list. Headers are checked in the order given in this variable. The
|
||||
# value From_ means to use the envelope sender. Field names are case
|
||||
# insensitive. This is a space separate list of headers.
|
||||
sender_headers: from from_ reply-to sender
|
||||
|
||||
# Mail command processor will ignore mail command lines after designated max.
|
||||
email_commands_max_lines: 10
|
||||
|
||||
# Default length of time a pending request is live before it is evicted from
|
||||
# the pending database.
|
||||
pending_request_life: 3d
|
||||
|
||||
# How long should files be saved before they are evicted from the cache?
|
||||
cache_life: 7d
|
||||
|
||||
# A callable to run with no arguments early in the initialization process.
|
||||
# This runs before database initialization.
|
||||
pre_hook:
|
||||
|
||||
# A callable to run with no arguments late in the initialization process.
|
||||
# This runs after adapters are initialized.
|
||||
post_hook:
|
||||
|
||||
# Which paths.* file system layout to use.
|
||||
# You should not change this variable.
|
||||
layout: debian
|
||||
|
||||
# Can MIME filtered messages be preserved by list owners?
|
||||
filtered_messages_are_preservable: no
|
||||
|
||||
# How should text/html parts be converted to text/plain when the mailing list
|
||||
# is set to convert HTML to plaintext? This names a command to be called,
|
||||
# where the substitution variable $filename is filled in by Mailman, and
|
||||
# contains the path to the temporary file that the command should read from.
|
||||
# The command should print the converted text to stdout.
|
||||
html_to_plain_text_command: /usr/bin/lynx -dump $filename
|
||||
|
||||
# Specify what characters are allowed in list names. Characters outside of
|
||||
# the class [-_.+=!$*{}~0-9a-z] matched case insensitively are never allowed,
|
||||
# but this specifies a subset as the only allowable characters. This must be
|
||||
# a valid character class regexp or the effect on list creation is
|
||||
# unpredictable.
|
||||
listname_chars: [-_.0-9a-z]
|
||||
|
||||
|
||||
[shell]
|
||||
# `mailman shell` (also `withlist`) gives you an interactive prompt that you
|
||||
# can use to interact with an initialized and configured Mailman system. Use
|
||||
# --help for more information. This section allows you to configure certain
|
||||
# aspects of this interactive shell.
|
||||
|
||||
# Customize the interpreter prompt.
|
||||
prompt: >>>
|
||||
|
||||
# Banner to show on startup.
|
||||
banner: Welcome to the GNU Mailman shell
|
||||
|
||||
# Use IPython as the shell, which must be found on the system. Valid values
|
||||
# are `no`, `yes`, and `debug` where the latter is equivalent to `yes` except
|
||||
# that any import errors will be displayed to stderr.
|
||||
use_ipython: no
|
||||
|
||||
# Set this to allow for command line history if readline is available. This
|
||||
# can be as simple as $var_dir/history.py to put the file in the var directory.
|
||||
history_file:
|
||||
|
||||
|
||||
[paths.debian]
|
||||
# Important directories for Mailman operation. These are defined here so that
|
||||
# different layouts can be supported. For example, a developer layout would
|
||||
# be different from a FHS layout. Most paths are based off the var_dir, and
|
||||
# often just setting that will do the right thing for all the other paths.
|
||||
# You might also have to set spool_dir though.
|
||||
#
|
||||
# Substitutions are allowed, but must be of the form $var where 'var' names a
|
||||
# configuration variable in the paths.* section. Substitutions are expanded
|
||||
# recursively until no more $-variables are present. Beware of infinite
|
||||
# expansion loops!
|
||||
#
|
||||
# This is the root of the directory structure that Mailman will use to store
|
||||
# its run-time data.
|
||||
var_dir: /var/lib/mailman3
|
||||
# This is where the Mailman queue files directories will be created.
|
||||
queue_dir: $var_dir/queue
|
||||
# This is the directory containing the Mailman 'runner' and 'master' commands
|
||||
# if set to the string '$argv', it will be taken as the directory containing
|
||||
# the 'mailman' command.
|
||||
bin_dir: /usr/lib/mailman3/bin
|
||||
# All list-specific data.
|
||||
list_data_dir: $var_dir/lists
|
||||
# Directory where log files go.
|
||||
log_dir: /var/log/mailman3
|
||||
# Directory for system-wide locks.
|
||||
lock_dir: $var_dir/locks
|
||||
# Directory for system-wide data.
|
||||
data_dir: $var_dir/data
|
||||
# Cache files.
|
||||
cache_dir: $var_dir/cache
|
||||
# Directory for configuration files and such.
|
||||
etc_dir: /etc/mailman3
|
||||
# Directory containing Mailman plugins.
|
||||
ext_dir: $var_dir/ext
|
||||
# Directory where the default IMessageStore puts its messages.
|
||||
messages_dir: $var_dir/messages
|
||||
# Directory for archive backends to store their messages in. Archivers should
|
||||
# create a subdirectory in here to store their files.
|
||||
archive_dir: $var_dir/archives
|
||||
# Root directory for site-specific template override files.
|
||||
template_dir: $var_dir/templates
|
||||
# There are also a number of paths to specific file locations that can be
|
||||
# defined. For these, the directory containing the file must already exist,
|
||||
# or be one of the directories created by Mailman as per above.
|
||||
#
|
||||
# This is where PID file for the master runner is stored.
|
||||
pid_file: /run/mailman3/master.pid
|
||||
# Lock file.
|
||||
lock_file: $lock_dir/master.lck
|
||||
|
||||
|
||||
[database]
|
||||
# The class implementing the IDatabase.
|
||||
class: mailman.database.sqlite.SQLiteDatabase
|
||||
#class: mailman.database.mysql.MySQLDatabase
|
||||
#class: mailman.database.postgresql.PostgreSQLDatabase
|
||||
|
||||
# Use this to set the Storm database engine URL. You generally have one
|
||||
# primary database connection for all of Mailman. List data and most rosters
|
||||
# will store their data in this database, although external rosters may access
|
||||
# other databases in their own way. This string supports standard
|
||||
# 'configuration' substitutions.
|
||||
url: sqlite:///$DATA_DIR/mailman.db
|
||||
#url: mysql+pymysql://mailman3:mmpass@localhost/mailman3?charset=utf8&use_unicode=1
|
||||
#url: postgresql://mailman3:mmpass@localhost/mailman3
|
||||
|
||||
debug: no
|
||||
|
||||
|
||||
[logging.debian]
|
||||
# This defines various log settings. The options available are:
|
||||
#
|
||||
# - level -- Overrides the default level; this may be any of the
|
||||
# standard Python logging levels, case insensitive.
|
||||
# - format -- Overrides the default format string
|
||||
# - datefmt -- Overrides the default date format string
|
||||
# - path -- Overrides the default logger path. This may be a relative
|
||||
# path name, in which case it is relative to Mailman's LOG_DIR,
|
||||
# or it may be an absolute path name. You cannot change the
|
||||
# handler class that will be used.
|
||||
# - propagate -- Boolean specifying whether to propagate log message from this
|
||||
# logger to the root "mailman" logger. You cannot override
|
||||
# settings for the root logger.
|
||||
#
|
||||
# In this section, you can define defaults for all loggers, which will be
|
||||
# prefixed by 'mailman.'. Use subsections to override settings for specific
|
||||
# loggers. The names of the available loggers are:
|
||||
#
|
||||
# - archiver -- All archiver output
|
||||
# - bounce -- All bounce processing logs go here
|
||||
# - config -- Configuration issues
|
||||
# - database -- Database logging (SQLAlchemy and Alembic)
|
||||
# - debug -- Only used for development
|
||||
# - error -- All exceptions go to this log
|
||||
# - fromusenet -- Information related to the Usenet to Mailman gateway
|
||||
# - http -- Internal wsgi-based web interface
|
||||
# - locks -- Lock state changes
|
||||
# - mischief -- Various types of hostile activity
|
||||
# - runner -- Runner process start/stops
|
||||
# - smtp -- Successful SMTP activity
|
||||
# - smtp-failure -- Unsuccessful SMTP activity
|
||||
# - subscribe -- Information about leaves/joins
|
||||
# - vette -- Message vetting information
|
||||
format: %(asctime)s (%(process)d) %(message)s
|
||||
datefmt: %b %d %H:%M:%S %Y
|
||||
propagate: no
|
||||
level: info
|
||||
path: mailman.log
|
||||
|
||||
[webservice]
|
||||
# The hostname at which admin web service resources are exposed.
|
||||
hostname: localhost
|
||||
|
||||
# The port at which the admin web service resources are exposed.
|
||||
port: 8001
|
||||
|
||||
# Whether or not requests to the web service are secured through SSL.
|
||||
use_https: no
|
||||
|
||||
# Whether or not to show tracebacks in an HTTP response for a request that
|
||||
# raised an exception.
|
||||
show_tracebacks: yes
|
||||
|
||||
# The API version number for the current (highest) API.
|
||||
api_version: 3.1
|
||||
|
||||
# The administrative username.
|
||||
admin_user: restadmin
|
||||
|
||||
# The administrative password.
|
||||
admin_pass: ${api_password}
|
||||
|
||||
[mta]
|
||||
# The class defining the interface to the incoming mail transport agent.
|
||||
#incoming: mailman.mta.exim4.LMTP
|
||||
incoming: mailman.mta.postfix.LMTP
|
||||
|
||||
# The callable implementing delivery to the outgoing mail transport agent.
|
||||
# This must accept three arguments, the mailing list, the message, and the
|
||||
# message metadata dictionary.
|
||||
outgoing: mailman.mta.deliver.deliver
|
||||
|
||||
# How to connect to the outgoing MTA. If smtp_user and smtp_pass is given,
|
||||
# then Mailman will attempt to log into the MTA when making a new connection.
|
||||
# smtp_host: smtp.ionos.de
|
||||
# smtp_port: 587
|
||||
# smtp_user: ${smtp_user}
|
||||
# smtp_pass: ${smtp_password}
|
||||
# smtp_secure_mode: starttls
|
||||
|
||||
smtp_host: 127.0.0.1
|
||||
smtp_port: 25
|
||||
smtp_user:
|
||||
smtp_pass:
|
||||
|
||||
# Where the LMTP server listens for connections. Use 127.0.0.1 instead of
|
||||
# localhost for Postfix integration, because Postfix only consults DNS
|
||||
# (e.g. not /etc/hosts).
|
||||
lmtp_host: 127.0.0.1
|
||||
lmtp_port: 8024
|
||||
|
||||
# Where can we find the mail server specific configuration file? The path can
|
||||
# be either a file system path or a Python import path. If the value starts
|
||||
# with python: then it is a Python import path, otherwise it is a file system
|
||||
# path. File system paths must be absolute since no guarantees are made about
|
||||
# the current working directory. Python paths should not include the trailing
|
||||
# .cfg, which the file must end with.
|
||||
#configuration: python:mailman.config.exim4
|
||||
configuration: python:mailman.config.postfix
|
|
@ -1,52 +0,0 @@
|
|||
# See /usr/share/postfix/main.cf.dist for a commented, more complete version
|
||||
|
||||
# Debian specific: Specifying a file name will cause the first
|
||||
# line of that file to be used as the name. The Debian default
|
||||
# is /etc/mailname.
|
||||
#myorigin = /etc/mailname
|
||||
|
||||
smtpd_banner = $myhostname ESMTP $mail_name (Debian/GNU)
|
||||
biff = no
|
||||
|
||||
# appending .domain is the MUA's job.
|
||||
append_dot_mydomain = no
|
||||
|
||||
# Uncomment the next line to generate "delayed mail" warnings
|
||||
#delay_warning_time = 4h
|
||||
|
||||
readme_directory = no
|
||||
|
||||
# See http://www.postfix.org/COMPATIBILITY_README.html -- default to 3.6 on
|
||||
# fresh installs.
|
||||
compatibility_level = 3.6
|
||||
|
||||
# TLS parameters
|
||||
smtpd_tls_cert_file=/etc/ssl/certs/ssl-cert-snakeoil.pem
|
||||
smtpd_tls_key_file=/etc/ssl/private/ssl-cert-snakeoil.key
|
||||
smtpd_tls_security_level=may
|
||||
|
||||
smtp_tls_CApath=/etc/ssl/certs
|
||||
smtp_tls_security_level=may
|
||||
smtp_tls_session_cache_database = <%text>btree:${data_directory}/smtp_scache</%text>
|
||||
|
||||
smtpd_relay_restrictions = permit_mynetworks permit_sasl_authenticated defer_unauth_destination
|
||||
myhostname = ${hostname}
|
||||
alias_maps = hash:/etc/aliases
|
||||
alias_database = hash:/etc/aliases
|
||||
mydestination = $myhostname, localhost, localhost.localdomain, ${hostname}
|
||||
relayhost =
|
||||
mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128
|
||||
mailbox_size_limit = 0
|
||||
recipient_delimiter = +
|
||||
inet_interfaces = all
|
||||
inet_protocols = all
|
||||
|
||||
unknown_local_recipient_reject_code = 550
|
||||
owner_request_special = no
|
||||
|
||||
transport_maps =
|
||||
hash:/var/lib/mailman3/data/postfix_lmtp
|
||||
local_recipient_maps =
|
||||
hash:/var/lib/mailman3/data/postfix_lmtp
|
||||
relay_domains =
|
||||
hash:/var/lib/mailman3/data/postfix_domains
|
|
@ -1,50 +0,0 @@
|
|||
[uwsgi]
|
||||
# Socket on which uwsgi will be listening.
|
||||
uwsgi-socket = /run/mailman3-web/uwsgi.sock
|
||||
|
||||
# Enable threading for python
|
||||
enable-threads = true
|
||||
|
||||
# Move to the directory where the django files are.
|
||||
chdir = /usr/share/mailman3-web
|
||||
|
||||
# Use the wsgi file provided with the django project.
|
||||
wsgi-file = wsgi.py
|
||||
|
||||
# Setup default number of processes and threads per process.
|
||||
master = true
|
||||
process = 2
|
||||
threads = 2
|
||||
|
||||
# Drop privileges and don't run as root.
|
||||
uid = www-data
|
||||
gid = www-data
|
||||
|
||||
plugins = python3
|
||||
|
||||
# Setup the django_q related worker processes.
|
||||
attach-daemon = python3 manage.py qcluster
|
||||
|
||||
# Setup hyperkitty's cron jobs.
|
||||
#unique-cron = -1 -1 -1 -1 -1 ./manage.py runjobs minutely
|
||||
#unique-cron = -15 -1 -1 -1 -1 ./manage.py runjobs quarter_hourly
|
||||
#unique-cron = 0 -1 -1 -1 -1 ./manage.py runjobs hourly
|
||||
#unique-cron = 0 0 -1 -1 -1 ./manage.py runjobs daily
|
||||
#unique-cron = 0 0 1 -1 -1 ./manage.py runjobs monthly
|
||||
#unique-cron = 0 0 -1 -1 0 ./manage.py runjobs weekly
|
||||
#unique-cron = 0 0 1 1 -1 ./manage.py runjobs yearly
|
||||
|
||||
# Setup the request log.
|
||||
#req-logger = file:/var/log/mailman3/web/mailman-web.log
|
||||
|
||||
# Log cron separately.
|
||||
#logger = cron file:/var/log/mailman3/web/mailman-web-cron.log
|
||||
#log-route = cron uwsgi-cron
|
||||
|
||||
# Log qcluster commands separately.
|
||||
#logger = qcluster file:/var/log/mailman3/web/mailman-web-qcluster.log
|
||||
#log-route = qcluster uwsgi-daemons
|
||||
|
||||
# Last log and it logs the rest of the stuff.
|
||||
#logger = file:/var/log/mailman3/web/mailman-web-error.log
|
||||
logto = /var/log/mailman3/web/mailman-web.log
|
|
@ -1,104 +0,0 @@
|
|||
directories = {
|
||||
'/var/lib/mailman3': {
|
||||
'owner': 'list',
|
||||
'group': 'list',
|
||||
'needs': {
|
||||
'zfs_dataset:tank/mailman',
|
||||
'pkg_apt:mailman3-full',
|
||||
},
|
||||
'needed_by': {
|
||||
'svc_systemd:mailman3.service',
|
||||
'svc_systemd:mailman3-web.service',
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
files = {
|
||||
'/etc/postfix/main.cf': {
|
||||
'source': 'postfix.cf',
|
||||
'content_type': 'mako',
|
||||
'mode': '0644',
|
||||
'context': {
|
||||
'hostname': node.metadata.get('mailman/hostname'),
|
||||
},
|
||||
'needs': {
|
||||
'pkg_apt:postfix',
|
||||
},
|
||||
'triggers': {
|
||||
'svc_systemd:postfix.service:restart',
|
||||
},
|
||||
},
|
||||
'/etc/mailman3/mailman.cfg': {
|
||||
'content_type': 'mako',
|
||||
'owner': 'root',
|
||||
'group': 'list',
|
||||
'mode': '0640',
|
||||
'context': node.metadata.get('mailman'),
|
||||
'needs': {
|
||||
'pkg_apt:mailman3-full',
|
||||
},
|
||||
'triggers': {
|
||||
'svc_systemd:mailman3.service:restart',
|
||||
'svc_systemd:mailman3-web.service:restart',
|
||||
},
|
||||
},
|
||||
'/etc/mailman3/mailman-web.py': {
|
||||
'content_type': 'mako',
|
||||
'owner': 'root',
|
||||
'group': 'www-data',
|
||||
'mode': '0640',
|
||||
'context': node.metadata.get('mailman'),
|
||||
'needs': {
|
||||
'pkg_apt:mailman3-full',
|
||||
},
|
||||
'triggers': {
|
||||
'svc_systemd:mailman3.service:restart',
|
||||
'svc_systemd:mailman3-web.service:restart',
|
||||
},
|
||||
},
|
||||
'/etc/mailman3/mailman-hyperkitty.cfg': {
|
||||
'content_type': 'mako',
|
||||
'owner': 'root',
|
||||
'group': 'list',
|
||||
'mode': '0640',
|
||||
'context': node.metadata.get('mailman'),
|
||||
'needs': {
|
||||
'pkg_apt:mailman3-full',
|
||||
},
|
||||
'triggers': {
|
||||
'svc_systemd:mailman3.service:restart',
|
||||
'svc_systemd:mailman3-web.service:restart',
|
||||
},
|
||||
},
|
||||
'/etc/mailman3/uwsgi.ini': {
|
||||
'content_type': 'text',
|
||||
'owner': 'root',
|
||||
'group': 'root',
|
||||
'mode': '0644',
|
||||
'needs': {
|
||||
'pkg_apt:mailman3-full',
|
||||
},
|
||||
'triggers': {
|
||||
'svc_systemd:mailman3.service:restart',
|
||||
'svc_systemd:mailman3-web.service:restart',
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
svc_systemd = {
|
||||
'postfix.service': {
|
||||
'needs': {
|
||||
'pkg_apt:postfix',
|
||||
},
|
||||
},
|
||||
'mailman3.service': {
|
||||
'needs': {
|
||||
'pkg_apt:mailman3-full',
|
||||
},
|
||||
},
|
||||
'mailman3-web.service': {
|
||||
'needs': {
|
||||
'pkg_apt:mailman3-full',
|
||||
},
|
||||
},
|
||||
}
|
|
@ -1,116 +0,0 @@
|
|||
import base64
|
||||
|
||||
def derive_mailadmin_secret(metadata, salt):
|
||||
node_id = metadata.get('id')
|
||||
raw = base64.b64decode(
|
||||
repo.vault.random_bytes_as_base64_for(f'{node_id}_{salt}', length=32).value
|
||||
)
|
||||
return base64.urlsafe_b64encode(raw).rstrip(b'=').decode('ascii')
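# Illustrative note (not part of the original bundle): the helper above
# re-encodes 32 vault-derived bytes as unpadded URL-safe base64, so each
# (node, salt) pair deterministically yields the same 43-character secret, e.g.
#
#   web_secret = derive_mailadmin_secret(metadata, 'secret_key')
#   api_pass   = derive_mailadmin_secret(metadata, 'api_password')
#
# The variable names are made up; the actual values come from repo.vault.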
|
||||
|
||||
|
||||
defaults = {
|
||||
'apt': {
|
||||
'packages': {
|
||||
'mailman3-full': {
|
||||
'needs': {
|
||||
'postgres_db:mailman',
|
||||
'postgres_role:mailman',
|
||||
'zfs_dataset:tank/mailman',
|
||||
}
|
||||
},
|
||||
'postfix': {},
|
||||
'python3-psycopg2': {
|
||||
'needed_by': {
|
||||
'pkg_apt:mailman3-full',
|
||||
},
|
||||
},
|
||||
'apache2': {
|
||||
'installed': False,
|
||||
'needs': {
|
||||
'pkg_apt:mailman3-full',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
'zfs': {
|
||||
'datasets': {
|
||||
'tank/mailman': {
|
||||
'mountpoint': '/var/lib/mailman3',
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'postgresql',
|
||||
'mailman',
|
||||
)
|
||||
def postgresql(metadata):
|
||||
node_id = metadata.get('id')
|
||||
db_password = repo.vault.password_for(f'{node_id} database mailman')
|
||||
|
||||
return {
|
||||
'postgresql': {
|
||||
'databases': {
|
||||
'mailman': {
|
||||
'owner': 'mailman',
|
||||
},
|
||||
},
|
||||
'roles': {
|
||||
'mailman': {
|
||||
'password': db_password,
|
||||
},
|
||||
},
|
||||
},
|
||||
'mailman': {
|
||||
'db_password': db_password,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'nginx/vhosts',
|
||||
)
|
||||
def nginx(metadata):
|
||||
return {
|
||||
'nginx': {
|
||||
'vhosts': {
|
||||
metadata.get('mailman/hostname'): {
|
||||
'content': 'mailman/vhost.conf',
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'mailman/secret_key',
|
||||
)
|
||||
def secret_key(metadata):
|
||||
import base64
|
||||
|
||||
node_id = metadata.get('id')
|
||||
raw = base64.b64decode(
|
||||
repo.vault.random_bytes_as_base64_for(f'{node_id}_mailman_secret_key', length=32).value
|
||||
)
|
||||
secret_key = base64.urlsafe_b64encode(raw).rstrip(b'=').decode('ascii')
|
||||
|
||||
return {
|
||||
'mailman': {
|
||||
'secret_key': secret_key,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'mailman',
|
||||
)
|
||||
def secrets(metadata):
|
||||
return {
|
||||
'mailman': {
|
||||
'web_secret': derive_mailadmin_secret(metadata, 'secret_key'),
|
||||
'api_password': derive_mailadmin_secret(metadata, 'api_password'),
|
||||
'archiver_key': derive_mailadmin_secret(metadata, 'archiver_key'),
|
||||
},
|
||||
}
|
|
@ -1,6 +1,6 @@
|
|||
<?php
|
||||
|
||||
|
||||
// https://raw.githubusercontent.com/Radiergummi/autodiscover/master/autodiscover/autodiscover.php
|
||||
|
||||
/********************************
|
||||
* Autodiscover responder
|
||||
|
@ -8,45 +8,45 @@
|
|||
* This PHP script is intended to respond to any request to http(s)://mydomain.com/autodiscover/autodiscover.xml.
|
||||
* If configured properly, it will send a spec-compliant autodiscover XML response, pointing mail clients to the
|
||||
* appropriate mail services.
|
||||
* If you use MAPI or ActiveSync, stick with the Autodiscover service your mail server provides for you. But if
|
||||
* If you use MAPI or ActiveSync, stick with the Autodiscover service your mail server provides for you. But if
|
||||
* you use POP/IMAP servers, this will provide autoconfiguration to Outlook, Apple Mail and mobile devices.
|
||||
*
|
||||
* To work properly, you'll need to set the service (sub)domains below in the settings section to the correct
|
||||
* To work properly, you'll need to set the service (sub)domains below in the settings section to the correct
|
||||
* domain names, adjust ports and SSL.
|
||||
*/
|
||||
|
||||
|
||||
//get raw POST data so we can extract the email address
|
||||
$request = file_get_contents("php://input");
|
||||
|
||||
|
||||
// optional debug log
|
||||
# file_put_contents( 'request.log', $request, FILE_APPEND );
|
||||
|
||||
|
||||
// retrieve email address from client request
|
||||
preg_match( "/\<EMailAddress\>(.*?)\<\/EMailAddress\>/", $request, $email );
|
||||
|
||||
|
||||
// check for invalid mail, to prevent XSS
|
||||
if (filter_var($email[1], FILTER_VALIDATE_EMAIL) === false) {
|
||||
throw new Exception('Invalid E-Mail provided');
|
||||
}
|
||||
|
||||
|
||||
// get domain from email address
|
||||
$domain = substr( strrchr( $email[1], "@" ), 1 );
|
||||
|
||||
/**************************************
|
||||
* Port and server settings below *
|
||||
**************************************/
|
||||
|
||||
|
||||
// IMAP settings
|
||||
$imapServer = 'imap.' . $domain; // imap.example.com
|
||||
$imapPort = 993;
|
||||
$imapSSL = true;
|
||||
|
||||
|
||||
// SMTP settings
|
||||
$smtpServer = 'smtp.' . $domain; // smtp.example.com
|
||||
$smtpPort = 587;
|
||||
$smtpSSL = true;
|
||||
|
||||
|
||||
//set Content-Type
|
||||
header( 'Content-Type: application/xml' );
|
||||
?>
|
||||
<?php echo '<?xml version="1.0" encoding="utf-8" ?>'; ?>
|
||||
|
|
|
@ -33,12 +33,6 @@ defaults = {
|
|||
'mountpoint': '/var/vmail',
|
||||
'compression': 'on',
|
||||
},
|
||||
'tank/vmail/index': {
|
||||
'mountpoint': '/var/vmail/index',
|
||||
'compression': 'on',
|
||||
'com.sun:auto-snapshot': 'false',
|
||||
'backup': False,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
https://mariadb.com/kb/en/systemd/#configuring-mariadb-to-write-the-error-log-to-syslog
|
|
@ -1,87 +0,0 @@
|
|||
from shlex import quote
|
||||
|
||||
def mariadb(sql, **kwargs):
|
||||
kwargs_string = ''.join(f" --{k} {v}" for k, v in kwargs.items())
|
||||
return f"mariadb{kwargs_string} -Bsr --execute {quote(sql)}"
|
||||
|
||||
directories = {
|
||||
'/var/lib/mysql': {
|
||||
'owner': 'mysql',
|
||||
'group': 'mysql',
|
||||
'needs': [
|
||||
'zfs_dataset:tank/mariadb',
|
||||
'pkg_apt:mariadb-server',
|
||||
'pkg_apt:mariadb-client',
|
||||
],
|
||||
},
|
||||
}
|
||||
|
||||
files = {
|
||||
'/etc/mysql/conf.d/override.conf': {
|
||||
'content': repo.libs.ini.dumps(node.metadata.get('mariadb/conf')),
|
||||
'content_type': 'text',
|
||||
},
|
||||
}
|
||||
|
||||
svc_systemd = {
|
||||
'mariadb.service': {
|
||||
'needs': [
|
||||
'pkg_apt:mariadb-server',
|
||||
'pkg_apt:mariadb-client',
|
||||
],
|
||||
},
|
||||
}
|
||||
|
||||
actions = {
|
||||
'mariadb_sec_remove_anonymous_users': {
|
||||
'command': mariadb("DELETE FROM mysql.global_priv WHERE User=''"),
|
||||
'unless': mariadb("SELECT count(0) FROM mysql.global_priv WHERE User = ''") + " | grep -q '^0$'",
|
||||
'needs': [
|
||||
'svc_systemd:mariadb.service',
|
||||
],
|
||||
'triggers': [
|
||||
'svc_systemd:mariadb.service:restart',
|
||||
],
|
||||
},
|
||||
'mariadb_sec_remove_remote_root': {
|
||||
'command': mariadb("DELETE FROM mysql.global_priv WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1')"),
|
||||
'unless': mariadb("SELECT count(0) FROM mysql.global_priv WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1')") + " | grep -q '^0$'",
|
||||
'needs': [
|
||||
'svc_systemd:mariadb.service',
|
||||
],
|
||||
'triggers': [
|
||||
'svc_systemd:mariadb.service:restart',
|
||||
],
|
||||
},
|
||||
}
|
||||
|
||||
for db, conf in node.metadata.get('mariadb/databases', {}).items():
|
||||
actions[f'mariadb_create_database_{db}'] = {
|
||||
'command': mariadb(f"CREATE DATABASE {db}"),
|
||||
'unless': mariadb(f"SHOW DATABASES LIKE '{db}'") + f" | grep -q '^{db}$'",
|
||||
'needs': [
|
||||
'svc_systemd:mariadb.service',
|
||||
],
|
||||
}
|
||||
actions[f'mariadb_user_{db}_create'] = {
|
||||
'command': mariadb(f"CREATE USER {db}"),
|
||||
'unless': mariadb(f"SELECT User FROM mysql.user WHERE User = '{db}'") + f" | grep -q '^{db}$'",
|
||||
'needs': [
|
||||
f'action:mariadb_create_database_{db}',
|
||||
],
|
||||
}
|
||||
pw = conf['password']
|
||||
actions[f'mariadb_user_{db}_password'] = {
|
||||
'command': mariadb(f"SET PASSWORD FOR {db} = PASSWORD('{conf['password']}')"),
|
||||
'unless': f'echo {quote(pw)} | mariadb -u {db} -e quit -p',
|
||||
'needs': [
|
||||
f'action:mariadb_user_{db}_create',
|
||||
],
|
||||
}
|
||||
actions[f'mariadb_grant_privileges_to_{db}'] = {
|
||||
'command': mariadb(f"GRANT ALL PRIVILEGES ON {db}.* TO '{db}'", database=db),
|
||||
'unless': mariadb(f"SHOW GRANTS FOR {db}") + f" | grep -q '^GRANT ALL PRIVILEGES ON `{db}`.* TO `{db}`@`%`'",
|
||||
'needs': [
|
||||
f'action:mariadb_user_{db}_create',
|
||||
],
|
||||
}
|
|
@ -1,45 +0,0 @@
|
|||
defaults = {
|
||||
'apt': {
|
||||
'packages': {
|
||||
'mariadb-server': {
|
||||
'needs': {
|
||||
'zfs_dataset:tank/mariadb',
|
||||
},
|
||||
},
|
||||
'mariadb-client': {
|
||||
'needs': {
|
||||
'zfs_dataset:tank/mariadb',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
'mariadb': {
|
||||
'databases': {},
|
||||
'conf': {
|
||||
# https://www.reddit.com/r/zfs/comments/u1xklc/mariadbmysql_database_settings_for_zfs
|
||||
'mysqld': {
|
||||
'skip-innodb_doublewrite': None,
|
||||
'innodb_flush_method': 'fsync',
|
||||
'innodb_doublewrite': '0',
|
||||
'innodb_use_atomic_writes': '0',
|
||||
'innodb_use_native_aio': '0',
|
||||
'innodb_read_io_threads': '10',
|
||||
'innodb_write_io_threads': '10',
|
||||
'innodb_buffer_pool_size': '26G',
|
||||
'innodb_flush_log_at_trx_commit': '1',
|
||||
'innodb_log_file_size': '1G',
|
||||
'innodb_flush_neighbors': '0',
|
||||
'innodb_fast_shutdown': '2',
|
||||
},
|
||||
},
|
||||
},
|
||||
'zfs': {
|
||||
'datasets': {
|
||||
'tank/mariadb': {
|
||||
'mountpoint': '/var/lib/mysql',
|
||||
'recordsize': '16384',
|
||||
'atime': 'off',
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
|
@ -5,99 +5,41 @@ defaults = {
|
|||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'network',
|
||||
)
|
||||
def dhcp(metadata):
|
||||
networks = {}
|
||||
|
||||
for network_name, network_conf in metadata.get('network').items():
|
||||
_interface = ip_interface(network_conf['ipv4'])
|
||||
_ip = _interface.ip
|
||||
_network = _interface.network
|
||||
_hosts = list(_network.hosts())
|
||||
|
||||
if network_conf.get('dhcp_server', False):
|
||||
networks[network_name] = {
|
||||
'dhcp_server_config': {
|
||||
'subnet': str(_network),
|
||||
'pool_from': str(_hosts[len(_hosts)//2]),
|
||||
'pool_to': str(_hosts[-3]),
|
||||
'router': str(_ip),
|
||||
'domain-name-servers': str(_ip),
|
||||
}
|
||||
}
|
||||
return {
|
||||
'network': networks,
|
||||
}
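# Worked example (values are made up, not from any real node): for a network
# with 'dhcp_server': True and 'ipv4': '10.0.0.1/24', the reactor above yields
#
#   _hosts = list(ip_network('10.0.0.0/24').hosts())   # 10.0.0.1 .. 10.0.0.254
#   'pool_from': str(_hosts[len(_hosts)//2])           # '10.0.0.128'
#   'pool_to':   str(_hosts[-3])                       # '10.0.0.252'
#   'router':    '10.0.0.1'
#
# i.e. the upper half of the subnet, minus the last two hosts, becomes the pool.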
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'systemd/units',
|
||||
)
|
||||
def units(metadata):
|
||||
if node.has_bundle('systemd-networkd'):
|
||||
units = {}
|
||||
units = {}
|
||||
|
||||
for network_name, network_conf in metadata.get('network').items():
|
||||
interface_type = network_conf.get('type', None)
|
||||
|
||||
# network
|
||||
|
||||
units[f'{network_name}.network'] = {
|
||||
'Match': {
|
||||
'Name': network_name if interface_type == 'vlan' else network_conf['interface'],
|
||||
},
|
||||
'Network': {
|
||||
'DHCP': network_conf.get('dhcp', 'no'),
|
||||
'IPv6AcceptRA': network_conf.get('dhcp', 'no'),
|
||||
'VLAN': set(
|
||||
other_network_name
|
||||
for other_network_name, other_network_conf in metadata.get('network', {}).items()
|
||||
if other_network_conf.get('type') == 'vlan' and other_network_conf['vlan_interface'] == network_name
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
# type
|
||||
|
||||
if interface_type:
|
||||
units[f'{network_name}.network']['Match']['Type'] = interface_type
|
||||
|
||||
# ips
|
||||
|
||||
for i in [4, 6]:
|
||||
if network_conf.get(f'ipv{i}', None):
|
||||
units[f'{network_name}.network'].update({
|
||||
f'Address#ipv{i}': {
|
||||
'Address': network_conf[f'ipv{i}'],
|
||||
},
|
||||
})
|
||||
if f'gateway{i}' in network_conf:
|
||||
units[f'{network_name}.network'].update({
|
||||
f'Route#ipv{i}': {
|
||||
'Gateway': network_conf[f'gateway{i}'],
|
||||
'GatewayOnlink': 'yes',
|
||||
}
|
||||
})
|
||||
|
||||
# as vlan
|
||||
|
||||
if interface_type == 'vlan':
|
||||
units[f"{network_name}.netdev"] = {
|
||||
'NetDev': {
|
||||
'Name': network_name,
|
||||
'Kind': 'vlan',
|
||||
},
|
||||
'VLAN': {
|
||||
'Id': network_conf['id'],
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
'systemd': {
|
||||
'units': units,
|
||||
for type, network in metadata.get('network').items():
|
||||
units[f'{type}.network'] = {
|
||||
'Match': {
|
||||
'Name': network['interface'],
|
||||
},
|
||||
'Network': {
|
||||
'DHCP': network.get('dhcp', 'no'),
|
||||
'IPv6AcceptRA': network.get('dhcp', 'no'),
|
||||
}
|
||||
}
|
||||
else:
|
||||
return {}
|
||||
|
||||
for i in [4, 6]:
|
||||
if network.get(f'ipv{i}', None):
|
||||
units[f'{type}.network'].update({
|
||||
f'Address#ipv{i}': {
|
||||
'Address': network[f'ipv{i}'],
|
||||
},
|
||||
})
|
||||
if f'gateway{i}' in network:
|
||||
units[f'{type}.network'].update({
|
||||
f'Route#ipv{i}': {
|
||||
'Gateway': network[f'gateway{i}'],
|
||||
'GatewayOnlink': 'yes',
|
||||
}
|
||||
})
|
||||
|
||||
|
||||
return {
|
||||
'systemd': {
|
||||
'units': units,
|
||||
}
|
||||
}
|
||||
|
|
|
@ -29,8 +29,8 @@ defaults = {
|
|||
'exclude': [
|
||||
'^appdata_',
|
||||
'^updater-',
|
||||
'^nextcloud\\.log',
|
||||
'^updater\\.log',
|
||||
'^nextcloud\.log',
|
||||
'^updater\.log',
|
||||
'^[^/]+/cache',
|
||||
'^[^/]+/files_versions',
|
||||
'^[^/]+/files_trashbin',
|
||||
|
@ -123,9 +123,9 @@ def config(metadata):
|
|||
],
|
||||
'cache_path': '/var/lib/nextcloud/.cache',
|
||||
'upgrade.disable-web': True,
|
||||
'memcache.local': '\\OC\\Memcache\\Redis',
|
||||
'memcache.locking': '\\OC\\Memcache\\Redis',
|
||||
'memcache.distributed': '\\OC\\Memcache\\Redis',
|
||||
'memcache.local': '\OC\Memcache\Redis',
|
||||
'memcache.locking': '\OC\Memcache\Redis',
|
||||
'memcache.distributed': '\OC\Memcache\Redis',
|
||||
'redis': {
|
||||
'host': '/var/run/redis/nextcloud.sock'
|
||||
},
|
||||
|
@ -142,7 +142,6 @@ def config(metadata):
|
|||
'versions_retention_obligation': 'auto, 90',
|
||||
'simpleSignUpLink.shown': False,
|
||||
'allow_local_remote_servers': True, # FIXME?
|
||||
'maintenance_window_start': 1, # https://docs.nextcloud.com/server/29/admin_manual/configuration_server/background_jobs_configuration.html#maintenance-window-start
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
|
||||
fastcgi_param QUERY_STRING $query_string;
|
||||
fastcgi_param REQUEST_METHOD $request_method;
|
||||
fastcgi_param CONTENT_TYPE $content_type;
|
||||
|
@ -24,6 +23,3 @@ fastcgi_param SERVER_NAME $server_name;
|
|||
|
||||
# PHP only, required if PHP was built with --enable-force-cgi-redirect
|
||||
fastcgi_param REDIRECT_STATUS 200;
|
||||
|
||||
# This is the only thing that's different to the debian default.
|
||||
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
pid /var/run/nginx.pid;
|
||||
user www-data;
|
||||
worker_processes ${worker_processes};
|
||||
worker_processes 10;
|
||||
|
||||
% for module in sorted(modules):
|
||||
load_module modules/ngx_${module}_module.so;
|
||||
|
@ -21,9 +21,6 @@ http {
|
|||
server_names_hash_bucket_size 128;
|
||||
tcp_nopush on;
|
||||
client_max_body_size 32G;
|
||||
ssl_dhparam "/etc/ssl/certs/dhparam.pem";
|
||||
# dont show nginx version
|
||||
server_tokens off;
|
||||
|
||||
% if node.has_bundle('php'):
|
||||
upstream php-handler {
|
||||
|
@ -31,13 +28,5 @@ http {
|
|||
}
|
||||
|
||||
% endif
|
||||
|
||||
% if has_websockets:
|
||||
map $http_upgrade $connection_upgrade {
|
||||
default upgrade;
|
||||
'' close;
|
||||
}
|
||||
% endif
|
||||
|
||||
include /etc/nginx/sites-enabled/*;
|
||||
include /etc/nginx/sites/*;
|
||||
}
|
||||
|
|
|
@ -9,7 +9,7 @@ directories = {
|
|||
'svc_systemd:nginx:restart',
|
||||
},
|
||||
},
|
||||
'/etc/nginx/sites-available': {
|
||||
'/etc/nginx/sites': {
|
||||
'purge': True,
|
||||
'triggers': {
|
||||
'svc_systemd:nginx:restart',
|
||||
|
@ -32,8 +32,6 @@ files = {
|
|||
'content_type': 'mako',
|
||||
'context': {
|
||||
'modules': node.metadata.get('nginx/modules'),
|
||||
'worker_processes': node.metadata.get('vm/cores'),
|
||||
'has_websockets': node.metadata.get('nginx/has_websockets'),
|
||||
},
|
||||
'triggers': {
|
||||
'svc_systemd:nginx:restart',
|
||||
|
@ -76,15 +74,9 @@ files = {
|
|||
},
|
||||
}
|
||||
|
||||
symlinks = {
|
||||
'/etc/nginx/sites-enabled': {
|
||||
'target': '/etc/nginx/sites-available',
|
||||
},
|
||||
}
|
||||
|
||||
actions = {
|
||||
'nginx-generate-dhparam': {
|
||||
'command': 'openssl dhparam -dsaparam -out /etc/ssl/certs/dhparam.pem 4096',
|
||||
'command': 'openssl dhparam -out /etc/ssl/certs/dhparam.pem 2048',
|
||||
'unless': 'test -f /etc/ssl/certs/dhparam.pem',
|
||||
},
|
||||
}
|
||||
|
@ -100,7 +92,7 @@ svc_systemd = {
|
|||
|
||||
|
||||
for name, config in node.metadata.get('nginx/vhosts').items():
|
||||
files[f'/etc/nginx/sites-available/{name}'] = {
|
||||
files[f'/etc/nginx/sites/{name}'] = {
|
||||
'content': Template(filename=join(repo.path, 'data', config['content'])).render(
|
||||
server_name=name,
|
||||
**config.get('context', {}),
|
||||
|
@ -116,6 +108,6 @@ for name, config in node.metadata.get('nginx/vhosts').items():
|
|||
}
|
||||
|
||||
if name in node.metadata.get('letsencrypt/domains'):
|
||||
files[f'/etc/nginx/sites-available/{name}']['needs'].append(
|
||||
files[f'/etc/nginx/sites/{name}']['needs'].append(
|
||||
f'action:letsencrypt_ensure-some-certificate_{name}',
|
||||
)
|
||||
|
|
|
@ -18,7 +18,6 @@ defaults = {
|
|||
'nginx': {
|
||||
'vhosts': {},
|
||||
'modules': set(),
|
||||
'has_websockets': False,
|
||||
},
|
||||
'systemd': {
|
||||
'units': {
|
||||
|
@ -74,6 +73,7 @@ def dns(metadata):
|
|||
|
||||
@metadata_reactor.provides(
|
||||
'letsencrypt/domains',
|
||||
'letsencrypt/reload_after',
|
||||
)
|
||||
def letsencrypt(metadata):
|
||||
return {
|
||||
|
@ -96,7 +96,7 @@ def monitoring(metadata):
|
|||
'monitoring': {
|
||||
'services': {
|
||||
hostname: {
|
||||
'vars.command': f"/usr/bin/curl -X GET -L --fail --no-progress-meter -o /dev/null {vhost.get('check_protocol', 'https')}://{quote(hostname + vhost.get('check_path', '/'))}",
|
||||
'vars.command': f"/usr/bin/curl -X GET -L --fail --no-progress-meter -o /dev/null {quote(hostname + vhost.get('check_path', ''))}",
|
||||
}
|
||||
for hostname, vhost in metadata.get('nginx/vhosts').items()
|
||||
},
|
||||
|
|
|
@ -1,3 +1,9 @@
|
|||
from os.path import join
|
||||
import json
|
||||
|
||||
from bundlewrap.utils.dicts import merge_dict
|
||||
|
||||
|
||||
version = node.metadata.get('php/version')
|
||||
|
||||
files = {
|
||||
|
@ -15,7 +21,7 @@ files = {
|
|||
f'pkg_apt:php{version}-fpm',
|
||||
},
|
||||
'triggers': {
|
||||
f'svc_systemd:php{version}-fpm.service:restart',
|
||||
f'svc_systemd:php{version}-fpm:restart',
|
||||
},
|
||||
},
|
||||
f'/etc/php/{version}/fpm/pool.d/www.conf': {
|
||||
|
@ -27,13 +33,13 @@ files = {
|
|||
f'pkg_apt:php{version}-fpm',
|
||||
},
|
||||
'triggers': {
|
||||
f'svc_systemd:php{version}-fpm.service:restart',
|
||||
f'svc_systemd:php{version}-fpm:restart',
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
svc_systemd = {
|
||||
f'php{version}-fpm.service': {
|
||||
f'php{version}-fpm': {
|
||||
'needs': {
|
||||
'pkg_apt:',
|
||||
f'file:/etc/php/{version}/fpm/php.ini',
|
||||
|
|
|
@ -113,7 +113,7 @@ def php_ini(metadata):
|
|||
'opcache.revalidate_freq': '60',
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
return {
|
||||
'php': {
|
||||
'php.ini': {
|
||||
|
@ -145,7 +145,7 @@ def www_conf(metadata):
|
|||
'pm': 'dynamic',
|
||||
'pm.max_children': int(threads*2),
|
||||
'pm.start_servers': int(threads),
|
||||
'pm.min_spare_servers': max([1, int(threads/2)]),
|
||||
'pm.min_spare_servers': int(threads/2),
|
||||
'pm.max_spare_servers': int(threads),
|
||||
'pm.max_requests': int(threads*32),
|
||||
},
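# Worked example (thread count is made up): with threads = 4 the pool sizing
# above comes out to pm.max_children=8, pm.start_servers=4,
# pm.min_spare_servers=2 (the max([1, ...]) variant only differs for
# threads < 2), pm.max_spare_servers=4 and pm.max_requests=128.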
|
||||
|
|
|
@ -44,9 +44,7 @@ smtpd_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1
|
|||
smtpd_restriction_classes = mua_sender_restrictions, mua_client_restrictions, mua_helo_restrictions
|
||||
mua_client_restrictions = permit_sasl_authenticated, reject
|
||||
mua_sender_restrictions = permit_sasl_authenticated, reject
|
||||
## MS Outlook, incompatible with reject_non_fqdn_hostname and/or reject_invalid_hostname
|
||||
## https://unix.stackexchange.com/a/91753/357916
|
||||
mua_helo_restrictions = permit_mynetworks, permit
|
||||
mua_helo_restrictions = permit_mynetworks, reject_non_fqdn_hostname, reject_invalid_hostname, permit
|
||||
|
||||
smtpd_milters = inet:localhost:8891 inet:127.0.0.1:11332
|
||||
non_smtpd_milters = inet:localhost:8891 inet:127.0.0.1:11332
|
||||
|
|
|
@ -86,8 +86,6 @@ if node.has_bundle('telegraf'):
|
|||
'needs': [
|
||||
'pkg_apt:acl',
|
||||
'svc_systemd:postfix',
|
||||
'svc_systemd:postfix:reload',
|
||||
'svc_systemd:postfix:restart',
|
||||
],
|
||||
}
|
||||
actions['postfix_setfacl_default_telegraf'] = {
|
||||
|
@ -96,7 +94,5 @@ if node.has_bundle('telegraf'):
|
|||
'needs': [
|
||||
'pkg_apt:acl',
|
||||
'svc_systemd:postfix',
|
||||
'svc_systemd:postfix:reload',
|
||||
'svc_systemd:postfix:restart',
|
||||
],
|
||||
}
|
||||
|
|
|
@ -1,22 +0,0 @@
|
|||
# DO NOT DISABLE!
|
||||
# If you change this first entry you will need to make sure that the
|
||||
# database superuser can access the database using some other method.
|
||||
# Noninteractive access to all databases is required during automatic
|
||||
# maintenance (custom daily cronjobs, replication, and similar tasks).
|
||||
#
|
||||
# Database administrative login by Unix domain socket
|
||||
local all postgres peer
|
||||
|
||||
# TYPE DATABASE USER ADDRESS METHOD
|
||||
|
||||
# "local" is for Unix domain socket connections only
|
||||
local all all peer
|
||||
# IPv4 local connections:
|
||||
host all all 127.0.0.1/32 ${node.metadata.get('postgresql/password_algorithm', 'md5')}
|
||||
# IPv6 local connections:
|
||||
host all all ::1/128 ${node.metadata.get('postgresql/password_algorithm', 'md5')}
|
||||
# Allow replication connections from localhost, by a user with the
|
||||
# replication privilege.
|
||||
local replication all peer
|
||||
host replication all 127.0.0.1/32 ${node.metadata.get('postgresql/password_algorithm', 'md5')}
|
||||
host replication all ::1/128 ${node.metadata.get('postgresql/password_algorithm', 'md5')}
|
|
@ -12,27 +12,12 @@ directories = {
|
|||
'zfs_dataset:tank/postgresql',
|
||||
],
|
||||
'needed_by': [
|
||||
'svc_systemd:postgresql.service',
|
||||
'svc_systemd:postgresql',
|
||||
],
|
||||
}
|
||||
}
|
||||
|
||||
files = {
|
||||
f"/etc/postgresql/{version}/main/pg_hba.conf": {
|
||||
'content_type': 'mako',
|
||||
'mode': '0640',
|
||||
'owner': 'postgres',
|
||||
'group': 'postgres',
|
||||
'needs': [
|
||||
'pkg_apt:postgresql',
|
||||
],
|
||||
'needed_by': [
|
||||
'svc_systemd:postgresql.service',
|
||||
],
|
||||
'triggers': [
|
||||
'svc_systemd:postgresql.service:restart',
|
||||
],
|
||||
},
|
||||
f"/etc/postgresql/{version}/main/conf.d/managed.conf": {
|
||||
'content': '\n'.join(
|
||||
f'{key} = {value}'
|
||||
|
@ -40,19 +25,16 @@ files = {
|
|||
) + '\n',
|
||||
'owner': 'postgres',
|
||||
'group': 'postgres',
|
||||
'needs': [
|
||||
'pkg_apt:postgresql',
|
||||
],
|
||||
'needed_by': [
|
||||
'svc_systemd:postgresql.service',
|
||||
'svc_systemd:postgresql',
|
||||
],
|
||||
'triggers': [
|
||||
'svc_systemd:postgresql.service:restart',
|
||||
'svc_systemd:postgresql:restart',
|
||||
],
|
||||
},
|
||||
}
|
||||
|
||||
svc_systemd['postgresql.service'] = {
|
||||
svc_systemd['postgresql'] = {
|
||||
'needs': [
|
||||
'pkg_apt:postgresql',
|
||||
],
|
||||
|
@ -61,13 +43,13 @@ svc_systemd['postgresql.service'] = {
|
|||
for user, config in node.metadata.get('postgresql/roles').items():
|
||||
postgres_roles[user] = merge_dict(config, {
|
||||
'needs': [
|
||||
'svc_systemd:postgresql.service',
|
||||
'svc_systemd:postgresql',
|
||||
],
|
||||
})
|
||||
|
||||
for database, config in node.metadata.get('postgresql/databases').items():
|
||||
postgres_dbs[database] = merge_dict(config, {
|
||||
'needs': [
|
||||
'svc_systemd:postgresql.service',
|
||||
'svc_systemd:postgresql',
|
||||
],
|
||||
})
|
||||
|
|
|
@ -6,11 +6,7 @@ root_password = repo.vault.password_for(f'{node.name} postgresql root')
|
|||
defaults = {
|
||||
'apt': {
|
||||
'packages': {
|
||||
'postgresql': {
|
||||
'needs': {
|
||||
'zfs_dataset:tank/postgresql',
|
||||
},
|
||||
},
|
||||
'postgresql': {},
|
||||
},
|
||||
},
|
||||
'backup': {
|
||||
|
@ -58,25 +54,6 @@ def conf(metadata):
|
|||
}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'apt/config/APT/NeverAutoRemove',
|
||||
)
|
||||
def apt(metadata):
|
||||
return {
|
||||
'apt': {
|
||||
'config': {
|
||||
'APT': {
|
||||
'NeverAutoRemove': {
|
||||
# https://github.com/credativ/postgresql-common/blob/master/pg_updateaptconfig#L17-L21
|
||||
f"^postgresql.*-{metadata.get('postgresql/version')}",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'zfs/datasets',
|
||||
)
|
||||
|
|
|
@ -1,21 +0,0 @@
|
|||
files = {
|
||||
'/etc/apt/apt.conf.d/10pveapthook': {
|
||||
'content_type': 'any',
|
||||
'mode': '0644',
|
||||
},
|
||||
'/etc/apt/apt.conf.d/76pveconf': {
|
||||
'content_type': 'any',
|
||||
'mode': '0444',
|
||||
},
|
||||
'/etc/apt/apt.conf.d/76pveproxy': {
|
||||
'content_type': 'any',
|
||||
'mode': '0644',
|
||||
},
|
||||
'/etc/network/interfaces': {
|
||||
'content_type': 'any',
|
||||
},
|
||||
}
|
||||
|
||||
symlinks['/etc/ssh/ssh_host_rsa_key.pub'] = {
|
||||
'target': '/etc/ssh/ssh_host_managed_key.pub',
|
||||
}
|
|
@ -1,100 +0,0 @@
|
|||
defaults = {
|
||||
'apt': {
|
||||
'packages': {
|
||||
'linux-image-amd64': {
|
||||
'installed': False,
|
||||
},
|
||||
'proxmox-default-kernel': {},
|
||||
# after reboot
|
||||
'proxmox-ve': {},
|
||||
'postfix': {},
|
||||
'open-iscsi': {},
|
||||
'chrony': {},
|
||||
'os-prober': {
|
||||
'installed': False,
|
||||
},
|
||||
'dnsmasq-base': {},
|
||||
},
|
||||
'sources': {
|
||||
'proxmox-ve': {
|
||||
'options': {
|
||||
'aarch': 'amd64',
|
||||
},
|
||||
'urls': {
|
||||
'http://download.proxmox.com/debian/pve',
|
||||
},
|
||||
'suites': {
|
||||
'{codename}',
|
||||
},
|
||||
'components': {
|
||||
'pve-no-subscription',
|
||||
},
|
||||
'key': 'proxmox-ve-{codename}',
|
||||
},
|
||||
},
|
||||
},
|
||||
# 'nftables': {
|
||||
# 'input': {
|
||||
# 'tcp dport 8006 accept',
|
||||
# },
|
||||
# },
|
||||
'zfs': {
|
||||
'datasets': {
|
||||
'tank/proxmox-ve': {
|
||||
'mountpoint': '/var/lib/proxmox-ve',
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# @metadata_reactor.provides(
|
||||
# 'systemd',
|
||||
# )
|
||||
# def bridge(metadata):
|
||||
# return {
|
||||
# 'systemd': {
|
||||
# 'units': {
|
||||
# # f'internal.network': {
|
||||
# # 'Network': {
|
||||
# # 'Bridge': 'br0',
|
||||
# # },
|
||||
# # },
|
||||
# 'br0.netdev': {
|
||||
# 'NetDev': {
|
||||
# 'Name': 'br0',
|
||||
# 'Kind': 'bridge'
|
||||
# },
|
||||
# },
|
||||
# 'br0.network': {
|
||||
# 'Match': {
|
||||
# 'Name': 'br0',
|
||||
# },
|
||||
# 'Network': {
|
||||
# 'Unmanaged': 'yes'
|
||||
# },
|
||||
# },
|
||||
# },
|
||||
# },
|
||||
# }
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'nginx/has_websockets',
|
||||
'nginx/vhosts',
|
||||
)
|
||||
def nginx(metadata):
|
||||
return {
|
||||
'nginx': {
|
||||
'has_websockets': True,
|
||||
'vhosts': {
|
||||
metadata.get('proxmox-ve/domain'): {
|
||||
'content': 'nginx/proxy_pass.conf',
|
||||
'context': {
|
||||
'target': 'https://localhost:8006',
|
||||
'websockets': True,
|
||||
}
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
|
@ -1,25 +0,0 @@
|
|||
from shlex import quote
|
||||
|
||||
directories = {
|
||||
'/opt/pyenv': {},
|
||||
'/opt/pyenv/install': {},
|
||||
}
|
||||
|
||||
git_deploy = {
|
||||
'/opt/pyenv/install': {
|
||||
'repo': 'https://github.com/pyenv/pyenv.git',
|
||||
'rev': 'master',
|
||||
'needs': {
|
||||
'directory:/opt/pyenv/install',
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for version in node.metadata.get('pyenv/versions'):
|
||||
actions[f'pyenv_install_{version}'] = {
|
||||
'command': f'PYENV_ROOT=/opt/pyenv /opt/pyenv/install/bin/pyenv install {quote(version)}',
|
||||
'unless': f'PYENV_ROOT=/opt/pyenv /opt/pyenv/install/bin/pyenv versions --bare | grep -Fxq {quote(version)}',
|
||||
'needs': {
|
||||
'git_deploy:/opt/pyenv/install',
|
||||
},
|
||||
}
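# Illustrative only: with 'pyenv/versions' = {'3.12.3'} (a made-up version),
# the loop above creates an action that runs
#   PYENV_ROOT=/opt/pyenv /opt/pyenv/install/bin/pyenv install 3.12.3
# guarded by an 'unless' that greps `pyenv versions --bare` for that exact string.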
|
|
@ -1,23 +0,0 @@
|
|||
defaults = {
|
||||
'apt': {
|
||||
'packages': {
|
||||
'build-essential': {},
|
||||
'libssl-dev': {},
|
||||
'zlib1g-dev': {},
|
||||
'libbz2-dev': {},
|
||||
'libreadline-dev': {},
|
||||
'libsqlite3-dev': {},
|
||||
'curl': {},
|
||||
'libncurses-dev': {},
|
||||
'xz-utils': {},
|
||||
'tk-dev': {},
|
||||
'libxml2-dev': {},
|
||||
'libxmlsec1-dev': {},
|
||||
'libffi-dev': {},
|
||||
'liblzma-dev': {},
|
||||
},
|
||||
},
|
||||
'pyenv': {
|
||||
'versions': set(),
|
||||
},
|
||||
}
|
|
@ -1,3 +0,0 @@
|
|||
- Homematic > Settings > Control panel > Security > SSH > active & set password
|
||||
- ssh to node > `ssh-copy-id -o StrictHostKeyChecking=no root@{homematic}`
|
||||
- Homematic > Settings > Control panel > Security > Automatic forwarding to HTTPS > active
|
|
@ -1,3 +1,6 @@
|
|||
from shlex import quote
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'letsencrypt/domains',
|
||||
)
|
||||
|
@ -17,6 +20,8 @@ def letsencrypt(metadata):
|
|||
'systemd-timers/raspberrymatic-cert',
|
||||
)
|
||||
def systemd_timers(metadata):
|
||||
domain = metadata.get('raspberrymatic-cert/domain')
|
||||
|
||||
return {
|
||||
'systemd-timers': {
|
||||
'raspberrymatic-cert': {
|
||||
|
|
|
@ -1,15 +1,12 @@
|
|||
directories = {
|
||||
'/etc/redis': {
|
||||
'purge': True,
|
||||
'owner': 'redis',
|
||||
'mode': '2770',
|
||||
'needs': [
|
||||
'pkg_apt:redis-server',
|
||||
],
|
||||
},
|
||||
'/var/lib/redis': {
|
||||
'owner': 'redis',
|
||||
'mode': '0750',
|
||||
'needs': [
|
||||
'pkg_apt:redis-server',
|
||||
],
|
||||
|
@ -48,7 +45,7 @@ for name, conf in node.metadata.get('redis').items():
|
|||
f'svc_systemd:redis-{name}:restart'
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
svc_systemd[f'redis-{name}'] = {
|
||||
'needs': [
|
||||
'svc_systemd:redis',
|
||||
|
|
|
@ -6,16 +6,80 @@ $config['enable_installer'] = true;
|
|||
|
||||
/* Local configuration for Roundcube Webmail */
|
||||
|
||||
// ----------------------------------
|
||||
// SQL DATABASE
|
||||
// ----------------------------------
|
||||
// Database connection string (DSN) for read+write operations
|
||||
// Format (compatible with PEAR MDB2): db_provider://user:password@host/database
|
||||
// Currently supported db_providers: mysql, pgsql, sqlite, mssql or sqlsrv
|
||||
// For examples see http://pear.php.net/manual/en/package.database.mdb2.intro-dsn.php
|
||||
// NOTE: for SQLite use absolute path: 'sqlite:////full/path/to/sqlite.db?mode=0646'
|
||||
$config['db_dsnw'] = '${database['provider']}://${database['user']}:${database['password']}@${database['host']}/${database['name']}';
|
||||
|
||||
// ----------------------------------
|
||||
// IMAP
|
||||
// ----------------------------------
|
||||
// The mail host chosen to perform the log-in.
|
||||
// Leave blank to show a textbox at login, give a list of hosts
|
||||
// to display a pulldown menu or set one host as string.
|
||||
// To use SSL/TLS connection, enter hostname with prefix ssl:// or tls://
|
||||
// Supported replacement variables:
|
||||
// %n - hostname ($_SERVER['SERVER_NAME'])
|
||||
// %t - hostname without the first part
|
||||
// %d - domain (http hostname $_SERVER['HTTP_HOST'] without the first part)
|
||||
// %s - domain name after the '@' from e-mail address provided at login screen
|
||||
// For example %n = mail.domain.tld, %t = domain.tld
|
||||
// WARNING: After hostname change update of mail_host column in users table is
|
||||
// required to match old user data records with the new host.
|
||||
$config['imap_host'] = 'localhost';
|
||||
|
||||
// ----------------------------------
|
||||
// SMTP
|
||||
// ----------------------------------
|
||||
// SMTP server host (for sending mails).
|
||||
// To use SSL/TLS connection, enter hostname with prefix ssl:// or tls://
|
||||
// If left blank, the PHP mail() function is used
|
||||
// Supported replacement variables:
|
||||
// %h - user's IMAP hostname
|
||||
// %n - hostname ($_SERVER['SERVER_NAME'])
|
||||
// %t - hostname without the first part
|
||||
// %d - domain (http hostname $_SERVER['HTTP_HOST'] without the first part)
|
||||
// %z - IMAP domain (IMAP hostname without the first part)
|
||||
// For example %n = mail.domain.tld, %t = domain.tld
|
||||
$config['smtp_host'] = 'tls://localhost';
|
||||
|
||||
// SMTP username (if required) if you use %u as the username Roundcube
|
||||
// will use the current username for login
|
||||
$config['smtp_user'] = '%u';
|
||||
|
||||
// SMTP password (if required) if you use %p as the password Roundcube
|
||||
// will use the current user's password for login
|
||||
$config['smtp_pass'] = '%p';
|
||||
|
||||
// provide an URL where a user can get support for this Roundcube installation
|
||||
// PLEASE DO NOT LINK TO THE ROUNDCUBE.NET WEBSITE HERE!
|
||||
$config['support_url'] = '';
|
||||
|
||||
// this key is used to encrypt the users imap password which is stored
|
||||
// in the session record (and the client cookie if remember password is enabled).
|
||||
// please provide a string of exactly 24 chars.
|
||||
$config['des_key'] = '${des_key}';
|
||||
|
||||
// Name your service. This is displayed on the login screen and in the window title
|
||||
$config['product_name'] = '${product_name}';
|
||||
|
||||
// ----------------------------------
|
||||
// PLUGINS
|
||||
// ----------------------------------
|
||||
// List of active plugins (in plugins/ directory)
|
||||
$config['plugins'] = array(${', '.join(f'"{plugin}"' for plugin in plugins)});
|
||||
|
||||
// the default locale setting (leave empty for auto-detection)
|
||||
// RFC1766 formatted language name like en_US, de_DE, de_CH, fr_FR, pt_BR
|
||||
$config['language'] = 'de_DE';
|
||||
|
||||
|
||||
// https://serverfault.com/a/991304
|
||||
$config['smtp_conn_options'] = array(
|
||||
'ssl' => array(
|
||||
'verify_peer' => false,
|
||||
|
|
|
@ -14,4 +14,4 @@ $config['password_dovecotpw'] = '/usr/bin/sudo /usr/bin/doveadm pw';
|
|||
$config['password_dovecotpw_method'] = 'ARGON2ID';
|
||||
$config['password_dovecotpw_with_method'] = true;
|
||||
$config['password_db_dsn'] = 'pgsql://mailserver:${mailserver_db_password}@localhost/mailserver';
|
||||
$config['password_query'] = "UPDATE users SET password = %P FROM domains WHERE domains.id = users.domain_id AND domains.name = %d AND users.name = %l";
|
||||
$config['password_query'] = "UPDATE users SET password=%D FROM domains WHERE domains.id = domain_id AND domains.name = %d AND users.name = %l";
|
||||
|
|
|
@ -1,8 +1,7 @@
|
|||
assert node.has_bundle('php')
|
||||
assert node.has_bundle('mailserver')
|
||||
|
||||
roundcube_version = node.metadata.get('roundcube/version')
|
||||
php_version = node.metadata.get('php/version')
|
||||
version = node.metadata.get('roundcube/version')
|
||||
|
||||
directories = {
|
||||
'/opt/roundcube': {
|
||||
|
@ -23,9 +22,9 @@ directories = {
|
|||
}
|
||||
|
||||
|
||||
files[f'/tmp/roundcube-{roundcube_version}.tar.gz'] = {
|
||||
files[f'/tmp/roundcube-{version}.tar.gz'] = {
|
||||
'content_type': 'download',
|
||||
'source': f'https://github.com/roundcube/roundcubemail/releases/download/{roundcube_version}/roundcubemail-{roundcube_version}-complete.tar.gz',
|
||||
'source': f'https://github.com/roundcube/roundcubemail/releases/download/{version}/roundcubemail-{version}-complete.tar.gz',
|
||||
'triggered': True,
|
||||
}
|
||||
actions['delete_roundcube'] = {
|
||||
|
@ -33,11 +32,11 @@ actions['delete_roundcube'] = {
|
|||
'triggered': True,
|
||||
}
|
||||
actions['extract_roundcube'] = {
|
||||
'command': f'tar xfvz /tmp/roundcube-{roundcube_version}.tar.gz --strip 1 -C /opt/roundcube',
|
||||
'unless': f'grep -q "Version {roundcube_version}" /opt/roundcube/index.php',
|
||||
'command': f'tar xfvz /tmp/roundcube-{version}.tar.gz --strip 1 -C /opt/roundcube',
|
||||
'unless': f'grep -q "Version {version}" /opt/roundcube/index.php',
|
||||
'preceded_by': [
|
||||
'action:delete_roundcube',
|
||||
f'file:/tmp/roundcube-{roundcube_version}.tar.gz',
|
||||
f'file:/tmp/roundcube-{version}.tar.gz',
|
||||
],
|
||||
'needs': [
|
||||
'directory:/opt/roundcube',
|
||||
|
@ -65,9 +64,6 @@ files['/opt/roundcube/config/config.inc.php'] = {
|
|||
'needs': [
|
||||
'action:chown_roundcube',
|
||||
],
|
||||
'triggers': [
|
||||
f'svc_systemd:php{php_version}-fpm.service:restart',
|
||||
],
|
||||
}
|
||||
files['/opt/roundcube/plugins/password/config.inc.php'] = {
|
||||
'source': 'password.config.inc.php',
|
||||
|
|
|
@ -1,8 +0,0 @@
|
|||
- reset (hold reset for 5-10 seconds, until user light starts flashing)
- open the web interface at 192.168.88.1
- set password
- vlans need to be configured and an additional ip needs to be assigned to a vlan which is later accessible, preferably through an untagged port
- for example add 10.0.0.62/24 to "home" vlan
- this happens on the first apply
- when vlan filtering gets enabled, the apply freezes and the switch is no longer available under the old ip
- now that filtering is active, the switch is available under its new ip, because you no longer talk to the bridge, where the old ip was residing, but to the vlan interface, where the new ip resides
|
|
@ -1,122 +0,0 @@
|
|||
routeros['/ip/dns'] = {
|
||||
'servers': '8.8.8.8',
|
||||
}
|
||||
|
||||
routeros['/system/identity'] = {
|
||||
'name': node.name,
|
||||
}
|
||||
|
||||
# for service in (
|
||||
# 'api-ssl', # slow :(
|
||||
# 'ftp', # we can download files via HTTP
|
||||
# 'telnet',
|
||||
# 'www-ssl', # slow :(
|
||||
# 'winbox',
|
||||
# ):
|
||||
# routeros[f'/ip/service?name={service}'] = {
|
||||
# 'disabled': True,
|
||||
# }
|
||||
|
||||
# LOGGING_TOPICS = (
|
||||
# 'critical',
|
||||
# 'error',
|
||||
# 'info',
|
||||
# 'stp',
|
||||
# 'warning',
|
||||
# )
|
||||
# for topic in LOGGING_TOPICS:
|
||||
# routeros[f'/system/logging?action=memory&topics={topic}'] = {}
|
||||
|
||||
# routeros['/snmp'] = {
|
||||
# 'enabled': True,
|
||||
# }
|
||||
# routeros['/snmp/community?name=public'] = {
|
||||
# 'addresses': '0.0.0.0/0',
|
||||
# 'disabled': False,
|
||||
# 'read-access': True,
|
||||
# 'write-access': False,
|
||||
# }
|
||||
|
||||
# routeros['/system/clock'] = {
|
||||
# 'time-zone-autodetect': False,
|
||||
# 'time-zone-name': 'UTC',
|
||||
# }
|
||||
|
||||
# routeros['/ip/neighbor/discovery-settings'] = {
|
||||
# 'protocol': 'cdp,lldp,mndp',
|
||||
# }
|
||||
|
||||
# routeros['/ip/route?dst-address=0.0.0.0/0'] = {
|
||||
# 'gateway': node.metadata.get('routeros/gateway'),
|
||||
# }
|
||||
|
||||
for vlan_name, vlan_id in node.metadata.get('routeros/vlans').items():
|
||||
routeros[f'/interface/vlan?name={vlan_name}'] = {
|
||||
'vlan-id': vlan_id,
|
||||
'interface': 'bridge',
|
||||
'tags': {
|
||||
'routeros-vlan',
|
||||
},
|
||||
'needs': {
|
||||
#'routeros:/interface/bridge?name=bridge',
|
||||
},
|
||||
}
|
||||
|
||||
routeros[f"/interface/bridge/vlan?vlan-ids={vlan_id}&dynamic=false"] = {
|
||||
'bridge': 'bridge',
|
||||
'untagged': sorted(node.metadata.get(f'routeros/vlan_ports/{vlan_name}/untagged')),
|
||||
'tagged': sorted(node.metadata.get(f'routeros/vlan_ports/{vlan_name}/tagged')),
|
||||
'_comment': vlan_name,
|
||||
'tags': {
|
||||
'routeros-vlan-ports',
|
||||
},
|
||||
'needs': {
|
||||
'tag:routeros-vlan',
|
||||
},
|
||||
}
|
||||
|
||||
# create IPs
|
||||
for ip, ip_conf in node.metadata.get('routeros/ips').items():
|
||||
routeros[f'/ip/address?address={ip}'] = {
|
||||
'interface': ip_conf['interface'],
|
||||
'tags': {
|
||||
'routeros-ip',
|
||||
},
|
||||
'needs': {
|
||||
'tag:routeros-vlan',
|
||||
},
|
||||
}
|
||||
|
||||
routeros['/interface/bridge?name=bridge'] = {
|
||||
'vlan-filtering': True, # ENABLE AFTER PORT VLANS ARE SET UP
|
||||
'igmp-snooping': False,
|
||||
'priority': node.metadata.get('routeros/bridge_priority'),
|
||||
'protocol-mode': 'rstp',
|
||||
'needs': {
|
||||
'tag:routeros-vlan',
|
||||
'tag:routeros-vlan-ports',
|
||||
'tag:routeros-ip',
|
||||
},
|
||||
}
|
||||
|
||||
# purge unused vlans
|
||||
routeros['/interface/vlan'] = {
|
||||
'purge': {
|
||||
'id-by': 'name',
|
||||
},
|
||||
'needed_by': {
|
||||
'tag:routeros-vlan',
|
||||
}
|
||||
}
|
||||
|
||||
routeros['/interface/bridge/vlan'] = {
|
||||
'purge': {
|
||||
'id-by': 'vlan-ids',
|
||||
'keep': {
|
||||
'dynamic': True,
|
||||
},
|
||||
},
|
||||
'needed_by': {
|
||||
'tag:routeros-vlan',
|
||||
}
|
||||
}
|
|
@ -1,26 +0,0 @@
|
|||
defaults = {}
|
||||
|
||||
|
||||
@metadata_reactor.provides(
|
||||
'routeros/vlan_ports',
|
||||
)
|
||||
def routeros__(metadata):
|
||||
return {
|
||||
'routeros': {
|
||||
'vlan_ports': {
|
||||
vlan_name: {
|
||||
'untagged': {
|
||||
port_name
|
||||
for port_name, port_conf in metadata.get('routeros/ports').items()
|
||||
if vlan_name == metadata.get(f'routeros/vlan_groups/{port_conf["vlan_group"]}/untagged')
|
||||
},
|
||||
'tagged': {
|
||||
port_name
|
||||
for port_name, port_conf in metadata.get('routeros/ports').items()
|
||||
if vlan_name in metadata.get(f'routeros/vlan_groups/{port_conf["vlan_group"]}/tagged')
|
||||
},
|
||||
}
|
||||
for vlan_name in metadata.get('routeros/vlans').keys()
|
||||
},
|
||||
},
|
||||
}
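# Worked example with made-up metadata (not from any real node):
#
#   routeros/vlans       = {'home': 1, 'iot': 2}
#   routeros/vlan_groups = {'office': {'untagged': 'home', 'tagged': {'iot'}}}
#   routeros/ports       = {'ether1': {'vlan_group': 'office'}}
#
# would make the reactor above return
#
#   'vlan_ports': {'home': {'untagged': {'ether1'}, 'tagged': set()},
#                  'iot':  {'untagged': set(),      'tagged': {'ether1'}}}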
|
|
@ -0,0 +1,8 @@
|
|||
#!/bin/bash
|
||||
|
||||
gpio=$(gpiofind SCL1)
|
||||
|
||||
while gpiomon --num-events=1 --falling-edge $gpio &> /dev/null
|
||||
do
|
||||
systemctl stop rufbereitschafts-klingel
|
||||
done
|
16
bundles/rufbereitschaftsalarm/items.py
Normal file
|
@ -0,0 +1,16 @@
|
|||
files = {
|
||||
'/opt/rufbereitschaftsalarm': {
|
||||
'mode': '550',
|
||||
},
|
||||
}
|
||||
|
||||
svc_systemd = {
|
||||
'rufbereitschaftsalarm.service': {
|
||||
'enabled': False,
|
||||
'running': False,
|
||||
'needs': [
|
||||
'pkg_apt:gpiod',
|
||||
'file:/opt/rufbereitschaftsalarm',
|
||||
],
|
||||
}
|
||||
}
|
42
bundles/rufbereitschaftsalarm/metadata.py
Normal file
|
@ -0,0 +1,42 @@
|
|||
defaults = {
|
||||
'apt': {
|
||||
'packages': {
|
||||
'gpiod': {},
|
||||
},
|
||||
},
|
||||
'flask': {
|
||||
|
||||
},
|
||||
'systemd': {
|
||||
'units': {
|
||||
'rufbereitschaftsalarm-sound.service': {
|
||||
'Unit': {
|
||||
'Description': 'rufbereitschaftsalarm sound effect',
|
||||
'After': 'network.target',
|
||||
},
|
||||
'Service': {
|
||||
'ExecStart': '/opt/rufbereitschaftsalarm-sound',
|
||||
},
|
||||
'Install': {
|
||||
'WantedBy': {
|
||||
'multi-user.target'
|
||||
},
|
||||
},
|
||||
},
|
||||
'rufbereitschaftsalarm-stop.service': {
|
||||
'Unit': {
|
||||
'Description': 'rufbereitschaftsalarm stop button',
|
||||
'After': 'network.target',
|
||||
},
|
||||
'Service': {
|
||||
'ExecStart': '/opt/rufbereitschaftsalarm-stop',
|
||||
},
|
||||
'Install': {
|
||||
'WantedBy': {
|
||||
'multi-user.target'
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
|
@ -21,4 +21,3 @@ ClientAliveInterval 30
|
|||
ClientAliveCountMax 5
|
||||
AcceptEnv LANG
|
||||
Subsystem sftp /usr/lib/openssh/sftp-server
|
||||
HostKey /etc/ssh/ssh_host_managed_key
|
||||
|
|
|
@ -51,14 +51,14 @@ files = {
|
|||
],
|
||||
'skip': dont_touch_sshd,
|
||||
},
|
||||
'/etc/ssh/ssh_host_managed_key': {
|
||||
'/etc/ssh/ssh_host_ed25519_key': {
|
||||
'content': node.metadata.get('ssh/host_key/private') + '\n',
|
||||
'mode': '0600',
|
||||
'triggers': [
|
||||
'svc_systemd:ssh:restart'
|
||||
],
|
||||
},
|
||||
'/etc/ssh/ssh_host_managed_key.pub': {
|
||||
'/etc/ssh/ssh_host_ed25519_key.pub': {
|
||||
'content': node.metadata.get('ssh/host_key/public') + '\n',
|
||||
'mode': '0644',
|
||||
'triggers': [
|
||||
|
|
|
@ -34,19 +34,18 @@ defaults = {
|
|||
)
|
||||
def systemd_timer(metadata):
|
||||
return {
|
||||
# steam python login is broken: https://github.com/ValvePython/steam/issues/442
|
||||
# 'systemd-timers': {
|
||||
# f'steam-chat-logger': {
|
||||
# 'command': '/opt/steam_chat_logger/steam_chat_logger.py',
|
||||
# 'when': 'hourly',
|
||||
# 'user': 'steam_chat_logger',
|
||||
# 'env': {
|
||||
# 'DB_NAME': 'steam_chat_logger',
|
||||
# 'DB_USER': 'steam_chat_logger',
|
||||
# 'DB_PASSWORD': metadata.get('postgresql/roles/steam_chat_logger/password'),
|
||||
# **metadata.get('steam_chat_logger'),
|
||||
# },
|
||||
# 'working_dir': '/var/lib/steam_chat_logger',
|
||||
# },
|
||||
# },
|
||||
'systemd-timers': {
|
||||
f'steam-chat-logger': {
|
||||
'command': '/opt/steam_chat_logger/steam_chat_logger.py',
|
||||
'when': 'hourly',
|
||||
'user': 'steam_chat_logger',
|
||||
'env': {
|
||||
'DB_NAME': 'steam_chat_logger',
|
||||
'DB_USER': 'steam_chat_logger',
|
||||
'DB_PASSWORD': metadata.get('postgresql/roles/steam_chat_logger/password'),
|
||||
**metadata.get('steam_chat_logger'),
|
||||
},
|
||||
'working_dir': '/var/lib/steam_chat_logger',
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
files = {
|
||||
'/etc/systemd/journald.conf.d/managed.conf': {
|
||||
'content': repo.libs.systemd.generate_unitfile({
|
||||
'Journal': node.metadata.get('systemd-journald'),
|
||||
'Jorunal': node.metadata.get('systemd-journald'),
|
||||
}),
|
||||
'triggers': {
|
||||
'svc_systemd:systemd-journald:restart',
|
||||
|
|
|
@ -1,10 +1,3 @@
|
|||
<%
|
||||
nameservers = (
|
||||
node.metadata.get('overwrite_nameservers', []) or
|
||||
node.metadata.get('nameservers', [])
|
||||
)
|
||||
%>\
|
||||
\
|
||||
% for nameserver in nameservers:
|
||||
% for nameserver in sorted(node.metadata.get('nameservers')):
|
||||
nameserver ${nameserver}
|
||||
% endfor
|
||||
% endfor
|
||||
|
|
|
@ -19,5 +19,5 @@ directories = {
|
|||
}
|
||||
|
||||
svc_systemd = {
|
||||
'systemd-networkd.service': {},
|
||||
'systemd-networkd': {},
|
||||
}
|
||||
|
|
|
@ -42,8 +42,6 @@ def systemd(metadata):
|
|||
units[f'{name}.service']['Service']['SuccessExitStatus'] = config['success_exit_status']
|
||||
if config.get('kill_mode'):
|
||||
units[f'{name}.service']['Service']['KillMode'] = config['kill_mode']
|
||||
if config.get('RuntimeMaxSec'):
|
||||
units[f'{name}.service']['Service']['RuntimeMaxSec'] = config['RuntimeMaxSec']
|
||||
|
||||
services[f'{name}.timer'] = {}
|
||||
|
||||
|
|
|
@ -24,10 +24,10 @@ for name, unit in node.metadata.get('systemd/units').items():
|
|||
path = f'/etc/systemd/network/{name}'
|
||||
dependencies = {
|
||||
'needed_by': [
|
||||
'svc_systemd:systemd-networkd.service',
|
||||
'svc_systemd:systemd-networkd',
|
||||
],
|
||||
'triggers': [
|
||||
'svc_systemd:systemd-networkd.service:restart',
|
||||
'svc_systemd:systemd-networkd:restart',
|
||||
],
|
||||
}
|
||||
elif extension in ['timer', 'service', 'mount', 'swap', 'target']:
|
||||
|
|
|
@ -9,7 +9,7 @@ files = {
|
|||
node.metadata.get('telegraf/config'),
|
||||
cls=MetadataJSONEncoder,
|
||||
)),
|
||||
sort_keys=True,
|
||||
sort_keys=True
|
||||
),
|
||||
'triggers': [
|
||||
'svc_systemd:telegraf:restart',
|
||||
|
|
|
@ -7,8 +7,6 @@ defaults = {
|
|||
# needed by crystal plugins:
|
||||
'libgc-dev': {},
|
||||
'libevent-dev': {},
|
||||
# crystal based (procio, pressure_stall):
|
||||
'libpcre3': {},
|
||||
},
|
||||
'sources': {
|
||||
'influxdata': {
|
||||
|
@ -58,7 +56,7 @@ defaults = {
|
|||
'procstat': {h({
|
||||
'interval': '60s',
|
||||
'pattern': '.',
|
||||
'fieldinclude': [
|
||||
'fieldpass': [
|
||||
'cpu_usage',
|
||||
'memory_rss',
|
||||
],
|
||||
|
|
|
@ -12,7 +12,7 @@ defaults = {
|
|||
'wireguard': {
|
||||
'backports': node.os_version < (11,),
|
||||
'triggers': [
|
||||
'svc_systemd:systemd-networkd.service:restart',
|
||||
'svc_systemd:systemd-networkd:restart',
|
||||
],
|
||||
},
|
||||
},
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
https://developer.wordpress.org/advanced-administration/upgrade/upgrading/
|
|
@ -1,25 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
SITE=$1
|
||||
VERSION=$(php -r "require('/opt/$SITE/wp-includes/version.php'); echo \$wp_version;")
|
||||
STATUS=$(curl -sSL http://api.wordpress.org/core/stable-check/1.0/ | jq -r '.["'$VERSION'"]')
|
||||
|
||||
echo "WordPress $VERSION is '$STATUS'"
|
||||
|
||||
if [[ "$STATUS" == latest ]]
|
||||
then
|
||||
exit 0
|
||||
elif [[ "$STATUS" == outdated ]]
|
||||
then
|
||||
exit 1
|
||||
elif [[ "$STATUS" == insecure ]]
|
||||
then
|
||||
if test -f /etc/nginx/sites/$SITE
|
||||
then
|
||||
rm /etc/nginx/sites/$SITE
|
||||
systemctl restart nginx
|
||||
fi
|
||||
exit 2
|
||||
else
|
||||
exit 2
|
||||
fi
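# Hypothetical usage (the script's install path and name are not shown in this
# diff): run once per site, the exit code doubles as a monitoring status:
#
#   ./check_wordpress_version my-blog; echo $?
#   # 0 = latest, 1 = outdated, 2 = insecure (vhost removed, nginx restarted) or unknown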
|
Some files were not shown because too many files have changed in this diff.