Compare commits

..

No commits in common. "40254b403e9d56717e1251e9cee9d9ae7a916d3b" and "72b7f385530ddb9f122f5feb9d46573d465890c0" have entirely different histories.

133 changed files with 730 additions and 2827 deletions

5
.envrc
View file

@ -1,5 +0,0 @@
#!/usr/bin/env bash
python3 -m venv .venv
source ./.venv/bin/activate
unset PS1

1
.gitignore vendored
View file

@ -1,2 +1 @@
.secrets.cfg* .secrets.cfg*
.venv

View file

@ -1 +0,0 @@
3.9.0

View file

@ -1,6 +0,0 @@
#!/usr/bin/env python3
from bundlewrap.repo import Repository
from os.path import realpath, dirname
repo = Repository(dirname(dirname(realpath(__file__))))

View file

@ -1,13 +0,0 @@
```python
{
'apt': {
'packages': {
'apt-transport-https': {},
},
'sources': [
# place key under data/apt/keys/packages.cloud.google.com.{asc|gpg}
'deb https://packages.cloud.google.com/apt cloud-sdk main',
],
},
}
```

View file

@ -1,35 +1,3 @@
from os.path import join
from urllib.parse import urlparse
from glob import glob
from os.path import join, basename
directories = {
'/etc/apt/sources.list.d': {
'purge': True,
'triggers': {
'action:apt_update',
},
},
'/etc/apt/trusted.gpg.d': {
'purge': True,
'triggers': {
'action:apt_update',
},
},
'/etc/apt/preferences.d': {
'purge': True,
'triggers': {
'action:apt_update',
},
},
}
files = {
'/etc/apt/sources.list': {
'content': '# managed'
},
}
actions = { actions = {
'apt_update': { 'apt_update': {
'command': 'apt-get update', 'command': 'apt-get update',
@ -41,57 +9,5 @@ actions = {
}, },
} }
hosts = {} for package, options in node.metadata.get('apt/packages', {}).items():
for source_string in node.metadata.get('apt/sources'):
source = repo.libs.apt.AptSource(source_string)
hosts\
.setdefault(source.url.hostname, set())\
.add(source)
for host, sources in hosts.items():
keyfile = basename(glob(join(repo.path, 'data', 'apt', 'keys', f'{host}.*'))[0])
destination_path = f'/etc/apt/trusted.gpg.d/{keyfile}'
for source in sources:
source.options['signed-by'] = [destination_path]
files[f'/etc/apt/sources.list.d/{host}.list'] = {
'content': '\n'.join(
str(source) for source in sorted(sources)
).format(
release=node.metadata.get('os_release')
),
'triggers': {
'action:apt_update',
},
}
files[destination_path] = {
'source': join(repo.path, 'data', 'apt', 'keys', keyfile),
'content_type': 'binary',
'triggers': {
'action:apt_update',
},
}
for package, options in node.metadata.get('apt/packages', {}).items():
pkg_apt[package] = options pkg_apt[package] = options
if options.get('backports', None):
pkg_apt[package].pop('backports')
files[f'/etc/apt/preferences.d/{package}'] = {
'content': '\n'.join([
f"Package: {package}",
f"Pin: release a={node.metadata.get('os_release')}-backports",
f"Pin-Priority: 900",
]),
'needed_by': [
f'pkg_apt:{package}',
],
'triggers': {
'action:apt_update',
},
}

View file

@ -1,6 +0,0 @@
defaults = {
'apt': {
'packages': {},
'sources': [],
},
}

View file

@ -1,12 +0,0 @@
```
defaults = {
'archive': {
'/var/important': {
'exclude': [
'\.cache/',
'\.log$',
],
},
},
}
```

View file

@ -1,29 +0,0 @@
#!/bin/bash
if [[ "$1" == 'perform' ]]
then
echo 'NON-DRY RUN'
DRY=''
else
echo 'DRY RUN'
DRY='-n'
fi
% for path, options in paths.items():
# ${path}
gsutil ${'\\'}
-m ${'\\'}
-o 'GSUtil:parallel_process_count=${processes}' ${'\\'}
-o 'GSUtil:parallel_thread_count=${threads}' ${'\\'}
rsync ${'\\'}
$DRY ${'\\'}
-r ${'\\'}
-d ${'\\'}
-e ${'\\'}
% if options.get('exclude'):
-x '${'|'.join(options['exclude'])}' ${'\\'}
% endif
'${options['encrypted_path']}' ${'\\'}
'gs://${bucket}/${node_id}${path}' ${'\\'}
2>&1 | logger -st gsutil
% endfor

View file

@ -1,10 +0,0 @@
#!/bin/bash
FILENAME=$1
TMPFILE=$(mktemp /tmp/archive_file.XXXXXXXXXX)
BUCKET=$(cat /etc/gcloud/gcloud.json | jq -r .bucket)
NODE=$(cat /etc/archive/archive.json | jq -r .node_id)
MASTERKEY=$(cat /etc/gocryptfs/masterkey)
gsutil cat "gs://$BUCKET/$NODE$FILENAME" > "$TMPFILE"
/opt/gocryptfs-inspect/gocryptfs.py --aessiv --config=/etc/gocryptfs/gocryptfs.conf --masterkey="$MASTERKEY" "$TMPFILE"

View file

@ -1,15 +0,0 @@
#!/bin/bash
FILENAME=$1
ARCHIVE=$(/opt/archive/get_file "$FILENAME" | sha256sum)
ORIGINAL=$(cat "$FILENAME" | sha256sum)
if [[ "$ARCHIVE" == "$ORIGINAL" ]]
then
echo "OK"
exit 0
else
echo "ERROR"
exit 1
fi

View file

@ -1,43 +0,0 @@
assert node.has_bundle('gcloud')
assert node.has_bundle('gocryptfs')
assert node.has_bundle('gocryptfs-inspect')
assert node.has_bundle('systemd')
from json import dumps
directories['/opt/archive'] = {}
directories['/etc/archive'] = {}
files['/etc/archive/archive.json'] = {
'content': dumps(
{
'node_id': node.metadata.get('id'),
**node.metadata.get('archive'),
},
indent=4,
sort_keys=True
),
}
files['/opt/archive/archive'] = {
'content_type': 'mako',
'mode': '700',
'context': {
'node_id': node.metadata.get('id'),
'paths': node.metadata.get('archive/paths'),
'bucket': node.metadata.get('gcloud/bucket'),
'processes': 4,
'threads': 4,
},
'needs': [
'bundle:gcloud',
],
}
files['/opt/archive/get_file'] = {
'mode': '700',
}
files['/opt/archive/validate_file'] = {
'mode': '700',
}

View file

@ -1,45 +0,0 @@
defaults = {
'apt': {
'packages': {
'jq': {},
},
},
'archive': {
'paths': {},
},
}
@metadata_reactor.provides(
'archive/paths',
)
def paths(metadata):
return {
'archive': {
'paths': {
path: {
'encrypted_path': f'/mnt/archive.enc{path}',
'exclude': [
'^\..*',
'/\..*',
],
} for path in metadata.get('archive/paths')
},
}
}
@metadata_reactor.provides(
'gocryptfs/paths',
)
def gocryptfs(metadata):
return {
'gocryptfs': {
'paths': {
path: {
'mountpoint': options['encrypted_path'],
'reverse': True,
} for path, options in metadata.get('archive/paths').items()
},
}
}

View file

@ -1,3 +0,0 @@
!/bin/bash
zfs send tank/nextcloud@test1 | ssh backup-receiver@10.0.0.5 sudo zfs recv tank/nextcloud

View file

@ -1,82 +0,0 @@
from ipaddress import ip_interface
defaults = {
'users': {
'backup-receiver': {
'authorized_keys': [],
},
},
}
@metadata_reactor.provides(
'zfs/datasets'
)
def zfs(metadata):
datasets = {}
for other_node in repo.nodes:
if (
other_node.has_bundle('backup') and
other_node.metadata.get('backup/server') == node.name
):
datasets[f"tank/{other_node.metadata.get('id')}/fs"] = {
'mountpoint': f"/mnt/backups/{other_node.metadata.get('id')}",
'backup': False,
}
if other_node.has_bundle('zfs'):
for path in other_node.metadata.get('backup/paths'):
for dataset, config in other_node.metadata.get('zfs/datasets').items():
if path == config.get('mountpoint'):
datasets[f"tank/{other_node.metadata.get('id')}/{dataset}"] = {
'mountpoint': 'none',
'backup': False,
}
return {
'zfs': {
'datasets': datasets,
},
}
@metadata_reactor.provides(
'dns',
)
def dns(metadata):
return {
'dns': {
metadata.get('backup-server/hostname'): {
'A': [
str(ip_interface(network['ipv4']).ip)
for network in metadata.get('network').values()
if 'ipv4' in network
],
'AAAA': [
str(ip_interface(network['ipv6']).ip)
for network in metadata.get('network').values()
if 'ipv6' in network
],
},
},
}
@metadata_reactor.provides(
'users/backup-receiver/authorized_keys'
)
def backup_authorized_keys(metadata):
return {
'users': {
'backup-receiver': {
'authorized_keys': [
other_node.metadata.get('users/root/pubkey')
for other_node in repo.nodes
if other_node.has_bundle('backup')
and other_node.metadata.get('backup/server') == node.name
],
},
},
}

View file

@ -1,14 +0,0 @@
#!/bin/bash
path=$1
if zfs list -H -o mountpoint | grep -q "$path"
then
/opt/backuo/backup_path_via_zfs "$path"
elif test -d "$path"
then
/opt/backuo/backup_path_via_rsync "$path"
else
echo "UNKNOWN PATH: $path"
exit 1
fi

View file

@ -1,53 +0,0 @@
#!/bin/bash
set -e
set -x
path=$1
uuid=$(jq -r .client_uuid < /etc/backup/config.json)
server=$(jq -r .server_hostname < /etc/backup/config.json)
ssh="ssh -o StrictHostKeyChecking=no backup-receiver@$server"
source_dataset=$(zfs list -H -o mountpoint,name | grep -P "^$path\t" | cut -d $'\t' -f 2)
target_dataset="tank/$uuid/$source_dataset"
target_dataset_parent=$(echo $target_dataset | rev | cut -d / -f 2- | rev)
bookmark_prefix="auto-backup_"
new_bookmark="$bookmark_prefix$(date +"%Y-%m-%d_%H:%M:%S")"
for var in path uuid server ssh source_dataset target_dataset target_dataset_parent new_bookmark
do
[[ -z "${!var}" ]] && echo "ERROR - $var is empty" && exit 97
done
echo "BACKUP ZFS DATASET - PATH: $path, SERVER: $server, UUID: $uuid, SOURCE_DATASET: $source_dataset, TARGET_DATASET: $TARGET_DATASET"
if ! $ssh sudo zfs list -t filesystem -H -o name | grep -q "^$target_dataset_parent$"
then
echo "CREATING PARENT DATASET..."
$ssh sudo zfs create -p -o mountpoint=none "$target_dataset_parent"
fi
zfs snap "$source_dataset@$new_bookmark"
if zfs list -t bookmark -H -o name | grep "^$source_dataset#$bookmark_prefix" | wc -l | grep -q "^0$"
then
echo "INITIAL BACKUP"
# do in subshell, otherwise ctr+c will lead to 0 exitcode
$(zfs send -v "$source_dataset@$new_bookmark" | $ssh sudo zfs recv -F "$target_dataset")
else
echo "INCREMENTAL BACKUP"
last_bookmark=$(zfs list -t bookmark -H -o name | grep "^$source_dataset#$bookmark_prefix" | sort | tail -1 | cut -d '#' -f 2)
[[ -z "$last_bookmark" ]] && echo "ERROR - last_bookmark is empty" && exit 98
$(zfs send -v -i "#$last_bookmark" "$source_dataset@$new_bookmark" | $ssh sudo zfs recv "$target_dataset")
fi
if [[ "$?" == "0" ]]
then
zfs bookmark "$source_dataset@$new_bookmark" "$source_dataset#$new_bookmark"
zfs destroy "$source_dataset@$new_bookmark"
echo "SUCCESS"
else
zfs destroy "$source_dataset@$new_bookmark"
echo "ERROR"
exit 99
fi

View file

@ -1,30 +0,0 @@
from json import dumps
directories['/opt/backup'] = {}
files['/opt/backup/backup_all'] = {
'mode': '700',
}
files['/opt/backup/backup_path'] = {
'mode': '700',
}
files['/opt/backup/backup_path_via_zfs'] = {
'mode': '700',
}
files['/opt/backup/backup_path_via_rsync'] = {
'mode': '700',
}
directories['/etc/backup'] = {}
files['/etc/backup/config.json'] = {
'content': dumps(
{
'server_hostname': repo.get_node(node.metadata.get('backup/server')).metadata.get('backup-server/hostname'),
'client_uuid': node.metadata.get('id'),
'paths': sorted(set(node.metadata.get('backup/paths'))),
},
indent=4,
sort_keys=True
),
}

View file

@ -1,12 +0,0 @@
defaults = {
'apt': {
'packages': {
'jq': {},
'rsync': {},
},
},
'backup': {
'server': None,
'paths': [],
},
}

View file

@ -1,23 +0,0 @@
<%!
def column_width(column, table):
return max(map(lambda row: len(row[column]), table)) if table else 0
%>\
$TTL 600
@ IN SOA ns.sublimity.de. admin.sublimity.de. (
2020080302 ;Serial
600 ;Refresh
300 ;Retry
1209600 ;Expire
300 ;Negative response caching TTL
)
% for record in sorted(records, key=lambda r: (r['name'], r['type'], r['value'])):
${(record['name'] or '@').ljust(column_width('name', records))} \
IN \
${record['type'].ljust(column_width('type', records))} \
% if record['type'] == 'TXT':
(${' '.join('"'+record['value'][i:i+255]+'"' for i in range(0, len(record['value']), 255))})
% else:
${record['value']}
% endif
% endfor

View file

@ -1,2 +0,0 @@
RESOLVCONF=no
OPTIONS="-u bind"

View file

@ -1,2 +0,0 @@
include "/etc/bind/named.conf.options";
include "/etc/bind/named.conf.local";

View file

@ -1,26 +0,0 @@
% for view in views:
acl "${view['name']}" {
${' '.join(f'{e};' for e in view['acl'])}
};
% endfor
% for view in views:
view "${view['name']}" {
match-clients { ${view['name']}; };
recursion yes;
forward only;
forwarders {
1.1.1.1;
9.9.9.9;
8.8.8.8;
};
% for zone in zones:
zone "${zone}" {
type master;
file "/var/lib/bind/${view['name']}/db.${zone}";
};
% endfor
include "/etc/bind/named.conf.default-zones";
include "/etc/bind/zones.rfc1918";
};
% endfor

View file

@ -1,10 +0,0 @@
options {
directory "/var/cache/bind";
dnssec-validation auto;
listen-on-v6 { any; };
allow-query { any; };
max-cache-size 30%;
querylog yes;
};

View file

@ -1,141 +0,0 @@
from ipaddress import ip_address
directories[f'/var/lib/bind'] = {
'purge': True,
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:restart',
],
}
files['/etc/default/bind9'] = {
'source': 'defaults',
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:restart',
],
}
files['/etc/bind/named.conf'] = {
'owner': 'root',
'group': 'bind',
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:restart',
],
}
files['/etc/bind/named.conf.options'] = {
'owner': 'root',
'group': 'bind',
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:restart',
],
}
views = [
{
'name': 'internal',
'is_internal': True,
'acl': [
'127.0.0.1',
'10.0.0.0/8',
'169.254.0.0/16',
'172.16.0.0/12',
'192.168.0.0/16',
]
},
{
'name': 'external',
'is_internal': False,
'acl': [
'any',
]
},
]
files['/etc/bind/named.conf.local'] = {
'content_type': 'mako',
'context': {
'views': views,
'zones': sorted(node.metadata.get('bind/zones')),
},
'owner': 'root',
'group': 'bind',
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:restart',
],
}
def use_record(record, records, view):
if record['type'] in ['A', 'AAAA']:
if view == 'external':
# no internal addresses in external view
if ip_address(record['value']).is_private:
return False
elif view == 'internal':
# external addresses in internal view only, if no internal exists
if ip_address(record['value']).is_global:
for other_record in records:
if (
record['name'] == other_record['name'] and
record['type'] == other_record['type'] and
ip_address(other_record['value']).is_private
):
return False
return True
for view in views:
directories[f"/var/lib/bind/{view['name']}"] = {
'purge': True,
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:restart',
],
}
for zone, records in node.metadata.get('bind/zones').items():
files[f"/var/lib/bind/{view['name']}/db.{zone}"] = {
'group': 'bind',
'source': 'db',
'content_type': 'mako',
'context': {
'view': view['name'],
'records': list(filter(
lambda record: use_record(record, records, view['name']),
records
)),
},
'needs': [
f"directory:/var/lib/bind/{view['name']}",
],
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:restart',
],
}
svc_systemd['bind9'] = {}
actions['named-checkconf'] = {
'command': 'named-checkconf -z',
'unless': 'named-checkconf -z',
'needs': [
'svc_systemd:bind9',
]
}

View file

@ -1,87 +0,0 @@
from ipaddress import ip_interface
defaults = {
'apt': {
'packages': {
'bind9': {},
},
},
'bind': {
'zones': {},
},
}
@metadata_reactor.provides(
'bind/zones',
)
def dns(metadata):
return {
'dns': {
metadata.get('bind/domain'): {
'A': [
str(ip_interface(network['ipv4']).ip)
for network in metadata.get('network').values()
if 'ipv4' in network
],
'AAAA': [
str(ip_interface(network['ipv6']).ip)
for network in metadata.get('network').values()
if 'ipv6' in network
]
},
},
}
@metadata_reactor.provides(
'bind/zones',
)
def collect_records(metadata):
zones = {}
for other_node in repo.nodes:
for fqdn, records in other_node.metadata.get('dns').items():
matching_zones = sorted(
filter(
lambda potential_zone: fqdn.endswith(potential_zone),
metadata.get('bind/zones').keys()
),
key=len,
)
if matching_zones:
zone = matching_zones[-1]
else:
continue
name = fqdn[0:-len(zone) - 1]
for type, values in records.items():
for value in values:
zones\
.setdefault(zone, [])\
.append(
{'name': name, 'type': type, 'value': value}
)
return {
'bind': {
'zones': zones,
},
}
@metadata_reactor.provides(
'bind/zones',
)
def ns_records(metadata):
return {
'bind': {
'zones': {
zone: [
{'name': '@', 'type': 'NS', 'value': f"{metadata.get('bind/domain')}."},
] for zone in metadata.get('bind/zones').keys()
},
},
}

View file

@ -1,9 +0,0 @@
DOVECOT
=======
rescan index: https://doc.dovecot.org/configuration_manual/fts/#rescan
```
sudo -u vmail doveadm fts rescan -u 'test@mail2.sublimity.de'
sudo -u vmail doveadm index -u 'test@mail2.sublimity.de' -q '*'
```

View file

@ -1,105 +0,0 @@
#!/bin/sh
# Example attachment decoder script. The attachment comes from stdin, and
# the script is expected to output UTF-8 data to stdout. (If the output isn't
# UTF-8, everything except valid UTF-8 sequences are dropped from it.)
# The attachment decoding is enabled by setting:
#
# plugin {
# fts_decoder = decode2text
# }
# service decode2text {
# executable = script /usr/local/libexec/dovecot/decode2text.sh
# user = dovecot
# unix_listener decode2text {
# mode = 0666
# }
# }
libexec_dir=`dirname $0`
content_type=$1
# The second parameter is the format's filename extension, which is used when
# found from a filename of application/octet-stream. You can also add more
# extensions by giving more parameters.
formats='application/pdf pdf
application/x-pdf pdf
application/msword doc
application/mspowerpoint ppt
application/vnd.ms-powerpoint ppt
application/ms-excel xls
application/x-msexcel xls
application/vnd.ms-excel xls
application/vnd.openxmlformats-officedocument.wordprocessingml.document docx
application/vnd.openxmlformats-officedocument.spreadsheetml.sheet xlsx
application/vnd.openxmlformats-officedocument.presentationml.presentation pptx
application/vnd.oasis.opendocument.text odt
application/vnd.oasis.opendocument.spreadsheet ods
application/vnd.oasis.opendocument.presentation odp
'
if [ "$content_type" = "" ]; then
echo "$formats"
exit 0
fi
fmt=`echo "$formats" | grep -w "^$content_type" | cut -d ' ' -f 2`
if [ "$fmt" = "" ]; then
echo "Content-Type: $content_type not supported" >&2
exit 1
fi
# most decoders can't handle stdin directly, so write the attachment
# to a temp file
path=`mktemp`
trap "rm -f $path" 0 1 2 3 14 15
cat > $path
xmlunzip() {
name=$1
tempdir=`mktemp -d`
if [ "$tempdir" = "" ]; then
exit 1
fi
trap "rm -rf $path $tempdir" 0 1 2 3 14 15
cd $tempdir || exit 1
unzip -q "$path" 2>/dev/null || exit 0
find . -name "$name" -print0 | xargs -0 cat |
$libexec_dir/xml2text
}
wait_timeout() {
childpid=$!
trap "kill -9 $childpid; rm -f $path" 1 2 3 14 15
wait $childpid
}
LANG=en_US.UTF-8
export LANG
if [ $fmt = "pdf" ]; then
/usr/bin/pdftotext $path - 2>/dev/null&
wait_timeout 2>/dev/null
elif [ $fmt = "doc" ]; then
(/usr/bin/catdoc $path; true) 2>/dev/null&
wait_timeout 2>/dev/null
elif [ $fmt = "ppt" ]; then
(/usr/bin/catppt $path; true) 2>/dev/null&
wait_timeout 2>/dev/null
elif [ $fmt = "xls" ]; then
(/usr/bin/xls2csv $path; true) 2>/dev/null&
wait_timeout 2>/dev/null
elif [ $fmt = "odt" -o $fmt = "ods" -o $fmt = "odp" ]; then
xmlunzip "content.xml"
elif [ $fmt = "docx" ]; then
xmlunzip "document.xml"
elif [ $fmt = "xlsx" ]; then
xmlunzip "sharedStrings.xml"
elif [ $fmt = "pptx" ]; then
xmlunzip "slide*.xml"
else
echo "Buggy decoder script: $fmt not handled" >&2
exit 1
fi
exit 0

View file

@ -8,3 +8,10 @@ password_query = SELECT CONCAT(users.name, '@', domains.name) AS user, password\
WHERE redirect IS NULL \ WHERE redirect IS NULL \
AND users.name = SPLIT_PART('%u', '@', 1) \ AND users.name = SPLIT_PART('%u', '@', 1) \
AND domains.name = SPLIT_PART('%u', '@', 2) AND domains.name = SPLIT_PART('%u', '@', 2)
user_query = SELECT CONCAT(users.name, '@', domains.name) AS user, '/var/vmail/%u' AS home \
FROM users \
LEFT JOIN domains ON users.domain_id = domains.id \
WHERE redirect IS NULL \
AND users.name = SPLIT_PART('%u', '@', 1) \
AND domains.name = SPLIT_PART('%u', '@', 2)

View file

@ -1,134 +1,138 @@
protocols = imap lmtp sieve !include conf.d/*.conf
auth_mechanisms = plain login
mail_privileged_group = mail
ssl = required
ssl_cert = </var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')}/fullchain.pem
ssl_key = </var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')}/privkey.pem
ssl_dh = </etc/dovecot/dhparam.pem
ssl_client_ca_dir = /etc/ssl/certs
mail_location = maildir:~
mail_plugins = fts fts_xapian
namespace inbox { namespace inbox {
inbox = yes
separator = . separator = .
type = private
inbox = yes
location =
mailbox Drafts { mailbox Drafts {
auto = subscribe auto = subscribe
special_use = \Drafts special_use = \Drafts
} }
mailbox Junk { mailbox Junk {
auto = create auto = create
special_use = \Junk special_use = \Junk
} autoexpunge = 30d
mailbox Trash {
auto = subscribe
special_use = \Trash
} }
mailbox Sent { mailbox Sent {
auto = subscribe auto = subscribe
special_use = \Sent special_use = \Sent
} }
mailbox Trash {
auto = subscribe
special_use = \Trash
autoexpunge = 360d
}
prefix =
} }
passdb { mail_location = maildir:/var/vmail/%u
driver = sql protocols = imap lmtp sieve
args = /etc/dovecot/dovecot-sql.conf
} ssl = yes
userdb { ssl_cert = </var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')}/fullchain.pem
driver = static ssl_key = </var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')}/privkey.pem
args = uid=vmail gid=vmail home=/var/vmail/%u ssl_dh = </etc/dovecot/ssl/dhparam.pem
ssl_min_protocol = TLSv1.2
ssl_cipher_list = EECDH+AESGCM:EDH+AESGCM
ssl_prefer_server_ciphers = yes
login_greeting = IMAPd ready
auth_mechanisms = plain login
disable_plaintext_auth = yes
mail_plugins = $mail_plugins zlib
plugin {
zlib_save_level = 6
zlib_save = gz
sieve_plugins = sieve_imapsieve sieve_extprograms
sieve_dir = /var/vmail/sieve/%u/
sieve = /var/vmail/sieve/%u.sieve
sieve_pipe_bin_dir = /var/vmail/sieve/bin
sieve_extensions = +vnd.dovecot.pipe
old_stats_refresh = 30 secs
old_stats_track_cmds = yes
% if node.has_bundle('rspamd'):
sieve_before = /var/vmail/sieve/global/spam-global.sieve
# From elsewhere to Spam folder
imapsieve_mailbox1_name = Junk
imapsieve_mailbox1_causes = COPY
imapsieve_mailbox1_before = file:/var/vmail/sieve/global/learn-spam.sieve
# From Spam folder to elsewhere
imapsieve_mailbox2_name = *
imapsieve_mailbox2_from = Junk
imapsieve_mailbox2_causes = COPY
imapsieve_mailbox2_before = file:/var/vmail/sieve/global/learn-ham.sieve
% endif
} }
service auth { service auth {
unix_listener /var/spool/postfix/private/auth { unix_listener /var/spool/postfix/private/auth {
mode = 0660 mode = 0660
user = postfix user = postfix
group = postfix group = postfix
} }
unix_listener auth-userdb {
mode = 0660
user = nobody
group = nogroup
}
} }
service lmtp { service lmtp {
unix_listener /var/spool/postfix/private/dovecot-lmtp { unix_listener /var/spool/postfix/private/dovecot-lmtp {
mode = 0600 group = postfix
user = postfix mode = 0600
group = postfix user = postfix
} }
} }
service stats {
unix_listener stats-reader { service imap {
user = vmail executable = imap
group = vmail
mode = 0660
}
unix_listener stats-writer {
user = vmail
group = vmail
mode = 0660
}
} }
service imap-login {
service_count = 1
process_min_avail = 8
vsz_limit = 64M
}
service managesieve-login { service managesieve-login {
inet_listener sieve { inet_listener sieve {
} port = 4190
process_min_avail = 0 }
service_count = 1
vsz_limit = 64 M
} }
service managesieve {
process_limit = 100 userdb {
driver = sql
args = /etc/dovecot/dovecot-sql.conf
}
passdb {
driver = sql
args = /etc/dovecot/dovecot-sql.conf
}
protocol lmtp {
mail_plugins = $mail_plugins sieve
postmaster_address = ${admin_email}
} }
protocol imap { protocol imap {
mail_plugins = $mail_plugins imap_sieve mail_plugins = $mail_plugins imap_zlib imap_sieve imap_old_stats
mail_max_userip_connections = 50 mail_max_userip_connections = 50
imap_idle_notify_interval = 29 mins imap_idle_notify_interval = 29 mins
} }
protocol lmtp {
mail_plugins = $mail_plugins sieve
}
protocol sieve { protocol sieve {
plugin { plugin {
sieve = /var/vmail/sieve/%u.sieve sieve = /var/vmail/sieve/%u.sieve
sieve_storage = /var/vmail/sieve/%u/ sieve_storage = /var/vmail/sieve/%u/
} }
}
# fulltext search
plugin {
fts = xapian
fts_xapian = partial=3 full=20 verbose=0
fts_autoindex = yes
fts_enforced = yes
# Index attachements
fts_decoder = decode2text
}
service indexer-worker {
vsz_limit = ${indexer_ram}
}
service decode2text {
executable = script /usr/local/libexec/dovecot/decode2text.sh
user = dovecot
unix_listener decode2text {
mode = 0666
}
}
# spam filter
plugin {
sieve_plugins = sieve_imapsieve sieve_extprograms
sieve_dir = /var/vmail/sieve/%u/
sieve = /var/vmail/sieve/%u.sieve
sieve_pipe_bin_dir = /var/vmail/sieve/
sieve_extensions = +vnd.dovecot.pipe
sieve_before = /var/vmail/sieve/global/spam-global.sieve
# From elsewhere to Spam folder
imapsieve_mailbox1_name = Junk
imapsieve_mailbox1_causes = COPY
imapsieve_mailbox1_before = file:/var/vmail/sieve/global/learn-spam.sieve
# From Spam folder to elsewhere
imapsieve_mailbox2_name = *
imapsieve_mailbox2_from = Junk
imapsieve_mailbox2_causes = COPY
imapsieve_mailbox2_before = file:/var/vmail/sieve/global/learn-ham.sieve
} }

View file

@ -1,7 +0,0 @@
require ["vnd.dovecot.pipe", "copy", "imapsieve", "variables"];
if string "${mailbox}" "Trash" {
stop;
}
pipe :copy "rspamd-learn-ham.sh";

View file

@ -1,3 +0,0 @@
require ["vnd.dovecot.pipe", "copy", "imapsieve"];
pipe :copy "rspamd-learn-spam.sh";

View file

@ -1,6 +0,0 @@
require ["fileinto", "mailbox"];
if header :contains "X-Spam" "Yes" {
fileinto :create "Junk";
stop;
}

View file

@ -1,24 +1,6 @@
assert node.has_bundle('mailserver') assert node.has_bundle('mailserver')
groups['vmail'] = {}
users['vmail'] = {
'home': '/var/vmail',
'needs': [
'group:vmail',
],
}
directories = { directories = {
'/etc/dovecot': {
'purge': True,
},
'/etc/dovecot/conf.d': {
'purge': True,
'needs': [
'pkg_apt:dovecot-sieve',
'pkg_apt:dovecot-managesieved',
]
},
'/etc/dovecot/ssl': {}, '/etc/dovecot/ssl': {},
'/var/vmail': { '/var/vmail': {
'owner': 'vmail', 'owner': 'vmail',
@ -26,12 +8,23 @@ directories = {
} }
} }
# groups['vmail'] = {
# 'gid': 5000,
# }
#
# users['vmail'] = {
# 'uid': 5000,
# 'home': '/var/vmail',
# 'needs': [
# 'group:vmail',
# ]
# }
files = { files = {
'/etc/dovecot/dovecot.conf': { '/etc/dovecot/dovecot.conf': {
'content_type': 'mako', 'content_type': 'mako',
'context': { 'context': {
'admin_email': node.metadata.get('mailserver/admin_email'), 'admin_email': node.metadata.get('mailserver/admin_email'),
'indexer_ram': node.metadata.get('dovecot/indexer_ram'),
}, },
'needs': { 'needs': {
'pkg_apt:' 'pkg_apt:'
@ -50,41 +43,15 @@ files = {
'svc_systemd:dovecot:restart', 'svc_systemd:dovecot:restart',
}, },
}, },
'/etc/dovecot/dhparam.pem': {
'content_type': 'any',
},
'/etc/dovecot/dovecot-sql.conf': {
'content_type': 'mako',
'context': node.metadata.get('mailserver/database'),
'needs': {
'pkg_apt:'
},
'triggers': {
'svc_systemd:dovecot:restart',
},
},
'/var/mail/vmail/sieve/global/learn-ham.sieve': {
'owner': 'nobody',
'group': 'nogroup',
},
'/var/mail/vmail/sieve/global/learn-spam.sieve': {
'owner': 'nobody',
'group': 'nogroup',
},
'/var/mail/vmail/sieve/global/spam-global.sieve': {
'owner': 'nobody',
'group': 'nogroup',
},
} }
actions = { actions = {
'dovecot_generate_dhparam': { 'dovecot_generate_dhparam': {
'command': 'openssl dhparam -out /etc/dovecot/dhparam.pem 2048', 'command': 'openssl dhparam -out /etc/dovecot/ssl/dhparam.pem 2048',
'unless': 'test -f /etc/dovecot/dhparam.pem', 'unless': 'test -f /etc/dovecot/ssl/dhparam.pem',
'cascade_skip': False, 'cascade_skip': False,
'needs': { 'needs': {
'pkg_apt:', 'pkg_apt:'
'directory:/etc/dovecot/ssl',
}, },
'triggers': { 'triggers': {
'svc_systemd:dovecot:restart', 'svc_systemd:dovecot:restart',
@ -102,11 +69,3 @@ svc_systemd = {
}, },
}, },
} }
# fulltext search
directories['/usr/local/libexec/dovecot'] = {}
files['/usr/local/libexec/dovecot/decode2text.sh'] = {
'owner': 'dovecot',
'mode': '500',
}

View file

@ -1,16 +1,13 @@
from bundlewrap.metadata import atomic
defaults = { defaults = {
'apt': { 'apt': {
'packages': { 'packages': {
'dovecot-imapd': {}, 'dovecot-imapd': {},
'dovecot-pgsql': {}, 'dovecot-lmtpd': {},
'dovecot-lmtpd': {},
# spam filtering
'dovecot-sieve': {},
'dovecot-managesieved': {}, 'dovecot-managesieved': {},
# fulltext search 'dovecot-pgsql': {},
'dovecot-fts-xapian': {}, # buster-backports 'dovecot-sieve': {},
'poppler-utils': {}, # pdftotext
'catdoc': {}, # catdoc, catppt, xls2csv
}, },
}, },
'letsencrypt': { 'letsencrypt': {
@ -24,14 +21,5 @@ defaults = {
'dbuser': 'mailserver', 'dbuser': 'mailserver',
}, },
}, },
}
@metadata_reactor.provides( }
'dovecot/indexer_ram',
)
def indexer_ram(metadata):
return {
'dovecot': {
'indexer_ram': str(metadata.get('vm/ram')//2)+ 'M',
},
}

View file

@ -1,12 +0,0 @@
```
gcloud projects add-iam-policy-binding sublimity-182017 --member 'serviceAccount:backup@sublimity-182017.iam.gserviceaccount.com' --role 'roles/storage.objectViewer'
gcloud projects add-iam-policy-binding sublimity-182017 --member 'serviceAccount:backup@sublimity-182017.iam.gserviceaccount.com' --role 'roles/storage.objectCreator'
gcloud projects add-iam-policy-binding sublimity-182017 --member 'serviceAccount:backup@sublimity-182017.iam.gserviceaccount.com' --role 'roles/storage.objectAdmin'
gsutil -o "GSUtil:parallel_process_count=3" -o GSUtil:parallel_thread_count=4 -m rsync -r -d -e /var/vmail gs://sublimity-backup/mailserver
gsutil config
gsutil versioning set on gs://sublimity-backup
gcsfuse --key-file /root/.config/gcloud/service_account.json sublimity-backup gcsfuse
```

View file

@ -1,43 +0,0 @@
from os.path import join
from json import dumps
service_account = node.metadata.get('gcloud/service_account')
project = node.metadata.get('gcloud/project')
directories[f'/etc/gcloud'] = {
'purge': True,
}
files['/etc/gcloud/gcloud.json'] = {
'content': dumps(
node.metadata.get('gcloud'),
indent=4,
sort_keys=True
),
}
files['/etc/gcloud/service_account.json'] = {
'content': repo.vault.decrypt_file(
join(repo.path, 'data', 'gcloud', 'service_accounts', f'{service_account}@{project}.json.enc')
),
'mode': '500',
'needs': [
'pkg_apt:google-cloud-sdk',
],
}
actions['gcloud_activate_service_account'] = {
'command': 'gcloud auth activate-service-account --key-file /etc/gcloud/service_account.json',
'unless': f"gcloud auth list | grep -q '^\*[[:space:]]*{service_account}@{project}.iam.gserviceaccount.com'",
'needs': [
f'file:/etc/gcloud/service_account.json'
],
}
actions['gcloud_select_project'] = {
'command': f"gcloud config set project '{project}'",
'unless': f"gcloud config get-value project | grep -q '^{project}$'",
'needs': [
f'action:gcloud_activate_service_account'
],
}

View file

@ -1,14 +0,0 @@
defaults = {
'apt': {
'packages': {
'apt-transport-https': {},
'ca-certificates': {},
'gnupg': {},
'google-cloud-sdk': {},
'python3-crcmod': {},
},
'sources': [
'deb https://packages.cloud.google.com/apt cloud-sdk main',
],
},
}

View file

@ -51,19 +51,12 @@ defaults = {
'WantedBy': 'multi-user.target', 'WantedBy': 'multi-user.target',
}, },
}, },
'needs': [ 'needs': {
'action:chmod_gitea', 'action:chmod_gitea',
'download:/usr/local/bin/gitea', 'download:/usr/local/bin/gitea',
'file:/etc/systemd/system/gitea.service', 'file:/etc/systemd/system/gitea.service',
'file:/etc/gitea/app.ini', 'file:/etc/gitea/app.ini',
], },
},
},
},
'zfs': {
'datasets': {
'tank/gitea': {
'mountpoint': '/var/lib/gitea',
}, },
}, },
}, },

View file

@ -1,6 +0,0 @@
directories['/opt/gocryptfs-inspect'] = {}
git_deploy['/opt/gocryptfs-inspect'] = {
'repo': 'https://github.com/slackner/gocryptfs-inspect.git',
'rev': 'ecd296c8f014bf18f5889e3cb9cb64807ff6b9c4',
}

View file

@ -1,7 +0,0 @@
defaults = {
'apt': {
'packages': {
'python3-pycryptodome': {},
},
},
}

View file

@ -1,43 +0,0 @@
from json import dumps
directories['/etc/gocryptfs'] = {
'purge': True,
}
files['/etc/gocryptfs/masterkey'] = {
'content': node.metadata.get('gocryptfs/masterkey'),
'mode': '500',
}
files['/etc/gocryptfs/gocryptfs.conf'] = {
'content': dumps({
'Version': 2,
'Creator': 'gocryptfs 1.6.1',
'ScryptObject': {
'Salt': node.metadata.get('gocryptfs/salt'),
'N': 65536,
'R': 8,
'P': 1,
'KeyLen': 32,
},
'FeatureFlags': [
'GCMIV128',
'HKDF',
'PlaintextNames',
'AESSIV',
]
}, indent=4, sort_keys=True)
}
for path, options in node.metadata.get('gocryptfs/paths').items():
directories[options['mountpoint']] = {
'owner': None,
'group': None,
'mode': None,
'preceded_by': [
f'svc_systemd:gocryptfs-{options["id"]}:stop',
],
'needed_by': [
f'svc_systemd:gocryptfs-{options["id"]}',
],
}

View file

@ -1,103 +0,0 @@
from hashlib import sha3_256
from base64 import b64decode, b64encode
from binascii import hexlify
from uuid import UUID
# Bundle defaults: packages needed for reverse-mode gocryptfs mounts
# (fuse for mounting, socat for talking to the control socket).
defaults = {
    'apt': {
        'packages': {
            'gocryptfs': {},
            'fuse': {},
            'socat': {},
        },
    },
    'gocryptfs': {
        # Filled per-node; the paths() reactor derives an id for each entry.
        'paths': {},
    },
}
@metadata_reactor.provides(
    'gocryptfs',
)
def config(metadata):
    """Derive the gocryptfs master key and scrypt salt for this node.

    Both values are functions of the node id only, so they are stable
    across repeated metadata runs (presumably deterministic via the
    BundleWrap vault — confirm vault behavior).
    """
    return {
        'gocryptfs': {
            # 32 vault-generated random bytes, re-encoded base64 -> hex
            # because gocryptfs expects a hex master key.
            'masterkey': hexlify(b64decode(
                str(repo.vault.random_bytes_as_base64_for(metadata.get('id'), length=32))
            )).decode(),
            # Salt = base64(SHA3-256(UUID bytes of the node id)).
            'salt': b64encode(
                sha3_256(UUID(metadata.get('id')).bytes).digest()
            ).decode(),
        },
    }
@metadata_reactor.provides(
'gocryptfs',
)
def paths(metadata):
    """Assign every configured gocryptfs path a stable 8-byte hex id.

    The id is the first 8 bytes of SHA3-256 over the path string, so it
    never changes for a given path and is safe to embed in unit names.
    """
    return {
        'gocryptfs': {
            'paths': {
                plain_path: {
                    'id': hexlify(sha3_256(plain_path.encode()).digest()[:8]).decode(),
                }
                for plain_path in metadata.get('gocryptfs/paths')
            },
        },
    }
@metadata_reactor.provides(
    'systemd/services',
)
def systemd(metadata):
    """Generate one systemd service per gocryptfs path.

    Each service runs gocryptfs in reverse mode in the foreground,
    exposing an encrypted view of the plaintext directory plus a control
    socket under /var/run/gocryptfs.
    """
    services = {}
    for path, options in metadata.get('gocryptfs/paths').items():
        # Unit name keyed by the path's stable id (derived in the paths() reactor).
        services[f'gocryptfs-{options["id"]}'] = {
            'content': {
                'Unit': {
                    'Description': f'gocryptfs@{path} ({options["id"]})',
                    'After': {
                        'filesystem.target',
                        'zfs.target',
                    },
                },
                'Service': {
                    'RuntimeDirectory': 'gocryptfs',
                    # NOTE(review): the master key is written into the unit's
                    # Environment and is visible to anyone able to read the
                    # unit file or process environment — confirm acceptable.
                    'Environment': {
                        'MASTERKEY': metadata.get('gocryptfs/masterkey'),
                        'SOCKET': f'/var/run/gocryptfs/{options["id"]}',
                        'PLAIN': path,
                        'CIPHER': options["mountpoint"]
                    },
                    # -reverse: PLAIN is the cleartext source, CIPHER the
                    # encrypted mount; -fg keeps it in the foreground so
                    # systemd supervises the process directly.
                    'ExecStart': [
                        '/usr/bin/gocryptfs -fg -plaintextnames -reverse -masterkey $MASTERKEY -ctlsock $SOCKET $PLAIN $CIPHER',
                    ],
                    # Clean up the FUSE mount after the process exits.
                    'ExecStopPost': [
                        '/usr/bin/umount $CIPHER'
                    ],
                },
            },
            'needs': [
                'pkg_apt:gocryptfs',
                'pkg_apt:fuse',
                'pkg_apt:socat',
                'file:/etc/gocryptfs/masterkey',
                'file:/etc/gocryptfs/gocryptfs.conf',
            ],
            'triggers': [
                f'svc_systemd:gocryptfs-{options["id"]}:restart',
            ],
        }
    return {
        'systemd': {
            'services': services,
        },
    }

View file

@ -1,8 +0,0 @@
# defaults = {
# 'network': {
# 'external': {
# 'gateway4': '172.31.1.1',
# 'gateway6': 'fe80::1',
# },
# },
# }

View file

@ -1,2 +0,0 @@
#sudo systemctl unmask influxdb.service
#sudo systemctl start influxdb

View file

@ -1,10 +0,0 @@
# Bundle defaults: install InfluxDB 2.x from the vendor apt repository.
defaults = {
    'apt': {
        'packages': {
            'influxdb2': {},
        },
        'sources': [
            # '{release}' is presumably expanded to the Debian codename by
            # the apt bundle when it renders sources — confirm there.
            'deb https://repos.influxdata.com/debian {release} stable',
        ],
    },
}

View file

@ -2,6 +2,6 @@
def steam(metadata): def steam(metadata):
return { return {
'steam': { 'steam': {
'222860': 'l4d2', 222860: 'l4d2',
}, },
} }

View file

@ -1,10 +1,7 @@
assert node.has_bundle('postfix') assert node.has_bundle('postfix')
assert node.has_bundle('opendkim')
assert node.has_bundle('dovecot') assert node.has_bundle('dovecot')
assert node.has_bundle('letsencrypt') assert node.has_bundle('letsencrypt')
assert node.has_bundle('roundcube') assert node.has_bundle('roundcube')
assert node.has_bundle('rspamd')
assert node.has_bundle('redis')
from hashlib import md5 from hashlib import md5
from shlex import quote from shlex import quote

View file

@ -1,18 +1,15 @@
from ipaddress import ip_interface
database_password = repo.vault.password_for(f'{node.name} db mailserver') database_password = repo.vault.password_for(f'{node.name} db mailserver')
defaults = { defaults = {
'mailserver': { 'mailserver': {
'maildir': '/var/vmail', 'maildir': '/var/vmail',
'database': { 'database': {
'host': '127.0.0.1', # dont use localhost 'host': 'localhost',
'name': 'mailserver', 'name': 'mailserver',
'user': 'mailserver', 'user': 'mailserver',
'password': database_password, 'password': database_password,
}, },
'test_password': repo.vault.password_for(f'{node.name} test_pw mailserver'), 'test_password': repo.vault.password_for(f'{node.name} test_pw mailserver'),
'domains': [],
}, },
'postgresql': { 'postgresql': {
'roles': { 'roles': {
@ -36,23 +33,6 @@ defaults = {
}, },
} }
@metadata_reactor.provides(
'dns',
)
def dns(metadata):
    """Emit MX and SPF TXT records for every hosted mail domain.

    Each domain gets itself as MX (priority 5) and an SPF policy that
    allows its A and MX hosts and rejects everything else.
    """
    return {
        'dns': {
            mail_domain: {
                'MX': [f'5 {mail_domain}.'],
                'TXT': ['v=spf1 a mx -all'],
            }
            for mail_domain in metadata.get('mailserver/domains')
        },
    }
@metadata_reactor.provides( @metadata_reactor.provides(
'letsencrypt/domains', 'letsencrypt/domains',
) )

View file

@ -1,46 +0,0 @@
from ipaddress import ip_interface
defaults = {
'network': {
}
}
@metadata_reactor.provides(
'systemd-networkd/networks',
)
def systemd_networkd(metadata):
units = {}
for type, network in metadata.get('network').items():
units[type] = {
'Match': {
'Name': network['interface'],
},
'Network': {
'DHCP': 'no',
'IPv6AcceptRA': 'no',
}
}
for i in [4, 6]:
if network.get(f'ipv{i}', None):
units[type].update({
f'Address#ipv{i}': {
'Address': network[f'ipv{i}'],
},
})
if f'gateway{i}' in network:
units[type].update({
f'Route#ipv{i}': {
'Gateway': network[f'gateway{i}'],
'GatewayOnlink': 'yes',
}
})
return {
'systemd-networkd': {
'networks': units,
}
}

View file

@ -1,19 +0,0 @@
<?php
# https://docs.nextcloud.com/server/stable/admin_manual/configuration_server/config_sample_php_parameters.html#multiple-config-php-file
$CONFIG = array (
'dbuser' => 'nextcloud',
'dbpassword' => '${db_password}',
'dbname' => 'nextcloud',
'dbhost' => 'localhost',
'dbtype' => 'pgsql',
'datadirectory' => '/var/lib/nextcloud',
'dbport' => '5432',
'apps_paths' => [
[
'path'=> '/var/lib/nextcloud/.apps',
'url' => '/userapps',
'writable' => true,
],
],
'cache_path' => '/var/lib/nextcloud/.cache',
);

View file

@ -1,145 +0,0 @@
assert node.has_bundle('php')
from shlex import quote
from os.path import join
from mako.template import Template
def occ(command, *args, **kwargs):
    """Build a shell command running Nextcloud's occ tool as www-data.

    Positional args are passed through verbatim; each kwarg becomes a
    --long-option with underscores turned into dashes. A falsy value
    (e.g. ``no_interaction=None``) yields a bare flag, anything else an
    ``--option=value`` pair.

    Fix: option values are now shell-quoted via shlex.quote (already
    imported in this file), so values containing spaces or shell
    metacharacters — notably the database and admin passwords passed by
    install_nextcloud — can no longer break or inject into the command.
    """
    options = ' '.join(
        f'--{name.replace("_", "-")}' + (f'={quote(str(value))}' if value else '')
        for name, value in kwargs.items()
    )
    return f"sudo -u www-data php /opt/nextcloud/occ {command} {' '.join(args)} {options}"
version = node.metadata.get('nextcloud/version')
# DOWNLOAD
downloads[f'/tmp/nextcloud-{version}.tar.bz2'] = {
'url': f'https://download.nextcloud.com/server/releases/nextcloud-{version}.tar.bz2',
'sha256': node.metadata.get('nextcloud/sha256'),
'triggered': True,
}
actions['delete_nextcloud'] = {
'command': 'rm -rf /opt/nextcloud/*',
'triggered': True,
}
actions['extract_nextcloud'] = {
'command': f'tar xfvj /tmp/nextcloud-{version}.tar.bz2 --skip-old-files --strip 1 -C /opt/nextcloud nextcloud',
'unless': f"""php -r 'include "/opt/nextcloud/version.php"; echo "$OC_VersionString";' | grep -q '^{version}$'""",
'preceded_by': [
'action:delete_nextcloud',
f'download:/tmp/nextcloud-{version}.tar.bz2',
],
'needs': [
'action:symlink_/opt/nextcloud/config',
'directory:/opt/nextcloud',
],
}
# DIRECTORIES, FILES AND SYMLINKS
directories['/etc/nextcloud'] = {
'owner': 'www-data',
'group': 'www-data',
}
directories['/opt/nextcloud'] = {}
directories['/var/lib/nextcloud'] = {
'owner': 'www-data',
'group': 'www-data',
'mode': '770',
}
directories['/var/lib/nextcloud/.apps'] = {
'owner': 'www-data',
'group': 'www-data',
}
directories['/var/lib/nextcloud/.cache'] = {
'owner': 'www-data',
'group': 'www-data',
}
files['/etc/nextcloud/CAN_INSTALL'] = {
'content': '',
'owner': 'www-data',
'group': 'www-data',
'mode': '640',
'needs': [
'directory:/etc/nextcloud',
],
}
files['/etc/nextcloud/managed.config.php'] = {
'content_type': 'mako',
'owner': 'www-data',
'group': 'www-data',
'mode': '640',
'context': {
'db_password': node.metadata.get('postgresql/roles/nextcloud/password'),
},
'needs': [
'directory:/etc/nextcloud',
],
}
actions['symlink_/opt/nextcloud/config'] = {
'command': f'ln -s /etc/nextcloud /opt/nextcloud/config && chown www-data:www-data /opt/nextcloud/config',
'unless': 'readlink /opt/nextcloud/config | grep -q /etc/nextcloud',
'needs': [
'action:delete_nextcloud',
'directory:/etc/nextcloud',
],
}
actions['symlink_/opt/nextcloud/userapps'] = {
'command': f'ln -s /var/lib/nextcloud/.apps /opt/nextcloud/userapps && chown www-data:www-data /opt/nextcloud/userapps',
'unless': 'readlink /opt/nextcloud/userapps | grep -q /var/lib/nextcloud/.apps',
'needs': [
'action:delete_nextcloud',
'directory:/var/lib/nextcloud/.apps',
],
}
# SETUP
actions['install_nextcloud'] = {
'command': occ(
'maintenance:install',
no_interaction=None,
database='pgsql',
database_name='nextcloud',
database_host='localhost',
database_user='nextcloud',
database_pass=node.metadata.get('postgresql/roles/nextcloud/password'),
admin_user='admin',
admin_pass=node.metadata.get('nextcloud/admin_pass'),
data_dir='/var/lib/nextcloud',
),
'unless': occ('status') + ' | grep -q "installed: true"',
'needs': [
'directory:/etc/nextcloud',
'directory:/opt/nextcloud',
'directory:/var/lib/nextcloud',
'directory:/var/lib/nextcloud/.apps',
'directory:/var/lib/nextcloud/.cache',
'file:/etc/nextcloud/CAN_INSTALL',
'file:/etc/nextcloud/managed.config.php',
'action:extract_nextcloud',
'action:symlink_/opt/nextcloud/userapps',
'action:symlink_/opt/nextcloud/config',
'postgres_db:nextcloud',
],
}
# UPGRADE
actions['upgrade_nextcloud'] = {
'command': occ('upgrade'),
'unless': occ('status') + f' | grep -q "versionstring: {version}"',
'needs': [
'action:install_nextcloud',
],
}
actions['nextcloud_add_missing_inidces'] = {
'command': occ('db:add-missing-indices'),
'needs': [
'action:upgrade_nextcloud',
],
'triggered': True,
'triggered_by': [
f'action:extract_nextcloud',
f'action:upgrade_nextcloud',
],
}

View file

@ -1,72 +0,0 @@
import string
from uuid import UUID
defaults = {
'apt': {
'packages': {
'php': {},
'php-curl': {},
'php-gd': {},
'php-json': {},
'php-xml': {},
'php-mbstring': {},
'php-cli': {},
'php-cgi': {},
'php-zip': {},
},
},
'archive': {
'paths': {
'/var/lib/nextcloud': {
'exclude': [
'^appdata_',
'^updater-',
'^nextcloud\.log',
'^updater\.log',
'^[^/]+/cache',
'^[^/]+/files_versions',
'^[^/]+/files_trashbin',
],
},
},
},
'backup': {
'paths': [
'/etc/nextcloud/config.php',
],
},
'nextcloud': {
'admin_user': 'admin',
'admin_pass': repo.vault.password_for(f'{node.name} nextcloud admin pw'),
},
'nginx': {
'vhosts': {
'nextcloud': {
'webroot': '/opt/nextcloud',
'php': True,
},
},
},
'postgresql': {
'roles': {
'nextcloud': {
'password': repo.vault.password_for(f'{node.name} nextcloud db pw'),
},
},
'databases': {
'nextcloud': {
'owner': 'nextcloud',
},
},
},
'zfs': {
'datasets': {
'tank/nextcloud': {
'mountpoint': '/var/lib/nextcloud',
'needed_by': [
'bundle:nextcloud',
],
},
},
},
}

View file

@ -51,6 +51,9 @@ server {
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains"; add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
% endif % endif
resolver 8.8.8.8 8.8.4.4 valid=300s;
resolver_timeout 5s;
access_log /var/log/nginx/access-${vhost}.log; access_log /var/log/nginx/access-${vhost}.log;
error_log /var/log/nginx/error-${vhost}.log; error_log /var/log/nginx/error-${vhost}.log;

View file

@ -69,7 +69,6 @@ for vhost, config in node.metadata.get('nginx/vhosts', {}).items():
'create_access_log': config.get('access_log', node.metadata.get('nginx/access_log', False)), 'create_access_log': config.get('access_log', node.metadata.get('nginx/access_log', False)),
'php_version': node.metadata.get('php/version', ''), 'php_version': node.metadata.get('php/version', ''),
'vhost': vhost, 'vhost': vhost,
'nameservers': node.metadata.get('nameservers'),
**config, **config,
}, },
'needs': set(), 'needs': set(),

View file

@ -1,4 +1,4 @@
from ipaddress import ip_interface from bundlewrap.metadata import atomic
defaults = { defaults = {
'apt': { 'apt': {
@ -12,30 +12,6 @@ defaults = {
} }
@metadata_reactor.provides(
'dns',
)
def dns(metadata):
    """Collect A/AAAA records for every nginx vhost domain.

    Each vhost domain resolves to all of the node's configured IPv4
    (A) and IPv6 (AAAA) addresses, with prefix lengths stripped.
    """
    networks = metadata.get('network').values()
    records = {}
    for vhost_config in metadata.get('nginx/vhosts', {}).values():
        records[vhost_config['domain']] = {
            'A': [
                str(ip_interface(net['ipv4']).ip)
                for net in networks
                if 'ipv4' in net
            ],
            'AAAA': [
                str(ip_interface(net['ipv6']).ip)
                for net in networks
                if 'ipv6' in net
            ],
        }
    return {
        'dns': records,
    }
@metadata_reactor.provides( @metadata_reactor.provides(
'letsencrypt/domains', 'letsencrypt/domains',
'letsencrypt/reload_after', 'letsencrypt/reload_after',

View file

@ -1,3 +0,0 @@
% for domain in domains:
mail._domainkey.${domain} ${domain}:mail:/etc/opendkim/keys/${domain}/mail.private
% endfor

View file

@ -1,15 +0,0 @@
Mode sv
SignatureAlgorithm rsa-sha256
Canonicalization relaxed/simple
KeyTable refile:/etc/opendkim/key_table
SigningTable refile:/etc/opendkim/signing_table
UMask 007
UserID opendkim:opendkim
PidFile /run/opendkim/opendkim.pid
Socket inet:8891@localhost
Syslog yes
SyslogSuccess Yes
SyslogFacility mail
LogWhy Yes

View file

@ -1,3 +0,0 @@
% for domain in domains:
*@${domain} mail._domainkey.${domain}
% endfor

View file

@ -1,86 +0,0 @@
file_attributes = {
'owner': 'opendkim',
'group': 'opendkim',
'mode': '700',
'triggers': [
'svc_systemd:opendkim:restart',
],
}
groups['opendkim'] = {}
users['opendkim'] = {}
directories = {
'/etc/opendkim': {
**file_attributes,
'purge' : True,
},
'/etc/opendkim/keys': {
**file_attributes,
'purge' : True,
},
}
files = {
'/etc/opendkim.conf': {
**file_attributes,
},
'/etc/defaults/opendkim': {
# https://metadata.ftp-master.debian.org/changelogs//main/o/opendkim/testing_opendkim.NEWS
'delete': True,
},
'/etc/opendkim/key_table': {
'content_type': 'mako',
'context': {
'domains': node.metadata.get('mailserver/domains'),
},
**file_attributes,
},
'/etc/opendkim/signing_table': {
'content_type': 'mako',
'context': {
'domains': node.metadata.get('mailserver/domains'),
},
**file_attributes,
},
}
for domain in node.metadata.get('mailserver/domains'):
directories[f'/etc/opendkim/keys/{domain}'] = {
**file_attributes,
'purge': True,
}
files[f'/etc/opendkim/keys/{domain}/mail.private'] = {
**file_attributes,
'content': node.metadata.get(f'opendkim/keys/{domain}/private'),
}
# files[f'/etc/opendkim/keys/{domain}/mail.txt'] = {
# **file_attributes,
# 'content_type': 'any',
# }
# actions[f'generate_{domain}_dkim_key'] = {
# 'command': (
# f'sudo --user opendkim'
# f' opendkim-genkey'
# f' --selector=mail'
# f' --directory=/etc/opendkim/keys/{domain}'
# f' --domain={domain}'
# ),
# 'unless': f'test -f /etc/opendkim/keys/{domain}/mail.private',
# 'needs': [
# 'svc_systemd:opendkim',
# f'directory:/etc/opendkim/keys/{domain}',
# ],
# 'triggers': [
# 'svc_systemd:opendkim:restart',
# ],
# }
svc_systemd['opendkim'] = {
'needs': [
'pkg_apt:opendkim',
'file:/etc/opendkim.conf',
'file:/etc/opendkim/key_table',
'file:/etc/opendkim/signing_table',
],
}

View file

@ -1,93 +0,0 @@
from os.path import join, exists
from re import sub
from cryptography.hazmat.primitives import serialization as crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as crypto_default_backend
defaults = {
'apt': {
'packages': {
'opendkim': {},
'opendkim-tools': {},
},
},
'opendkim': {
'keys': {},
},
'dns': {
'mail._domainkey.mail2.sublimity.de': {
'TXT': [
]
}
}
}
@metadata_reactor.provides(
    'opendkim/keys',
)
def keys(metadata):
    """Load — and generate on first use — a DKIM RSA keypair per mail domain.

    Keys live in data/dkim/ inside the repo: the public half as plain
    text, the private half vault-encrypted. Domains already present in
    opendkim/keys (e.g. set by another reactor run) are skipped.
    """
    keys = {}
    for domain in metadata.get('mailserver/domains'):
        if domain in metadata.get(f'opendkim/keys'):
            continue
        pubkey_path = join(repo.path, 'data', 'dkim', f'{domain}.pubkey')
        privkey_path = join(repo.path, 'data', 'dkim', f'{domain}.privkey.enc')
        # Generate a fresh 2048-bit keypair if either half is missing.
        if not exists(pubkey_path) or not exists(privkey_path):
            key = rsa.generate_private_key(
                backend=crypto_default_backend(),
                public_exponent=65537,
                key_size=2048
            )
            # NOTE(review): the public key is serialized in OpenSSH format,
            # but the dns() reactor below only strips the 'ssh-rsa ' prefix
            # for the DKIM p= value — DKIM expects base64 DER
            # (SubjectPublicKeyInfo); confirm the published record validates.
            with open(pubkey_path, 'w') as file:
                file.write(
                    key.public_key().public_bytes(
                        crypto_serialization.Encoding.OpenSSH,
                        crypto_serialization.PublicFormat.OpenSSH
                    ).decode()
                )
            # Private key is stored PKCS8/PEM, encrypted with the repo vault.
            with open(privkey_path, 'w') as file:
                file.write(
                    repo.vault.encrypt(
                        key.private_bytes(
                            crypto_serialization.Encoding.PEM,
                            crypto_serialization.PrivateFormat.PKCS8,
                            crypto_serialization.NoEncryption()
                        ).decode(),
                    )
                )
        # Re-read both halves from disk; decrypt the private key for metadata.
        with open(pubkey_path, 'r') as pubkey:
            with open(privkey_path, 'r') as privkey:
                keys[domain] = {
                    'public': pubkey.read(),
                    'private': repo.vault.decrypt(privkey.read()),
                }
    return {
        'opendkim': {
            'keys': keys,
        }
    }
@metadata_reactor.provides(
'dns',
)
def dns(metadata):
    """Publish each DKIM public key as a mail._domainkey TXT record.

    The stored key is OpenSSH-formatted; only its 'ssh-rsa ' prefix is
    dropped before embedding it in the DKIM record's p= tag.
    """
    return {
        'dns': {
            f'mail._domainkey.{domain}': {
                'TXT': ['v=DKIM1; k=rsa; p=' + keypair['public'].removeprefix('ssh-rsa ')],
            }
            for domain, keypair in metadata.get('opendkim/keys').items()
        },
    }

View file

@ -1,7 +1,7 @@
version = node.metadata.get('php/version') version = node.metadata.get('php/version')
php_ini_context = { php_ini_context = {
'num_cpus': node.metadata.get('vm/cores'), 'num_cpus': node.metadata.get('vm/cpu'),
'post_max_size': node.metadata.get('php/post_max_size', 10), 'post_max_size': node.metadata.get('php/post_max_size', 10),
} }

View file

@ -1,53 +1,63 @@
# See /usr/share/postfix/main.cf.dist for a commented, more complete version
# Debian specific: Specifying a file name will cause the first
# line of that file to be used as the name. The Debian default
# is /etc/mailname.
#myorigin = /etc/mailname
smtpd_banner = $myhostname ESMTP $mail_name (Debian/GNU) smtpd_banner = $myhostname ESMTP $mail_name (Debian/GNU)
biff = no biff = no
# appending .domain is the MUA's job.
append_dot_mydomain = no append_dot_mydomain = no
# Uncomment the next line to generate "delayed mail" warnings
#delay_warning_time = 4h
readme_directory = no readme_directory = no
compatibility_level = 2
# TLS parameters
smtpd_tls_cert_file = /etc/letsencrypt/live/mail.sublimity.de/fullchain.pem
smtpd_tls_key_file = /etc/letsencrypt/live/mail.sublimity.de/privkey.pem
smtpd_use_tls=yes smtpd_use_tls=yes
<%text> <%text>
smtpd_tls_session_cache_database = btree:${data_directory}/smtpd_scache smtpd_tls_session_cache_database = btree:${data_directory}/smtpd_scache
smtp_tls_session_cache_database = btree:${data_directory}/smtp_scache smtp_tls_session_cache_database = btree:${data_directory}/smtp_scache
</%text> </%text>
# See /usr/share/doc/postfix/TLS_README.gz in the postfix-doc package for
# information on enabling SSL in the smtp client.
smtpd_relay_restrictions = permit_mynetworks permit_sasl_authenticated defer_unauth_destination smtpd_relay_restrictions = permit_mynetworks permit_sasl_authenticated defer_unauth_destination
myhostname = debian-2gb-nbg1-1 myhostname = mail.sublimity.de
alias_maps = hash:/etc/aliases alias_maps = hash:/etc/aliases
alias_database = hash:/etc/aliases alias_database = hash:/etc/aliases
myorigin = /etc/mailname myorigin = /etc/mailname
mydestination = $myhostname, debian-2gb-nbg1-1, localhost.localdomain, localhost mydestination = mail.sublimity.de, localhost.localdomain, localhost
relayhost = relayhost =
mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128 mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128
mailbox_size_limit = 0 mailbox_size_limit = 0
recipient_delimiter = + recipient_delimiter = +
inet_interfaces = all inet_interfaces = all
inet_protocols = all inet_protocols = all
virtual_mailbox_domains = psql:/etc/postfix/virtual_domains.cf
virtual_mailbox_domains = pgsql:/etc/postfix/virtual_mailbox_domains.cf virtual_mailbox_maps = psql:/etc/postfix/virtual_mailboxes.cf
virtual_mailbox_maps = pgsql:/etc/postfix/virtual_mailbox_maps.cf virtual_alias_maps = psql:/etc/postfix/virtual_aliases.cf,psql:/etc/postfix/virtual_mailboxes.cf
virtual_alias_maps = pgsql:/etc/postfix/virtual_alias_maps.cf,pgsql:/etc/postfix/virtual_mailbox_maps.cf
smtpd_sender_login_maps = pgsql:/etc/postfix/virtual_alias_maps.cf
virtual_transport = lmtp:unix:private/dovecot-lmtp virtual_transport = lmtp:unix:private/dovecot-lmtp
smtpd_sasl_type = dovecot smtpd_sasl_type = dovecot
smtpd_sasl_path = private/auth smtpd_sasl_path = private/auth
smtpd_sasl_auth_enable = yes smtpd_sasl_auth_enable = yes
smtpd_tls_security_level = may smtpd_tls_security_level = may
smtpd_tls_auth_only = yes smtpd_tls_auth_only = yes
smtpd_tls_cert_file = /var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')}/fullchain.pem message_size_limit = 1000000000
smtpd_tls_key_file = /var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')}/privkey.pem smtpd_milters = unix:/spamass/spamass.sock
smtp_tls_security_level = may milter_connect_macros = i j {daemon_name} v {if_name} _
policyd-spf_time_limit = 3600
smtpd_restriction_classes = mua_sender_restrictions, mua_client_restrictions, mua_helo_restrictions smtpd_recipient_restrictions = permit_mynetworks,reject_unauth_destination,check_policy_service unix:private/policyd-spf
mua_client_restrictions = permit_sasl_authenticated, reject # reject_rbl_client dnsbl.sorbs.net
mua_sender_restrictions = permit_sasl_authenticated, reject # reject_rbl_client bl.spamcop.net
mua_helo_restrictions = permit_mynetworks, reject_non_fqdn_hostname, reject_invalid_hostname, permit # reject_rbl_client zen.spamhaus.org
# reject_rbl_client dnsbl1.uceprotect.net
smtpd_milters = inet:localhost:8891 inet:127.0.0.1:11332 # reject_unauth_destination
non_smtpd_milters = inet:localhost:8891 inet:127.0.0.1:11332
# opendkim
milter_protocol = 6
milter_default_action = accept
# rspamd
milter_mail_macros = "i {mail_addr} {client_addr} {client_name} {auth_authen}"

View file

@ -1,37 +1,124 @@
#
# Postfix master process configuration file. For details on the format
# of the file, see the master(5) manual page (command: "man 5 master" or
# on-line: http://www.postfix.org/master.5.html).
#
# Do not forget to execute "postfix reload" after editing this file.
#
# ========================================================================== # ==========================================================================
# service type private unpriv chroot wakeup maxproc command + args # service type private unpriv chroot wakeup maxproc command + args
# (yes) (yes) (no) (never) (100) # (yes) (yes) (yes) (never) (100)
# ========================================================================== # ==========================================================================
smtp inet n - y - - smtpd smtp inet n - - - - smtpd
pickup unix n - y 60 1 pickup #smtp inet n - - - 1 postscreen
cleanup unix n - y - 0 cleanup #smtpd pass - - - - - smtpd
#dnsblog unix - - - - 0 dnsblog
#tlsproxy unix - - - - 0 tlsproxy
#submission inet n - - - - smtpd
# -o syslog_name=postfix/submission
# -o smtpd_tls_security_level=encrypt
# -o smtpd_sasl_auth_enable=yes
# -o smtpd_reject_unlisted_recipient=no
# -o smtpd_client_restrictions=$mua_client_restrictions
# -o smtpd_helo_restrictions=$mua_helo_restrictions
# -o smtpd_sender_restrictions=$mua_sender_restrictions
# -o smtpd_recipient_restrictions=
# -o smtpd_relay_restrictions=permit_sasl_authenticated,reject
# -o milter_macro_daemon_name=ORIGINATING
submission inet n - - - - smtpd
-o syslog_name=postfix/submission
-o smtpd_tls_security_level=encrypt
-o smtpd_sasl_auth_enable=yes
-o smtpd_client_restrictions=permit_sasl_authenticated,reject
#smtps inet n - - - - smtpd
# -o syslog_name=postfix/smtps
# -o smtpd_tls_wrappermode=yes
# -o smtpd_sasl_auth_enable=yes
# -o smtpd_reject_unlisted_recipient=no
# -o smtpd_client_restrictions=$mua_client_restrictions
# -o smtpd_helo_restrictions=$mua_helo_restrictions
# -o smtpd_sender_restrictions=$mua_sender_restrictions
# -o smtpd_recipient_restrictions=
# -o smtpd_relay_restrictions=permit_sasl_authenticated,reject
# -o milter_macro_daemon_name=ORIGINATING
#628 inet n - - - - qmqpd
pickup unix n - - 60 1 pickup
cleanup unix n - - - 0 cleanup
qmgr unix n - n 300 1 qmgr qmgr unix n - n 300 1 qmgr
tlsmgr unix - - y 1000? 1 tlsmgr #qmgr unix n - n 300 1 oqmgr
rewrite unix - - y - - trivial-rewrite tlsmgr unix - - - 1000? 1 tlsmgr
bounce unix - - y - 0 bounce rewrite unix - - - - - trivial-rewrite
defer unix - - y - 0 bounce bounce unix - - - - 0 bounce
trace unix - - y - 0 bounce defer unix - - - - 0 bounce
verify unix - - y - 1 verify trace unix - - - - 0 bounce
flush unix n - y 1000? 0 flush verify unix - - - - 1 verify
flush unix n - - 1000? 0 flush
proxymap unix - - n - - proxymap proxymap unix - - n - - proxymap
proxywrite unix - - n - 1 proxymap proxywrite unix - - n - 1 proxymap
smtp unix - - y - - smtp smtp unix - - - - - smtp
relay unix - - y - - smtp relay unix - - - - - smtp
-o syslog_name=postfix/$service_name # -o smtp_helo_timeout=5 -o smtp_connect_timeout=5
showq unix n - y - - showq showq unix n - - - - showq
error unix - - y - - error error unix - - - - - error
retry unix - - y - - error retry unix - - - - - error
discard unix - - y - - discard discard unix - - - - - discard
local unix - n n - - local local unix - n n - - local
virtual unix - n n - - virtual virtual unix - n n - - virtual
lmtp unix - - y - - lmtp lmtp unix - - - - - lmtp
anvil unix - - y - 1 anvil anvil unix - - - - 1 anvil
scache unix - - y - 1 scache scache unix - - - - 1 scache
postlog unix-dgram n - n - 1 postlogd #
# ====================================================================
# Interfaces to non-Postfix software. Be sure to examine the manual
# pages of the non-Postfix software to find out what options it wants.
#
# Many of the following services use the Postfix pipe(8) delivery
# agent. See the pipe(8) man page for information about ${recipient}
# and other message envelope options.
# ====================================================================
#
# maildrop. See the Postfix MAILDROP_README file for details.
# Also specify in main.cf: maildrop_destination_recipient_limit=1
#
maildrop unix - n n - - pipe maildrop unix - n n - - pipe
flags=DRhu user=vmail argv=/usr/bin/maildrop -d ${recipient} flags=DRhu user=vmail argv=/usr/bin/maildrop -d ${recipient}
#
# ====================================================================
#
# Recent Cyrus versions can use the existing "lmtp" master.cf entry.
#
# Specify in cyrus.conf:
# lmtp cmd="lmtpd -a" listen="localhost:lmtp" proto=tcp4
#
# Specify in main.cf one or more of the following:
# mailbox_transport = lmtp:inet:localhost
# virtual_transport = lmtp:inet:localhost
#
# ====================================================================
#
# Cyrus 2.1.5 (Amos Gouaux)
# Also specify in main.cf: cyrus_destination_recipient_limit=1
#
#cyrus unix - n n - - pipe
# user=cyrus argv=/cyrus/bin/deliver -e -r ${sender} -m ${extension} ${user}
#
# ====================================================================
# Old example of delivery via Cyrus.
#
#old-cyrus unix - n n - - pipe
# flags=R user=cyrus argv=/cyrus/bin/deliver -e -m ${extension} ${user}
#
# ====================================================================
#
# See the Postfix UUCP_README file for configuration details.
#
uucp unix - n n - - pipe uucp unix - n n - - pipe
flags=Fqhu user=uucp argv=uux -r -n -z -a$sender - $nexthop!rmail ($recipient) flags=Fqhu user=uucp argv=uux -r -n -z -a$sender - $nexthop!rmail ($recipient)
#
# Other external delivery methods.
#
ifmail unix - n n - - pipe ifmail unix - n n - - pipe
flags=F user=ftn argv=/usr/lib/ifmail/ifmail -r $nexthop ($recipient) flags=F user=ftn argv=/usr/lib/ifmail/ifmail -r $nexthop ($recipient)
bsmtp unix - n n - - pipe bsmtp unix - n n - - pipe
@ -41,15 +128,5 @@ scalemail-backend unix - n n - 2 pipe
mailman unix - n n - - pipe mailman unix - n n - - pipe
flags=FR user=list argv=/usr/lib/mailman/bin/postfix-to-mailman.py flags=FR user=list argv=/usr/lib/mailman/bin/postfix-to-mailman.py
${nexthop} ${user} ${nexthop} ${user}
submission inet n - y - - smtpd policyd-spf unix - n n - 0 spawn
-o syslog_name=postfix/submission user=policyd-spf argv=/usr/bin/policyd-spf
-o smtpd_tls_security_level=encrypt
-o smtpd_sasl_auth_enable=yes
-o smtpd_tls_auth_only=yes
-o smtpd_reject_unlisted_recipient=no
-o smtpd_client_restrictions=$mua_client_restrictions
-o smtpd_helo_restrictions=$mua_helo_restrictions
-o smtpd_sender_restrictions=$mua_sender_restrictions
-o smtpd_recipient_restrictions=
-o smtpd_relay_restrictions=permit_sasl_authenticated,reject
-o milter_macro_daemon_name=ORIGINATING

View file

@ -2,4 +2,4 @@ hosts = ${host}
dbname = ${name} dbname = ${name}
user = ${user} user = ${user}
password = ${password} password = ${password}
query = SELECT redirect FROM users LEFT JOIN domains ON users.domain_id = domains.id WHERE redirect IS NOT NULL AND users.name = '%u' AND domains.name = '%d' query = SELECT redirect FROM users LEFT JOIN domains ON users.domain_id = domains.id WHERE redirect IS NOT NULL AND users.name = SPLIT_PART('%s', '@', 1) AND domains.name = SPLIT_PART('%s', '@', 2)

View file

@ -2,4 +2,4 @@ hosts = ${host}
dbname = ${name} dbname = ${name}
user = ${user} user = ${user}
password = ${password} password = ${password}
query = SELECT CONCAT(users.name, '@', domains.name) AS email FROM users LEFT JOIN domains ON users.domain_id = domains.id WHERE redirect IS NULL AND users.name = '%u' AND domains.name = '%d' query = SELECT CONCAT(users.name, '@', domains.name) AS email FROM users LEFT JOIN domains ON users.domain_id = domains.id WHERE redirect IS NULL AND users.name = SPLIT_PART('%s', '@', 1) AND domains.name = SPLIT_PART('%s', '@', 2)

View file

@ -40,13 +40,6 @@ svc_systemd['postfix'] = {
], ],
} }
actions['test_postfix_config'] = {
'command': 'false',
'unless': "postconf check | grep -v 'symlink leaves directory' | wc -l | grep -q '^0$'",
'needs': [
'svc_systemd:postfix',
],
}
actions['test_virtual_mailbox_domains'] = { actions['test_virtual_mailbox_domains'] = {
'command': 'false', 'command': 'false',
'unless': "postmap -q example.com pgsql:/etc/postfix/virtual_mailbox_domains.cf | grep -q '^example.com$'", 'unless': "postmap -q example.com pgsql:/etc/postfix/virtual_mailbox_domains.cf | grep -q '^example.com$'",

View file

@ -5,11 +5,6 @@ defaults = {
'postfix-pgsql': {}, 'postfix-pgsql': {},
} }
}, },
'backup': {
'paths': [
'/var/vmail',
],
},
'letsencrypt': { 'letsencrypt': {
'reload_after': { 'reload_after': {
'postfix', 'postfix',

View file

@ -1,14 +1,4 @@
defaults = { defaults = {
'apt': {
'packages': {
'postgresql': {},
},
},
'backup': {
'paths': [
'/var/lib/postgresql',
],
},
'postgresql': { 'postgresql': {
'roles': { 'roles': {
'root': { 'root': {
@ -18,6 +8,11 @@ defaults = {
}, },
'databases': {}, 'databases': {},
}, },
'apt': {
'packages': {
'postgresql': {},
},
},
} }
if node.has_bundle('zfs'): if node.has_bundle('zfs'):

View file

@ -1,7 +0,0 @@
defaults = {
'apt': {
'packages': {
'redis-server': {},
},
},
}

View file

@ -1,3 +0,0 @@
% for ip in sorted(node.metadata.get('rspamd/ignore_spam_check_for_ips', set())):
${ip}
% endfor

View file

@ -1 +0,0 @@
backend = "redis";

View file

@ -1,2 +0,0 @@
systemd = true;
type = "console";

View file

@ -1,2 +0,0 @@
use = ["x-spamd-bar", "x-spam-level", "authentication-results"];
authenticated_headers = ["authentication-results"];

View file

@ -1,6 +0,0 @@
IP_WHITELIST {
type = "ip";
prefilter = true;
map = "/etc/rspamd/local.d/ip_whitelist.map";
action = "accept";
}

View file

@ -1 +0,0 @@
servers = "127.0.0.1";

View file

@ -1 +0,0 @@
bind_socket = "localhost:11333";

View file

@ -1,7 +0,0 @@
bind_socket = "localhost:11332";
milter = yes;
timeout = 120s;
upstream "local" {
default = yes;
self_scan = yes;
}

View file

@ -1,6 +0,0 @@
clamav {
servers = "/run/clamav/clamd.ctl";
action = "reject";
type = "clamav";
symbol = "CLAM_VIRUS";
}

View file

@ -1 +0,0 @@
password = "${node.metadata.get('rspamd/web_password')}";

View file

@ -1,66 +0,0 @@
from os import listdir
from os.path import join
repo.libs.tools.require_bundle(node, 'redis', 'rspamd does not work without a redis cache')
directories = {
'/etc/rspamd/local.d': {
'purge': True,
'needs': {
'pkg_apt:rspamd',
},
'triggers': {
'svc_systemd:rspamd:restart',
},
},
'/etc/rspamd/override.d': {
'purge': True,
'needs': {
'pkg_apt:rspamd',
},
'triggers': {
'svc_systemd:rspamd:restart',
},
},
}
files = {
'/etc/rspamd/local.d/ip_whitelist.map': {
'content_type': 'mako',
'triggers': {
'svc_systemd:rspamd:restart',
},
},
'/etc/rspamd/local.d/worker-controller.inc': {
'content_type': 'mako',
'triggers': {
'svc_systemd:rspamd:restart',
},
}
}
local_config_path = join(repo.path, 'bundles', 'rspamd', 'files', 'local.d')
for f in listdir(local_config_path):
files[f'/etc/rspamd/local.d/{f}'] = {
'source': f'local.d/{f}',
'triggers': {
'svc_systemd:rspamd:restart',
},
}
override_config_path = join(repo.path, 'bundles', 'rspamd', 'files', 'override.d')
for f in listdir(override_config_path):
files[f'/etc/rspamd/override.d/{f}'] = {
'source': f'override.d/{f}',
'triggers': {
'svc_systemd:rspamd:restart',
},
}
svc_systemd = {
'rspamd': {
'needs': {
'pkg_apt:rspamd',
},
},
}

View file

@ -1,15 +0,0 @@
# Packages required for spam filtering (rspamd) plus virus scanning (clamav).
_packages = (
    'clamav',
    'clamav-daemon',
    'clamav-freshclam',
    'clamav-unofficial-sigs',
    'rspamd',
)

defaults = {
    'apt': {
        'packages': {pkg: {} for pkg in _packages},
    },
    'rspamd': {
        # Password for the rspamd web UI, generated via the repo vault.
        'web_password': repo.vault.password_for(f'{node.name} rspamd web password'),
        # IPs exempted from spam checking; empty by default.
        'ignore_spam_check_for_ips': [],
    },
}

View file

@ -1,3 +1,3 @@
% for nameserver in sorted(node.metadata.get('nameservers')): % for nameserver in sorted(node.metadata.get('nameservers', {'9.9.9.10', '2620:fe::10'})):
nameserver ${nameserver} nameserver ${nameserver}
% endfor % endfor

View file

@ -0,0 +1,50 @@
<%
    from ipaddress import ip_network
    # NOTE(review): ip_network appears unused in this template — confirm
    # before removing the import.
%>\
## systemd.network(5) unit for a statically configured interface.
## Context supplied by the bundle: `interface` (name) and `config`
## (per-interface dict from node metadata).
[Match]
Name=${interface}
% for addr in sorted(config.get('ips', set())):
[Address]
<%
    # Addresses may carry an explicit prefix length ("a.b.c.d/nn");
    # a bare address falls back to /32.
    if '/' in addr:
        ip, prefix = addr.split('/')
    else:
        ip = addr
        prefix = '32'
%>\
Address=${ip}/${prefix}
% endfor
## One [Route] section per static route; gateway is optional.
% for route, rconfig in sorted(config.get('routes', {}).items()):
[Route]
% if 'via' in rconfig:
Gateway=${rconfig['via']}
% endif
Destination=${route}
GatewayOnlink=yes
% endfor
## Optional default gateways per address family.
% if 'gateway4' in config:
[Route]
Gateway=${config['gateway4']}
GatewayOnlink=yes
% endif
% if 'gateway6' in config:
[Route]
Gateway=${config['gateway6']}
GatewayOnlink=yes
% endif
## Static config only: no DHCP, no IPv6 router advertisements.
[Network]
DHCP=no
IPv6AcceptRA=no
% if config.get('forwarding', False):
IPForward=yes
%endif
% for vlan in sorted(config.get('vlans', set())):
VLAN=${interface}.${vlan}
% endfor

View file

@ -1,14 +1,24 @@
assert node.has_bundle('systemd') assert node.has_bundle('systemd')
from bundlewrap.exceptions import BundleError
files = { files = {
'/etc/network/interfaces': { '/etc/network/interfaces': {
'delete': True, 'delete': True,
}, },
} }
files['/etc/resolv.conf'] = { if node.metadata.get('systemd-networkd/enable-resolved', False):
'content_type': 'mako', symlinks['/etc/resolv.conf'] = {
} 'target': '/run/systemd/resolve/stub-resolv.conf',
}
svc_systemd['systemd-resolved'] = {}
else:
files['/etc/resolv.conf'] = {
'content_type': 'mako',
}
directories = { directories = {
'/etc/systemd/network': { '/etc/systemd/network': {
@ -16,13 +26,49 @@ directories = {
}, },
} }
for type, path in { mac_host_prefix = '%04x' % (node.magic_number % 65534)
'networks': '/etc/systemd/network/{}.network', generated_mac = f'52:54:00:{mac_host_prefix[0:2]}:{mac_host_prefix[2:4]}:{{}}'
'netdevs': '/etc/systemd/network/{}.netdev',
}.items(): # Don't use .get() here. We might end up with a node without a network
for name, config in node.metadata.get(f'systemd-networkd/{type}').items(): # config!
files[path.format(name)] = { for interface, config in node.metadata['interfaces'].items():
'content': repo.libs.systemd.generate_unitfile(config), if config.get('dhcp', False):
if 'vlans' in config:
raise BundleError(f'{node.name} interface {interface} cannot use vlans and dhcp!')
template = 'template-iface-dhcp.network'
else:
template = 'template-iface-nodhcp.network'
if '.' in interface:
vlan_id = int(interface.split('.')[1])
vlan_hex = '%02x' % (vlan_id % 255)
files['/etc/systemd/network/60-iface-{}.netdev'.format(interface)] = {
'source': 'template-iface-vlan.netdev',
'content_type': 'mako',
'context': {
'interface': interface,
'vlan': vlan_id,
'mac': generated_mac.format(vlan_hex)
},
'needed_by': {
'svc_systemd:systemd-networkd',
},
'triggers': {
'svc_systemd:systemd-networkd:restart',
},
}
weight = 61
else:
weight = 50
if not config.get('ignore', False):
files['/etc/systemd/network/{}-iface-{}.network'.format(weight, interface)] = {
'source': template,
'content_type': 'mako',
'context': {
'interface': interface,
'config': config,
},
'needed_by': { 'needed_by': {
'svc_systemd:systemd-networkd', 'svc_systemd:systemd-networkd',
}, },
@ -31,6 +77,66 @@ for type, path in {
}, },
} }
for bond, config in node.metadata.get('systemd-networkd/bonds', {}).items():
files['/etc/systemd/network/20-bond-{}.netdev'.format(bond)] = {
'source': 'template-bond.netdev',
'content_type': 'mako',
'context': {
'bond': bond,
'mode': config.get('mode', '802.3ad'),
'prio': config.get('priority', '32768'),
},
'needed_by': {
'svc_systemd:systemd-networkd',
},
'triggers': {
'svc_systemd:systemd-networkd:restart',
},
}
files['/etc/systemd/network/21-bond-{}.network'.format(bond)] = {
'source': 'template-bond.network',
'content_type': 'mako',
'context': {
'bond': bond,
'match': config['match'],
},
'needed_by': {
'svc_systemd:systemd-networkd',
},
'triggers': {
'svc_systemd:systemd-networkd:restart',
},
}
for brname, config in node.metadata.get('systemd-networkd/bridges', {}).items():
files['/etc/systemd/network/30-bridge-{}.netdev'.format(brname)] = {
'source': 'template-bridge.netdev',
'content_type': 'mako',
'context': {
'bridge': brname,
},
'needed_by': {
'svc_systemd:systemd-networkd',
},
'triggers': {
'svc_systemd:systemd-networkd:restart',
},
}
files['/etc/systemd/network/31-bridge-{}.network'.format(brname)] = {
'source': 'template-bridge.network',
'content_type': 'mako',
'context': {
'bridge': brname,
'match': config['match'],
},
'needed_by': {
'svc_systemd:systemd-networkd',
},
'triggers': {
'svc_systemd:systemd-networkd:restart',
},
}
svc_systemd = { svc_systemd = {
'systemd-networkd': {}, 'systemd-networkd': {},
} }

View file

@ -6,8 +6,24 @@ defaults = {
}, },
}, },
}, },
'systemd-networkd': {
'netdevs': {},
'networks': {},
},
} }
@metadata_reactor.provides(
'interfaces',
)
def add_vlan_infos_to_interface(metadata):
interfaces = {}
for iface in metadata.get('interfaces', {}):
if not '.' in iface:
continue
interface,vlan = iface.split('.')
interfaces.setdefault(interface, {}).setdefault('vlans', set())
interfaces[interface]['vlans'].add(vlan)
return {
'interfaces': interfaces,
}

View file

@ -1,12 +1,8 @@
from mako.template import Template % for i, (segment, options) in enumerate(data.items()):
% if i > 0:
template = ''' % endif
% for segment, options in data.items(): [${segment}]
% if '#' in segment:
# ${segment.split('#', 2)[1]}
% endif
[${segment.split('#')[0]}]
% for option, value in options.items(): % for option, value in options.items():
% if isinstance(value, dict): % if isinstance(value, dict):
% for k, v in value.items(): % for k, v in value.items():
@ -16,12 +12,8 @@ ${option}=${k}=${v}
% for item in sorted(value): % for item in sorted(value):
${option}=${item} ${option}=${item}
% endfor % endfor
% else: % elif isinstance(value, str):
${option}=${str(value)} ${option}=${value}
% endif % endif
% endfor % endfor
% endfor % endfor
'''
def generate_unitfile(data):
return Template(template).render(data=data).lstrip()

View file

@ -12,7 +12,13 @@ actions = {
}, },
} }
for name, service in node.metadata.get('systemd/services').items(): for name, service in node.metadata.get('systemd', {}).get('services', {}).items():
# use set() in metadata
for enumerator in [
'preceded_by', 'needs', 'needed_by', 'triggers', 'triggered_by'
]:
assert isinstance(service.get(enumerator, set()), set)
# dont call a service 'service' explicitly # dont call a service 'service' explicitly
if name.endswith('.service'): if name.endswith('.service'):
raise Exception(name) raise Exception(name)
@ -28,17 +34,19 @@ for name, service in node.metadata.get('systemd/services').items():
# create unit file # create unit file
unit_path = f'/etc/systemd/system/{name}.service' unit_path = f'/etc/systemd/system/{name}.service'
files[unit_path] = { files[unit_path] = {
'content': repo.libs.systemd.generate_unitfile(content_data), 'source': 'unitfile',
'triggers': [ 'content_type': 'mako',
'context': {
'data': content_data,
},
'triggers': [
'action:systemd-reload', 'action:systemd-reload',
f'svc_systemd:{name}:restart', f'svc_systemd:{name}:restart',
], ],
} }
# service depends on unit file # service depends on unit file
service\ service.setdefault('needs', set()).add(f'file:{unit_path}')
.setdefault('needs', [])\
.append(f'file:{unit_path}')
# service # service
svc_systemd[name] = service svc_systemd[name] = service

View file

@ -1,5 +0,0 @@
defaults = {
'systemd': {
'services': {},
}
}

View file

@ -1,28 +1,35 @@
for group, config in node.metadata.get('groups', {}).items(): from os.path import join, exists
groups[group] = config
for name, config in node.metadata.get('users').items(): for group, attrs in node.metadata.get('groups', {}).items():
directories[config['home']] = { groups[group] = attrs
'owner': name,
'mode': '700', for username, attrs in node.metadata['users'].items():
home = attrs.get('home', '/home/{}'.format(username))
user = users.setdefault(username, {})
user['home'] = home
user['shell'] = attrs.get('shell', '/bin/bash')
if 'password' in attrs:
user['password'] = attrs['password']
else:
user['password_hash'] = 'x' if node.use_shadow_passwords else '*'
if 'groups' in attrs:
user['groups'] = attrs['groups']
directories[home] = {
'owner': username,
'mode': attrs.get('home-mode', '0700'),
} }
files[f"{config['home']}/.ssh/id_{config['keytype']}"] = { if 'ssh_pubkey' in attrs:
'content': config['privkey'] + '\n', files[home + '/.ssh/authorized_keys'] = {
'owner': name, 'content': '\n'.join(sorted(attrs['ssh_pubkey'])) + '\n',
'mode': '0600', 'owner': username,
} 'mode': '0600',
files[f"{config['home']}/.ssh/id_{config['keytype']}.pub"] = { }
'content': config['pubkey'] + '\n',
'owner': name,
'mode': '0600',
}
files[config['home'] + '/.ssh/authorized_keys'] = {
'content': '\n'.join(sorted(config['authorized_keys'])) + '\n',
'owner': name,
'mode': '0600',
}
users[name] = config elif not attrs.get('do_not_remove_authorized_keys_from_home', False):
for option in ['authorized_keys', 'privkey', 'pubkey', 'keytype']: files[home + '/.ssh/authorized_keys'] = {'delete': True}
users[name].pop(option, None)

View file

@ -1,42 +1,9 @@
from base64 import b64decode # defaults = {
# 'users': {
defaults = { # 'root': {
'users': { # 'home': '/root',
'root': { # 'shell': '/bin/bash',
'home': '/root', # 'password': repo.vault.human_password_for('root on {}'.format(node.name)),
}, # },
}, # },
} # }
@metadata_reactor.provides(
'users',
)
def user(metadata):
users = {}
for name, config in metadata.get('users').items():
users[name] = {
'authorized_keys': [],
}
if not 'full_name' in config:
users[name]['full_name'] = name
if not 'home' in config:
users[name]['home'] = f'/home/{name}'
if not 'shell' in config:
users[name]['shell'] = '/bin/bash'
if not 'privkey' in users[name] and not 'pubkey' in users[name]:
privkey, pubkey = repo.libs.ssh.generate_ed25519_key_pair(
b64decode(str(repo.vault.random_bytes_as_base64_for(f"{name}@{metadata.get('id')}", length=32)))
)
users[name]['keytype'] = 'ed25519'
users[name]['privkey'] = privkey
users[name]['pubkey'] = pubkey + f' {name}@{node.name}'
return {
'users': users,
}

View file

@ -0,0 +1,25 @@
[NetDev]
Name=wg0
Kind=wireguard
Description=WireGuard server
[WireGuard]
PrivateKey=${privatekey}
ListenPort=51820
% for peer, config in sorted(peers.items()):
# Peer ${peer}
[WireGuardPeer]
PublicKey=${config['pubkey']}
% if len(peers) == 1: # FIXME
AllowedIPs=${network}
% else:
AllowedIPs=${','.join(sorted(config['ips']))}
% endif
PresharedKey=${config['psk']}
% if 'endpoint' in config:
Endpoint=${config['endpoint']}
% endif
PersistentKeepalive=30
% endfor

View file

@ -1,3 +1,21 @@
from ipaddress import ip_network from ipaddress import ip_network
repo.libs.tools.require_bundle(node, 'systemd-networkd') repo.libs.tools.require_bundle(node, 'systemd-networkd')
network = ip_network(node.metadata['wireguard']['my_ip'], strict=False)
files = {
'/etc/systemd/network/wg0.netdev': {
'content_type': 'mako',
'context': {
'network': f'{network.network_address}/{network.prefixlen}',
**node.metadata['wireguard'],
},
'needs': {
'pkg_apt:wireguard',
},
'triggers': {
'svc_systemd:systemd-networkd:restart',
},
},
}

View file

@ -1,4 +1,4 @@
from ipaddress import ip_network, ip_interface from ipaddress import ip_network
from bundlewrap.exceptions import NoSuchNode from bundlewrap.exceptions import NoSuchNode
from bundlewrap.metadata import atomic from bundlewrap.metadata import atomic
@ -7,100 +7,36 @@ from bundlewrap.metadata import atomic
defaults = { defaults = {
'apt': { 'apt': {
'packages': { 'packages': {
'linux-headers-amd64': {}, 'wireguard': {},
'wireguard': {
'backports': True,
'needs': [
'pkg_apt:linux-headers-amd64',
],
'triggers': [
'svc_systemd:systemd-networkd:restart',
],
},
}, },
}, },
'wireguard': { 'wireguard': {
'privatekey': repo.vault.random_bytes_as_base64_for(f'{node.name} wireguard privatekey'), 'privatekey': repo.libs.keys.gen_privkey(repo, f'{node.name} wireguard privatekey'),
}, },
} }
@metadata_reactor.provides( @metadata_reactor.provides(
'systemd-networkd/networks', 'wireguard/peers',
) )
def systemd_networkd_networks(metadata): def peer_psks(metadata):
network = { peers = {}
'Match': {
'Name': 'wg0',
},
'Address': {
'Address': metadata.get('wireguard/my_ip'),
},
'Route': {
'Destination': str(ip_interface(metadata.get('wireguard/my_ip')).network),
'GatewayOnlink': 'yes',
},
'Network': {
'DHCP': 'no',
'IPForward': 'yes',
'IPv6AcceptRA': 'no',
},
}
for peer, config in metadata.get('wireguard/peers').items(): for peer_name in metadata.get('wireguard/peers', {}):
for route in config.get('route', []): peers[peer_name] = {}
network.update({
f'Route#{peer}_{route}': {
'Destination': route,
'Gateway': str(ip_interface(repo.get_node(peer).metadata.get(f'wireguard/my_ip')).ip),
'GatewayOnlink': 'yes',
}
})
return { if node.name < peer_name:
'systemd-networkd': { peers[peer_name] = {
'networks': { 'psk': repo.vault.random_bytes_as_base64_for(f'{node.name} wireguard {peer_name}'),
'wireguard': network,
},
},
}
@metadata_reactor.provides(
'systemd-networkd/netdevs',
)
def systemd_networkd_netdevs(metadata):
netdev = {
'NetDev': {
'Name': 'wg0',
'Kind': 'wireguard',
'Description': 'WireGuard server',
},
'WireGuard': {
'PrivateKey': metadata.get('wireguard/privatekey'),
'ListenPort': 51820,
},
}
for peer, config in metadata.get('wireguard/peers').items():
netdev.update({
f'WireGuardPeer#{peer}': {
'Endpoint': config['endpoint'],
'PublicKey': config['pubkey'],
'PresharedKey': config['psk'],
'AllowedIPs': ', '.join([
str(ip_interface(repo.get_node(peer).metadata.get(f'wireguard/my_ip')).ip),
*config.get('route', []),
]), # FIXME
'PersistentKeepalive': 30,
} }
}) else:
peers[peer_name] = {
'psk': repo.vault.random_bytes_as_base64_for(f'{peer_name} wireguard {node.name}'),
}
return { return {
'systemd-networkd': { 'wireguard': {
'netdevs': { 'peers': peers,
'wireguard': netdev,
},
}, },
} }
@ -108,24 +44,21 @@ def systemd_networkd_netdevs(metadata):
@metadata_reactor.provides( @metadata_reactor.provides(
'wireguard/peers', 'wireguard/peers',
) )
def peer_keys(metadata): def peer_pubkeys(metadata):
peers = {} peers = {}
for peer_name in metadata.get('wireguard/peers', {}): for peer_name in metadata.get('wireguard/peers', {}):
peer_node = repo.get_node(peer_name) try:
rnode = repo.get_node(peer_name)
first, second = sorted([node.name, peer_name]) except NoSuchNode:
psk = repo.vault.random_bytes_as_base64_for(f'{first} wireguard {second}') continue
pubkey = repo.libs.keys.get_pubkey_from_privkey(
f'{peer_name} wireguard pubkey',
peer_node.metadata.get('wireguard/privatekey'),
)
peers[peer_name] = { peers[peer_name] = {
'psk': psk, 'pubkey': repo.libs.keys.get_pubkey_from_privkey(
'pubkey': pubkey, repo,
'endpoint': f'{peer_node.hostname}:51820', f'{rnode.name} wireguard pubkey',
rnode.metadata.get('wireguard/privatekey'),
),
} }
return { return {
@ -133,3 +66,75 @@ def peer_keys(metadata):
'peers': peers, 'peers': peers,
}, },
} }
@metadata_reactor.provides(
'wireguard/peers',
)
def peer_ips_and_endpoints(metadata):
peers = {}
for peer_name in metadata.get('wireguard/peers', {}):
try:
rnode = repo.get_node(peer_name)
except NoSuchNode:
continue
ips = rnode.metadata.get('wireguard/subnets', set())
ips.add(rnode.metadata.get('wireguard/my_ip').split('/')[0])
ips = repo.libs.tools.remove_more_specific_subnets(ips)
peers[rnode.name] = {
'endpoint': '{}:51820'.format(rnode.metadata.get('wireguard/external_hostname', rnode.hostname)),
'ips': ips,
}
return {
'wireguard': {
'peers': peers,
},
}
@metadata_reactor.provides(
'interfaces/wg0/ips',
)
def interface_ips(metadata):
return {
'interfaces': {
'wg0': {
'ips': {
metadata.get('wireguard/my_ip'),
},
},
},
}
@metadata_reactor.provides(
'interfaces/wg0/routes',
)
def routes(metadata):
network = ip_network(metadata.get('wireguard/my_ip'), strict=False)
ips = {
f'{network.network_address}/{network.prefixlen}',
}
routes = {}
for _, peer_config in metadata.get('wireguard/peers', {}).items():
for ip in peer_config['ips']:
ips.add(ip)
if '0.0.0.0/0' in ips:
ips.remove('0.0.0.0/0')
for ip in repo.libs.tools.remove_more_specific_subnets(ips):
routes[ip] = {}
return {
'interfaces': {
'wg0': {
'routes': routes,
},
},
}

View file

@ -26,17 +26,15 @@ svc_systemd = {
}, },
} }
for name, config in node.metadata.get('zfs/datasets', {}).items(): zfs_datasets = node.metadata.get('zfs/datasets')
zfs_datasets[name] = config
zfs_datasets[name].pop('backup', None)
for name, config in node.metadata.get('zfs/pools', {}).items(): for name, attrs in node.metadata.get('zfs/pools', {}).items():
zfs_pools[name] = config zfs_pools[name] = attrs
actions[f'pool_{name}_enable_trim'] = { # actions[f'pool_{name}_enable_trim'] = {
'command': f'zpool set autotrim=on {name}', # 'command': f'zpool set autotrim=on {name}',
'unless': f'zpool get autotrim -H -o value {name} | grep -q on', # 'unless': f'zpool get autotrim -H -o value {name} | grep -q on',
'needs': [ # 'needs': [
f'zfs_pool:{name}' # f'zfs_pool:{name}'
] # ]
} # }

View file

@ -8,28 +8,19 @@ defaults = {
'pkg_apt:zfs-dkms', 'pkg_apt:zfs-dkms',
}, },
}, },
'parted':{
'needed_by': {
'pkg_apt:zfs-zed',
'pkg_apt:zfsutils-linux',
},
},
'zfs-dkms': { 'zfs-dkms': {
'backports': True,
'needed_by': { 'needed_by': {
'pkg_apt:zfs-zed', 'pkg_apt:zfs-zed',
'pkg_apt:zfsutils-linux', 'pkg_apt:zfsutils-linux',
}, },
}, },
'zfs-zed': { 'zfs-zed': {
'backports': True,
'needed_by': { 'needed_by': {
'zfs_dataset:', 'zfs_dataset:',
'zfs_pool:', 'zfs_pool:',
}, },
}, },
'zfsutils-linux': { 'zfsutils-linux': {
'backports': True,
'needed_by': { 'needed_by': {
'pkg_apt:zfs-zed', 'pkg_apt:zfs-zed',
'zfs_dataset:', 'zfs_dataset:',
@ -57,17 +48,3 @@ def dataset_defaults(metadata):
}, },
}, },
} }
@metadata_reactor.provides(
'backup/paths'
)
def backup(metadata):
return {
'backup': {
'paths': [
options['mountpoint'] for options in metadata.get('zfs/datasets').values()
if options.get('backup', True)
],
},
}

Binary file not shown.

Some files were not shown because too many files have changed in this diff Show more