Compare commits

...

14 commits

Author    SHA1        Message  Date
mwiegand  6e5b6c0f05  wip      2021-07-13 15:29:44 +02:00
mwiegand  198c1f54fa  wip      2021-07-13 15:29:28 +02:00
mwiegand  9b025bdd6b  wip      2021-07-13 15:28:45 +02:00
mwiegand  0676868e51  wip      2021-07-13 15:28:08 +02:00
mwiegand  e71d25dd3c  wip      2021-07-13 15:27:00 +02:00
mwiegand  f53c6f118a  wip      2021-07-13 15:26:40 +02:00
mwiegand  e65e7185b5  wip      2021-07-13 15:26:23 +02:00
mwiegand  a2ceae83bb  wip      2021-07-13 15:25:42 +02:00
mwiegand  6249e52e0f  wip      2021-07-13 15:24:43 +02:00
mwiegand  92d3b7ac83  wip      2021-07-13 15:23:13 +02:00
mwiegand  4f3ad805fe  wip      2021-07-13 15:20:08 +02:00
mwiegand  514948a9bb  wip      2021-07-13 15:19:35 +02:00
mwiegand  7276751e42  wip      2021-07-13 15:17:07 +02:00
mwiegand  516287f74e  wip      2021-07-13 15:14:28 +02:00
214 changed files with 16 additions and 7098 deletions

View file

@ -1,13 +0,0 @@
```python
{
'apt': {
'packages': {
'apt-transport-https': {},
},
'sources': [
# place key under data/apt/keys/packages.cloud.google.com.{asc|gpg}
'deb https://packages.cloud.google.com/apt cloud-sdk main',
],
},
}
```
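
The matching key can be fetched into the repo beforehand; a sketch, assuming the canonical Google Cloud apt key URL:

```
curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg \
    -o data/apt/keys/packages.cloud.google.com.gpg
```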

View file

@ -1,100 +0,0 @@
from glob import glob
from os.path import basename, join
from urllib.parse import urlparse
directories = {
'/etc/apt/sources.list.d': {
'purge': True,
'triggers': {
'action:apt_update',
},
},
'/etc/apt/trusted.gpg.d': {
'purge': True,
'triggers': {
'action:apt_update',
},
},
'/etc/apt/preferences.d': {
'purge': True,
'triggers': {
'action:apt_update',
},
},
}
files = {
'/etc/apt/sources.list': {
'content': '# managed'
},
}
actions = {
'apt_update': {
'command': 'apt-get update',
'needed_by': {
'pkg_apt:',
},
'triggered': True,
'cascade_skip': False,
},
}
# group sources by apt server hostname
hosts = {}
for source_string in node.metadata.get('apt/sources'):
source = repo.libs.apt.AptSource(source_string)
hosts\
.setdefault(source.url.hostname, set())\
.add(source)
# create sources lists and keyfiles
for host, sources in hosts.items():
keyfile = basename(glob(join(repo.path, 'data', 'apt', 'keys', f'{host}.*'))[0])
destination_path = f'/etc/apt/trusted.gpg.d/{keyfile}'
for source in sources:
source.options['signed-by'] = [destination_path]
files[f'/etc/apt/sources.list.d/{host}.list'] = {
'content': '\n'.join(
str(source) for source in sorted(sources)
).format(
release=node.metadata.get('os_release')
),
'triggers': {
'action:apt_update',
},
}
files[destination_path] = {
'source': join(repo.path, 'data', 'apt', 'keys', keyfile),
'content_type': 'binary',
'triggers': {
'action:apt_update',
},
}
# create backport pinnings
for package, options in node.metadata.get('apt/packages', {}).items():
pkg_apt[package] = options
if pkg_apt[package].pop('backports', False):
files[f'/etc/apt/preferences.d/{package}'] = {
'content': '\n'.join([
f"Package: {package}",
f"Pin: release a={node.metadata.get('os_release')}-backports",
f"Pin-Priority: 900",
]),
'needed_by': [
f'pkg_apt:{package}',
],
'triggers': {
'action:apt_update',
},
}

View file

@ -1,6 +0,0 @@
defaults = {
'apt': {
'packages': {},
'sources': [],
},
}

View file

@ -1,12 +0,0 @@
```
defaults = {
'archive': {
'/var/important': {
'exclude': [
                r'\.cache/',
                r'\.log$',
],
},
},
}
```
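
The `exclude` patterns are joined with `|` and passed to `gsutil rsync -x`, so they are interpreted as regular expressions.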

View file

@ -1,29 +0,0 @@
#!/bin/bash
if [[ "$1" == 'perform' ]]
then
echo 'NON-DRY RUN'
DRY=''
else
echo 'DRY RUN'
DRY='-n'
fi
% for path, options in paths.items():
# ${path}
gsutil ${'\\'}
-m ${'\\'}
-o 'GSUtil:parallel_process_count=${processes}' ${'\\'}
-o 'GSUtil:parallel_thread_count=${threads}' ${'\\'}
rsync ${'\\'}
$DRY ${'\\'}
-r ${'\\'}
-d ${'\\'}
-e ${'\\'}
% if options.get('exclude'):
-x '${'|'.join(options['exclude'])}' ${'\\'}
% endif
'${options['encrypted_path']}' ${'\\'}
'gs://${bucket}/${node_id}${path}' ${'\\'}
2>&1 | logger -st gsutil
% endfor

View file

@ -1,10 +0,0 @@
#!/bin/bash
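# Fetch a single file from the GCS archive bucket and decrypt it with
# gocryptfs-inspect, printing the plaintext to stdout.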
FILENAME=$1
TMPFILE=$(mktemp /tmp/archive_file.XXXXXXXXXX)
BUCKET=$(cat /etc/gcloud/gcloud.json | jq -r .bucket)
NODE=$(cat /etc/archive/archive.json | jq -r .node_id)
MASTERKEY=$(cat /etc/gocryptfs/masterkey)
gsutil cat "gs://$BUCKET/$NODE$FILENAME" > "$TMPFILE"
/opt/gocryptfs-inspect/gocryptfs.py --aessiv --config=/etc/gocryptfs/gocryptfs.conf --masterkey="$MASTERKEY" "$TMPFILE"

View file

@ -1,15 +0,0 @@
#!/bin/bash
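# Compare the checksum of the archived copy against the local original.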
FILENAME=$1
ARCHIVE=$(/opt/archive/get_file "$FILENAME" | sha256sum)
ORIGINAL=$(cat "$FILENAME" | sha256sum)
if [[ "$ARCHIVE" == "$ORIGINAL" ]]
then
echo "OK"
exit 0
else
echo "ERROR"
exit 1
fi

View file

@ -1,43 +0,0 @@
assert node.has_bundle('gcloud')
assert node.has_bundle('gocryptfs')
assert node.has_bundle('gocryptfs-inspect')
assert node.has_bundle('systemd')
from json import dumps
directories['/opt/archive'] = {}
directories['/etc/archive'] = {}
files['/etc/archive/archive.json'] = {
'content': dumps(
{
'node_id': node.metadata.get('id'),
**node.metadata.get('archive'),
},
indent=4,
sort_keys=True
),
}
files['/opt/archive/archive'] = {
'content_type': 'mako',
'mode': '700',
'context': {
'node_id': node.metadata.get('id'),
'paths': node.metadata.get('archive/paths'),
'bucket': node.metadata.get('gcloud/bucket'),
'processes': 4,
'threads': 4,
},
'needs': [
'bundle:gcloud',
],
}
files['/opt/archive/get_file'] = {
'mode': '700',
}
files['/opt/archive/validate_file'] = {
'mode': '700',
}

View file

@ -1,45 +0,0 @@
defaults = {
'apt': {
'packages': {
'jq': {},
},
},
'archive': {
'paths': {},
},
}
@metadata_reactor.provides(
'archive/paths',
)
def paths(metadata):
return {
'archive': {
'paths': {
path: {
'encrypted_path': f'/mnt/archive.enc{path}',
'exclude': [
                        r'^\..*',
                        r'/\..*',
],
} for path in metadata.get('archive/paths')
},
}
}
@metadata_reactor.provides(
'gocryptfs/paths',
)
def gocryptfs(metadata):
return {
'gocryptfs': {
'paths': {
path: {
'mountpoint': options['encrypted_path'],
'reverse': True,
} for path, options in metadata.get('archive/paths').items()
},
}
}

View file

@ -1,3 +0,0 @@
#!/bin/bash
zfs send tank/nextcloud@test1 | ssh backup-receiver@10.0.0.5 sudo zfs recv tank/nextcloud

View file

@ -1,89 +0,0 @@
from ipaddress import ip_interface
defaults = {
'apt': {
'packages': {
'rsync': {},
},
},
'users': {
'backup-receiver': {
'authorized_keys': [],
},
},
'sudoers': {
'backup-receiver': ['ALL'],
}
}
@metadata_reactor.provides(
'zfs/datasets'
)
def zfs(metadata):
datasets = {}
for other_node in repo.nodes:
if (
other_node.has_bundle('backup') and
other_node.metadata.get('backup/server') == node.name
):
# container
datasets[f"tank/{other_node.metadata.get('id')}"] = {
'mountpoint': 'none',
'readonly': 'on',
'backup': False,
}
# for rsync backups
datasets[f"tank/{other_node.metadata.get('id')}/fs"] = {
'mountpoint': f"/mnt/backups/{other_node.metadata.get('id')}",
'readonly': 'off',
'backup': False,
}
# for zfs send/recv
if other_node.has_bundle('zfs'):
for path in other_node.metadata.get('backup/paths'):
for dataset, config in other_node.metadata.get('zfs/datasets').items():
if path == config.get('mountpoint'):
datasets[f"tank/{other_node.metadata.get('id')}/{dataset}"] = {
'mountpoint': 'none',
'readonly': 'on',
'backup': False,
}
continue
return {
'zfs': {
'datasets': datasets,
},
}
@metadata_reactor.provides(
'dns',
)
def dns(metadata):
return {
'dns': {
metadata.get('backup-server/hostname'): repo.libs.dns.get_a_records(metadata),
}
}
@metadata_reactor.provides(
'users/backup-receiver/authorized_keys'
)
def backup_authorized_keys(metadata):
return {
'users': {
'backup-receiver': {
'authorized_keys': [
other_node.metadata.get('users/root/pubkey')
for other_node in repo.nodes
if other_node.has_bundle('backup')
and other_node.metadata.get('backup/server') == node.name
],
},
},
}

View file

@ -1,6 +0,0 @@
#!/bin/bash
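# Run a backup for every path listed in /etc/backup/config.json.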
for path in $(jq -r '.paths | .[]' < /etc/backup/config.json)
do
/opt/backup/backup_path "$path"
done

View file

@ -1,14 +0,0 @@
#!/bin/bash
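# Dispatch: paths that are ZFS mountpoints are sent via zfs send/recv,
# plain directories via rsync.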
path=$1
if zfs list -H -o mountpoint | grep -q "$path"
then
/opt/backup/backup_path_via_zfs "$path"
elif test -d "$path"
then
/opt/backup/backup_path_via_rsync "$path"
else
echo "UNKNOWN PATH: $path"
exit 1
fi

View file

@ -1,11 +0,0 @@
#!/bin/bash
set -exu
path=$1
uuid=$(jq -r .client_uuid < /etc/backup/config.json)
server=$(jq -r .server_hostname < /etc/backup/config.json)
ssh="ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 backup-receiver@$server"
rsync -av --rsync-path="sudo rsync" "$path/" "backup-receiver@$server:/mnt/backups/$uuid$path/"
$ssh sudo zfs snap "tank/$uuid/fs@auto-backup_$(date +"%Y-%m-%d_%H:%M:%S")"

View file

@ -1,54 +0,0 @@
#!/bin/bash
set -exu
path=$1
uuid=$(jq -r .client_uuid < /etc/backup/config.json)
server=$(jq -r .server_hostname < /etc/backup/config.json)
ssh="ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 backup-receiver@$server"
source_dataset=$(zfs list -H -o mountpoint,name | grep -P "^$path\t" | cut -d $'\t' -f 2)
target_dataset="tank/$uuid/$source_dataset"
target_dataset_parent=$(echo $target_dataset | rev | cut -d / -f 2- | rev)
bookmark_prefix="auto-backup_"
new_bookmark="$bookmark_prefix$(date +"%Y-%m-%d_%H:%M:%S")"
for var in path uuid server ssh source_dataset target_dataset target_dataset_parent new_bookmark
do
[[ -z "${!var}" ]] && echo "ERROR - $var is empty" && exit 96
done
$ssh true || (echo "ERROR - cant ssh connect to $server" && exit 97)
echo "BACKUP ZFS DATASET - PATH: $path, SERVER: $server, UUID: $uuid, SOURCE_DATASET: $source_dataset, TARGET_DATASET: $target_dataset"
if ! $ssh sudo zfs list -t filesystem -H -o name | grep -q "^$target_dataset_parent$"
then
echo "CREATING PARENT DATASET..."
$ssh sudo zfs create -p -o mountpoint=none "$target_dataset_parent"
fi
zfs snap "$source_dataset@$new_bookmark"
if zfs list -t bookmark -H -o name | grep "^$source_dataset#$bookmark_prefix" | wc -l | grep -q "^0$"
then
echo "INITIAL BACKUP"
# run in a subshell, otherwise ctrl+c would lead to a 0 exit code
$(zfs send -v "$source_dataset@$new_bookmark" | $ssh sudo zfs recv -F "$target_dataset")
else
echo "INCREMENTAL BACKUP"
last_bookmark=$(zfs list -t bookmark -H -o name | grep "^$source_dataset#$bookmark_prefix" | sort | tail -1 | cut -d '#' -f 2)
[[ -z "$last_bookmark" ]] && echo "ERROR - last_bookmark is empty" && exit 98
$(zfs send -v -i "#$last_bookmark" "$source_dataset@$new_bookmark" | $ssh sudo zfs recv "$target_dataset")
fi
if [[ "$?" == "0" ]]
then
zfs bookmark "$source_dataset@$new_bookmark" "$source_dataset#$new_bookmark"
zfs destroy "$source_dataset@$new_bookmark"
echo "SUCCESS"
else
zfs destroy "$source_dataset@$new_bookmark"
echo "ERROR"
exit 99
fi

View file

@ -1,30 +0,0 @@
from json import dumps
directories['/opt/backup'] = {}
files['/opt/backup/backup_all'] = {
'mode': '700',
}
files['/opt/backup/backup_path'] = {
'mode': '700',
}
files['/opt/backup/backup_path_via_zfs'] = {
'mode': '700',
}
files['/opt/backup/backup_path_via_rsync'] = {
'mode': '700',
}
directories['/etc/backup'] = {}
files['/etc/backup/config.json'] = {
'content': dumps(
{
'server_hostname': repo.get_node(node.metadata.get('backup/server')).metadata.get('backup-server/hostname'),
'client_uuid': node.metadata.get('id'),
'paths': sorted(set(node.metadata.get('backup/paths'))),
},
indent=4,
sort_keys=True
),
}

View file

@ -1,18 +0,0 @@
defaults = {
'apt': {
'packages': {
'jq': {},
'rsync': {},
},
},
'backup': {
'server': None,
'paths': [],
},
'systemd-timers': {
f'backup': {
'command': '/opt/backup/backup_all',
'when': 'daily',
},
},
}

View file

@ -1,23 +0,0 @@
<%!
def column_width(column, table):
return max(map(lambda row: len(row[column]), table)) if table else 0
%>\
$TTL 600
@ IN SOA ${hostname}. admin.${hostname}. (
    ${serial} ;Serial
3600 ;Refresh
200 ;Retry
1209600 ;Expire
900 ;Negative response caching TTL
)
% for record in sorted(records, key=lambda r: (r['name'], r['type'], r['value'])):
${(record['name'] or '@').ljust(column_width('name', records))} \
IN \
${record['type'].ljust(column_width('type', records))} \
% if record['type'] == 'TXT':
(${' '.join('"'+record['value'][i:i+255]+'"' for i in range(0, len(record['value']), 255))})
% else:
${record['value']}
% endif
% endfor

View file

@ -1,2 +0,0 @@
RESOLVCONF=no
OPTIONS="-u bind"

View file

@ -1,6 +0,0 @@
statistics-channels {
inet 127.0.0.1 port 8053;
};
include "/etc/bind/named.conf.options";
include "/etc/bind/named.conf.local";

View file

@ -1,42 +0,0 @@
% for view in views:
acl "${view['name']}" {
${' '.join(f'{e};' for e in view['acl'])}
};
% endfor
% for view in views:
view "${view['name']}" {
match-clients { ${view['name']}; };
% if view['is_internal']:
recursion yes;
% else:
recursion no;
rate-limit {
responses-per-second 2;
window 25;
};
% endif
forward only;
forwarders {
1.1.1.1;
9.9.9.9;
8.8.8.8;
};
% for zone in zones:
zone "${zone}" {
type ${type};
% if type == 'slave':
masters { ${master_ip}; };
% endif
file "/var/lib/bind/${view['name']}/db.${zone}";
};
% endfor
include "/etc/bind/named.conf.default-zones";
include "/etc/bind/zones.rfc1918";
};
% endfor

View file

@ -1,16 +0,0 @@
options {
directory "/var/cache/bind";
dnssec-validation auto;
listen-on-v6 { any; };
allow-query { any; };
max-cache-size 30%;
querylog yes;
% if type == 'master':
notify yes;
also-notify { ${' '.join([f'{ip};' for ip in slave_ips])} };
allow-transfer { ${' '.join([f'{ip};' for ip in slave_ips])} };
% endif
};

View file

@ -1,179 +0,0 @@
from ipaddress import ip_address, ip_interface
from datetime import datetime
if node.metadata.get('bind/type') == 'master':
zones = node.metadata.get('bind/zones')
master_ip = None
slave_ips = [
ip_interface(repo.get_node(slave).metadata.get('network/external/ipv4')).ip
for slave in node.metadata.get('bind/slaves')
]
else:
zones = repo.get_node(node.metadata.get('bind/master_node')).metadata.get('bind/zones')
master_ip = ip_interface(repo.get_node(node.metadata.get('bind/master_node')).metadata.get('network/external/ipv4')).ip
slave_ips = []
directories[f'/var/lib/bind'] = {
'purge': True,
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:restart',
],
}
files['/etc/default/bind9'] = {
'source': 'defaults',
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:restart',
],
}
files['/etc/bind/named.conf'] = {
'owner': 'root',
'group': 'bind',
'needs': [
'pkg_apt:bind9',
],
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:restart',
],
}
files['/etc/bind/named.conf.options'] = {
'content_type': 'mako',
'context': {
'type': node.metadata.get('bind/type'),
'slave_ips': sorted(slave_ips),
},
'owner': 'root',
'group': 'bind',
'needs': [
'pkg_apt:bind9',
],
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:restart',
],
}
views = [
{
'name': 'internal',
'is_internal': True,
'acl': [
'127.0.0.1',
'10.0.0.0/8',
'169.254.0.0/16',
'172.16.0.0/12',
'192.168.0.0/16',
]
},
{
'name': 'external',
'is_internal': False,
'acl': [
'any',
]
},
]
files['/etc/bind/named.conf.local'] = {
'content_type': 'mako',
'context': {
'type': node.metadata.get('bind/type'),
'master_ip': master_ip,
'views': views,
'zones': sorted(zones),
},
'owner': 'root',
'group': 'bind',
'needs': [
'pkg_apt:bind9',
],
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:restart',
],
}
def record_matches_view(record, records, view):
if record['type'] in ['A', 'AAAA']:
if view == 'external':
# no internal addresses in external view
if ip_address(record['value']).is_private:
return False
elif view == 'internal':
            # external addresses appear in the internal view only if no internal record exists
if ip_address(record['value']).is_global:
for other_record in records:
if (
record['name'] == other_record['name'] and
record['type'] == other_record['type'] and
ip_address(other_record['value']).is_private
):
return False
return True
for view in views:
directories[f"/var/lib/bind/{view['name']}"] = {
'purge': True,
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:restart',
],
}
for zone, records in zones.items():
unique_records = [
dict(record_tuple)
for record_tuple in set(
tuple(record.items()) for record in records
)
]
files[f"/var/lib/bind/{view['name']}/db.{zone}"] = {
'group': 'bind',
'source': 'db',
'content_type': 'mako',
'context': {
'view': view['name'],
'serial': datetime.now().strftime('%Y%m%d%H'),
'records': list(filter(
lambda record: record_matches_view(record, records, view['name']),
unique_records
)),
'hostname': node.metadata.get('bind/hostname'),
},
'needs': [
f"directory:/var/lib/bind/{view['name']}",
],
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:restart',
],
}
svc_systemd['bind9'] = {}
actions['named-checkconf'] = {
'command': 'named-checkconf -z',
'unless': 'named-checkconf -z',
'needs': [
'svc_systemd:bind9',
]
}

View file

@ -1,131 +0,0 @@
from ipaddress import ip_interface
defaults = {
'apt': {
'packages': {
'bind9': {},
},
},
'bind': {
'zones': {},
'slaves': {},
},
'telegraf': {
'config': {
'inputs': {
'bind': [{
'urls': ['http://localhost:8053/xml/v3'],
'gather_memory_contexts': False,
'gather_views': True,
}],
},
},
},
}
@metadata_reactor.provides(
'bind/type',
)
def type(metadata):
return {
'bind': {
'type': 'slave' if metadata.get('bind/master_node', None) else 'master',
}
}
@metadata_reactor.provides(
'dns',
)
def dns(metadata):
return {
'dns': {
metadata.get('bind/hostname'): repo.libs.dns.get_a_records(metadata),
}
}
@metadata_reactor.provides(
'bind/zones',
)
def collect_records(metadata):
if metadata.get('bind/type') == 'slave':
return {}
zones = {}
for other_node in repo.nodes:
for fqdn, records in other_node.metadata.get('dns').items():
matching_zones = sorted(
filter(
lambda potential_zone: fqdn.endswith(potential_zone),
metadata.get('bind/zones').keys()
),
key=len,
)
if matching_zones:
zone = matching_zones[-1]
else:
continue
name = fqdn[0:-len(zone) - 1]
for type, values in records.items():
for value in values:
zones\
.setdefault(zone, [])\
.append(
{'name': name, 'type': type, 'value': value}
)
return {
'bind': {
'zones': zones,
},
}
@metadata_reactor.provides(
'bind/zones',
)
def ns_records(metadata):
if metadata.get('bind/type') == 'slave':
return {}
nameservers = [
node.metadata.get('bind/hostname'),
*[
repo.get_node(slave).metadata.get('bind/hostname')
for slave in node.metadata.get('bind/slaves')
]
]
return {
'bind': {
'zones': {
zone: [
{'name': '@', 'type': 'NS', 'value': f"{nameserver}."}
for nameserver in nameservers
] for zone in metadata.get('bind/zones').keys()
},
},
}
@metadata_reactor.provides(
'bind/slaves',
)
def slaves(metadata):
if metadata.get('bind/type') == 'slave':
return {}
return {
'bind': {
'slaves': [
other_node.name
for other_node in repo.nodes
if other_node.has_bundle('bind') and other_node.metadata.get('bind/master_node', None) == node.name
],
},
}

View file

@ -1,9 +0,0 @@
DOVECOT
=======
rescan index: https://doc.dovecot.org/configuration_manual/fts/#rescan
```
sudo -u vmail doveadm fts rescan -u 'test@mail2.sublimity.de'
sudo -u vmail doveadm index -u 'test@mail2.sublimity.de' -q '*'
```

View file

@ -1,105 +0,0 @@
#!/bin/sh
# Example attachment decoder script. The attachment comes from stdin, and
# the script is expected to output UTF-8 data to stdout. (If the output isn't
# UTF-8, everything except valid UTF-8 sequences are dropped from it.)
# The attachment decoding is enabled by setting:
#
# plugin {
# fts_decoder = decode2text
# }
# service decode2text {
# executable = script /usr/local/libexec/dovecot/decode2text.sh
# user = dovecot
# unix_listener decode2text {
# mode = 0666
# }
# }
libexec_dir=`dirname $0`
content_type=$1
# The second parameter is the format's filename extension, which is used when
# found from a filename of application/octet-stream. You can also add more
# extensions by giving more parameters.
formats='application/pdf pdf
application/x-pdf pdf
application/msword doc
application/mspowerpoint ppt
application/vnd.ms-powerpoint ppt
application/ms-excel xls
application/x-msexcel xls
application/vnd.ms-excel xls
application/vnd.openxmlformats-officedocument.wordprocessingml.document docx
application/vnd.openxmlformats-officedocument.spreadsheetml.sheet xlsx
application/vnd.openxmlformats-officedocument.presentationml.presentation pptx
application/vnd.oasis.opendocument.text odt
application/vnd.oasis.opendocument.spreadsheet ods
application/vnd.oasis.opendocument.presentation odp
'
if [ "$content_type" = "" ]; then
echo "$formats"
exit 0
fi
fmt=`echo "$formats" | grep -w "^$content_type" | cut -d ' ' -f 2`
if [ "$fmt" = "" ]; then
echo "Content-Type: $content_type not supported" >&2
exit 1
fi
# most decoders can't handle stdin directly, so write the attachment
# to a temp file
path=`mktemp`
trap "rm -f $path" 0 1 2 3 14 15
cat > $path
xmlunzip() {
name=$1
tempdir=`mktemp -d`
if [ "$tempdir" = "" ]; then
exit 1
fi
trap "rm -rf $path $tempdir" 0 1 2 3 14 15
cd $tempdir || exit 1
unzip -q "$path" 2>/dev/null || exit 0
find . -name "$name" -print0 | xargs -0 cat |
$libexec_dir/xml2text
}
wait_timeout() {
childpid=$!
trap "kill -9 $childpid; rm -f $path" 1 2 3 14 15
wait $childpid
}
LANG=en_US.UTF-8
export LANG
if [ $fmt = "pdf" ]; then
/usr/bin/pdftotext $path - 2>/dev/null&
wait_timeout 2>/dev/null
elif [ $fmt = "doc" ]; then
(/usr/bin/catdoc $path; true) 2>/dev/null&
wait_timeout 2>/dev/null
elif [ $fmt = "ppt" ]; then
(/usr/bin/catppt $path; true) 2>/dev/null&
wait_timeout 2>/dev/null
elif [ $fmt = "xls" ]; then
(/usr/bin/xls2csv $path; true) 2>/dev/null&
wait_timeout 2>/dev/null
elif [ $fmt = "odt" -o $fmt = "ods" -o $fmt = "odp" ]; then
xmlunzip "content.xml"
elif [ $fmt = "docx" ]; then
xmlunzip "document.xml"
elif [ $fmt = "xlsx" ]; then
xmlunzip "sharedStrings.xml"
elif [ $fmt = "pptx" ]; then
xmlunzip "slide*.xml"
else
echo "Buggy decoder script: $fmt not handled" >&2
exit 1
fi
exit 0

View file

@ -1,10 +0,0 @@
connect = host=${host} dbname=${name} user=${user} password=${password}
driver = pgsql
default_pass_scheme = ARGON2ID
password_query = SELECT CONCAT(users.name, '@', domains.name) AS user, password \
FROM users \
LEFT JOIN domains ON users.domain_id = domains.id \
WHERE redirect IS NULL \
AND users.name = SPLIT_PART('%u', '@', 1) \
AND domains.name = SPLIT_PART('%u', '@', 2)

View file

@ -1,134 +0,0 @@
protocols = imap lmtp sieve
auth_mechanisms = plain login
mail_privileged_group = mail
ssl = required
ssl_cert = </var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')}/fullchain.pem
ssl_key = </var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')}/privkey.pem
ssl_dh = </etc/dovecot/dhparam.pem
ssl_client_ca_dir = /etc/ssl/certs
mail_location = maildir:~
mail_plugins = fts fts_xapian
namespace inbox {
inbox = yes
separator = .
mailbox Drafts {
auto = subscribe
special_use = \Drafts
}
mailbox Junk {
auto = create
special_use = \Junk
}
mailbox Trash {
auto = subscribe
special_use = \Trash
}
mailbox Sent {
auto = subscribe
special_use = \Sent
}
}
passdb {
driver = sql
args = /etc/dovecot/dovecot-sql.conf
}
userdb {
driver = static
args = uid=vmail gid=vmail home=/var/vmail/%u
}
service auth {
unix_listener /var/spool/postfix/private/auth {
mode = 0660
user = postfix
group = postfix
}
}
service lmtp {
unix_listener /var/spool/postfix/private/dovecot-lmtp {
mode = 0600
user = postfix
group = postfix
}
}
service stats {
unix_listener stats-reader {
user = vmail
group = vmail
mode = 0660
}
unix_listener stats-writer {
user = vmail
group = vmail
mode = 0660
}
}
service managesieve-login {
inet_listener sieve {
}
process_min_avail = 0
service_count = 1
vsz_limit = 64 M
}
service managesieve {
process_limit = 100
}
protocol imap {
mail_plugins = $mail_plugins imap_sieve
mail_max_userip_connections = 50
imap_idle_notify_interval = 29 mins
}
protocol lmtp {
mail_plugins = $mail_plugins sieve
}
protocol sieve {
plugin {
sieve = /var/vmail/sieve/%u.sieve
sieve_storage = /var/vmail/sieve/%u/
}
}
# fulltext search
plugin {
fts = xapian
fts_xapian = partial=3 full=20 verbose=0
fts_autoindex = yes
fts_enforced = yes
    # Index attachments
fts_decoder = decode2text
}
service indexer-worker {
vsz_limit = ${indexer_ram}
}
service decode2text {
executable = script /usr/local/libexec/dovecot/decode2text.sh
user = dovecot
unix_listener decode2text {
mode = 0666
}
}
# spam filter
plugin {
sieve_plugins = sieve_imapsieve sieve_extprograms
sieve_dir = /var/vmail/sieve/%u/
sieve = /var/vmail/sieve/%u.sieve
sieve_pipe_bin_dir = /var/vmail/sieve/
sieve_extensions = +vnd.dovecot.pipe
sieve_before = /var/vmail/sieve/global/spam-global.sieve
# From elsewhere to Spam folder
imapsieve_mailbox1_name = Junk
imapsieve_mailbox1_causes = COPY
imapsieve_mailbox1_before = file:/var/vmail/sieve/global/learn-spam.sieve
# From Spam folder to elsewhere
imapsieve_mailbox2_name = *
imapsieve_mailbox2_from = Junk
imapsieve_mailbox2_causes = COPY
imapsieve_mailbox2_before = file:/var/vmail/sieve/global/learn-ham.sieve
}

View file

@ -1,7 +0,0 @@
require ["vnd.dovecot.pipe", "copy", "imapsieve", "variables"];
if string "${mailbox}" "Trash" {
stop;
}
pipe :copy "rspamd-learn-ham.sh";

View file

@ -1,3 +0,0 @@
require ["vnd.dovecot.pipe", "copy", "imapsieve"];
pipe :copy "rspamd-learn-spam.sh";

View file

@ -1,6 +0,0 @@
require ["fileinto", "mailbox"];
if header :contains "X-Spam" "Yes" {
fileinto :create "Junk";
stop;
}

View file

@ -1 +0,0 @@
www-data ALL=(ALL) NOPASSWD: /usr/bin/doveadm pw -s ARGON2ID

View file

@ -1,108 +0,0 @@
assert node.has_bundle('mailserver')
users['vmail'] = {
'home': '/var/vmail',
}
directories = {
'/etc/dovecot': {
'purge': True,
},
'/etc/dovecot/conf.d': {
'purge': True,
'needs': [
'pkg_apt:dovecot-sieve',
'pkg_apt:dovecot-managesieved',
]
},
'/etc/dovecot/ssl': {},
'/var/vmail': {
'owner': 'vmail',
'group': 'vmail',
}
}
files = {
'/etc/dovecot/dovecot.conf': {
'content_type': 'mako',
'context': {
'admin_email': node.metadata.get('mailserver/admin_email'),
'indexer_ram': node.metadata.get('dovecot/indexer_ram'),
},
'needs': {
'pkg_apt:'
},
'triggers': {
'svc_systemd:dovecot:restart',
},
},
'/etc/dovecot/dovecot-sql.conf': {
'content_type': 'mako',
'context': node.metadata.get('mailserver/database'),
'needs': {
'pkg_apt:'
},
'triggers': {
'svc_systemd:dovecot:restart',
},
},
'/etc/dovecot/dhparam.pem': {
'content_type': 'any',
},
    '/var/vmail/sieve/global/learn-ham.sieve': {
        'owner': 'nobody',
        'group': 'nogroup',
    },
    '/var/vmail/sieve/global/learn-spam.sieve': {
        'owner': 'nobody',
        'group': 'nogroup',
    },
    '/var/vmail/sieve/global/spam-global.sieve': {
        'owner': 'nobody',
        'group': 'nogroup',
    },
}
actions = {
'dovecot_generate_dhparam': {
'command': 'openssl dhparam -out /etc/dovecot/dhparam.pem 2048',
'unless': 'test -f /etc/dovecot/dhparam.pem',
'cascade_skip': False,
'needs': {
'pkg_apt:',
'directory:/etc/dovecot/ssl',
},
'triggers': {
'svc_systemd:dovecot:restart',
},
},
}
svc_systemd = {
'dovecot': {
'needs': {
'action:letsencrypt_update_certificates',
'action:dovecot_generate_dhparam',
'file:/etc/dovecot/dovecot.conf',
'file:/etc/dovecot/dovecot-sql.conf',
},
},
}
# fulltext search
directories['/usr/local/libexec/dovecot'] = {}
files['/usr/local/libexec/dovecot/decode2text.sh'] = {
'owner': 'dovecot',
'mode': '500',
}

View file

@ -1,37 +0,0 @@
defaults = {
'apt': {
'packages': {
'dovecot-imapd': {},
'dovecot-pgsql': {},
'dovecot-lmtpd': {},
# spam filtering
'dovecot-sieve': {},
'dovecot-managesieved': {},
# fulltext search
'dovecot-fts-xapian': {}, # buster-backports
'poppler-utils': {}, # pdftotext
'catdoc': {}, # catdoc, catppt, xls2csv
},
},
'letsencrypt': {
'reload_after': {
'dovecot',
},
},
'dovecot': {
'database': {
'dbname': 'mailserver',
'dbuser': 'mailserver',
},
},
}
@metadata_reactor.provides(
'dovecot/indexer_ram',
)
def indexer_ram(metadata):
return {
'dovecot': {
            'indexer_ram': str(metadata.get('vm/ram') // 2) + 'M',
},
}

View file

@ -1,12 +0,0 @@
```
gcloud projects add-iam-policy-binding sublimity-182017 --member 'serviceAccount:backup@sublimity-182017.iam.gserviceaccount.com' --role 'roles/storage.objectViewer'
gcloud projects add-iam-policy-binding sublimity-182017 --member 'serviceAccount:backup@sublimity-182017.iam.gserviceaccount.com' --role 'roles/storage.objectCreator'
gcloud projects add-iam-policy-binding sublimity-182017 --member 'serviceAccount:backup@sublimity-182017.iam.gserviceaccount.com' --role 'roles/storage.objectAdmin'
gsutil -o "GSUtil:parallel_process_count=3" -o GSUtil:parallel_thread_count=4 -m rsync -r -d -e /var/vmail gs://sublimity-backup/mailserver
gsutil config
gsutil versioning set on gs://sublimity-backup
gcsfuse --key-file /root/.config/gcloud/service_account.json sublimity-backup gcsfuse
```

View file

@ -1,43 +0,0 @@
from os.path import join
from json import dumps
service_account = node.metadata.get('gcloud/service_account')
project = node.metadata.get('gcloud/project')
directories[f'/etc/gcloud'] = {
'purge': True,
}
files['/etc/gcloud/gcloud.json'] = {
'content': dumps(
node.metadata.get('gcloud'),
indent=4,
sort_keys=True
),
}
files['/etc/gcloud/service_account.json'] = {
'content': repo.vault.decrypt_file(
join(repo.path, 'data', 'gcloud', 'service_accounts', f'{service_account}@{project}.json.enc')
),
'mode': '500',
'needs': [
'pkg_apt:google-cloud-sdk',
],
}
actions['gcloud_activate_service_account'] = {
'command': 'gcloud auth activate-service-account --key-file /etc/gcloud/service_account.json',
'unless': f"gcloud auth list | grep -q '^\*[[:space:]]*{service_account}@{project}.iam.gserviceaccount.com'",
'needs': [
f'file:/etc/gcloud/service_account.json'
],
}
actions['gcloud_select_project'] = {
'command': f"gcloud config set project '{project}'",
'unless': f"gcloud config get-value project | grep -q '^{project}$'",
'needs': [
f'action:gcloud_activate_service_account'
],
}

View file

@ -1,14 +0,0 @@
defaults = {
'apt': {
'packages': {
'apt-transport-https': {},
'ca-certificates': {},
'gnupg': {},
'google-cloud-sdk': {},
'python3-crcmod': {},
},
'sources': [
'deb https://packages.cloud.google.com/apt cloud-sdk main',
],
},
}

View file

@ -1,88 +0,0 @@
APP_NAME = ckn-gitea
RUN_USER = git
RUN_MODE = prod
[repository]
ROOT = /var/lib/gitea/repositories
MAX_CREATION_LIMIT = 0
DEFAULT_BRANCH = main
[ui]
ISSUE_PAGING_NUM = 50
MEMBERS_PAGING_NUM = 100
[server]
PROTOCOL = http
SSH_DOMAIN = ${domain}
DOMAIN = ${domain}
HTTP_ADDR = 0.0.0.0
HTTP_PORT = 3500
ROOT_URL = https://${domain}/
DISABLE_SSH = false
SSH_PORT = 22
LFS_START_SERVER = true
LFS_CONTENT_PATH = /var/lib/gitea/data/lfs
LFS_JWT_SECRET = ${lfs_secret_key}
OFFLINE_MODE = true
START_SSH_SERVER = false
DISABLE_ROUTER_LOG = true
LANDING_PAGE = explore
[database]
DB_TYPE = postgres
HOST = ${database.get('host')}:${database.get('port')}
NAME = ${database.get('database')}
USER = ${database.get('username')}
PASSWD = ${database.get('password')}
SSL_MODE = disable
LOG_SQL = false
[admin]
DEFAULT_EMAIL_NOTIFICATIONS = onmention
DISABLE_REGULAR_ORG_CREATION = true
[security]
INTERNAL_TOKEN = ${internal_token}
INSTALL_LOCK = true
SECRET_KEY = ${security_secret_key}
LOGIN_REMEMBER_DAYS = 30
DISABLE_GIT_HOOKS = ${str(not enable_git_hooks).lower()}
[openid]
ENABLE_OPENID_SIGNIN = false
ENABLE_OPENID_SIGNUP = false
[service]
REGISTER_EMAIL_CONFIRM = true
ENABLE_NOTIFY_MAIL = true
DISABLE_REGISTRATION = false
ALLOW_ONLY_EXTERNAL_REGISTRATION = false
ENABLE_CAPTCHA = false
REQUIRE_SIGNIN_VIEW = false
DEFAULT_KEEP_EMAIL_PRIVATE = true
DEFAULT_ALLOW_CREATE_ORGANIZATION = false
DEFAULT_ENABLE_TIMETRACKING = true
NO_REPLY_ADDRESS = noreply.${domain}
[mailer]
ENABLED = true
MAILER_TYPE = sendmail
FROM = "${app_name}" <noreply@${domain}>
[session]
PROVIDER = file
[picture]
DISABLE_GRAVATAR = true
ENABLE_FEDERATED_AVATAR = false
[log]
MODE = console
LEVEL = warn
[oauth2]
JWT_SECRET = ${oauth_secret_key}
[other]
SHOW_FOOTER_BRANDING = true
SHOW_FOOTER_TEMPLATE_LOAD_TIME = false

View file

@ -1,45 +0,0 @@
version = node.metadata.get('gitea/version')
downloads['/usr/local/bin/gitea'] = {
'url': f'https://dl.gitea.io/gitea/{version}/gitea-{version}-linux-amd64',
'sha256': node.metadata.get('gitea/sha256'),
'triggers': {
'svc_systemd:gitea.service:restart',
},
'preceded_by': {
'action:stop_gitea',
},
}
users['git'] = {}
directories['/var/lib/gitea'] = {
'owner': 'git',
'mode': '0700',
'triggers': {
'svc_systemd:gitea.service:restart',
},
}
actions = {
'chmod_gitea': {
'command': 'chmod a+x /usr/local/bin/gitea',
'unless': 'test -x /usr/local/bin/gitea',
'needs': {
'download:/usr/local/bin/gitea',
},
},
'stop_gitea': {
'command': 'systemctl stop gitea',
'triggered': True,
},
}
files['/etc/gitea/app.ini'] = {
'content_type': 'mako',
'owner': 'git',
'context': node.metadata['gitea'],
'triggers': {
'svc_systemd:gitea.service:restart',
},
}

View file

@ -1,89 +0,0 @@
database_password = repo.vault.password_for(f'{node.name} postgresql gitea')
defaults = {
'gitea': {
'database': {
'host': 'localhost',
'port': '5432',
'username': 'gitea',
'password': database_password,
'database': 'gitea',
},
'app_name': 'Gitea',
'lfs_secret_key': repo.vault.password_for(f'{node.name} gitea lfs_secret_key', length=43),
'security_secret_key': repo.vault.password_for(f'{node.name} gitea security_secret_key'),
'oauth_secret_key': repo.vault.password_for(f'{node.name} gitea oauth_secret_key', length=43),
'internal_token': repo.vault.password_for(f'{node.name} gitea internal_token'),
},
'postgresql': {
'roles': {
'gitea': {
'password': database_password,
},
},
'databases': {
'gitea': {
'owner': 'gitea',
},
},
},
'systemd': {
'units': {
'gitea.service': {
'content': {
'Unit': {
'Description': 'gitea',
                        'After': {
                            'syslog.target',
                            'network.target',
                        },
'Requires': 'postgresql.service',
},
'Service': {
'RestartSec': '2s',
'Type': 'simple',
'User': 'git',
'Group': 'git',
'WorkingDirectory': '/var/lib/gitea/',
'ExecStart': '/usr/local/bin/gitea web -c /etc/gitea/app.ini',
'Restart': 'always',
'Environment': 'USER=git HOME=/home/git GITEA_WORK_DIR=/var/lib/gitea',
},
'Install': {
'WantedBy': 'multi-user.target',
},
},
'item': {
'needs': [
'action:chmod_gitea',
'download:/usr/local/bin/gitea',
'file:/etc/gitea/app.ini',
],
},
},
},
},
'zfs': {
'datasets': {
'tank/gitea': {
'mountpoint': '/var/lib/gitea',
},
},
},
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('gitea/domain'): {
'content': 'nginx/proxy_pass.conf',
'context': {
'target': 'http://127.0.0.1:3500',
}
},
},
},
}

View file

@ -1,6 +0,0 @@
directories['/opt/gocryptfs-inspect'] = {}
git_deploy['/opt/gocryptfs-inspect'] = {
'repo': 'https://github.com/slackner/gocryptfs-inspect.git',
'rev': 'ecd296c8f014bf18f5889e3cb9cb64807ff6b9c4',
}

View file

@ -1,7 +0,0 @@
defaults = {
'apt': {
'packages': {
'python3-pycryptodome': {},
},
},
}

View file

@ -1,43 +0,0 @@
from json import dumps
directories['/etc/gocryptfs'] = {
'purge': True,
}
files['/etc/gocryptfs/masterkey'] = {
'content': node.metadata.get('gocryptfs/masterkey'),
'mode': '500',
}
files['/etc/gocryptfs/gocryptfs.conf'] = {
'content': dumps({
'Version': 2,
'Creator': 'gocryptfs 1.6.1',
'ScryptObject': {
'Salt': node.metadata.get('gocryptfs/salt'),
'N': 65536,
'R': 8,
'P': 1,
'KeyLen': 32,
},
'FeatureFlags': [
'GCMIV128',
'HKDF',
'PlaintextNames',
'AESSIV',
]
}, indent=4, sort_keys=True)
}
for path, options in node.metadata.get('gocryptfs/paths').items():
directories[options['mountpoint']] = {
'owner': None,
'group': None,
'mode': None,
'preceded_by': [
f'svc_systemd:gocryptfs-{options["id"]}:stop',
],
'needed_by': [
f'svc_systemd:gocryptfs-{options["id"]}',
],
}

View file

@ -1,103 +0,0 @@
from hashlib import sha3_256
from base64 import b64decode, b64encode
from binascii import hexlify
from uuid import UUID
defaults = {
'apt': {
'packages': {
'gocryptfs': {},
'fuse': {},
'socat': {},
},
},
'gocryptfs': {
'paths': {},
},
}
@metadata_reactor.provides(
'gocryptfs',
)
def config(metadata):
return {
'gocryptfs': {
'masterkey': hexlify(b64decode(
str(repo.vault.random_bytes_as_base64_for(metadata.get('id'), length=32))
)).decode(),
'salt': b64encode(
sha3_256(UUID(metadata.get('id')).bytes).digest()
).decode(),
},
}
@metadata_reactor.provides(
'gocryptfs',
)
def paths(metadata):
paths = {}
for path, options in metadata.get('gocryptfs/paths').items():
paths[path] = {
'id': hexlify(sha3_256(path.encode()).digest()[:8]).decode(),
}
return {
'gocryptfs': {
'paths': paths,
},
}
@metadata_reactor.provides(
'systemd/services',
)
def systemd(metadata):
services = {}
for path, options in metadata.get('gocryptfs/paths').items():
services[f'gocryptfs-{options["id"]}'] = {
'content': {
'Unit': {
'Description': f'gocryptfs@{path} ({options["id"]})',
'After': {
'filesystem.target',
'zfs.target',
},
},
'Service': {
'RuntimeDirectory': 'gocryptfs',
'Environment': {
'MASTERKEY': metadata.get('gocryptfs/masterkey'),
'SOCKET': f'/var/run/gocryptfs/{options["id"]}',
'PLAIN': path,
'CIPHER': options["mountpoint"]
},
'ExecStart': [
'/usr/bin/gocryptfs -fg -plaintextnames -reverse -masterkey $MASTERKEY -ctlsock $SOCKET $PLAIN $CIPHER',
],
'ExecStopPost': [
'/usr/bin/umount $CIPHER'
],
},
},
'needs': [
'pkg_apt:gocryptfs',
'pkg_apt:fuse',
'pkg_apt:socat',
'file:/etc/gocryptfs/masterkey',
'file:/etc/gocryptfs/gocryptfs.conf',
],
'triggers': [
f'svc_systemd:gocryptfs-{options["id"]}:restart',
],
}
return {
'systemd': {
'services': services,
},
}

View file

@ -1,14 +0,0 @@
# metadata
```python
{
'hostname': 'example.com',
'influxdb_node': 'htz.influx',
}
```
# links
https://github.com/grafana/influxdb-flux-datasource/issues/42
https://community.grafana.com/t/no-alias-by-when-using-flux/15575/6

View file

@ -1,150 +0,0 @@
assert node.has_bundle('redis')
assert node.has_bundle('postgresql')
from os.path import join
from mako.template import Template
from shlex import quote
from copy import deepcopy
from itertools import count
import yaml
import json
svc_systemd['grafana-server'] = {
'needs': [
'pkg_apt:grafana',
],
}
admin_password = node.metadata.get('grafana/config/security/admin_password')
port = node.metadata.get('grafana/config/server/http_port')
actions['reset_grafana_admin_password'] = {
'command': f"grafana-cli admin reset-admin-password {quote(admin_password)}",
'unless': f"curl http://admin:{quote(admin_password)}@localhost:{port}/api/org",
'needs': [
'svc_systemd:grafana-server',
],
}
directories = {
'/etc/grafana': {
},
'/etc/grafana/provisioning': {
},
'/etc/grafana/provisioning/datasources': {
'purge': True,
},
'/etc/grafana/provisioning/dashboards': {
'purge': True,
},
'/var/lib/grafana': {},
'/var/lib/grafana/dashboards': {},
}
files = {
'/etc/grafana/grafana.ini': {
'content': repo.libs.ini.dumps(node.metadata.get('grafana/config')),
'triggers': [
'svc_systemd:grafana-server:restart',
],
},
'/etc/grafana/provisioning/datasources/managed.yaml': {
'content': yaml.dump({
'apiVersion': 1,
'datasources': list(node.metadata.get('grafana/datasources').values()),
}),
'triggers': [
'svc_systemd:grafana-server:restart',
],
},
'/etc/grafana/provisioning/dashboards/managed.yaml': {
'content': yaml.dump({
'apiVersion': 1,
'providers': [{
'name': 'Default',
'folder': 'Generated',
'type': 'file',
'options': {
'path': '/var/lib/grafana/dashboards',
},
}],
}),
'triggers': [
'svc_systemd:grafana-server:restart',
],
},
}
# DASHBOARDS
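# One dashboard is generated per monitored node: each entry in its
# 'grafana_rows' metadata becomes a row of panels laid out on Grafana's
# 24-column grid, with one Flux query per panel target.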
with open(join(repo.path, 'data/grafana/dashboard.py')) as file:
    dashboard_template = eval(file.read())
with open(join(repo.path, 'data/grafana/panel.py')) as file:
    panel_template = eval(file.read())
with open(join(repo.path, 'data/grafana/flux.mako')) as file:
    flux_template = Template(file.read())
bucket = repo.get_node(node.metadata.get('grafana/influxdb_node')).metadata.get('influxdb/bucket')
monitored_nodes = [
other_node
for other_node in repo.nodes
if other_node.metadata.get('telegraf/influxdb_node', None) == node.metadata.get('grafana/influxdb_node')
]
for dashboard_id, monitored_node in enumerate(monitored_nodes, start=1):
dashboard = deepcopy(dashboard_template)
dashboard['id'] = dashboard_id
dashboard['title'] = monitored_node.name
dashboard['uid'] = monitored_node.metadata.get('id')
panel_id = count(start=1)
for row_id, row_name in enumerate(sorted(monitored_node.metadata.get('grafana_rows')), start=1):
    with open(join(repo.path, f'data/grafana/rows/{row_name}.py')) as file:
        row = eval(file.read())
for panel_in_row, (panel_name, panel_config) in enumerate(row.items()):
panel = deepcopy(panel_template)
panel['id'] = next(panel_id)
panel['title'] = f'{row_name} {panel_name}'
panel['gridPos']['w'] = 24 // len(row)
panel['gridPos']['x'] = (24 // len(row)) * panel_in_row
panel['gridPos']['y'] = (row_id - 1) * panel['gridPos']['h']
if 'display_name' in panel_config:
panel['fieldConfig']['defaults']['displayName'] = '${'+panel_config['display_name']+'}'
if panel_config.get('stacked'):
panel['fieldConfig']['defaults']['custom']['stacking']['mode'] = 'normal'
if 'unit' in panel_config:
panel['fieldConfig']['defaults']['unit'] = panel_config['unit']
if 'min' in panel_config:
panel['fieldConfig']['defaults']['min'] = panel_config['min']
if 'max' in panel_config:
panel['fieldConfig']['defaults']['max'] = panel_config['max']
for query_name, query_config in panel_config['queries'].items():
panel['targets'].append({
'refId': query_name,
'query': flux_template.render(
bucket=bucket,
host=monitored_node.name,
negative=query_config.get('negative', False),
filters={
'host': monitored_node.name,
**query_config['filters'],
},
function=query_config.get('function', None),
).strip()
})
dashboard['panels'].append(panel)
files[f'/var/lib/grafana/dashboards/{monitored_node.name}.json'] = {
'content': json.dumps(dashboard, indent=4),
'triggers': [
'svc_systemd:grafana-server:restart',
]
}

View file

@ -1,125 +0,0 @@
from mako.template import Template
postgres_password = repo.vault.password_for(f'{node.name} postgres role grafana')
defaults = {
'apt': {
'packages': {
'grafana': {},
},
'sources': [
'deb https://packages.grafana.com/oss/deb stable main',
],
},
'grafana': {
'config': {
'server': {
'http_port': 8300,
},
'database': {
'url': f'postgres://grafana:{postgres_password}@localhost:5432/grafana',
},
'remote_cache': {
'type': 'redis',
'connstr': 'addr=127.0.0.1:6379',
},
'security': {
'admin_user': 'admin',
'admin_password': str(repo.vault.password_for(f'{node.name} grafana admin')),
},
'users': {
'allow_signup': False,
},
},
'datasources': {},
},
'postgresql': {
'databases': {
'grafana': {
'owner': 'grafana',
},
},
'roles': {
'grafana': {
'password': postgres_password,
},
},
},
'zfs': {
'datasets': {
'tank/grafana': {
'mountpoint': '/var/lib/grafana'
},
},
},
}
@metadata_reactor.provides(
'grafana/datasources',
)
def influxdb2(metadata):
influxdb_metadata = repo.get_node(metadata.get('grafana/influxdb_node')).metadata.get('influxdb')
return {
'grafana': {
'datasources': {
f"influxdb@{influxdb_metadata['hostname']}": {
'type': 'influxdb',
'url': f"http://{influxdb_metadata['hostname']}:{influxdb_metadata['port']}",
'jsonData': {
'version': 'Flux',
'organization': influxdb_metadata['org'],
'defaultBucket': influxdb_metadata['bucket'],
},
'secureJsonData': {
'token': str(influxdb_metadata['readonly_token']),
},
'editable': False,
'isDefault': True,
},
},
},
}
@metadata_reactor.provides(
'grafana/datasources',
)
def datasource_key_to_name(metadata):
return {
'grafana': {
'datasources': {
name: {'name': name} for name in metadata.get('grafana/datasources').keys()
},
},
}
@metadata_reactor.provides(
'dns',
)
def dns(metadata):
return {
'dns': {
metadata.get('grafana/hostname'): repo.libs.dns.get_a_records(metadata),
}
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('grafana/hostname'): {
'content': 'nginx/proxy_pass.conf',
'context': {
'target': 'http://127.0.0.1:8300',
}
},
},
},
}

View file

@ -1,8 +0,0 @@
# defaults = {
# 'network': {
# 'external': {
# 'gateway4': '172.31.1.1',
# 'gateway6': 'fe80::1',
# },
# },
# }

View file

@ -1,11 +0,0 @@
files['/etc/hostname'] = {
'content': node.metadata.get('hostname'),
'triggers': [
'action:update_hostname',
],
}
actions["update_hostname"] = {
"command": "hostname -F /etc/hostname",
'triggered': True,
}

View file

@ -1,14 +0,0 @@
defaults = {
'hostname': '.'.join([*reversed(node.name.split('.')), 'ckn', 'li']),
}
@metadata_reactor.provides(
'dns',
)
def dns(metadata):
return {
'dns': {
metadata.get('hostname'): repo.libs.dns.get_a_records(metadata, external=False),
},
}

View file

@ -1,39 +0,0 @@
from ipaddress import ip_address
from collections import OrderedDict
def sorted_hostnames(hostnames):
return sorted(
hostnames,
key=lambda e: (len(e.split('.')), e),
)
def sorted_hosts_for_ip_version(version):
return OrderedDict(
sorted(
[
(ip, sorted_hostnames(hostnames))
for ip, hostnames in node.metadata.get('hosts').items()
if ip_address(ip).version == version
],
key=lambda e: ip_address(e[0]),
),
)
sorted_hosts = OrderedDict({
**sorted_hosts_for_ip_version(4),
**sorted_hosts_for_ip_version(6),
})
ip_width = len(max(sorted_hosts.keys(), key=len))
files['/etc/hosts'] = {
'content': '\n'.join(
' '.join([
ip.ljust(ip_width, ' '),
*hostnames
])
for ip, hostnames in sorted_hosts.items()
),
}

View file

@ -1,28 +0,0 @@
defaults = {
'hosts': {
'127.0.0.1': [
'localhost',
node.name,
],
'::1': [
'localhost',
'ip6-localhost',
'ip6-loopback',
],
'fe00::0': [
'ip6-localnet'
],
'ff00::0': [
'ip6-mcastprefix'
],
'ff02::1': [
'ip6-allnodes'
],
'ff02::2': [
'ip6-allrouters'
],
'ff02::3': [
'ip6-allhosts'
],
},
}

View file

@ -1,21 +0,0 @@
# setup
1. apply the influxdb bundle to the server
2. write the `admin`, `readonly` and `writeonly` tokens into the influxdb metadata:
`influx auth list --json | jq -r '.[] | select (.description == "NAME") | .token'`
3. apply clients
# metadata
```python
{
'hostname': 'example.com',
'admin_token': 'Wawbd5n...HJS76ez',
'readonly_token': '5v235b3...6wbnuzz',
'writeonly_token': '8w4cnos...fn849zg',
}
```
# reset password
Opening /var/lib/influxdb/influxd.bolt with https://github.com/br0xen/boltbrowser might help
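
A sketch (the bolt file is locked while influxd is running):

```
systemctl stop influxdb
boltbrowser /var/lib/influxdb/influxd.bolt
systemctl start influxdb
```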

View file

@ -1,77 +0,0 @@
from tomlkit import dumps
from shlex import quote
directories['/var/lib/influxdb'] = {
'owner': 'influxdb',
'group': 'influxdb',
'needs': [
'zfs_dataset:tank/influxdb',
],
}
directories['/etc/influxdb'] = {
'purge': True,
}
files['/etc/influxdb/config.toml'] = {
'content': dumps(node.metadata.get('influxdb/config')),
'triggers': [
'svc_systemd:influxdb:restart',
]
}
svc_systemd['influxdb'] = {
'needs': [
'directory:/var/lib/influxdb',
'file:/etc/influxdb/config.toml',
'pkg_apt:influxdb2',
]
}
actions['wait_for_influxdb_start'] = {
'command': 'sleep 15',
'triggered': True,
'triggered_by': [
'svc_systemd:influxdb',
'svc_systemd:influxdb:restart',
]
}
actions['setup_influxdb'] = {
'command': 'influx setup --username={username} --password={password} --org={org} --bucket={bucket} --token={token} --retention=0 --force'.format(
username=node.metadata.get('influxdb/username'),
password=quote(str(node.metadata.get('influxdb/password'))),
org=node.metadata.get('influxdb/org'),
bucket=node.metadata.get('influxdb/bucket'),
token=str(node.metadata.get('influxdb/admin_token')),
),
'unless': 'influx bucket list',
'needs': [
'action:wait_for_influxdb_start',
],
}
files['/root/.influxdbv2/configs'] = {
'content': dumps({
node.metadata.get('influxdb/bucket'): {
'url': f"http://localhost:{node.metadata.get('influxdb/port')}",
'token': str(node.metadata.get('influxdb/admin_token')),
'org': node.metadata.get('influxdb/org'),
'active': True,
},
}),
'needs': [
'action:setup_influxdb',
],
}
for description, permissions in {
'readonly': '--read-buckets',
'writeonly': '--write-buckets --read-telegrafs',
}.items():
actions[f'influxdb_{description}_token'] = {
'command': f'influx auth create --description {description} {permissions}',
'unless': f'''influx auth list --json | jq -r '.[] | select (.description == "{description}") | .token' | wc -l | grep -q ^1$''',
'needs': [
'file:/root/.influxdbv2/configs',
],
}

View file

@ -1,73 +0,0 @@
from ipaddress import ip_interface
defaults = {
'apt': {
'packages': {
'influxdb2': {},
},
'sources': [
'deb https://repos.influxdata.com/debian {release} stable',
],
},
'influxdb': {
'port': '8200',
'username': 'admin',
'org': 'default',
'bucket': 'default',
'config': {
'bolt-path': '/var/lib/influxdb/influxd.bolt',
'engine-path': '/var/lib/influxdb/engine',
'reporting-disabled': True,
'http-bind-address': ':8200',
},
},
'zfs': {
'datasets': {
'tank/influxdb': {
'mountpoint': '/var/lib/influxdb',
'recordsize': '8192',
'atime': 'off',
},
},
},
}
@metadata_reactor.provides(
    'influxdb/password',
    'influxdb/admin_token',
)
def admin_password(metadata):
return {
'influxdb': {
'password': repo.vault.password_for(f"{metadata.get('id')} influxdb admin"),
'admin_token': repo.vault.random_bytes_as_base64_for(f"{metadata.get('id')} influxdb default token", length=64),
},
}
@metadata_reactor.provides(
'dns',
)
def dns(metadata):
return {
'dns': {
metadata.get('influxdb/hostname'): repo.libs.dns.get_a_records(metadata),
}
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('influxdb/hostname'): {
'content': 'nginx/proxy_pass.conf',
'context': {
'target': 'http://127.0.0.1:8200',
}
},
},
},
}

View file

@ -1,13 +0,0 @@
# /etc/systemd/system/l4d2-server-a.service
[Unit]
Description=l4d2 Server A
After=network.target steam-update.service
[Service]
User=steam
WorkingDirectory=/home/steam/steam/l4d2
ExecStart=/home/steam/steam/l4d2/srcds_run -port 27001 -secure +exec server_a.cfg
Restart=on-failure
[Install]
WantedBy=multi-user.target

View file

@ -1,4 +0,0 @@
#!/bin/bash
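# Download the workshop item into the L4D2 addons directory and hand
# ownership of the files back to the steam user.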
/home/steam/steam_workshop_downloader/workshop.py -o /home/steam/steam/l4d2/left4dead2/addons 2283884609
chown -R steam:steam /home/steam/steam/l4d2/left4dead2/addons

View file

@ -1,7 +0,0 @@
@metadata_reactor.provides(
    'steam',
)
def steam(metadata):
return {
'steam': {
'222860': 'l4d2',
},
}

View file

@ -1,5 +0,0 @@
CONFIG_D=/etc/dehydrated/conf.d
BASEDIR=/var/lib/dehydrated
WELLKNOWN="${BASEDIR}/acme-challenges"
DOMAINS_TXT="/etc/dehydrated/domains.txt"
HOOK="/etc/dehydrated/hook.sh"

View file

@ -1,3 +0,0 @@
% for domain, aliases in sorted(node.metadata.get('letsencrypt/domains', {}).items()):
${domain} ${' '.join(sorted(aliases))}
% endfor

View file

@ -1,37 +0,0 @@
deploy_cert() {<%text>
local DOMAIN="${1}" KEYFILE="${2}" CERTFILE="${3}" FULLCHAINFILE="${4}" CHAINFILE="${5}" TIMESTAMP="${6}"</%text>
% for service, config in node.metadata.get('letsencrypt/concat_and_deploy', {}).items():
# concat_and_deploy ${service}
if [ "$DOMAIN" = "${config['match_domain']}" ]; then
cat $KEYFILE > ${config['target']}
cat $FULLCHAINFILE >> ${config['target']}
% if 'chown' in config:
chown ${config['chown']} ${config['target']}
% endif
% if 'chmod' in config:
chmod ${config['chmod']} ${config['target']}
% endif
% if 'commands' in config:
% for command in config['commands']:
${command}
% endfor
% endif
fi
% endfor
}
exit_hook() {<%text>
local ERROR="${1:-}"</%text>
% for service in sorted(node.metadata.get('letsencrypt/reload_after', set())):
systemctl reload-or-restart ${service}
% endfor
}
<%text>
HANDLER="$1"; shift
if [[ "${HANDLER}" =~ ^(deploy_cert|exit_hook)$ ]]; then
"$HANDLER" "$@"
fi</%text>

View file

@ -1,31 +0,0 @@
#!/bin/sh
domain=$1
just_check=$2
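# Ensure *some* certificate exists for the domain so nginx can start before
# dehydrated has obtained a real one. With just_check=true, only report via
# exit code whether a certificate is already present.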
cert_path="/var/lib/dehydrated/certs/$domain"
already_exists=false
if [ -f "$cert_path/privkey.pem" -a -f "$cert_path/fullchain.pem" -a -f "$cert_path/chain.pem" ]
then
already_exists=true
fi
if [ "$just_check" = true ]
then
if [ "$already_exists" = true ]
then
exit 0
else
exit 1
fi
fi
if [ "$already_exists" != true ]
then
rm -r "$cert_path"
mkdir -p "$cert_path"
openssl req -x509 -newkey rsa:4096 -nodes -days 3650 -subj "/CN=$domain" -keyout "$cert_path/privkey.pem" -out "$cert_path/fullchain.pem"
chmod 0600 "$cert_path/privkey.pem"
cp "$cert_path/fullchain.pem" "$cert_path/chain.pem"
fi

View file

@ -1,53 +0,0 @@
assert node.has_bundle('nginx')
delegated = 'delegate_to_node' in node.metadata.get('letsencrypt')
directories = {
'/etc/dehydrated/conf.d': {},
'/var/lib/dehydrated/acme-challenges': {},
}
files = {
'/etc/dehydrated/domains.txt': {
'content_type': 'mako',
'triggers': {
'action:letsencrypt_update_certificates',
},
},
'/etc/dehydrated/config': {
'triggers': {
'action:letsencrypt_update_certificates',
},
},
'/etc/dehydrated/hook.sh': {
'content_type': 'mako',
'mode': '0755',
},
'/etc/dehydrated/letsencrypt-ensure-some-certificate': {
'mode': '0755',
},
}
actions['letsencrypt_update_certificates'] = {
'command': 'dehydrated --cron --accept-terms --challenge http-01',
'triggered': True,
'skip': delegated,
'needs': {
'svc_systemd:nginx',
},
}
for domain in node.metadata.get('letsencrypt/domains').keys():
actions[f'letsencrypt_ensure-some-certificate_{domain}'] = {
'command': f'/etc/dehydrated/letsencrypt-ensure-some-certificate {domain}',
'unless': f'/etc/dehydrated/letsencrypt-ensure-some-certificate {domain} true',
'needs': {
'file:/etc/dehydrated/letsencrypt-ensure-some-certificate',
},
'needed_by': {
'svc_systemd:nginx',
},
'triggers': {
'action:letsencrypt_update_certificates',
},
}

View file

@ -1,62 +0,0 @@
from ipaddress import ip_interface
defaults = {
'apt': {
'packages': {
'dehydrated': {},
},
},
'letsencrypt': {
'domains': {},
},
'pacman': {
'packages': {
'dehydrated': {},
},
},
}
@metadata_reactor.provides(
'systemd-timers/letsencrypt',
'mirror/certs',
)
def renew(metadata):
delegated_node = metadata.get('letsencrypt/delegate_to_node', False)
if delegated_node:
delegated_ip = ip_interface(repo.get_node(delegated_node).metadata.get('network/internal/ipv4')).ip
return {
'mirror': {
'certs': {
'from': f"{delegated_ip}:/var/lib/dehydrated/certs",
'to': '/var/lib/dehydrated',
},
},
}
else:
return {
'systemd-timers': {
'letsencrypt': {
'command': '/usr/bin/dehydrated --cron --accept-terms --challenge http-01 && /usr/bin/dehydrated --cleanup',
'when': 'daily',
},
},
}
@metadata_reactor.provides(
'letsencrypt/domains'
)
def delegated_domains(metadata):
return {
'letsencrypt': {
'domains': {
domain: {}
for other_node in repo.nodes
if other_node.has_bundle('letsencrypt')
and other_node.metadata.get('letsencrypt/delegate_to_node', None) == node.name
for domain in other_node.metadata.get('letsencrypt/domains').keys()
},
},
}

View file

@ -1,88 +0,0 @@
assert node.has_bundle('postfix')
assert node.has_bundle('opendkim')
assert node.has_bundle('dovecot')
assert node.has_bundle('letsencrypt')
assert node.has_bundle('roundcube')
assert node.has_bundle('rspamd')
assert node.has_bundle('redis')
from hashlib import md5
from shlex import quote
db_data = node.metadata.get('mailserver/database')
test_password = str(node.metadata.get('mailserver/test_password'))
setup = f"""
CREATE TABLE domains (
"id" BIGSERIAL PRIMARY KEY,
"name" varchar(255) UNIQUE NOT NULL
);
CREATE INDEX ON domains ("name");
CREATE TABLE users (
"id" BIGSERIAL PRIMARY KEY,
"name" varchar(255) NULL,
"domain_id" BIGSERIAL REFERENCES domains(id),
"password" varchar(255) NULL,
"redirect" varchar(255) DEFAULT NULL
);
CREATE UNIQUE INDEX ON users ("name", "domain_id") WHERE "redirect" IS NULL;
ALTER TABLE users
ADD CONSTRAINT name_unless_redirect
CHECK (name IS NOT null OR redirect IS NOT null);
ALTER TABLE users
ADD CONSTRAINT no_password_for_redirects
CHECK (redirect IS null OR password IS null);
ALTER TABLE users
ADD CONSTRAINT name_is_not_empty_string
CHECK (name <> '');
-- OWNERSHIPS
ALTER TABLE domains OWNER TO {db_data['user']};
ALTER TABLE users OWNER TO {db_data['user']};
-- TEST DATA
INSERT INTO domains (name) VALUES ('example.com');
INSERT INTO users (name, domain_id, password)
SELECT 'bw_test_user', domains.id, MD5('{test_password}')
FROM domains
WHERE domains.name = 'example.com';
INSERT INTO users (name, domain_id, redirect)
SELECT 'bw_test_alias', domains.id, 'somewhere@example.com'
FROM domains
WHERE domains.name = 'example.com';
"""
actions['initialize_mailserver_db'] = {
'command': f"psql -d {db_data['name']} -c {quote(setup)}",
'unless': f"psql -At -d {db_data['name']} -c \"SELECT to_regclass(\'public.users\')\" | grep -q '^users$'",
'needs': [
'postgres_db:mailserver',
],
}
# testuser
test_password_md5 = md5(str(test_password).encode()).hexdigest()
check_query = """
SELECT password
FROM users
WHERE name = 'bw_test_user'
AND domain_id = (SELECT id FROM domains WHERE name = 'example.com')
"""
update_query = f"""
UPDATE users
SET password = MD5('{test_password}')
WHERE name = 'bw_test_user'
AND domain_id = (SELECT id FROM domains WHERE name = 'example.com')
"""
actions['mailserver_update_test_pw'] = {
'command': f"psql -d {db_data['name']} -c {quote(update_query)}",
'unless': f"psql -At -d {db_data['name']} -c {quote(check_query)} | grep -q '^{test_password_md5}$\'",
'needs': [
'action:initialize_mailserver_db',
],
}

View file

@ -1,66 +0,0 @@
from ipaddress import ip_interface
database_password = repo.vault.password_for(f'{node.name} db mailserver')
defaults = {
'mailserver': {
'maildir': '/var/vmail',
'database': {
'host': '127.0.0.1', # don't use localhost
'name': 'mailserver',
'user': 'mailserver',
'password': database_password,
},
'test_password': repo.vault.password_for(f'{node.name} test_pw mailserver'),
'domains': [],
},
'postgresql': {
'roles': {
'mailserver': {
'password': database_password,
},
},
'databases': {
'mailserver': {
'owner': 'mailserver',
},
},
},
'zfs': {
'datasets': {
'tank/vmail': {
'mountpoint': '/var/vmail',
'compression': 'on',
},
},
},
}
@metadata_reactor.provides(
'dns',
)
def dns(metadata):
dns = {}
for domain in metadata.get('mailserver/domains'):
dns[domain] = {
'MX': [f"5 {metadata.get('mailserver/hostname')}."],
'TXT': ['v=spf1 a mx -all'],
}
return {
'dns': dns,
}
@metadata_reactor.provides(
'letsencrypt/domains',
)
def letsencrypt(metadata):
return {
'letsencrypt': {
'domains': {
metadata.get('mailserver/hostname'): set(),
},
},
}
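To illustrate the `dns` reactor: a node whose `mailserver/hostname` is, say, `mail.example.com` and whose only domain is `example.com` would contribute roughly this (hostname and domain hypothetical):

```python
{
    'dns': {
        'example.com': {
            'MX': ['5 mail.example.com.'],
            'TXT': ['v=spf1 a mx -all'],
        },
    },
}
```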

View file

@ -1,17 +0,0 @@
defaults = {
'mirror': {},
}
@metadata_reactor.provides(
'systemd-timers',
)
def timers(metadata):
return {
'systemd-timers': {
f'mirror-{name}': {
'command': f"/usr/bin/scp -r -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null '{config['from']}' '{config['to']}'",
'when': 'hourly',
} for name, config in metadata.get('mirror').items()
}
}
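Together with the `mirror/certs` entry emitted by the letsencrypt metadata earlier in this diff, the reactor resolves to a timer along these lines (source address illustrative):

```python
{
    'systemd-timers': {
        'mirror-certs': {
            'command': (
                "/usr/bin/scp -r -o StrictHostKeyChecking=no"
                " -o UserKnownHostsFile=/dev/null"
                " '192.0.2.1:/var/lib/dehydrated/certs' '/var/lib/dehydrated'"
            ),
            'when': 'hourly',
        },
    },
}
```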

View file

@ -1,48 +0,0 @@
from ipaddress import ip_interface
defaults = {
'network': {
}
}
@metadata_reactor.provides(
'systemd/units',
)
def units(metadata):
units = {}
for type, network in metadata.get('network').items():
units[f'{type}.network'] = {
'content': {
'Match': {
'Name': network['interface'],
},
'Network': {
'DHCP': 'no',
'IPv6AcceptRA': 'no',
}
}
}
for i in [4, 6]:
if network.get(f'ipv{i}', None):
units[f'{type}.network']['content'].update({
f'Address#ipv{i}': {
'Address': network[f'ipv{i}'],
},
})
if f'gateway{i}' in network:
units[f'{type}.network']['content'].update({
f'Route#ipv{i}': {
'Gateway': network[f'gateway{i}'],
'GatewayOnlink': 'yes',
}
})
return {
'systemd': {
'units': units,
}
}
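As a sketch of the reactor's input and output (interface and addresses hypothetical), metadata like the following yields an `external.network` unit with one `Address#ipv4` and one `Route#ipv4` section in addition to the static `Match`/`Network` sections:

```python
{
    'network': {
        'external': {
            'interface': 'eth0',
            'ipv4': '192.0.2.10/24',
            'gateway4': '192.0.2.1',
        },
    },
}
```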

View file

@ -1,38 +0,0 @@
# downloads[f'/tmp/nextcloud-{version}.tar.bz2'] = {
# 'url': f'https://download.nextcloud.com/server/releases/nextcloud-{version}.tar.bz2',
# 'sha256_url': '{url}.sha256',
# 'triggered': True,
# }
# actions['delete_nextcloud'] = {
# 'command': 'rm -rf /opt/nextcloud/*',
# 'triggered': True,
# }
# actions['extract_nextcloud'] = {
# 'command': f'tar xfvj /tmp/nextcloud-{version}.tar.bz2 --strip 1 -C /opt/nextcloud nextcloud',
# 'unless': f"""php -r 'include "/opt/nextcloud/version.php"; echo "$OC_VersionString";' | grep -q '^{version}$'""",
# 'preceded_by': [
# 'action:delete_nextcloud',
# f'download:/tmp/nextcloud-{version}.tar.bz2',
# ],
# 'needs': [
# 'directory:/opt/nextcloud',
# ],
# }
# git_deploy = {
# '/opt/nextcloud': {
# 'repo': 'git://github.com/nextcloud/server.git',
# 'rev': f"v{node.metadata.get('nextcloud/version')}",
# 'needs': {
# 'directory:/opt/nextcloud',
# },
# },
# '/opt/nextcloud/3rdparty': {
# 'repo': 'git://github.com/nextcloud/3rdparty.git',
# 'rev': f"v{node.metadata.get('nextcloud/version')}",
# 'needs': {
# 'git_deploy:/opt/nextcloud',
# },
# },
# }

View file

@ -1,25 +0,0 @@
<?php
# https://docs.nextcloud.com/server/stable/admin_manual/configuration_server/config_sample_php_parameters.html#multiple-config-php-file
$CONFIG = array (
"dbuser" => "nextcloud",
"dbpassword" => "${db_password}",
"dbname" => "nextcloud",
"dbhost" => "localhost",
"dbtype" => "pgsql",
"datadirectory" => "/var/lib/nextcloud",
"dbport" => "5432",
"apps_paths" => [
[
"path" => "/opt/nextcloud/apps",
"url" => "/apps",
"writable" => false,
],
[
"path" => "/var/lib/nextcloud/.userapps",
"url" => "/userapps",
"writable" => true,
],
],
"cache_path" => "/var/lib/nextcloud/.cache",
"upgrade.disable-web" => true,
);

View file

@ -1,134 +0,0 @@
assert node.has_bundle('php')
from shlex import quote
from os.path import join
from mako.template import Template
version = node.metadata.get('nextcloud/version')
directories = {
'/opt/nextcloud': {},
'/etc/nextcloud': {
'owner': 'www-data',
'group': 'www-data',
},
'/var/lib/nextcloud': {
'owner': 'www-data',
'group': 'www-data',
'mode': '770',
},
'/var/lib/nextcloud/.userapps': {
'owner': 'www-data',
'group': 'www-data',
},
'/var/lib/nextcloud/.cache': {
'owner': 'www-data',
'group': 'www-data',
},
}
downloads[f'/tmp/nextcloud-{version}.tar.bz2'] = {
'url': f'https://download.nextcloud.com/server/releases/nextcloud-{version}.tar.bz2',
'sha256_url': '{url}.sha256',
'triggered': True,
}
actions['delete_nextcloud'] = {
'command': 'rm -rf /opt/nextcloud/*',
'triggered': True,
}
actions['extract_nextcloud'] = {
'command': f'tar xfvj /tmp/nextcloud-{version}.tar.bz2 --strip 1 -C /opt/nextcloud nextcloud',
'unless': f"""php -r 'include "/opt/nextcloud/version.php"; echo "$OC_VersionString";' | grep -q '^{version}$'""",
'preceded_by': [
'action:delete_nextcloud',
f'download:/tmp/nextcloud-{version}.tar.bz2',
],
'needs': [
'directory:/opt/nextcloud',
],
}
symlinks = {
'/opt/nextcloud/config': {
'target': '/etc/nextcloud',
'owner': 'www-data',
'group': 'www-data',
'needs': [
'action:extract_nextcloud',
],
},
'/opt/nextcloud/userapps': {
'target': '/var/lib/nextcloud/.userapps',
'owner': 'www-data',
'group': 'www-data',
'needs': [
'action:extract_nextcloud',
],
},
}
files = {
'/etc/nextcloud/managed.config.php': {
'content_type': 'mako',
'owner': 'www-data',
'group': 'www-data',
'mode': '640',
'context': {
'db_password': node.metadata.get('postgresql/roles/nextcloud/password'),
},
'needs': [
'directory:/etc/nextcloud',
],
},
}
# SETUP
actions['install_nextcloud'] = {
'command': repo.libs.nextcloud.occ(
'maintenance:install',
no_interaction=None,
database='pgsql',
database_name='nextcloud',
database_host='localhost',
database_user='nextcloud',
database_pass=node.metadata.get('postgresql/roles/nextcloud/password'),
admin_user='admin',
admin_pass=node.metadata.get('nextcloud/admin_pass'),
data_dir='/var/lib/nextcloud',
),
'unless': repo.libs.nextcloud.occ('status') + ' | grep -q "installed: true"',
'needs': [
'directory:/etc/nextcloud',
'directory:/opt/nextcloud',
'directory:/var/lib/nextcloud',
'directory:/var/lib/nextcloud/.userapps',
'directory:/var/lib/nextcloud/.cache',
'symlink:/opt/nextcloud/config',
'symlink:/opt/nextcloud/userapps',
'action:extract_nextcloud',
'file:/etc/nextcloud/managed.config.php',
'postgres_db:nextcloud',
],
}
# UPGRADE
actions['upgrade_nextcloud'] = {
'command': repo.libs.nextcloud.occ('upgrade'),
'unless': "! " + repo.libs.nextcloud.occ('status') + ' | grep -q "Nextcloud or one of the apps require upgrade"',
'needs': [
'action:install_nextcloud',
],
}
actions['nextcloud_add_missing_indices'] = {
'command': repo.libs.nextcloud.occ('db:add-missing-indices'),
'needs': [
'action:upgrade_nextcloud',
],
'triggered': True,
'triggered_by': [
'action:extract_nextcloud',
],
}
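`repo.libs.nextcloud.occ` itself is not part of this diff; a plausible reconstruction, assuming keyword options map to `--kebab-case` flags and occ runs as `www-data` (path and details are guesses):

```python
from shlex import quote

def occ(command, **options):
    # Hypothetical sketch: None-valued options become bare flags
    # (no_interaction=None -> --no-interaction); everything else
    # becomes --flag=value with shell quoting applied to the value.
    args = [command]
    for key, value in options.items():
        flag = '--' + key.replace('_', '-')
        args.append(flag if value is None else f'{flag}={quote(str(value))}')
    return 'sudo -u www-data php /opt/nextcloud/occ ' + ' '.join(args)
```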

View file

@ -1,83 +0,0 @@
import string
from uuid import UUID
defaults = {
'apt': {
'packages': {
'php': {},
'php-curl': {},
'php-gd': {},
'php-json': {},
'php-xml': {},
'php-mbstring': {},
'php-cli': {},
'php-cgi': {},
'php-zip': {},
'php-pgsql': {},
},
},
'archive': {
'paths': {
'/var/lib/nextcloud': {
'exclude': [
'^appdata_',
'^updater-',
r'^nextcloud\.log',
r'^updater\.log',
'^[^/]+/cache',
'^[^/]+/files_versions',
'^[^/]+/files_trashbin',
],
},
},
},
'backup': {
'paths': [
'/etc/nextcloud/config.php',
],
},
'nextcloud': {
'admin_user': 'admin',
'admin_pass': repo.vault.password_for(f'{node.name} nextcloud admin pw'),
},
'postgresql': {
'roles': {
'nextcloud': {
'password': repo.vault.password_for(f'{node.name} nextcloud db pw'),
},
},
'databases': {
'nextcloud': {
'owner': 'nextcloud',
},
},
},
'zfs': {
'datasets': {
'tank/nextcloud': {
'mountpoint': '/var/lib/nextcloud',
'needed_by': [
'bundle:nextcloud',
],
},
},
},
}
@metadata_reactor.provides(
'nginx/vhosts'
)
def vhost(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('nextcloud/hostname'): {
'content': 'nextcloud/vhost.conf',
'context': {
'root': '/opt/nextcloud',
},
},
},
},
}

View file

@ -1,12 +0,0 @@
server {
listen 80;
listen [::]:80;
location / {
return 301 https://$host$request_uri;
}
location /.well-known/acme-challenge/ {
alias /var/lib/dehydrated/acme-challenges/;
}
}

View file

@ -1,27 +0,0 @@
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param QUERY_STRING $query_string;
fastcgi_param REQUEST_METHOD $request_method;
fastcgi_param CONTENT_TYPE $content_type;
fastcgi_param CONTENT_LENGTH $content_length;
fastcgi_param SCRIPT_NAME $fastcgi_script_name;
fastcgi_param REQUEST_URI $request_uri;
fastcgi_param DOCUMENT_URI $document_uri;
fastcgi_param DOCUMENT_ROOT $document_root;
fastcgi_param SERVER_PROTOCOL $server_protocol;
fastcgi_param REQUEST_SCHEME $scheme;
fastcgi_param HTTPS $https if_not_empty;
fastcgi_param GATEWAY_INTERFACE CGI/1.1;
fastcgi_param SERVER_SOFTWARE nginx/$nginx_version;
fastcgi_param REMOTE_ADDR $remote_addr;
fastcgi_param REMOTE_PORT $remote_port;
fastcgi_param SERVER_ADDR $server_addr;
fastcgi_param SERVER_PORT $server_port;
fastcgi_param SERVER_NAME $server_name;
fastcgi_param REDIRECT_STATUS 200;
# This is the only thing that's different from the Debian default.
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;

View file

@ -1,25 +0,0 @@
pid /var/run/nginx.pid;
user www-data;
worker_processes 10;
events {
worker_connections 768;
}
http {
access_log /var/log/nginx/access.log;
default_type application/octet-stream;
error_log /var/log/nginx/error.log;
include /etc/nginx/mime.types;
sendfile on;
server_names_hash_bucket_size 128;
tcp_nopush on;
% if node.has_bundle('php'):
upstream php-handler {
server unix:/var/run/php/php${node.metadata.get('php/version')}-fpm.sock;
}
% endif
include /etc/nginx/sites/*;
}

View file

@ -1,5 +0,0 @@
server {
listen 127.0.0.1:22999 default_server;
server_name _;
stub_status;
}

View file

@ -1,89 +0,0 @@
from datetime import datetime, timedelta
from mako.template import Template
from os.path import join
directories = {
'/etc/nginx/sites': {
'purge': True,
'triggers': {
'svc_systemd:nginx:restart',
},
},
'/etc/nginx/ssl': {
'purge': True,
'triggers': {
'svc_systemd:nginx:restart',
},
},
'/var/www': {
'owner': 'www-data',
},
}
files = {
'/etc/nginx/nginx.conf': {
'content_type': 'mako',
'triggers': {
'svc_systemd:nginx:restart',
},
},
'/etc/nginx/fastcgi.conf': {
'triggers': {
'svc_systemd:nginx:restart',
},
},
'/etc/nginx/sites/80.conf': {
'triggers': {
'svc_systemd:nginx:restart',
},
},
'/etc/nginx/sites/stub_status.conf': {
'triggers': {
'svc_systemd:nginx:restart',
},
},
'/etc/nginx/sites-available': {
'delete': True,
},
'/etc/nginx/sites-enabled': {
'delete': True,
},
}
actions = {
'nginx-generate-dhparam': {
'command': 'openssl dhparam -out /etc/ssl/certs/dhparam.pem 2048',
'unless': 'test -f /etc/ssl/certs/dhparam.pem',
},
}
svc_systemd = {
'nginx': {
'needs': {
'action:nginx-generate-dhparam',
'pkg_apt:nginx',
},
},
}
for name, config in node.metadata.get('nginx/vhosts').items():
files[f'/etc/nginx/sites/{name}'] = {
'content': Template(filename=join(repo.path, 'data', config['content'])).render(
server_name=name,
**config.get('context', {}),
),
'needs': [],
'needed_by': {
'svc_systemd:nginx',
'svc_systemd:nginx:restart',
},
'triggers': {
'svc_systemd:nginx:restart',
},
}
if name in node.metadata.get('letsencrypt/domains'):
files[f'/etc/nginx/sites/{name}']['needs'].append(
f'action:letsencrypt_ensure-some-certificate_{name}',
)
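Note that vhost files are rendered eagerly with Mako here instead of via a file item's `content_type: 'mako'`, so each vhost gets its own template context. The rendering step in isolation (template text hypothetical):

```python
from mako.template import Template

template = Template('server { server_name ${server_name}; root ${root}; }')
print(template.render(server_name='cloud.example.com', root='/opt/nextcloud'))
# -> server { server_name cloud.example.com; root /opt/nextcloud; }
```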

View file

@ -1,113 +0,0 @@
from ipaddress import ip_interface
defaults = {
'apt': {
'packages': {
'nginx': {},
},
},
'nginx': {
'default_vhosts': {
'80': {
'listen': [
'80',
'[::]:80',
],
'location /.well-known/acme-challenge/': {
'alias': '/var/lib/dehydrated/acme-challenges/',
},
'location /': {
'return': '301 https://$host$request_uri',
},
},
'stub_status': {
'listen': '127.0.0.1:22999 default_server',
'server_name': '_',
'stub_status': '',
},
},
'vhosts': {
# '80': {
# 'content': 'nginx/80.conf',
# },
# 'stub_status': {
# 'content': 'nginx/stub_status.conf',
# },
},
'includes': {},
},
}
@metadata_reactor.provides(
'nginx/includes',
)
def includes(metadata):
return {
'nginx': {
'includes': {
'php': {
r'location ~ \.php$': {
'include': 'fastcgi.conf',
'fastcgi_split_path_info': r'^(.+\.php)(/.+)$',
'fastcgi_pass': f"unix:/run/php/php{metadata.get('php/version')}-fpm.sock",
},
},
},
},
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def vhosts(metadata):
vhosts = {}
for name, config in metadata.get('nginx/vhosts').items():
vhosts[name] = {
'server_name': name,
'listen': [
'443 ssl http2',
'[::]:443 ssl http2',
],
'ssl_certificate': f'/var/lib/dehydrated/certs/{name}/fullchain.pem',
'ssl_certificate_key': f'/var/lib/dehydrated/certs/{name}/privkey.pem',
'location /.well-known/acme-challenge/': {
'alias': '/var/lib/dehydrated/acme-challenges/',
},
}
return {
'nginx': {
'vhosts': vhosts,
}
}
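Since the `vhosts` reactor layers the TLS and ACME boilerplate onto every declared vhost, a bundle only has to contribute a name, template, and context; the merged metadata for a hypothetical `cloud.example.com` would look roughly like:

```python
{
    'server_name': 'cloud.example.com',
    'listen': ['443 ssl http2', '[::]:443 ssl http2'],
    'ssl_certificate': '/var/lib/dehydrated/certs/cloud.example.com/fullchain.pem',
    'ssl_certificate_key': '/var/lib/dehydrated/certs/cloud.example.com/privkey.pem',
    'location /.well-known/acme-challenge/': {
        'alias': '/var/lib/dehydrated/acme-challenges/',
    },
    'content': 'nextcloud/vhost.conf',   # contributed by the nextcloud bundle
    'context': {'root': '/opt/nextcloud'},
}
```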
@metadata_reactor.provides(
'dns',
)
def dns(metadata):
return {
'dns': {
domain: repo.libs.dns.get_a_records(metadata, internal=config.get('internal_dns', True))
for domain, config in metadata.get('nginx/vhosts').items()
},
}
@metadata_reactor.provides(
'letsencrypt/domains',
'letsencrypt/reload_after',
)
def letsencrypt(metadata):
return {
'letsencrypt': {
'domains': {
domain: {} for domain in metadata.get('nginx/vhosts')
},
'reload_after': {
'nginx',
},
},
}

View file

@ -1,3 +0,0 @@
% for domain in domains:
mail._domainkey.${domain} ${domain}:mail:/etc/opendkim/keys/${domain}/mail.private
% endfor

View file

@ -1,15 +0,0 @@
Mode sv
SignatureAlgorithm rsa-sha256
Canonicalization relaxed/simple
KeyTable refile:/etc/opendkim/key_table
SigningTable refile:/etc/opendkim/signing_table
UMask 007
UserID opendkim:opendkim
PidFile /run/opendkim/opendkim.pid
Socket inet:8891@localhost
Syslog yes
SyslogSuccess Yes
SyslogFacility mail
LogWhy Yes

View file

@ -1,3 +0,0 @@
% for domain in domains:
*@${domain} mail._domainkey.${domain}
% endfor

View file

@ -1,85 +0,0 @@
file_attributes = {
'owner': 'opendkim',
'group': 'opendkim',
'mode': '700',
'triggers': [
'svc_systemd:opendkim:restart',
],
}
users['opendkim'] = {}
directories = {
'/etc/opendkim': {
**file_attributes,
'purge' : True,
},
'/etc/opendkim/keys': {
**file_attributes,
'purge' : True,
},
}
files = {
'/etc/opendkim.conf': {
**file_attributes,
},
'/etc/default/opendkim': {
# https://metadata.ftp-master.debian.org/changelogs//main/o/opendkim/testing_opendkim.NEWS
'delete': True,
},
'/etc/opendkim/key_table': {
'content_type': 'mako',
'context': {
'domains': node.metadata.get('mailserver/domains'),
},
**file_attributes,
},
'/etc/opendkim/signing_table': {
'content_type': 'mako',
'context': {
'domains': node.metadata.get('mailserver/domains'),
},
**file_attributes,
},
}
for domain in node.metadata.get('mailserver/domains'):
directories[f'/etc/opendkim/keys/{domain}'] = {
**file_attributes,
'purge': True,
}
files[f'/etc/opendkim/keys/{domain}/mail.private'] = {
**file_attributes,
'content': node.metadata.get(f'opendkim/keys/{domain}/private'),
}
# files[f'/etc/opendkim/keys/{domain}/mail.txt'] = {
# **file_attributes,
# 'content_type': 'any',
# }
# actions[f'generate_{domain}_dkim_key'] = {
# 'command': (
# f'sudo --user opendkim'
# f' opendkim-genkey'
# f' --selector=mail'
# f' --directory=/etc/opendkim/keys/{domain}'
# f' --domain={domain}'
# ),
# 'unless': f'test -f /etc/opendkim/keys/{domain}/mail.private',
# 'needs': [
# 'svc_systemd:opendkim',
# f'directory:/etc/opendkim/keys/{domain}',
# ],
# 'triggers': [
# 'svc_systemd:opendkim:restart',
# ],
# }
svc_systemd['opendkim'] = {
'needs': [
'pkg_apt:opendkim',
'file:/etc/opendkim.conf',
'file:/etc/opendkim/key_table',
'file:/etc/opendkim/signing_table',
],
}

View file

@ -1,93 +0,0 @@
from os.path import join, exists
from re import sub
from cryptography.hazmat.primitives import serialization as crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as crypto_default_backend
defaults = {
'apt': {
'packages': {
'opendkim': {},
'opendkim-tools': {},
},
},
'opendkim': {
'keys': {},
},
'dns': {
'mail._domainkey.mail2.sublimity.de': {
'TXT': [
]
}
}
}
@metadata_reactor.provides(
'opendkim/keys',
)
def keys(metadata):
keys = {}
for domain in metadata.get('mailserver/domains'):
if domain in metadata.get('opendkim/keys'):
continue
pubkey_path = join(repo.path, 'data', 'dkim', f'{domain}.pubkey')
privkey_path = join(repo.path, 'data', 'dkim', f'{domain}.privkey.enc')
if not exists(pubkey_path) or not exists(privkey_path):
key = rsa.generate_private_key(
backend=crypto_default_backend(),
public_exponent=65537,
key_size=2048
)
with open(pubkey_path, 'w') as file:
file.write(
key.public_key().public_bytes(
crypto_serialization.Encoding.OpenSSH,
crypto_serialization.PublicFormat.OpenSSH
).decode()
)
with open(privkey_path, 'w') as file:
file.write(
repo.vault.encrypt(
key.private_bytes(
crypto_serialization.Encoding.PEM,
crypto_serialization.PrivateFormat.PKCS8,
crypto_serialization.NoEncryption()
).decode(),
)
)
with open(pubkey_path, 'r') as pubkey:
with open(privkey_path, 'r') as privkey:
keys[domain] = {
'public': pubkey.read(),
'private': repo.vault.decrypt(privkey.read()),
}
return {
'opendkim': {
'keys': keys,
}
}
@metadata_reactor.provides(
'dns',
)
def dns(metadata):
dns = {}
for domain, keys in metadata.get('opendkim/keys').items():
raw_key = sub('^ssh-rsa ', '', keys['public'])
dns[f'mail._domainkey.{domain}'] = {
'TXT': [f'v=DKIM1; k=rsa; p={raw_key}'],
}
return {
'dns': dns,
}
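One caveat: the `dns` reactor strips the `ssh-rsa ` prefix from an OpenSSH-formatted key, but the base64 that remains is SSH wire format, while RFC 6376 expects `p=` to carry base64 of the DER-encoded SubjectPublicKeyInfo. A sketch of deriving a conformant value with the same `cryptography` primitives used above:

```python
from base64 import b64encode
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
der = key.public_key().public_bytes(
    serialization.Encoding.DER,
    serialization.PublicFormat.SubjectPublicKeyInfo,
)
print(f'v=DKIM1; k=rsa; p={b64encode(der).decode()}')
```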

View file

@ -1,102 +0,0 @@
[PHP]
; Only needed for libapache2-mod-php?
engine = On
short_open_tag = Off
precision = 14
output_buffering = 4096
zlib.output_compression = Off
implicit_flush = Off
serialize_precision = -1
disable_functions = pcntl_alarm,pcntl_fork,pcntl_waitpid,pcntl_wait,pcntl_wifexited,pcntl_wifstopped,pcntl_wifsignaled,pcntl_wifcontinued,pcntl_wexitstatus,pcntl_wtermsig,pcntl_wstopsig,pcntl_signal,pcntl_signal_get_handler,pcntl_signal_dispatch,pcntl_get_last_error,pcntl_strerror,pcntl_sigprocmask,pcntl_sigwaitinfo,pcntl_sigtimedwait,pcntl_exec,pcntl_getpriority,pcntl_setpriority,pcntl_async_signals
ignore_user_abort = Off
zend.enable_gc = On
expose_php = Off
max_execution_time = 300
max_input_time = 600
memory_limit = 1G
error_reporting = E_ALL & ~E_DEPRECATED & ~E_STRICT
display_startup_errors = Off
log_errors = On
log_errors_max_len = 1024
ignore_repeated_errors = Off
ignore_repeated_source = Off
report_memleaks = On
html_errors = On
error_log = syslog
syslog.ident = php7.4
syslog.filter = ascii
arg_separator.output = "&amp;"
variables_order = "GPCS"
request_order = "GP"
register_argc_argv = Off
auto_globals_jit = On
post_max_size = ${post_max_size}
default_mimetype = "text/html"
default_charset = "UTF-8"
enable_dl = Off
file_uploads = On
upload_max_filesize = ${post_max_size}
max_file_uploads = 2000
allow_url_fopen = On
allow_url_include = Off
default_socket_timeout = 10
[CLI Server]
cli_server.color = On
[mail function]
mail.add_x_header = Off
[ODBC]
odbc.allow_persistent = On
odbc.check_persistent = On
odbc.max_persistent = -1
odbc.max_links = -1
odbc.defaultlrl = 4096
odbc.defaultbinmode = 1
[PostgreSQL]
pgsql.allow_persistent = On
pgsql.auto_reset_persistent = Off
pgsql.max_persistent = -1
pgsql.max_links = -1
pgsql.ignore_notice = 0
pgsql.log_notice = 0
[bcmath]
bcmath.scale = 0
[Session]
session.save_handler = files
session.use_strict_mode = 0
session.use_cookies = 1
session.use_only_cookies = 1
session.name = PHPSESSID
session.auto_start = 0
session.cookie_lifetime = 0
session.cookie_path = /
session.cookie_domain =
session.cookie_httponly =
session.cookie_samesite =
session.serialize_handler = php
session.gc_probability = 1
session.gc_divisor = 1000
session.gc_maxlifetime = 1440
session.referer_check =
session.cache_limiter = nocache
session.cache_expire = 180
session.use_trans_sid = 0
session.sid_length = 32
session.trans_sid_tags = "a=href,area=href,frame=src,form="
session.sid_bits_per_character = 6
[Assertion]
zend.assertions = -1
[Date]
date.timezone = Europe/London

View file

@ -1,37 +0,0 @@
version = node.metadata.get('php/version')
php_ini_context = {
'num_cpus': node.metadata.get('vm/cores'),
'post_max_size': node.metadata.get('php/post_max_size', '32G'),
}
files = {
f'/etc/php/{version}/fpm/php.ini': {
'content_type': 'mako',
'context': php_ini_context,
'needs': {
# "all php packages"
'pkg_apt:'
},
'triggers': {
f'svc_systemd:php{version}-fpm:restart',
},
},
f'/etc/php/{version}/cli/php.ini': {
'content_type': 'mako',
'context': php_ini_context,
'needs': {
# "all php packages"
'pkg_apt:'
},
},
}
svc_systemd = {
f'php{version}-fpm': {
'needs': {
'pkg_apt:',
f'file:/etc/php/{version}/fpm/php.ini',
},
},
}

View file

@ -1,7 +0,0 @@
defaults = {
'apt': {
'packages': {
'php': {},
},
},
}

View file

@ -1,60 +0,0 @@
#!/bin/bash
#exit 0
export LANGUAGE=en_US.UTF-8
export LANG=en_US.UTF-8
export LC_ALL=en_US.UTF-8
function log {
logger -st nc-picsort "${*:-$(</dev/stdin)}"
}
SOURCEPATH="/var/lib/nextcloud/ckn/files/SofortUpload/AutoSort/"
DESTINATIONPATH="/var/lib/nextcloud/ckn/files/Bilder/Chronologie/"
USER="ckn"
log "STARTING..."
if ps aux | grep cron | grep nc-picsort | grep -v $$; then log "EXIT: still running"; exit 0; fi
SCAN="FALSE"
IFS=$'\n'
for f in `find "$SOURCEPATH" -iname '*.PNG' -o -iname '*.JPG' -o -iname '*.CR2' -o -iname '*.CR3' -o -iname '*.MP4' -o -iname '*.MOV'`; do
log "PROCESSING: $f"
DATE=`exiftool "$f" | grep -m 1 "Create Date"`
if ! echo "$DATE" | grep "Create Date" >/dev/null
then
log "SKIP: no 'Create Date' in exif ($f)"
continue
fi
SCAN="TRUE"
YEAR=`echo $DATE | cut -d':' -f2 | cut -c 2-`
MONTH=`echo $DATE | cut -d':' -f3`
DAY=`echo $DATE | cut -d':' -f4 | cut -d' ' -f1`
HOUR=`echo $DATE | cut -d':' -f4 | cut -d' ' -f2`
MINUTE=`echo $DATE | cut -d':' -f5`
SECOND=`echo $DATE | cut -d':' -f6`
HASH=`sha256sum "$f" | xxd -r -p | base64 | head -c 3 | tr '/+' '_-'`
EXT=`echo "${f##*.}" | tr '[:upper:]' '[:lower:]'`
if [[ "$EXT" = "cr2" ]] || [[ "$EXT" = "cr3" ]]
then
RAW="raw/"
else
RAW=""
fi
FILE="$DESTINATIONPATH$YEAR-$MONTH/$RAW$YEAR$MONTH$DAY"-"$HOUR$MINUTE$SECOND"_"$HASH"."$EXT"
log "DESTINATION: $FILE"
mkdir -p "$(dirname "$FILE")"
mv -v "$f" "$FILE"
done
if [ "$SCAN" == "TRUE" ]; then
log "SCANNING..."
# find "$SOURCEPATH/"* -type d -empty -delete >> /var/log/nc-picsort.log # nextcloud app bug when deleting folders
chown -R www-data:www-data "$DESTINATIONPATH"
chmod -R 777 "$DESTINATIONPATH"
sudo -u www-data php /var/www/nextcloud/occ files:scan $USER | log
sudo -u www-data php /var/www/nextcloud/occ preview:generate-all $USER | log
fi
log "FINISH."

View file

@ -1,53 +0,0 @@
smtpd_banner = $myhostname ESMTP $mail_name (Debian/GNU)
biff = no
append_dot_mydomain = no
readme_directory = no
compatibility_level = 2
smtpd_use_tls=yes
<%text>
smtpd_tls_session_cache_database = btree:${data_directory}/smtpd_scache
smtp_tls_session_cache_database = btree:${data_directory}/smtp_scache
</%text>
smtpd_relay_restrictions = permit_mynetworks permit_sasl_authenticated defer_unauth_destination
myhostname = ${hostname}
alias_maps = hash:/etc/aliases
alias_database = hash:/etc/aliases
myorigin = /etc/mailname
mydestination = $myhostname, localhost.localdomain, localhost
relayhost =
mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128
mailbox_size_limit = 0
recipient_delimiter = +
inet_interfaces = all
inet_protocols = all
virtual_mailbox_domains = pgsql:/etc/postfix/virtual_mailbox_domains.cf
virtual_mailbox_maps = pgsql:/etc/postfix/virtual_mailbox_maps.cf
virtual_alias_maps = pgsql:/etc/postfix/virtual_alias_maps.cf,pgsql:/etc/postfix/virtual_mailbox_maps.cf
smtpd_sender_login_maps = pgsql:/etc/postfix/virtual_alias_maps.cf
virtual_transport = lmtp:unix:private/dovecot-lmtp
smtpd_sasl_type = dovecot
smtpd_sasl_path = private/auth
smtpd_sasl_auth_enable = yes
smtpd_tls_security_level = may
smtpd_tls_auth_only = yes
smtpd_tls_cert_file = /var/lib/dehydrated/certs/${hostname}/fullchain.pem
smtpd_tls_key_file = /var/lib/dehydrated/certs/${hostname}/privkey.pem
smtp_tls_security_level = may
smtpd_restriction_classes = mua_sender_restrictions, mua_client_restrictions, mua_helo_restrictions
mua_client_restrictions = permit_sasl_authenticated, reject
mua_sender_restrictions = permit_sasl_authenticated, reject
mua_helo_restrictions = permit_mynetworks, reject_non_fqdn_hostname, reject_invalid_hostname, permit
smtpd_milters = inet:localhost:8891 inet:127.0.0.1:11332
non_smtpd_milters = inet:localhost:8891 inet:127.0.0.1:11332
# opendkim
milter_protocol = 6
milter_default_action = accept
# rspamd
milter_mail_macros = "i {mail_addr} {client_addr} {client_name} {auth_authen}"

View file

@ -1,55 +0,0 @@
# ==========================================================================
# service type private unpriv chroot wakeup maxproc command + args
# (yes) (yes) (no) (never) (100)
# ==========================================================================
smtp inet n - y - - smtpd
pickup unix n - y 60 1 pickup
cleanup unix n - y - 0 cleanup
qmgr unix n - n 300 1 qmgr
tlsmgr unix - - y 1000? 1 tlsmgr
rewrite unix - - y - - trivial-rewrite
bounce unix - - y - 0 bounce
defer unix - - y - 0 bounce
trace unix - - y - 0 bounce
verify unix - - y - 1 verify
flush unix n - y 1000? 0 flush
proxymap unix - - n - - proxymap
proxywrite unix - - n - 1 proxymap
smtp unix - - y - - smtp
relay unix - - y - - smtp
-o syslog_name=postfix/$service_name
showq unix n - y - - showq
error unix - - y - - error
retry unix - - y - - error
discard unix - - y - - discard
local unix - n n - - local
virtual unix - n n - - virtual
lmtp unix - - y - - lmtp
anvil unix - - y - 1 anvil
scache unix - - y - 1 scache
postlog unix-dgram n - n - 1 postlogd
maildrop unix - n n - - pipe
flags=DRhu user=vmail argv=/usr/bin/maildrop -d ${recipient}
uucp unix - n n - - pipe
flags=Fqhu user=uucp argv=uux -r -n -z -a$sender - $nexthop!rmail ($recipient)
ifmail unix - n n - - pipe
flags=F user=ftn argv=/usr/lib/ifmail/ifmail -r $nexthop ($recipient)
bsmtp unix - n n - - pipe
flags=Fq. user=bsmtp argv=/usr/lib/bsmtp/bsmtp -t$nexthop -f$sender $recipient
scalemail-backend unix - n n - 2 pipe
flags=R user=scalemail argv=/usr/lib/scalemail/bin/scalemail-store ${nexthop} ${user} ${extension}
mailman unix - n n - - pipe
flags=FR user=list argv=/usr/lib/mailman/bin/postfix-to-mailman.py
${nexthop} ${user}
submission inet n - y - - smtpd
-o syslog_name=postfix/submission
-o smtpd_tls_security_level=encrypt
-o smtpd_sasl_auth_enable=yes
-o smtpd_tls_auth_only=yes
-o smtpd_reject_unlisted_recipient=no
-o smtpd_client_restrictions=$mua_client_restrictions
-o smtpd_helo_restrictions=$mua_helo_restrictions
-o smtpd_sender_restrictions=$mua_sender_restrictions
-o smtpd_recipient_restrictions=
-o smtpd_relay_restrictions=permit_sasl_authenticated,reject
-o milter_macro_daemon_name=ORIGINATING

View file

@ -1,5 +0,0 @@
hosts = ${host}
dbname = ${name}
user = ${user}
password = ${password}
query = SELECT redirect FROM users LEFT JOIN domains ON users.domain_id = domains.id WHERE redirect IS NOT NULL AND domains.name = '%d' AND (users.name = '%u' OR users.name IS null)

View file

@ -1,5 +0,0 @@
hosts = ${host}
dbname = ${name}
user = ${user}
password = ${password}
query = SELECT name FROM domains WHERE name='%s'

View file

@ -1,5 +0,0 @@
hosts = ${host}
dbname = ${name}
user = ${user}
password = ${password}
query = SELECT CONCAT(users.name, '@', domains.name) AS email FROM users LEFT JOIN domains ON users.domain_id = domains.id WHERE redirect IS NULL AND users.name = '%u' AND domains.name = '%d'

View file

@ -1,79 +0,0 @@
assert node.has_bundle('mailserver')
file_options = {
'needs': [
'pkg_apt:postfix',
],
'needed_by': [
'svc_systemd:postfix',
],
'triggers': [
'svc_systemd:postfix:restart',
],
}
files = {
'/etc/postfix/main.cf': {
'content_type': 'mako',
'context': {
'hostname': node.metadata.get('mailserver/hostname'),
},
**file_options,
},
'/etc/postfix/master.cf': {
**file_options,
},
'/etc/postfix/virtual_mailbox_domains.cf': {
'content_type': 'mako',
'context': node.metadata.get('mailserver/database'),
**file_options,
},
'/etc/postfix/virtual_mailbox_maps.cf': {
'content_type': 'mako',
'context': node.metadata.get('mailserver/database'),
**file_options,
},
'/etc/postfix/virtual_alias_maps.cf': {
'content_type': 'mako',
'context': node.metadata.get('mailserver/database'),
**file_options,
},
}
svc_systemd['postfix'] = {
'needs': [
'postgres_db:mailserver',
],
}
actions['test_postfix_config'] = {
'command': 'false',
'unless': "postconf check | grep -v 'symlink leaves directory' | wc -l | grep -q '^0$'",
'needs': [
'svc_systemd:postfix',
],
}
actions['test_virtual_mailbox_domains'] = {
'command': 'false',
'unless': "postmap -q example.com pgsql:/etc/postfix/virtual_mailbox_domains.cf | grep -q '^example.com$'",
'needs': [
'svc_systemd:postfix',
'action:mailserver_update_test_pw',
],
}
actions['test_virtual_mailbox_maps'] = {
'command': 'false',
'unless': "postmap -q bw_test_user@example.com pgsql:/etc/postfix/virtual_mailbox_maps.cf | grep -q '^bw_test_user@example.com$'",
'needs': [
'svc_systemd:postfix',
'action:mailserver_update_test_pw',
],
}
actions['test_virtual_alias_maps'] = {
'command': 'false',
'unless': "postmap -q bw_test_alias@example.com pgsql:/etc/postfix/virtual_alias_maps.cf | grep -q '^somewhere@example.com$'",
'needs': [
'svc_systemd:postfix',
'action:mailserver_update_test_pw',
],
}
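These four test actions use a deliberately inverted pattern: `command` is `false`, which can never succeed, so an apply run fails loudly whenever the `unless` probe does not pass. The same idiom in isolation (probe hypothetical):

```python
actions['assert_postfix_active'] = {
    # 'false' guarantees a visible failure if the probe below fails.
    'command': 'false',
    'unless': 'systemctl is-active --quiet postfix',
    'needs': [
        'svc_systemd:postfix',
    ],
}
```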

View file

@ -1,25 +0,0 @@
defaults = {
'apt': {
'packages': {
'postfix': {},
'postfix-pgsql': {},
}
},
'backup': {
'paths': [
'/var/vmail',
],
},
'letsencrypt': {
'reload_after': {
'postfix',
},
},
'telegraf': {
'config': {
'inputs': {
'postfix': [{}],
},
},
},
}

View file

@ -1,35 +0,0 @@
from bundlewrap.utils.dicts import merge_dict
directories = {
'/var/lib/postgresql': {
'owner': 'postgres',
'group': 'postgres',
'needs': [
'zfs_dataset:tank/postgresql',
],
'needed_by': [
'svc_systemd:postgresql',
],
}
}
svc_systemd['postgresql'] = {
'needs': [
'pkg_apt:postgresql',
],
}
for user, config in node.metadata.get('postgresql/roles').items():
postgres_roles[user] = merge_dict(config, {
'needs': [
'svc_systemd:postgresql',
],
})
for database, config in node.metadata.get('postgresql/databases').items():
postgres_dbs[database] = merge_dict(config, {
'needs': [
'svc_systemd:postgresql',
],
})

View file

@ -1,54 +0,0 @@
root_password = repo.vault.password_for(f'{node.name} postgresql root')
defaults = {
'apt': {
'packages': {
'postgresql': {},
},
},
'backup': {
'paths': [
'/var/lib/postgresql',
],
},
'postgresql': {
'roles': {
'root': {
'password': root_password,
'superuser': True,
},
},
'databases': {},
},
'grafana_rows': [],
}
if node.has_bundle('zfs'):
defaults['zfs'] = {
'datasets': {
'tank/postgresql': {
'mountpoint': '/var/lib/postgresql',
'recordsize': '8192',
'atime': 'off',
'logbias': 'throughput',
},
},
}
@metadata_reactor.provides(
'telegraf/config/inputs/postgresql',
)
def telegraf(metadata):
return {
'telegraf': {
'config': {
'inputs': {
'postgresql': [{
'address': f'postgres://root:{root_password}@localhost:5432/postgres',
'databases': sorted(list(node.metadata.get('postgresql/databases').keys())),
}],
},
},
},
}
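For a node carrying, say, the `mailserver` and `nextcloud` databases, the `telegraf` reactor resolves to roughly the following (vault password elided):

```python
{
    'telegraf': {
        'config': {
            'inputs': {
                'postgresql': [{
                    'address': 'postgres://root:<root_password>@localhost:5432/postgres',
                    'databases': ['mailserver', 'nextcloud'],
                }],
            },
        },
    },
}
```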

View file

@ -1,7 +0,0 @@
defaults = {
'apt': {
'packages': {
'redis-server': {},
},
},
}

View file

@ -1,88 +0,0 @@
<?php
% if installer:
$config['enable_installer'] = true;
% endif
/* Local configuration for Roundcube Webmail */
// ----------------------------------
// SQL DATABASE
// ----------------------------------
// Database connection string (DSN) for read+write operations
// Format (compatible with PEAR MDB2): db_provider://user:password@host/database
// Currently supported db_providers: mysql, pgsql, sqlite, mssql or sqlsrv
// For examples see http://pear.php.net/manual/en/package.database.mdb2.intro-dsn.php
// NOTE: for SQLite use absolute path: 'sqlite:////full/path/to/sqlite.db?mode=0646'
$config['db_dsnw'] = '${database['provider']}://${database['user']}:${database['password']}@${database['host']}/${database['name']}';
// ----------------------------------
// IMAP
// ----------------------------------
// The mail host chosen to perform the log-in.
// Leave blank to show a textbox at login, give a list of hosts
// to display a pulldown menu or set one host as string.
// To use SSL/TLS connection, enter hostname with prefix ssl:// or tls://
// Supported replacement variables:
// %n - hostname ($_SERVER['SERVER_NAME'])
// %t - hostname without the first part
// %d - domain (http hostname $_SERVER['HTTP_HOST'] without the first part)
// %s - domain name after the '@' from e-mail address provided at login screen
// For example %n = mail.domain.tld, %t = domain.tld
// WARNING: After hostname change update of mail_host column in users table is
// required to match old user data records with the new host.
$config['default_host'] = 'localhost';
// ----------------------------------
// SMTP
// ----------------------------------
// SMTP server host (for sending mails).
// To use SSL/TLS connection, enter hostname with prefix ssl:// or tls://
// If left blank, the PHP mail() function is used
// Supported replacement variables:
// %h - user's IMAP hostname
// %n - hostname ($_SERVER['SERVER_NAME'])
// %t - hostname without the first part
// %d - domain (http hostname $_SERVER['HTTP_HOST'] without the first part)
// %z - IMAP domain (IMAP hostname without the first part)
// For example %n = mail.domain.tld, %t = domain.tld
$config['smtp_server'] = 'tls://localhost';
// SMTP username (if required) if you use %u as the username Roundcube
// will use the current username for login
$config['smtp_user'] = '%u';
// SMTP password (if required) if you use %p as the password Roundcube
// will use the current user's password for login
$config['smtp_pass'] = '%p';
// provide an URL where a user can get support for this Roundcube installation
// PLEASE DO NOT LINK TO THE ROUNDCUBE.NET WEBSITE HERE!
$config['support_url'] = '';
// this key is used to encrypt the users imap password which is stored
// in the session record (and the client cookie if remember password is enabled).
// please provide a string of exactly 24 chars.
$config['des_key'] = '${des_key}';
// Name your service. This is displayed on the login screen and in the window title
$config['product_name'] = '${product_name}';
// ----------------------------------
// PLUGINS
// ----------------------------------
// List of active plugins (in plugins/ directory)
$config['plugins'] = array(${', '.join(f'"{plugin}"' for plugin in plugins)});
// the default locale setting (leave empty for auto-detection)
// RFC1766 formatted language name like en_US, de_DE, de_CH, fr_FR, pt_BR
$config['language'] = 'de_DE';
// https://serverfault.com/a/991304
$config['smtp_conn_options'] = array(
'ssl' => array(
'verify_peer' => false,
'verify_peer_name' => false,
),
);

View file

@ -1,17 +0,0 @@
<?php
$config['password_driver'] = 'sql';
$config['password_strength_driver'] = null;
$config['password_confirm_current'] = true;
$config['password_minimum_length'] = 8;
$config['password_minimum_score'] = 0;
$config['password_log'] = true;
$config['password_hosts'] = null;
$config['password_force_save'] = false;
$config['password_force_new_user'] = false;
$config['password_algorithm'] = 'dovecot';
$config['password_dovecotpw'] = '/usr/bin/sudo /usr/bin/doveadm pw';
$config['password_dovecotpw_method'] = 'ARGON2ID';
$config['password_dovecotpw_with_method'] = true;
$config['password_db_dsn'] = 'pgsql://mailserver:${mailserver_db_password}@localhost/mailserver';
$config['password_query'] = "UPDATE users SET password=%D FROM domains WHERE domains.id = domain_id AND domains.name = %d AND users.name = %l";

View file

@ -1,88 +0,0 @@
assert node.has_bundle('php')
assert node.has_bundle('mailserver')
version = node.metadata.get('roundcube/version')
directories = {
'/opt/roundcube': {
'owner': 'www-data',
},
'/opt/roundcube/logs': {
'owner': 'www-data',
'needs': [
'action:extract_roundcube',
],
},
'/opt/roundcube/temp': {
'owner': 'www-data',
'needs': [
'action:extract_roundcube',
],
}
}
downloads[f'/tmp/roundcube-{version}.tar.gz'] = {
'url': f'https://github.com/roundcube/roundcubemail/releases/download/{version}/roundcubemail-{version}-complete.tar.gz',
'gpg_signature_url': '{url}.asc',
'gpg_pubkey_url': 'https://roundcube.net/download/pubkey.asc',
'triggered': True,
}
actions['delete_roundcube'] = {
'command': 'rm -rf /opt/roundcube/*',
'triggered': True,
}
actions['extract_roundcube'] = {
'command': f'tar xfvz /tmp/roundcube-{version}.tar.gz --strip 1 -C /opt/roundcube',
'unless': f'grep -q "Version {version}" /opt/roundcube/index.php',
'preceded_by': [
'action:delete_roundcube',
f'download:/tmp/roundcube-{version}.tar.gz',
],
'needs': [
'directory:/opt/roundcube',
],
'triggers': [
'action:chown_roundcube',
'action:composer_install',
],
}
actions['chown_roundcube'] = {
'command': 'chown -R www-data /opt/roundcube',
'triggered': True,
}
files = {
'/opt/roundcube/config/config.inc.php': {
'content_type': 'mako',
'context': {
'installer': node.metadata.get('roundcube/installer'),
'product_name': node.metadata.get('roundcube/product_name'),
'des_key': node.metadata.get('roundcube/des_key'),
'database': node.metadata.get('roundcube/database'),
'plugins': node.metadata.get('roundcube/plugins'),
},
'needs': [
'action:chown_roundcube',
],
},
'/opt/roundcube/plugins/password/config.inc.php': {
'source': 'password.config.inc.php',
'content_type': 'mako',
'context': {
'mailserver_db_password': node.metadata.get('mailserver/database/password'),
},
'needs': [
'action:chown_roundcube',
],
},
}
actions['composer_install'] = {
'command': "cp /opt/roundcube/composer.json-dist /opt/roundcube/composer.json && su www-data -s /bin/bash -c '/usr/bin/composer -d /opt/roundcube install'",
'triggered': True,
'needs': [
'action:chown_roundcube',
],
}

Some files were not shown because too many files have changed in this diff.