Compare commits
30 commits
7ca2b07971 ... 3cc463999f
- 3cc463999f
- c3fbdfda72
- 9cf17d2a4e
- 5bd0cece48
- 91e4fee518
- df9c038d87
- 5b4ad017e1
- 4ef6826837
- 4b9980a8c3
- 8532f914c3
- 33062c3ec6
- be6903d3a6
- 8a9434a384
- 24bf39dda5
- 0dbda1c200
- dab554473e
- 8b3f9d7736
- b2b6f08b86
- a4e819317b
- 085eb2b2d3
- e9771f1b9f
- 63863f69c0
- 1a552844da
- 9f95e78277
- 041098ecde
- 09ca6bddf6
- b205bd7555
- d82a066fb3
- e85afeb656
- 5fd969ebb2
39 changed files with 872 additions and 36 deletions
bin/apt_upgrade_and_restart_all (new executable file, 63 lines)

@@ -0,0 +1,63 @@
#!/usr/bin/env python3

from bundlewrap.repo import Repository
from os.path import realpath, dirname
from ipaddress import ip_interface

repo = Repository(dirname(dirname(realpath(__file__))))
nodes = [
    node
    for node in repo.nodes_in_group('debian')
    if not node.dummy
]

print('updating nodes:', sorted(node.name for node in nodes))

# UPDATE

for node in nodes:
    print('--------------------------------------')
    print('updating', node.name)
    print('--------------------------------------')
    repo.libs.wol.wake(node)
    print(node.run('DEBIAN_FRONTEND=noninteractive apt update').stdout.decode())
    print(node.run('DEBIAN_FRONTEND=noninteractive apt -y dist-upgrade').stdout.decode())

# REBOOT IN ORDER

wireguard_servers = [
    node
    for node in nodes
    if node.has_bundle('wireguard')
    and (
        ip_interface(node.metadata.get('wireguard/my_ip')).network.prefixlen <
        ip_interface(node.metadata.get('wireguard/my_ip')).network.max_prefixlen
    )
]

wireguard_s2s = [
    node
    for node in nodes
    if node.has_bundle('wireguard')
    and (
        ip_interface(node.metadata.get('wireguard/my_ip')).network.prefixlen ==
        ip_interface(node.metadata.get('wireguard/my_ip')).network.max_prefixlen
    )
]

everything_else = [
    node
    for node in nodes
    if not node.has_bundle('wireguard')
]

print('======================================')
print(len(everything_else), len(wireguard_s2s), len(wireguard_servers))

for node in [
    *everything_else,
    *wireguard_s2s,
    *wireguard_servers,
]:
    print('rebooting', node.name)
    print(node.run('systemctl reboot').stdout.decode())
@@ -12,7 +12,10 @@ defaults = {
        },
    },
    'sudoers': {
        'backup-receiver': ['ALL'],
        'backup-receiver': {
            '/usr/bin/rsync',
            '/sbin/zfs',
        },
    }
}
@@ -7,7 +7,7 @@ then
    /opt/backup/backup_path_via_zfs "$path"
elif test -d "$path"
then
    /opt/backuo/backup_path_via_rsync "$path"
    /opt/backup/backup_path_via_rsync "$path"
else
    echo "UNKNOWN PATH: $path"
    exit 1
@@ -5,7 +5,7 @@ set -exu
path=$1
uuid=$(jq -r .client_uuid < /etc/backup/config.json)
server=$(jq -r .server_hostname < /etc/backup/config.json)
ssh="ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 backup-receiver@$server"
ssh="ssh -o ConnectTimeout=5 backup-receiver@$server"

rsync -av --rsync-path="sudo rsync" "$path/" "backup-receiver@$server:/mnt/backups/$uuid$path/"
$ssh sudo zfs snap "tank/$uuid/fs@auto-backup_$(date +"%Y-%m-%d_%H:%M:%S")"
@@ -5,7 +5,7 @@ set -exu
path=$1
uuid=$(jq -r .client_uuid < /etc/backup/config.json)
server=$(jq -r .server_hostname < /etc/backup/config.json)
ssh="ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 backup-receiver@$server"
ssh="ssh -o ConnectTimeout=5 backup-receiver@$server"

source_dataset=$(zfs list -H -o mountpoint,name | grep -P "^$path\t" | cut -d $'\t' -f 2)
target_dataset="tank/$uuid/$source_dataset"
bundles/build-ci/items.py (new file, 9 lines)

@@ -0,0 +1,9 @@
for project, options in node.metadata.get('build-ci').items():
    directories[options['path']] = {
        'owner': 'build-ci',
        'group': options['group'],
        'mode': '770',
        'needs': [
            'user:build-ci',
        ],
    }
bundles/build-ci/metadata.py (new file, 25 lines)

@@ -0,0 +1,25 @@
from shlex import quote


@metadata_reactor.provides(
    'users/build-ci/authorized_users',
    'sudoers/build-ci',
)
def ssh_keys(metadata):
    return {
        'users': {
            'build-ci': {
                'authorized_users': {
                    f'build-server@{other_node.name}'
                    for other_node in repo.nodes
                    if other_node.has_bundle('build-server')
                },
            },
        },
        'sudoers': {
            'build-ci': {
                f"/usr/bin/chown -R build-ci\:{quote(ci['group'])} {quote(ci['path'])}"
                for ci in metadata.get('build-ci').values()
            }
        },
    }
bundles/build-server/files/ci (new file, 31 lines)

@@ -0,0 +1,31 @@
#!/bin/bash

set -xu


CONFIG_PATH=${config_path}
JSON="$1"
REPO_NAME=$(jq -r .repository.name <<< $JSON)
CLONE_URL=$(jq -r .repository.clone_url <<< $JSON)
REPO_BRANCH=$(jq -r .ref <<< $JSON | cut -d'/' -f3)
SSH_OPTIONS='-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'

for INTEGRATION in "$(cat $CONFIG_PATH | jq -r '.ci | values[]')"
do
    [[ $(jq -r '.repo' <<< $INTEGRATION) = "$REPO_NAME" ]] || continue
    [[ $(jq -r '.branch' <<< $INTEGRATION) = "$REPO_BRANCH" ]] || continue

    HOSTNAME=$(jq -r '.hostname' <<< $INTEGRATION)
    DEST_PATH=$(jq -r '.path' <<< $INTEGRATION)
    DEST_GROUP=$(jq -r '.group' <<< $INTEGRATION)

    [[ -z "$HOSTNAME" ]] || [[ -z "$DEST_PATH" ]] || [[ -z "$DEST_GROUP" ]] && exit 5

    cd ~
    rm -rf "$REPO_NAME"
    git clone "$CLONE_URL" "$REPO_NAME"

    ssh $SSH_OPTIONS "build-ci@$HOSTNAME" "find \"$DEST_PATH\" -mindepth 1 -delete"
    scp -r $SSH_OPTIONS "$REPO_NAME"/* "build-ci@$HOSTNAME:$DEST_PATH"
    ssh $SSH_OPTIONS "build-ci@$HOSTNAME" "sudo chown -R build-ci:$DEST_GROUP $(printf "%q" "$DEST_PATH")"
done
@@ -10,7 +10,7 @@ directories = {
files = {
    '/etc/build-server.json': {
        'owner': 'build-server',
        'content': json.dumps(node.metadata.get('build-server'), indent=4, cls=MetadataJSONEncoder)
        'content': json.dumps(node.metadata.get('build-server'), indent=4, sort_keys=True, cls=MetadataJSONEncoder)
    },
    '/opt/build-server/strategies/crystal': {
        'content_type': 'mako',
@@ -21,4 +21,12 @@ files = {
            'download_server': node.metadata.get('build-server/download_server_ip'),
        },
    },
    '/opt/build-server/strategies/ci': {
        'content_type': 'mako',
        'owner': 'build-server',
        'mode': '0777', # FIXME
        'context': {
            'config_path': '/etc/build-server.json',
        },
    },
}
@@ -40,6 +40,24 @@ def agent_conf(metadata):
        },
    }

@metadata_reactor.provides(
    'build-server',
)
def ci(metadata):
    return {
        'build-server': {
            'ci': {
                f'{repo}@{other_node.name}': {
                    'hostname': other_node.metadata.get('hostname'),
                    'repo': repo,
                    **options,
                }
                for other_node in repo.nodes
                if other_node.has_bundle('build-ci')
                for repo, options in other_node.metadata.get('build-ci').items()
            },
        },
    }

@metadata_reactor.provides(
    'nginx/vhosts',
bundles/icinga2/files/conf.d/templates.conf (new file, 13 lines)

@@ -0,0 +1,13 @@
template Host "generic-host" {
    max_check_attempts = 3
    check_interval = 1m
    retry_interval = 30s

    check_command = "hostalive"
}

template Service "generic-service" {
    max_check_attempts = 5
    check_interval = 1m
    retry_interval = 30s
}
bundles/icinga2/files/constants.conf (new file, 6 lines)

@@ -0,0 +1,6 @@
const PluginDir = "/usr/lib/nagios/plugins"
const ManubulonPluginDir = "/usr/lib/nagios/plugins"
const PluginContribDir = "/usr/lib/nagios/plugins"
const NodeName = "${domain}"
const ZoneName = NodeName
const TicketSalt = ""
bundles/icinga2/files/features/ido-pgsql.conf (new file, 8 lines)

@@ -0,0 +1,8 @@
library "db_ido_pgsql"

object IdoPgsqlConnection "ido-pgsql" {
    user = "icinga2",
    password = "${db_password}",
    host = "localhost",
    database = "icinga2"
}
bundles/icinga2/files/hosts.d/host.conf (new file, 36 lines)

@@ -0,0 +1,36 @@
<%!
def render_value(key, value):
    if isinstance(value, Fault):
        return render_value(key, value.value)
    elif isinstance(value, type(None)):
        return '""'
    elif isinstance(value, bool):
        return 'true' if value else 'false'
    elif isinstance(value, int):
        return str(value)
    elif isinstance(value, str):
        if key.endswith('_interval'):
            return value
        else:
            return f'"{value}"'
    elif isinstance(value, (list, set)):
        return '[' + ', '.join(render_value(e) for e in sorted(value)) + ']'
    else:
        raise Exception(f"cant process type '{type(value)}' of value '{value}'")
%>

object Host "${host_name}" {
    import "generic-host"
% for key, value in sorted(host_settings.items()):
    ${key} = ${render_value(key, value)}
% endfor
}

% for service_name, service_config in sorted(services.items(), key=lambda e: [e[1]['vars.bundle'], e[0]]):
object Service "${service_name}" {
    import "generic-service"
% for key, value in sorted(service_config.items()):
    ${key} = ${render_value(key, value)}
% endfor
}
% endfor
bundles/icinga2/files/icinga2.conf (new file, 4 lines)

@@ -0,0 +1,4 @@
include "constants.conf"
include_recursive "features.d"
include_recursive "conf.d"
include_recursive "hosts.d"
bundles/icinga2/items.py (new file, 72 lines)

@@ -0,0 +1,72 @@
# Git-Hash for Icinga1: b63bb0ef52bf213715e567c81e3ed097024e61af
#
# directories = {
#     '/etc/icinga2': {
#         'purge': True,
#         'owner': 'nagios',
#     },
#     '/etc/icinga2/conf.d': {
#         'purge': True,
#         'owner': 'nagios',
#     },
#     '/etc/icinga2/hosts.d': {
#         'purge': True,
#         'owner': 'nagios',
#     },
#     '/etc/icinga2/features.d': {
#         'purge': True,
#         'owner': 'nagios',
#     },
# }
#
# files = {
#     '/etc/icinga2/icinga2.conf': {
#         'owner': 'nagios',
#     },
#     '/etc/icinga2/constants.conf': {
#         'owner': 'nagios',
#         'context': {
#             'hostname': node.metadata.get('icinga2/hostname')
#         },
#     },
#     '/etc/icinga2/conf.d/templates.conf': {
#         'source': 'conf.d/templates.conf',
#         'owner': 'nagios',
#     },
#     '/etc/icinga2/features/ido-pgsql.conf': {
#         'source': 'features/ido-pgsql.conf',
#         'content_type': 'mako',
#         'owner': 'nagios',
#         'context': {
#             'db_password': node.metadata.get('postgresql/roles/icinga2/password')
#         },
#         'needs': [
#             'pkg_apt:icinga2-ido-pgsql',
#         ],
#     },
#     '/etc/icingaweb2/setup.token': {
#         'content': node.metadata.get('icingaweb2/setup_token'),
#         'owner': 'nagios',
#     },
# }
#
# for other_node in repo.nodes:
#     files[f'/etc/icinga2/hosts.d/{other_node.name}.conf'] = {
#         'content_type': 'mako',
#         'source': 'hosts.d/host.conf',
#         'owner': 'nagios',
#         'context': {
#             'host_name': other_node.name,
#             'host_settings': {},
#             'services': other_node.metadata.get('monitoring', {}),
#         },
#     }
#
# svc_systemd = {
#     'icinga2': {
#         'needs': [
#             'pkg_apt:icinga2-ido-pgsql',
#             'svc_systemd:postgresql',
#         ],
#     },
# }
bundles/icinga2/metadata.py (new file, 74 lines)

@@ -0,0 +1,74 @@
from hashlib import sha3_256

defaults = {
    'apt': {
        'packages': {
            'icingadb': {},
            'icingadb-web': {},
            'icingaweb2': {},
            'icingadb-redis': {},
        },
        'sources': {
            'deb https://packages.icinga.com/debian icinga-{release} main',
            'deb https://packages.icinga.com/debian icinga-{release}-testing main',
        },
    },
    'postgresql': {
        'databases': {
            'icingadb': {
                'owner': 'icinga2',
            },
            'icingaweb2': {
                'owner': 'icingaweb2',
            },
        },
        'roles': {
            'icingadb': {
                'password': repo.vault.password_for(f'psql icinga2 on {node.name}'),
            },
            'icingaweb2': {
                'password': repo.vault.password_for(f'psql icingaweb2 on {node.name}'),
            },
        },
    },
    # 'zfs': {
    #     'datasets': {
    #         'tank/icinga2': {
    #             'mountpoint': '/var/lib/icingadb',
    #             'needed_by': {
    #                 'pkg_apt:icingadb',
    #                 'pkg_apt:icingadb-web',
    #                 'pkg_apt:icingaweb2',
    #             },
    #         },
    #     },
    # },
}

#
# @metadata_reactor.provides(
#     'icingaweb2/setup_token',
# )
# def setup_token(metadata):
#     return {
#         'icingaweb2': {
#             'setup_token': sha3_256(metadata.get('id').encode()).hexdigest()[:16],
#         },
#     }
#
#
# @metadata_reactor.provides(
#     'nginx/vhosts',
# )
# def nginx(metadata):
#     return {
#         'nginx': {
#             'vhosts': {
#                 metadata.get('icinga2/hostname'): {
#                     'content': 'icingaweb2/vhost.conf',
#                     'context': {
#                     },
#                 },
#             },
#         },
#     }
bundles/lonercrew/metadata.py (new file, 28 lines)

@@ -0,0 +1,28 @@
if not node.has_bundle('build-ci'):
    raise Exception('lownercrew needs bundle build-ci')


defaults = {
    'build-ci': {
        'lonercrew': {
            'path': '/opt/lonercrew',
            'group': 'www-data',
            'branch': 'master',
        },
    },
}


@metadata_reactor.provides(
    'nginx/vhosts',
)
def nginx(metadata):
    return {
        'nginx': {
            'vhosts': {
                'lonercrew.io': {
                    'content': 'lonercrew/vhost.conf',
                },
            },
        },
    }
@@ -1,4 +1,5 @@
#!/bin/bash

php /opt/nextcloud/occ files:scan --all
php /opt/nextcloud/occ files:scan-app-data
php /opt/nextcloud/occ preview:generate-all
bundles/ssh/files/ssh_config (new file, 6 lines)

@@ -0,0 +1,6 @@
Host *
    SendEnv LANG LC_*
    HashKnownHosts yes
    GSSAPIAuthentication yes
    StrictHostKeyChecking yes
    GlobalKnownHostsFile /etc/ssh/ssh_known_hosts
@@ -10,7 +10,7 @@ MaxSessions 255
PubkeyAuthentication yes
PasswordAuthentication no
ChallengeResponseAuthentication no
AuthorizedKeysFile .ssh/authorized_keys
AuthorizedKeysFile .ssh/authorized_keys
UsePAM yes

AllowUsers ${' '.join(users)}
@@ -1,7 +1,36 @@
if not node.metadata.get('FIXME_dont_touch_sshd', False):
    # on debian bullseye raspberry images, starting the systemd ssh
    # daemon seems to collide with an existing sysv daemon
    files['/etc/ssh/sshd_config'] = {
# on debian bullseye raspberry images, starting the systemd ssh
# daemon seems to collide with an existing sysv daemon
dont_touch_sshd = node.metadata.get('FIXME_dont_touch_sshd', False)

directories = {
    '/etc/ssh': {
        'purge': True,
        'mode': '0755',
        'skip': dont_touch_sshd,
    }
}

files = {
    '/etc/ssh/moduli': {
        'content_type': 'any',
        'skip': dont_touch_sshd,
    },
    '/etc/ssh/ssh_config': {
        'triggers': [
            'svc_systemd:ssh:restart'
        ],
        'skip': dont_touch_sshd,
    },
    '/etc/ssh/ssh_config': {
        'content_type': 'mako',
        'context': {
        },
        'triggers': [
            'svc_systemd:ssh:restart'
        ],
        'skip': dont_touch_sshd,
    },
    '/etc/ssh/sshd_config': {
        'content_type': 'mako',
        'context': {
            'users': sorted(node.metadata.get('ssh/allow_users')),

@@ -9,10 +38,35 @@ if not node.metadata.get('FIXME_dont_touch_sshd', False):
        'triggers': [
            'svc_systemd:ssh:restart'
        ],
    }

svc_systemd['ssh'] = {
    'needs': [
        'tag:ssh_users',
        'skip': dont_touch_sshd,
    },
    '/etc/ssh/ssh_host_ed25519_key': {
        'content': node.metadata.get('ssh/host_key/private') + '\n',
        'mode': '0600',
        'triggers': [
            'svc_systemd:ssh:restart'
        ],
    }
    },
    '/etc/ssh/ssh_host_ed25519_key.pub': {
        'content': node.metadata.get('ssh/host_key/public') + '\n',
        'mode': '0644',
        'triggers': [
            'svc_systemd:ssh:restart'
        ],
    },
    '/etc/ssh/ssh_known_hosts': {
        'content': '\n'.join(
            repo.libs.ssh.known_hosts_entry_for(other_node)
            for other_node in sorted(repo.nodes)
            if other_node != node
            and other_node.has_bundle('ssh')
        ) + '\n',
    },
}

svc_systemd['ssh'] = {
    'needs': [
        'tag:ssh_users',
    ],
    'skip': dont_touch_sshd,
}
@@ -1,3 +1,7 @@
from ipaddress import ip_interface
from base64 import b64decode


@metadata_reactor.provides(
    'ssh/allow_users',
)

@@ -11,3 +15,52 @@ def users(metadata):
            ),
        },
    }


@metadata_reactor.provides(
    'ssh/host_key',
)
def host_key(metadata):
    private, public = repo.libs.ssh.generate_ed25519_key_pair(
        b64decode(str(repo.vault.random_bytes_as_base64_for(f"HostKey {metadata.get('id')}", length=32)))
    )

    return {
        'ssh': {
            'host_key': {
                'private': private + '\n',
                'public': public + f' root@{node.name}',
            }
        },
    }


@metadata_reactor.provides(
    'ssh/hostnames',
)
def hostnames(metadata):
    ips = set()

    for network in node.metadata.get('network').values():
        if network.get('ipv4', None):
            ips.add(str(ip_interface(network['ipv4']).ip))
        if network.get('ipv6', None):
            ips.add(str(ip_interface(network['ipv6']).ip))

    domains = {
        domain
        for domain, records in node.metadata.get('dns').items()
        for type, values in records.items()
        if type in {'A', 'AAAA'}
        and set(values) & ips
    }

    return {
        'ssh': {
            'hostnames': {
                node.hostname,
                *ips,
                *domains,
            }
        },
    }
bundles/sudo/files/sudoer (new file, 3 lines)

@@ -0,0 +1,3 @@
% for command in sorted(commands):
${user} ALL=(ALL) NOPASSWD: ${command}
% endfor
@@ -6,6 +6,11 @@ directories = {

for user, commands in node.metadata.get('sudoers').items():
    files[f'/etc/sudoers.d/{user}'] = {
        'content': f"{user} ALL=(ALL) NOPASSWD: {', '.join(sorted(commands))}",
        'content_type': 'mako',
        'source': 'sudoer',
        'context': {
            'user': user,
            'commands': commands,
        },
        'mode': '500',
    }
@@ -22,11 +22,11 @@ for name, user_config in node.metadata.get('users').items():

    git_deploy = {
        join(user_config['home'], '.zsh/oh-my-zsh'): {
            'repo': 'git://github.com/ohmyzsh/ohmyzsh.git',
            'repo': 'https://github.com/ohmyzsh/ohmyzsh.git',
            'rev': 'master',
        },
        join(user_config['home'], '.zsh/oh-my-zsh/custom/plugins/zsh-autosuggestions'): {
            'repo': 'git://github.com/zsh-users/zsh-autosuggestions.git',
            'repo': 'https://github.com/zsh-users/zsh-autosuggestions.git',
            'rev': 'master',
        },
    }
data/apt/keys/packages.icinga.com.asc (new file, 30 lines)

@@ -0,0 +1,30 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v2.0.19 (GNU/Linux)

mQGiBFKHzk4RBACSHMIFTtfw4ZsNKAA03Gf5t7ovsKWnS7kcMYleAidypqhOmkGg
0petiYsMPYT+MOepCJFGNzwQwJhZrdLUxxMSWay4Xj0ArgpD9vbvU+gj8Tb02l+x
SqNGP8jXMV5UnK4gZsrYGLUPvx47uNNYRIRJAGOPYTvohhnFJiG402dzlwCg4u5I
1RdFplkp9JM6vNM9VBIAmcED/2jr7UQGsPs8YOiPkskGHLh/zXgO8SvcNAxCLgbp
BjGcF4Iso/A2TAI/2KGJW6kBW/Paf722ltU6s/6mutdXJppgNAz5nfpEt4uZKZyu
oSWf77179B2B/Wl1BsX/Oc3chscAgQb2pD/qPF/VYRJU+hvdQkq1zfi6cVsxyREV
k+IwA/46nXh51CQxE29ayuy1BoIOxezvuXFUXZ8rP6aCh4KaiN9AJoy7pBieCzsq
d7rPEeGIzBjI+yhEu8p92W6KWzL0xduWfYg9I7a2GTk8CaLX2OCLuwnKd7RVDyyZ
yzRjWs0T5U7SRAWspLStYxMdKert9lLyQiRHtLwmlgBPqa0gh7Q+SWNpbmdhIE9w
ZW4gU291cmNlIE1vbml0b3JpbmcgKEJ1aWxkIHNlcnZlcikgPGluZm9AaWNpbmdh
Lm9yZz6IYAQTEQIAIAUCUofOTgIbAwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJ
EMbjGcM0QQaCgSQAnRjXdbsyqziqhmxfAKffNJYuMPwdAKCS/IRCVyQzApFBtIBQ
1xuoym/4C7kCDQRSh85OEAgAvPwjlURCi8z6+7i60no4n16dNcSzd6AT8Kizpv2r
9BmNBff/GNYGnHyob/DMtmO2esEuVG8w62rO9m1wzzXzjbtmtU7NZ1Tg+C+reU2I
GNVu3SYtEVK/UTJHAhLcgry9yD99610tYPN2Fx33Efse94mXOreBfCvDsmFGSc7j
GVNCWXpMR3jTYyGj1igYd5ztOzG63D8gPyOucTTl+RWN/G9EoGBv6sWqk5eCd1Fs
JlWyQX4BJn3YsCZx3uj1DWL0dAl2zqcn6m1M4oj1ozW47MqM/efKOcV6VvCs9SL8
F/NFvZcH4LKzeupCQ5jEONqcTlVlnLlIqId95Z4DI4AV9wADBQf/S6sKA4oH49tD
Yb5xAfUyEp5ben05TzUJbXs0Z7hfRQzy9+vQbWGamWLgg3QRUVPx1e4IT+W5vEm5
dggNTMEwlLMI7izCPDcD32B5oxNVxlfj428KGllYWCFj+edY+xKTvw/PHnn+drKs
LE65Gwx4BPHm9EqWHIBX6aPzbgbJZZ06f6jWVBi/N7e/5n8lkxXqS23DBKemapyu
S1i56sH7mQSMaRZP/iiOroAJemPNxv1IQkykxw2woWMmTLKLMCD/i+4DxejE50tK
dxaOLTc4HDCsattw/RVJO6fwE414IXHMv330z4HKWJevMQ+CmQGfswvCwgeBP9n8
PItLjBQAXIhJBBgRAgAJBQJSh85OAhsMAAoJEMbjGcM0QQaCzpAAmwUNoRyySf9p
5G3/2UD1PMueIwOtAKDVVDXEq5LJPVg4iafNu0SRMwgP0Q==
=icbY
-----END PGP PUBLIC KEY BLOCK-----
data/icingaweb2/vhost.conf (new file, 30 lines)

@@ -0,0 +1,30 @@
# icingacli setup config webserver nginx --document-root /usr/share/icingaweb2/public --config /etc/icingaweb2 --fpm-uri 127.0.0.1:9000

server {
    listen 443 ssl http2;
    listen [::]:443 ssl http2;

    server_name ${server_name};

    ssl_certificate /var/lib/dehydrated/certs/${server_name}/fullchain.pem;
    ssl_certificate_key /var/lib/dehydrated/certs/${server_name}/privkey.pem;

    location / {
        return 302 /icingaweb2/index.php;
    }

    location ~ ^/icingaweb2/index\.php(.*)$ {
        fastcgi_pass unix:/run/php/php7.4-fpm.sock;
        fastcgi_index index.php;
        include fastcgi_params;
        fastcgi_param SCRIPT_FILENAME /usr/share/icingaweb2/public/index.php;
        fastcgi_param ICINGAWEB_CONFIGDIR /etc/icingaweb2;
        fastcgi_param REMOTE_USER $remote_user;
    }

    location ~ ^/icingaweb2(.+)? {
        alias /usr/share/icingaweb2/public;
        index index.php;
        try_files $1 $uri $uri/ /icingaweb2/index.php$is_args$args;
    }
}
data/lonercrew/vhost.conf (new file, 11 lines)

@@ -0,0 +1,11 @@
server {
    listen 443 ssl http2;
    listen [::]:443 ssl http2;

    ssl_certificate /var/lib/dehydrated/certs/${server_name}/fullchain.pem;
    ssl_certificate_key /var/lib/dehydrated/certs/${server_name}/privkey.pem;

    server_name ${server_name};
    index index.html;
    root /opt/lonercrew;
}
doc/test_protect.service (new file, 55 lines)

@@ -0,0 +1,55 @@
[Unit]
Description=TEST

[Service]
Type=oneshot
ExecStart=/opt/test

DynamicUser=yes
UMask=077
ProtectSystem=strict
ProtectHome=yes
PrivateTmp=yes
PrivateDevices=yes # DevicePolicy=closed
PrivateNetwork=yes
IPAddressDeny=any
PrivateUsers=yes
ProtectHostname=yes
ProtectClock=yes
ProtectKernelTunables=yes
ProtectKernelModules=yes
ProtectKernelLogs=yes
ProtectControlGroups=yes
RestrictAddressFamilies=none
RestrictFileSystems=ext4 tmpfs zfs
RestrictNamespaces=yes
LockPersonality=yes
MemoryDenyWriteExecute=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
RemoveIPC=yes
PrivateMounts=yes
SystemCallFilter=~@swap
SystemCallFilter=~@resources
SystemCallFilter=~@reboot
SystemCallFilter=~@raw-io
SystemCallFilter=~@privileged
SystemCallFilter=~@obsolete
SystemCallFilter=~@mount
SystemCallFilter=~@module
SystemCallFilter=~@debug
SystemCallFilter=~@cpu-emulation
SystemCallFilter=~@clock
CapabilityBoundingSet=
ProtectProc=invisible
ProcSubset=pid
NoNewPrivileges=yes
SystemCallArchitectures=native

ReadOnlyPaths=/

NoExecPaths=/
ExecPaths=/opt/test /bin/bash /lib

[Install]
WantedBy=multi-user.target
doc/test_protect_2.service (new file, 55 lines)

@@ -0,0 +1,55 @@
[Unit]
Description=TEST

[Service]
Type=oneshot
ExecStart=/opt/test

# user
UMask=077
DynamicUser=yes
PrivateUsers=yes
RestrictSUIDSGID=yes
NoNewPrivileges=yes
LockPersonality=yes
RemoveIPC=yes

# fs
ProtectSystem=strict
ProtectHome=yes
PrivateTmp=yes
PrivateDevices=yes
PrivateNetwork=yes
ProtectProc=invisible
ProcSubset=pid
PrivateMounts=yes
RestrictFileSystems=ext4 tmpfs zfs

NoExecPaths=/
ExecPaths=/opt/test /bin /lib /lib64 /usr

TemporaryFileSystem=/var
TemporaryFileSystem=/var

# network
IPAddressDeny=any
RestrictAddressFamilies=none

# syscall
SystemCallArchitectures=native
SystemCallFilter=~@swap ~@resources ~@reboot ~@raw-io ~@privileged ~@obsolete ~@mount ~@module ~@debug ~@cpu-emulation ~@clock

# else
ProtectHostname=yes
ProtectClock=yes
ProtectKernelTunables=yes
ProtectKernelModules=yes
ProtectKernelLogs=yes
ProtectControlGroups=yes
RestrictNamespaces=yes
MemoryDenyWriteExecute=yes
RestrictRealtime=yes
CapabilityBoundingSet=

[Install]
WantedBy=multi-user.target
doc/test_temp.service (new file, 54 lines)

@@ -0,0 +1,54 @@
[Unit]
Description=TEST

[Service]
Type=oneshot
ExecStart=/opt/test

TemporaryFileSystem=/

BindReadOnlyPaths=/opt/test /bin /lib /lib64 /usr



UMask=077
ProtectHome=yes
PrivateTmp=yes
PrivateDevices=yes
PrivateNetwork=yes
IPAddressDeny=any
ProtectHostname=yes
ProtectClock=yes
ProtectKernelTunables=yes
ProtectKernelModules=yes
ProtectKernelLogs=yes
ProtectControlGroups=yes
RestrictAddressFamilies=none
RestrictFileSystems=ext4 tmpfs zfs
RestrictNamespaces=yes
LockPersonality=yes
MemoryDenyWriteExecute=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
RemoveIPC=yes
PrivateMounts=yes
SystemCallFilter=~@swap
SystemCallFilter=~@resources
SystemCallFilter=~@reboot
SystemCallFilter=~@raw-io
SystemCallFilter=~@privileged
SystemCallFilter=~@obsolete
SystemCallFilter=~@mount
SystemCallFilter=~@module
SystemCallFilter=~@debug
SystemCallFilter=~@cpu-emulation
SystemCallFilter=~@clock
CapabilityBoundingSet=
ProtectProc=invisible
ProcSubset=pid
NoNewPrivileges=yes
SystemCallArchitectures=native


[Install]
WantedBy=multi-user.target
@@ -6,7 +6,7 @@
    'Service': {
        'ExecStart': [
            '',
            '-/usr/sbin/agetty --autologin root --noclear %I $TERM',
            '-/sbin/agetty --autologin root --noclear %I $TERM',
        ],
    },
},
@@ -1,14 +1,2 @@
from bundlewrap.operations import run_local
from bundlewrap.utils.ui import io
from bundlewrap.utils.text import yellow, bold


def node_apply_start(repo, node, interactive=False, **kwargs):
    if node.has_bundle('wol-sleeper'):
        io.stdout('{x} {node} waking up...'.format(
            x=yellow('!'),
            node=bold(node.name)
        ))
        repo\
            .get_node(node.metadata.get('wol-sleeper/waker'))\
            .run(node.metadata.get('wol-sleeper/wake_command'))
    repo.libs.wol.wake(node)
libs/ssh.py (26 changed lines)

@@ -1,6 +1,7 @@
from base64 import b64decode, b64encode
from hashlib import sha3_224
from hashlib import sha3_224, sha1
from functools import cache
import hmac

from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
from cryptography.hazmat.primitives.serialization import Encoding, PrivateFormat, PublicFormat, NoEncryption

@@ -46,3 +47,26 @@ def generate_ed25519_key_pair(secret):
    # RETURN

    return (deterministic_privatekey, public_key)


#https://www.fragmentationneeded.net/2017/10/ssh-hashknownhosts-file-format.html
# test this:
# - `ssh-keyscan -H 10.0.0.5`
# - take the salt from the ssh-ed25519 entry (first field after '|1|')
# - `bw debug -c 'repo.libs.ssh.known_hosts_entry_for(repo.get_node(<node with hostname 10.0.0.5>), <salt from ssh-keygen>)'`
@cache
def known_hosts_entry_for(node, test_salt=None):
    lines = set()

    for hostname in sorted(node.metadata.get('ssh/hostnames')):
        if test_salt:
            salt = b64decode(test_salt)
        else:
            salt = sha1((node.metadata.get('id') + hostname).encode()).digest()

        hash = hmac.new(salt, hostname.encode(), sha1).digest()
        pubkey = node.metadata.get('ssh/host_key/public')

        lines.add(f'|1|{b64encode(salt).decode()}|{b64encode(hash).decode()} {" ".join(pubkey.split()[:2])}')

    return '\n'.join(sorted(lines))
@@ -7,7 +7,7 @@ template = '''
# ${segment.split('#', 2)[1]}
% endif
[${segment.split('#')[0]}]
% for option, value in options.items():
% for option, value in sorted(options.items()):
% if isinstance(value, dict):
% for k, v in value.items():
${option}=${k}=${v}

@@ -16,6 +16,7 @@ ${option}=${k}=${v}
% for item in sorted(value):
${option}=${item}
% endfor
% elif isinstance(value, type(None)):
% else:
${option}=${str(value)}
% endif

@@ -39,5 +40,53 @@ def segment_order(segment):
def generate_unitfile(data):
    return Template(template).render(
        data=dict(sorted(data.items(), key=segment_order)),
        order=order
    ).lstrip()

# wip
def protection():
    return {
        # user
        'UMask': '077',
        'DynamicUser': 'yes',
        'PrivateUsers': 'yes',
        'RestrictSUIDSGID': 'yes',
        'NoNewPrivileges': 'yes',
        'LockPersonality': 'yes',
        'RemoveIPC': 'yes',

        # fs
        'ProtectSystem': 'strict',
        'ProtectHome': 'yes',
        'PrivateTmp': 'yes',
        'PrivateDevices': 'yes',
        'ProtectProc': 'invisible',
        'ProcSubset': 'pid',
        'PrivateMounts': 'yes',
        'RestrictFileSystems': {'ext4', 'tmpfs', 'zfs'},

        'NoExecPaths': {'/'},
        'ExecPaths': {'/bin', '/sbin', '/lib', '/lib64', '/usr'},

        'TemporaryFileSystem': {'/var'},

        # network
        'IPAddressDeny': 'any',
        'PrivateNetwork': 'yes',
        'RestrictAddressFamilies': 'none',

        # syscall
        'SystemCallArchitectures': 'native',
        'SystemCallFilter': '~@swap @resources @reboot @raw-io @privileged @obsolete @mount @module @debug @cpu-emulation @clock',

        # else
        'ProtectHostname': 'yes',
        'ProtectClock': 'yes',
        'ProtectKernelTunables': 'yes',
        'ProtectKernelModules': 'yes',
        'ProtectKernelLogs': 'yes',
        'ProtectControlGroups': 'yes',
        'RestrictNamespaces': 'yes',
        'MemoryDenyWriteExecute': 'yes',
        'RestrictRealtime': 'yes',
        'CapabilityBoundingSet': '',
    }
libs/wol.py (new file, 13 lines)

@@ -0,0 +1,13 @@
from bundlewrap.utils.ui import io
from bundlewrap.utils.text import yellow, bold

def wake(node):
    if node.has_bundle('wol-sleeper'):
        io.stdout('{x} {node} waking up...'.format(
            x=yellow('!'),
            node=bold(node.name)
        ))
        node\
            .repo\
            .get_node(node.metadata.get('wol-sleeper/waker'))\
            .run(node.metadata.get('wol-sleeper/wake_command'))
@@ -18,6 +18,7 @@
        'gitea',
        'gollum',
        'grafana',
        'icinga2',
        'influxdb2',
        'mirror',
        'mosquitto',
|
|||
'hostname': 'grafana.sublimity.de',
|
||||
'influxdb_node': 'home.server',
|
||||
},
|
||||
'icinga2': {
|
||||
'hostname': 'icinga2.sublimity.de',
|
||||
},
|
||||
'influxdb': {
|
||||
'hostname': 'influxdb.sublimity.de',
|
||||
'admin_token': '!decrypt:encrypt$gAAAAABg3z5PcaLYmUpcElJ07s_G-iYwnS8d532TcR8xUYbZfttT-B736zgR6J726mzKAFNYlIfJ7amNLIzi2ETDH5TAXWsOiAKpX8WC_dPBAvG3uXGtcPYENjdeuvllSagZzPt0hCIZQZXg--Z_YvzaX9VzNrVAgGD-sXQnghN5_Vhf9gVxxwP---VB_6iNlsf61Nc4axoS',
|
||||
|
|
|
@@ -14,6 +14,8 @@
        'islamicstate.eu',
        'wireguard',
        'zfs',
        'lonercrew',
        'build-ci',
    ],
    'metadata': {
        'id': 'ea29bdf0-0b47-4bf4-8346-67d60c9dc4ae',
|
|||
'islamicstate.eu',
|
||||
'hausamsilberberg.de',
|
||||
'wiegand.tel',
|
||||
'lonercrew.io',
|
||||
},
|
||||
},
|
||||
'dns': {
|
||||
|
|