Compare commits

..

731 commits

Author SHA1 Message Date
dcd2ebc49c
dist-upgrade -> full-upgrade 2025-01-16 10:20:34 +01:00
555350eab7
debian update 2025-01-16 10:20:18 +01:00
e117acac04
backup all doesnt stop on first error 2025-01-09 23:41:21 +01:00
16313b9e40
disable tasnomta charge 2025-01-09 22:45:27 +01:00
033a1cf6e5
macbook gnu grep 2025-01-01 13:04:42 +01:00
8befec9769
readme git sign 2024-12-09 09:07:19 +01:00
d22add5bfd
shortcut 2024-12-09 09:03:14 +01:00
69fb93a664
macbook compat 2024-12-09 08:58:14 +01:00
f4b59dc702
stuff 2024-11-23 15:58:10 +01:00
17aa3d7e48
no wg while at home 2024-11-23 14:50:49 +01:00
8bb9dae45c
all via usb interface, internal is broken 2024-11-23 14:50:40 +01:00
c244645020
kea deps 2024-11-23 14:50:22 +01:00
64029d2147
freescout readme 2024-11-23 11:51:31 +01:00
8081f12315
freescout comment 2024-11-23 11:18:11 +01:00
4ec2d5192a
freescout repair? 2024-11-23 11:02:28 +01:00
0e78afea6a
fix ip 2024-11-23 09:53:05 +01:00
f0d1cf9861
new icinga apt key 2024-11-23 09:53:05 +01:00
e17b023503
some grafana permsission 2024-11-23 09:53:05 +01:00
a3ba06bcb0
pipes -> shlex 2024-11-23 09:52:58 +01:00
01bcfd8638
dhcp from interface 2024-11-23 09:52:48 +01:00
c0944f9fa2
fix dhcp 2024-11-23 09:52:48 +01:00
dedbffa107
vlans 2024-11-23 09:52:48 +01:00
67d5a4bff8
TOTAL FACKUP 2024-11-23 09:52:22 +01:00
6d64a5e52d
dont apply freescout by accident 2024-09-05 23:02:29 +02:00
07e9eb4d8f
freescout timer less timeout 2024-09-05 22:59:24 +02:00
1f53ff63a9
freescout timer timeout and stuff 2024-09-05 22:58:40 +02:00
0eaed67334
dommy against unintentional apply all 2024-09-05 22:08:16 +02:00
fd5e4180fa
supervised update command readme 2024-09-05 22:04:01 +02:00
ab87fe6f96
freescout 2024-09-05 21:57:33 +02:00
95efe10ef6
roundcube 1.6.7 2024-08-19 12:23:35 +02:00
e47c709f39
dedup 2024-07-29 10:26:38 +02:00
24d346962a
omz permissions 2024-07-22 10:35:50 +02:00
3e2cae42e6
nextcloud update 2024-07-03 11:13:41 +02:00
6e410bfc25
nextcloud maintenance_window_start 2024-07-03 11:13:09 +02:00
8ebf4e0ec0
oh my zsh fix permissions 2024-07-03 10:12:27 +02:00
8e8f77e546
ssh host key: use custom path to not collide with auto generated keys 2024-07-03 10:05:44 +02:00
c128b8a1ca
comment 2024-06-23 13:17:44 +02:00
53d2928de2
errors and deprecatons 2024-06-22 02:59:15 +02:00
4996f98cd1 Merge pull request 'homeassistant-supervised' (#18) from homeassistant-supervised into master
Reviewed-on: #18
2024-06-11 18:41:31 +02:00
5b254b1b28
homeassistant-supervised 2024-06-11 18:40:22 +02:00
4348e6045e
zfs.headers use system/architecture 2024-06-11 18:03:32 +02:00
28e9d69571
nginx fix ssl_dhparam path 2024-06-11 18:03:08 +02:00
32011c5b1f
bundles/macbook/files/venv: install optional requirements 2024-06-11 18:02:03 +02:00
5c8e28ddb5
homeass more log 2024-06-05 21:34:47 +02:00
d62e609863
faster better dhparams that actually get used 2024-06-05 21:34:28 +02:00
ff51b41c38
hass bluez 2024-05-31 16:11:15 +02:00
76cf14a9ef
hass more timeout 2024-05-31 16:11:07 +02:00
301889ab8b
homeassistant kinda works 2024-05-31 15:14:49 +02:00
1a163ce9f0
dep order 2024-05-31 15:14:16 +02:00
15a78737cb
sort 2024-05-31 15:13:37 +02:00
d90e0a18e8
update nextcloud 2024-05-28 11:11:22 +02:00
a55ec37d21
elimu-kwanza.de google-site-verification 2024-05-14 11:18:20 +02:00
ee23f3ef6e
some default 2024-05-10 10:28:59 +02:00
de67571f5e
lobercrew killed letsencrypt 2024-05-10 10:28:52 +02:00
a04163b72f
update forgejo 2024-04-30 14:19:28 +02:00
fc7f7e2c23
update gitea 2024-04-30 14:12:51 +02:00
e18306058a
nodes/netcup.mails.py: upgrade roundcube 2024-04-16 10:58:36 +02:00
e982f1e076
comment 2024-04-16 10:58:16 +02:00
a2639bc370
reactivate backupserver 2024-04-16 10:58:00 +02:00
fd1d0ac976
xapian indexes in dataset without snapshots 2024-03-15 15:42:22 +01:00
e3fe0eeb79
wp 2024-02-08 10:46:27 +01:00
782b3fbe0b
improve wireguard script 2024-01-26 13:40:41 +01:00
3d8a77f9e4
tidyup and doc raspberrymatic cert 2024-01-26 12:01:25 +01:00
535ec252b5
mua_helo_restrictions Outlook compat 2024-01-17 18:28:38 +01:00
d1bd92e6cc
bundles/roundcube/files/password.config.inc.php 2024-01-17 17:50:32 +01:00
4f990f8d6f
stromzaehler is offline for now 2024-01-08 12:26:04 +01:00
cd9a7e172e
macbook manage zsh theme and remove clamav 2024-01-08 12:25:44 +01:00
206e62e698
leftover 2023-12-21 11:24:56 +01:00
57aa3b8433
direnv pyenv reset .pip_upgrade_timestamp 2023-12-14 11:34:28 +01:00
70091eca8c
disable steam logger, package is broken 2023-12-11 09:38:49 +01:00
fdd35e0a2c
cargo PATH 2023-12-11 09:38:31 +01:00
ccc54b53a5
nextcloud update against CVE-2023-48239 2023-11-24 08:55:32 +01:00
1222eb813d
grafana/influx file eprmissions 2023-11-15 11:51:36 +01:00
054087fa1c
crystal source http 2023-11-15 11:51:22 +01:00
b64470b160
pg: apt/config/APT/NeverAutoRemove 2023-11-15 11:41:51 +01:00
0dabb39ca4
some minor fixes 2023-10-24 11:17:29 +02:00
d302a22d3e
python 3.12 compat 2023-10-09 08:58:31 +02:00
1f3740dd59
some gitea fixes 2023-09-29 10:27:39 +02:00
919f5f2c08
remove print() 2023-09-29 10:21:18 +02:00
a6f1695e4e
gitea -> forgejo 2023-09-29 10:19:53 +02:00
8f45a39967
bundles/download-server/items.py: obsolete 2023-09-25 17:05:58 +02:00
0eb37a909e
bundles/macbook/files/macbook-update: xcode acept license 2023-09-25 16:59:23 +02:00
2211571689
exclude some dummies 2023-09-25 16:59:03 +02:00
6cb4275e31
bin/upgrade_and_restart_all: use /var/run/reboot-required 2023-09-25 16:58:34 +02:00
5373954567
roundcube disable installer 2023-09-06 09:29:02 +02:00
a5ec5eca7a
rc 1.6 options rename 2023-09-06 09:26:50 +02:00
b459821a8d
roundcube update +
composer_lock_reset
2023-09-06 09:25:17 +02:00
4415bc32f5
macbook clamav 2023-09-06 09:18:21 +02:00
5cb5396817
nodes/home.openhab.py: remove for now 2023-09-04 12:36:28 +02:00
85673abb29
data/apt/keys/grafana.asc: update 2023-09-04 12:35:01 +02:00
29be9d9896
cronekorkn.de redirct twitch 2023-08-30 20:54:30 +02:00
c4da3ee013
nicer 2023-08-24 11:31:23 +02:00
9288836b3a
fix apt config datatype 2023-08-15 12:06:12 +02:00
66624141f8
comment 2023-08-15 10:19:05 +02:00
9c639b4977
remove apt-listchanges 2023-08-14 15:28:00 +02:00
98e05fc151
apt listcahnges fix 2023-08-14 14:45:29 +02:00
402dca9b31
add cronekorkn.de 2023-08-09 19:21:23 +02:00
89d6b6d93c
update nextcloud 2023-08-09 19:21:11 +02:00
33a6e2a979
some more apt configs, which used to be used on the fly 2023-08-09 19:20:55 +02:00
14715fdab7
PATH_add bin 2023-08-09 07:16:06 +02:00
13d91fa512
englisch sprache schwere sprache 2023-08-09 07:14:33 +02:00
0e8afa29e5
bw less parallelism because it breaks :( 2023-08-08 19:11:58 +02:00
d300866bc8
dummy sources.list file 2023-08-02 14:20:24 +02:00
aede8d21c1
unfault 2023-08-01 15:53:14 +02:00
1fe2e0710f
macbook custom ssh key 2023-08-01 12:57:27 +02:00
fe884f446a
is_known_as known_hosts metadata 2023-08-01 12:52:15 +02:00
637ab05590
apt source multiple urls 2023-08-01 12:15:49 +02:00
843712d7bf
apt README 2023-08-01 11:58:01 +02:00
4aa8a18b4f
comment 2023-08-01 10:48:44 +02:00
83cc936c82
apt key variables 2023-07-31 21:12:15 +02:00
e1e1920ffb
apt new sources format 2023-07-31 21:00:17 +02:00
34d55f0849
apt conf in metadata 2023-07-31 18:41:52 +02:00
594b7d3c86
manage /etc/apt, use keyring dir 2023-07-31 11:47:58 +02:00
49b05fe8b8
known_hosts 2023-07-28 10:00:16 +02:00
789897acf6
nginx: more managed, hopefully survives updates 2023-07-28 02:34:37 +02:00
1233da8dd6
disable broken clamav-clamonacc 2023-07-28 02:20:52 +02:00
fce2425c56
java headless 2023-07-28 02:20:27 +02:00
219bbf9983
debian updates 2023-07-28 01:35:41 +02:00
d3b90cfe89
new key 2023-07-28 01:35:26 +02:00
b5d48db4dd
from debian 12 2023-07-28 01:35:15 +02:00
b81b6472fd
htop cpu frq 2023-07-28 01:34:48 +02:00
d380701703
update nextcloud vhost 2023-07-06 00:07:55 +02:00
b2aadeb98c
home.server debian-12 2023-07-05 23:43:20 +02:00
b8675adf99
fail-with-body didnt work 2023-07-05 19:21:27 +02:00
0463637d9f
elimu-kwanza.de 2023-07-05 18:12:37 +02:00
9b7171864a
netcup.mails debian 12 2023-07-05 17:31:42 +02:00
964b248de3
home.backups mdadm config 2023-07-05 15:59:08 +02:00
c756729cac
home.backups debian 12 2023-07-05 15:58:18 +02:00
49498c0ca9
update debian apt keys 2023-07-04 23:11:54 +02:00
be26672b85
obsolete import 2023-07-04 23:00:29 +02:00
0f4b01f996
fix sleep 2023-07-04 22:42:19 +02:00
bb0f123e02
mitigate apt cache config bug 2023-07-04 22:41:17 +02:00
a4fd08a8cd
nextcloud update 2023-07-04 22:29:12 +02:00
4a5711a570
macbook more bw threads 2023-06-26 19:37:42 +02:00
0cf83d0744
some grafana bundle fixes 2023-06-26 19:37:30 +02:00
5e66318c38
debian 12 preps 2023-06-26 19:37:21 +02:00
53d22e8c67
more precise requriements.txt 2023-06-26 19:36:48 +02:00
3256329064
grafana sleep befoer trying admin reset 2023-06-06 17:09:41 +02:00
d2f8df88bf
comment 2023-06-06 11:39:20 +02:00
5259e13eef
apt disable package cache 2023-06-06 10:25:46 +02:00
ab01562c85
nextcloud 25.0.7 2023-06-06 10:25:30 +02:00
4d440bcb5b
bundles/ssh/metadata.py: host key no user name 2023-05-25 10:58:25 +02:00
0fb1899322
macbook more managed 2023-05-25 10:57:39 +02:00
cb463350b4
home.router ... 2023-05-25 08:52:30 +02:00
5dd6e56ca9
initialize_swapfile unless 2023-05-08 16:35:39 +02:00
e8a5379ccd
some weird space 2023-05-05 18:25:21 +02:00
226b152fa0
bundles/systemd-swap/metadata.py: start swap on boot 2023-05-04 18:09:02 +02:00
4e9c6bf67b
libs/systemd.py: nicer ordering for Swap 2023-05-04 18:08:18 +02:00
c0ccd78517
libs/systemd.py: dont sort lists and sets 2023-05-04 17:07:21 +02:00
5b6d31742e
some influxdb dir permission 2023-05-04 17:07:12 +02:00
04a271a1e5
.envrc: delete git deploy cache after 24h 2023-05-04 12:49:57 +02:00
0f74cc8c7e
dmarc adkim & aspf 2023-04-28 11:25:08 +02:00
a0dc65f568
dmarc 2023-04-28 11:11:11 +02:00
5fa4969cfe
router dummy for now 2023-04-24 22:37:14 +02:00
11754a362f
router 2023-04-24 22:36:48 +02:00
fcb6c9bd8e
bundles/macbook/files/macbook-update: sudo, pyenv 2023-04-20 11:24:45 +02:00
534b7142a8 Merge pull request 'blog_dirty' (#8) from blog_dirty into master
Reviewed-on: #8
2023-04-20 11:10:04 +02:00
fac893f34a
update gitea and use sha256_url 2023-04-20 11:09:09 +02:00
8bdf675b47
bundles/systemd-swap/items.py: dd -> fallocate 2023-04-20 11:09:09 +02:00
d451a70db8
twitch-clip-download 2023-04-20 11:09:09 +02:00
6a90f605cc
icinga deps 2023-04-20 11:09:09 +02:00
d03a4fd554
fix doublicate key name 2023-04-20 11:09:08 +02:00
d8963141fc
release -> codename 2023-04-20 11:09:08 +02:00
0667304dd7
remove leftovers 2023-04-20 11:09:08 +02:00
ff9acf9638
unhardcode 2023-04-20 11:09:08 +02:00
233760d7a8
systemd mount defaults 2023-04-20 11:09:08 +02:00
fc115345a0
envrc --require-virtualenv 2023-04-20 11:09:08 +02:00
7403f31ac5
nextcloud 25.0.5 2023-03-27 12:51:07 +02:00
66b0492343
.envrc: rebuild venv on python version change; install requirements 2023-03-23 11:24:41 +01:00
73a5175a6d
bundles/macbook/files/macbook-update: install python version 2023-03-23 11:23:40 +01:00
904a4d0e40
nextcloud 25.0.4 2023-03-23 10:01:24 +01:00
c227c38875
remove leftovers 2023-03-22 12:18:15 +01:00
84207ee82b
nextcloud bigger uploads 2023-03-17 17:32:24 +01:00
eae3b92eaf
downtime services too 2023-03-02 18:35:14 +01:00
f167643980
backup-freshness-check: check local and offsite backup 2023-03-02 18:20:10 +01:00
e12e19d5ee
backup-server: check backup freshness 2023-03-02 17:53:38 +01:00
8b6acf7791
bundles/zfs/files/check_zpool_online: refactor 2023-03-02 15:38:07 +01:00
e897ef6898
zpool check online 2023-03-02 15:24:17 +01:00
047403c2a5
nextcloud: less reliant from config.php 2023-02-23 18:28:44 +01:00
b13d6980de
upgrade bw 2023-02-23 18:27:44 +01:00
3c996c63f8
demagify remove faults 2023-02-23 18:27:27 +01:00
44ec984552
icinga setup is working now 2023-02-18 14:04:35 +01:00
8a54f64b18
update influx key 2023-02-06 11:25:50 +01:00
367c134ecc
nginx Wants network-online.target 2023-01-31 18:48:43 +01:00
7cb2bdb6a1
cant compare floats 2023-01-23 12:57:19 +01:00
edf7ab4236
bundles/nextcloud/files/managed.config.php: decode to assoc array instead of object 2023-01-03 15:14:21 +01:00
71125e32e0
whitespace 2023-01-03 15:14:21 +01:00
43bd31f5d5
nextcloud config in metadata 2023-01-03 15:14:20 +01:00
2afc41a9f4
nextcloud config quotes 2023-01-03 14:55:45 +01:00
272ee4f5af
nextcloud hide signup link on shares 2023-01-03 14:03:56 +01:00
afef2d18d6
rbenv 2022-12-30 14:58:30 +01:00
d558d682e3
bundle:nodejs 2022-12-29 14:00:53 +01:00
4965db78ea
nft readme 2022-12-29 12:39:13 +01:00
28a9b40fdd
bundles/macbook/files/macbook-update: upgrade pip 2022-12-29 12:39:00 +01:00
b6fa63ad42
fix lots of backup stuff 2022-12-21 14:26:16 +01:00
3ce2807d9f
nc cron killmode process 2022-12-21 14:12:27 +01:00
4663f7632b
bundles/backup/files/backup_path_via_rsync: dont snapshot 2022-12-21 14:12:15 +01:00
8fc701b40e
fix old snapshot and bookmark deletion 2022-12-21 13:54:10 +01:00
6f4f2c4a63
nextcloud: replace face recognition with recognize app 2022-12-21 12:36:07 +01:00
a0c588359d
tidyup 2022-12-21 11:22:25 +01:00
be4c9ce6f4
icinga2 syslog -> journald logging 2022-12-21 11:22:13 +01:00
d489971990
nextcloud update 2022-12-21 10:57:24 +01:00
1fe9ffea72
readme 2022-12-05 17:03:44 +01:00
0d5d8500df
change rtmp ports 2022-12-05 16:38:45 +01:00
407318445d
nginx-rtmps 2022-12-05 16:26:50 +01:00
801a8bcf5f
left4dead: only individual server workshop downloads 2022-12-03 19:51:13 +01:00
9f8a64a653
remove steam.target 2022-12-03 17:33:42 +01:00
daf582d6d8
left4dead unit dependencies 2022-12-03 16:50:45 +01:00
ccb6dcd14f
l4d fixes 2022-12-01 19:11:55 +01:00
89dda7fb15
commants 2022-11-27 11:56:23 +01:00
f4c8e8e1ad
fix purgin server specific addons 2022-11-26 15:34:52 +01:00
e7cf2b04e2
fix provides 2022-11-26 15:26:17 +01:00
886a1c4655
left4dead2 different workshop per server 2022-11-26 15:14:01 +01:00
f2b984e238
left4dead2 servers on overlayfs 2022-11-26 14:56:46 +01:00
096f9a845c
macbook stuff 2022-11-26 13:05:13 +01:00
6c84dfa678
apply macbook 2022-11-26 12:24:27 +01:00
7aeb3be86d
rcon helper script 2022-11-24 18:39:13 +01:00
3c053cf51b
l4d2 rcon 2022-11-24 18:19:24 +01:00
e6685b6fcf
wakeonlan-remove-downtime.service running idc 2022-11-17 11:09:45 +01:00
5993a3413e
left4.me 2022-11-11 12:30:32 +01:00
50f4e7b7d5
smart errors grafana 2022-11-06 22:24:25 +01:00
5680b805b1
left4dead add server and admin 2022-11-02 11:47:26 +01:00
43aadda73f
sorted ports 2022-11-02 11:47:11 +01:00
e8f878884d
dont pregenerate images 2022-11-02 11:38:09 +01:00
2974d8e1ae
change disk cache dir 2022-10-19 00:53:59 +02:00
3894e7dfe7
cache rsa keys longer 2022-10-19 00:36:20 +02:00
1588a11868
cache slow rsa generation to disk 2022-10-19 00:23:56 +02:00
fea2d96077
opendkim deterministic rsa 2022-10-18 23:05:02 +02:00
b67e77ed6a
dkim pubkey from rpivate key 2022-10-18 20:35:20 +02:00
139a46dce0
change ipv6 2022-10-18 19:51:58 +02:00
6b918e81bb
comments 2022-10-18 19:18:28 +02:00
702f83ed44
devocot optimize index 2022-10-18 18:14:58 +02:00
a7d02ca428
update readme 2022-10-18 17:38:28 +02:00
31113dc9a9
fix xml2text path 2022-10-18 17:38:19 +02:00
3dfd09cc81
fix syntax error 2022-10-18 15:37:55 +02:00
01735e4c7a
style 2022-10-18 15:37:46 +02:00
2446f36375
zfs mirror fix ping wrong param 2022-10-10 10:28:45 +02:00
0507a0e740
fix for test 2022-10-08 00:11:44 +02:00
89e25b4ca3
remove network metadata from dummy nodes 2022-10-08 00:11:00 +02:00
2d77fa8d10
whitespace 2022-10-07 22:49:34 +02:00
c55b465c2f
bundles/systemd-networkd/metadata.py: remove ifupdown 2022-10-07 22:15:16 +02:00
bc13ff7711
zfs-mirror wait online 2022-10-07 22:05:45 +02:00
97e5a2b921
BW_ITEM_WORKERS 2022-10-07 22:05:27 +02:00
e490b67377
auto dns + ext ips 2022-10-07 20:58:08 +02:00
782b30d064
offsite-backups disable zfs-import-cache 2022-10-07 20:33:00 +02:00
6b0e92447a
left4dead 2022-10-06 14:42:52 +02:00
afe04ae6c8 Merge pull request 'temp130752653' (#6) from temp130752653 into master
Reviewed-on: #6
2022-10-06 14:35:57 +02:00
5bae9ea885
doesnt exist!? 2022-10-06 14:34:51 +02:00
4035ec7fac
bw worker defaults 2022-10-06 14:34:21 +02:00
addbae4b1d
start nginx when network is online 2022-10-06 14:34:21 +02:00
8669124c73
backup timers after online 2022-10-06 14:34:21 +02:00
05eecb72e2
enrv bw workers 2022-10-06 14:34:21 +02:00
46e180ee96
mroe steam 2022-09-24 16:31:06 +02:00
da2f3af643
steam 2022-09-24 16:09:03 +02:00
f48ea22a42
check_zpool_space 2022-09-24 15:16:55 +02:00
6c300f24f0
retry... 2022-09-24 14:40:58 +02:00
12621fa36b
wait until network is actually up 2022-09-24 13:51:26 +02:00
4248db53ac
oops 2022-09-24 13:40:35 +02:00
ac919278aa
rename timer 2022-09-24 13:37:13 +02:00
6c0193520d
resolve unnecessary metaproc 2022-09-24 13:35:45 +02:00
7c1da59bb7
backup at determined time 2022-09-24 13:35:15 +02:00
ca15978a6c
enable wakeonlan-remove-downtime 2022-09-23 18:57:17 +02:00
95c71b122d
wakeonlan downtime 2022-09-23 18:45:24 +02:00
1abdfc4bcd
downtime script 2022-09-23 18:25:53 +02:00
5eb684e7ea
dont backup datasets without mountpoint 2022-09-22 16:11:46 +02:00
7a60ab1599
multiplex_incoming: local, too 2022-09-22 16:11:25 +02:00
c8a916d5ac
dont supsend if outgoing connections present 2022-09-21 22:11:09 +02:00
1ea39b8117
dont multiplex to sleepers 2022-09-21 22:10:47 +02:00
bd118be239
readme 2022-09-21 22:10:14 +02:00
21f871b2f8
actually recompress data 2022-09-21 22:10:07 +02:00
dacb7cfec3
crystal telegraf plugin dependencies 2022-09-19 18:04:20 +02:00
3b1ef1eb41
backup server recordsize 2022-09-18 23:00:32 +02:00
50fed682eb
backup /etc 2022-09-18 22:58:56 +02:00
5ead4ba105
dont de- and recompress 2022-09-18 22:58:31 +02:00
8ce3217b16
backup large blocks 2022-09-18 19:07:55 +02:00
779e3ff8d4
unattended upgrades 2022-09-18 19:07:13 +02:00
388edf0ea6
remove load from grafana, its useless 2022-09-18 18:36:10 +02:00
49a097246d
check latest kernel is running 2022-09-18 17:02:02 +02:00
5b66659ce2
zfs fixes and default tank is on ssd now 2022-09-18 15:39:36 +02:00
af274d0076
samba 2022-09-15 00:55:06 +02:00
c67b3b2393
gitea direct config metadata 2022-09-15 00:54:58 +02:00
d2da2eb387
stop nextcloud preview generation 2022-09-15 00:53:45 +02:00
dc9e38d4ba
nextcloud limit version retention 2022-09-15 00:53:06 +02:00
1fb71a0f25
remove obsolete includes 2022-09-15 00:51:52 +02:00
2712d212b6
multiplex prevents backup server from sleeping 2022-09-15 00:31:31 +02:00
46b29ce4fb
samba 2022-09-13 02:20:43 +02:00
440f270b25
nginx check less noise 2022-09-13 00:42:57 +02:00
1797c784af
postgres close firewall 2022-09-13 00:40:46 +02:00
fef8adad20
fix some metadata provides 2022-09-13 00:40:31 +02:00
283f2da099
remove debian 10, use https in sources.list 2022-09-11 23:46:06 +02:00
e18bb37670
wireguard is udp 2022-09-11 22:29:43 +02:00
32e1250d06
check apt upgradable 2022-09-11 22:17:32 +02:00
f19d604213
nftables.conf tidyup 2022-09-11 15:43:20 +02:00
bc1d3bdec3
nextcloud 24.0.5 2022-09-09 19:51:16 +02:00
c64aa70b49
nftables 2022-09-09 19:50:42 +02:00
936630322f
test 2022-09-08 11:40:58 +02:00
mwiegand
f7cac0eedf new seperate ssh keys for private repo 2022-09-06 11:50:40 +02:00
mwiegand
023d45f2bb whitespace 2022-09-05 18:34:27 +02:00
mwiegand
21af9c8b62 monitor systemd --failed 2022-09-05 16:26:16 +02:00
mwiegand
d4ccc3dce0 icinga escape strings 2022-09-05 16:26:08 +02:00
mwiegand
f3f624be1f monitoring stuff 2022-09-05 15:42:48 +02:00
mwiegand
78d2499b46 nginx http check uses GET 2022-09-05 15:02:07 +02:00
mwiegand
7582e8d9cc build server htt check path 2022-09-05 15:01:49 +02:00
mwiegand
edebd1588f some mroe homassistant deps 2022-09-05 13:50:10 +02:00
mwiegand
6abfd868db icinga: features.d -> features-enabled (compat with debian packages) 2022-09-05 13:49:47 +02:00
mwiegand
1e2e63405a bundles/nginx/metadata.pybasic http check 2022-09-05 13:48:54 +02:00
mwiegand
d8f0d49a64 grafana per missions 2022-09-04 23:10:01 +02:00
mwiegand
5414c5e0cb grafana ups hidden legend 2022-09-04 23:09:50 +02:00
mwiegand
e65b18430e gitea: update indexer config 2022-08-31 12:40:10 +02:00
mwiegand
4e25dc000c change ssh key 2 2022-08-31 12:30:09 +02:00
mwiegand
0ff09f0cbd gitea repo indexer 2022-08-31 12:27:48 +02:00
mwiegand
8c416dd047 update gitea 2022-08-31 12:25:46 +02:00
mwiegand
eb3069359d change ssh key 2022-08-31 12:25:40 +02:00
mwiegand
8c8e4b8433 home.server add grub bundle 2022-08-31 12:03:03 +02:00
mwiegand
9afe4eb619 fix zsh shell set 2022-08-31 12:02:54 +02:00
mwiegand
a545a74242 icinga 2022-08-31 12:02:40 +02:00
mwiegand
606a60b1c0 faster demagify 2022-08-17 23:48:32 +02:00
mwiegand
695f204ee4 amke dummy until bw support in place uploads 2022-08-13 15:35:28 +02:00
mwiegand
3ddaead092 home.backups: disable smartctl: gets resetted on ever server wakeup anyway 2022-08-13 15:35:12 +02:00
mwiegand
83149c197e more grafana 2022-08-12 22:42:53 +02:00
mwiegand
6fef63655c proc ram cpu combined 2022-08-12 18:22:01 +02:00
mwiegand
63b68b8d3e disk_io only partitions 2022-08-12 18:19:19 +02:00
mwiegand
3b257aadab some grafana 2022-08-12 17:42:25 +02:00
mwiegand
88e80f4107 nextcloud update 2022-08-12 01:32:43 +02:00
mwiegand
8dfadbf9c3 typo 2022-08-11 19:31:42 +02:00
mwiegand
0cda286db1 pressure stall graph per ressource 2022-08-11 13:22:44 +02:00
mwiegand
d8de90fa5d pressure stall grafana 2022-08-11 13:18:30 +02:00
mwiegand
e05d987036 telegraf apcups less often 2022-08-11 12:40:28 +02:00
mwiegand
c8b7e34732 apcaccess telegraf: only one call 2022-08-11 12:37:28 +02:00
mwiegand
bb6eeba6fb readme 2022-08-11 12:37:13 +02:00
mwiegand
6c2d4ca69f pressure stall telegraf 2022-08-11 12:37:05 +02:00
mwiegand
706c4028f8 zfs mirror: delete old snapshots and bookmarks 2022-08-09 19:59:24 +02:00
mwiegand
3cd41adeaf smartctl doesnt work here 2022-08-09 19:59:02 +02:00
mwiegand
8a13421577 improve wake on lan 2022-08-09 19:58:47 +02:00
mwiegand
9ff8dce802 ssh multiplexing 2022-08-09 19:58:13 +02:00
mwiegand
49081248ae bundles/backup/files/backup_path_via_zfs: delte old bookmarks and snapshots 2022-08-09 19:57:52 +02:00
mwiegand
116697af9f keep fewer snpashots on backup servers 2022-08-09 19:15:58 +02:00
mwiegand
dc2dd9aa7a more timer readme 2022-08-09 18:13:28 +02:00
mwiegand
495f5537be bin/upgrade_and_restart_all: rename 2022-08-09 17:30:52 +02:00
mwiegand
0e28b18298 htop more delay 2022-08-09 17:02:40 +02:00
mwiegand
8d4abe1ec6 htop process io 2022-08-09 16:59:39 +02:00
mwiegand
ee3625311b htop 2022-08-09 16:50:46 +02:00
mwiegand
1343a85e0b editorconfig 2022-08-09 16:49:48 +02:00
mwiegand
fbe62c8127 monitor timers readme 2022-08-09 00:30:33 +02:00
mwiegand
1387b5f1ae nomodeset on nvidia gpu 2022-07-07 20:17:38 +02:00
mwiegand
e3a1438247 more proc_cpu data 2022-07-07 19:12:59 +02:00
mwiegand
b8cbf4648a influx: remove now-obsolete resolution feature 2022-07-07 19:05:53 +02:00
mwiegand
15f0317fbe massive flux performance increase 2022-07-07 19:00:36 +02:00
mwiegand
e44fc3dc04 further improve swap 2022-07-07 16:16:18 +02:00
mwiegand
f72ee7c85b remove obsolete metadatum 2022-07-07 15:46:15 +02:00
mwiegand
5eac0e8c85 home.server swapfile 2022-07-07 15:46:02 +02:00
mwiegand
fbee9a32df bundle systemd-swap improvements 2022-07-07 15:45:28 +02:00
mwiegand
1bde29bb17 fix pkg name 2022-07-07 15:35:10 +02:00
mwiegand
d10f2b1eb3 picsort: use File Modification Date as alternative 2022-07-07 15:35:01 +02:00
mwiegand
1397e9c9a3 picsort needs exiftool 2022-07-07 13:43:22 +02:00
mwiegand
1b84e84841 picsort heic 2022-07-07 13:29:04 +02:00
mwiegand
c37be21034 home.server more ram 2022-06-28 13:12:40 +02:00
mwiegand
57079a0cbe postgres tuning 2022-06-28 13:12:18 +02:00
mwiegand
cd5b854b00 nextcloud update 2022-06-28 10:18:00 +02:00
mwiegand
04d55caef6 only one ssd pool 2022-06-25 12:58:00 +02:00
mwiegand
8285a12f00 php pm dynamic 2022-06-23 21:35:30 +02:00
mwiegand
9946edc6f3 gitea ssd 2022-06-23 12:55:52 +02:00
mwiegand
a3c7acc399 influx on ssd 2022-06-23 02:52:18 +02:00
mwiegand
4530a34175 home.server more cores 2022-06-23 02:13:47 +02:00
mwiegand
b8100f472b zfs zfs_arc_max 2022-06-23 02:13:06 +02:00
mwiegand
9b104cac25 php pm more procasses 2022-06-23 02:12:33 +02:00
mwiegand
7243ad9e9b storage classes 2022-06-23 02:12:11 +02:00
mwiegand
12268daad6 sort 2022-06-23 01:32:31 +02:00
mwiegand
d4738d762b apt more unique sources lists 2022-06-23 00:26:51 +02:00
mwiegand
72f560809b reinstall home server and backups and many fixes 2022-06-22 22:31:39 +02:00
mwiegand
f55d46281c home server onboard lan 2022-06-20 10:51:37 +02:00
mwiegand
59537a536f grafana rows cpu more everything 2022-06-16 01:34:07 +02:00
mwiegand
ab55ad1020 comment 2022-06-16 01:30:41 +02:00
mwiegand
bb3b4b9bca nextcloud face recognition 2022-06-16 01:30:27 +02:00
mwiegand
f31f86aa21 php.ini as metadata 2022-06-16 01:30:06 +02:00
mwiegand
30b3d570fb nextcloud update 2022-06-16 01:29:45 +02:00
mwiegand
f6baeb328b nextcloud conf: log to syslog, trusted domain 2022-06-16 01:29:40 +02:00
mwiegand
31a4da75aa grafana cpu nice 2022-06-16 01:23:50 +02:00
mwiegand
a51d62f5d2 chatlogger more often 2022-06-11 23:26:38 +02:00
mwiegand
bada714b10 termux autostart 2022-06-01 23:11:55 +02:00
mwiegand
6f9218c5a1 bundle steam-chat-viewer 2022-06-01 20:09:28 +02:00
mwiegand
e6940b151c steam-chat-logger: steamuserimages subdir 2022-06-01 19:51:19 +02:00
mwiegand
5647654135 envrc: empty bw git deploy cache 2022-06-01 19:50:58 +02:00
mwiegand
5fd9452a6c hhtop resets htop config 2022-06-01 14:01:28 +02:00
mwiegand
2a84822cfe zsh namspacing 2022-06-01 13:50:20 +02:00
mwiegand
e62a7781b4 upadte zsh prompt 2022-06-01 13:14:47 +02:00
mwiegand
9f4e304aec upadte zsh prompt 2022-06-01 13:11:20 +02:00
mwiegand
8b2d8d974e upadte zsh prompt 2022-06-01 13:04:45 +02:00
mwiegand
3cddebee11 fix zsh plugins dir permissions plugins 2022-06-01 12:57:12 +02:00
mwiegand
c8b9a46aad update zsh prompt 2022-06-01 12:52:48 +02:00
mwiegand
35d8d4828b global oh-my-zsh 2022-06-01 12:41:45 +02:00
mwiegand
b9ff7e5953 manage global zprofile 2022-06-01 11:34:12 +02:00
mwiegand
8063833950 fix overwrite dicts 2022-06-01 11:33:59 +02:00
mwiegand
34b42832ac zsh prompt show username 2022-06-01 11:33:28 +02:00
mwiegand
52404fe7ce remove .python-version 2022-06-01 11:29:21 +02:00
mwiegand
76a568e8b6 node run wol 2022-06-01 11:11:14 +02:00
mwiegand
6706d04298 ckn user 2022-06-01 10:42:35 +02:00
mwiegand
cf09493486 zsh chown 2022-06-01 10:42:23 +02:00
mwiegand
8cabb029b3 systemd timers be nice 2022-06-01 09:39:17 +02:00
mwiegand
e81f28cf04 envrc add PATH and upgrade pip 2022-06-01 09:38:57 +02:00
mwiegand
df4ffd2d77 readme bw fork 2022-06-01 09:38:42 +02:00
mwiegand
b235ede36d git ignore *.pyc 2022-05-31 17:23:16 +02:00
mwiegand
e5a16b5506 php pm less workers 2022-05-31 17:22:37 +02:00
mwiegand
5e5118215a nicer ux 2022-05-31 17:21:43 +02:00
mwiegand
452c983f63 loadkeys support 2022-05-06 12:43:11 +02:00
mwiegand
c9e565bbde FIXME_dont_touch_sshd seems to be obsolete now? 2022-05-05 15:16:21 +02:00
mwiegand
5ed8f08231 change /etc/locale.gen after installing pkg_apt:locales 2022-05-05 14:21:57 +02:00
mwiegand
7588741b30 debian 12 2022-05-04 20:50:03 +02:00
mwiegand
f416852225 tasmota-charge: fixes + 2022-04-21 23:50:49 +02:00
mwiegand
f5ab497bff tasmota-charge: fixes 2022-04-21 23:32:42 +02:00
mwiegand
f2439dcf66 steam chat logger 2022-04-20 00:37:11 +02:00
mwiegand
b27f07a867 grafana use vhost from documentation 2022-04-09 16:32:34 +02:00
mwiegand
824b10546d grafana set domain 2022-04-09 16:32:08 +02:00
mwiegand
33d973927c dont keep backup snapshots 2022-04-09 16:31:55 +02:00
mwiegand
b528e9b94b wol fix and refactor 2022-04-02 19:30:59 +02:00
mwiegand
8a6c166f16 gutea 1.16.5 2022-04-02 14:39:49 +02:00
mwiegand
49346ba20b move bw managed systemd units to /usr/local/lib 2022-03-31 10:23:57 +02:00
mwiegand
31bf80f771 update nextcloud 2022-03-31 10:23:42 +02:00
mwiegand
1340aaf52e fix path 2022-03-30 10:06:16 +02:00
mwiegand
739c38d1b4 only reboot where necessary 2022-03-29 12:31:04 +02:00
mwiegand
df9c038d87 ssh enable strict host key chacking 2022-03-27 18:03:51 +02:00
mwiegand
5b4ad017e1 good old tuple typo 2022-03-27 18:03:43 +02:00
mwiegand
4ef6826837 backup enable host key checking 2022-03-27 17:57:10 +02:00
mwiegand
4b9980a8c3 explicitly set GlobalKnownHostsFile 2022-03-27 17:57:00 +02:00
mwiegand
8532f914c3 remove obsolete option 2022-03-27 17:02:42 +02:00
mwiegand
33062c3ec6 use skip instead of if 2022-03-27 17:02:33 +02:00
mwiegand
be6903d3a6 ssh: collect host keys in metadata 2022-03-27 17:02:17 +02:00
mwiegand
8a9434a384 ssh: manage hostkeys and global known_hosts 2022-03-27 16:38:38 +02:00
mwiegand
24bf39dda5 backup no host key checking 2022-03-27 13:30:27 +02:00
mwiegand
0dbda1c200 fix rsync backup path 2022-03-27 13:30:16 +02:00
mwiegand
dab554473e sort units 2022-03-27 13:30:07 +02:00
mwiegand
8b3f9d7736 play around with systemd hardening 2022-03-27 13:29:58 +02:00
mwiegand
b2b6f08b86 github https 2022-03-26 12:26:48 +01:00
mwiegand
a4e819317b backup-receiver less sudo 2022-03-26 12:03:22 +01:00
mwiegand
085eb2b2d3 sudo: one command per line 2022-03-26 11:59:10 +01:00
mwiegand
e9771f1b9f nextcloud rescan scan all files 2022-03-14 21:49:21 +01:00
mwiegand
63863f69c0 ci check branch 2022-03-13 18:40:55 +01:00
mwiegand
1a552844da fix mode 2022-03-13 18:31:25 +01:00
mwiegand
9f95e78277 fix check 2022-03-13 18:31:03 +01:00
mwiegand
041098ecde ci tidyup more 2022-03-13 18:30:13 +01:00
mwiegand
09ca6bddf6 ci tidyup 2022-03-13 18:22:24 +01:00
mwiegand
b205bd7555 ci check empty 2022-03-13 18:21:30 +01:00
mwiegand
d82a066fb3 gitea ci 2022-03-13 18:11:11 +01:00
mwiegand
e85afeb656 fix agetty path on older systems 2022-03-07 18:04:55 +01:00
mwiegand
5fd969ebb2 apt_upgrade_and_restart_all 2022-03-07 17:54:50 +01:00
mwiegand
ca835a69df cache generate_ed25519_key_pair 2022-03-04 08:48:18 +01:00
mwiegand
63076ec921 gitea add new webhook settings 2022-03-03 10:13:11 +01:00
mwiegand
f075d4f3cd libs/apt some comments and clarifications 2022-03-03 09:38:12 +01:00
mwiegand
349b4e9d3f update gitea 2022-03-03 09:34:27 +01:00
mwiegand
41067d1aa4 php less children 2022-02-24 01:05:16 +01:00
mwiegand
1d2bfa9df9 qireguard qrcode 2022-02-24 00:03:44 +01:00
mwiegand
60bc44a946 no other files allowed under /opt/nextcloud thanks to code signing 2022-02-23 18:41:35 +01:00
mwiegand
8092b5faff nextcloud upgrade_status script 2022-02-23 18:33:25 +01:00
mwiegand
faf78e3766 upgarde nextcloud 2022-02-23 18:19:58 +01:00
mwiegand
f3114bfcef fix upgarde check 2022-02-23 18:17:36 +01:00
mwiegand
f82fa22be8 cronological order 2022-02-23 18:12:35 +01:00
mwiegand
e4084956a2 no json: error prone 2022-02-23 18:11:12 +01:00
mwiegand
60d30e9df0 readme notes 2022-02-23 18:04:17 +01:00
mwiegand
2549a298a4 nextcloud picsort: faster, apperently 2022-02-23 18:04:06 +01:00
mwiegand
b52030b830 nextcloud weekly rescan 2022-02-23 18:03:38 +01:00
mwiegand
5a9716b0ff nextcloud supress pcntl error and use json 2022-02-23 18:03:05 +01:00
mwiegand
ad145c3ace fix wireguard client config 2022-02-23 17:19:34 +01:00
mwiegand
1b5b354cc9 format and comments 2022-02-15 16:45:43 +01:00
mwiegand
1e4713cb3a some sanity check and comments 2022-02-15 16:43:27 +01:00
mwiegand
a759bbf58c EXPERIMENTAL_UPLOAD_VIA_CAT 2022-02-15 16:41:42 +01:00
mwiegand
f19a8eb6a8 sha3_224, as we only need 32 bit anyways 2022-02-15 09:42:59 +01:00
mwiegand
b1e5992f05 group home 2022-02-15 09:39:11 +01:00
mwiegand
5ac4d3cc33 format 2022-02-15 09:39:04 +01:00
mwiegand
806b5e1880 ssh: dont set rendom bytes to zero 2022-02-15 09:36:57 +01:00
mwiegand
69ce72aa7b wiregiard_client_config dont assume server node 2022-02-15 09:18:57 +01:00
mwiegand
f9790912a6 fix dep 2022-02-15 09:18:39 +01:00
mwiegand
f1afe13fad wol: more sleep 2022-02-13 00:19:19 +01:00
mwiegand
9ea206318c min wake 10 mins 2022-02-12 21:18:16 +01:00
mwiegand
2176403bcc wol install jq 2022-02-12 21:17:50 +01:00
mwiegand
4906b13a38 use uptime since last wake instead of boot 2022-02-12 21:16:08 +01:00
mwiegand
386d7bab9b wake info 2022-02-12 21:01:13 +01:00
mwiegand
b08fe2c749 no sleep when ssh 2022-02-12 20:55:17 +01:00
mwiegand
a581dbfee9 format 2022-02-12 20:55:08 +01:00
mwiegand
dbca66326a sleep faster 2022-02-12 20:40:57 +01:00
mwiegand
fab4d0a476 wakeup hook 2022-02-12 20:40:28 +01:00
mwiegand
cf6e716301 fix if 2022-02-12 20:40:14 +01:00
mwiegand
df607f0656 suspend_if_idle ignore own service 2022-02-12 20:23:33 +01:00
mwiegand
cfee1d74b0 wol suspend if idle 2022-02-12 19:50:36 +01:00
mwiegand
4e56ba6da0 wol on backup 2022-02-12 18:42:27 +01:00
mwiegand
e406db30f9 test OK 2022-02-12 18:26:13 +01:00
mwiegand
6727fcf404 wake_command 2022-02-12 18:23:09 +01:00
mwiegand
78b324903d wol waker 2022-02-12 18:01:03 +01:00
mwiegand
f4a4f22d69 readme 2022-02-12 17:37:53 +01:00
mwiegand
d12b446f34 wol + 2022-02-12 17:15:15 +01:00
mwiegand
ae929e4773 fix some errors 2022-02-12 17:01:06 +01:00
mwiegand
fe70776dfc zfs-mirror persistent 2022-02-12 16:58:55 +01:00
mwiegand
98ba428bb7 wol-sleeper 2022-02-12 16:58:44 +01:00
mwiegand
b579dc4928 autologin 2022-02-12 15:20:00 +01:00
mwiegand
0d168cfb5f ssh allow_users 2022-02-12 13:41:03 +01:00
mwiegand
6d8450b270 rspamd clamav settings 2022-02-12 13:18:29 +01:00
mwiegand
254af0c72b raspberrymatic-cert 2022-02-06 20:34:17 +01:00
mwiegand
c8565876db dup 2022-02-06 17:05:00 +01:00
mwiegand
323ad5bc2c dont reject clamav matches 2022-02-05 14:58:15 +01:00
mwiegand
d5e2290f12 nextcloud update 2022-02-05 14:52:47 +01:00
mwiegand
4a23393691 mailserver pw reset readme 2022-02-05 14:52:40 +01:00
mwiegand
63741f271b rspam web passwordprotection 2022-02-05 14:52:27 +01:00
mwiegand
f7de8e4d2e less greylisting, cause it interferes with 2fa codes 2022-02-05 14:18:32 +01:00
mwiegand
fbc82ef6b1 remove more resolv.conf destroyers 2022-01-11 18:41:06 +01:00
mwiegand
cff4371fef dm-crypt import pool after decrypt 2022-01-11 18:29:21 +01:00
mwiegand
b8cfd06e12 fix modprobe/zfs.conf 2022-01-11 18:07:09 +01:00
mwiegand
d0c3030e7a fix interface name 2022-01-11 14:41:39 +01:00
mwiegand
89621c7cbb fix ip 2022-01-11 14:31:38 +01:00
mwiegand
95e00d4d71 apcupsd wip 2022-01-09 15:02:01 +01:00
mwiegand
ec1b52aa2f grafana apcups 2022-01-09 00:04:36 +01:00
mwiegand
9de1444668 change telegraf plugin dir 2022-01-08 23:26:21 +01:00
mwiegand
36ca196f3a letsencrypt timer 2022-01-08 00:20:50 +01:00
mwiegand
c9b76596da fix trim path 2022-01-08 00:01:42 +01:00
mwiegand
44c67ba003 renumber offsite-backups 2022-01-05 21:10:50 +01:00
mwiegand
7663d3fcce tasmota-charge telegraf 2021-12-16 00:29:52 +01:00
mwiegand
3c09b3a984 tasmota-charge 2021-12-16 00:05:06 +01:00
mwiegand
41430ebc2f some picsort stuff 2021-12-12 21:26:56 +01:00
mwiegand
0e97f9e596 mailserver eebug metadata 2021-11-29 21:24:18 +01:00
mwiegand
1b2926a24d postfix only newer TLS versions 2021-11-29 21:24:04 +01:00
mwiegand
b02adbb7cb wg fix allowed ips 2021-11-29 21:21:57 +01:00
mwiegand
cdd379ba82 grafana purge dshboard dir 2021-11-29 21:15:58 +01:00
mwiegand
2eab0d2ca9 le needs bind 2021-11-19 00:43:37 +01:00
mwiegand
5c9dea327c make non dynamics zones on slaves to masters 2021-11-19 00:38:01 +01:00
mwiegand
87861aae98 bind slave: dont replace zones all the time 2021-11-18 23:47:55 +01:00
mwiegand
604e01f16e hardware and monitoring 2021-11-18 23:37:14 +01:00
mwiegand
384f7dbfa8 procio 2021-11-18 23:26:26 +01:00
mwiegand
9295e1789c more srv autodiscover 2021-11-18 21:53:29 +01:00
7309a20c47 Merge pull request 'autoconfig' (#5) from autoconfig into master
Reviewed-on: #5
2021-11-18 21:26:31 +01:00
mwiegand
8cfa3575f8 mailserver-autoconfig 2021-11-18 21:23:56 +01:00
mwiegand
59a598448d wiegand.tel dkms 2021-11-18 21:23:37 +01:00
mwiegand
8b84fe0f0e postfix enable smtps 2021-11-18 21:23:05 +01:00
mwiegand
c66374c9db add forgotten wiegand.tel domain 2021-11-18 21:22:54 +01:00
mwiegand
d23384d4d1 remove some garbage 2021-11-18 21:21:49 +01:00
mwiegand
5849ecc9e4 make spamd worker inaccessible for now 2021-11-17 22:48:28 +01:00
mwiegand
c92704390d buildserver install shards 2021-11-17 17:46:00 +01:00
mwiegand
2300874637 fix wireguard client config dns server 2021-11-17 17:36:35 +01:00
mwiegand
6cceae2458 build cystal 2021-11-17 01:13:01 +01:00
mwiegand
7ad5f62022 second.resolver.name -> secondry.resolver.name 2021-11-14 18:17:12 +01:00
mwiegand
33f4660503 move secondary to ovh 2021-11-14 18:05:19 +01:00
mwiegand
fc65db3de5 bind remove db prefix 2021-11-14 16:18:27 +01:00
mwiegand
b70c8e8217 htz.mails -> netcup.mails 2021-11-14 14:10:33 +01:00
mwiegand
357c591b69 php prformance tweaks 2021-11-13 16:23:54 +01:00
mwiegand
38e542c184 php prformance tweaks 2021-11-13 16:21:24 +01:00
mwiegand
d6e4cdb45a case sensitive config parser 2021-11-13 15:57:29 +01:00
mwiegand
6c2473e2da nextcloud php www.conf 2021-11-13 15:17:20 +01:00
mwiegand
715e163514 sort inis 2021-11-13 15:17:03 +01:00
mwiegand
2d4afe6b53 change redis run dirs 2021-11-13 14:57:43 +01:00
mwiegand
2ec0e5068a trim root, dont trim zs so often, as it takes some time 2021-11-12 18:57:09 +01:00
mwiegand
f4b3841793 zfs: no autotrim, manually trim hourly, scrub every two months 2021-11-12 18:09:11 +01:00
mwiegand
5d22aaa1eb backup server stuff 2021-11-12 17:50:11 +01:00
mwiegand
706af2e127 dont autosnapshot backup datasets 2021-11-12 17:40:00 +01:00
mwiegand
72561bdb52 backup openhab 2021-11-12 16:59:57 +01:00
mwiegand
b5489cd22f bw 4.13.1 2021-11-12 16:47:01 +01:00
mwiegand
fa300ca547 dont do logbias=throughput, fragmentation might not be worth it 2021-11-10 20:08:21 +01:00
mwiegand
f0ecf64938 unifi 2021-11-10 04:04:14 +01:00
mwiegand
5d37e2665e health no legend 2021-11-10 03:53:25 +01:00
mwiegand
2becf72559 health in hatdware bundle 2021-11-10 03:48:29 +01:00
mwiegand
806feb9fdd cpu freq grafana 2021-11-10 03:43:09 +01:00
mwiegand
44fe39b025 cpu freq 2021-11-10 03:26:33 +01:00
mwiegand
11150b4f69 swapfile 2021-11-10 02:50:20 +01:00
mwiegand
8a65459d69 swapfile 2021-11-10 02:48:20 +01:00
mwiegand
776e876876 swapfile 2021-11-10 02:43:45 +01:00
mwiegand
487899ae3e systemd-journald bundle 2021-11-10 02:12:50 +01:00
mwiegand
d2916ef4f9 raspberry bundle 2021-11-10 02:09:42 +01:00
mwiegand
89d2f3ce7a hue dns 2021-11-10 01:38:25 +01:00
mwiegand
9ef958d935 new opanhab raspi 2021-11-10 01:25:48 +01:00
mwiegand
99c12b6106 fix locale deps 2021-11-10 00:14:41 +01:00
mwiegand
93d9f1af39 acme allow wireguard ips 2021-11-08 22:48:24 +01:00
b9896960ff Merge pull request 'letsencrypt dns challenge' (#2) from dns_challenge3 into master
Reviewed-on: #2
2021-11-08 10:52:54 +01:00
mwiegand
fdcfa8a82b letsencrypt dns challenge 2021-11-08 10:49:37 +01:00
mwiegand
34e9366c61 bw 13 hashable 2021-11-06 06:39:50 +01:00
mwiegand
b022eabeb0 wip 2021-11-05 21:45:23 +01:00
mwiegand
df9d5fb62f openhab io problem 2021-11-04 22:39:10 +01:00
mwiegand
4abfd5fcbc openhab web 2021-11-04 22:24:33 +01:00
mwiegand
03378ed638 zfs-auto-snapshot 2021-11-02 23:37:07 +01:00
mwiegand
cf4bf15db0 mosquitto password file 2021-11-02 23:22:24 +01:00
mwiegand
753954ebaf mosquitto 2021-11-02 21:45:05 +01:00
mwiegand
ec4be43b5e mosquitto 2021-11-02 01:39:28 +01:00
mwiegand
a1a0beb8cb hue dummy 2021-11-02 00:37:43 +01:00
mwiegand
bba63bac3d homematic dummy 2021-11-02 00:37:33 +01:00
mwiegand
a7a79b48d7 FIXME_dont_touch_sshd 2021-11-01 11:13:46 +01:00
mwiegand
c62fae4fc4 FIXME_dont_touch_sshd 2021-11-01 10:15:57 +01:00
mwiegand
3d070abca7 comment 2021-10-31 17:50:29 +01:00
mwiegand
d8f887a4eb raspberry ebian ssh workaround? 2021-10-31 17:50:21 +01:00
mwiegand
1536b4aa2c telegraf 2021-10-30 22:07:43 +02:00
mwiegand
86884f4cb7 smartctl 2021-10-30 22:06:39 +02:00
mwiegand
946b3d0e9f smartctl 2021-10-30 22:03:41 +02:00
mwiegand
79820a0c10 flux min 2021-10-30 20:59:54 +02:00
mwiegand
a564d2ed31 smartctl 2021-10-30 20:43:47 +02:00
mwiegand
5a49533460 smartctl 2021-10-30 20:43:01 +02:00
mwiegand
7f443cfdd4 smartctl 2021-10-30 20:13:24 +02:00
mwiegand
21b530cd8d smartctl 2021-10-30 19:05:50 +02:00
mwiegand
5390f3ac3c snartctl 2021-10-30 18:57:06 +02:00
mwiegand
1f0e660a4d hdparm 2021-10-30 14:02:37 +02:00
mwiegand
596db36e10 antiflicker 2021-10-30 13:12:36 +02:00
mwiegand
594ae6df66 stromzaehler gpiod 2021-10-30 13:00:15 +02:00
mwiegand
1066ca50ab hashable dict 2021-10-30 11:13:40 +02:00
mwiegand
07e6a2d07e raspberry bedina ssh workaround 2021-10-29 21:17:54 +02:00
mwiegand
16a7bb915f sshd fix locale problems 2021-10-29 19:15:56 +02:00
mwiegand
5b7c67815b more locales 2021-10-29 19:15:45 +02:00
mwiegand
f6ac34dfd0 openhab wip 2021-10-29 10:43:14 +02:00
mwiegand
d2a802524d openhab 2021-10-29 01:58:15 +02:00
mwiegand
1825faabf6 autowip 2021-10-28 23:41:25 +02:00
mwiegand
9e4163d291 wpa_supplicant 2021-10-28 23:02:40 +02:00
mwiegand
3652a521de wpa_supplicant 2021-10-28 22:56:50 +02:00
mwiegand
bc898c8009 stromzaehler 2021-10-28 04:45:22 +02:00
mwiegand
43cf6cabea stromzaehler 2021-10-28 04:31:35 +02:00
mwiegand
bb862ed6ec git ensure home 2021-10-28 00:02:23 +02:00
mwiegand
6a6ffc8720 gitea sha256 2021-10-27 23:44:11 +02:00
mwiegand
9d6b6777bf graphs 2021-10-27 23:30:17 +02:00
mwiegand
6b1674c93a remove gameserver for now 2021-10-27 21:57:23 +02:00
mwiegand
fda17989d3 graphs 2021-10-27 21:57:14 +02:00
mwiegand
d7e5483d74 graphs 2021-10-27 21:51:29 +02:00
mwiegand
3243bb1890 graphs 2021-10-27 21:46:11 +02:00
mwiegand
da06f7cf06 graphs 2021-10-27 20:40:09 +02:00
mwiegand
ef461fec1c disk usage graphs 2021-10-27 19:02:07 +02:00
mwiegand
e5b1bc0921 flux derivative no negative 2021-10-27 18:01:33 +02:00
mwiegand
bbef19a73d fix gollum ruby2.7 brings bundler and everything is strange 2021-10-27 17:14:54 +02:00
mwiegand
15155cd7a9 nextcloud php-fpm 2021-10-27 16:58:00 +02:00
mwiegand
b94b95c5f9 debian 11 2021-10-27 16:55:19 +02:00
mwiegand
5931ce16d9 telegraf processes 2021-10-27 15:30:30 +02:00
mwiegand
d2798a91c1 wip 2021-10-27 13:44:53 +02:00
mwiegand
e5ddb318bd zfs scrub more seldom 2021-10-26 10:58:12 +02:00
mwiegand
1a1121e010 gollum wip 2021-10-26 00:14:32 +02:00
mwiegand
2ecef5446a picsort fixes 2021-10-26 00:14:26 +02:00
mwiegand
effded149a dunno 2021-10-22 17:20:42 +02:00
421d3c10ba Merge pull request 'multi-redis' (#1) from multi-redis into master
Reviewed-on: #1
2021-10-22 17:17:02 +02:00
mwiegand
077c65d8b9 redis conf multiple values 2021-10-22 17:15:31 +02:00
mwiegand
40cedbf20c wip 2021-10-22 16:59:03 +02:00
mwiegand
75d0043578 multiredis 2021-10-22 16:06:42 +02:00
mwiegand
4e6071183f wip 2021-10-20 23:53:48 +02:00
mwiegand
d1bbfecbc9 wip 2021-10-20 23:50:12 +02:00
mwiegand
ea494b10e3 wip 2021-10-20 23:47:22 +02:00
mwiegand
e184259c37 wip 2021-10-20 22:39:59 +02:00
mwiegand
d2d233deda wip 2021-10-20 22:39:40 +02:00
mwiegand
1398dc247b grafana postfix 2021-10-19 00:38:16 +02:00
mwiegand
effe20d323 postfix setfacl 2021-10-19 00:25:24 +02:00
mwiegand
bad7977ffc wip 2021-10-19 00:09:46 +02:00
mwiegand
b2d93de057 wip 2021-10-18 23:41:30 +02:00
mwiegand
c6c9dc37f7 backup redis 2021-10-18 23:36:16 +02:00
mwiegand
b840462782 learn 2021-10-18 23:27:34 +02:00
mwiegand
b4f1145b6a wip 2021-10-18 22:29:01 +02:00
mwiegand
7b3c1ece8d spam 2021-10-18 22:15:33 +02:00
mwiegand
162982ac00 wip 2021-10-16 19:22:55 +02:00
mwiegand
602ef0cc96 spam 2021-10-16 19:22:43 +02:00
mwiegand
fe24a79b6f wip 2021-10-16 15:48:36 +02:00
mwiegand
d10c5a2743 rspamd 2021-10-16 15:42:35 +02:00
mwiegand
973ce6673b wip 2021-10-16 15:31:33 +02:00
mwiegand
213aeacbdc nc cron 2021-10-16 13:44:51 +02:00
mwiegand
be6a793114 nc picsort 2021-10-16 13:35:44 +02:00
mwiegand
1010537699 aggregateWindow2 2021-10-16 12:38:05 +02:00
mwiegand
c88102b0ca aggregateWindow 2021-10-16 12:31:55 +02:00
mwiegand
a2a588b171 test dmcrypt 2021-10-13 23:54:41 +02:00
mwiegand
1c330b626e wip 2021-10-13 12:35:39 +02:00
mwiegand
e26b259009 wip 2021-10-13 02:28:30 +02:00
mwiegand
968f5e8d7d wip 2021-10-13 02:09:31 +02:00
mwiegand
0c69e6c478 wip 2021-10-13 01:55:54 +02:00
mwiegand
0b8b769b12 wip 2021-10-13 01:34:19 +02:00
mwiegand
f82eceae2e wip 2021-10-13 01:31:36 +02:00
mwiegand
9b3693dc04 wip 2021-10-13 01:13:32 +02:00
mwiegand
ab0e8a8ff5 wip 2021-10-13 01:06:47 +02:00
mwiegand
747d10f509 grafana 2021-10-13 00:48:57 +02:00
mwiegand
21e01feffc wip 2021-10-13 00:37:35 +02:00
mwiegand
dbaeeeaad7 wip 2021-10-13 00:33:54 +02:00
mwiegand
0a3837db64 dmcrypt 2021-10-13 00:10:10 +02:00
mwiegand
0e5c6f5401 wip 2021-10-13 00:10:06 +02:00
mwiegand
84e2d2b0ee wip 2021-10-12 22:57:02 +02:00
mwiegand
69e18014e3 zfs-mirror 2021-10-12 22:28:57 +02:00
mwiegand
ada09df208 wip 2021-10-12 21:23:24 +02:00
mwiegand
3e9da36e2e wip 2021-10-12 21:18:28 +02:00
mwiegand
4b9bbeffdd wip 2021-10-12 20:52:08 +02:00
mwiegand
2ca19525c9 wip 2021-10-11 02:11:42 +02:00
mwiegand
a8c6cc1337 wip 2021-10-11 02:08:52 +02:00
mwiegand
a3a5d68078 wip 2021-10-11 01:29:52 +02:00
mwiegand
55c993b1fe wip 2021-10-11 01:00:55 +02:00
mwiegand
f21998266d wip 2021-10-11 00:02:29 +02:00
mwiegand
af592c771e wip 2021-10-10 23:16:23 +02:00
mwiegand
acb792f0a4 wip 2021-10-10 21:47:35 +02:00
mwiegand
6fc6f0b6f4 wip 2021-10-10 20:48:14 +02:00
mwiegand
7302811418 bw 4.12 2021-10-10 18:08:41 +02:00
mwiegand
c888aca1af wip 2021-10-10 17:19:08 +02:00
mwiegand
4dd42db7f2 wip 2021-10-10 17:15:08 +02:00
mwiegand
42fc425173 wip 2021-10-10 15:34:21 +02:00
mwiegand
f596f6b833 wip 2021-10-10 15:13:31 +02:00
mwiegand
7c72fbb044 wip 2021-10-10 15:02:15 +02:00
mwiegand
d87c77b441 wip 2021-10-10 14:55:57 +02:00
mwiegand
316d7db89d wip 2021-10-10 14:50:59 +02:00
mwiegand
c9eef4fc79 wip 2021-10-10 14:31:05 +02:00
mwiegand
1c9c4e0902 wip 2021-10-10 03:46:43 +02:00
mwiegand
cbaded9f8a wip 2021-10-10 01:35:12 +02:00
mwiegand
7c3c1cabf5 wip 2021-10-10 01:18:05 +02:00
mwiegand
ce7b3a0fc7 wip 2021-10-10 00:31:12 +02:00
mwiegand
cf54948abb wip 2021-10-10 00:06:28 +02:00
mwiegand
e35cdb98bb wip 2021-10-09 23:37:14 +02:00
mwiegand
d5a7a8c8cb wip 2021-10-09 22:34:08 +02:00
mwiegand
5f85594b32 wip 2021-10-09 22:15:03 +02:00
mwiegand
3eee733daf wip 2021-10-09 21:50:22 +02:00
mwiegand
811a3caf98 wip 2021-10-09 17:28:34 +02:00
mwiegand
4098cd60ea wip 2021-10-09 17:28:29 +02:00
mwiegand
bb5c0ec453 wip 2021-10-09 16:52:25 +02:00
mwiegand
5479f44b56 wip 2021-10-09 15:52:45 +02:00
mwiegand
bd7a9c644e wip 2021-10-09 15:18:15 +02:00
mwiegand
26a4b8cd80 wip 2021-10-09 15:01:45 +02:00
mwiegand
ebac40506e wip 2021-10-09 14:37:52 +02:00
mwiegand
0e534380e3 wip 2021-10-09 14:37:32 +02:00
mwiegand
82b84aacc5 wip 2021-09-20 09:57:59 +02:00
mwiegand
1748401ca7 wip 2021-09-13 13:13:56 +02:00
mwiegand
136cf6a466 wip 2021-09-13 12:41:46 +02:00
mwiegand
7931cd8226 wip 2021-09-13 12:38:37 +02:00
mwiegand
2e3038a7e8 wip 2021-09-13 11:21:04 +02:00
mwiegand
393aee068f wip 2021-09-13 10:41:00 +02:00
mwiegand
25ba946216 minecraft 2021-09-13 10:38:02 +02:00
mwiegand
5dcf412a97 wip 2021-08-17 09:12:32 +02:00
mwiegand
2d83675c64 wip 2021-08-16 18:06:58 +02:00
mwiegand
cf1079d368 wip 2021-08-13 20:05:06 +02:00
mwiegand
bae0a087b5 wip 2021-08-13 18:42:02 +02:00
mwiegand
1501d47382 wip 2021-08-13 15:59:42 +02:00
mwiegand
53db057485 wip 2021-08-13 01:21:22 +02:00
mwiegand
d2c1c6ab0d wip 2021-08-13 01:19:31 +02:00
mwiegand
cc58a4e74f wip 2021-08-13 01:15:21 +02:00
mwiegand
1d791e2cc1 wip 2021-08-12 22:43:48 +02:00
mwiegand
e859467b0c wip 2021-07-17 01:16:23 +02:00
mwiegand
b2c9a0afd0 wip 2021-07-16 00:47:48 +02:00
mwiegand
5172573448 wip 2021-07-14 13:03:29 +02:00
mwiegand
46941f532e wip 2021-07-14 12:41:22 +02:00
mwiegand
b59c691b5f wip 2021-07-13 16:52:13 +02:00
mwiegand
5d73f73e4b wip 2021-07-13 16:50:45 +02:00
mwiegand
42d1dd433c wip 2021-07-13 16:50:08 +02:00
mwiegand
aad74c13d4 wip 2021-07-13 16:49:57 +02:00
mwiegand
f49928bed1 wip 2021-07-13 16:37:31 +02:00
mwiegand
9db53faf23 wip 2021-07-13 16:23:47 +02:00
mwiegand
65ef24f403 wip 2021-07-13 16:17:38 +02:00
461 changed files with 19253 additions and 115 deletions

22
.editorconfig Normal file

@@ -0,0 +1,22 @@
root = true

[*]
end_of_line = lf

[*.py]
indent_style = space
indent_size = 4
trim_trailing_whitespace = true
insert_final_newline = true

[*.toml]
indent_style = space
indent_size = 2
trim_trailing_whitespace = true
insert_final_newline = true

[*.yaml]
indent_style = space
indent_size = 2
trim_trailing_whitespace = true
insert_final_newline = true

9
.envrc

@@ -1,8 +1,7 @@
#!/usr/bin/env bash
python3 -m venv .venv
source ./.venv/bin/activate
PATH_add bin
export BW_GIT_DEPLOY_CACHE="$(realpath ~)/.cache/bw/git_deploy"
mkdir -p "$BW_GIT_DEPLOY_CACHE"
unset PS1
source_env ~/.local/share/direnv/pyenv
source_env ~/.local/share/direnv/venv
source_env ~/.local/share/direnv/bundlewrap

2
.gitignore vendored

@@ -1,2 +1,4 @@
.secrets.cfg*
.venv
.cache
*.pyc

.python-version

@@ -1 +0,0 @@
3.9.0

48
README.md Normal file

@@ -0,0 +1,48 @@
# TODO
- dont spamfilter forwarded mails
- gollum wiki
- blog?
- fix dkim not working sometimes
- LDAP
- oauth2/OpenID
- icinga
Raspberry pi as soundcard
- gadget mode
- OTG g_audio
- https://audiosciencereview.com/forum/index.php?threads/raspberry-pi-as-usb-to-i2s-adapter.8567/post-215824
# install bw fork
pip3 install --editable git+file:///Users/mwiegand/Projekte/bundlewrap-fork@main#egg=bundlewrap
# monitor timers
```sh
# name of the timer to inspect
Timer=backup

# resolve the service unit that the timer triggers
Triggers=$(systemctl show ${Timer}.timer --property=Triggers --value)
echo $Triggers

if systemctl is-failed "$Triggers"
then
    # look up the failed invocation and its exit status, then dump its journal
    InvocationID=$(systemctl show "$Triggers" --property=InvocationID --value)
    echo $InvocationID
    ExitCode=$(systemctl show "$Triggers" -p ExecStartEx --value | sed 's/^{//' | sed 's/}$//' | tr ';' '\n' | xargs -n 1 | grep '^status=' | cut -d '=' -f 2)
    echo $ExitCode
    journalctl INVOCATION_ID="$InvocationID" --output cat
fi
```
telegraf: execd for daemons
TEST
# git signing
git config --global gpg.format ssh
git config --global commit.gpgsign true
git config user.name CroneKorkN
git config user.email i@ckn.li
git config user.signingkey "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILMVroYmswD4tLk6iH+2tvQiyaMe42yfONDsPDIdFv6I"
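A quick, optional check that commits are actually being signed (plain git; note that verifying ssh signatures additionally requires gpg.ssh.allowedSignersFile to be configured):
```sh
# show the signature status of the most recent commit
git log --show-signature -1
```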

32
bin/rcon Executable file

@@ -0,0 +1,32 @@
#!/usr/bin/env python3

from sys import argv
from os.path import realpath, dirname
from shlex import quote

from bundlewrap.repo import Repository

repo = Repository(dirname(dirname(realpath(__file__))))

if len(argv) == 1:
    for node in repo.nodes:
        for name in node.metadata.get('left4dead2/servers', {}):
            print(name)
    exit(0)

server = argv[1]
command = argv[2]

remote_code = """
from rcon.source import Client

with Client('127.0.0.1', {port}, passwd='''{password}''') as client:
    response = client.run('''{command}''')

print(response)
"""

for node in repo.nodes:
    for name, conf in node.metadata.get('left4dead2/servers', {}).items():
        if name == server:
            response = node.run('python3 -c ' + quote(remote_code.format(port=conf['port'], password=conf['rcon_password'], command=command)))
            print(response.stdout.decode())
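A hypothetical invocation of the script above (the server name and rcon command are placeholders; real names come from the nodes' left4dead2/servers metadata):
```sh
bin/rcon                  # list all configured left4dead2 server names
bin/rcon myserver status  # run the rcon command "status" on the server named "myserver"
```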

70
bin/upgrade_and_restart_all Executable file

@@ -0,0 +1,70 @@
#!/usr/bin/env python3

from bundlewrap.repo import Repository
from os.path import realpath, dirname
from ipaddress import ip_interface

repo = Repository(dirname(dirname(realpath(__file__))))

nodes = [
    node
    for node in sorted(repo.nodes_in_group('debian'))
    if not node.dummy
]

print('updating nodes:', sorted(node.name for node in nodes))

# UPDATE

for node in nodes:
    print('--------------------------------------')
    print('updating', node.name)
    print('--------------------------------------')

    repo.libs.wol.wake(node)

    print(node.run('DEBIAN_FRONTEND=noninteractive apt update').stdout.decode())
    print(node.run('DEBIAN_FRONTEND=noninteractive apt list --upgradable').stdout.decode())
    if int(node.run('DEBIAN_FRONTEND=noninteractive apt list --upgradable 2> /dev/null | grep upgradable | wc -l').stdout.decode()):
        print(node.run('DEBIAN_FRONTEND=noninteractive apt -qy full-upgrade').stdout.decode())

# REBOOT IN ORDER

wireguard_servers = [
    node
    for node in nodes
    if node.has_bundle('wireguard')
    and (
        ip_interface(node.metadata.get('wireguard/my_ip')).network.prefixlen <
        ip_interface(node.metadata.get('wireguard/my_ip')).network.max_prefixlen
    )
]

wireguard_s2s = [
    node
    for node in nodes
    if node.has_bundle('wireguard')
    and (
        ip_interface(node.metadata.get('wireguard/my_ip')).network.prefixlen ==
        ip_interface(node.metadata.get('wireguard/my_ip')).network.max_prefixlen
    )
]

everything_else = [
    node
    for node in nodes
    if not node.has_bundle('wireguard')
]

print('======================================')

for node in [
    *everything_else,
    *wireguard_s2s,
    *wireguard_servers,
]:
    try:
        if node.run('test -e /var/run/reboot-required', may_fail=True).return_code == 0:
            print('rebooting', node.name)
            print(node.run('systemctl reboot').stdout.decode())
        else:
            print('not rebooting', node.name)
    except Exception as e:
        print(e)
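The reboot order above comes straight from the wireguard/my_ip metadata: nodes whose address carries a whole subnet (network prefix shorter than the maximum) are treated as WireGuard hubs and rebooted last, after the plain nodes and the site-to-site peers, presumably so the VPN stays reachable while everything else restarts. A hypothetical supervised run from the repository root:
```sh
# upgrade all non-dummy debian nodes, then reboot only those
# that report /var/run/reboot-required, WireGuard hubs last
bin/upgrade_and_restart_all
```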

9
bin/wake Executable file

@@ -0,0 +1,9 @@
#!/usr/bin/env python3
from bundlewrap.repo import Repository
from os.path import realpath, dirname
from sys import argv
repo = Repository(dirname(dirname(realpath(__file__))))
repo.libs.wol.wake(repo.get_node(argv[1]))
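A hypothetical call; the node name is a placeholder for any node defined in the repo:
```sh
# wake the given node via the repo's wol helper
bin/wake home.server
```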

52
bin/wireguard_client_config Executable file

@@ -0,0 +1,52 @@
#!/usr/bin/env python3

from bundlewrap.repo import Repository
from os.path import realpath, dirname
from sys import argv
from ipaddress import ip_network, ip_interface

if len(argv) != 3:
    print(f'usage: {argv[0]} <node> <client>')
    exit(1)

repo = Repository(dirname(dirname(realpath(__file__))))
server_node = repo.get_node(argv[1])

if argv[2] not in server_node.metadata.get('wireguard/clients'):
    print(f'client {argv[2]} not found in: {server_node.metadata.get("wireguard/clients").keys()}')
    exit(1)

data = server_node.metadata.get(f'wireguard/clients/{argv[2]}')

vpn_network = ip_interface(server_node.metadata.get('wireguard/my_ip')).network

allowed_ips = [
    vpn_network,
    ip_interface(server_node.metadata.get('network/internal/ipv4')).network,
]

for peer in server_node.metadata.get('wireguard/s2s').values():
    for network in peer['allowed_ips']:
        if not ip_network(network).subnet_of(vpn_network):
            allowed_ips.append(ip_network(network))

conf = f'''
[Interface]
PrivateKey = {repo.libs.wireguard.privkey(data['peer_id'])}
ListenPort = 51820
Address = {data['peer_ip']}
DNS = 172.30.0.1

[Peer]
PublicKey = {repo.libs.wireguard.pubkey(server_node.metadata.get('id'))}
PresharedKey = {repo.libs.wireguard.psk(data['peer_id'], server_node.metadata.get('id'))}
AllowedIPs = {', '.join(str(client_route) for client_route in sorted(allowed_ips))}
Endpoint = {ip_interface(server_node.metadata.get('network/external/ipv4')).ip}:51820
PersistentKeepalive = 10
'''

print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
print(conf)
print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')

if input("print qrcode? [Yn]: ").upper() in ['', 'Y']:
    import pyqrcode
    print(pyqrcode.create(conf).terminal(quiet_zone=1))
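A hypothetical invocation with placeholder node and client names; the client has to exist under the chosen node's wireguard/clients metadata, and the printed config can also be emitted as a QR code:
```sh
# both arguments are placeholders
bin/wireguard_client_config home.router phone
```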

10
bundles/apcupsd/README.md Normal file

@@ -0,0 +1,10 @@
http://www.apcupsd.org/manual/manual.html#power-down-during-shutdown
- onbattery: power lost
- battery drains
- when the BATTERYLEVEL or MINUTES threshold is reached, the server is shut down and
  the UPS is instructed to cut the power
- when the mains power returns, the UPS will reinstate power to the server
- the server will reboot
NOT IMPLEMENTED
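For reference, the killpower step described in the linked manual (and marked NOT IMPLEMENTED above) is conventionally a check in the OS halt path for the flag file apcupsd writes to PWRFAILDIR; a rough sketch, assuming the paths used in the config below:
```sh
# sketch only, not part of this repo: runs late in the halt sequence
if [ -f /etc/apcupsd/powerfail ]; then
    # instruct the UPS to cut output power once the OS is down
    /etc/apcupsd/apccontrol killpower
fi
```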


@@ -0,0 +1,343 @@
## apcupsd.conf v1.1 ##
#
# "apcupsd" POSIX config file
#
# Note that the apcupsd daemon must be restarted in order for changes to
# this configuration file to become active.
#
#
# ========= General configuration parameters ============
#
# UPSNAME xxx
# Use this to give your UPS a name in log files and such. This
# is particularly useful if you have multiple UPSes. This does not
# set the EEPROM. It should be 8 characters or less.
#UPSNAME
# UPSCABLE <cable>
# Defines the type of cable connecting the UPS to your computer.
#
# Possible generic choices for <cable> are:
# simple, smart, ether, usb
#
# Or a specific cable model number may be used:
# 940-0119A, 940-0127A, 940-0128A, 940-0020B,
# 940-0020C, 940-0023A, 940-0024B, 940-0024C,
# 940-1524C, 940-0024G, 940-0095A, 940-0095B,
# 940-0095C, 940-0625A, M-04-02-2000
#
UPSCABLE usb
# To get apcupsd to work, in addition to defining the cable
# above, you must also define a UPSTYPE, which corresponds to
# the type of UPS you have (see the Description for more details).
# You must also specify a DEVICE, sometimes referred to as a port.
# For USB UPSes, please leave the DEVICE directive blank. For
# other UPS types, you must specify an appropriate port or address.
#
# UPSTYPE DEVICE Description
# apcsmart /dev/tty** Newer serial character device, appropriate for
# SmartUPS models using a serial cable (not USB).
#
# usb <BLANK> Most new UPSes are USB. A blank DEVICE
# setting enables autodetection, which is
# the best choice for most installations.
#
# net hostname:port Network link to a master apcupsd through apcupsd's
# Network Information Server. This is used if the
# UPS powering your computer is connected to a
# different computer for monitoring.
#
# snmp hostname:port:vendor:community
# SNMP network link to an SNMP-enabled UPS device.
# Hostname is the ip address or hostname of the UPS
# on the network. Vendor can be "APC" or
# "APC_NOTRAP". "APC_NOTRAP" will disable SNMP trap
# catching; you usually want "APC". Port is usually
# 161. Community is usually "private".
#
# netsnmp hostname:port:vendor:community
# OBSOLETE
# Same as SNMP above but requires use of the
# net-snmp library. Unless you have a specific need
# for this old driver, you should use 'snmp' instead.
#
# dumb /dev/tty** Old serial character device for use with
# simple-signaling UPSes.
#
# pcnet ipaddr:username:passphrase:port
# PowerChute Network Shutdown protocol which can be
# used as an alternative to SNMP with the AP9617
# family of smart slot cards. ipaddr is the IP
# address of the UPS management card. username and
# passphrase are the credentials for which the card
# has been configured. port is the port number on
# which to listen for messages from the UPS, normally
# 3052. If this parameter is empty or missing, the
# default of 3052 will be used.
#
# modbus /dev/tty** Serial device for use with newest SmartUPS models
# supporting the MODBUS protocol.
# modbus <BLANK> Leave the DEVICE setting blank for MODBUS over USB
# or set to the serial number of the UPS to ensure
# that apcupsd binds to that particular unit
# (helpful if you have more than one USB UPS).
#
UPSTYPE usb
#DEVICE /dev/ttyS0
# POLLTIME <int>
# Interval (in seconds) at which apcupsd polls the UPS for status. This
# setting applies both to directly-attached UPSes (UPSTYPE apcsmart, usb,
# dumb) and networked UPSes (UPSTYPE net, snmp). Lowering this setting
# will improve apcupsd's responsiveness to certain events at the cost of
# higher CPU utilization. The default of 60 is appropriate for most
# situations.
#POLLTIME 60
# LOCKFILE <path to lockfile>
# Path for device lock file for UPSes connected via USB or
# serial port. This is the directory into which the lock file
# will be written. The directory must already exist; apcupsd will not create
# it. The actual name of the lock file is computed from DEVICE.
# Not used on Win32.
LOCKFILE /var/lock
# SCRIPTDIR <path to script directory>
# Directory in which apccontrol and event scripts are located.
SCRIPTDIR /etc/apcupsd
# PWRFAILDIR <path to powerfail directory>
# Directory in which to write the powerfail flag file. This file
# is created when apcupsd initiates a system shutdown and is
# checked in the OS halt scripts to determine if a killpower
# (turning off UPS output power) is required.
PWRFAILDIR /etc/apcupsd
# NOLOGINDIR <path to nologin directory>
# Directory in which to write the nologin file. The existence
# of this flag file tells the OS to disallow new logins.
NOLOGINDIR /etc
#
# ======== Configuration parameters used during power failures ==========
#
# The ONBATTERYDELAY is the time in seconds from when a power failure
# is detected until we react to it with an onbattery event.
#
# This means that apccontrol will be called with the powerout argument
# immediately when a power failure is detected. However, the
# onbattery argument is passed to apccontrol only after the
# ONBATTERYDELAY time. If you don't want to be annoyed by short
# power failures, make sure that apccontrol powerout does nothing,
# i.e. comment out the wall.
ONBATTERYDELAY 6
#
# Note: BATTERYLEVEL, MINUTES, and TIMEOUT work in conjunction, so
# the first that occurs will cause the initiation of a shutdown.
#
# If during a power failure, the remaining battery percentage
# (as reported by the UPS) is below or equal to BATTERYLEVEL,
# apcupsd will initiate a system shutdown.
BATTERYLEVEL 10
# If during a power failure, the remaining runtime in minutes
# (as calculated internally by the UPS) is below or equal to MINUTES,
# apcupsd will initiate a system shutdown.
MINUTES 5
# If during a power failure, the UPS has run on batteries for TIMEOUT
# many seconds or longer, apcupsd will initiate a system shutdown.
# A value of 0 disables this timer.
#
# Note, if you have a Smart UPS, you will most likely want to disable
# this timer by setting it to zero. That way, your UPS will continue
# on batteries until either the % charge remaining drops to or below BATTERYLEVEL,
# or the remaining battery runtime drops to or below MINUTES. Of course,
# if you are testing, setting this to 60 causes a quick system shutdown
# if you pull the power plug.
# If you have an older dumb UPS, you will want to set this to less than
# the time you know you can run on batteries.
TIMEOUT 0
# Time in seconds between annoying users to signoff prior to
# system shutdown. 0 disables.
ANNOY 300
# Initial delay after power failure before warning users to get
# off the system.
ANNOYDELAY 60
# The condition which determines when users are prevented from
# logging in during a power failure.
# NOLOGON <string> [ disable | timeout | percent | minutes | always ]
NOLOGON disable
# If KILLDELAY is non-zero, apcupsd will continue running after a
# shutdown has been requested, and after the specified time in
# seconds attempt to kill the power. This is for use on systems
# where apcupsd cannot regain control after a shutdown.
# KILLDELAY <seconds> 0 disables
KILLDELAY 0
#
# ==== Configuration statements for Network Information Server ====
#
# NETSERVER [ on | off ] on enables, off disables the network
# information server. If NETSERVER is on, a network information
# server process will be started for serving the STATUS and
# EVENT data over the network (used by CGI programs).
NETSERVER on
# NISIP <dotted notation ip address>
# IP address on which NIS server will listen for incoming connections.
# This is useful if your server is multi-homed (has more than one
# network interface and IP address). Default value is 0.0.0.0 which
# means any incoming request will be serviced. Alternatively, you can
# configure this setting to any specific IP address of your server and
# NIS will listen for connections only on that interface. Use the
# loopback address (127.0.0.1) to accept connections only from the
# local machine.
NISIP 127.0.0.1
# NISPORT <port> default is 3551 as registered with the IANA
# port to use for sending STATUS and EVENTS data over the network.
# It is not used unless NETSERVER is on. If you change this port,
# you will need to change the corresponding value in the cgi directory
# and rebuild the cgi programs.
NISPORT 3551
# If you want the last few EVENTS to be available over the network
# by the network information server, you must define an EVENTSFILE.
EVENTSFILE /var/log/apcupsd.events
# EVENTSFILEMAX <kilobytes>
# By default, the size of the EVENTSFILE will not be allowed to exceed
# 10 kilobytes. When the file grows beyond this limit, older EVENTS will
# be removed from the beginning of the file (first in first out). The
# parameter EVENTSFILEMAX can be set to a different kilobyte value, or set
# to zero to allow the EVENTSFILE to grow without limit.
EVENTSFILEMAX 10
#
# ========== Configuration statements used if sharing =============
# a UPS with more than one machine
#
# Remaining items are for ShareUPS (APC expansion card) ONLY
#
# UPSCLASS [ standalone | shareslave | sharemaster ]
# Normally standalone unless you share a UPS using an APC ShareUPS
# card.
UPSCLASS standalone
# UPSMODE [ disable | share ]
# Normally disable unless you share a UPS using an APC ShareUPS card.
UPSMODE disable
#
# ===== Configuration statements to control apcupsd system logging ========
#
# Time interval in seconds between writing the STATUS file; 0 disables
STATTIME 0
# Location of STATUS file (written to only if STATTIME is non-zero)
STATFILE /var/log/apcupsd.status
# LOGSTATS [ on | off ] on enables, off disables
# Note! This generates a lot of output, so if
# you turn this on, be sure that the
# file defined in syslog.conf for LOG_NOTICE is a named pipe.
# You probably do not want this on.
LOGSTATS off
# Time interval in seconds between writing the DATA records to
# the log file. 0 disables.
DATATIME 0
# FACILITY defines the logging facility (class) for logging to syslog.
# If not specified, it defaults to "daemon". This is useful
# if you want to separate the data logged by apcupsd from other
# programs.
#FACILITY DAEMON
#
# ========== Configuration statements used in updating the UPS EPROM =========
#
#
# These statements are used only by apctest when choosing "Set EEPROM with conf
# file values" from the EEPROM menu. THESE STATEMENTS HAVE NO EFFECT ON APCUPSD.
#
# UPS name, max 8 characters
#UPSNAME UPS_IDEN
# Battery date - 8 characters
#BATTDATE mm/dd/yy
# Sensitivity to line voltage quality (H cause faster transfer to batteries)
# SENSITIVITY H M L (default = H)
#SENSITIVITY H
# UPS delay after power return (seconds)
# WAKEUP 000 060 180 300 (default = 0)
#WAKEUP 60
# UPS Grace period after request to power off (seconds)
# SLEEP 020 180 300 600 (default = 20)
#SLEEP 180
# Low line voltage causing transfer to batteries
# The permitted values depend on your model as defined by last letter
# of FIRMWARE or APCMODEL. Some representative values are:
# D 106 103 100 097
# M 177 172 168 182
# A 092 090 088 086
# I 208 204 200 196 (default = 0 => not valid)
#LOTRANSFER 208
# High line voltage causing transfer to batteries
# The permitted values depend on your model as defined by last letter
# of FIRMWARE or APCMODEL. Some representative values are:
# D 127 130 133 136
# M 229 234 239 224
# A 108 110 112 114
# I 253 257 261 265 (default = 0 => not valid)
#HITRANSFER 253
# Battery charge needed to restore power
# RETURNCHARGE 00 15 50 90 (default = 15)
#RETURNCHARGE 15
# Alarm delay
# 0 = zero delay after pwr fail, T = power fail + 30 sec, L = low battery, N = never
# BEEPSTATE 0 T L N (default = 0)
#BEEPSTATE T
# Low battery warning delay in minutes
# LOWBATT 02 05 07 10 (default = 02)
#LOWBATT 2
# UPS Output voltage when running on batteries
# The permitted values depend on your model as defined by last letter
# of FIRMWARE or APCMODEL. Some representative values are:
# D 115
# M 208
# A 100
# I 230 240 220 225 (default = 0 => not valid)
#OUTPUTVOLTS 230
# Self test interval in hours 336=2 weeks, 168=1 week, ON=at power on
# SELFTEST 336 168 ON OFF (default = 336)
#SELFTEST 336

View file

@ -0,0 +1,10 @@
#!/bin/bash
date=$(date --utc +%s%N)
METRICS=$(apcaccess)
for METRIC in TIMELEFT LOADPCT BCHARGE
do
echo "apcupsd $METRIC=$(grep $METRIC <<< $METRICS | cut -d ':' -f 2 | xargs | cut -d ' ' -f 1 ) $date"
done
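
For comparison only, the same metrics could be collected with a small Python script. This is an illustrative sketch, not what the bundle deploys, and it assumes `apcaccess` is on the PATH and prints `KEY : VALUE ...` lines as used above:

```python
#!/usr/bin/env python3
# Illustrative Python equivalent of the shell plugin above; not deployed by this bundle.
import subprocess
import time

METRICS = ('TIMELEFT', 'LOADPCT', 'BCHARGE')

def main():
    timestamp_ns = time.time_ns()  # influx line protocol uses nanosecond timestamps
    status = {}
    for line in subprocess.check_output(['apcaccess'], text=True).splitlines():
        key, _, value = line.partition(':')
        fields = value.split()
        if fields:
            status[key.strip()] = fields[0]  # drop units such as "Minutes" or "Percent"
    for metric in METRICS:
        if metric in status:
            print(f'apcupsd {metric}={status[metric]} {timestamp_ns}')

if __name__ == '__main__':
    main()
```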

20
bundles/apcupsd/items.py Normal file
View file

@ -0,0 +1,20 @@
files = {
'/etc/apcupsd/apcupsd.conf': {
'needs': [
'pkg_apt:apcupsd',
],
},
'/usr/local/share/telegraf/apcupsd': {
'source': 'telegraf_plugin',
'mode': '755',
},
}
svc_systemd = {
'apcupsd': {
'needs': [
'pkg_apt:apcupsd',
'file:/etc/apcupsd/apcupsd.conf',
],
}
}

View file

@ -0,0 +1,30 @@
defaults = {
'apt': {
'packages': {
'apcupsd': {},
},
},
'grafana_rows': {
'ups',
},
'sudoers': {
'telegraf': {
'/usr/local/share/telegraf/apcupsd',
},
},
'telegraf': {
'config': {
'inputs': {
'exec': {
repo.libs.hashable.hashable({
'commands': ["sudo /usr/local/share/telegraf/apcupsd"],
'name_override': "apcupsd",
'data_format': "influx",
'interval': '30s',
'flush_interval': '30s',
}),
},
},
},
},
}

37
bundles/apt/README.md Normal file
View file

@ -0,0 +1,37 @@
# https://manpages.debian.org/latest/apt/sources.list.5.de.html
# https://repolib.readthedocs.io/en/latest/deb822-format.html
```python
{
'apt': {
'packages': {
'apt-transport-https': {},
},
'sources': {
'debian': {
'types': { # optional, defaults to `{'deb'}`
'deb',
'deb-src',
},
'urls': {
'https://deb.debian.org/debian',
},
'suites': { # at least one
'{codename}',
'{codename}-updates',
'{codename}-backports',
},
'components': { # optional
'main',
'contrib',
'non-free',
},
# key:
# - optional, defaults to source name (`debian` in this example)
# - place key under data/apt/keys/debian-12.{asc|gpg}
'key': 'debian-{version}',
},
},
},
}
```
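
The actual rendering is done by `repo.libs.apt.render_source` (see `bundles/apt/items.py` below). Purely as an illustration of how such metadata maps onto the deb822 format linked above, a hypothetical minimal renderer could look like this; the `.asc` key extension and the `codename` default are assumptions, the real code resolves both from node metadata:

```python
# Hypothetical sketch of rendering one source entry into a deb822 stanza.
# The bundle's real implementation is repo.libs.apt.render_source.
def render_deb822(name, source, codename='bookworm'):
    fields = {
        'Types': ' '.join(sorted(source.get('types', {'deb'}))),
        'URIs': ' '.join(sorted(source['urls'])),
        'Suites': ' '.join(sorted(s.format(codename=codename) for s in source['suites'])),
        'Components': ' '.join(sorted(source.get('components', set()))),
        # assumption: .asc extension; the real code looks up the key file's extension
        'Signed-By': f"/etc/apt/keyrings/{source.get('key', name)}.asc",
    }
    return '\n'.join(f'{field}: {value}' for field, value in fields.items() if value) + '\n'

print(render_deb822('debian', {
    'urls': {'https://deb.debian.org/debian'},
    'suites': {'{codename}', '{codename}-updates'},
    'components': {'main', 'contrib'},
}))
```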

View file

@ -0,0 +1,15 @@
#!/bin/bash
apt update -qq --silent 2> /dev/null
UPGRADABLE=$(apt list --upgradable -qq 2> /dev/null | cut -d '/' -f 1)
if test "$UPGRADABLE" != ""
then
echo "$(wc -l <<< $UPGRADABLE) package(s) upgradable:"
echo
echo "$UPGRADABLE"
exit 1
else
exit 0
fi

139
bundles/apt/items.py Normal file
View file

@ -0,0 +1,139 @@
# TODO pin repo: https://superuser.com/a/1595920
from os.path import join, basename
directories = {
'/etc/apt': {
'purge': True,
'triggers': {
'action:apt_update',
},
},
'/etc/apt/apt.conf.d': {
# existence is expected
'purge': True,
'triggers': {
'action:apt_update',
},
},
'/etc/apt/keyrings': {
# https://askubuntu.com/a/1307181
'purge': True,
'triggers': {
'action:apt_update',
},
},
# '/etc/apt/listchanges.conf.d': {
# 'purge': True,
# 'triggers': {
# 'action:apt_update',
# },
# },
'/etc/apt/preferences.d': {
'purge': True,
'triggers': {
'action:apt_update',
},
},
'/etc/apt/sources.list.d': {
'purge': True,
'triggers': {
'action:apt_update',
},
},
}
files = {
'/etc/apt/apt.conf': {
'content': repo.libs.apt.render_apt_conf(node.metadata.get('apt/config')),
'triggers': {
'action:apt_update',
},
},
'/etc/apt/sources.list': {
'content': '# managed by bundlewrap\n',
'triggers': {
'action:apt_update',
},
},
# '/etc/apt/listchanges.conf': {
# 'content': repo.libs.ini.dumps(node.metadata.get('apt/list_changes')),
# },
'/usr/lib/nagios/plugins/check_apt_upgradable': {
'mode': '0755',
},
}
actions = {
'apt_update': {
'command': 'apt-get update',
'needed_by': {
'pkg_apt:',
},
'triggered': True,
'cascade_skip': False,
},
}
# create sources.lists and respective keyfiles
for name, config in node.metadata.get('apt/sources').items():
# place keyfile
keyfile_destination_path = repo.libs.apt.format_variables(node, config['options']['Signed-By'])
files[keyfile_destination_path] = {
'source': join(repo.path, 'data', 'apt', 'keys', basename(keyfile_destination_path)),
'content_type': 'binary',
'triggers': {
'action:apt_update',
},
}
# place sources.list
files[f'/etc/apt/sources.list.d/{name}.sources'] = {
'content': repo.libs.apt.render_source(node, name),
'triggers': {
'action:apt_update',
},
}
# create backport pinnings
for package, options in node.metadata.get('apt/packages', {}).items():
pkg_apt[package] = options
if pkg_apt[package].pop('backports', False):
files[f'/etc/apt/preferences.d/{package}'] = {
'content': '\n'.join([
f"Package: {package}",
f"Pin: release a={node.metadata.get('os_codename')}-backports",
f"Pin-Priority: 900",
]),
'needed_by': [
f'pkg_apt:{package}',
],
'triggers': {
'action:apt_update',
},
}
# unattended upgrades
#
# unattended-upgrades.service: delays shutdown if necessary
# apt-daily.timer: performs apt update
# apt-daily-upgrade.timer: performs apt upgrade
svc_systemd['unattended-upgrades.service'] = {
'needs': [
'pkg_apt:unattended-upgrades',
],
}
svc_systemd['apt-daily.timer'] = {
'needs': [
'pkg_apt:unattended-upgrades',
],
}
svc_systemd['apt-daily-upgrade.timer'] = {
'needs': [
'pkg_apt:unattended-upgrades',
],
}

177
bundles/apt/metadata.py Normal file
View file

@ -0,0 +1,177 @@
defaults = {
'apt': {
'packages': {
'apt-listchanges': {
'installed': False,
},
},
'config': {
'DPkg': {
'Pre-Install-Pkgs': {
'/usr/sbin/dpkg-preconfigure --apt || true',
},
'Post-Invoke': {
# keep package cache empty
'/bin/rm -f /var/cache/apt/archives/*.deb || true',
},
'Options': {
# https://unix.stackexchange.com/a/642541/357916
'--force-confold',
'--force-confdef',
},
},
'APT': {
'NeverAutoRemove': {
'^firmware-linux.*',
'^linux-firmware$',
'^linux-image-[a-z0-9]*$',
'^linux-image-[a-z0-9]*-[a-z0-9]*$',
},
'VersionedKernelPackages': {
# kernels
'linux-.*',
'kfreebsd-.*',
'gnumach-.*',
# (out-of-tree) modules
'.*-modules',
'.*-kernel',
},
'Never-MarkAuto-Sections': {
'metapackages',
'tasks',
},
'Move-Autobit-Sections': {
'oldlibs',
},
'Update': {
# https://unix.stackexchange.com/a/653377/357916
'Error-Mode': 'any',
},
},
},
'sources': {},
},
'monitoring': {
'services': {
'apt upgradable': {
'vars.command': '/usr/lib/nagios/plugins/check_apt_upgradable',
'vars.sudo': True,
'check_interval': '1h',
},
'current kernel': {
'vars.command': 'ls /boot/vmlinuz-* | sort -V | tail -n 1 | xargs -n1 basename | cut -d "-" -f 2- | grep -q "^$(uname -r)$"',
'check_interval': '1h',
},
'apt reboot-required': {
'vars.command': 'ls /var/run/reboot-required 2> /dev/null && exit 1 || exit 0',
'check_interval': '1h',
},
},
},
}
@metadata_reactor.provides(
'apt/sources',
)
def key(metadata):
return {
'apt': {
'sources': {
source_name: {
'key': source_name,
}
for source_name, source_config in metadata.get('apt/sources').items()
if 'key' not in source_config
},
},
}
@metadata_reactor.provides(
'apt/sources',
)
def signed_by(metadata):
return {
'apt': {
'sources': {
source_name: {
'options': {
'Signed-By': '/etc/apt/keyrings/' + metadata.get(f'apt/sources/{source_name}/key') + '.' + repo.libs.apt.find_keyfile_extension(node, metadata.get(f'apt/sources/{source_name}/key')),
},
}
for source_name in metadata.get('apt/sources')
},
},
}
@metadata_reactor.provides(
'apt/config',
'apt/packages',
)
def unattended_upgrades(metadata):
return {
'apt': {
'config': {
'APT': {
'Periodic': {
'Update-Package-Lists': '1',
'Unattended-Upgrade': '1',
},
},
'Unattended-Upgrade': {
'Origins-Pattern': {
"origin=*",
},
},
},
'packages': {
'unattended-upgrades': {},
},
},
}
# @metadata_reactor.provides(
# 'apt/config',
# 'apt/list_changes',
# )
# def listchanges(metadata):
# return {
# 'apt': {
# 'config': {
# 'DPkg': {
# 'Pre-Install-Pkgs': {
# '/usr/bin/apt-listchanges --apt || test $? -lt 10',
# },
# 'Tools': {
# 'Options': {
# '/usr/bin/apt-listchanges': {
# 'Version': '2',
# 'InfoFD': '20',
# },
# },
# },
# },
# 'Dir': {
# 'Etc': {
# 'apt-listchanges-main': 'listchanges.conf',
# 'apt-listchanges-parts': 'listchanges.conf.d',
# },
# },
# },
# 'list_changes': {
# 'apt': {
# 'frontend': 'pager',
# 'which': 'news',
# 'email_address': 'root',
# 'email_format': 'text',
# 'confirm': 'false',
# 'headers': 'false',
# 'reverse': 'false',
# 'save_seen': '/var/lib/apt/listchanges.db',
# },
# },
# },
# }

12
bundles/archive/README.md Normal file
View file

@ -0,0 +1,12 @@
```python
defaults = {
'archive': {
'/var/important': {
'exclude': [
'\.cache/',
'\.log$',
],
},
},
}
```
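
The `exclude` patterns of a path are joined into a single alternation and handed to `gsutil rsync -x` (see the mako template below). For illustration, the joining behaves like this:

```python
import re

# The exclude patterns of a path are combined into one regex, which
# gsutil rsync -x then applies to candidate file names.
exclude = [r'\.cache/', r'\.log$']
pattern = re.compile('|'.join(exclude))

assert pattern.search('home/user/.cache/thumbnails')   # excluded
assert pattern.search('var/important/app.log')         # excluded
assert not pattern.search('var/important/data.sqlite') # kept
```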

View file

@ -0,0 +1,29 @@
#!/bin/bash
if [[ "$1" == 'perform' ]]
then
echo 'NON-DRY RUN'
DRY=''
else
echo 'DRY RUN'
DRY='-n'
fi
% for path, options in paths.items():
# ${path}
gsutil ${'\\'}
-m ${'\\'}
-o 'GSUtil:parallel_process_count=${processes}' ${'\\'}
-o 'GSUtil:parallel_thread_count=${threads}' ${'\\'}
rsync ${'\\'}
$DRY ${'\\'}
-r ${'\\'}
-d ${'\\'}
-e ${'\\'}
% if options.get('exclude'):
-x '${'|'.join(options['exclude'])}' ${'\\'}
% endif
'${options['encrypted_path']}' ${'\\'}
'gs://${bucket}/${node_id}${path}' ${'\\'}
2>&1 | logger -st gsutil
% endfor

View file

@ -0,0 +1,10 @@
#!/bin/bash
FILENAME=$1
TMPFILE=$(mktemp /tmp/archive_file.XXXXXXXXXX)
BUCKET=$(cat /etc/gcloud/gcloud.json | jq -r .bucket)
NODE=$(cat /etc/archive/archive.json | jq -r .node_id)
MASTERKEY=$(cat /etc/gocryptfs/masterkey)
gsutil cat "gs://$BUCKET/$NODE$FILENAME" > "$TMPFILE"
/opt/gocryptfs-inspect/gocryptfs.py --aessiv --config=/etc/gocryptfs/gocryptfs.conf --masterkey="$MASTERKEY" "$TMPFILE"

View file

@ -0,0 +1,15 @@
#!/bin/bash
FILENAME=$1
ARCHIVE=$(/opt/archive/get_file "$FILENAME" | sha256sum)
ORIGINAL=$(cat "$FILENAME" | sha256sum)
if [[ "$ARCHIVE" == "$ORIGINAL" ]]
then
echo "OK"
exit 0
else
echo "ERROR"
exit 1
fi

43
bundles/archive/items.py Normal file
View file

@ -0,0 +1,43 @@
assert node.has_bundle('gcloud')
assert node.has_bundle('gocryptfs')
assert node.has_bundle('gocryptfs-inspect')
assert node.has_bundle('systemd')
from json import dumps
directories['/opt/archive'] = {}
directories['/etc/archive'] = {}
files['/etc/archive/archive.json'] = {
'content': dumps(
{
'node_id': node.metadata.get('id'),
**node.metadata.get('archive'),
},
indent=4,
sort_keys=True
),
}
files['/opt/archive/archive'] = {
'content_type': 'mako',
'mode': '700',
'context': {
'node_id': node.metadata.get('id'),
'paths': node.metadata.get('archive/paths'),
'bucket': node.metadata.get('gcloud/bucket'),
'processes': 4,
'threads': 4,
},
'needs': [
'bundle:gcloud',
],
}
files['/opt/archive/get_file'] = {
'mode': '700',
}
files['/opt/archive/validate_file'] = {
'mode': '700',
}

View file

@ -0,0 +1,45 @@
defaults = {
'apt': {
'packages': {
'jq': {},
},
},
'archive': {
'paths': {},
},
}
@metadata_reactor.provides(
'archive/paths',
)
def paths(metadata):
return {
'archive': {
'paths': {
path: {
'encrypted_path': f'/mnt/archive.enc{path}',
'exclude': [
'^\..*',
'/\..*',
],
} for path in metadata.get('archive/paths')
},
}
}
@metadata_reactor.provides(
'gocryptfs/paths',
)
def gocryptfs(metadata):
return {
'gocryptfs': {
'paths': {
path: {
'mountpoint': options['encrypted_path'],
'reverse': True,
} for path, options in metadata.get('archive/paths').items()
},
}
}

View file

@ -0,0 +1,47 @@
#!/usr/bin/env python3
import json
from subprocess import check_output
from datetime import datetime, timedelta
now = datetime.now()
two_days_ago = now - timedelta(days=2)
with open('/etc/backup-freshness-check.json', 'r') as file:
config = json.load(file)
local_datasets = check_output(['zfs', 'list', '-H', '-o', 'name']).decode().splitlines()
errors = set()
for dataset in config['datasets']:
if f'tank/{dataset}' not in local_datasets:
errors.add(f'dataset "{dataset}" not present at all')
continue
snapshots = [
snapshot
for snapshot in check_output(['zfs', 'list', '-H', '-o', 'name', '-t', 'snapshot', f'tank/{dataset}', '-s', 'creation']).decode().splitlines()
if f"@{config['prefix']}" in snapshot
]
if not snapshots:
errors.add(f'dataset "{dataset}" has no backup snapshots')
continue
newest_backup_snapshot = snapshots[-1]
snapshot_datetime = datetime.utcfromtimestamp(
int(check_output(['zfs', 'list', '-p', '-H', '-o', 'creation', '-t', 'snapshot', newest_backup_snapshot]).decode())
)
if snapshot_datetime < two_days_ago:
days_ago = (now - snapshot_datetime).days
errors.add(f'dataset "{dataset}" has not been backed up for {days_ago} days')
continue
if errors:
for error in errors:
print(error)
exit(2)
else:
print(f"all {len(config['datasets'])} datasets have fresh backups.")

View file

@ -0,0 +1,15 @@
from json import dumps
from bundlewrap.metadata import MetadataJSONEncoder
files = {
'/etc/backup-freshness-check.json': {
'content': dumps({
'prefix': node.metadata.get('backup-freshness-check/prefix'),
'datasets': node.metadata.get('backup-freshness-check/datasets'),
}, indent=4, sort_keys=True, cls=MetadataJSONEncoder),
},
'/usr/lib/nagios/plugins/check_backup_freshness': {
'mode': '0755',
},
}

View file

@ -0,0 +1,37 @@
defaults = {
'backup-freshness-check': {
'server': node.name,
'prefix': 'auto-backup_',
'datasets': {},
},
'monitoring': {
'services': {
'backup freshness': {
'vars.command': '/usr/lib/nagios/plugins/check_backup_freshness',
'check_interval': '6h',
'vars.sudo': True,
},
},
},
}
@metadata_reactor.provides(
'backup-freshness-check/datasets'
)
def backup_freshness_check(metadata):
return {
'backup-freshness-check': {
'datasets': {
f"{other_node.metadata.get('id')}/{dataset}"
for other_node in repo.nodes
if not other_node.dummy
and other_node.has_bundle('backup')
and other_node.has_bundle('zfs')
and other_node.metadata.get('backup/server') == metadata.get('backup-freshness-check/server')
for dataset, options in other_node.metadata.get('zfs/datasets').items()
if options.get('backup', True)
and not options.get('mountpoint', None) in [None, 'none']
},
},
}

View file

@ -0,0 +1,3 @@
#!/bin/bash
zfs send tank/nextcloud@test1 | ssh backup-receiver@10.0.0.5 sudo zfs recv tank/nextcloud

View file

@ -0,0 +1,122 @@
from ipaddress import ip_interface
defaults = {
'apt': {
'packages': {
'rsync': {},
},
},
'users': {
'backup-receiver': {
'authorized_keys': set(),
},
},
'sudoers': {
'backup-receiver': {
'/usr/bin/rsync',
'/sbin/zfs',
},
},
'zfs': {
'datasets': {
'tank': {
'recordsize': "1048576",
},
},
},
}
@metadata_reactor.provides(
'zfs/datasets'
)
def zfs(metadata):
datasets = {}
for other_node in repo.nodes:
if (
not other_node.dummy and
other_node.has_bundle('backup') and
other_node.metadata.get('backup/server') == node.name
):
id = other_node.metadata.get('id')
base_dataset = f'tank/{id}'
# container
datasets[base_dataset] = {
'mountpoint': None,
'readonly': 'on',
'compression': 'lz4',
'com.sun:auto-snapshot': 'false',
'backup': False,
}
# for rsync backups
datasets[f'{base_dataset}/fs'] = {
'mountpoint': f"/mnt/backups/{id}",
'readonly': 'off',
'compression': 'lz4',
'com.sun:auto-snapshot': 'true',
'backup': False,
}
# for zfs send/recv
if other_node.has_bundle('zfs'):
# base datasets for each tank
for pool in other_node.metadata.get('zfs/pools'):
datasets[f'{base_dataset}/{pool}'] = {
'mountpoint': None,
'readonly': 'on',
'compression': 'lz4',
'com.sun:auto-snapshot': 'false',
'backup': False,
}
# actual datasets
for path in other_node.metadata.get('backup/paths'):
for dataset, config in other_node.metadata.get('zfs/datasets').items():
if path == config.get('mountpoint'):
datasets[f'{base_dataset}/{dataset}'] = {
'mountpoint': None,
'readonly': 'on',
'compression': 'lz4',
'com.sun:auto-snapshot': 'false',
'backup': False,
}
continue
return {
'zfs': {
'datasets': datasets,
},
}
@metadata_reactor.provides(
'dns',
)
def dns(metadata):
return {
'dns': {
metadata.get('backup-server/hostname'): repo.libs.ip.get_a_records(metadata),
}
}
@metadata_reactor.provides(
'users/backup-receiver/authorized_keys'
)
def backup_authorized_keys(metadata):
return {
'users': {
'backup-receiver': {
'authorized_keys': {
other_node.metadata.get('users/root/pubkey')
for other_node in repo.nodes
if other_node.has_bundle('backup')
and other_node.metadata.get('backup/server') == node.name
},
},
},
}

View file

@ -0,0 +1,31 @@
#!/bin/bash
set -u
# FIXME: inelegant
% if wol_command:
${wol_command}
% endif
exit=0
failed_paths=""
for path in $(jq -r '.paths | .[]' < /etc/backup/config.json)
do
echo backing up $path
/opt/backup/backup_path "$path"
# set exit to non-zero if any backup fails
if [ $? -ne 0 ]
then
echo ERROR: backing up $path failed >&2
exit=5
failed_paths="$failed_paths $path"
fi
done
if [ $exit -ne 0 ]
then
echo "ERROR: failed to backup paths: $failed_paths" >&2
fi
exit $exit

View file

@ -0,0 +1,16 @@
#!/bin/bash
set -exu
path=$1
if zfs list -H -o mountpoint | grep -q "^$path$"
then
/opt/backup/backup_path_via_zfs "$path"
elif test -e "$path"
then
/opt/backup/backup_path_via_rsync "$path"
else
echo "UNKNOWN PATH: $path"
exit 1
fi

View file

@ -0,0 +1,20 @@
#!/bin/bash
set -exu
path=$1
uuid=$(jq -r .client_uuid < /etc/backup/config.json)
server=$(jq -r .server_hostname < /etc/backup/config.json)
ssh="ssh -o ConnectTimeout=5 backup-receiver@$server"
if test -d "$path"
then
postfix="/"
elif test -f "$path"
then
postfix=""
else
exit 1
fi
rsync -av --rsync-path="sudo rsync" "$path$postfix" "backup-receiver@$server:/mnt/backups/$uuid$path$postfix"

View file

@ -0,0 +1,67 @@
#!/bin/bash
set -eu
path=$1
uuid=$(jq -r .client_uuid < /etc/backup/config.json)
server=$(jq -r .server_hostname < /etc/backup/config.json)
ssh="ssh -o ConnectTimeout=5 backup-receiver@$server"
source_dataset=$(zfs list -H -o mountpoint,name | grep -P "^$path\t" | cut -d $'\t' -f 2)
target_dataset="tank/$uuid/$source_dataset"
target_dataset_parent=$(echo $target_dataset | rev | cut -d / -f 2- | rev)
bookmark_prefix="auto-backup_"
new_bookmark="$bookmark_prefix$(date +"%Y-%m-%d_%H:%M:%S")"
for var in path uuid server ssh source_dataset target_dataset target_dataset_parent new_bookmark
do
[[ -z "${!var}" ]] && echo "ERROR - $var is empty" && exit 96
done
$ssh true || (echo "ERROR - cant ssh connect to $server" && exit 97)
echo "BACKUP ZFS DATASET - PATH: $path, SERVER: $server, UUID: $uuid, SOURCE_DATASET: $source_dataset, TARGET_DATASET: $target_dataset"
if ! $ssh sudo zfs list -t filesystem -H -o name | grep -q "^$target_dataset_parent$"
then
echo "CREATING PARENT DATASET..."
$ssh sudo zfs create -p -o mountpoint=none "$target_dataset_parent"
fi
zfs snap "$source_dataset@$new_bookmark"
if zfs list -t bookmark -H -o name | grep "^$source_dataset#$bookmark_prefix" | wc -l | grep -q "^0$"
then
echo "INITIAL BACKUP"
# run in a subshell, otherwise ctrl+c would lead to a 0 exit code
$(zfs send -v "$source_dataset@$new_bookmark" | $ssh sudo zfs recv -F "$target_dataset")
else
echo "INCREMENTAL BACKUP"
last_bookmark=$(zfs list -t bookmark -H -o name | grep "^$source_dataset#$bookmark_prefix" | sort | tail -1 | cut -d '#' -f 2)
[[ -z "$last_bookmark" ]] && echo "ERROR - last_bookmark is empty" && exit 98
$(zfs send -v -L -i "#$last_bookmark" "$source_dataset@$new_bookmark" | $ssh sudo zfs recv "$target_dataset")
fi
if [[ "$?" == "0" ]]
then
# delete old local bookmarks
for destroyable_bookmark in $(zfs list -t bookmark -H -o name "$source_dataset" | grep "^$source_dataset#$bookmark_prefix")
do
zfs destroy "$destroyable_bookmark"
done
# delete remote snapshots from previous bookmarks (except the newest, even if not strictly necessary; it may help with resuming)
for destroyable_snapshot in $($ssh sudo zfs list -t snapshot -H -o name "$target_dataset" | grep "^$target_dataset@$bookmark_prefix" | grep -v "$new_bookmark")
do
$ssh sudo zfs destroy "$destroyable_snapshot"
done
zfs bookmark "$source_dataset@$new_bookmark" "$source_dataset#$new_bookmark"
zfs destroy "$source_dataset@$new_bookmark" # keep snapshots?
echo "SUCCESS"
else
zfs destroy "$source_dataset@$new_bookmark"
echo "ERROR"
exit 99
fi
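
A note on the design above: a ZFS bookmark can serve as the origin of an incremental `zfs send`, so after a successful transfer the script keeps only a lightweight bookmark locally and destroys the local snapshot, while the newest snapshot on the backup server remains the common base for the next incremental run.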

37
bundles/backup/items.py Normal file
View file

@ -0,0 +1,37 @@
from json import dumps
backup_node = repo.get_node(node.metadata.get('backup/server'))
directories['/opt/backup'] = {}
files['/opt/backup/backup_all'] = {
'mode': '700',
'content_type': 'mako',
'context': {
'wol_command': backup_node.metadata.get('wol-sleeper/wake_command', False),
},
}
files['/opt/backup/backup_path'] = {
'mode': '700',
}
files['/opt/backup/backup_path_via_zfs'] = {
'mode': '700',
}
files['/opt/backup/backup_path_via_rsync'] = {
'mode': '700',
}
directories['/etc/backup'] = {}
files['/etc/backup/config.json'] = {
'content': dumps(
{
'server_hostname': backup_node.metadata.get('backup-server/hostname'),
'client_uuid': node.metadata.get('id'),
'paths': sorted(set(node.metadata.get('backup/paths'))),
},
indent=4,
sort_keys=True
),
}

View file

@ -0,0 +1,30 @@
defaults = {
'apt': {
'packages': {
'jq': {
'needed_by': {
'svc_systemd:backup.timer',
},
},
'rsync': {
'needed_by': {
'svc_systemd:backup.timer',
},
},
},
},
'backup': {
'server': None,
'paths': set(),
},
'systemd-timers': {
f'backup': {
'command': '/opt/backup/backup_all',
'when': '1:00',
'persistent': True,
'after': {
'network-online.target',
},
},
},
}

View file

@ -0,0 +1,69 @@
from ipaddress import ip_interface
@metadata_reactor.provides(
'dns',
)
def acme_records(metadata):
domains = set()
for other_node in repo.nodes:
for domain, conf in other_node.metadata.get('letsencrypt/domains', {}).items():
domains.add(domain)
domains.update(conf.get('aliases', []))
return {
'dns': {
f'_acme-challenge.{domain}': {
'CNAME': {f"{domain}.{metadata.get('bind/acme_zone')}."},
}
for domain in domains
}
}
@metadata_reactor.provides(
'bind/acls/acme',
'bind/views/external/keys/acme',
'bind/views/external/zones',
)
def acme_zone(metadata):
allowed_ips = {
*{
str(ip_interface(other_node.metadata.get('network/internal/ipv4')).ip)
for other_node in repo.nodes
if other_node.metadata.get('letsencrypt/domains', {})
},
*{
str(ip_interface(other_node.metadata.get('wireguard/my_ip')).ip)
for other_node in repo.nodes
if other_node.has_bundle('wireguard')
},
}
return {
'bind': {
'acls': {
'acme': {
'key acme',
'!{ !{' + ' '.join(f'{ip};' for ip in sorted(allowed_ips)) + '}; any;}',
},
},
'views': {
'external': {
'keys': {
'acme': {},
},
'zones': {
metadata.get('bind/acme_zone'): {
'allow_update': {
'acme',
},
},
},
},
},
},
}
#https://lists.isc.org/pipermail/bind-users/2006-January/061051.html

23
bundles/bind/files/db Normal file
View file

@ -0,0 +1,23 @@
<%!
def column_width(column, table):
return max(map(lambda row: len(row[column]), table)) if table else 0
%>\
$TTL 600
@ IN SOA ${hostname}. admin.${hostname}. (
2021111709 ;Serial
3600 ;Refresh
200 ;Retry
1209600 ;Expire
900 ;Negative response caching TTL
)
% for record in sorted(records, key=lambda r: (tuple(reversed(r['name'].split('.'))), r['type'], r['value'])):
(${(record['name'] or '@').rjust(column_width('name', records))}) \
IN \
${record['type'].ljust(column_width('type', records))} \
% if record['type'] == 'TXT':
(${' '.join('"'+record['value'][i:i+255]+'"' for i in range(0, len(record['value']), 255))})
% else:
${record['value']}
% endif
% endfor
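
The TXT branch above splits long record values into 255-character quoted chunks, since a single character-string in a TXT record is limited to 255 bytes. The same expression in plain Python, for illustration only:

```python
# Illustration of the template's TXT handling: one quoted chunk per 255 characters.
def split_txt(value, limit=255):
    chunks = (value[i:i + limit] for i in range(0, len(value), limit))
    return '(' + ' '.join(f'"{chunk}"' for chunk in chunks) + ')'

dkim = 'v=DKIM1; k=rsa; p=' + 'A' * 400
print(split_txt(dkim))  # -> two quoted chunks wrapped in parentheses
```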

View file

@ -0,0 +1,2 @@
RESOLVCONF=no
OPTIONS="-u bind"

View file

@ -0,0 +1,6 @@
statistics-channels {
inet 127.0.0.1 port 8053;
};
include "/etc/bind/named.conf.options";
include "/etc/bind/named.conf.local";

View file

@ -0,0 +1,70 @@
# KEYS
% for view_name, view_conf in views.items():
% for key_name, key_conf in sorted(view_conf['keys'].items()):
key "${key_name}" {
algorithm hmac-sha512;
secret "${key_conf['token']}";
};
% endfor
% endfor
# ACLS
% for acl_name, acl_content in acls.items():
acl "${acl_name}" {
% for ac in sorted(acl_content, key=lambda e: (not e.startswith('!'), not e.startswith('key'), e)):
${ac};
% endfor
};
% endfor
# VIEWS
% for view_name, view_conf in views.items():
view "${view_name}" {
match-clients {
${view_name};
};
% if view_conf['is_internal']:
recursion yes;
% else:
recursion no;
rate-limit {
responses-per-second 2;
window 25;
};
% endif
forward only;
forwarders {
1.1.1.1;
9.9.9.9;
8.8.8.8;
};
% for zone_name, zone_conf in sorted(view_conf['zones'].items()):
zone "${zone_name}" {
% if type == 'slave' and zone_conf.get('allow_update', []):
type slave;
masters { ${master_ip}; };
% else:
type master;
% if zone_conf.get('allow_update', []):
allow-update {
% for allow_update in zone_conf['allow_update']:
${allow_update};
% endfor
};
% endif
% endif
file "/var/lib/bind/${view_name}/${zone_name}";
};
% endfor
include "/etc/bind/named.conf.default-zones";
include "/etc/bind/zones.rfc1918";
};
% endfor

View file

@ -0,0 +1,16 @@
options {
directory "/var/cache/bind";
dnssec-validation auto;
listen-on-v6 { any; };
allow-query { any; };
max-cache-size 30%;
querylog yes;
% if type == 'master':
notify yes;
also-notify { ${' '.join([f'{ip};' for ip in slave_ips])} };
allow-transfer { ${' '.join([f'{ip};' for ip in slave_ips])} };
% endif
};

144
bundles/bind/items.py Normal file
View file

@ -0,0 +1,144 @@
from ipaddress import ip_address, ip_interface
from datetime import datetime
from hashlib import sha3_512
if node.metadata.get('bind/type') == 'master':
master_node = node
else:
master_node = repo.get_node(node.metadata.get('bind/master_node'))
directories[f'/var/lib/bind'] = {
'owner': 'bind',
'group': 'bind',
'purge': True,
'needs': [
'pkg_apt:bind9',
],
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
],
}
files['/etc/default/bind9'] = {
'source': 'defaults',
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
],
}
files['/etc/bind/named.conf'] = {
'owner': 'root',
'group': 'bind',
'needs': [
'pkg_apt:bind9',
],
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
],
}
files['/etc/bind/named.conf.options'] = {
'content_type': 'mako',
'context': {
'type': node.metadata.get('bind/type'),
'slave_ips': node.metadata.get('bind/slave_ips', []),
'master_ip': node.metadata.get('bind/master_ip', None),
},
'owner': 'root',
'group': 'bind',
'needs': [
'pkg_apt:bind9',
],
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
],
}
files['/etc/bind/named.conf.local'] = {
'content_type': 'mako',
'context': {
'type': node.metadata.get('bind/type'),
'master_ip': node.metadata.get('bind/master_ip', None),
'acls': {
**master_node.metadata.get('bind/acls'),
**{
view_name: view_conf['match_clients']
for view_name, view_conf in master_node.metadata.get('bind/views').items()
},
},
'views': dict(sorted(
master_node.metadata.get('bind/views').items(),
key=lambda e: (e[1].get('default', False), e[0]),
)),
},
'owner': 'root',
'group': 'bind',
'needs': [
'pkg_apt:bind9',
],
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
],
}
for view_name, view_conf in master_node.metadata.get('bind/views').items():
directories[f"/var/lib/bind/{view_name}"] = {
'owner': 'bind',
'group': 'bind',
'purge': True,
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
],
}
for zone_name, zone_conf in view_conf['zones'].items():
files[f"/var/lib/bind/{view_name}/{zone_name}"] = {
'source': 'db',
'content_type': 'mako',
'unless': f"test -f /var/lib/bind/{view_name}/{zone_name}" if zone_conf.get('allow_update', False) else 'false',
'context': {
'serial': datetime.now().strftime('%Y%m%d%H'),
'records': zone_conf['records'],
'hostname': node.metadata.get('bind/hostname'),
'type': node.metadata.get('bind/type'),
},
'owner': 'bind',
'group': 'bind',
'needed_by': [
'svc_systemd:bind9',
],
'triggers': [
'svc_systemd:bind9:reload',
],
}
svc_systemd['bind9'] = {}
actions['named-checkconf'] = {
'command': 'named-checkconf -z',
'unless': 'named-checkconf -z',
'needs': [
'svc_systemd:bind9',
'svc_systemd:bind9:reload',
]
}

257
bundles/bind/metadata.py Normal file
View file

@ -0,0 +1,257 @@
from ipaddress import ip_interface
from json import dumps
h = repo.libs.hashable.hashable
repo.libs.bind.repo = repo
defaults = {
'apt': {
'packages': {
'bind9': {},
},
},
'bind': {
'slaves': {},
'acls': {
'our-nets': {
'127.0.0.1',
'10.0.0.0/8',
'169.254.0.0/16',
'172.16.0.0/12',
'192.168.0.0/16',
}
},
'views': {
'internal': {
'is_internal': True,
'keys': {},
'match_clients': {
'our-nets',
},
'zones': {},
},
'external': {
'default': True,
'is_internal': False,
'keys': {},
'match_clients': {
'any',
},
'zones': {},
},
},
'zones': set(),
},
'nftables': {
'input': {
'tcp dport 53 accept',
'udp dport 53 accept',
},
},
'telegraf': {
'config': {
'inputs': {
'bind': [{
'urls': ['http://localhost:8053/xml/v3'],
'gather_memory_contexts': False,
'gather_views': True,
}],
},
},
},
}
@metadata_reactor.provides(
'bind/type',
'bind/master_ip',
'bind/slave_ips',
)
def master_slave(metadata):
if metadata.get('bind/master_node', None):
return {
'bind': {
'type': 'slave',
'master_ip': str(ip_interface(repo.get_node(metadata.get('bind/master_node')).metadata.get('network/external/ipv4')).ip),
}
}
else:
return {
'bind': {
'type': 'master',
'slave_ips': {
str(ip_interface(repo.get_node(slave).metadata.get('network/external/ipv4')).ip)
for slave in metadata.get('bind/slaves')
}
}
}
@metadata_reactor.provides(
'dns',
)
def dns(metadata):
return {
'dns': {
metadata.get('bind/hostname'): repo.libs.ip.get_a_records(metadata),
}
}
@metadata_reactor.provides(
'bind/views',
)
def collect_records(metadata):
if metadata.get('bind/type') == 'slave':
return {}
views = {}
for view_name, view_conf in metadata.get('bind/views').items():
for other_node in repo.nodes:
for fqdn, records in other_node.metadata.get('dns', {}).items():
matching_zones = sorted(
filter(
lambda potential_zone: fqdn.endswith(potential_zone),
metadata.get('bind/zones')
),
key=len,
)
if matching_zones:
zone = matching_zones[-1]
else:
continue
name = fqdn[0:-len(zone) - 1]
for type, values in records.items():
for value in values:
if repo.libs.bind.record_matches_view(value, type, name, zone, view_name, metadata):
views\
.setdefault(view_name, {})\
.setdefault('zones', {})\
.setdefault(zone, {})\
.setdefault('records', set())\
.add(
h({'name': name, 'type': type, 'value': value})
)
return {
'bind': {
'views': views,
},
}
@metadata_reactor.provides(
'bind/views',
)
def ns_records(metadata):
if metadata.get('bind/type') == 'slave':
return {}
nameservers = [
node.metadata.get('bind/hostname'),
*[
repo.get_node(slave).metadata.get('bind/hostname')
for slave in node.metadata.get('bind/slaves')
]
]
return {
'bind': {
'views': {
view_name: {
'zones': {
zone_name: {
'records': {
# FIXME: bw currently can't handle lists of dicts :(
h({'name': '@', 'type': 'NS', 'value': f"{nameserver}."})
for nameserver in nameservers
}
}
for zone_name, zone_conf in view_conf['zones'].items()
}
}
for view_name, view_conf in metadata.get('bind/views').items()
},
},
}
@metadata_reactor.provides(
'bind/slaves',
)
def slaves(metadata):
if metadata.get('bind/type') == 'slave':
return {}
return {
'bind': {
'slaves': [
other_node.name
for other_node in repo.nodes
if other_node.has_bundle('bind') and other_node.metadata.get('bind/master_node', None) == node.name
],
},
}
@metadata_reactor.provides(
'bind/views',
)
def generate_keys(metadata):
if metadata.get('bind/type') == 'slave':
return {}
return {
'bind': {
'views': {
view_name: {
'keys': {
key: {
'token':repo.libs.hmac.hmac_sha512(
key,
str(repo.vault.random_bytes_as_base64_for(
f"{metadata.get('id')} bind key {key}",
length=32,
)),
)
}
for key in view_conf['keys']
}
}
for view_name, view_conf in metadata.get('bind/views').items()
}
}
}
@metadata_reactor.provides(
'bind/views',
)
def generate_acl_entries_for_keys(metadata):
if metadata.get('bind/type') == 'slave':
return {}
return {
'bind': {
'views': {
view_name: {
'match_clients': {
# allow keys from this view
*{
f'key {key}'
for key in view_conf['keys']
},
# reject keys from other views
*{
f'! key {key}'
for other_view_name, other_view_conf in metadata.get('bind/views').items()
if other_view_name != view_name
for key in other_view_conf.get('keys', [])
}
}
}
for view_name, view_conf in metadata.get('bind/views').items()
},
},
}

View file

View file

@ -0,0 +1,38 @@
defaults = {
'apt': {
'packages': {
'build-essential': {},
# crystal
'clang': {},
'libssl-dev': {},
'libpcre3-dev': {},
'libgc-dev': {},
'libevent-dev': {},
'zlib1g-dev': {},
},
},
'users': {
'build-agent': {
'home': '/var/lib/build-agent',
},
},
}
@metadata_reactor.provides(
'users/build-agent/authorized_users',
)
def ssh_keys(metadata):
return {
'users': {
'build-agent': {
'authorized_users': {
f'build-server@{other_node.name}'
for other_node in repo.nodes
if other_node.has_bundle('build-server')
for architecture in other_node.metadata.get('build-server/architectures').values()
if architecture['node'] == node.name
},
},
},
}

View file

@ -0,0 +1,9 @@
for project, options in node.metadata.get('build-ci').items():
directories[options['path']] = {
'owner': 'build-ci',
'group': options['group'],
'mode': '770',
'needs': [
'user:build-ci',
],
}

View file

@ -0,0 +1,29 @@
from shlex import quote
defaults = {
'build-ci': {},
}
@metadata_reactor.provides(
'users/build-ci/authorized_users',
'sudoers/build-ci',
)
def ssh_keys(metadata):
return {
'users': {
'build-ci': {
'authorized_users': {
f'build-server@{other_node.name}'
for other_node in repo.nodes
if other_node.has_bundle('build-server')
},
},
},
'sudoers': {
'build-ci': {
f"/usr/bin/chown -R build-ci\\:{quote(ci['group'])} {quote(ci['path'])}"
for ci in metadata.get('build-ci').values()
}
},
}

View file

@ -0,0 +1,2 @@
JSON=$(cat bundles/build-server/example.json)
curl -X POST 'https://build.sublimity.de/crystal?file=procio.cr' -H "Content-Type: application/json" --data-binary @- <<< $JSON

View file

@ -0,0 +1,169 @@
{
"after": "122d7843c7814079e8df4919b0208c95ec7c75e3",
"before": "7a358255247926363ef0ef34111f0bc786a8c6f4",
"commits": [
{
"added": [],
"author": {
"email": "mwiegand@seibert-media.net",
"name": "mwiegand",
"username": ""
},
"committer": {
"email": "mwiegand@seibert-media.net",
"name": "mwiegand",
"username": ""
},
"id": "122d7843c7814079e8df4919b0208c95ec7c75e3",
"message": "wip\n",
"modified": [
"README.md"
],
"removed": [],
"timestamp": "2021-11-16T22:10:05+01:00",
"url": "https://git.sublimity.de/cronekorkn/telegraf-procio/commit/122d7843c7814079e8df4919b0208c95ec7c75e3",
"verification": null
}
],
"compare_url": "https://git.sublimity.de/cronekorkn/telegraf-procio/compare/7a358255247926363ef0ef34111f0bc786a8c6f4...122d7843c7814079e8df4919b0208c95ec7c75e3",
"head_commit": {
"added": [],
"author": {
"email": "mwiegand@seibert-media.net",
"name": "mwiegand",
"username": ""
},
"committer": {
"email": "mwiegand@seibert-media.net",
"name": "mwiegand",
"username": ""
},
"id": "122d7843c7814079e8df4919b0208c95ec7c75e3",
"message": "wip\n",
"modified": [
"README.md"
],
"removed": [],
"timestamp": "2021-11-16T22:10:05+01:00",
"url": "https://git.sublimity.de/cronekorkn/telegraf-procio/commit/122d7843c7814079e8df4919b0208c95ec7c75e3",
"verification": null
},
"pusher": {
"active": false,
"avatar_url": "https://git.sublimity.de/user/avatar/cronekorkn/-1",
"created": "2021-06-13T19:19:25+02:00",
"description": "",
"email": "i@ckn.li",
"followers_count": 0,
"following_count": 0,
"full_name": "",
"id": 1,
"is_admin": false,
"language": "",
"last_login": "0001-01-01T00:00:00Z",
"location": "",
"login": "cronekorkn",
"prohibit_login": false,
"restricted": false,
"starred_repos_count": 0,
"username": "cronekorkn",
"visibility": "public",
"website": ""
},
"ref": "refs/heads/master",
"repository": {
"allow_merge_commits": true,
"allow_rebase": true,
"allow_rebase_explicit": true,
"allow_squash_merge": true,
"archived": false,
"avatar_url": "",
"clone_url": "https://git.sublimity.de/cronekorkn/telegraf-procio.git",
"created_at": "2021-11-05T18:46:04+01:00",
"default_branch": "master",
"default_merge_style": "merge",
"description": "",
"empty": false,
"fork": false,
"forks_count": 0,
"full_name": "cronekorkn/telegraf-procio",
"has_issues": true,
"has_projects": true,
"has_pull_requests": true,
"has_wiki": true,
"html_url": "https://git.sublimity.de/cronekorkn/telegraf-procio",
"id": 5,
"ignore_whitespace_conflicts": false,
"internal": false,
"internal_tracker": {
"allow_only_contributors_to_track_time": true,
"enable_issue_dependencies": true,
"enable_time_tracker": true
},
"mirror": false,
"mirror_interval": "",
"name": "telegraf-procio",
"open_issues_count": 0,
"open_pr_counter": 0,
"original_url": "",
"owner": {
"active": false,
"avatar_url": "https://git.sublimity.de/user/avatar/cronekorkn/-1",
"created": "2021-06-13T19:19:25+02:00",
"description": "",
"email": "i@ckn.li",
"followers_count": 0,
"following_count": 0,
"full_name": "",
"id": 1,
"is_admin": false,
"language": "",
"last_login": "0001-01-01T00:00:00Z",
"location": "",
"login": "cronekorkn",
"prohibit_login": false,
"restricted": false,
"starred_repos_count": 0,
"username": "cronekorkn",
"visibility": "public",
"website": ""
},
"parent": null,
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"release_counter": 0,
"size": 28,
"ssh_url": "git@git.sublimity.de:cronekorkn/telegraf-procio.git",
"stars_count": 0,
"template": false,
"updated_at": "2021-11-16T21:41:40+01:00",
"watchers_count": 1,
"website": ""
},
"sender": {
"active": false,
"avatar_url": "https://git.sublimity.de/user/avatar/cronekorkn/-1",
"created": "2021-06-13T19:19:25+02:00",
"description": "",
"email": "i@ckn.li",
"followers_count": 0,
"following_count": 0,
"full_name": "",
"id": 1,
"is_admin": false,
"language": "",
"last_login": "0001-01-01T00:00:00Z",
"location": "",
"login": "cronekorkn",
"prohibit_login": false,
"restricted": false,
"starred_repos_count": 0,
"username": "cronekorkn",
"visibility": "public",
"website": ""
}
}

View file

@ -0,0 +1,31 @@
#!/bin/bash
set -xu
CONFIG_PATH=${config_path}
JSON="$1"
REPO_NAME=$(jq -r .repository.name <<< $JSON)
CLONE_URL=$(jq -r .repository.clone_url <<< $JSON)
REPO_BRANCH=$(jq -r .ref <<< $JSON | cut -d'/' -f3)
SSH_OPTIONS='-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
for INTEGRATION in "$(cat $CONFIG_PATH | jq -r '.ci | values[]')"
do
[[ $(jq -r '.repo' <<< $INTEGRATION) = "$REPO_NAME" ]] || continue
[[ $(jq -r '.branch' <<< $INTEGRATION) = "$REPO_BRANCH" ]] || continue
HOSTNAME=$(jq -r '.hostname' <<< $INTEGRATION)
DEST_PATH=$(jq -r '.path' <<< $INTEGRATION)
DEST_GROUP=$(jq -r '.group' <<< $INTEGRATION)
[[ -z "$HOSTNAME" ]] || [[ -z "$DEST_PATH" ]] || [[ -z "$DEST_GROUP" ]] && exit 5
cd ~
rm -rf "$REPO_NAME"
git clone "$CLONE_URL" "$REPO_NAME"
ssh $SSH_OPTIONS "build-ci@$HOSTNAME" "find \"$DEST_PATH\" -mindepth 1 -delete"
scp -r $SSH_OPTIONS "$REPO_NAME"/* "build-ci@$HOSTNAME:$DEST_PATH"
ssh $SSH_OPTIONS "build-ci@$HOSTNAME" "sudo chown -R build-ci:$DEST_GROUP $(printf "%q" "$DEST_PATH")"
done

View file

@ -0,0 +1,32 @@
#!/bin/bash
set -exu
DOWNLOAD_SERVER="${download_server}"
CONFIG=$(cat ${config_path})
JSON="$1"
ARGS="$2"
REPO_NAME=$(jq -r .repository.name <<< $JSON)
CLONE_URL=$(jq -r .repository.clone_url <<< $JSON)
BUILD_FILE=$(jq -r .file <<< $ARGS)
DATE=$(date --utc +%s)
cd ~
rm -rf "$REPO_NAME"
git clone "$CLONE_URL"
cd "$REPO_NAME"
shards install
for ARCH in $(jq -r '.architectures | keys[]' <<< $CONFIG)
do
TARGET=$(jq -r .architectures.$ARCH.target <<< $CONFIG)
IP=$(jq -r .architectures.$ARCH.ip <<< $CONFIG)
BUILD_CMD=$(crystal build "$BUILD_FILE" --cross-compile --target="$TARGET" --release -o "$REPO_NAME")
scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "$REPO_NAME.o" "build-agent@$IP:~"
ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "build-agent@$IP" $BUILD_CMD
scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "build-agent@$IP:~/$REPO_NAME" .
ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "downloads@$DOWNLOAD_SERVER" mkdir -p "~/$REPO_NAME"
scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "$REPO_NAME" "downloads@$DOWNLOAD_SERVER:~/$REPO_NAME/$REPO_NAME-$ARCH-$DATE"
ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "downloads@$DOWNLOAD_SERVER" ln -sf "$REPO_NAME-$ARCH-$DATE" "~/$REPO_NAME/$REPO_NAME-$ARCH-latest"
done

View file

@ -0,0 +1,32 @@
import json
from bundlewrap.metadata import MetadataJSONEncoder
directories = {
'/opt/build-server/strategies': {
'owner': 'build-server',
},
}
files = {
'/etc/build-server.json': {
'owner': 'build-server',
'content': json.dumps(node.metadata.get('build-server'), indent=4, sort_keys=True, cls=MetadataJSONEncoder)
},
'/opt/build-server/strategies/crystal': {
'content_type': 'mako',
'owner': 'build-server',
'mode': '0777', # FIXME
'context': {
'config_path': '/etc/build-server.json',
'download_server': node.metadata.get('build-server/download_server_ip'),
},
},
'/opt/build-server/strategies/ci': {
'content_type': 'mako',
'owner': 'build-server',
'mode': '0777', # FIXME
'context': {
'config_path': '/etc/build-server.json',
},
},
}

View file

@ -0,0 +1,78 @@
from ipaddress import ip_interface
defaults = {
'flask': {
'build-server' : {
'git_url': "https://git.sublimity.de/cronekorkn/build-server.git",
'port': 4000,
'app_module': 'build_server',
'user': 'build-server',
'group': 'build-server',
'timeout': 900,
'env': {
'CONFIG': '/etc/build-server.json',
'STRATEGIES_DIR': '/opt/build-server/strategies',
},
},
},
'users': {
'build-server': {
'home': '/var/lib/build-server',
},
},
}
@metadata_reactor.provides(
'build-server',
)
def agent_conf(metadata):
download_server = repo.get_node(metadata.get('build-server/download_server'))
return {
'build-server': {
'architectures': {
architecture: {
'ip': str(ip_interface(repo.get_node(conf['node']).metadata.get('network/internal/ipv4')).ip),
}
for architecture, conf in metadata.get('build-server/architectures').items()
},
'download_server_ip': str(ip_interface(download_server.metadata.get('network/internal/ipv4')).ip),
},
}
@metadata_reactor.provides(
'build-server',
)
def ci(metadata):
return {
'build-server': {
'ci': {
f'{repo}@{other_node.name}': {
'hostname': other_node.metadata.get('hostname'),
'repo': repo,
**options,
}
for other_node in repo.nodes
if other_node.has_bundle('build-ci')
for repo, options in other_node.metadata.get('build-ci').items()
},
},
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('build-server/hostname'): {
'content': 'nginx/proxy_pass.conf',
'context': {
'target': 'http://127.0.0.1:4000',
},
'check_path': '/status',
},
},
},
}

View file

@ -0,0 +1,20 @@
debian_version = min([node.os_version, (11,)])[0] # FIXME
defaults = {
'apt': {
'packages': {
'crystal': {},
},
'sources': {
'crystal': {
# https://software.opensuse.org/download.html?project=devel%3Alanguages%3Acrystal&package=crystal
'urls': {
'http://download.opensuse.org/repositories/devel:/languages:/crystal/Debian_Testing/',
},
'suites': {
'/',
},
},
},
},
}

View file

@ -0,0 +1,24 @@
dm-crypt
========
Create encrypted block devices using `dm-crypt` on GNU/Linux. The devices
are unlocked during runs of `bw apply`.
Metadata
--------
'dm-crypt': {
'encrypted-devices': {
'foobar': {
'device': '/dev/sdb',
# either
'salt': 'muWWU7dr+5Wtk+56OLdqUNZccnzXPUTJprMSMxkstR8=',
# or
'password': vault.decrypt('passphrase'),
},
},
},
This will encrypt `/dev/sdb` using the specified passphrase. Once the
device has been unlocked, it will be available as
`/dev/mapper/foobar`.

46
bundles/dm-crypt/items.py Normal file
View file

@ -0,0 +1,46 @@
for name, conf in node.metadata.get('dm-crypt').items():
actions[f'dm-crypt_format_{name}'] = {
'command': f"cryptsetup --batch-mode luksFormat --cipher aes-xts-plain64 --key-size 512 '{conf['device']}'",
'data_stdin': conf['password'],
'unless': f"blkid -t TYPE=crypto_LUKS '{conf['device']}'",
'comment': f"WARNING: This DESTROYS the contents of the device: '{conf['device']}'",
'needs': {
'pkg_apt:cryptsetup',
},
}
actions[f'dm-crypt_test_{name}'] = {
'command': 'false',
'unless': f"! cryptsetup --batch-mode luksOpen --test-passphrase '{conf['device']}'",
'data_stdin': conf['password'],
'needs': {
f"action:dm-crypt_format_{name}",
},
}
actions[f'dm-crypt_open_{name}'] = {
'command': f"cryptsetup --batch-mode luksOpen '{conf['device']}' '{name}'",
'data_stdin': conf['password'],
'unless': f"test -e /dev/mapper/{name}",
'comment': f"Unlocks the device '{conf['device']}' and makes it available in: '/dev/mapper/{name}'",
'needs': {
f"action:dm-crypt_test_{name}",
},
'needed_by': set(),
}
if node.has_bundle('zfs'):
for pool, pool_conf in node.metadata.get('zfs/pools').items():
if f'/dev/mapper/{name}' in pool_conf['devices']:
actions[f'dm-crypt_open_{name}']['needed_by'].add(f'zfs_pool:{pool}')
actions[f'zpool_import_{name}'] = {
'command': f"zpool import -d /dev/mapper/{name} {pool}",
'unless': f"zpool status {pool}",
'needs': {
f"action:dm-crypt_open_{name}",
},
'needed_by': {
f"zfs_pool:{pool}",
},
}

View file

@ -0,0 +1,22 @@
defaults = {
'apt': {
'packages': {
'cryptsetup': {},
},
},
'dm-crypt': {},
}
@metadata_reactor.provides(
'dm-crypt',
)
def password_from_salt(metadata):
return {
'dm-crypt': {
name: {
'password': repo.vault.password_for(f"dm-crypt/{metadata.get('id')}/{name}"),
}
for name, conf in metadata.get('dm-crypt').items()
}
}

12
bundles/dovecot/README.md Normal file
View file

@ -0,0 +1,12 @@
DOVECOT
=======
rescan index
------------
https://doc.dovecot.org/configuration_manual/fts/#rescan
```
doveadm fts rescan -u 'i@ckn.li'
doveadm index -u 'i@ckn.li' -q '*'
```

View file

@ -0,0 +1,104 @@
#!/bin/sh
# Example attachment decoder script. The attachment comes from stdin, and
# the script is expected to output UTF-8 data to stdout. (If the output isn't
# UTF-8, everything except valid UTF-8 sequences are dropped from it.)
# The attachment decoding is enabled by setting:
#
# plugin {
# fts_decoder = decode2text
# }
# service decode2text {
# executable = script /usr/local/libexec/dovecot/decode2text.sh
# user = dovecot
# unix_listener decode2text {
# mode = 0666
# }
# }
libexec_dir=`dirname $0`
content_type=$1
# The second parameter is the format's filename extension, which is used when
# found from a filename of application/octet-stream. You can also add more
# extensions by giving more parameters.
formats='application/pdf pdf
application/x-pdf pdf
application/msword doc
application/mspowerpoint ppt
application/vnd.ms-powerpoint ppt
application/ms-excel xls
application/x-msexcel xls
application/vnd.ms-excel xls
application/vnd.openxmlformats-officedocument.wordprocessingml.document docx
application/vnd.openxmlformats-officedocument.spreadsheetml.sheet xlsx
application/vnd.openxmlformats-officedocument.presentationml.presentation pptx
application/vnd.oasis.opendocument.text odt
application/vnd.oasis.opendocument.spreadsheet ods
application/vnd.oasis.opendocument.presentation odp
'
if [ "$content_type" = "" ]; then
echo "$formats"
exit 0
fi
fmt=`echo "$formats" | grep -w "^$content_type" | cut -d ' ' -f 2`
if [ "$fmt" = "" ]; then
echo "Content-Type: $content_type not supported" >&2
exit 1
fi
# most decoders can't handle stdin directly, so write the attachment
# to a temp file
path=`mktemp`
trap "rm -f $path" 0 1 2 3 14 15
cat > $path
xmlunzip() {
name=$1
tempdir=`mktemp -d`
if [ "$tempdir" = "" ]; then
exit 1
fi
trap "rm -rf $path $tempdir" 0 1 2 3 14 15
cd $tempdir || exit 1
unzip -q "$path" 2>/dev/null || exit 0
find . -name "$name" -print0 | xargs -0 cat | /usr/lib/dovecot/xml2text
}
wait_timeout() {
childpid=$!
trap "kill -9 $childpid; rm -f $path" 1 2 3 14 15
wait $childpid
}
LANG=en_US.UTF-8
export LANG
if [ $fmt = "pdf" ]; then
/usr/bin/pdftotext $path - 2>/dev/null&
wait_timeout 2>/dev/null
elif [ $fmt = "doc" ]; then
(/usr/bin/catdoc $path; true) 2>/dev/null&
wait_timeout 2>/dev/null
elif [ $fmt = "ppt" ]; then
(/usr/bin/catppt $path; true) 2>/dev/null&
wait_timeout 2>/dev/null
elif [ $fmt = "xls" ]; then
(/usr/bin/xls2csv $path; true) 2>/dev/null&
wait_timeout 2>/dev/null
elif [ $fmt = "odt" -o $fmt = "ods" -o $fmt = "odp" ]; then
xmlunzip "content.xml"
elif [ $fmt = "docx" ]; then
xmlunzip "document.xml"
elif [ $fmt = "xlsx" ]; then
xmlunzip "sharedStrings.xml"
elif [ $fmt = "pptx" ]; then
xmlunzip "slide*.xml"
else
echo "Buggy decoder script: $fmt not handled" >&2
exit 1
fi
exit 0

View file

@ -0,0 +1,17 @@
connect = host=${host} dbname=${name} user=${user} password=${password}
driver = pgsql
default_pass_scheme = ARGON2ID
user_query = SELECT '/var/vmail/%u' AS home, 'vmail' AS uid, 'vmail' AS gid
iterate_query = SELECT CONCAT(users.name, '@', domains.name) AS user \
FROM users \
LEFT JOIN domains ON users.domain_id = domains.id \
WHERE redirect IS NULL
password_query = SELECT CONCAT(users.name, '@', domains.name) AS user, password \
FROM users \
LEFT JOIN domains ON users.domain_id = domains.id \
WHERE redirect IS NULL \
AND users.name = SPLIT_PART('%u', '@', 1) \
AND domains.name = SPLIT_PART('%u', '@', 2)

View file

@ -0,0 +1,135 @@
protocols = imap lmtp sieve
auth_mechanisms = plain login
mail_privileged_group = mail
ssl = required
ssl_cert = </var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')}/fullchain.pem
ssl_key = </var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')}/privkey.pem
ssl_dh = </etc/dovecot/dhparam.pem
ssl_client_ca_dir = /etc/ssl/certs
mail_location = maildir:${node.metadata.get('mailserver/maildir')}/%u:INDEX=${node.metadata.get('mailserver/maildir')}/index/%u
mail_plugins = fts fts_xapian
namespace inbox {
inbox = yes
separator = .
mailbox Drafts {
auto = subscribe
special_use = \Drafts
}
mailbox Junk {
auto = create
special_use = \Junk
}
mailbox Trash {
auto = subscribe
special_use = \Trash
}
mailbox Sent {
auto = subscribe
special_use = \Sent
}
}
passdb {
driver = sql
args = /etc/dovecot/dovecot-sql.conf
}
# use sql for userdb too, to enable iterate_query
userdb {
driver = sql
args = /etc/dovecot/dovecot-sql.conf
}
service auth {
unix_listener /var/spool/postfix/private/auth {
mode = 0660
user = postfix
group = postfix
}
}
service lmtp {
unix_listener /var/spool/postfix/private/dovecot-lmtp {
mode = 0600
user = postfix
group = postfix
}
}
service stats {
unix_listener stats-reader {
user = vmail
group = vmail
mode = 0660
}
unix_listener stats-writer {
user = vmail
group = vmail
mode = 0660
}
}
service managesieve-login {
inet_listener sieve {
}
process_min_avail = 0
service_count = 1
vsz_limit = 64 M
}
service managesieve {
process_limit = 100
}
protocol imap {
mail_plugins = $mail_plugins imap_sieve
mail_max_userip_connections = 50
imap_idle_notify_interval = 29 mins
}
protocol lmtp {
mail_plugins = $mail_plugins sieve
}
protocol sieve {
plugin {
sieve = /var/vmail/sieve/%u.sieve
sieve_storage = /var/vmail/sieve/%u/
}
}
# fulltext search
plugin {
fts = xapian
fts_xapian = partial=3 full=20 verbose=0
fts_autoindex = yes
fts_enforced = yes
# Index attachements
fts_decoder = decode2text
}
service indexer-worker {
vsz_limit = ${indexer_ram}
}
service decode2text {
executable = script /usr/local/libexec/dovecot/decode2text.sh
user = dovecot
unix_listener decode2text {
mode = 0666
}
}
# spam filter
plugin {
sieve_plugins = sieve_imapsieve sieve_extprograms
sieve_dir = /var/vmail/sieve/%u/
sieve = /var/vmail/sieve/%u.sieve
sieve_pipe_bin_dir = /var/vmail/sieve/bin
sieve_extensions = +vnd.dovecot.pipe
sieve_after = /var/vmail/sieve/global/spam-to-folder.sieve
# From elsewhere to Spam folder
imapsieve_mailbox1_name = Junk
imapsieve_mailbox1_causes = COPY
imapsieve_mailbox1_before = file:/var/vmail/sieve/global/learn-spam.sieve
# From Spam folder to elsewhere
imapsieve_mailbox2_name = *
imapsieve_mailbox2_from = Junk
imapsieve_mailbox2_causes = COPY
imapsieve_mailbox2_before = file:/var/vmail/sieve/global/learn-ham.sieve
}

View file

@ -0,0 +1,2 @@
#!/bin/sh
exec /usr/bin/rspamc learn_ham

View file

@ -0,0 +1,7 @@
require ["vnd.dovecot.pipe", "copy", "imapsieve", "variables"];
if string "${mailbox}" "Trash" {
stop;
}
pipe :copy "learn-ham.sh";

View file

@ -0,0 +1,2 @@
#!/bin/sh
exec /usr/bin/rspamc learn_spam

View file

@ -0,0 +1,3 @@
require ["vnd.dovecot.pipe", "copy", "imapsieve"];
pipe :copy "learn-spam.sh";

View file

@ -0,0 +1,6 @@
require ["fileinto", "mailbox"];
if header :contains "X-Spam" "Yes" {
fileinto :create "Junk";
stop;
}

145
bundles/dovecot/items.py Normal file
View file

@ -0,0 +1,145 @@
assert node.has_bundle('mailserver')
users['vmail'] = {
'home': '/var/vmail',
}
directories = {
'/etc/dovecot': {
'purge': True,
},
'/etc/dovecot/conf.d': {
'purge': True,
'needs': [
'pkg_apt:dovecot-sieve',
'pkg_apt:dovecot-managesieved',
]
},
'/etc/dovecot/ssl': {},
'/var/vmail': {
'owner': 'vmail',
'group': 'vmail',
},
'/var/vmail/index': {
'owner': 'vmail',
'group': 'vmail',
},
'/var/vmail/sieve': {
'owner': 'vmail',
'group': 'vmail',
},
'/var/vmail/sieve/global': {
'owner': 'vmail',
'group': 'vmail',
},
'/var/vmail/sieve/bin': {
'owner': 'vmail',
'group': 'vmail',
},
}
files = {
'/etc/dovecot/dovecot.conf': {
'content_type': 'mako',
'context': {
'admin_email': node.metadata.get('mailserver/admin_email'),
'indexer_ram': node.metadata.get('dovecot/indexer_ram'),
},
'needs': {
'pkg_apt:'
},
'triggers': {
'svc_systemd:dovecot:restart',
},
},
'/etc/dovecot/dovecot-sql.conf': {
'content_type': 'mako',
'context': node.metadata.get('mailserver/database'),
'needs': {
'pkg_apt:'
},
'triggers': {
'svc_systemd:dovecot:restart',
},
},
'/etc/dovecot/dhparam.pem': {
'content_type': 'any',
},
'/var/vmail/sieve/global/spam-to-folder.sieve': {
'owner': 'vmail',
'group': 'vmail',
'triggers': {
'svc_systemd:dovecot:restart',
},
},
'/var/vmail/sieve/global/learn-ham.sieve': {
'owner': 'vmail',
'group': 'vmail',
'triggers': {
'svc_systemd:dovecot:restart',
},
},
'/var/vmail/sieve/bin/learn-ham.sh': {
'owner': 'vmail',
'group': 'vmail',
'mode': '550',
},
'/var/vmail/sieve/global/learn-spam.sieve': {
'owner': 'vmail',
'group': 'vmail',
'triggers': {
'svc_systemd:dovecot:restart',
},
},
# /usr/local/libexec/dovecot?
# /usr/lib/dovecot/sieve-pipe?
'/var/vmail/sieve/bin/learn-spam.sh': {
'owner': 'vmail',
'group': 'vmail',
'mode': '550',
},
}
actions = {
'dovecot_generate_dhparam': {
'command': 'openssl dhparam -out /etc/dovecot/dhparam.pem 2048',
'unless': 'test -f /etc/dovecot/dhparam.pem',
'cascade_skip': False,
'needs': {
'pkg_apt:',
'directory:/etc/dovecot/ssl',
},
'triggers': {
'svc_systemd:dovecot:restart',
},
},
}
svc_systemd = {
'dovecot': {
'needs': {
'action:letsencrypt_update_certificates',
'action:dovecot_generate_dhparam',
'file:/etc/dovecot/dovecot.conf',
'file:/etc/dovecot/dovecot-sql.conf',
},
},
}
# fulltext search
directories['/usr/local/libexec/dovecot'] = {}
files['/usr/local/libexec/dovecot/decode2text.sh'] = {
'owner': 'dovecot',
'mode': '500',
}

View file

@ -0,0 +1,48 @@
defaults = {
'apt': {
'packages': {
'dovecot-imapd': {},
'dovecot-pgsql': {},
'dovecot-lmtpd': {},
# spam filtering
'dovecot-sieve': {},
'dovecot-managesieved': {},
# fulltext search
'dovecot-fts-xapian': {}, # buster-backports
'poppler-utils': {}, # pdftotext
'catdoc': {}, # catdoc, catppt, xls2csv
},
},
'dovecot': {
'database': {
'dbname': 'mailserver',
'dbuser': 'mailserver',
},
},
'letsencrypt': {
'reload_after': {
'dovecot',
},
},
'nftables': {
'input': {
'tcp dport {143, 993, 4190} accept',
},
},
'systemd-timers': {
'dovecot-optimize-index': {
'command': '/usr/bin/doveadm fts optimize -A',
'when': 'daily',
},
},
}
@metadata_reactor.provides(
'dovecot/indexer_ram',
)
def indexer_ram(metadata):
return {
'dovecot': {
'indexer_ram': str(metadata.get('vm/ram') // 2) + 'M',
},
}

View file

@ -0,0 +1,66 @@
defaults = {
'users': {
'downloads': {
'home': '/var/lib/downloads',
'needs': {
'zfs_dataset:tank/downloads'
},
},
},
'zfs': {
'datasets': {
'tank/downloads': {
'mountpoint': '/var/lib/downloads',
},
},
},
}
@metadata_reactor.provides(
'systemd-mount'
)
def mount_certs(metadata):
return {
'systemd-mount': {
'/var/lib/downloads_nginx': {
'source': '/var/lib/downloads',
'user': 'www-data',
},
},
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('download-server/hostname'): {
'content': 'nginx/directory_listing.conf',
'context': {
'directory': '/var/lib/downloads_nginx',
},
},
},
},
}
@metadata_reactor.provides(
'users/downloads/authorized_users',
)
def ssh_keys(metadata):
return {
'users': {
'downloads': {
'authorized_users': {
f'build-server@{other_node.name}'
for other_node in repo.nodes
if other_node.has_bundle('build-server')
},
},
},
}

54
bundles/flask/README.md Normal file
View file

@ -0,0 +1,54 @@
# Flask
This bundle can deploy one or more Flask applications per node.
```python
'flask': {
'myapp': {
'app_module': "myapp",
'apt_dependencies': [
"libffi-dev",
"libssl-dev",
],
'env': {
'APP_SECRETS': "/opt/client_secrets.json",
},
'json_config': {
'this json': 'is_visible',
'inside': 'your template.cfg',
},
'git_url': "ssh://git@bitbucket.apps.seibert-media.net:7999/smedia/myapp.git",
'git_branch': "master",
'deployment_triggers': ["action:do-a-thing"],
},
},
```
The git repo containing the application has to follow some conventions:
* `requirements-frozen.txt` (preferred) or `requirements.txt`
* a minimal `setup.py` to allow installation with pip
The `app` instance has to exist in the module defined by `app_module` (see the sketch below).
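For illustration, a repository satisfying these conventions could be as small as the following sketch (the name `myapp` mirrors the example metadata above and is otherwise arbitrary):
```python
# setup.py -- just enough to make "pip install -e ." work
from setuptools import setup, find_packages

setup(name="myapp", version="0.1", packages=find_packages())
```
```python
# myapp/__init__.py -- the module named by app_module; gunicorn starts it as "myapp:app"
from flask import Flask

app = Flask(__name__)
```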
It is also strongly advisable to enable logging in your app (otherwise HTTP 500s won't be logged):
```python
import logging
if not app.debug:
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
app.logger.addHandler(stream_handler)
```
If you specify `json_config`, then `/opt/${app}/config.json` will be
created. The environment variable `$APP_CONFIG` will point to that
file. You can use it in your app to load your config:
```python
app.config.from_json(environ['APP_CONFIG'])
```
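`Config.from_json` only exists on older Flask releases; on newer Flask versions, where it has been removed, the equivalent is `from_file` with a JSON loader:
```python
import json
from os import environ

# APP_CONFIG is an absolute path, so no root_path resolution is involved
app.config.from_file(environ['APP_CONFIG'], load=json.load)
```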
If `json_config` is *not* specified, you *can* put a static file in
`data/flask/files/cfg/$app_name`.

View file

@ -0,0 +1,10 @@
<%
from json import dumps
from bundlewrap.metadata import MetadataJSONEncoder
%>
${dumps(
json_config,
cls=MetadataJSONEncoder,
indent=4,
sort_keys=True,
)}

View file

@ -0,0 +1,14 @@
[Unit]
Description=flask application ${name}
After=network.target
[Service]
% for key, value in env.items():
Environment=${key}=${value}
% endfor
User=${user}
Group=${group}
ExecStart=/opt/${name}/venv/bin/gunicorn -w ${workers} -b ${host}:${port} ${app_module}:app
[Install]
WantedBy=multi-user.target

119
bundles/flask/items.py Normal file
View file

@ -0,0 +1,119 @@
for name, conf in node.metadata.get('flask').items():
for dep in conf.get('apt_dependencies', []):
pkg_apt[dep] = {
'needed_by': {
f'svc_systemd:{name}',
},
}
directories[f'/opt/{name}'] = {
'owner': conf['user'],
'group': conf['group'],
}
directories[f'/opt/{name}/src'] = {}
git_deploy[f'/opt/{name}/src'] = {
'repo': conf['git_url'],
'rev': conf.get('git_branch', 'master'),
'triggers': [
f'action:flask_{name}_pip_install_deps',
*conf.get('deployment_triggers', []),
],
}
# CONFIG
env = conf.get('env', {})
if conf.get('json_config', {}):
env['APP_CONFIG'] = f'/opt/{name}/config.json'
files[env['APP_CONFIG']] = {
'source': 'flask.cfg',
'context': {
'json_config': conf.get('json_config', {}),
},
}
if 'APP_CONFIG' in env:
files[env['APP_CONFIG']].update({
'content_type': 'mako',
'group': 'www-data',
'needed_by': [
f'svc_systemd:{name}',
],
'triggers': [
f'svc_systemd:{name}:restart',
],
})
# secrets
if 'secrets.json' in conf:
env['APP_SECRETS'] = f'/opt/{name}/secrets.json'
files[env['APP_SECRETS']] = {
'content': conf['secrets.json'],
'mode': '0600',
'owner': conf.get('user', 'www-data'),
'group': conf.get('group', 'www-data'),
'needed_by': [
f'svc_systemd:{name}',
],
}
# VENV
actions[f'flask_{name}_create_virtualenv'] = {
'cascade_skip': False,
'command': f'python3 -m venv /opt/{name}/venv',
'unless': f'test -d /opt/{name}/venv',
'needs': [
f'directory:/opt/{name}',
'pkg_apt:python3-venv',
],
'triggers': [
f'action:flask_{name}_pip_install_deps',
],
}
actions[f'flask_{name}_pip_install_deps'] = {
'cascade_skip': False,
'command': f'/opt/{name}/venv/bin/pip3 install -r /opt/{name}/src/requirements-frozen.txt || /opt/{name}/venv/bin/pip3 install -r /opt/{name}/src/requirements.txt',
'triggered': True, # TODO: https://stackoverflow.com/questions/16294819/check-if-my-python-has-all-required-packages
'needs': [
f'git_deploy:/opt/{name}/src',
'pkg_apt:python3-pip',
],
'triggers': [
f'action:flask_{name}_pip_install_gunicorn',
],
}
actions[f'flask_{name}_pip_install_gunicorn'] = {
'command': f'/opt/{name}/venv/bin/pip3 install -U gunicorn',
'triggered': True,
'cascade_skip': False,
'needs': [
f'action:flask_{name}_create_virtualenv',
],
'triggers': [
f'action:flask_{name}_pip_install',
],
}
actions[f'flask_{name}_pip_install'] = {
'command': f'/opt/{name}/venv/bin/pip3 install -e /opt/{name}/src',
'triggered': True,
'cascade_skip': False,
'triggers': [
f'svc_systemd:{name}:restart',
],
}
# UNIT
svc_systemd[name] = {
'needs': [
f'action:flask_{name}_pip_install',
f'file:/usr/local/lib/systemd/system/{name}.service',
],
}

61
bundles/flask/metadata.py Normal file
View file

@ -0,0 +1,61 @@
defaults = {
'apt': {
'packages': {
'python3-pip': {},
'python3-dev': {},
'python3-venv': {},
},
},
'flask': {},
}
@metadata_reactor.provides(
'flask',
)
def app_defaults(metadata):
return {
'flask': {
name: {
'user': 'root',
'group': 'root',
'workers': 8,
'timeout': 30,
**conf,
}
for name, conf in metadata.get('flask').items()
}
}
@metadata_reactor.provides(
'systemd/units',
)
def units(metadata):
return {
'systemd': {
'units': {
f'{name}.service': {
'Unit': {
'Description': name,
'After': 'network.target',
},
'Service': {
'Environment': {
f'{k}={v}'
for k, v in conf.get('env', {}).items()
},
'User': conf['user'],
'Group': conf['group'],
'ExecStart': f"/opt/{name}/venv/bin/gunicorn -w {conf['workers']} -b 127.0.0.1:{conf['port']} --timeout {conf['timeout']} {conf['app_module']}:app"
},
'Install': {
'WantedBy': {
'multi-user.target'
}
},
}
for name, conf in metadata.get('flask').items()
}
}
}

View file

@ -0,0 +1,23 @@
Postgres password workaround: set it manually:
```
root@freescout /ro psql freescout
psql (15.6 (Debian 15.6-0+deb12u1))
Type "help" for help.
freescout=# \password freescout
Enter new password for user "freescout":
Enter it again:
freescout=#
\q
```
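A non-interactive sketch of the same workaround, assuming the password should simply be set back to the `DB_PASSWORD` value from `/opt/freescout/.env` and that postgres is reachable as the `postgres` superuser:
```
# note: breaks if the password contains a single quote
pw=$(grep '^DB_PASSWORD=' /opt/freescout/.env | cut -d= -f2-)
sudo -u postgres psql -c "ALTER ROLE freescout WITH PASSWORD '${pw}'"
```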
# Problems
* check whether `/opt/freescout/.env` has been reset
* check `psql -h localhost -d freescout -U freescout -W` with the password from `.env`
* `chown -R www-data:www-data /opt/freescout`
* `sudo su - www-data -c 'php /opt/freescout/artisan freescout:clear-cache' -s /bin/bash`
* JavaScript acting up? `sudo su - www-data -c 'php /opt/freescout/artisan storage:link' -s /bin/bash`
* user pictures gone? restore them from the backup: `/opt/freescout/.zfs/snapshot/zfs-auto-snap_hourly-2024-11-22-1700/storage/app/public/users` and `./customers`
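
A restore of those two directories could look roughly like this (sketch; the snapshot name is just the example from above, pick the one you actually need):
```
snap=/opt/freescout/.zfs/snapshot/zfs-auto-snap_hourly-2024-11-22-1700
cp -a "$snap/storage/app/public/users"     /opt/freescout/storage/app/public/
cp -a "$snap/storage/app/public/customers" /opt/freescout/storage/app/public/
chown -R www-data:www-data /opt/freescout/storage/app/public/users /opt/freescout/storage/app/public/customers
```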

View file

@ -0,0 +1,66 @@
# https://github.com/freescout-helpdesk/freescout/wiki/Installation-Guide
run_as = repo.libs.tools.run_as
php_version = node.metadata.get('php/version')
directories = {
'/opt/freescout': {
'owner': 'www-data',
'group': 'www-data',
# chown -R www-data:www-data /opt/freescout
},
}
actions = {
# 'clone_freescout': {
# 'command': run_as('www-data', 'git clone https://github.com/freescout-helpdesk/freescout.git /opt/freescout'),
# 'unless': 'test -e /opt/freescout/.git',
# 'needs': [
# 'pkg_apt:git',
# 'directory:/opt/freescout',
# ],
# },
# 'pull_freescout': {
# 'command': run_as('www-data', 'git -C /opt/freescout fetch origin dist && git -C /opt/freescout reset --hard origin/dist && git -C /opt/freescout clean -f'),
# 'unless': run_as('www-data', 'git -C /opt/freescout fetch origin && git -C /opt/freescout status -uno | grep -q "Your branch is up to date"'),
# 'needs': [
# 'action:clone_freescout',
# ],
# 'triggers': [
# 'action:freescout_artisan_update',
# f'svc_systemd:php{php_version}-fpm.service:restart',
# ],
# },
# 'freescout_artisan_update': {
# 'command': run_as('www-data', 'php /opt/freescout/artisan freescout:after-app-update'),
# 'triggered': True,
# 'needs': [
# f'svc_systemd:php{php_version}-fpm.service:restart',
# 'action:pull_freescout',
# ],
# },
}
# svc_systemd = {
# f'freescout-cron.service': {},
# }
# files = {
# '/opt/freescout/.env': {
# # https://github.com/freescout-helpdesk/freescout/blob/dist/.env.example
# # Every time you make changes to the .env file, you need to run the following for them to take effect:
# # ´sudo su - www-data -c 'php /opt/freescout/artisan freescout:clear-cache' -s /bin/bash´
# 'owner': 'www-data',
# 'content': '\n'.join(
# f'{k}={v}' for k, v in
# sorted(node.metadata.get('freescout/env').items())
# ) + '\n',
# 'needs': [
# 'directory:/opt/freescout',
# 'action:clone_freescout',
# ],
# },
# }
#sudo su - www-data -s /bin/bash -c 'php /opt/freescout/artisan freescout:create-user --role admin --firstName M --lastName W --email freescout@freibrief.net --password gyh.jzv2bnf6hvc.HKG --no-interaction'

View file

@ -0,0 +1,121 @@
from base64 import b64decode
# hash: SCRAM-SHA-256$4096:tQNfqQi7seqNDwJdHqCHbg==$r3ibECluHJaY6VRwpvPqrtCjgrEK7lAkgtUO8/tllTU=:+eeo4M0L2SowfyHFxT2FRqGzezve4ZOEocSIo11DATA=
database_password = repo.vault.password_for(f'{node.name} postgresql freescout').value
defaults = {
'apt': {
'packages': {
'git': {},
'php': {},
'php-pgsql': {},
'php-fpm': {},
'php-mbstring': {},
'php-xml': {},
'php-imap': {},
'php-zip': {},
'php-gd': {},
'php-curl': {},
'php-intl': {},
},
},
'freescout': {
'env': {
'APP_TIMEZONE': 'Europe/Berlin',
'DB_CONNECTION': 'pgsql',
'DB_HOST': '127.0.0.1',
'DB_PORT': '5432',
'DB_DATABASE': 'freescout',
'DB_USERNAME': 'freescout',
'DB_PASSWORD': database_password,
'APP_KEY': 'base64:' + repo.vault.random_bytes_as_base64_for(f'{node.name} freescout APP_KEY', length=32).value
},
},
'php': {
'php.ini': {
'cgi': {
'fix_pathinfo': '0',
},
},
},
'postgresql': {
'roles': {
'freescout': {
'password_hash': repo.libs.postgres.generate_scram_sha_256(
database_password,
b64decode(repo.vault.random_bytes_as_base64_for(f'{node.name} postgres freescout', length=16).value.encode()),
),
},
},
'databases': {
'freescout': {
'owner': 'freescout',
},
},
},
# 'systemd': {
# 'units': {
# f'freescout-cron.service': {
# 'Unit': {
# 'Description': 'Freescout Cron',
# 'After': 'network.target',
# },
# 'Service': {
# 'User': 'www-data',
# 'Nice': 10,
# 'ExecStart': f"/usr/bin/php /opt/freescout/artisan schedule:run"
# },
# 'Install': {
# 'WantedBy': {
# 'multi-user.target'
# }
# },
# }
# },
# },
'systemd-timers': {
'freescout-cron': {
'command': '/usr/bin/php /opt/freescout/artisan schedule:run',
'when': '*-*-* *:*:00',
'RuntimeMaxSec': '180',
'user': 'www-data',
},
},
'zfs': {
'datasets': {
'tank/freescout': {
'mountpoint': '/opt/freescout',
},
},
},
}
@metadata_reactor.provides(
'freescout/env/APP_URL',
)
def freescout(metadata):
return {
'freescout': {
'env': {
'APP_URL': 'https://' + metadata.get('freescout/domain') + '/',
},
},
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('freescout/domain'): {
'content': 'freescout/vhost.conf',
},
},
},
}

12
bundles/gcloud/README.md Normal file
View file

@ -0,0 +1,12 @@
```
gcloud projects add-iam-policy-binding sublimity-182017 --member 'serviceAccount:backup@sublimity-182017.iam.gserviceaccount.com' --role 'roles/storage.objectViewer'
gcloud projects add-iam-policy-binding sublimity-182017 --member 'serviceAccount:backup@sublimity-182017.iam.gserviceaccount.com' --role 'roles/storage.objectCreator'
gcloud projects add-iam-policy-binding sublimity-182017 --member 'serviceAccount:backup@sublimity-182017.iam.gserviceaccount.com' --role 'roles/storage.objectAdmin'
gsutil -o "GSUtil:parallel_process_count=3" -o GSUtil:parallel_thread_count=4 -m rsync -r -d -e /var/vmail gs://sublimity-backup/mailserver
gsutil config
gsutil versioning set on gs://sublimity-backup
gcsfuse --key-file /root/.config/gcloud/service_account.json sublimity-backup gcsfuse
```

43
bundles/gcloud/items.py Normal file
View file

@ -0,0 +1,43 @@
from os.path import join
from json import dumps
service_account = node.metadata.get('gcloud/service_account')
project = node.metadata.get('gcloud/project')
directories[f'/etc/gcloud'] = {
'purge': True,
}
files['/etc/gcloud/gcloud.json'] = {
'content': dumps(
node.metadata.get('gcloud'),
indent=4,
sort_keys=True
),
}
files['/etc/gcloud/service_account.json'] = {
'content': repo.vault.decrypt_file(
join(repo.path, 'data', 'gcloud', 'service_accounts', f'{service_account}@{project}.json.enc')
),
'mode': '500',
'needs': [
'pkg_apt:google-cloud-sdk',
],
}
actions['gcloud_activate_service_account'] = {
'command': 'gcloud auth activate-service-account --key-file /etc/gcloud/service_account.json',
'unless': f"gcloud auth list | grep -q '^\*[[:space:]]*{service_account}@{project}.iam.gserviceaccount.com'",
'needs': [
f'file:/etc/gcloud/service_account.json'
],
}
actions['gcloud_select_project'] = {
'command': f"gcloud config set project '{project}'",
'unless': f"gcloud config get-value project | grep -q '^{project}$'",
'needs': [
f'action:gcloud_activate_service_account'
],
}

View file

@ -0,0 +1,22 @@
defaults = {
'apt': {
'packages': {
'apt-transport-https': {},
'ca-certificates': {},
'gnupg': {},
'google-cloud-sdk': {},
'python3-crcmod': {},
},
'sources': {
'google-cloud': {
'url': 'https://packages.cloud.google.com/apt/',
'suites': {
'cloud-sdk',
},
'components': {
'main',
},
},
},
},
}

View file

@ -0,0 +1,75 @@
[DEFAULT]
APP_NAME = ckn-gitea
RUN_USER = git
RUN_MODE = prod
[repository]
ROOT = /var/lib/gitea/repositories
MAX_CREATION_LIMIT = 0
DEFAULT_BRANCH = main
[ui]
ISSUE_PAGING_NUM = 50
MEMBERS_PAGING_NUM = 100
[server]
PROTOCOL = http
HTTP_ADDR = 0.0.0.0
HTTP_PORT = 3500
DISABLE_SSH = true
SSH_PORT = 22
LFS_START_SERVER = true
LFS_CONTENT_PATH = /var/lib/gitea/data/lfs
OFFLINE_MODE = true
START_SSH_SERVER = false
DISABLE_ROUTER_LOG = true
LANDING_PAGE = explore
[admin]
DEFAULT_EMAIL_NOTIFICATIONS = onmention
DISABLE_REGULAR_ORG_CREATION = true
[security]
INSTALL_LOCK = true
LOGIN_REMEMBER_DAYS = 30
[openid]
ENABLE_OPENID_SIGNIN = false
ENABLE_OPENID_SIGNUP = false
[service]
REGISTER_EMAIL_CONFIRM = true
ENABLE_NOTIFY_MAIL = true
DISABLE_REGISTRATION = false
ALLOW_ONLY_EXTERNAL_REGISTRATION = false
ENABLE_CAPTCHA = false
REQUIRE_SIGNIN_VIEW = false
DEFAULT_KEEP_EMAIL_PRIVATE = true
DEFAULT_ALLOW_CREATE_ORGANIZATION = false
DEFAULT_ENABLE_TIMETRACKING = true
[session]
PROVIDER = file
[picture]
DISABLE_GRAVATAR = true
ENABLE_FEDERATED_AVATAR = false
[log]
MODE = console
LEVEL = warn
[other]
SHOW_FOOTER_BRANDING = true
SHOW_FOOTER_TEMPLATE_LOAD_TIME = false
[webhook]
ALLOWED_HOST_LIST = *
DELIVER_TIMEOUT = 600
[indexer]
REPO_INDEXER_ENABLED = true
MAX_FILE_SIZE = 10240000
[queue.issue_indexer]
LENGTH = 20

64
bundles/gitea/items.py Normal file
View file

@ -0,0 +1,64 @@
from os.path import join
from bundlewrap.utils.dicts import merge_dict
version = node.metadata.get('gitea/version')
assert not version.startswith('v')
arch = node.metadata.get('system/architecture')
downloads['/usr/local/bin/gitea'] = {
# https://forgejo.org/releases/
'url': f'https://codeberg.org/forgejo/forgejo/releases/download/v{version}/forgejo-{version}-linux-{arch}',
'sha256_url': '{url}.sha256',
'triggers': {
'svc_systemd:gitea:restart',
},
'preceded_by': {
'action:stop_gitea',
},
}
directories['/var/lib/gitea'] = {
'owner': 'git',
'mode': '0700',
'triggers': {
'svc_systemd:gitea:restart',
},
}
actions = {
'chmod_gitea': {
'command': 'chmod a+x /usr/local/bin/gitea',
'unless': 'test -x /usr/local/bin/gitea',
'needs': {
'download:/usr/local/bin/gitea',
},
},
'stop_gitea': {
'command': 'systemctl stop gitea',
'triggered': True,
},
}
files['/etc/gitea/app.ini'] = {
'content': repo.libs.ini.dumps(
merge_dict(
repo.libs.ini.parse(open(join(repo.path, 'bundles', 'gitea', 'files', 'app.ini')).read()),
node.metadata.get('gitea/conf'),
),
),
'owner': 'git',
'mode': '0600',
'context': node.metadata['gitea'],
'triggers': {
'svc_systemd:gitea:restart',
},
}
svc_systemd['gitea'] = {
'needs': [
'action:chmod_gitea',
'download:/usr/local/bin/gitea',
'file:/etc/gitea/app.ini',
],
}

125
bundles/gitea/metadata.py Normal file
View file

@ -0,0 +1,125 @@
database_password = repo.vault.password_for(f'{node.name} postgresql gitea').value
defaults = {
'apt': {
'packages': {
'git': {
'needed_by': {
'svc_systemd:gitea',
}
},
},
},
'gitea': {
'conf': {
'DEFAULT': {
'WORK_PATH': '/var/lib/gitea',
},
'database': {
'DB_TYPE': 'postgres',
'HOST': 'localhost:5432',
'NAME': 'gitea',
'USER': 'gitea',
'PASSWD': database_password,
'SSL_MODE': 'disable',
'LOG_SQL': 'false',
},
},
},
'postgresql': {
'roles': {
'gitea': {
'password': database_password,
},
},
'databases': {
'gitea': {
'owner': 'gitea',
},
},
},
'systemd': {
'units': {
'gitea.service': {
'Unit': {
'Description': 'gitea',
'After': {'syslog.target', 'network.target'},
'Requires': 'postgresql.service',
},
'Service': {
'RestartSec': '2s',
'Type': 'simple',
'User': 'git',
'Group': 'git',
'WorkingDirectory': '/var/lib/gitea/',
'ExecStart': '/usr/local/bin/gitea web -c /etc/gitea/app.ini',
'Restart': 'always',
'Environment': 'USER=git HOME=/home/git GITEA_WORK_DIR=/var/lib/gitea',
},
'Install': {
'WantedBy': {'multi-user.target'},
},
},
},
},
'users': {
'git': {
'home': '/home/git',
},
},
'zfs': {
'datasets': {
'tank/gitea': {
'mountpoint': '/var/lib/gitea',
},
},
},
}
@metadata_reactor.provides(
'gitea/conf',
)
def conf(metadata):
domain = metadata.get('gitea/domain')
return {
'gitea': {
'conf': {
'server': {
'SSH_DOMAIN': domain,
'DOMAIN': domain,
'ROOT_URL': f'https://{domain}/',
'LFS_JWT_SECRET': repo.vault.password_for(f'{node.name} gitea lfs_secret_key', length=43),
},
'security': {
'INTERNAL_TOKEN': repo.vault.password_for(f'{node.name} gitea internal_token'),
'SECRET_KEY': repo.vault.password_for(f'{node.name} gitea security_secret_key'),
},
'service': {
'NO_REPLY_ADDRESS': f'noreply.{domain}',
},
'oauth2': {
'JWT_SECRET': repo.vault.password_for(f'{node.name} gitea oauth_secret_key', length=43),
},
},
},
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('gitea/domain'): {
'content': 'nginx/proxy_pass.conf',
'context': {
'target': 'http://127.0.0.1:3500',
},
},
},
},
}

View file

@ -0,0 +1,6 @@
directories['/opt/gocryptfs-inspect'] = {}
git_deploy['/opt/gocryptfs-inspect'] = {
'repo': 'https://github.com/slackner/gocryptfs-inspect.git',
'rev': 'ecd296c8f014bf18f5889e3cb9cb64807ff6b9c4',
}

View file

@ -0,0 +1,7 @@
defaults = {
'apt': {
'packages': {
'python3-pycryptodome': {},
},
},
}

View file

@ -0,0 +1,43 @@
from json import dumps
directories['/etc/gocryptfs'] = {
'purge': True,
}
files['/etc/gocryptfs/masterkey'] = {
'content': node.metadata.get('gocryptfs/masterkey'),
'mode': '500',
}
files['/etc/gocryptfs/gocryptfs.conf'] = {
'content': dumps({
'Version': 2,
'Creator': 'gocryptfs 1.6.1',
'ScryptObject': {
'Salt': node.metadata.get('gocryptfs/salt'),
'N': 65536,
'R': 8,
'P': 1,
'KeyLen': 32,
},
'FeatureFlags': [
'GCMIV128',
'HKDF',
'PlaintextNames',
'AESSIV',
]
}, indent=4, sort_keys=True)
}
for path, options in node.metadata.get('gocryptfs/paths').items():
directories[options['mountpoint']] = {
'owner': None,
'group': None,
'mode': None,
'preceded_by': [
f'svc_systemd:gocryptfs-{options["id"]}:stop',
],
'needed_by': [
f'svc_systemd:gocryptfs-{options["id"]}',
],
}

View file

@ -0,0 +1,103 @@
from hashlib import sha3_256
from base64 import b64decode, b64encode
from binascii import hexlify
from uuid import UUID
defaults = {
'apt': {
'packages': {
'gocryptfs': {},
'fuse': {},
'socat': {},
},
},
'gocryptfs': {
'paths': {},
},
}
@metadata_reactor.provides(
'gocryptfs',
)
def config(metadata):
return {
'gocryptfs': {
'masterkey': hexlify(b64decode(
str(repo.vault.random_bytes_as_base64_for(metadata.get('id'), length=32))
)).decode(),
'salt': b64encode(
sha3_256(UUID(metadata.get('id')).bytes).digest()
).decode(),
},
}
@metadata_reactor.provides(
'gocryptfs',
)
def paths(metadata):
paths = {}
for path, options in metadata.get('gocryptfs/paths').items():
paths[path] = {
'id': hexlify(sha3_256(path.encode()).digest()[:8]).decode(),
}
return {
'gocryptfs': {
'paths': paths,
},
}
@metadata_reactor.provides(
'systemd/services',
)
def systemd(metadata):
services = {}
for path, options in metadata.get('gocryptfs/paths').items():
services[f'gocryptfs-{options["id"]}'] = {
'content': {
'Unit': {
'Description': f'gocryptfs@{path} ({options["id"]})',
'After': {
'filesystem.target',
'zfs.target',
},
},
'Service': {
'RuntimeDirectory': 'gocryptfs',
'Environment': {
'MASTERKEY': metadata.get('gocryptfs/masterkey'),
'SOCKET': f'/var/run/gocryptfs/{options["id"]}',
'PLAIN': path,
'CIPHER': options["mountpoint"]
},
'ExecStart': [
'/usr/bin/gocryptfs -fg -plaintextnames -reverse -masterkey $MASTERKEY -ctlsock $SOCKET $PLAIN $CIPHER',
],
'ExecStopPost': [
'/usr/bin/umount $CIPHER'
],
},
},
'needs': [
'pkg_apt:gocryptfs',
'pkg_apt:fuse',
'pkg_apt:socat',
'file:/etc/gocryptfs/masterkey',
'file:/etc/gocryptfs/gocryptfs.conf',
],
'triggers': [
f'svc_systemd:gocryptfs-{options["id"]}:restart',
],
}
return {
'systemd': {
'services': services,
},
}

61
bundles/gollum/items.py Normal file
View file

@ -0,0 +1,61 @@
from shlex import quote
users = {
'gollum': {
'home': '/var/lib/gollum',
}
}
directories = {
'/opt/gollum': {
'owner': 'gollum',
},
'/opt/gollum/.bundle': {
'owner': 'gollum',
},
'/var/lib/gollum': {
'owner': 'gollum',
},
}
files = {
'/opt/gollum/.bundle/config': {
'content': 'BUNDLE_PATH: ".bundle/gems"',
}
}
git_deploy = {
'/opt/gollum': {
'repo': 'https://github.com/gollum/gollum.git',
'rev': f"v{node.metadata.get('gollum/version')}",
},
'/var/lib/gollum': {
'repo': node.metadata.get('gollum/wiki'),
'rev': 'main',
'unless': 'test -e /var/lib/gollum/.git',
},
}
def run(cmd):
return f"su gollum -c " + quote(f"cd /opt/gollum && {cmd}")
actions = {
'gollum_install_bundler': {
'command': run("gem install bundler --user"),
'unless': run("test -e $(ruby -e 'puts Gem.user_dir')/bin/bundle"),
'needs': [
'file:/opt/gollum/.bundle/config',
],
},
'gollum_bundle_install': {
'command': run("$(ruby -e 'puts Gem.user_dir')/bin/bundle install"),
'unless': run("$(ruby -e 'puts Gem.user_dir')/bin/bundle check"),
'needs': [
'git_deploy:/opt/gollum',
'action:gollum_install_bundler',
],
},
}
# TODO: AUTH
#https://github.com/bjoernalbers/gollum-auth

View file

@ -0,0 +1,49 @@
defaults = {
'apt': {
'packages': {
'libgit2-dev': {},
'libssl-dev': {},
'cmake': {},
},
},
'systemd': {
'units': {
'gollum.service': {
'Unit': {
'Description': 'gollum',
'After': 'syslog.target',
'After': 'network.target',
'Requires': 'postgresql.service',
},
'Service': {
'User': 'gollum',
'Group': 'gollum',
'WorkingDirectory': '/opt/gollum',
'ExecStart': 'true',
'Restart': 'always',
},
'Install': {
'WantedBy': {'multi-user.target'},
},
},
},
},
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('gollum/domain'): {
'content': 'nginx/proxy_pass.conf',
'context': {
'target': 'http://127.0.0.1:3600',
}
},
},
},
}

14
bundles/grafana/README.md Normal file
View file

@ -0,0 +1,14 @@
# metadata
```python
{
'hostname': 'example.com',
'influxdb_node': 'htz.influx',
}
```
# links
https://github.com/grafana/influxdb-flux-datasource/issues/42
https://community.grafana.com/t/no-alias-by-when-using-flux/15575/6

181
bundles/grafana/items.py Normal file
View file

@ -0,0 +1,181 @@
assert node.has_bundle('redis')
assert node.has_bundle('postgresql')
from mako.template import Template
from shlex import quote
from copy import deepcopy
from itertools import count
import yaml
import json
svc_systemd['grafana-server'] = {
'needs': [
'pkg_apt:grafana',
],
}
admin_password = node.metadata.get('grafana/config/security/admin_password')
port = node.metadata.get('grafana/config/server/http_port')
actions['reset_grafana_admin_password'] = {
'command': f"grafana-cli admin reset-admin-password {quote(admin_password)}",
'unless': f"sleep 5 && curl http://admin:{quote(admin_password)}@localhost:{port}/api/org --fail",
'needs': [
'svc_systemd:grafana-server',
],
}
directories = {
'/etc/grafana': {},
'/etc/grafana/provisioning': {
'owner': 'grafana',
'group': 'grafana',
},
'/etc/grafana/provisioning/datasources': {
'purge': True,
},
'/etc/grafana/provisioning/dashboards': {
'purge': True,
},
'/var/lib/grafana': {
'owner': 'grafana',
'group': 'grafana',
},
'/var/lib/grafana/dashboards': {
'owner': 'grafana',
'group': 'grafana',
'purge': True,
'triggers': [
'svc_systemd:grafana-server:restart',
],
},
}
files = {
'/etc/grafana/grafana.ini': {
'content': repo.libs.ini.dumps(node.metadata.get('grafana/config')),
'owner': 'grafana',
'group': 'grafana',
'triggers': [
'svc_systemd:grafana-server:restart',
],
},
'/etc/grafana/provisioning/datasources/managed.yaml': {
'content': yaml.dump({
'apiVersion': 1,
'datasources': list(node.metadata.get('grafana/datasources').values()),
}),
'owner': 'grafana',
'group': 'grafana',
'triggers': [
'svc_systemd:grafana-server:restart',
],
},
'/etc/grafana/provisioning/dashboards/managed.yaml': {
'content': yaml.dump({
'apiVersion': 1,
'providers': [{
'name': 'Default',
'folder': 'Generated',
'type': 'file',
'options': {
'path': '/var/lib/grafana/dashboards',
},
}],
}),
'owner': 'grafana',
'group': 'grafana',
'triggers': [
'svc_systemd:grafana-server:restart',
],
},
}
# DASHBOARDS
with open(repo.path.join([f'data/grafana/dashboard.py'])) as file:
dashboard_template = eval(file.read())
with open(repo.path.join([f'data/grafana/panel.py'])) as file:
panel_template = eval(file.read())
with open(repo.path.join([f'data/grafana/flux.mako'])) as file:
flux_template = Template(file.read())
bucket = repo.get_node(node.metadata.get('grafana/influxdb_node')).metadata.get('influxdb/bucket')
monitored_nodes = [
other_node
for other_node in repo.nodes
if other_node.metadata.get('telegraf/influxdb_node', None) == node.metadata.get('grafana/influxdb_node')
]
for dashboard_id, monitored_node in enumerate(monitored_nodes, start=1):
dashboard = deepcopy(dashboard_template)
dashboard['id'] = dashboard_id
dashboard['title'] = monitored_node.name
dashboard['uid'] = monitored_node.metadata.get('id')
panel_id = count(start=1)
for row_id, row_name in enumerate(sorted(monitored_node.metadata.get('grafana_rows')), start=1):
with open(repo.path.join([f'data/grafana/rows/{row_name}.py'])) as file:
row = eval(file.read())
for panel_in_row, (panel_name, panel_config) in enumerate(row.items()):
panel = deepcopy(panel_template)
panel['id'] = next(panel_id)
panel['title'] = f'{row_name} {panel_name}'
panel['gridPos']['w'] = 24 // len(row)
panel['gridPos']['x'] = (24 // len(row)) * panel_in_row
panel['gridPos']['y'] = (row_id - 1) * panel['gridPos']['h']
if 'display_name' in panel_config:
panel['fieldConfig']['defaults']['displayName'] = '${'+panel_config['display_name']+'}'
if panel_config.get('stacked'):
panel['fieldConfig']['defaults']['custom']['stacking']['mode'] = 'normal'
if 'unit' in panel_config:
panel['fieldConfig']['defaults']['unit'] = panel_config['unit']
if 'min' in panel_config:
panel['fieldConfig']['defaults']['min'] = panel_config['min']
if 'max' in panel_config:
panel['fieldConfig']['defaults']['max'] = panel_config['max']
if 'soft_max' in panel_config:
panel['fieldConfig']['defaults']['custom']['axisSoftMax'] = panel_config['soft_max']
if 'legend' in panel_config:
panel['options']['legend'].update(panel_config['legend'])
if 'tooltip' in panel_config:
panel['options']['tooltip']['mode'] = panel_config['tooltip']
if panel_config['tooltip'] == 'multi':
panel['options']['tooltip']['sort'] = 'desc'
for query_name, query_config in panel_config['queries'].items():
panel['targets'].append({
'refId': query_name,
'query': flux_template.render(
bucket=bucket,
host=monitored_node.name,
negative=query_config.get('negative', False),
boolean_to_int=query_config.get('boolean_to_int', False),
minimum=query_config.get('minimum', None),
filters={
'host': monitored_node.name,
**query_config['filters'],
},
exists=query_config.get('exists', []),
function=query_config.get('function', None),
).strip()
})
dashboard['panels'].append(panel)
files[f'/var/lib/grafana/dashboards/{monitored_node.name}.json'] = {
'content': json.dumps(dashboard, indent=4),
'owner': 'grafana',
'group': 'grafana',
'triggers': [
'svc_systemd:grafana-server:restart',
]
}

147
bundles/grafana/metadata.py Normal file
View file

@ -0,0 +1,147 @@
from mako.template import Template
postgres_password = repo.vault.password_for(f'{node.name} postgres role grafana')
defaults = {
'apt': {
'packages': {
'grafana': {},
},
'sources': {
'grafana': {
'urls': {
'https://packages.grafana.com/oss/deb',
},
'suites': {
'stable',
},
'components': {
'main',
},
},
},
},
'grafana': {
'config': {
'server': {
'http_port': 8300,
},
'database': {
'url': f'postgres://grafana:{postgres_password}@localhost:5432/grafana',
},
'remote_cache': {
'type': 'redis',
'connstr': 'addr=127.0.0.1:6379',
},
'security': {
'admin_user': 'admin',
'admin_password': str(repo.vault.password_for(f'{node.name} grafana admin')),
},
'users': {
'allow_signup': False,
},
},
'datasources': {},
},
'postgresql': {
'databases': {
'grafana': {
'owner': 'grafana',
},
},
'roles': {
'grafana': {
'password': postgres_password,
},
},
},
'zfs': {
'datasets': {
'tank/grafana': {
'mountpoint': '/var/lib/grafana'
},
},
},
}
@metadata_reactor.provides(
'grafana/config/server/domain',
)
def domain(metadata):
return {
'grafana': {
'config': {
'server': {
'domain': metadata.get('grafana/hostname'),
},
},
},
}
@metadata_reactor.provides(
'grafana/datasources',
)
def influxdb2(metadata):
influxdb_metadata = repo.get_node(metadata.get('grafana/influxdb_node')).metadata.get('influxdb')
return {
'grafana': {
'datasources': {
f"influxdb@{influxdb_metadata['hostname']}": {
'type': 'influxdb',
'url': f"http://{influxdb_metadata['hostname']}:{influxdb_metadata['port']}",
'jsonData': {
'version': 'Flux',
'organization': influxdb_metadata['org'],
'defaultBucket': influxdb_metadata['bucket'],
},
'secureJsonData': {
'token': str(influxdb_metadata['readonly_token']),
},
'editable': False,
'isDefault': True,
},
},
},
}
@metadata_reactor.provides(
'grafana/datasources',
)
def datasource_key_to_name(metadata):
return {
'grafana': {
'datasources': {
name: {'name': name} for name in metadata.get('grafana/datasources').keys()
},
},
}
@metadata_reactor.provides(
'dns',
)
def dns(metadata):
return {
'dns': {
metadata.get('grafana/hostname'): repo.libs.ip.get_a_records(metadata),
}
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('grafana/hostname'): {
'content': 'grafana/vhost.conf',
},
},
},
}

5
bundles/grub/files/grub Normal file
View file

@ -0,0 +1,5 @@
GRUB_DEFAULT=0
GRUB_TIMEOUT=${timeout}
GRUB_DISTRIBUTOR=`lsb_release -i -s 2> /dev/null || echo Debian`
GRUB_CMDLINE_LINUX_DEFAULT="${' '.join(kernel_params)}"
GRUB_CMDLINE_LINUX=""

20
bundles/grub/items.py Normal file
View file

@ -0,0 +1,20 @@
files = {
'/etc/default/grub': {
'content_type': 'mako',
'context': {
'timeout': node.metadata.get('grub/timeout'),
'kernel_params': node.metadata.get('grub/kernel_params'),
},
'mode': '0644',
'triggers': {
'action:update-grub',
},
}
}
actions = {
'update-grub': {
'command': 'update-grub',
'triggered': True,
},
}

6
bundles/grub/metadata.py Normal file
View file

@ -0,0 +1,6 @@
defaults = {
'grub': {
'timeout': 1,
'kernel_params': set(),
},
}

View file

@ -0,0 +1,10 @@
#!/bin/bash
date=$(date --utc +%s%N)
for cpu in $(cat /sys/devices/system/cpu/cpu0/cpufreq/affected_cpus)
do
# echo "cpu_frequency,cpu=$cpu min=$(expr $(cat /sys/devices/system/cpu/cpu$cpu/cpufreq/scaling_min_freq) / 1000) $date"
echo "cpu_frequency,cpu=$cpu current=$(expr $(cat /sys/devices/system/cpu/cpu$cpu/cpufreq/scaling_cur_freq) / 1000) $date"
# echo "cpu_frequency,cpu=$cpu max=$(expr $(cat /sys/devices/system/cpu/cpu$cpu/cpufreq/scaling_max_freq) / 1000) $date"
done

View file

@ -0,0 +1,8 @@
files = {
'/usr/local/share/telegraf/cpu_frequency': {
'mode': '0755',
'triggers': {
'svc_systemd:telegraf:restart',
},
},
}

View file

@ -0,0 +1,38 @@
defaults = {
'apt': {
'packages': {
'lm-sensors': {},
'console-data': {}, # keymap de
},
},
'grafana_rows': {
'health',
},
'sudoers': {
'telegraf': {
'/usr/local/share/telegraf/cpu_frequency',
},
},
'telegraf': {
'config': {
'inputs': {
'sensors': {repo.libs.hashable.hashable({
'timeout': '2s',
})},
'exec': {
repo.libs.hashable.hashable({
'commands': ["sudo /usr/local/share/telegraf/cpu_frequency"],
'name_override': "cpu_frequency",
'data_format': "influx",
}),
# repo.libs.hashable.hashable({
# 'commands': ["/bin/bash -c 'expr $(cat /sys/class/thermal/thermal_zone0/temp) / 1000'"],
# 'name_override': "cpu_temperature",
# 'data_format': "value",
# 'data_type': "integer",
# }),
},
},
},
},
}

View file

@ -0,0 +1,27 @@
from ipaddress import ip_network, ip_interface
@metadata_reactor.provides(
'systemd/units',
)
def network(metadata):
interface = ip_interface(metadata.get('network/internal/ipv4'))
network = ip_interface(f'{interface.ip}/24').network
gateway = network[1]
return {
'systemd': {
'units': {
'internal.network': {
f'Route#hetzner_gateway': {
'Destination': str(gateway),
'Scope': 'link',
},
f'Route#hetzner_network': {
'Destination': str(network),
'Gateway': str(gateway),
},
},
},
},
}

Some files were not shown because too many files have changed in this diff Show more