Compare commits

...

5 commits

Author    SHA1        Message                                                                        Date
mwiegand  706c4028f8  zfs mirror: delete old snapshots and bookmarks                                 2022-08-09 19:59:24 +02:00
mwiegand  3cd41adeaf  smartctl doesn't work here                                                     2022-08-09 19:59:02 +02:00
mwiegand  8a13421577  improve wake on lan                                                            2022-08-09 19:58:47 +02:00
mwiegand  9ff8dce802  ssh multiplexing                                                               2022-08-09 19:58:13 +02:00
mwiegand  49081248ae  bundles/backup/files/backup_path_via_zfs: delete old bookmarks and snapshots   2022-08-09 19:57:52 +02:00
7 changed files with 65 additions and 19 deletions

View file

@@ -33,7 +33,7 @@ zfs snap "$source_dataset@$new_bookmark"
if zfs list -t bookmark -H -o name | grep "^$source_dataset#$bookmark_prefix" | wc -l | grep -q "^0$"
then
echo "INITIAL BACKUP"
# run in a subshell, otherwise Ctrl+C would lead to a 0 exit code
$(zfs send -v "$source_dataset@$new_bookmark" | $ssh sudo zfs recv -F "$target_dataset")
else
echo "INCREMENTAL BACKUP"
@@ -44,6 +44,19 @@ fi
if [[ "$?" == "0" ]]
then
# delete old local bookmarks
for destroyable_bookmark in $(zfs list -t bookmark -H -o name "$dataset" | grep "^$dataset#$bookmark_prefix")
do
zfs destroy "$destroyable_bookmark"
done
# delete old snapshots that have bookmarks (except the newest, even if not strictly necessary; it may help with resuming)
for destroyable_snapshot in $($ssh sudo zfs list -t snapshot -H -o name "$dataset" | grep "^$dataset@$bookmark_prefix" | grep -v "$new_bookmark")
do
$ssh sudo zfs destroy "$destroyable_snapshot"
done
zfs bookmark "$source_dataset@$new_bookmark" "$source_dataset#$new_bookmark"
zfs destroy "$source_dataset@$new_bookmark" # keep snapshots?
echo "SUCCESS"

View file

@@ -4,3 +4,6 @@ Host *
GSSAPIAuthentication yes
StrictHostKeyChecking yes
GlobalKnownHostsFile /etc/ssh/ssh_known_hosts
ControlMaster auto
ControlPath ~/.ssh/multiplex-%C
ControlPersist 5m
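ControlMaster auto together with ControlPath and ControlPersist keeps one master connection alive for five minutes, so the many $ssh invocations in the backup scripts reuse a single TCP and authentication session instead of reconnecting each time. The multiplexed master can be inspected or torn down manually (hostname illustrative):

ssh -O check root@backupserver   # prints "Master running" plus the master's PID
ssh -O exit root@backupserver    # closes the persisted master connection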

View file

@@ -22,7 +22,7 @@ def systemd(metadata):
'Persistent': config.get('persistent', False),
'Unit': f'{name}.service',
},
},
},
f'{name}.service': {
'Unit':{
'Description': f'{name} timer service',
@@ -37,9 +37,11 @@ def systemd(metadata):
})
if config.get('working_dir'):
units[f'{name}.service']['Service']['WorkingDirectory'] = config['working_dir']
if config.get('success_exit_status'):
units[f'{name}.service']['Service']['SuccessExitStatus'] = config['success_exit_status']
services[f'{name}.timer'] = {}
return {
'systemd': {
'units': units,
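The new success_exit_status key maps onto systemd's SuccessExitStatus= directive, so the generated service unit should end up with something like this in its Service section (a sketch; only the new directive is taken from this diff):

[Service]
SuccessExitStatus=75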

View file

@@ -17,7 +17,7 @@ MIN_UPTIME=$(expr 60 \* 15)
if [[ "$UPTIME" -lt "$MIN_UPTIME" ]]
then
echo "ABORT: uptime of ${UPTIME}s is lower than minimum of ${MIN_UPTIME}s"
exit 0
exit 75
fi
# CHECK FOR RUNNING TIMERS
@@ -30,7 +30,7 @@ do
elif systemctl is-active "$SERVICE" --quiet
then
echo "ABORT: service $SERVICE is running by timer"
exit 0
exit 75
fi
done
@@ -40,7 +40,7 @@ LOGINS=$(netstat -tnpa | grep 'ESTABLISHED.*sshd' | tr -s ' ' | cut -d' ' -f5,7-
if ! [[ -z "$LOGINS" ]]
then
echo "ABORT: users logged in: $LOGINS"
exit 0
exit 75
fi
# SUSPEND!
@@ -48,8 +48,26 @@ fi
if [[ "$1" = check ]]
then
echo "WOULD SESPEND"
exit 0
else
echo "SESPENDING AFTER TIMEOUT"
for i in 1 2 3 4 5 6
do
echo "TIMEOUT ${i} success"
sleep 10
# check if condition is still met
if "$0" check
then
continue
else
echo "SESPENSION ABORTED"
exit 75
fi
done
echo "SESPENDING"
sleep 60
systemctl suspend
exit 0
fi
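Exit code 75 is EX_TEMPFAIL from the BSD sysexits convention; combined with SuccessExitStatus=75 in the timer bundle, an aborted run no longer leaves the suspend-if-idle service in a failed state. Newer systemd releases can confirm the mapping:

systemd-analyze exit-status 75   # reports TEMPFAIL (BSD class)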

View file

@@ -6,6 +6,7 @@ defaults = {
'packages': {
'jq': {},
'ethtool': {},
'net-tools': {},
},
},
}
@@ -20,6 +21,7 @@ def timer(metadata):
'suspend-if-idle': {
'command': 'suspend_if_idle',
'when': 'minutely',
'success_exit_status': '75',
'env': {
'THIS_SERVICE': 'suspend-if-idle.service',
},
@@ -35,7 +37,7 @@ def wake_command(metadata):
waker_hostname = repo.get_node(metadata.get('wol-sleeper/waker')).hostname
mac = metadata.get(f"network/{metadata.get('wol-sleeper/network')}/mac")
ip = ip_interface(metadata.get(f"network/{metadata.get('wol-sleeper/network')}/ipv4")).ip
return {
'wol-sleeper': {
'wake_command': f"ssh -o StrictHostKeyChecking=no wol@{waker_hostname} 'wakeonlan {mac} && while ! ping {ip} -c1 -W3; do true; done'",
@@ -49,7 +51,7 @@
)
def systemd(metadata):
interface = metadata.get(f"network/{metadata.get('wol-sleeper/network')}/interface")
return {
'systemd': {
'units': {
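The hunk is truncated here, but since the bundle now installs ethtool and net-tools and the systemd function reads the interface from metadata, the generated unit presumably (re-)arms magic-packet wake-up on that interface. A hedged sketch of the relevant commands:

ethtool -s "$interface" wol g        # enable Wake-on-LAN via magic packet
ethtool "$interface" | grep Wake-on  # verify: should report "Wake-on: g"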

View file

@@ -2,14 +2,14 @@
set -exu
ssh="ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@${server_ip}"
ssh="ssh -o ConnectTimeout=5 root@${server_ip}"
bookmark_prefix="auto-mirror_"
new_bookmark="$bookmark_prefix$(date +"%Y-%m-%d_%H:%M:%S")"
for dataset in $(zfs list -t filesystem -H -o name)
do
echo "MIRRORING $dataset"
if ! $ssh sudo zfs list -t filesystem -H -o name | grep -q "^$dataset$"
then
echo "CREATING PARENT DATASET..."
@@ -17,11 +17,11 @@ do
fi
zfs snap "$dataset@$new_bookmark"
if zfs list -t bookmark -H -o name | grep "^$dataset#$bookmark_prefix" | wc -l | grep -q "^0$"
then
echo "INITIAL BACKUP"
# run in a subshell, otherwise Ctrl+C would lead to a 0 exit code
$(zfs send -v "$dataset@$new_bookmark" | $ssh sudo zfs recv -F "$dataset" -o mountpoint=none)
else
echo "INCREMENTAL BACKUP"
@@ -32,6 +32,19 @@ do
if [[ "$?" == "0" ]]
then
# delete old local bookmarks
for destroyable_bookmark in $(zfs list -t bookmark -H -o name "$dataset" | grep "^$dataset#$bookmark_prefix")
do
zfs destroy "$destroyable_bookmark"
done
# delete old snapshots that have bookmarks (except the newest, even if not strictly necessary; it may help with resuming)
for destroyable_snapshot in $($ssh sudo zfs list -t snapshot -H -o name "$dataset" | grep "^$dataset@$bookmark_prefix" | grep -v "$new_bookmark")
do
$ssh sudo zfs destroy "$destroyable_snapshot"
done
zfs bookmark "$dataset@$new_bookmark" "$dataset#$new_bookmark"
zfs destroy "$dataset@$new_bookmark"
echo "SUCCESS $dataset"

View file

@@ -54,11 +54,6 @@
'device': '/dev/disk/by-id/ata-TOSHIBA_MG06ACA10TE_61C0A1B1FKQE',
},
},
'smartctl': {
'/dev/disk/by-id/ata-TOSHIBA_MG06ACA10TE_61C0A1B1FKQE': {
'apm': 1,
},
},
'zfs': {
'pools': {
'tank': {
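Per the "smartctl doesn't work here" commit, the APM setting could not be applied through smartctl on this Toshiba drive, so the block was removed. If the most aggressive APM level is still wanted, hdparm is a possible alternative (illustrative, not part of this change):

hdparm -B 1 /dev/disk/by-id/ata-TOSHIBA_MG06ACA10TE_61C0A1B1FKQE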