Compare commits


No commits in common. "706c4028f850c8b2858ac2b021fb8203de706b1c" and "116697af9f5d30cb9fa0ba0c54f9335d4c4e3a78" have entirely different histories.

7 changed files with 19 additions and 65 deletions

View file

@@ -33,7 +33,7 @@ zfs snap "$source_dataset@$new_bookmark"
if zfs list -t bookmark -H -o name | grep "^$source_dataset#$bookmark_prefix" | wc -l | grep -q "^0$"
then
echo "INITIAL BACKUP"
# do in subshell, otherwise ctrl+c will lead to 0 exitcode
# do in subshell, otherwise ctrl+c will lead to 0 exitcode
$(zfs send -v "$source_dataset@$new_bookmark" | $ssh sudo zfs recv -F "$target_dataset")
else
echo "INCREMENTAL BACKUP"
@@ -44,19 +44,6 @@ fi
if [[ "$?" == "0" ]]
then
# delete old local bookmarks
for destroyable_bookmark in $(zfs list -t bookmark -H -o name "$dataset" | grep "^$dataset#$bookmark_prefix")
do
zfs destroy "$destroyable_bookmark"
done
# delete remote snapshots for old bookmarks (keep the newest, even if not strictly necessary; maybe useful for resuming)
for destroyable_snapshot in $($ssh sudo zfs list -t snapshot -H -o name "$dataset" | grep "^$dataset@$bookmark_prefix" | grep -v "$new_bookmark")
do
$ssh sudo zfs destroy "$destroyable_snapshot"
done
zfs bookmark "$source_dataset@$new_bookmark" "$source_dataset#$new_bookmark"
zfs destroy "$source_dataset@$new_bookmark" # keep snapshots?
echo "SUCCESS"

View file

@@ -4,6 +4,3 @@ Host *
GSSAPIAuthentication yes
StrictHostKeyChecking yes
GlobalKnownHostsFile /etc/ssh/ssh_known_hosts
ControlMaster auto
ControlPath ~/.ssh/multiplex-%C
ControlPersist 5m
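
The three removed lines enabled OpenSSH connection multiplexing, so the many short ssh invocations in the backup scripts could reuse a single authenticated TCP connection instead of reconnecting each time. With those options active, the shared master can be inspected or torn down like this (the host name is a placeholder):

    # check whether a multiplexed master connection is alive
    ssh -O check backuphost
    # close the shared master connection
    ssh -O exit backuphost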

View file

@@ -22,7 +22,7 @@ def systemd(metadata):
'Persistent': config.get('persistent', False),
'Unit': f'{name}.service',
},
},
},
f'{name}.service': {
'Unit':{
'Description': f'{name} timer service',
@@ -37,11 +37,9 @@ def systemd(metadata):
})
if config.get('working_dir'):
units[f'{name}.service']['Service']['WorkingDirectory'] = config['working_dir']
if config.get('success_exit_status'):
units[f'{name}.service']['Service']['SuccessExitStatus'] = config['success_exit_status']
services[f'{name}.timer'] = {}
return {
'systemd': {
'units': units,
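
The function above renders a matching timer/service unit pair for each configured job. Once deployed, the result can be checked on the node with plain systemd tooling (unit names taken from the wol-sleeper metadata further down; the commands themselves are generic, not part of this repo):

    # show the generated timer and its next activation
    systemctl list-timers suspend-if-idle.timer
    # follow the runs of the service it triggers
    journalctl -u suspend-if-idle.service -f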

View file

@@ -17,7 +17,7 @@ MIN_UPTIME=$(expr 60 \* 15)
if [[ "$UPTIME" -lt "$MIN_UPTIME" ]]
then
echo "ABORT: uptime of ${UPTIME}s is lower than minimum of ${MIN_UPTIME}s"
exit 75
exit 0
fi
# CHECK FOR RUNNING TIMERS
@@ -30,7 +30,7 @@ do
elif systemctl is-active "$SERVICE" --quiet
then
echo "ABORT: service $SERVICE is running by timer"
exit 75
exit 0
fi
done
@@ -40,7 +40,7 @@ LOGINS=$(netstat -tnpa | grep 'ESTABLISHED.*sshd' | tr -s ' ' | cut -d' ' -f5,7-
if ! [[ -z "$LOGINS" ]]
then
echo "ABORT: users logged in: $LOGINS"
exit 75
exit 0
fi
# SUSPEND!
@@ -48,26 +48,8 @@ fi
if [[ "$1" = check ]]
then
echo "WOULD SUSPEND"
exit 0
else
echo "SUSPENDING AFTER TIMEOUT"
for i in 1 2 3 4 5 6
do
echo "TIMEOUT ${i} success"
sleep 10
# check if condition is still met
if "$0" check
then
continue
else
echo "SESPENSION ABORTED"
exit 75
fi
done
echo "SESPENDING"
sleep 60
systemctl suspend
exit 0
fi
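
This file and the metadata changes elsewhere in the diff are two halves of the same cleanup: the abort paths used to exit with 75 (EX_TEMPFAIL), which the timer unit tolerated via the success_exit_status plumbing removed above and below, but now they exit 0 directly, so the service no longer needs a special success code. A quick way to confirm on the node that aborted runs still count as successful (generic systemctl usage, not from the diff):

    # after a run, Result should be "success" and ExecMainStatus 0
    systemctl show suspend-if-idle.service -p Result,ExecMainStatus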

View file

@@ -6,7 +6,3 @@ defaults = {
'packages': {
'jq': {},
'ethtool': {},
'net-tools': {},
},
},
}
@@ -21,7 +20,6 @@ def timer(metadata):
'suspend-if-idle': {
'command': f'suspend_if_idle',
'when': 'minutely',
'success_exit_status': '75',
'env': {
'THIS_SERVICE': 'suspend-if-idle.service',
},
@@ -37,7 +35,7 @@ def wake_command(metadata):
waker_hostname = repo.get_node(metadata.get('wol-sleeper/waker')).hostname
mac = metadata.get(f"network/{metadata.get('wol-sleeper/network')}/mac")
ip = ip_interface(metadata.get(f"network/{metadata.get('wol-sleeper/network')}/ipv4")).ip
return {
'wol-sleeper': {
'wake_command': f"ssh -o StrictHostKeyChecking=no wol@{waker_hostname} 'wakeonlan {mac} && while ! ping {ip} -c1 -W3; do true; done'",
@@ -51,7 +49,7 @@ def wake_command(metadata):
)
def systemd(metadata):
interface = metadata.get(f"network/{metadata.get('wol-sleeper/network')}/interface")
return {
'systemd': {
'units': {
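
systemd(metadata) looks up the node's network interface, and the bundle lists ethtool among its package defaults, which suggests the (truncated) unit enables Wake-on-LAN on that interface so the box can be woken by the wake_command above. Done by hand, that typically looks like this (the interface name is a placeholder):

    # enable magic-packet wake-up and verify the setting
    ethtool -s eth0 wol g
    ethtool eth0 | grep Wake-on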

View file

@@ -2,14 +2,14 @@
set -exu
ssh="ssh -o ConnectTimeout=5 root@${server_ip}"
ssh="ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@${server_ip}"
bookmark_prefix="auto-mirror_"
new_bookmark="$bookmark_prefix$(date +"%Y-%m-%d_%H:%M:%S")"
for dataset in $(zfs list -t filesystem -H -o name)
do
echo "MIRRORING $dataset"
if ! $ssh sudo zfs list -t filesystem -H -o name | grep -q "^$dataset$"
then
echo "CREATING PARENT DATASET..."
@@ -17,11 +17,11 @@ do
fi
zfs snap "$dataset@$new_bookmark"
if zfs list -t bookmark -H -o name | grep "^$dataset#$bookmark_prefix" | wc -l | grep -q "^0$"
then
echo "INITIAL BACKUP"
# do in subshell, otherwise ctrl+c will lead to 0 exitcode
# do in subshell, otherwise ctrl+c will lead to 0 exitcode
$(zfs send -v "$dataset@$new_bookmark" | $ssh sudo zfs recv -F "$dataset" -o mountpoint=none)
else
echo "INCREMENTAL BACKUP"
@@ -32,19 +32,6 @@ do
if [[ "$?" == "0" ]]
then
# delete old local bookmarks
for destroyable_bookmark in $(zfs list -t bookmark -H -o name "$dataset" | grep "^$dataset#$bookmark_prefix")
do
zfs destroy "$destroyable_bookmark"
done
# delete remote snapshots for old bookmarks (keep the newest, even if not strictly necessary; maybe useful for resuming)
for destroyable_snapshot in $($ssh sudo zfs list -t snapshot -H -o name "$dataset" | grep "^$dataset@$bookmark_prefix" | grep -v "$new_bookmark")
do
$ssh sudo zfs destroy "$destroyable_snapshot"
done
zfs bookmark "$dataset@$new_bookmark" "$dataset#$new_bookmark"
zfs destroy "$dataset@$new_bookmark"
echo "SUCCESS $dataset"

View file

@@ -54,6 +54,11 @@
'device': '/dev/disk/by-id/ata-TOSHIBA_MG06ACA10TE_61C0A1B1FKQE',
},
},
'smartctl': {
'/dev/disk/by-id/ata-TOSHIBA_MG06ACA10TE_61C0A1B1FKQE': {
'apm': 1,
},
},
'zfs': {
'pools': {
'tank': {
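
The new smartctl block above sets APM level 1 for the Toshiba drive, the most aggressive power-management level, which permits the disk to spin down between backup runs on this wake-on-LAN host. The equivalent one-off command would be (generic smartmontools usage, not part of this repo):

    # set the drive's Advanced Power Management level to 1
    smartctl -s apm,1 /dev/disk/by-id/ata-TOSHIBA_MG06ACA10TE_61C0A1B1FKQE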