This commit is contained in:
mwiegand 2021-07-06 21:30:16 +02:00
parent f41ba0b934
commit a051b4af17
9 changed files with 1 addition and 118 deletions

View file

@ -1,6 +0,0 @@
#!/usr/bin/env python3
"""Expose the bundlewrap repository object for scripts in this directory.

The repository root is assumed to be the parent of the directory
containing this file.
"""
from bundlewrap.repo import Repository
from os.path import realpath, dirname

# realpath resolves symlinks first, so this also works when the script
# is invoked through a symlink.
_script_dir = dirname(realpath(__file__))
repo = Repository(dirname(_script_dir))

View file

@ -63,8 +63,7 @@ def backup_authorized_keys(metadata):
'authorized_keys': [
other_node.metadata.get('users/root/pubkey')
for other_node in repo.nodes
if other_node.has_bundle('backup')
and other_node.metadata.get('backup/server') == node.name
if other_node.metadata.get('backup/server') == node.name
],
},
},

View file

@ -1,14 +0,0 @@
#!/bin/bash
# Dispatch a backup for a single path: if the path is the mountpoint of a
# ZFS dataset, use the snapshot-based ZFS sender; if it is a plain
# directory, fall back to rsync. Anything else is an error (exit 1).
path=$1

# FIX: use -Fxq (fixed-string, whole-line match) instead of -q — the old
# substring/regex match would let e.g. "/data" match a "/database"
# mountpoint. Also fixed the install path: scripts live in /opt/backup
# (see the bundle's items file), not /opt/backuo.
if zfs list -H -o mountpoint | grep -Fxq -- "$path"
then
    /opt/backup/backup_path_via_zfs "$path"
elif test -d "$path"
then
    /opt/backup/backup_path_via_rsync "$path"
else
    echo "UNKNOWN PATH: $path"
    exit 1
fi

View file

@ -1,53 +0,0 @@
#!/bin/bash
# Replicate the ZFS dataset mounted at $1 to the configured backup server.
# Uses ZFS bookmarks to track replication state: the first run does a full
# send, later runs send incrementally from the newest bookmark.
# Exit codes: 97 = missing config value, 98 = no previous bookmark found,
# 99 = send/recv failed.
set -e
set -x

path=$1

# Client identity and target server come from the shared backup config.
uuid=$(jq -r .client_uuid < /etc/backup/config.json)
server=$(jq -r .server_hostname < /etc/backup/config.json)
ssh="ssh -o StrictHostKeyChecking=no backup-receiver@$server"

# Map the mountpoint back to the dataset name it belongs to.
source_dataset=$(zfs list -H -o mountpoint,name | grep -P "^$path\t" | cut -d $'\t' -f 2)
target_dataset="tank/$uuid/$source_dataset"
target_dataset_parent=$(echo $target_dataset | rev | cut -d / -f 2- | rev)
bookmark_prefix="auto-backup_"
new_bookmark="$bookmark_prefix$(date +"%Y-%m-%d_%H:%M:%S")"

# Bail out before touching anything if a required value is empty.
for var in path uuid server ssh source_dataset target_dataset target_dataset_parent new_bookmark
do
    [[ -z "${!var}" ]] && echo "ERROR - $var is empty" && exit 97
done

# FIX: was "$TARGET_DATASET" — shell variables are case-sensitive, so the
# original always printed an empty string here instead of the target.
echo "BACKUP ZFS DATASET - PATH: $path, SERVER: $server, UUID: $uuid, SOURCE_DATASET: $source_dataset, TARGET_DATASET: $target_dataset"

# Ensure the (unmounted) parent dataset exists on the receiver.
if ! $ssh sudo zfs list -t filesystem -H -o name | grep -q "^$target_dataset_parent$"
then
    echo "CREATING PARENT DATASET..."
    $ssh sudo zfs create -p -o mountpoint=none "$target_dataset_parent"
fi

zfs snap "$source_dataset@$new_bookmark"

# No bookmark with our prefix yet -> this is the first replication.
if zfs list -t bookmark -H -o name | grep "^$source_dataset#$bookmark_prefix" | wc -l | grep -q "^0$"
then
    echo "INITIAL BACKUP"
    # do in subshell, otherwise ctr+c will lead to 0 exitcode
    $(zfs send -v "$source_dataset@$new_bookmark" | $ssh sudo zfs recv -F "$target_dataset")
else
    echo "INCREMENTAL BACKUP"
    # Newest existing bookmark is the incremental source.
    last_bookmark=$(zfs list -t bookmark -H -o name | grep "^$source_dataset#$bookmark_prefix" | sort | tail -1 | cut -d '#' -f 2)
    [[ -z "$last_bookmark" ]] && echo "ERROR - last_bookmark is empty" && exit 98
    $(zfs send -v -i "#$last_bookmark" "$source_dataset@$new_bookmark" | $ssh sudo zfs recv "$target_dataset")
fi

# NOTE(review): with `set -e` a failed send/recv likely aborts the script
# before this check, making the error branch (snapshot cleanup, exit 99)
# unreachable — confirm whether `set -e` should be relaxed around the send.
if [[ "$?" == "0" ]]
then
    # Success: convert the snapshot into a cheap bookmark (enough for
    # future incremental sends) and drop the snapshot itself.
    zfs bookmark "$source_dataset@$new_bookmark" "$source_dataset#$new_bookmark"
    zfs destroy "$source_dataset@$new_bookmark"
    echo "SUCCESS"
else
    zfs destroy "$source_dataset@$new_bookmark"
    echo "ERROR"
    exit 99
fi

View file

@ -1,30 +0,0 @@
from json import dumps

# Backup client scripts, installed root-only executable.
directories['/opt/backup'] = {}
for script in (
    'backup_all',
    'backup_path',
    'backup_path_via_zfs',
    'backup_path_via_rsync',
):
    files['/opt/backup/' + script] = {
        'mode': '700',
    }

# Client configuration: which server to push to, who we are, what to back up.
directories['/etc/backup'] = {}
files['/etc/backup/config.json'] = {
    'content': dumps(
        {
            'server_hostname': repo.get_node(node.metadata.get('backup/server')).metadata.get('backup-server/hostname'),
            'client_uuid': node.metadata.get('id'),
            # De-duplicate and sort so the rendered file is stable.
            'paths': sorted(set(node.metadata.get('backup/paths'))),
        },
        indent=4,
        sort_keys=True,
    ),
}

View file

@ -1,12 +0,0 @@
# Default metadata for the backup bundle: the tools it needs, plus an
# initially empty backup configuration for nodes to override.
defaults = {
    'apt': {
        # jq parses /etc/backup/config.json; rsync handles plain directories.
        'packages': {'jq': {}, 'rsync': {}},
    },
    # No server assigned and nothing to back up until a node sets these.
    'backup': {'server': None, 'paths': []},
}

View file

@ -1,7 +1,6 @@
{
'bundles': [
'users',
'backup',
],
'metadata': {
'backup': {