This commit is contained in:
mwiegand 2021-06-26 00:12:20 +02:00
parent 534a88e101
commit 4e2f50f79b
12 changed files with 113 additions and 49 deletions

View file

@ -0,0 +1,3 @@
#!/bin/bash
# One-shot replication of the nextcloud dataset to the backup receiver.
# FIX: original first line was "!/bin/bash" — without the leading '#'
# it is not a shebang (and would be executed as a history-expansion /
# command line), so the script would not reliably run under bash.
zfs send tank/nextcloud@test1 | ssh backup-receiver@10.0.0.5 sudo zfs recv tank/nextcloud

View file

@ -9,6 +9,29 @@ defaults = {
}
@metadata_reactor.provides(
    'zfs/datasets'
)
def zfs(metadata):
    """Provide one backup target dataset per node that backs up to this server."""
    # Nodes whose backup bundle points at this server.
    clients = (
        candidate
        for candidate in repo.nodes
        if candidate.has_bundle('backup')
        and candidate.metadata.get('backup/server') == node.name
    )
    # Each client gets tank/<id>/fs mounted under /mnt/backups/<id>;
    # the received data itself is excluded from further backups.
    datasets = {
        f"tank/{client.metadata.get('id')}/fs": {
            'mountpoint': f"/mnt/backups/{client.metadata.get('id')}",
            'backup': False,
        }
        for client in clients
    }
    return {
        'zfs': {
            'datasets': datasets,
        },
    }
@metadata_reactor.provides(
'dns'
)

View file

@ -1,44 +1,14 @@
#!/bin/bash
set -e
set -x
path=$1
PATH=$1
SERVER=$(jq -r .server_hostname < /etc/backup/config.json)
UUID=$(jq -r .client_uuid < /etc/backup/config.json)
echo "BACKUP - PATH: $PATH, SERVER: $SERVER, UUID: $UUID"
if zfs list -H -o mountpoint | grep -q "$PATH"
if zfs list -H -o mountpoint | grep -q "$path"
then
SOURCE_DATASET=$(zfs list -H -o mountpoint,name | grep -P "^$PATH\t" | cut -d $'\t' -f 2)
SOURCE_DATASET_STRIPPED=$(echo $SOURCE_DATASET | cut -f / -d 2-)
TARGET_DATASET="tank/$UUID/$DATASET_NAME"
SSH="ssh backup-receiver@$SERVER"
DATE=$(date +"%Y-%m-%d_%H:%M:%S")
BOOKMARK="ATUO_BACKUP_$DATE"
echo "BACKUP ZFS DATASET - SOURCE_DATASET: $SOURCE_DATASET, TARGET_DATASET: $TARGET_DATASET"
if ! $SSH zfs list -t filesystem -H -o name | grep -q "^$TARGET_DATASET$"
then
echo "CREATING TARGET DATASET..."
zfs create -p -o mountpoint=none "$TARGET_DATASET"
fi
zfs snap $SOURCE_DATASET@$BOOKMARK
zfs bookmark $SOURCE_DATASET@$BOOKMARK $SOURCE_DATASET\#$BOOKMARK
zfs destroy $SOURCE_DATASET@$BOOKMARK
zfs send -i tank/nextcloud@test1 tank/nextcloud@test2 | ssh backup-receiver@$SERVER sudo zfs recv tank/nextcloud
elif test -d "$PATH"
/opt/backup/backup_path_via_zfs "$path"
elif test -d "$path"
then
TYPE=directory
elif test -f "$PATH"
then
TYPE=file
/opt/backup/backup_path_via_zfs "$path"
else
echo "UNKNOWN TYPE: $PATH"
echo "UNKNOWN PATH: $path"
exit 1
fi

View file

@ -0,0 +1,44 @@
#!/bin/bash
# Replicate the ZFS dataset mounted at $1 to the backup server via
# send/receive. A bookmark is kept on success so the next run can do an
# incremental send without retaining old snapshots on the source.
set -e
set -x
# FIX: without pipefail a failing `zfs send` is masked by the exit
# status of `zfs recv` on the right-hand side of the pipe.
set -o pipefail

path=$1
uuid=$(jq -r .client_uuid < /etc/backup/config.json)
server=$(jq -r .server_hostname < /etc/backup/config.json)
ssh="ssh -o StrictHostKeyChecking=no backup-receiver@$server"

source_dataset=$(zfs list -H -o mountpoint,name | grep -P "^$path\t" | cut -d $'\t' -f 2)
# NOTE(review): $source_dataset already starts with the pool name
# ("tank/..."), so this yields tank/$uuid/tank/... — confirm that this
# nesting is intended on the receiver.
target_dataset="tank/$uuid/$source_dataset"
new_bookmark="auto-backup_$(date +"%Y-%m-%d_%H:%M:%S")"

# FIX: was $TARGET_DATASET — undefined; the variables here are lower case.
echo "BACKUP ZFS DATASET - PATH: $path, SERVER: $server, UUID: $uuid, SOURCE_DATASET: $source_dataset, TARGET_DATASET: $target_dataset"

# if ! $SSH zfs list -t filesystem -H -o name | grep -q "^$TARGET_DATASET$"
# then
# 	echo "CREATING TARGET DATASET..."
# 	$ssh sudo zfs create -p -o mountpoint=none "$target_dataset"
# fi

zfs snap "$source_dataset@$new_bookmark"

# NOTE(review): the bookmark listing is not filtered to $source_dataset,
# so with several backed-up datasets the wrong increment base could be
# picked — verify against `zfs list -t bookmark <dataset>`.
if ! zfs list -t bookmark -H -o name | grep -q '#auto-backup'
then
	echo "INITIAL BACKUP"
	send=(zfs send -v "$source_dataset@$new_bookmark")
else
	echo "INCREMENTAL BACKUP"
	last_bookmark=$(zfs list -t bookmark -H -o name | sort | tail -1 | cut -d '#' -f 2)
	send=(zfs send -v -i "#$last_bookmark" "$source_dataset@$new_bookmark")
fi

# FIX: the original checked $? *after* the send, but under `set -e` a
# failed send aborted the script first, so the ERROR branch was dead and
# the temporary snapshot leaked. Running the pipeline as an `if`
# condition keeps `set -e` from firing and lets cleanup happen.
if "${send[@]}" | $ssh sudo zfs recv "$target_dataset"
then
	echo "SUCCESS"
	# Convert the snapshot into a cheap bookmark for the next increment.
	zfs bookmark "$source_dataset@$new_bookmark" "$source_dataset#$new_bookmark"
	zfs destroy "$source_dataset@$new_bookmark"
else
	echo "ERROR"
	zfs destroy "$source_dataset@$new_bookmark"
	exit 99
fi

View file

@ -1,11 +1,27 @@
from json import dumps
directories['/opt/backup'] = {}
files['/opt/backup/backup_all'] = {
'mode': '700',
}
files['/opt/backup/backup_path'] = {
'mode': '700',
}
files['/opt/backup/backup_path_via_zfs'] = {
'mode': '700',
}
files['/opt/backup/backup_path_via_rsync'] = {
'mode': '700',
}
directories['/etc/backup'] = {}
files['/etc/backup/config.json'] = {
'content': dumps(
{
'server_hostname': repo.get_node(node.metadata.get('backup/server')).metadata.get('backup-server/hostname'),
'client_uuid': node.metadata.get('id'),
'paths': sorted(set(node.metadata.get('backup/paths'))),
},
indent=4,

View file

@ -1,6 +1,7 @@
defaults = {
'apt': {
'packages': {
'jq': {},
'rsync': {},
},
},

View file

@ -63,6 +63,9 @@ defaults = {
'datasets': {
'tank/nextcloud': {
'mountpoint': '/var/lib/nextcloud',
'needed_by': [
'bundle:nextcloud',
],
},
},
},

View file

@ -2,29 +2,27 @@ for group, config in node.metadata.get('groups', {}).items():
groups[group] = config
for name, config in node.metadata.get('users').items():
users[name] = {
k:v for k,v in config.items() if k in [
"full_name", "gid", "groups", "home", "password_hash", "shell", "uid",
]
}
directories[config['home']] = {
'owner': name,
'mode': '700',
}
files[f"{config['home']}/.ssh/id_{config['keytype']}"] = {
'content': config['privkey'],
'content': config['privkey'] + '\n',
'owner': name,
'mode': '0600',
}
files[f"{config['home']}/.ssh/id_{config['keytype']}.pub"] = {
'content': config['pubkey'],
'content': config['pubkey'] + '\n',
'owner': name,
'mode': '0600',
}
files[config['home'] + '/.ssh/authorized_keys'] = {
'content': '\n'.join(sorted(config['authorized_keys'])),
'content': '\n'.join(sorted(config['authorized_keys'])) + '\n',
'owner': name,
'mode': '0600',
}
users[name] = config
for option in ['authorized_keys', 'privkey', 'pubkey', 'keytype']:
users[name].pop(option, None)

View file

@ -26,10 +26,12 @@ svc_systemd = {
},
}
zfs_datasets = node.metadata.get('zfs/datasets')
for name, config in node.metadata.get('zfs/datasets', {}).items():
zfs_datasets[name] = config
zfs_datasets[name].pop('backup', None)
for name, attrs in node.metadata.get('zfs/pools', {}).items():
zfs_pools[name] = attrs
for name, config in node.metadata.get('zfs/pools', {}).items():
zfs_pools[name] = config
# actions[f'pool_{name}_enable_trim'] = {
# 'command': f'zpool set autotrim=on {name}',

View file

@ -14,6 +14,10 @@ def create(node, path, options):
option_list.append("-o {}={}".format(quote(option), quote(value)))
option_args = " ".join(option_list)
print("zfs create {} {}".format(
option_args,
quote(path),
))
node.run(
"zfs create {} {}".format(
option_args,

View file

@ -18,7 +18,7 @@ def generate_ad25519_key_pair(secret):
'-----BEGIN OPENSSH PRIVATE KEY-----',
b64encode(deterministic_bytes).decode(),
'-----END OPENSSH PRIVATE KEY-----',
]) + '\n'
])
public_key = privkey_bytes.public_key().public_bytes(
encoding=serialization.Encoding.OpenSSH,