From 706af2e12730e480bdd59998600354b8d44655b4 Mon Sep 17 00:00:00 2001
From: mwiegand
Date: Fri, 12 Nov 2021 17:40:00 +0100
Subject: [PATCH] don't autosnapshot backup datasets

---
 bundles/backup-server/metadata.py | 18 ++++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/bundles/backup-server/metadata.py b/bundles/backup-server/metadata.py
index de777d7..ae242f7 100644
--- a/bundles/backup-server/metadata.py
+++ b/bundles/backup-server/metadata.py
@@ -28,35 +28,45 @@ def zfs(metadata):
             other_node.has_bundle('backup')
             and other_node.metadata.get('backup/server') == node.name
         ):
+            base_dataset = f"tank/{other_node.metadata.get('id')}"
+
             # container
-            datasets[f"tank/{other_node.metadata.get('id')}"] = {
+            datasets[base_dataset] = {
                 'mountpoint': None,
                 'readonly': 'on',
                 'backup': False,
+                'com.sun:auto-snapshot': 'false',
             }
+
             # for rsync backups
-            datasets[f"tank/{other_node.metadata.get('id')}/fs"] = {
+            datasets[f'{base_dataset}/fs'] = {
                 'mountpoint': f"/mnt/backups/{other_node.metadata.get('id')}",
                 'readonly': 'off',
                 'backup': False,
+                'com.sun:auto-snapshot': 'true',
             }
+
             # for zfs send/recv
             if other_node.has_bundle('zfs'):
+                # base datasets for each tank
                 for pool in other_node.metadata.get('zfs/pools'):
-                    datasets[f"tank/{other_node.metadata.get('id')}/{pool}"] = {
+                    datasets[f'{base_dataset}/{pool}'] = {
                         'mountpoint': None,
                         'readonly': 'on',
                         'backup': False,
+                        'com.sun:auto-snapshot': 'false',
                     }
+
+                # actual datasets
                 for path in other_node.metadata.get('backup/paths'):
                     for dataset, config in other_node.metadata.get('zfs/datasets').items():
                         if path == config.get('mountpoint'):
-                            datasets[f"tank/{other_node.metadata.get('id')}/{dataset}"] = {
+                            datasets[f'{base_dataset}/{dataset}'] = {
                                 'mountpoint': None,
                                 'readonly': 'on',
                                 'backup': False,
+                                'com.sun:auto-snapshot': 'false',
                             }
                             continue