From 7302811418604c5030d058b34af1389ec561af09 Mon Sep 17 00:00:00 2001
From: mwiegand
Date: Sun, 10 Oct 2021 18:08:41 +0200
Subject: [PATCH] bw 4.12

---
 bundles/backup-server/metadata.py |   4 +-
 bundles/zfs/items.py              |  18 ++--
 items/zfs_dataset.py              | 144 ---------------------------
 items/zfs_pool.py                 | 160 ------------------------------
 nodes/home.backups.py             |   3 +-
 nodes/home.server.py              |   5 +-
 nodes/htz.mails.py                |   4 +-
 requirements.txt                  |   2 +-
 8 files changed, 21 insertions(+), 319 deletions(-)
 delete mode 100644 items/zfs_dataset.py
 delete mode 100644 items/zfs_pool.py

diff --git a/bundles/backup-server/metadata.py b/bundles/backup-server/metadata.py
index a495959..36d46b6 100644
--- a/bundles/backup-server/metadata.py
+++ b/bundles/backup-server/metadata.py
@@ -30,7 +30,7 @@ def zfs(metadata):
     ):
         # container
         datasets[f"tank/{other_node.metadata.get('id')}"] = {
-            'mountpoint': 'none',
+            'mountpoint': None,
             'readonly': 'on',
             'backup': False,
         }
@@ -47,7 +47,7 @@ def zfs(metadata):
         for dataset, config in other_node.metadata.get('zfs/datasets').items():
             if path == config.get('mountpoint'):
                 datasets[f"tank/{other_node.metadata.get('id')}/{dataset}"] = {
-                    'mountpoint': 'none',
+                    'mountpoint': None,
                     'readonly': 'on',
                     'backup': False,
                 }
diff --git a/bundles/zfs/items.py b/bundles/zfs/items.py
index 1d3fe18..8d74a6f 100644
--- a/bundles/zfs/items.py
+++ b/bundles/zfs/items.py
@@ -40,12 +40,14 @@ for name, config in node.metadata.get('zfs/datasets', {}).items():
     zfs_datasets[name].pop('backup', None)
 
 for name, config in node.metadata.get('zfs/pools', {}).items():
-    zfs_pools[name] = config
-
-    actions[f'pool_{name}_enable_trim'] = {
-        'command': f'zpool set autotrim=on {name}',
-        'unless': f'zpool get autotrim -H -o value {name} | grep -q on',
-        'needs': [
-            f'zfs_pool:{name}'
-        ]
-    }
+    zfs_pools[name] = {
+        "when_creating": {
+            "config": [
+                {
+                    "type": config.get('type', None),
+                    "devices": config['devices'],
+                },
+            ],
+        },
+        "autotrim": True,
+    }
diff --git a/items/zfs_dataset.py b/items/zfs_dataset.py
deleted file mode 100644
index 730dc18..0000000
--- a/items/zfs_dataset.py
+++ /dev/null
@@ -1,144 +0,0 @@
-from pipes import quote
-
-from bundlewrap.exceptions import BundleError
-from bundlewrap.items import Item
-from bundlewrap.utils.text import mark_for_translation as _
-
-
-def create(node, path, options):
-    option_list = []
-    for option, value in sorted(options.items()):
-        # We must exclude the 'mounted' property here because it's a
-        # read-only "informational" property.
-        if option != 'mounted' and value is not None:
-            option_list.append("-o {}={}".format(quote(option), quote(value)))
-    option_args = " ".join(option_list)
-
-    node.run(
-        "zfs create -p {} {}".format(
-            option_args,
-            quote(path),
-        ),
-        may_fail=True,
-    )
-
-    if options['mounted'] == 'no':
-        set_option(node, path, 'mounted', 'no')
-
-
-def does_exist(node, path):
-    status_result = node.run(
-        "zfs list {}".format(quote(path)),
-        may_fail=True,
-    )
-    return status_result.return_code == 0
-
-
-def get_option(node, path, option):
-    cmd = "zfs get -Hp -o value {} {}".format(quote(option), quote(path))
-    # We always expect this to succeed since we don't call this function
-    # if we have already established that the dataset does not exist.
-    status_result = node.run(cmd)
-    return status_result.stdout.decode('utf-8').strip()
-
-
-def set_option(node, path, option, value):
-    if option == 'mounted':
-        # 'mounted' is a read-only property that can not be altered by
-        # 'set'. We need to call 'zfs mount tank/foo'.
-        node.run(
-            "zfs {} {}".format(
-                "mount" if value == 'yes' else "unmount",
-                quote(path),
-            ),
-            may_fail=True,
-        )
-    else:
-        node.run(
-            "zfs set {}={} {}".format(
-                quote(option),
-                quote(value),
-                quote(path),
-            ),
-            may_fail=True,
-        )
-
-
-class ZFSDataset(Item):
-    """
-    Creates ZFS datasets and manages their options.
-    """
-    BUNDLE_ATTRIBUTE_NAME = "zfs_datasets"
-    ITEM_ATTRIBUTES = {
-        'atime': None,
-        'relatime': None,
-        'acltype': None,
-        'compression': None,
-        'mountpoint': None,
-        'quota': None,
-        'recordsize': None,
-        'dedup': None,
-        'logbias': None,
-        'readonly': None,
-    }
-    ITEM_TYPE_NAME = "zfs_dataset"
-
-    def __repr__(self):
-        return f"<ZFSDataset name:{self.name}>"
-
-    def cdict(self):
-        cdict = {}
-        for option, value in self.attributes.items():
-            if option == 'mountpoint' and value is None:
-                value = "none"
-            if value is not None:
-                cdict[option] = value
-        cdict['mounted'] = 'no' if cdict.get('mountpoint') in (None, "none") else 'yes'
-        return cdict
-
-    def fix(self, status):
-        if status.must_be_created:
-            create(self.node, self.name, status.cdict)
-        else:
-            for option in status.keys_to_fix:
-                set_option(self.node, self.name, option, status.cdict[option])
-
-    def get_auto_deps(self, items):
-        pool = self.name.split("/")[0]
-        pool_item = "zfs_pool:{}".format(pool)
-        pool_item_found = False
-
-        for item in items:
-            if item.ITEM_TYPE_NAME == "zfs_pool" and item.name == pool:
-                # Add dependency to the pool this dataset resides on.
-                pool_item_found = True
-                yield pool_item
-            elif (
-                item.ITEM_TYPE_NAME == "zfs_dataset" and
-                self.name != item.name and
-                self.name.startswith(item.name + "/")
-            ):
-                # Find all other datasets that are parents of this
-                # dataset.
-                # XXX Could be optimized by finding the "largest"
-                # parent only.
-                yield item.id
-
-        if not pool_item_found:
-            raise BundleError(_(
-                "ZFS dataset {dataset} resides on pool {pool} but item "
-                "{dep} does not exist"
-            ).format(
-                dataset=self.name,
-                pool=pool,
-                dep=pool_item,
-            ))
-
-    def sdict(self):
-        if not does_exist(self.node, self.name):
-            return None
-
-        sdict = {}
-        for option, value in self.attributes.items():
-            sdict[option] = get_option(self.node, self.name, option)
-        sdict['mounted'] = get_option(self.node, self.name, 'mounted')
-        return sdict
diff --git a/items/zfs_pool.py b/items/zfs_pool.py
deleted file mode 100644
index 1b7419e..0000000
--- a/items/zfs_pool.py
+++ /dev/null
@@ -1,160 +0,0 @@
-from collections import Counter
-from pipes import quote
-
-from bundlewrap.exceptions import BundleError
-from bundlewrap.items import Item
-from bundlewrap.utils.text import mark_for_translation as _
-
-
-def create_mirrors(node, path, mirrors):
-    cmd = ""
-    for devices in mirrors:
-        actual_targets = []
-        for device in devices:
-            actual_targets.append(quote(prepare_blockdevice(node, device)))
-        cmd += "mirror {} ".format(" ".join(actual_targets))
-
-    node.run("zpool create {} {}".format(quote(path), cmd))
-    node.run("zfs unmount {}".format(quote(path)))
-
-
-def create_raidz(node, path, devices, raid='raidz'):
-    cmd = ""
-    actual_targets = []
-    for device in devices:
-        actual_targets.append(quote(prepare_blockdevice(node, device)))
-    cmd += "{} {} ".format(raid, " ".join(actual_targets))
-
-    node.run("zpool create {} {}".format(quote(path), cmd))
-    node.run("zfs unmount {}".format(quote(path)))
-
-
-def create_single(node, path, device):
-    actual_target = prepare_blockdevice(node, device)
-    node.run("zpool create {} {}".format(quote(path), quote(actual_target)))
-    node.run("zfs unmount {}".format(quote(path)))
-
-
-def does_exist(node, path):
-    status_result = node.run(
-        "zpool list {}".format(quote(path)),
-        may_fail=True,
-    )
-    return status_result.return_code == 0
-
-
-def prepare_blockdevice(node, device):
-    # To increase our chances of success, we run partprobe beforehand to
-    # make the kernel re-scan all devices.
-    node.run("partprobe", may_fail=True)
-
-    # Try to find out if the device already contains some filesystem.
-    # Please note that there is no 100% reliable way to do this.
-    res = node.run("lsblk -rndo fstype {}".format(quote(device)))
-    detected = res.stdout.decode('UTF-8').strip()
-    if detected != "":
-        raise Exception(_("Device {} to be used for ZFS, but it is not empty! Has '{}'.").format(
-            device, detected))
-    else:
-        return device
-
-
-class ZFSPool(Item):
-    """
-    Creates ZFS pools and the required partitions.
-    """
-    BUNDLE_ATTRIBUTE_NAME = "zfs_pools"
-    ITEM_ATTRIBUTES = {
-        'device': None,
-        'mirrors': None,
-        'raidz': None,
-        'raidz2': None,
-        'raidz3': None,
-    }
-    ITEM_TYPE_NAME = "zfs_pool"
-
-    def __repr__(self):
-        return "<ZFSPool name:{} device:{} mirrors:{} raidz:{}>".format(
-            self.name,
-            self.attributes['device'],
-            self.attributes['mirrors'],
-            self.attributes['raidz'],
-        )
-
-    def cdict(self):
-        return {}
-
-    @property
-    def devices_used(self):
-        devices = []
-        if self.attributes['device'] is not None:
-            devices.append(self.attributes['device'])
-        if self.attributes['mirrors'] is not None:
-            for mirror in self.attributes['mirrors']:
-                devices.extend(mirror)
-        if self.attributes['raidz'] is not None:
-            devices.extend(self.attributes['raidz'])
-        return devices
-
-    def fix(self, status):
-        if status.must_be_created:
-            if self.attributes['device'] is not None:
-                create_single(self.node, self.name, self.attributes['device'])
-            elif self.attributes['mirrors'] is not None:
-                create_mirrors(self.node, self.name, self.attributes['mirrors'])
-            elif self.attributes['raidz'] is not None:
-                create_raidz(self.node, self.name, self.attributes['raidz'])
-            elif self.attributes['raidz2'] is not None:
-                create_raidz(self.node, self.name, self.attributes['raidz'], 'raidz2')
-            elif self.attributes['raidz2'] is not None:
-                create_raidz(self.node, self.name, self.attributes['raidz'], 'raidz3')
-
-    def sdict(self):
-        # We don't care about the device if the pool already exists.
-        return {} if does_exist(self.node, self.name) else None
-
-    def test(self):
-        duplicate_devices = [
-            item for item, count in Counter(self.devices_used).items() if count > 1
-        ]
-        if duplicate_devices:
-            raise BundleError(_(
-                "{item} on node {node} uses {devices} more than once as an underlying device"
-            ).format(
-                item=self.id,
-                node=self.node.name,
-                devices=_(" and ").join(duplicate_devices),
-            ))
-
-        # Have a look at all other ZFS pools on this node and check if
-        # multiple pools try to use the same device.
-        for item in self.node.items:
-            if (
-                item.ITEM_TYPE_NAME == "zfs_pool" and
-                item.name != self.name and
-                set(item.devices_used).intersection(set(self.devices_used))
-            ):
-                raise BundleError(_(
-                    "Both the ZFS pools {self} and {other} on node {node} "
-                    "try to use {devices} as the underlying storage device"
-                ).format(
-                    self=self.name,
-                    other=item.name,
-                    node=self.node.name,
-                    devices=_(" and ").join(set(item.devices_used).intersection(set(self.devices_used))),
-                ))
-
-    @classmethod
-    def validate_attributes(cls, bundle, item_id, attributes):
-        device_config = []
-        for key in ('device', 'mirrors', 'raidz', 'raidz2', 'raidz3'):
-            device_config.append(attributes.get(key))
-        device_config = [key for key in device_config if key is not None]
-        if len(device_config) != 1:
-            raise BundleError(_(
-                "{item} on node {node} must have exactly one of "
-                "'device', 'mirrors', 'raidz', 'raidz2' or 'raidz3'"
-            ).format(
-                item=item_id,
-                node=bundle.node.name,
-            ))
diff --git a/nodes/home.backups.py b/nodes/home.backups.py
index 3aeaadc..ffe982d 100644
--- a/nodes/home.backups.py
+++ b/nodes/home.backups.py
@@ -23,7 +23,8 @@
     'zfs': {
         'pools': {
             'tank': {
-                'raidz': [
+                'type': 'raidz',
+                'devices': [
                     '/dev/disk/by-id/ata-HGST_HDN726040ALE614_K3GV6TPL',
                     '/dev/disk/by-id/ata-HGST_HDN726040ALE614_K4KAJXEB',
                     '/dev/disk/by-id/ata-TOSHIBA_HDWQ140_19VZK0EMFAYG',
diff --git a/nodes/home.server.py b/nodes/home.server.py
index 7193fff..aa0946a 100644
--- a/nodes/home.server.py
+++ b/nodes/home.server.py
@@ -72,10 +72,11 @@
     'zfs': {
         'pools': {
             'tank': {
-                'mirrors': [[
+                'type': 'mirror',
+                'devices': [
                     '/dev/disk/by-partlabel/zfs-data-1',
                     '/dev/disk/by-partlabel/zfs-data-2',
-                ]],
+                ],
             },
         },
     },
diff --git a/nodes/htz.mails.py b/nodes/htz.mails.py
index 6896978..9b1a271 100644
--- a/nodes/htz.mails.py
+++ b/nodes/htz.mails.py
@@ -175,7 +175,9 @@
     'zfs': {
         'pools': {
             'tank': {
-                'device': '/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0-part2',
+                'devices': [
+                    '/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0-part2',
+                ],
             },
         },
     },
diff --git a/requirements.txt b/requirements.txt
index d5312b2..4340de5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-bundlewrap>=4.10.1
+bundlewrap>=4.12.0
 pycryptodome
 PyNaCl
 PyYAML
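
For context: bundlewrap 4.12 ships built-in zfs_pool and zfs_dataset item
types, which is what allows this commit to delete the custom
items/zfs_dataset.py and items/zfs_pool.py and to collapse the per-layout
metadata keys ('device', 'mirrors', 'raidz') into a uniform 'type' plus
'devices' shape. The standalone Python sketch below mirrors the mapping the
new bundles/zfs/items.py performs; the helper name pool_item_attributes is
hypothetical, and the 'tank' data is copied from nodes/home.backups.py.

def pool_item_attributes(config):
    # One top-level vdev per pool: 'type' stays None for a plain
    # single-device vdev (as on htz.mails), or names a vdev type
    # such as 'mirror' or 'raidz'.
    return {
        'when_creating': {
            'config': [
                {
                    'type': config.get('type'),
                    'devices': config['devices'],
                },
            ],
        },
        # autotrim as an item attribute replaces the separate
        # 'zpool set autotrim=on' action the old bundle generated.
        'autotrim': True,
    }


pools = {
    'tank': {
        'type': 'raidz',
        'devices': [
            '/dev/disk/by-id/ata-HGST_HDN726040ALE614_K3GV6TPL',
            '/dev/disk/by-id/ata-HGST_HDN726040ALE614_K4KAJXEB',
            '/dev/disk/by-id/ata-TOSHIBA_HDWQ140_19VZK0EMFAYG',
        ],
    },
}

zfs_pools = {name: pool_item_attributes(config) for name, config in pools.items()}
assert zfs_pools['tank']['when_creating']['config'][0]['type'] == 'raidz'

Folding autotrim into the item means trim state is now part of the declared
pool config rather than a follow-up command with its own 'unless' guard.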