diff --git a/bundles/mailserver/metadata.py b/bundles/mailserver/metadata.py
index ed29b28..41049ba 100644
--- a/bundles/mailserver/metadata.py
+++ b/bundles/mailserver/metadata.py
@@ -10,14 +10,6 @@ defaults = {
             'password': database_password,
         },
     },
-    'zfs': {
-        'datasets': {
-            'tank/vmail': {
-                'mountpoint': '/var/vmail',
-                'compression': 'on',
-            },
-        },
-    },
     'postgresql': {
         'roles': {
             'mailserver': {
@@ -30,4 +22,12 @@ defaults = {
             },
         },
     },
+    'zfs': {
+        'datasets': {
+            'tank/vmail': {
+                'mountpoint': '/var/vmail',
+                'compression': 'on',
+            },
+        },
+    },
 }
diff --git a/bundles/zfs/items.py b/bundles/zfs/items.py
index 3603d0c..4c1d2ac 100644
--- a/bundles/zfs/items.py
+++ b/bundles/zfs/items.py
@@ -26,16 +26,16 @@ svc_systemd = {
     },
 }
 
-zfs_datasets = node.metadata.get('zfs/datasets', {})
+zfs_datasets = node.metadata.get('zfs/datasets')
 zfs_pools = {}
 
-# TRIM
 for name, attrs in node.metadata.get('zfs/pools', {}).items():
     zfs_pools[name] = attrs
-    actions[f'pool_{name}_enable_trim'] = {
-        'command': f'zpool set autotrim=on {name}',
-        'unless': f'zpool get autotrim -H -o value {name} | grep -q on',
-        'needs': [
-            f'zfs_pool:{name}'
-        ]
-    }
+
+    # actions[f'pool_{name}_enable_trim'] = {
+    #     'command': f'zpool set autotrim=on {name}',
+    #     'unless': f'zpool get autotrim -H -o value {name} | grep -q on',
+    #     'needs': [
+    #         f'zfs_pool:{name}'
+    #     ]
+    # }
diff --git a/bundles/zfs/metadata.py b/bundles/zfs/metadata.py
index 4139f33..40010d8 100644
--- a/bundles/zfs/metadata.py
+++ b/bundles/zfs/metadata.py
@@ -27,11 +27,6 @@ defaults = {
             'zfs_pool:',
         },
     },
-    'parted': {
-        'needed_by': {
-            'zfs_pool:',
-        },
-    },
 },
 },
 'zfs': {
diff --git a/items/zfs_dataset.py b/items/zfs_dataset.py
new file mode 100644
index 0000000..90a7886
--- /dev/null
+++ b/items/zfs_dataset.py
@@ -0,0 +1,152 @@
+from pipes import quote
+
+from bundlewrap.exceptions import BundleError
+from bundlewrap.items import Item
+from bundlewrap.utils.text import mark_for_translation as _
+
+
+def create(node, path, options):
+    option_list = []
+    for option, value in sorted(options.items()):
+        # We must exclude the 'mounted' property here because it's a
+        # read-only "informational" property.
+        if option != 'mounted' and value is not None:
+            option_list.append("-o {}={}".format(quote(option), quote(value)))
+    option_args = " ".join(option_list)
+
+    node.run(
+        "zfs create {} {}".format(
+            option_args,
+            quote(path),
+        ),
+        may_fail=True,
+    )
+
+    if options['mounted'] == 'no':
+        set_option(node, path, 'mounted', 'no')
+
+
+def does_exist(node, path):
+    status_result = node.run(
+        "zfs list {}".format(quote(path)),
+        may_fail=True,
+    )
+    return status_result.return_code == 0
+
+
+def get_option(node, path, option):
+    cmd = "zfs get -Hp -o value {} {}".format(quote(option), quote(path))
+    # We always expect this to succeed since we don't call this function
+    # if we have already established that the dataset does not exist.
+    status_result = node.run(cmd)
+    return status_result.stdout.decode('utf-8').strip()
+
+
+def set_option(node, path, option, value):
+    if option == 'mounted':
+        # 'mounted' is a read-only property that cannot be altered by
+        # 'set'. We need to call 'zfs mount tank/foo'.
+        node.run(
+            "zfs {} {}".format(
+                "mount" if value == 'yes' else "unmount",
+                quote(path),
+            ),
+            may_fail=True,
+        )
+    else:
+        node.run(
+            "zfs set {}={} {}".format(
+                quote(option),
+                quote(value),
+                quote(path),
+            ),
+            may_fail=True,
+        )
+
+
+class ZFSDataset(Item):
+    """
+    Creates ZFS datasets and manages their options.
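+
+    Example bundle usage (a sketch; the dataset shown mirrors the
+    mailserver bundle above):
+
+        zfs_datasets = {
+            'tank/vmail': {
+                'mountpoint': '/var/vmail',
+                'compression': 'on',
+            },
+        }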
+ """ + BUNDLE_ATTRIBUTE_NAME = "zfs_datasets" + ITEM_ATTRIBUTES = { + 'atime': None, + 'acltype': None, + 'compression': None, + 'mountpoint': None, + 'quota': None, + 'recordsize': None, + 'dedup': None, + } + ITEM_TYPE_NAME = "zfs_dataset" + + def __repr__(self): + return f"" + + def cdict(self): + cdict = {} + for option, value in self.attributes.items(): + if option == 'mountpoint' and value is None: + value = "none" + if value is not None: + cdict[option] = value + cdict['mounted'] = 'no' if cdict.get('mountpoint') in (None, "none") else 'yes' + return cdict + + def fix(self, status): + if status.must_be_created: + create(self.node, self.name, status.cdict) + else: + for option in status.keys_to_fix: + set_option(self.node, self.name, option, status.cdict[option]) + + def get_auto_deps(self, items): + pool = self.name.split("/")[0] + pool_item = "zfs_pool:{}".format(pool) + pool_item_found = False + + for item in items: + if item.ITEM_TYPE_NAME == "zfs_pool" and item.name == pool: + # Add dependency to the pool this dataset resides on. + pool_item_found = True + yield pool_item + elif ( + item.ITEM_TYPE_NAME == "zfs_dataset" and + self.name != item.name and + self.name.startswith(item.name + "/") + ): + # Find all other datasets that are parents of this + # dataset. + # XXX Could be optimized by finding the "largest" + # parent only. + yield item.id + + if not pool_item_found: + raise BundleError(_( + "ZFS dataset {dataset} resides on pool {pool} but item " + "{dep} does not exist" + ).format( + dataset=self.name, + pool=pool, + dep=pool_item, + )) + + def sdict(self): + if not does_exist(self.node, self.name): + return None + + sdict = {} + for option, value in self.attributes.items(): + sdict[option] = get_option(self.node, self.name, option) + sdict['mounted'] = get_option(self.node, self.name, 'mounted') + return sdict diff --git a/items/zfs_pool.py b/items/zfs_pool.py new file mode 100644 index 0000000..1b7419e --- /dev/null +++ b/items/zfs_pool.py @@ -0,0 +1,160 @@ +from collections import Counter +from pipes import quote + +from bundlewrap.exceptions import BundleError +from bundlewrap.items import Item +from bundlewrap.utils.text import mark_for_translation as _ + + +def create_mirrors(node, path, mirrors): + cmd = "" + for devices in mirrors: + actual_targets = [] + for device in devices: + actual_targets.append(quote(prepare_blockdevice(node, device))) + cmd += "mirror {} ".format(" ".join(actual_targets)) + + node.run("zpool create {} {}".format(quote(path), cmd)) + node.run("zfs unmount {}".format(quote(path))) + + +def create_raidz(node, path, devices, raid='raidz'): + cmd = "" + actual_targets = [] + for device in devices: + actual_targets.append(quote(prepare_blockdevice(node, device))) + cmd += "{} {} ".format(raid, " ".join(actual_targets)) + + node.run("zpool create {} {}".format(quote(path), cmd)) + node.run("zfs unmount {}".format(quote(path))) + + +def create_single(node, path, device): + actual_target = prepare_blockdevice(node, device) + node.run("zpool create {} {}".format(quote(path), quote(actual_target))) + node.run("zfs unmount {}".format(quote(path))) + + +def does_exist(node, path): + status_result = node.run( + "zpool list {}".format(quote(path)), + may_fail=True, + ) + return status_result.return_code == 0 + + +def prepare_blockdevice(node, device): + # To increase our chances of success, we run partprobe beforehand to + # make the kernel re-scan all devices. 
+ node.run("partprobe", may_fail=True) + + # Try to find out if the device already contains some filesystem. + # Please note that there is no 100% reliable way to do this. + res = node.run("lsblk -rndo fstype {}".format(quote(device))) + detected = res.stdout.decode('UTF-8').strip() + if detected != "": + raise Exception(_("Device {} to be used for ZFS, but it is not empty! Has '{}'.").format( + device, detected)) + else: + return device + + +class ZFSPool(Item): + """ + Creates ZFS pools and the required partitions. + """ + BUNDLE_ATTRIBUTE_NAME = "zfs_pools" + ITEM_ATTRIBUTES = { + 'device': None, + 'mirrors': None, + 'raidz': None, + 'raidz2': None, + 'raidz3': None, + } + ITEM_TYPE_NAME = "zfs_pool" + + def __repr__(self): + return "".format( + self.name, + self.attributes['device'], + self.attributes['mirrors'], + self.attributes['raidz'], + ) + + def cdict(self): + return {} + + @property + def devices_used(self): + devices = [] + if self.attributes['device'] is not None: + devices.append(self.attributes['device']) + if self.attributes['mirrors'] is not None: + for mirror in self.attributes['mirrors']: + devices.extend(mirror) + if self.attributes['raidz'] is not None: + devices.extend(self.attributes['raidz']) + return devices + + def fix(self, status): + if status.must_be_created: + if self.attributes['device'] is not None: + create_single(self.node, self.name, self.attributes['device']) + elif self.attributes['mirrors'] is not None: + create_mirrors(self.node, self.name, self.attributes['mirrors']) + elif self.attributes['raidz'] is not None: + create_raidz(self.node, self.name, self.attributes['raidz']) + elif self.attributes['raidz2'] is not None: + create_raidz(self.node, self.name, self.attributes['raidz'], 'raidz2') + elif self.attributes['raidz2'] is not None: + create_raidz(self.node, self.name, self.attributes['raidz'], 'raidz3') + + def sdict(self): + # We don't care about the device if the pool already exists. + return {} if does_exist(self.node, self.name) else None + + def test(self): + duplicate_devices = [ + item for item, count in Counter(self.devices_used).items() if count > 1 + ] + if duplicate_devices: + raise BundleError(_( + "{item} on node {node} uses {devices} more than once as an underlying device" + ).format( + item=self.id, + node=self.node.name, + devices=_(" and ").join(duplicate_devices), + )) + + # Have a look at all other ZFS pools on this node and check if + # multiple pools try to use the same device. 
+    def fix(self, status):
+        if status.must_be_created:
+            if self.attributes['device'] is not None:
+                create_single(self.node, self.name, self.attributes['device'])
+            elif self.attributes['mirrors'] is not None:
+                create_mirrors(self.node, self.name, self.attributes['mirrors'])
+            elif self.attributes['raidz'] is not None:
+                create_raidz(self.node, self.name, self.attributes['raidz'])
+            elif self.attributes['raidz2'] is not None:
+                create_raidz(self.node, self.name, self.attributes['raidz2'], 'raidz2')
+            elif self.attributes['raidz3'] is not None:
+                create_raidz(self.node, self.name, self.attributes['raidz3'], 'raidz3')
+
+    def sdict(self):
+        # We don't care about the device if the pool already exists.
+        return {} if does_exist(self.node, self.name) else None
+
+    def test(self):
+        duplicate_devices = [
+            item for item, count in Counter(self.devices_used).items() if count > 1
+        ]
+        if duplicate_devices:
+            raise BundleError(_(
+                "{item} on node {node} uses {devices} more than once as an underlying device"
+            ).format(
+                item=self.id,
+                node=self.node.name,
+                devices=_(" and ").join(duplicate_devices),
+            ))
+
+        # Have a look at all other ZFS pools on this node and check if
+        # multiple pools try to use the same device.
+        for item in self.node.items:
+            if (
+                item.ITEM_TYPE_NAME == "zfs_pool" and
+                item.name != self.name and
+                set(item.devices_used).intersection(set(self.devices_used))
+            ):
+                raise BundleError(_(
+                    "Both the ZFS pools {self} and {other} on node {node} "
+                    "try to use {devices} as the underlying storage device"
+                ).format(
+                    self=self.name,
+                    other=item.name,
+                    node=self.node.name,
+                    devices=_(" and ").join(set(item.devices_used).intersection(set(self.devices_used))),
+                ))
+
+    @classmethod
+    def validate_attributes(cls, bundle, item_id, attributes):
+        device_config = []
+        for key in ('device', 'mirrors', 'raidz', 'raidz2', 'raidz3'):
+            device_config.append(attributes.get(key))
+        device_config = [key for key in device_config if key is not None]
+        if len(device_config) != 1:
+            raise BundleError(_(
+                "{item} on node {node} must have exactly one of "
+                "'device', 'mirrors', 'raidz', 'raidz2' or 'raidz3'"
+            ).format(
+                item=item_id,
+                node=bundle.node.name,
+            ))
diff --git a/nodes/htz.mails.py b/nodes/htz.mails.py
index 438ac49..95f98e4 100644
--- a/nodes/htz.mails.py
+++ b/nodes/htz.mails.py
@@ -30,6 +30,13 @@
         'mailserver': {
             'admin_email': 'postmaster@sublimity.de',
             'hostname': 'mail.sublimity.de',
-        },
+        },
+        'zfs': {
+            'pools': {
+                'tank': {
+                    'device': '/dev/disk/by-id/scsi-0HC_Volume_11764264',
+                },
+            },
+        },
     },
 }