mwiegand 2021-10-10 18:08:41 +02:00
parent c888aca1af
commit 7302811418
8 changed files with 21 additions and 319 deletions

View file

@@ -30,7 +30,7 @@ def zfs(metadata):
):
# container
datasets[f"tank/{other_node.metadata.get('id')}"] = {
'mountpoint': 'none',
'mountpoint': None,
'readonly': 'on',
'backup': False,
}
@@ -47,7 +47,7 @@ def zfs(metadata):
for dataset, config in other_node.metadata.get('zfs/datasets').items():
if path == config.get('mountpoint'):
datasets[f"tank/{other_node.metadata.get('id')}/{dataset}"] = {
'mountpoint': 'none',
'mountpoint': None,
'readonly': 'on',
'backup': False,
}
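
Both hunks above adjust the receive-side datasets this reactor registers for other nodes' backups: the mountpoint changes from the literal string 'none' to Python None. The custom zfs_dataset item deleted further down in this commit translated None back into 'none' before calling zfs, and the built-in replacement is assumed to behave the same way. A rough sketch of the resulting entry for a hypothetical source node 'node2':

# Hypothetical receive-side entry produced by the reactor above
datasets['tank/node2'] = {
    'mountpoint': None,   # expected to end up as mountpoint=none on the zfs command line
    'readonly': 'on',
    'backup': False,      # stripped again in items.py, never passed to zfs
}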

View file

@@ -40,12 +40,14 @@ for name, config in node.metadata.get('zfs/datasets', {}).items():
zfs_datasets[name].pop('backup', None)
for name, config in node.metadata.get('zfs/pools', {}).items():
zfs_pools[name] = config
actions[f'pool_{name}_enable_trim'] = {
'command': f'zpool set autotrim=on {name}',
'unless': f'zpool get autotrim -H -o value {name} | grep -q on',
'needs': [
f'zfs_pool:{name}'
]
zfs_pools[name] = {
"when_creating": {
"config": [
{
"type": config.get('type', None),
"devices": config['devices'],
},
],
},
"autotrim": True,
}
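
As a reading aid, a minimal sketch of what the rewritten loop above now generates from a pool's metadata (pool name and device paths are hypothetical):

# Given metadata like
#   'zfs': {'pools': {'tank': {'type': 'mirror', 'devices': ['/dev/sda', '/dev/sdb']}}}
# the loop produces the item
zfs_pools['tank'] = {
    "when_creating": {
        "config": [
            {
                "type": "mirror",   # None when the metadata gives no 'type' (plain striped pool)
                "devices": ["/dev/sda", "/dev/sdb"],
            },
        ],
    },
    "autotrim": True,   # replaces the removed 'zpool set autotrim=on' action
}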

View file

@@ -1,144 +0,0 @@
from pipes import quote
from bundlewrap.exceptions import BundleError
from bundlewrap.items import Item
from bundlewrap.utils.text import mark_for_translation as _
def create(node, path, options):
option_list = []
for option, value in sorted(options.items()):
# We must exclude the 'mounted' property here because it's a
# read-only "informational" property.
if option != 'mounted' and value is not None:
option_list.append("-o {}={}".format(quote(option), quote(value)))
option_args = " ".join(option_list)
node.run(
"zfs create -p {} {}".format(
option_args,
quote(path),
),
may_fail=True,
)
if options['mounted'] == 'no':
set_option(node, path, 'mounted', 'no')
def does_exist(node, path):
status_result = node.run(
"zfs list {}".format(quote(path)),
may_fail=True,
)
return status_result.return_code == 0
def get_option(node, path, option):
cmd = "zfs get -Hp -o value {} {}".format(quote(option), quote(path))
# We always expect this to succeed since we don't call this function
# if we have already established that the dataset does not exist.
status_result = node.run(cmd)
return status_result.stdout.decode('utf-8').strip()
def set_option(node, path, option, value):
if option == 'mounted':
# 'mounted' is a read-only property that can not be altered by
# 'set'. We need to call 'zfs mount tank/foo'.
node.run(
"zfs {} {}".format(
"mount" if value == 'yes' else "unmount",
quote(path),
),
may_fail=True,
)
else:
node.run(
"zfs set {}={} {}".format(
quote(option),
quote(value),
quote(path),
),
may_fail=True,
)
class ZFSDataset(Item):
"""
Creates ZFS datasets and manages their options.
"""
BUNDLE_ATTRIBUTE_NAME = "zfs_datasets"
ITEM_ATTRIBUTES = {
'atime': None,
'relatime': None,
'acltype': None,
'compression': None,
'mountpoint': None,
'quota': None,
'recordsize': None,
'dedup': None,
'logbias': None,
'readonly': None,
}
ITEM_TYPE_NAME = "zfs_dataset"
def __repr__(self):
return f"<ZFSDataset name:{self.name} {' '.join(f'{k}:{v}' for k,v in self.attributes.items())}>"
def cdict(self):
cdict = {}
for option, value in self.attributes.items():
if option == 'mountpoint' and value is None:
value = "none"
if value is not None:
cdict[option] = value
cdict['mounted'] = 'no' if cdict.get('mountpoint') in (None, "none") else 'yes'
return cdict
def fix(self, status):
if status.must_be_created:
create(self.node, self.name, status.cdict)
else:
for option in status.keys_to_fix:
set_option(self.node, self.name, option, status.cdict[option])
def get_auto_deps(self, items):
pool = self.name.split("/")[0]
pool_item = "zfs_pool:{}".format(pool)
pool_item_found = False
for item in items:
if item.ITEM_TYPE_NAME == "zfs_pool" and item.name == pool:
# Add dependency to the pool this dataset resides on.
pool_item_found = True
yield pool_item
elif (
item.ITEM_TYPE_NAME == "zfs_dataset" and
self.name != item.name and
self.name.startswith(item.name + "/")
):
# Find all other datasets that are parents of this
# dataset.
# XXX Could be optimized by finding the "largest"
# parent only.
yield item.id
if not pool_item_found:
raise BundleError(_(
"ZFS dataset {dataset} resides on pool {pool} but item "
"{dep} does not exist"
).format(
dataset=self.name,
pool=pool,
dep=pool_item,
))
def sdict(self):
if not does_exist(self.node, self.name):
return None
sdict = {}
for option, value in self.attributes.items():
sdict[option] = get_option(self.node, self.name, option)
sdict['mounted'] = get_option(self.node, self.name, 'mounted')
return sdict
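
The custom zfs_dataset item plugin removed above is superseded by bundlewrap's built-in zfs_dataset item, which is presumably why requirements.txt at the end of this commit moves to bundlewrap>=4.12.0. A minimal usage sketch, assuming the built-in item accepts the attribute names used in this commit's metadata (dataset name and mountpoint are illustrative):

# items.py of a bundle, relying on the built-in item instead of the deleted plugin
zfs_datasets = {
    'tank/example': {   # hypothetical dataset
        'mountpoint': '/example',
        'readonly': 'on',
    },
}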

View file

@@ -1,160 +0,0 @@
from collections import Counter
from pipes import quote
from bundlewrap.exceptions import BundleError
from bundlewrap.items import Item
from bundlewrap.utils.text import mark_for_translation as _
def create_mirrors(node, path, mirrors):
cmd = ""
for devices in mirrors:
actual_targets = []
for device in devices:
actual_targets.append(quote(prepare_blockdevice(node, device)))
cmd += "mirror {} ".format(" ".join(actual_targets))
node.run("zpool create {} {}".format(quote(path), cmd))
node.run("zfs unmount {}".format(quote(path)))
def create_raidz(node, path, devices, raid='raidz'):
cmd = ""
actual_targets = []
for device in devices:
actual_targets.append(quote(prepare_blockdevice(node, device)))
cmd += "{} {} ".format(raid, " ".join(actual_targets))
node.run("zpool create {} {}".format(quote(path), cmd))
node.run("zfs unmount {}".format(quote(path)))
def create_single(node, path, device):
actual_target = prepare_blockdevice(node, device)
node.run("zpool create {} {}".format(quote(path), quote(actual_target)))
node.run("zfs unmount {}".format(quote(path)))
def does_exist(node, path):
status_result = node.run(
"zpool list {}".format(quote(path)),
may_fail=True,
)
return status_result.return_code == 0
def prepare_blockdevice(node, device):
# To increase our chances of success, we run partprobe beforehand to
# make the kernel re-scan all devices.
node.run("partprobe", may_fail=True)
# Try to find out if the device already contains some filesystem.
# Please note that there is no 100% reliable way to do this.
res = node.run("lsblk -rndo fstype {}".format(quote(device)))
detected = res.stdout.decode('UTF-8').strip()
if detected != "":
raise Exception(_("Device {} to be used for ZFS, but it is not empty! Has '{}'.").format(
device, detected))
else:
return device
class ZFSPool(Item):
"""
Creates ZFS pools and the required partitions.
"""
BUNDLE_ATTRIBUTE_NAME = "zfs_pools"
ITEM_ATTRIBUTES = {
'device': None,
'mirrors': None,
'raidz': None,
'raidz2': None,
'raidz3': None,
}
ITEM_TYPE_NAME = "zfs_pool"
def __repr__(self):
return "<ZFSPool name:{} device:{} mirrors:{} raidz:{}>".format(
self.name,
self.attributes['device'],
self.attributes['mirrors'],
self.attributes['raidz'],
)
def cdict(self):
return {}
@property
def devices_used(self):
devices = []
if self.attributes['device'] is not None:
devices.append(self.attributes['device'])
if self.attributes['mirrors'] is not None:
for mirror in self.attributes['mirrors']:
devices.extend(mirror)
if self.attributes['raidz'] is not None:
devices.extend(self.attributes['raidz'])
return devices
def fix(self, status):
if status.must_be_created:
if self.attributes['device'] is not None:
create_single(self.node, self.name, self.attributes['device'])
elif self.attributes['mirrors'] is not None:
create_mirrors(self.node, self.name, self.attributes['mirrors'])
elif self.attributes['raidz'] is not None:
create_raidz(self.node, self.name, self.attributes['raidz'])
elif self.attributes['raidz2'] is not None:
create_raidz(self.node, self.name, self.attributes['raidz2'], 'raidz2')
elif self.attributes['raidz3'] is not None:
create_raidz(self.node, self.name, self.attributes['raidz3'], 'raidz3')
def sdict(self):
# We don't care about the device if the pool already exists.
return {} if does_exist(self.node, self.name) else None
def test(self):
duplicate_devices = [
item for item, count in Counter(self.devices_used).items() if count > 1
]
if duplicate_devices:
raise BundleError(_(
"{item} on node {node} uses {devices} more than once as an underlying device"
).format(
item=self.id,
node=self.node.name,
devices=_(" and ").join(duplicate_devices),
))
# Have a look at all other ZFS pools on this node and check if
# multiple pools try to use the same device.
for item in self.node.items:
if (
item.ITEM_TYPE_NAME == "zfs_pool" and
item.name != self.name and
set(item.devices_used).intersection(set(self.devices_used))
):
raise BundleError(_(
"Both the ZFS pools {self} and {other} on node {node} "
"try to use {devices} as the underlying storage device"
).format(
self=self.name,
other=item.name,
node=self.node.name,
devices=_(" and ").join(set(item.devices_used).intersection(set(self.devices_used))),
))
@classmethod
def validate_attributes(cls, bundle, item_id, attributes):
device_config = []
for key in ('device', 'mirrors', 'raidz', 'raidz2', 'raidz3'):
device_config.append(attributes.get(key))
device_config = [key for key in device_config if key is not None]
if len(device_config) != 1:
raise BundleError(_(
"{item} on node {node} must have exactly one of "
"'device', 'mirrors', 'raidz', 'raidz2' or 'raidz3'"
).format(
item=item_id,
node=bundle.node.name,
))
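
Likewise, the custom zfs_pool plugin removed above gives way to the built-in zfs_pool item, whose when_creating and autotrim attributes already appear in the items.py hunk earlier in this commit. A minimal sketch with hypothetical device paths:

# items.py of a bundle, relying on the built-in item instead of the deleted plugin
zfs_pools = {
    'tank': {
        'when_creating': {
            'config': [
                {
                    'type': 'mirror',   # 'raidz' in the metadata examples that follow; omitted for a single-device pool
                    'devices': [
                        '/dev/disk/by-id/disk-a',   # hypothetical devices
                        '/dev/disk/by-id/disk-b',
                    ],
                },
            ],
        },
        'autotrim': True,
    },
}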

View file

@@ -23,7 +23,8 @@
'zfs': {
'pools': {
'tank': {
'raidz': [
'type': 'raidz',
'devices': [
'/dev/disk/by-id/ata-HGST_HDN726040ALE614_K3GV6TPL',
'/dev/disk/by-id/ata-HGST_HDN726040ALE614_K4KAJXEB',
'/dev/disk/by-id/ata-TOSHIBA_HDWQ140_19VZK0EMFAYG',

View file

@@ -72,10 +72,11 @@
'zfs': {
'pools': {
'tank': {
'mirrors': [[
'type': 'mirror',
'devices': [
'/dev/disk/by-partlabel/zfs-data-1',
'/dev/disk/by-partlabel/zfs-data-2',
]],
],
},
},
},

View file

@@ -175,7 +175,9 @@
'zfs': {
'pools': {
'tank': {
'device': '/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0-part2',
'devices': [
'/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0-part2',
],
},
},
},

View file

@@ -1,4 +1,4 @@
bundlewrap>=4.10.1
bundlewrap>=4.12.0
pycryptodome
PyNaCl
PyYAML