wip
This commit is contained in:
parent a2ceae83bb
commit e65e7185b5
5 changed files with 0 additions and 575 deletions
@@ -1,51 +0,0 @@
from json import dumps

from bundlewrap.metadata import MetadataJSONEncoder


files = {
    '/etc/cron.d/zfsutils-linux': {'delete': True},
    '/etc/cron.d/zfs-auto-snapshot': {'delete': True},
    '/etc/cron.hourly/zfs-auto-snapshot': {'delete': True},
    '/etc/cron.daily/zfs-auto-snapshot': {'delete': True},
    '/etc/cron.weekly/zfs-auto-snapshot': {'delete': True},
    '/etc/cron.monthly/zfs-auto-snapshot': {'delete': True},
}

actions = {
    'modprobe_zfs': {
        'command': 'modprobe zfs',
        'unless': 'lsmod | grep ^zfs',
        'needs': {
            'pkg_apt:zfs-dkms',
        },
        'needed_by': {
            'pkg_apt:zfs-zed',
            'pkg_apt:zfsutils-linux',
            'zfs_dataset:',
            'zfs_pool:',
        },
        'comment': 'If this fails, do a dist-upgrade, reinstall zfs-dkms and reboot',
    },
}

svc_systemd = {
    'zfs-zed': {
        'needs': {
            'pkg_apt:zfs-zed',
        },
    },
}

# Bundle attribute dicts for the custom zfs_dataset and zfs_pool item types;
# they must exist before the loops below fill them from node metadata.
zfs_datasets = {}
zfs_pools = {}

for name, config in node.metadata.get('zfs/datasets', {}).items():
    zfs_datasets[name] = config
    # 'backup' is consumed by the metadata reactor, not by the item type.
    zfs_datasets[name].pop('backup', None)

for name, config in node.metadata.get('zfs/pools', {}).items():
    zfs_pools[name] = config

    # Enable autotrim once per pool; idempotent via the 'unless' check.
    actions[f'pool_{name}_enable_trim'] = {
        'command': f'zpool set autotrim=on {name}',
        'unless': f'zpool get autotrim -H -o value {name} | grep -q on',
        'needs': [
            f'zfs_pool:{name}',
        ],
    }
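For orientation, everything above is driven by node metadata under the 'zfs' key. A consuming node might have been defined roughly like this (a hypothetical nodes.py entry; the node name and device path are illustrative):

'somenode': {
    'bundles': {
        'zfs',
    },
    'metadata': {
        'zfs': {
            'pools': {
                'tank': {
                    'device': '/dev/sdb',
                },
            },
            'datasets': {
                'tank/data': {
                    'mountpoint': '/data',
                },
            },
        },
    },
},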
@@ -1,43 +1,6 @@
#import re

defaults = {
    'apt': {
        'packages': {
            'linux-headers-amd64': {
                'needed_by': {
                    'pkg_apt:zfs-dkms',
                },
            },
            'parted': {
                'needed_by': {
                    'pkg_apt:zfs-zed',
                    'pkg_apt:zfsutils-linux',
                },
            },
            'zfs-dkms': {
                'backports': node.os_version < (11,),
                'needed_by': {
                    'pkg_apt:zfs-zed',
                    'pkg_apt:zfsutils-linux',
                },
            },
            'zfs-zed': {
                'backports': node.os_version < (11,),
                'needed_by': {
                    'zfs_dataset:',
                    'zfs_pool:',
                },
            },
            'zfsutils-linux': {
                'backports': node.os_version < (11,),
                'needed_by': {
                    'pkg_apt:zfs-zed',
                    'zfs_dataset:',
                    'zfs_pool:',
                },
            },
        },
    },
    'systemd-timers': {
        'zfs-trim': {
            'command': '/usr/lib/zfs-linux/trim',

@@ -68,45 +31,4 @@ defaults = {
            'persistent': True,
        },
    },
    'telegraf': {
        'config': {
            'inputs': {
                'zfs': [{}],
            },
        },
    },
    'zfs': {
        'datasets': {},
        'pools': {},
    },
}


@metadata_reactor.provides(
    'zfs/datasets',
)
def dataset_defaults(metadata):
    return {
        'zfs': {
            'datasets': {
                name: {
                    'compression': 'lz4',
                    'relatime': 'on',
                } for name in metadata.get('zfs/datasets')
            },
        },
    }


@metadata_reactor.provides(
    'backup/paths',
)
def backup(metadata):
    return {
        'backup': {
            'paths': [
                options['mountpoint']
                for options in metadata.get('zfs/datasets').values()
                # Skip datasets that opted out of backups or have no mountpoint
                # (the zfs_dataset item treats None and "none" as unmounted).
                if options.get('backup', True)
                and options.get('mountpoint') not in (None, 'none')
            ],
        },
    }
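Given a node shaped like the sketch above, the two reactors would have produced roughly the following effective metadata (an illustrative rendering only; the actual merging is done by bundlewrap's metadata machinery):

{
    'zfs': {
        'datasets': {
            'tank/data': {
                'compression': 'lz4',  # added by dataset_defaults
                'mountpoint': '/data',
                'relatime': 'on',      # added by dataset_defaults
            },
        },
    },
    'backup': {
        'paths': ['/data'],  # collected by backup()
    },
}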
@@ -1,142 +0,0 @@
from bundlewrap.items import Item, ItemStatus
from bundlewrap.exceptions import BundleError
from bundlewrap.utils.text import force_text, mark_for_translation as _
from bundlewrap.utils.remote import PathInfo
import types
from pipes import quote

# Downloaded from https://github.com/bundlewrap/plugins/blob/master/item_download/items/download.py
# No, we can't use plugins here, because bw4 won't support them anymore.

class Download(Item):
    """
    Download a file and verify its hash.
    """
    BUNDLE_ATTRIBUTE_NAME = "downloads"
    NEEDS_STATIC = [
        "pkg_apt:",
        "pkg_pacman:",
        "pkg_yum:",
        "pkg_zypper:",
    ]
    ITEM_ATTRIBUTES = {
        'url': None,
        'sha256': None,
        'sha256_url': None,
        'gpg_signature_url': None,
        'gpg_pubkey_url': None,
        'verifySSL': True,
        'decompress': None,
    }
    ITEM_TYPE_NAME = "download"
    REQUIRED_ATTRIBUTES = ['url']

    def __repr__(self):
        return "<Download name:{}>".format(self.name)

    def __hash_remote_file(self, filename):
        path_info = PathInfo(self.node, filename)
        if not path_info.is_file:
            return None

        if hasattr(path_info, 'sha256'):
            return path_info.sha256
        else:
            # PathInfo does not expose sha256 yet (pending PR), so hash manually.
            if self.node.os == 'macos':
                result = self.node.run("shasum -a 256 -- {}".format(quote(filename)))
            elif self.node.os in self.node.OS_FAMILY_BSD:
                result = self.node.run("sha256 -q -- {}".format(quote(filename)))
            else:
                result = self.node.run("sha256sum -- {}".format(quote(filename)))
            return force_text(result.stdout).strip().split()[0]

    def fix(self, status):
        if status.must_be_deleted:
            # Deleting is not possible; downloads are only ever created.
            pass
        else:
            decompress = self.attributes.get('decompress')
            # Download the file, optionally piping it through a decompressor.
            self.node.run("curl -L {verify}-s -- {url}{pipe} > {file}".format(
                verify="" if self.attributes.get('verifySSL', True) else "-k ",
                url=quote(self.attributes['url']),
                pipe=' | ' + decompress if decompress else '',
                file=quote(self.name),
            ))

    def cdict(self):
        """This is how the world should be"""
        cdict = {
            'type': 'download',
        }

        if self.attributes.get('sha256'):
            cdict['sha256'] = self.attributes['sha256']
        elif self.attributes.get('sha256_url'):
            # Fetch the expected hash from the given URL.
            full_sha256_url = self.attributes['sha256_url'].format(url=self.attributes['url'])
            cdict['sha256'] = force_text(
                self.node.run(f"curl -sL -- {quote(full_sha256_url)}").stdout
            ).strip().split()[0]
        elif self.attributes.get('gpg_signature_url'):
            cdict['verified'] = True
        else:
            raise BundleError(_(
                "{item} has neither sha256, sha256_url nor gpg_signature_url"
            ).format(item=self.id))

        return cdict

    def sdict(self):
        """This is how the world is right now"""
        path_info = PathInfo(self.node, self.name)
        if not path_info.exists:
            return None
        else:
            sdict = {
                'type': 'download',
            }
            if self.attributes.get('sha256') or self.attributes.get('sha256_url'):
                # Hash the file that is actually on the node, so it can be
                # compared against the expected hash from cdict().
                sdict['sha256'] = self.__hash_remote_file(self.name)
            elif self.attributes.get('gpg_signature_url'):
                full_signature_url = self.attributes['gpg_signature_url'].format(url=self.attributes['url'])
                signature_path = f'{self.name}.signature'

                self.node.run(f"curl -sSL {quote(self.attributes['gpg_pubkey_url'])} | gpg --import -")
                self.node.run(f"curl -L {quote(full_signature_url)} -o {quote(signature_path)}")
                gpg_output = self.node.run(f"gpg --verify {quote(signature_path)} {quote(self.name)}").stderr

                sdict['verified'] = b'Good signature' in gpg_output

            return sdict

    @classmethod
    def validate_attributes(cls, bundle, item_id, attributes):
        if (
            'sha256' not in attributes and
            'sha256_url' not in attributes and
            'gpg_signature_url' not in attributes
        ):
            raise BundleError(_(
                "at least one of sha256, sha256_url and gpg_signature_url "
                "must be set on {item} in bundle '{bundle}'"
            ).format(
                bundle=bundle.name,
                item=item_id,
            ))

        if 'url' not in attributes:
            raise BundleError(_(
                "you need to specify the url on {item} in bundle '{bundle}'"
            ).format(
                bundle=bundle.name,
                item=item_id,
            ))

    def get_auto_deps(self, items):
        deps = []
        for item in items:
            # debian only for now; TODO: add other package managers
            if item.ITEM_TYPE_NAME == 'pkg_apt' and item.name == 'curl':
                deps.append(item.id)
        return deps
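For reference, a bundle would have fed this item type through its downloads attribute (per BUNDLE_ATTRIBUTE_NAME above), along these lines; the target path, URL and digest are placeholders:

downloads = {
    '/usr/local/bin/some-tool': {
        'url': 'https://example.com/some-tool',  # hypothetical URL
        'sha256': '0123456789abcdef...',  # placeholder, not a real digest
    },
}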
@@ -1,144 +0,0 @@
from pipes import quote

from bundlewrap.exceptions import BundleError
from bundlewrap.items import Item
from bundlewrap.utils.text import mark_for_translation as _


def create(node, path, options):
    option_list = []
    for option, value in sorted(options.items()):
        # We must exclude the 'mounted' property here because it's a
        # read-only "informational" property.
        if option != 'mounted' and value is not None:
            option_list.append("-o {}={}".format(quote(option), quote(value)))
    option_args = " ".join(option_list)

    node.run(
        "zfs create -p {} {}".format(
            option_args,
            quote(path),
        ),
        may_fail=True,
    )

    if options['mounted'] == 'no':
        set_option(node, path, 'mounted', 'no')


def does_exist(node, path):
    status_result = node.run(
        "zfs list {}".format(quote(path)),
        may_fail=True,
    )
    return status_result.return_code == 0


def get_option(node, path, option):
    cmd = "zfs get -Hp -o value {} {}".format(quote(option), quote(path))
    # We always expect this to succeed since we don't call this function
    # if we have already established that the dataset does not exist.
    status_result = node.run(cmd)
    return status_result.stdout.decode('utf-8').strip()


def set_option(node, path, option, value):
    if option == 'mounted':
        # 'mounted' is a read-only property that can not be altered by
        # 'set'. We need to call 'zfs mount tank/foo'.
        node.run(
            "zfs {} {}".format(
                "mount" if value == 'yes' else "unmount",
                quote(path),
            ),
            may_fail=True,
        )
    else:
        node.run(
            "zfs set {}={} {}".format(
                quote(option),
                quote(value),
                quote(path),
            ),
            may_fail=True,
        )


class ZFSDataset(Item):
    """
    Creates ZFS datasets and manages their options.
    """
    BUNDLE_ATTRIBUTE_NAME = "zfs_datasets"
    ITEM_ATTRIBUTES = {
        'atime': None,
        'relatime': None,
        'acltype': None,
        'compression': None,
        'mountpoint': None,
        'quota': None,
        'recordsize': None,
        'dedup': None,
        'logbias': None,
        'readonly': None,
    }
    ITEM_TYPE_NAME = "zfs_dataset"

    def __repr__(self):
        return f"<ZFSDataset name:{self.name} {' '.join(f'{k}:{v}' for k, v in self.attributes.items())}>"

    def cdict(self):
        cdict = {}
        for option, value in self.attributes.items():
            if option == 'mountpoint' and value is None:
                value = "none"
            if value is not None:
                cdict[option] = value
        cdict['mounted'] = 'no' if cdict.get('mountpoint') in (None, "none") else 'yes'
        return cdict

    def fix(self, status):
        if status.must_be_created:
            create(self.node, self.name, status.cdict)
        else:
            for option in status.keys_to_fix:
                set_option(self.node, self.name, option, status.cdict[option])

    def get_auto_deps(self, items):
        pool = self.name.split("/")[0]
        pool_item = "zfs_pool:{}".format(pool)
        pool_item_found = False

        for item in items:
            if item.ITEM_TYPE_NAME == "zfs_pool" and item.name == pool:
                # Add dependency to the pool this dataset resides on.
                pool_item_found = True
                yield pool_item
            elif (
                item.ITEM_TYPE_NAME == "zfs_dataset" and
                self.name != item.name and
                self.name.startswith(item.name + "/")
            ):
                # Find all other datasets that are parents of this
                # dataset.
                # XXX Could be optimized by finding the "largest"
                # parent only.
                yield item.id

        if not pool_item_found:
            raise BundleError(_(
                "ZFS dataset {dataset} resides on pool {pool} but item "
                "{dep} does not exist"
            ).format(
                dataset=self.name,
                pool=pool,
                dep=pool_item,
            ))

    def sdict(self):
        if not does_exist(self.node, self.name):
            return None

        sdict = {}
        for option in self.attributes:
            sdict[option] = get_option(self.node, self.name, option)
        sdict['mounted'] = get_option(self.node, self.name, 'mounted')
        return sdict
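A typical zfs_datasets entry in a bundle's items.py, matching the bundle code in the first file (names and options illustrative):

zfs_datasets = {
    'tank/data': {
        'mountpoint': '/data',
        'compression': 'lz4',
        'relatime': 'on',
    },
}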
@@ -1,160 +0,0 @@
from collections import Counter
from pipes import quote

from bundlewrap.exceptions import BundleError
from bundlewrap.items import Item
from bundlewrap.utils.text import mark_for_translation as _


def create_mirrors(node, path, mirrors):
    cmd = ""
    for devices in mirrors:
        actual_targets = []
        for device in devices:
            actual_targets.append(quote(prepare_blockdevice(node, device)))
        cmd += "mirror {} ".format(" ".join(actual_targets))

    node.run("zpool create {} {}".format(quote(path), cmd))
    node.run("zfs unmount {}".format(quote(path)))


def create_raidz(node, path, devices, raid='raidz'):
    cmd = ""
    actual_targets = []
    for device in devices:
        actual_targets.append(quote(prepare_blockdevice(node, device)))
    cmd += "{} {} ".format(raid, " ".join(actual_targets))

    node.run("zpool create {} {}".format(quote(path), cmd))
    node.run("zfs unmount {}".format(quote(path)))


def create_single(node, path, device):
    actual_target = prepare_blockdevice(node, device)
    node.run("zpool create {} {}".format(quote(path), quote(actual_target)))
    node.run("zfs unmount {}".format(quote(path)))


def does_exist(node, path):
    status_result = node.run(
        "zpool list {}".format(quote(path)),
        may_fail=True,
    )
    return status_result.return_code == 0


def prepare_blockdevice(node, device):
    # To increase our chances of success, we run partprobe beforehand to
    # make the kernel re-scan all devices.
    node.run("partprobe", may_fail=True)

    # Try to find out if the device already contains some filesystem.
    # Please note that there is no 100% reliable way to do this.
    res = node.run("lsblk -rndo fstype {}".format(quote(device)))
    detected = res.stdout.decode('UTF-8').strip()
    if detected != "":
        raise Exception(_("Device {} is to be used for ZFS, but it is not empty! Found '{}'.").format(
            device, detected))
    else:
        return device


class ZFSPool(Item):
    """
    Creates ZFS pools and the required partitions.
    """
    BUNDLE_ATTRIBUTE_NAME = "zfs_pools"
    ITEM_ATTRIBUTES = {
        'device': None,
        'mirrors': None,
        'raidz': None,
        'raidz2': None,
        'raidz3': None,
    }
    ITEM_TYPE_NAME = "zfs_pool"

    def __repr__(self):
        return "<ZFSPool name:{} device:{} mirrors:{} raidz:{}>".format(
            self.name,
            self.attributes['device'],
            self.attributes['mirrors'],
            self.attributes['raidz'],
        )

    def cdict(self):
        return {}

    @property
    def devices_used(self):
        devices = []
        if self.attributes['device'] is not None:
            devices.append(self.attributes['device'])
        if self.attributes['mirrors'] is not None:
            for mirror in self.attributes['mirrors']:
                devices.extend(mirror)
        # Include all raidz variants so duplicate detection covers them, too.
        for raid in ('raidz', 'raidz2', 'raidz3'):
            if self.attributes[raid] is not None:
                devices.extend(self.attributes[raid])
        return devices

    def fix(self, status):
        if status.must_be_created:
            if self.attributes['device'] is not None:
                create_single(self.node, self.name, self.attributes['device'])
            elif self.attributes['mirrors'] is not None:
                create_mirrors(self.node, self.name, self.attributes['mirrors'])
            elif self.attributes['raidz'] is not None:
                create_raidz(self.node, self.name, self.attributes['raidz'])
            elif self.attributes['raidz2'] is not None:
                create_raidz(self.node, self.name, self.attributes['raidz2'], 'raidz2')
            elif self.attributes['raidz3'] is not None:
                create_raidz(self.node, self.name, self.attributes['raidz3'], 'raidz3')

    def sdict(self):
        # We don't care about the device if the pool already exists.
        return {} if does_exist(self.node, self.name) else None

    def test(self):
        duplicate_devices = [
            item for item, count in Counter(self.devices_used).items() if count > 1
        ]
        if duplicate_devices:
            raise BundleError(_(
                "{item} on node {node} uses {devices} more than once as an underlying device"
            ).format(
                item=self.id,
                node=self.node.name,
                devices=_(" and ").join(duplicate_devices),
            ))

        # Have a look at all other ZFS pools on this node and check if
        # multiple pools try to use the same device.
        for item in self.node.items:
            if (
                item.ITEM_TYPE_NAME == "zfs_pool" and
                item.name != self.name and
                set(item.devices_used).intersection(set(self.devices_used))
            ):
                raise BundleError(_(
                    "Both the ZFS pools {self} and {other} on node {node} "
                    "try to use {devices} as the underlying storage device"
                ).format(
                    self=self.name,
                    other=item.name,
                    node=self.node.name,
                    devices=_(" and ").join(set(item.devices_used).intersection(set(self.devices_used))),
                ))

    @classmethod
    def validate_attributes(cls, bundle, item_id, attributes):
        device_config = []
        for key in ('device', 'mirrors', 'raidz', 'raidz2', 'raidz3'):
            device_config.append(attributes.get(key))
        device_config = [key for key in device_config if key is not None]
        if len(device_config) != 1:
            raise BundleError(_(
                "{item} on node {node} must have exactly one of "
                "'device', 'mirrors', 'raidz', 'raidz2' or 'raidz3'"
            ).format(
                item=item_id,
                node=bundle.node.name,
            ))
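And the matching zfs_pools shape, with illustrative device paths; validate_attributes above enforces that exactly one of device, mirrors, raidz, raidz2 or raidz3 is set per pool:

zfs_pools = {
    'tank': {
        'mirrors': [
            ['/dev/disk/by-id/ata-disk1', '/dev/disk/by-id/ata-disk2'],
        ],
    },
}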