diff -Nru curtin-18.1-17-gae48e86f/bin/curtin curtin-18.2-10-g7afd77fa/bin/curtin
--- curtin-18.1-17-gae48e86f/bin/curtin 2018-05-18 18:40:48.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/bin/curtin 2019-02-15 20:42:42.000000000 +0000
@@ -1,7 +1,7 @@
 #!/bin/sh
 # This file is part of curtin. See LICENSE file for copyright and license info.
 
-PY3OR2_MAIN="curtin.commands.main"
+PY3OR2_MAIN="curtin"
 PY3OR2_MCHECK="curtin.deps.check"
 PY3OR2_PYTHONS=${PY3OR2_PYTHONS:-"python3:python"}
 PYTHON=${PY3OR2_PYTHON}
sleeping %ss before retry", + device, attempt + 1, len(retries), wait) + time.sleep(wait) + def _wipe_superblock(blockdev, exclusive=True): """ No checks, just call wipe_volume """ @@ -379,7 +410,10 @@ """ determine if specified device is a bcache device """ - return block.path_to_kname(device).startswith('bcache') + # bcache devices can be partitioned and the partitions are *not* + # bcache devices with a sysfs 'slaves' subdirectory + partition = identify_partition(device) + return block.path_to_kname(device).startswith('bcache') and not partition def identify_partition(device): @@ -579,8 +613,6 @@ dev_info['dev_type']) continue - # scan before we check - block.rescan_block_devices(warn_on_fail=False) if os.path.exists(dev_info['device']): LOG.info("shutdown running on holder type: '%s' syspath: '%s'", dev_info['dev_type'], dev_info['device']) @@ -602,19 +634,18 @@ # all disks and partitions should be sufficient to remove the mdadm # metadata mdadm.mdadm_assemble(scan=True, ignore_errors=True) + # scan and activate for logical volumes + lvm.lvm_scan() + lvm.activate_volgroups() # the bcache module needs to be present to properly detect bcache devs # on some systems (precise without hwe kernel) it may not be possible to # lad the bcache module bcause it is not present in the kernel. if this # happens then there is no need to halt installation, as the bcache devices # will never appear and will never prevent the disk from being reformatted util.load_kernel_module('bcache') - # the zfs module is needed to find and export devices which may be in-use - # and need to be cleared, only on xenial+. - try: - if zfs.zfs_supported(): - util.load_kernel_module('zfs') - except RuntimeError as e: - LOG.warning('Failed to load zfs kernel module: %s', e) + + if not zfs.zfs_supported(): + LOG.warning('zfs filesystem is not supported in this environment') # anything that is not identified can assumed to be a 'disk' or similar diff -Nru curtin-18.1-17-gae48e86f/curtin/block/deps.py curtin-18.2-10-g7afd77fa/curtin/block/deps.py --- curtin-18.1-17-gae48e86f/curtin/block/deps.py 1970-01-01 00:00:00.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/curtin/block/deps.py 2019-02-15 20:42:42.000000000 +0000 @@ -0,0 +1,103 @@ +# This file is part of curtin. See LICENSE file for copyright and license info. + +from curtin.distro import DISTROS +from curtin.block import iscsi + + +def storage_config_required_packages(storage_config, mapping): + """Read storage configuration dictionary and determine + which packages are required for the supplied configuration + to function. Return a list of packaged to install. + """ + + if not storage_config or not isinstance(storage_config, dict): + raise ValueError('Invalid storage configuration. ' + 'Must be a dict:\n %s' % storage_config) + + if not mapping or not isinstance(mapping, dict): + raise ValueError('Invalid storage mapping. Must be a dict') + + if 'storage' in storage_config: + storage_config = storage_config.get('storage') + + needed_packages = [] + + # get reqs by device operation type + dev_configs = set(operation['type'] + for operation in storage_config['config']) + + for dev_type in dev_configs: + if dev_type in mapping: + needed_packages.extend(mapping[dev_type]) + + # for disks with path: iscsi: we need iscsi tools + iscsi_vols = iscsi.get_iscsi_volumes_from_config(storage_config) + if len(iscsi_vols) > 0: + needed_packages.extend(mapping['iscsi']) + + # for any format operations, check the fstype and + # determine if we need any mkfs tools as well. 
diff -Nru curtin-18.1-17-gae48e86f/curtin/block/deps.py curtin-18.2-10-g7afd77fa/curtin/block/deps.py
--- curtin-18.1-17-gae48e86f/curtin/block/deps.py 1970-01-01 00:00:00.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/curtin/block/deps.py 2019-02-15 20:42:42.000000000 +0000
@@ -0,0 +1,103 @@
+# This file is part of curtin. See LICENSE file for copyright and license info.
+
+from curtin.distro import DISTROS
+from curtin.block import iscsi
+
+
+def storage_config_required_packages(storage_config, mapping):
+    """Read storage configuration dictionary and determine
+       which packages are required for the supplied configuration
+       to function. Return a list of packages to install.
+    """
+
+    if not storage_config or not isinstance(storage_config, dict):
+        raise ValueError('Invalid storage configuration. '
+                         'Must be a dict:\n %s' % storage_config)
+
+    if not mapping or not isinstance(mapping, dict):
+        raise ValueError('Invalid storage mapping. Must be a dict')
+
+    if 'storage' in storage_config:
+        storage_config = storage_config.get('storage')
+
+    needed_packages = []
+
+    # get reqs by device operation type
+    dev_configs = set(operation['type']
+                      for operation in storage_config['config'])
+
+    for dev_type in dev_configs:
+        if dev_type in mapping:
+            needed_packages.extend(mapping[dev_type])
+
+    # for disks with path: iscsi: we need iscsi tools
+    iscsi_vols = iscsi.get_iscsi_volumes_from_config(storage_config)
+    if len(iscsi_vols) > 0:
+        needed_packages.extend(mapping['iscsi'])
+
+    # for any format operations, check the fstype and
+    # determine if we need any mkfs tools as well.
+    format_configs = set([operation['fstype']
+                          for operation in storage_config['config']
+                          if operation['type'] == 'format'])
+    for format_type in format_configs:
+        if format_type in mapping:
+            needed_packages.extend(mapping[format_type])
+
+    return needed_packages
+
+
+def detect_required_packages_mapping(osfamily=DISTROS.debian):
+    """Return a dictionary providing a versioned configuration which maps
+       storage configuration elements to the packages which are required
+       for functionality.
+
+       The mapping key is either a config type value, or an fstype value.
+
+    """
+    distro_mapping = {
+        DISTROS.debian: {
+            'bcache': ['bcache-tools'],
+            'btrfs': ['btrfs-tools'],
+            'ext2': ['e2fsprogs'],
+            'ext3': ['e2fsprogs'],
+            'ext4': ['e2fsprogs'],
+            'jfs': ['jfsutils'],
+            'iscsi': ['open-iscsi'],
+            'lvm_partition': ['lvm2'],
+            'lvm_volgroup': ['lvm2'],
+            'ntfs': ['ntfs-3g'],
+            'raid': ['mdadm'],
+            'reiserfs': ['reiserfsprogs'],
+            'xfs': ['xfsprogs'],
+            'zfsroot': ['zfsutils-linux', 'zfs-initramfs'],
+            'zfs': ['zfsutils-linux', 'zfs-initramfs'],
+            'zpool': ['zfsutils-linux', 'zfs-initramfs'],
+        },
+        DISTROS.redhat: {
+            'bcache': [],
+            'btrfs': ['btrfs-progs'],
+            'ext2': ['e2fsprogs'],
+            'ext3': ['e2fsprogs'],
+            'ext4': ['e2fsprogs'],
+            'jfs': [],
+            'iscsi': ['iscsi-initiator-utils'],
+            'lvm_partition': ['lvm2'],
+            'lvm_volgroup': ['lvm2'],
+            'ntfs': [],
+            'raid': ['mdadm'],
+            'reiserfs': [],
+            'xfs': ['xfsprogs'],
+            'zfsroot': [],
+            'zfs': [],
+            'zpool': [],
+        },
+    }
+    if osfamily not in distro_mapping:
+        raise ValueError('No block package mapping for distro: %s' % osfamily)
+
+    return {1: {'handler': storage_config_required_packages,
+                'mapping': distro_mapping.get(osfamily)}}
+
+
+# vi: ts=4 expandtab syntax=python
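
Consumers look the handler and mapping up by config version; a minimal usage
sketch (the sample config is invented, but the call pattern follows the
structure returned above):

    from curtin.block import deps as bdeps

    cfg = {'storage': {'version': 1, 'config': [
        {'id': 'sda', 'type': 'disk', 'path': '/dev/sda'},
        {'id': 'sda1', 'type': 'partition', 'device': 'sda', 'size': '8GB'},
        {'id': 'sda1-fs', 'type': 'format', 'fstype': 'ext4', 'volume': 'sda1'},
    ]}}

    version = 1
    deps = bdeps.detect_required_packages_mapping()[version]
    packages = deps['handler'](cfg, deps['mapping'])
    # expected: ['e2fsprogs'] on a Debian-family target
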
diff -Nru curtin-18.1-17-gae48e86f/curtin/block/__init__.py curtin-18.2-10-g7afd77fa/curtin/block/__init__.py
--- curtin-18.1-17-gae48e86f/curtin/block/__init__.py 2018-05-18 18:40:48.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/curtin/block/__init__.py 2019-02-15 20:42:42.000000000 +0000
@@ -103,7 +103,7 @@
     """
     Add number to disk_kname prepending a 'p' if needed
     """
-    for dev_type in ['nvme', 'mmcblk', 'cciss', 'mpath', 'dm', 'md']:
+    for dev_type in ['bcache', 'nvme', 'mmcblk', 'cciss', 'mpath', 'dm', 'md']:
         if disk_kname.startswith(dev_type):
             partition_number = "p%s" % partition_number
             break
@@ -378,24 +378,28 @@
             LOG.warn("Failed to stop multipath devices: %s", e)
 
 
-def rescan_block_devices(warn_on_fail=True):
+def rescan_block_devices(devices=None, warn_on_fail=True):
     """
     run 'blockdev --rereadpt' for all block devices not currently mounted
     """
-    unused = get_unused_blockdev_info()
-    devices = []
-    for devname, data in unused.items():
-        if data.get('RM') == "1":
-            continue
-        if data.get('RO') != "0" or data.get('TYPE') != "disk":
-            continue
-        devices.append(data['device_path'])
+    if not devices:
+        unused = get_unused_blockdev_info()
+        devices = []
+        for devname, data in unused.items():
+            if data.get('RM') == "1":
+                continue
+            if data.get('RO') != "0" or data.get('TYPE') != "disk":
+                continue
+            devices.append(data['device_path'])
 
     if not devices:
         LOG.debug("no devices found to rescan")
         return
 
-    cmd = ['blockdev', '--rereadpt'] + devices
+    # blockdev needs /dev/ parameters, convert if needed
+    cmd = ['blockdev', '--rereadpt'] + [dev if dev.startswith('/dev/')
+                                        else sysfs_to_devpath(dev)
+                                        for dev in devices]
     try:
         util.subp(cmd, capture=True)
     except util.ProcessExecutionError as e:
@@ -999,75 +1003,17 @@
         raise ValueError("wipe mode %s not supported" % mode)
 
 
-def storage_config_required_packages(storage_config, mapping):
-    """Read storage configuration dictionary and determine
-       which packages are required for the supplied configuration
-       to function. Return a list of packaged to install.
-    """
-
-    if not storage_config or not isinstance(storage_config, dict):
-        raise ValueError('Invalid storage configuration. '
-                         'Must be a dict:\n %s' % storage_config)
-
-    if not mapping or not isinstance(mapping, dict):
-        raise ValueError('Invalid storage mapping. Must be a dict')
-
-    if 'storage' in storage_config:
-        storage_config = storage_config.get('storage')
-
-    needed_packages = []
-
-    # get reqs by device operation type
-    dev_configs = set(operation['type']
-                      for operation in storage_config['config'])
-
-    for dev_type in dev_configs:
-        if dev_type in mapping:
-            needed_packages.extend(mapping[dev_type])
-
-    # for any format operations, check the fstype and
-    # determine if we need any mkfs tools as well.
-    format_configs = set([operation['fstype']
-                          for operation in storage_config['config']
-                          if operation['type'] == 'format'])
-    for format_type in format_configs:
-        if format_type in mapping:
-            needed_packages.extend(mapping[format_type])
-
-    return needed_packages
-
-
-def detect_required_packages_mapping():
-    """Return a dictionary providing a versioned configuration which maps
-       storage configuration elements to the packages which are required
-       for functionality.
-
-       The mapping key is either a config type value, or an fstype value.
-
-    """
-    version = 1
-    mapping = {
-        version: {
-            'handler': storage_config_required_packages,
-            'mapping': {
-                'bcache': ['bcache-tools'],
-                'btrfs': ['btrfs-tools'],
-                'ext2': ['e2fsprogs'],
-                'ext3': ['e2fsprogs'],
-                'ext4': ['e2fsprogs'],
-                'jfs': ['jfsutils'],
-                'lvm_partition': ['lvm2'],
-                'lvm_volgroup': ['lvm2'],
-                'ntfs': ['ntfs-3g'],
-                'raid': ['mdadm'],
-                'reiserfs': ['reiserfsprogs'],
-                'xfs': ['xfsprogs'],
-                'zfsroot': ['zfsutils-linux', 'zfs-initramfs'],
-                'zfs': ['zfsutils-linux', 'zfs-initramfs'],
-                'zpool': ['zfsutils-linux', 'zfs-initramfs'],
-            },
-        },
-    }
-    return mapping
+def get_supported_filesystems():
+    """ Return a list of filesystems that the kernel currently supports
+        as read from /proc/filesystems.
+
+        Raises RuntimeError if /proc/filesystems does not exist.
+    """
+    proc_fs = "/proc/filesystems"
+    if not os.path.exists(proc_fs):
+        raise RuntimeError("Unable to read 'filesystems' from %s" % proc_fs)
+
+    return [l.split('\t')[1].strip()
+            for l in util.load_file(proc_fs).splitlines()]
 
 
 # vi: ts=4 expandtab syntax=python
""" if not cfg: cfg = {} - sconfig = cfg.get('storage', {}).get('config', {}) - if not sconfig: + if 'storage' in cfg: + sconfig = cfg.get('storage', {}).get('config', []) + else: + sconfig = cfg.get('config', []) + if not sconfig or not isinstance(sconfig, list): LOG.warning('Configuration dictionary did not contain' ' a storage configuration') return [] + return [disk['path'] for disk in sconfig + if disk['type'] == 'disk' and + disk.get('path', "").startswith('iscsi:')] + + +def get_iscsi_disks_from_config(cfg): + """Return a list of IscsiDisk objects for each iscsi volume present.""" # Construct IscsiDisk objects for each iscsi volume present - iscsi_disks = [IscsiDisk(disk['path']) for disk in sconfig - if disk['type'] == 'disk' and - disk.get('path', "").startswith('iscsi:')] + iscsi_disks = [IscsiDisk(volume) for volume in + get_iscsi_volumes_from_config(cfg)] LOG.debug('Found %s iscsi disks in storage config', len(iscsi_disks)) return iscsi_disks +def get_iscsi_ports_from_config(cfg): + """Return a set of ports that may be used when connecting to volumes.""" + ports = set([d.port for d in get_iscsi_disks_from_config(cfg)]) + LOG.debug('Found iscsi ports in use: %s', ports) + return ports + + def disconnect_target_disks(target_root_path=None): - target_nodes_path = util.target_path(target_root_path, '/etc/iscsi/nodes') + target_nodes_path = paths.target_path(target_root_path, '/etc/iscsi/nodes') fails = [] if os.path.isdir(target_nodes_path): for target in os.listdir(target_nodes_path): diff -Nru curtin-18.1-17-gae48e86f/curtin/block/lvm.py curtin-18.2-10-g7afd77fa/curtin/block/lvm.py --- curtin-18.1-17-gae48e86f/curtin/block/lvm.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/curtin/block/lvm.py 2019-02-15 20:42:42.000000000 +0000 @@ -4,6 +4,7 @@ This module provides some helper functions for manipulating lvm devices """ +from curtin import distro from curtin import util from curtin.log import LOG import os @@ -57,20 +58,38 @@ '/run/lvmetad.pid')) -def lvm_scan(): +def activate_volgroups(): + """ + Activate available volgroups and logical volumes within. + + # found + % vgchange -ay + 1 logical volume(s) in volume group "vg1sdd" now active + + # none found (no output) + % vgchange -ay + """ + + # vgchange handles syncing with udev by default + # see man 8 vgchange and flag --noudevsync + out, _ = util.subp(['vgchange', '--activate=y'], capture=True) + if out: + LOG.info(out) + + +def lvm_scan(activate=True): """ run full scan for volgroups, logical volumes and physical volumes """ - # the lvm tools lvscan, vgscan and pvscan on ubuntu precise do not - # support the flag --cache. the flag is present for the tools in ubuntu - # trusty and later. since lvmetad is used in current releases of - # ubuntu, the --cache flag is needed to ensure that the data cached by + # prior to xenial, lvmetad is not packaged, so even if a tool supports + # flag --cache it has no effect. In Xenial and newer the --cache flag is + # used (if lvmetad is running) to ensure that the data cached by # lvmetad is updated. # before appending the cache flag though, check if lvmetad is running. 
diff -Nru curtin-18.1-17-gae48e86f/curtin/block/lvm.py curtin-18.2-10-g7afd77fa/curtin/block/lvm.py
--- curtin-18.1-17-gae48e86f/curtin/block/lvm.py 2018-05-18 18:40:48.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/curtin/block/lvm.py 2019-02-15 20:42:42.000000000 +0000
@@ -4,6 +4,7 @@
 This module provides some helper functions for manipulating lvm devices
 """
 
+from curtin import distro
 from curtin import util
 from curtin.log import LOG
 
 import os
@@ -57,20 +58,38 @@
                        '/run/lvmetad.pid'))
 
 
-def lvm_scan():
+def activate_volgroups():
+    """
+    Activate available volgroups and logical volumes within.
+
+    # found
+    % vgchange -ay
+      1 logical volume(s) in volume group "vg1sdd" now active
+
+    # none found (no output)
+    % vgchange -ay
+    """
+
+    # vgchange handles syncing with udev by default
+    # see man 8 vgchange and flag --noudevsync
+    out, _ = util.subp(['vgchange', '--activate=y'], capture=True)
+    if out:
+        LOG.info(out)
+
+
+def lvm_scan(activate=True):
     """
     run full scan for volgroups, logical volumes and physical volumes
     """
-    # the lvm tools lvscan, vgscan and pvscan on ubuntu precise do not
-    # support the flag --cache. the flag is present for the tools in ubuntu
-    # trusty and later. since lvmetad is used in current releases of
-    # ubuntu, the --cache flag is needed to ensure that the data cached by
+    # prior to xenial, lvmetad is not packaged, so even if a tool supports
+    # flag --cache it has no effect. In Xenial and newer the --cache flag is
+    # used (if lvmetad is running) to ensure that the data cached by
     # lvmetad is updated.
     # before appending the cache flag though, check if lvmetad is running. this
     # ensures that we do the right thing even if lvmetad is supported but is
     # not running
-    release = util.lsb_release().get('codename')
+    release = distro.lsb_release().get('codename')
     if release in [None, 'UNAVAILABLE']:
         LOG.warning('unable to find release number, assuming xenial or later')
         release = 'xenial'
diff -Nru curtin-18.1-17-gae48e86f/curtin/block/mdadm.py curtin-18.2-10-g7afd77fa/curtin/block/mdadm.py
--- curtin-18.1-17-gae48e86f/curtin/block/mdadm.py 2018-05-18 18:40:48.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/curtin/block/mdadm.py 2019-02-15 20:42:42.000000000 +0000
@@ -13,6 +13,7 @@
 from curtin.block import (dev_short, dev_path, is_valid_device,
                           sys_block_path)
 from curtin.block import get_holders
+from curtin.distro import lsb_release
 from curtin import (util, udev)
 from curtin.log import LOG
 
@@ -95,7 +96,7 @@
 checks the mdadm version and will return True if we can use --export
 for key=value list with enough info, false if version is less than
 '''
-MDADM_USE_EXPORT = util.lsb_release()['codename'] not in ['precise', 'trusty']
+MDADM_USE_EXPORT = lsb_release()['codename'] not in ['precise', 'trusty']
 
 #
 # mdadm executors
@@ -184,7 +185,7 @@
         cmd.append(device)
 
     # Create the raid device
-    util.subp(["udevadm", "settle"])
+    udev.udevadm_settle()
     util.subp(["udevadm", "control", "--stop-exec-queue"])
     try:
         util.subp(cmd, capture=True)
@@ -208,8 +209,7 @@
             raise
 
     util.subp(["udevadm", "control", "--start-exec-queue"])
-    util.subp(["udevadm", "settle",
-               "--exit-if-exists=%s" % md_devname])
+    udev.udevadm_settle(exists=md_devname)
 
 
 def mdadm_examine(devpath, export=MDADM_USE_EXPORT):
""" - release = util.lsb_release(target=target)['codename'] + release = distro.lsb_release(target=target)['codename'] arch = util.get_architecture(target) mirrors = find_apt_mirror_info(cfg, arch) LOG.debug("Apt Mirror info: %s", mirrors) @@ -71,6 +70,7 @@ if not config.value_as_boolean(cfg.get('preserve_sources_list', True)): generate_sources_list(cfg, release, mirrors, target) + apply_preserve_sources_list(target) rename_apt_lists(mirrors, target) try: @@ -148,7 +148,7 @@ pkg = re.sub(r"[:\s].*", "", line) pkgs_cfgd.add(pkg) - pkgs_installed = util.get_installed_packages(target) + pkgs_installed = distro.get_installed_packages(target) LOG.debug("pkgs_cfgd: %s", pkgs_cfgd) LOG.debug("pkgs_installed: %s", pkgs_installed) @@ -164,7 +164,7 @@ def clean_cloud_init(target): """clean out any local cloud-init config""" flist = glob.glob( - util.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*")) + paths.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*")) LOG.debug("cleaning cloud-init config from: %s", flist) for dpkg_cfg in flist: @@ -194,7 +194,7 @@ """rename_apt_lists - rename apt lists to preserve old cache data""" default_mirrors = get_default_mirrors(util.get_architecture(target)) - pre = util.target_path(target, APT_LISTS) + pre = paths.target_path(target, APT_LISTS) for (name, omirror) in default_mirrors.items(): nmirror = new_mirrors.get(name) if not nmirror: @@ -299,7 +299,7 @@ if tmpl is None: LOG.info("No custom template provided, fall back to modify" "mirrors in %s on the target system", aptsrc) - tmpl = util.load_file(util.target_path(target, aptsrc)) + tmpl = util.load_file(paths.target_path(target, aptsrc)) # Strategy if no custom template was provided: # - Only replacing mirrors # - no reason to replace "release" as it is from target anyway @@ -310,24 +310,40 @@ tmpl = mirror_to_placeholder(tmpl, default_mirrors['SECURITY'], "$SECURITY") - orig = util.target_path(target, aptsrc) + orig = paths.target_path(target, aptsrc) if os.path.exists(orig): os.rename(orig, orig + ".curtin.old") rendered = util.render_string(tmpl, params) disabled = disable_suites(cfg.get('disable_suites'), rendered, release) - util.write_file(util.target_path(target, aptsrc), disabled, mode=0o644) + util.write_file(paths.target_path(target, aptsrc), disabled, mode=0o644) + +def apply_preserve_sources_list(target): # protect the just generated sources.list from cloud-init cloudfile = "/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg" - # this has to work with older cloud-init as well, so use old key - cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1) + + target_ver = distro.get_package_version('cloud-init', target=target) + if not target_ver: + LOG.info("Attempt to read cloud-init version from target returned " + "'%s', not writing preserve_sources_list config.", + target_ver) + return + + cfg = {'apt': {'preserve_sources_list': True}} + if target_ver['major'] < 1: + # anything cloud-init 0.X.X will get the old config key. 
diff -Nru curtin-18.1-17-gae48e86f/curtin/block/zfs.py curtin-18.2-10-g7afd77fa/curtin/block/zfs.py
--- curtin-18.1-17-gae48e86f/curtin/block/zfs.py 2018-05-18 18:40:48.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/curtin/block/zfs.py 2019-02-15 20:42:42.000000000 +0000
@@ -7,8 +7,9 @@
 import os
 
 from curtin.config import merge_config
+from curtin import distro
 from curtin import util
-from . import blkid
+from . import blkid, get_supported_filesystems
 
 ZPOOL_DEFAULT_PROPERTIES = {
     'ashift': 12,
@@ -73,6 +74,15 @@
 
 
 def zfs_supported():
+    """Return a boolean indicating if zfs is supported."""
+    try:
+        zfs_assert_supported()
+        return True
+    except RuntimeError:
+        return False
+
+
+def zfs_assert_supported():
     """ Determine if the runtime system supports zfs.
     returns: True if system supports zfs
     raises: RuntimeError: if system does not support zfs
@@ -81,17 +91,19 @@
     if arch in ZFS_UNSUPPORTED_ARCHES:
         raise RuntimeError("zfs is not supported on architecture: %s" % arch)
 
-    release = util.lsb_release()['codename']
+    release = distro.lsb_release()['codename']
     if release in ZFS_UNSUPPORTED_RELEASES:
         raise RuntimeError("zfs is not supported on release: %s" % release)
 
-    try:
-        util.subp(['modinfo', 'zfs'], capture=True)
-    except util.ProcessExecutionError as err:
-        if err.stderr.startswith("modinfo: ERROR: Module zfs not found."):
-            raise RuntimeError("zfs kernel module is not available: %s" % err)
-
-    return True
+    if 'zfs' not in get_supported_filesystems():
+        try:
+            util.load_kernel_module('zfs')
+        except util.ProcessExecutionError as err:
+            raise RuntimeError("Failed to load 'zfs' kernel module: %s" % err)
+
+    missing_progs = [p for p in ('zpool', 'zfs') if not util.which(p)]
+    if missing_progs:
+        raise RuntimeError("Missing zfs utils: %s" % ','.join(missing_progs))
 
 
 def zpool_create(poolname, vdevs, mountpoint=None, altroot=None,
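
The probe/assert split above gives callers two entry points: a quiet boolean
for guards (as clear_holders now uses) and a raising variant for the zpool/zfs
handlers. A sketch of the intended call pattern (the caller code here is
hypothetical):

    from curtin.block import zfs

    if zfs.zfs_supported():
        # boolean probe; never raises, safe in cleanup paths
        pass

    try:
        zfs.zfs_assert_supported()
    except RuntimeError as e:
        print('zfs unusable here: %s' % e)
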
diff -Nru curtin-18.1-17-gae48e86f/curtin/commands/apply_net.py curtin-18.2-10-g7afd77fa/curtin/commands/apply_net.py
--- curtin-18.1-17-gae48e86f/curtin/commands/apply_net.py 2018-05-18 18:40:48.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/curtin/commands/apply_net.py 2019-02-15 20:42:42.000000000 +0000
@@ -7,6 +7,7 @@
 import curtin.net as net
 import curtin.util as util
 from curtin import config
+from curtin import paths
 
 from . import populate_one_subcmd
 
@@ -123,7 +124,7 @@
 
     for hook in ['prehook', 'posthook']:
         fn = hookfn[hook]
-        cfg = util.target_path(target, path=fn)
+        cfg = paths.target_path(target, path=fn)
         LOG.info('Injecting fix for ipv6 mtu settings: %s', cfg)
         util.write_file(cfg, contents[hook], mode=0o755)
 
@@ -136,7 +137,7 @@
     Resolve this by allowing the cloud-image setting to win.
     """
     LOG.debug('Attempting to remove ipv6 privacy extensions')
-    cfg = util.target_path(target, path=path)
+    cfg = paths.target_path(target, path=path)
     if not os.path.exists(cfg):
         LOG.warn('Failed to find ipv6 privacy conf file %s', cfg)
         return
@@ -182,7 +183,7 @@
        - with unknown content, leave it and warn
 
     """
-    cfg = util.target_path(target, path=path)
+    cfg = paths.target_path(target, path=path)
     if not os.path.exists(cfg):
         LOG.warn('Failed to find legacy network conf file %s', cfg)
         return
Skipping dname.".format( volume)) return - rule = [ + matches = [] + base_rule = [ compose_udev_equality("SUBSYSTEM", "block"), compose_udev_equality("ACTION", "add|change"), ] if vol.get('type') == "disk": - rule.append(compose_udev_equality('ENV{DEVTYPE}', "disk")) - rule.append(compose_udev_equality('ENV{ID_PART_TABLE_UUID}', ptuuid)) + if ptuuid: + matches += [[compose_udev_equality('ENV{DEVTYPE}', "disk"), + compose_udev_equality('ENV{ID_PART_TABLE_UUID}', + ptuuid)]] + for rule in byid: + matches += [ + [compose_udev_equality('ENV{DEVTYPE}', "disk")] + rule] elif vol.get('type') == "partition": - rule.append(compose_udev_equality('ENV{DEVTYPE}', "partition")) - dname = storage_config.get(vol.get('device')).get('name') + \ - "-part%s" % determine_partition_number(volume, storage_config) - rule.append(compose_udev_equality('ENV{ID_PART_ENTRY_UUID}', ptuuid)) + # if partition has its own name, bind that to the existing PTUUID + if dname: + matches += [[compose_udev_equality('ENV{DEVTYPE}', "partition"), + compose_udev_equality('ENV{ID_PART_ENTRY_UUID}', + ptuuid)]] + else: + # disks generate dname-part%n rules automatically + LOG.debug('No partition-specific dname') + return elif vol.get('type') == "raid": md_data = mdadm.mdadm_query_detail(path) md_uuid = md_data.get('MD_UUID') - rule.append(compose_udev_equality("ENV{MD_UUID}", md_uuid)) + matches += [[compose_udev_equality("ENV{MD_UUID}", md_uuid)]] elif vol.get('type') == "bcache": # bind dname to bcache backing device's dev.uuid as the bcache minor # device numbers are not stable across reboots. @@ -236,13 +311,13 @@ storage_config) bcache_super = bcache.superblock_asdict(device=backing_dev) if bcache_super and bcache_super['sb.version'].startswith('1'): - bdev_uuid = bcache_super['dev.uuid'] - rule.append(compose_udev_equality("ENV{CACHED_UUID}", bdev_uuid)) + bdev_uuid = bcache_super['dev.uuid'] + matches += [[compose_udev_equality("ENV{CACHED_UUID}", bdev_uuid)]] bcache.write_label(sanitize_dname(dname), backing_dev) elif vol.get('type') == "lvm_partition": volgroup_name = storage_config.get(vol.get('volgroup')).get('name') dname = "%s-%s" % (volgroup_name, dname) - rule.append(compose_udev_equality("ENV{DM_NAME}", dname)) + matches += [[compose_udev_equality("ENV{DM_NAME}", dname)]] else: raise ValueError('cannot make dname for device with type: {}' .format(vol.get('type'))) @@ -255,11 +330,25 @@ LOG.warning( "dname modified to remove invalid chars. 
diff -Nru curtin-18.1-17-gae48e86f/curtin/commands/block_meta.py curtin-18.2-10-g7afd77fa/curtin/commands/block_meta.py
--- curtin-18.1-17-gae48e86f/curtin/commands/block_meta.py 2018-05-18 18:40:48.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/curtin/commands/block_meta.py 2019-02-15 20:42:42.000000000 +0000
@@ -1,13 +1,15 @@
 # This file is part of curtin. See LICENSE file for copyright and license info.
 
 from collections import OrderedDict, namedtuple
-from curtin import (block, config, util)
+from curtin import (block, config, paths, util)
 from curtin.block import (bcache, mdadm, mkfs, clear_holders, lvm, iscsi, zfs)
-from curtin.log import LOG
+from curtin import distro
+from curtin.log import LOG, logged_time
 from curtin.reporter import events
 
 from . import populate_one_subcmd
-from curtin.udev import compose_udev_equality, udevadm_settle, udevadm_trigger
+from curtin.udev import (compose_udev_equality, udevadm_settle,
+                         udevadm_trigger, udevadm_info)
 
 import glob
 import os
@@ -34,6 +36,8 @@
      'metavar': 'DEVICE', 'default': None, }),
     ('--fstype', {'help': 'root partition filesystem type',
                   'choices': ['ext4', 'ext3'], 'default': 'ext4'}),
+    ('--force-mode', {'help': 'force mode, disable mode detection',
+                      'action': 'store_true', 'default': False}),
     (('-t', '--target'),
      {'help': 'chroot to target. default is env[TARGET_MOUNT_POINT]',
       'action': 'store', 'metavar': 'TARGET',
@@ -48,16 +52,41 @@
 )
 
 
+@logged_time("BLOCK_META")
 def block_meta(args):
     # main entry point for the block-meta command.
     state = util.load_command_environment()
     cfg = config.load_command_config(args, state)
     dd_images = util.get_dd_images(cfg.get('sources', {}))
-    if ((args.mode == CUSTOM or cfg.get("storage") is not None) and
-            len(dd_images) == 0):
-        meta_custom(args)
-    elif args.mode in (SIMPLE, SIMPLE_BOOT) or len(dd_images) > 0:
-        meta_simple(args)
+
+    # run clear holders on potential devices
+    devices = args.devices
+    if devices is None:
+        devices = []
+        if 'storage' in cfg:
+            devices = get_disk_paths_from_storage_config(
+                extract_storage_ordered_dict(cfg))
+        if len(devices) == 0:
+            devices = cfg.get('block-meta', {}).get('devices', [])
+        LOG.debug('Declared block devices: %s', devices)
+        args.devices = devices
+
+    meta_clear(devices, state.get('report_stack_prefix', ''))
+
+    # dd-images requires use of meta_simple
+    if len(dd_images) > 0 and args.force_mode is False:
+        LOG.info('blockmeta: detected dd-images, using mode=simple')
+        return meta_simple(args)
+
+    if cfg.get("storage") and args.force_mode is False:
+        LOG.info('blockmeta: detected storage config, using mode=custom')
+        return meta_custom(args)
+
+    LOG.info('blockmeta: mode=%s force=%s', args.mode, args.force_mode)
+    if args.mode == CUSTOM:
+        return meta_custom(args)
+    elif args.mode in (SIMPLE, SIMPLE_BOOT):
+        return meta_simple(args)
     else:
         raise NotImplementedError("mode=%s is not implemented" % args.mode)
@@ -193,12 +222,45 @@
     return ''.join(c if c in valid else '-' for c in dname)
 
 
+def make_dname_byid(path, error_msg=None, info=None):
+    """ Returns a list of udev equalities for a given disk path
+
+    :param path: string of a kernel device path to a block device
+    :param error_msg: more information about path for log/errors
+    :param info: dict of udevadm info key, value pairs of device specified by
+                 path.
+    :returns: list of udev equalities (lists)
+    :raises: ValueError if path is not a disk.
+    :raises: RuntimeError if there is no serial or wwn.
+    """
+    error_msg = str(path) + ("" if not error_msg else " [%s]" % error_msg)
+    if info is None:
+        info = udevadm_info(path=path)
+    devtype = info.get('DEVTYPE')
+    if devtype != "disk":
+        raise ValueError(
+            "Disk tag udev rules are only for disks, %s has devtype=%s" %
+            (error_msg, devtype))
+
+    byid_keys = ['ID_WWN_WITH_EXTENSION', 'ID_WWN',
+                 'ID_SERIAL', 'ID_SERIAL_SHORT']
+    present = [k for k in byid_keys if info.get(k)]
+    if not present:
+        LOG.warning(
+            "Cannot create disk tag udev rule for %s, "
+            "missing 'serial' or 'wwn' value" % error_msg)
+        return []
+
+    return [[compose_udev_equality('ENV{%s}' % k, info[k]) for k in present]]
+
+
 def make_dname(volume, storage_config):
     state = util.load_command_environment()
     rules_dir = os.path.join(state['scratch'], "rules.d")
     vol = storage_config.get(volume)
     path = get_path_to_storage_volume(volume, storage_config)
     ptuuid = None
+    byid = None
     dname = vol.get('name')
     if vol.get('type') in ["partition", "disk"]:
         (out, _err) = util.subp(["blkid", "-o", "export", path], capture=True,
@@ -207,28 +269,41 @@
             if "PTUUID" in line or "PARTUUID" in line:
                 ptuuid = line.split('=')[-1]
                 break
+        if vol.get('type') == 'disk':
+            byid = make_dname_byid(path, error_msg="id=%s" % vol.get('id'))
 
     # we may not always be able to find a uniq identifier on devices with names
-    if not ptuuid and vol.get('type') in ["disk", "partition"]:
+    if (not ptuuid and not byid) and vol.get('type') in ["disk", "partition"]:
         LOG.warning("Can't find a uuid for volume: {}. Skipping dname.".format(
             volume))
         return
 
-    rule = [
+    matches = []
+    base_rule = [
         compose_udev_equality("SUBSYSTEM", "block"),
         compose_udev_equality("ACTION", "add|change"),
     ]
     if vol.get('type') == "disk":
-        rule.append(compose_udev_equality('ENV{DEVTYPE}', "disk"))
-        rule.append(compose_udev_equality('ENV{ID_PART_TABLE_UUID}', ptuuid))
+        if ptuuid:
+            matches += [[compose_udev_equality('ENV{DEVTYPE}', "disk"),
+                         compose_udev_equality('ENV{ID_PART_TABLE_UUID}',
+                                               ptuuid)]]
+        for rule in byid:
+            matches += [
+                [compose_udev_equality('ENV{DEVTYPE}', "disk")] + rule]
     elif vol.get('type') == "partition":
-        rule.append(compose_udev_equality('ENV{DEVTYPE}', "partition"))
-        dname = storage_config.get(vol.get('device')).get('name') + \
-            "-part%s" % determine_partition_number(volume, storage_config)
-        rule.append(compose_udev_equality('ENV{ID_PART_ENTRY_UUID}', ptuuid))
+        # if partition has its own name, bind that to the existing PTUUID
+        if dname:
+            matches += [[compose_udev_equality('ENV{DEVTYPE}', "partition"),
+                         compose_udev_equality('ENV{ID_PART_ENTRY_UUID}',
+                                               ptuuid)]]
+        else:
+            # disks generate dname-part%n rules automatically
+            LOG.debug('No partition-specific dname')
+            return
     elif vol.get('type') == "raid":
         md_data = mdadm.mdadm_query_detail(path)
         md_uuid = md_data.get('MD_UUID')
-        rule.append(compose_udev_equality("ENV{MD_UUID}", md_uuid))
+        matches += [[compose_udev_equality("ENV{MD_UUID}", md_uuid)]]
    elif vol.get('type') == "bcache":
         # bind dname to bcache backing device's dev.uuid as the bcache minor
         # device numbers are not stable across reboots.
@@ -236,13 +311,13 @@
                                                  storage_config)
         bcache_super = bcache.superblock_asdict(device=backing_dev)
         if bcache_super and bcache_super['sb.version'].startswith('1'):
-            bdev_uuid = bcache_super['dev.uuid']
-            rule.append(compose_udev_equality("ENV{CACHED_UUID}", bdev_uuid))
+            bdev_uuid = bcache_super['dev.uuid']
+            matches += [[compose_udev_equality("ENV{CACHED_UUID}", bdev_uuid)]]
         bcache.write_label(sanitize_dname(dname), backing_dev)
     elif vol.get('type') == "lvm_partition":
         volgroup_name = storage_config.get(vol.get('volgroup')).get('name')
         dname = "%s-%s" % (volgroup_name, dname)
-        rule.append(compose_udev_equality("ENV{DM_NAME}", dname))
+        matches += [[compose_udev_equality("ENV{DM_NAME}", dname)]]
     else:
         raise ValueError('cannot make dname for device with type: {}'
                          .format(vol.get('type')))
@@ -255,11 +330,25 @@
         LOG.warning(
            "dname modified to remove invalid chars. old: '{}' new: '{}'"
             .format(dname, sanitized))
 
-    rule.append("SYMLINK+=\"disk/by-dname/%s\"\n" % sanitized)
-    LOG.debug("Writing dname udev rule '{}'".format(str(rule)))
+    content = ['# Written by curtin']
+    for match in matches:
+        rule = (base_rule + match +
+                ["SYMLINK+=\"disk/by-dname/%s\"\n" % sanitized])
+        LOG.debug("Creating dname udev rule '{}'".format(str(rule)))
+        content.append(', '.join(rule))
+
+    if vol.get('type') == 'disk':
+        for brule in byid:
+            rule = (base_rule +
+                    [compose_udev_equality('ENV{DEVTYPE}', 'partition')] +
+                    brule +
+                    ['SYMLINK+="disk/by-dname/%s-part%%n"\n' % sanitized])
+            LOG.debug("Creating dname udev rule '{}'".format(str(rule)))
+            content.append(', '.join(rule))
+
     util.ensure_dir(rules_dir)
     rule_file = os.path.join(rules_dir, '{}.rules'.format(sanitized))
-    util.write_file(rule_file, ', '.join(rule))
+    util.write_file(rule_file, '\n'.join(content))
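
With a list of match lists, the generated rules file gains one udev line per
identifier, plus automatic -part%n lines for disks. A hypothetical rendering
for a disk named 'root_disk' whose udevadm info carries an ID_SERIAL (all
values invented):

    # Written by curtin
    SUBSYSTEM=="block", ACTION=="add|change", ENV{DEVTYPE}=="disk", ENV{ID_PART_TABLE_UUID}=="8c163875", SYMLINK+="disk/by-dname/root_disk"
    SUBSYSTEM=="block", ACTION=="add|change", ENV{DEVTYPE}=="disk", ENV{ID_SERIAL}=="QM00001", SYMLINK+="disk/by-dname/root_disk"
    SUBSYSTEM=="block", ACTION=="add|change", ENV{DEVTYPE}=="partition", ENV{ID_SERIAL}=="QM00001", SYMLINK+="disk/by-dname/root_disk-part%n"
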
 
 
 def get_poolname(info, storage_config):
@@ -582,6 +671,9 @@
         block.zero_file_at_offsets(disk, [wipe_offset], exclusive=False)
 
     if disk_ptable == "msdos":
+        if flag and flag == 'prep':
+            raise ValueError('PReP partitions require a GPT partition table')
+
         if flag in ["extended", "logical", "primary"]:
             partition_type = flag
         else:
@@ -602,6 +694,17 @@
     else:
         raise ValueError("parent partition has invalid partition table")
 
+    # ensure partition exists
+    part_path = block.dev_path(block.partition_kname(disk_kname, partnumber))
+    block.rescan_block_devices([disk])
+    udevadm_settle(exists=part_path)
+
+    # wipe the created partition if needed, superblocks have already been wiped
+    wipe_mode = info.get('wipe', 'superblock')
+    if wipe_mode != 'superblock':
+        LOG.debug('Wiping partition %s mode=%s', part_path, wipe_mode)
+        block.wipe_volume(part_path, mode=wipe_mode, exclusive=False)
+
     # Make the name if needed
     if storage_config.get(device).get('name') and partition_type != 'extended':
         make_dname(info.get('id'), storage_config)
@@ -729,12 +832,12 @@
     :param fdata: a FstabData type
     :return None."""
 
-    mp = util.target_path(target, fdata.path)
+    mp = paths.target_path(target, fdata.path)
     if fdata.device:
         device = fdata.device
     else:
         if fdata.spec.startswith("/") and not fdata.spec.startswith("/dev/"):
-            device = util.target_path(target, fdata.spec)
+            device = paths.target_path(target, fdata.spec)
         else:
             device = fdata.spec
 
@@ -855,7 +958,7 @@
     # Use 'wipesignatures' (if available) and 'zero' to clear target lv
     # of any fs metadata
     cmd = ["lvcreate", volgroup, "--name", name, "--zero=y"]
-    release = util.lsb_release()['codename']
+    release = distro.lsb_release()['codename']
     if release not in ['precise', 'trusty']:
         cmd.extend(["--wipesignatures=y"])
 
@@ -1263,7 +1366,7 @@
     """
     Create a zpool based in storage_configuration
     """
-    zfs.zfs_supported()
+    zfs.zfs_assert_supported()
 
     state = util.load_command_environment()
 
@@ -1298,7 +1401,8 @@
     """
     Create a zfs filesystem
     """
-    zfs.zfs_supported()
+    zfs.zfs_assert_supported()
+
     state = util.load_command_environment()
 
     poolname = get_poolname(info, storage_config)
     volume = info.get('volume')
@@ -1332,6 +1436,19 @@
     return OrderedDict((d["id"], d) for (i, d) in enumerate(scfg))
 
 
+def get_disk_paths_from_storage_config(storage_config):
+    """Returns a list of disk paths in a storage config, filtering out
+       preserved disks and disks which do not have a wipe configuration.
+
+    :param: storage_config: Ordered dict of storage configuration
+    """
+    return [get_path_to_storage_volume(k, storage_config)
+            for (k, v) in storage_config.items()
+            if v.get('type') == 'disk' and
+            config.value_as_boolean(v.get('wipe')) and
+            not config.value_as_boolean(v.get('preserve'))]
+
+
 def zfsroot_update_storage_config(storage_config):
     """Return an OrderedDict that has 'zfsroot' format expanded into
        zpool and zfs commands to enable ZFS on rootfs.
@@ -1426,6 +1543,24 @@
     return ret
 
 
+def meta_clear(devices, report_prefix=''):
+    """ Run clear_holders on specified list of devices.
+
+    :param: devices: a list of block devices (/dev/XXX) to be cleared
+    :param: report_prefix: a string to pass to the ReportEventStack
+    """
+    # shut down any already existing storage layers above any disks used in
+    # config that have 'wipe' set
+    with events.ReportEventStack(
+            name=report_prefix + '/clear-holders',
+            reporting_enabled=True, level='INFO',
+            description="removing previous storage devices"):
+        clear_holders.start_clear_holders_deps()
+        clear_holders.clear_holders(devices)
+        # if anything was not properly shut down, stop installation
+        clear_holders.assert_clear(devices)
+
+
 def meta_custom(args):
     """Does custom partitioning based on the layout provided in the config
     file. Section with the name storage contains information on which
@@ -1457,21 +1592,6 @@
     # set up reportstack
     stack_prefix = state.get('report_stack_prefix', '')
 
-    # shut down any already existing storage layers above any disks used in
-    # config that have 'wipe' set
-    with events.ReportEventStack(
-            name=stack_prefix, reporting_enabled=True, level='INFO',
-            description="removing previous storage devices"):
-        clear_holders.start_clear_holders_deps()
-        disk_paths = [get_path_to_storage_volume(k, storage_config_dict)
-                      for (k, v) in storage_config_dict.items()
-                      if v.get('type') == 'disk' and
-                      config.value_as_boolean(v.get('wipe')) and
-                      not config.value_as_boolean(v.get('preserve'))]
-        clear_holders.clear_holders(disk_paths)
-        # if anything was not properly shut down, stop installation
-        clear_holders.assert_clear(disk_paths)
-
     for item_id, command in storage_config_dict.items():
         handler = command_handlers.get(command['type'])
         if not handler:
@@ -1497,8 +1617,15 @@
     create a separate /boot partition.
     """
     state = util.load_command_environment()
-
     cfg = config.load_command_config(args, state)
 
+    if args.target is not None:
+        state['target'] = args.target
+
+    if state['target'] is None:
+        sys.stderr.write("Unable to find target. "
+                         "Use --target or set TARGET_MOUNT_POINT\n")
+        sys.exit(2)
+
     devpath = None
     if cfg.get("storage") is not None:
         for i in cfg["storage"]["config"]:
" - "Use --target or set TARGET_MOUNT_POINT\n") - sys.exit(2) devices = args.devices - if devices is None: - devices = cfg.get('block-meta', {}).get('devices', []) - bootpt = get_bootpt_cfg( cfg.get('block-meta', {}).get('boot-partition', {}), enabled=args.mode == SIMPLE_BOOT, fstype=args.boot_fstype, diff -Nru curtin-18.1-17-gae48e86f/curtin/commands/curthooks.py curtin-18.2-10-g7afd77fa/curtin/commands/curthooks.py --- curtin-18.1-17-gae48e86f/curtin/commands/curthooks.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/curtin/commands/curthooks.py 2019-02-15 20:42:42.000000000 +0000 @@ -11,12 +11,18 @@ from curtin import config from curtin import block +from curtin import distro +from curtin.block import iscsi from curtin import net from curtin import futil from curtin.log import LOG +from curtin import paths from curtin import swap from curtin import util from curtin import version as curtin_version +from curtin.block import deps as bdeps +from curtin.distro import DISTROS +from curtin.net import deps as ndeps from curtin.reporter import events from curtin.commands import apply_net, apt_config from curtin.url_helper import get_maas_version @@ -173,10 +179,10 @@ # target only has required packages installed. See LP:1640519 fk_packages = get_flash_kernel_pkgs() if fk_packages: - util.install_packages(fk_packages.split(), target=target) + distro.install_packages(fk_packages.split(), target=target) if kernel_package: - util.install_packages([kernel_package], target=target) + distro.install_packages([kernel_package], target=target) return # uname[2] is kernel name (ie: 3.16.0-7-generic) @@ -193,24 +199,24 @@ LOG.warn("Couldn't detect kernel package to install for %s." % kernel) if kernel_fallback is not None: - util.install_packages([kernel_fallback], target=target) + distro.install_packages([kernel_fallback], target=target) return package = "linux-{flavor}{map_suffix}".format( flavor=flavor, map_suffix=map_suffix) - if util.has_pkg_available(package, target): - if util.has_pkg_installed(package, target): + if distro.has_pkg_available(package, target): + if distro.has_pkg_installed(package, target): LOG.debug("Kernel package '%s' already installed", package) else: LOG.debug("installing kernel package '%s'", package) - util.install_packages([package], target=target) + distro.install_packages([package], target=target) else: if kernel_fallback is not None: LOG.info("Kernel package '%s' not available. " "Installing fallback package '%s'.", package, kernel_fallback) - util.install_packages([kernel_fallback], target=target) + distro.install_packages([kernel_fallback], target=target) else: LOG.warn("Kernel package '%s' not available and no fallback." " System may not boot.", package) @@ -273,7 +279,7 @@ LOG.debug("Currently booted UEFI loader might no longer boot.") -def setup_grub(cfg, target): +def setup_grub(cfg, target, osfamily=DISTROS.debian): # target is the path to the mounted filesystem # FIXME: these methods need moving to curtin.block @@ -292,7 +298,7 @@ storage_cfg_odict = None try: storage_cfg_odict = extract_storage_ordered_dict(cfg) - except ValueError as e: + except ValueError: pass if storage_cfg_odict: @@ -324,7 +330,7 @@ try: (blockdev, part) = block.get_blockdev_for_partition(maybepart) blockdevs.add(blockdev) - except ValueError as e: + except ValueError: # if there is no syspath for this device such as a lvm # or raid device, then a ValueError is raised here. 
LOG.debug("failed to find block device for %s", maybepart) @@ -353,24 +359,6 @@ else: instdevs = list(blockdevs) - # UEFI requires grub-efi-{arch}. If a signed version of that package - # exists then it will be installed. - if util.is_uefi_bootable(): - arch = util.get_architecture() - pkgs = ['grub-efi-%s' % arch] - - # Architecture might support a signed UEFI loader - uefi_pkg_signed = 'grub-efi-%s-signed' % arch - if util.has_pkg_available(uefi_pkg_signed): - pkgs.append(uefi_pkg_signed) - - # AMD64 has shim-signed for SecureBoot support - if arch == "amd64": - pkgs.append("shim-signed") - - # Install the UEFI packages needed for the architecture - util.install_packages(pkgs, target=target) - env = os.environ.copy() replace_default = grubcfg.get('replace_linux_default', True) @@ -399,6 +387,7 @@ else: LOG.debug("NOT enabling UEFI nvram updates") LOG.debug("Target system may not boot") + args.append('--os-family=%s' % osfamily) args.append(target) # capture stdout and stderr joined. @@ -435,14 +424,21 @@ shutil.copy(crypttab, os.path.sep.join([target, 'etc/crypttab'])) -def copy_iscsi_conf(nodes_dir, target): +def copy_iscsi_conf(nodes_dir, target, target_nodes_dir='etc/iscsi/nodes'): if not nodes_dir: LOG.warn("nodes directory must be specified, not copying") return LOG.info("copying iscsi nodes database into target") - shutil.copytree(nodes_dir, os.path.sep.join([target, - 'etc/iscsi/nodes'])) + tdir = os.path.sep.join([target, target_nodes_dir]) + if not os.path.exists(tdir): + shutil.copytree(nodes_dir, tdir) + else: + # if /etc/iscsi/nodes exists, copy dirs underneath + for ndir in os.listdir(nodes_dir): + source_dir = os.path.join(nodes_dir, ndir) + target_dir = os.path.join(tdir, ndir) + shutil.copytree(source_dir, target_dir) def copy_mdadm_conf(mdadm_conf, target): @@ -486,7 +482,7 @@ if not rules_d: LOG.warn("no udev rules directory to copy") return - target_rules_dir = util.target_path(target, "etc/udev/rules.d") + target_rules_dir = paths.target_path(target, "etc/udev/rules.d") for rule in os.listdir(rules_d): target_file = os.path.join(target_rules_dir, rule) shutil.copy(os.path.join(rules_d, rule), target_file) @@ -532,11 +528,19 @@ maxsize=maxsize) -def detect_and_handle_multipath(cfg, target): - DEFAULT_MULTIPATH_PACKAGES = ['multipath-tools-boot'] +def detect_and_handle_multipath(cfg, target, osfamily=DISTROS.debian): + DEFAULT_MULTIPATH_PACKAGES = { + DISTROS.debian: ['multipath-tools-boot'], + DISTROS.redhat: ['device-mapper-multipath'], + } + if osfamily not in DEFAULT_MULTIPATH_PACKAGES: + raise ValueError( + 'No multipath package mapping for distro: %s' % osfamily) + mpcfg = cfg.get('multipath', {}) mpmode = mpcfg.get('mode', 'auto') - mppkgs = mpcfg.get('packages', DEFAULT_MULTIPATH_PACKAGES) + mppkgs = mpcfg.get('packages', + DEFAULT_MULTIPATH_PACKAGES.get(osfamily)) mpbindings = mpcfg.get('overwrite_bindings', True) if isinstance(mppkgs, str): @@ -549,23 +553,28 @@ return LOG.info("Detected multipath devices. 
Installing support via %s", mppkgs) + needed = [pkg for pkg in mppkgs if pkg + not in distro.get_installed_packages(target)] + if needed: + distro.install_packages(needed, target=target, osfamily=osfamily) - util.install_packages(mppkgs, target=target) replace_spaces = True - try: - # check in-target version - pkg_ver = util.get_package_version('multipath-tools', target=target) - LOG.debug("get_package_version:\n%s", pkg_ver) - LOG.debug("multipath version is %s (major=%s minor=%s micro=%s)", - pkg_ver['semantic_version'], pkg_ver['major'], - pkg_ver['minor'], pkg_ver['micro']) - # multipath-tools versions < 0.5.0 do _NOT_ want whitespace replaced - # i.e. 0.4.X in Trusty. - if pkg_ver['semantic_version'] < 500: - replace_spaces = False - except Exception as e: - LOG.warn("failed reading multipath-tools version, " - "assuming it wants no spaces in wwids: %s", e) + if osfamily == DISTROS.debian: + try: + # check in-target version + pkg_ver = distro.get_package_version('multipath-tools', + target=target) + LOG.debug("get_package_version:\n%s", pkg_ver) + LOG.debug("multipath version is %s (major=%s minor=%s micro=%s)", + pkg_ver['semantic_version'], pkg_ver['major'], + pkg_ver['minor'], pkg_ver['micro']) + # multipath-tools versions < 0.5.0 do _NOT_ + # want whitespace replaced i.e. 0.4.X in Trusty. + if pkg_ver['semantic_version'] < 500: + replace_spaces = False + except Exception as e: + LOG.warn("failed reading multipath-tools version, " + "assuming it wants no spaces in wwids: %s", e) multipath_cfg_path = os.path.sep.join([target, '/etc/multipath.conf']) multipath_bind_path = os.path.sep.join([target, '/etc/multipath/bindings']) @@ -574,7 +583,7 @@ if not os.path.isfile(multipath_cfg_path): # Without user_friendly_names option enabled system fails to boot # if any of the disks has spaces in its name. Package multipath-tools - # has bug opened for this issue (LP: 1432062) but it was not fixed yet. + # has bug opened for this issue LP: #1432062 but it was not fixed yet. multipath_cfg_content = '\n'.join( ['# This file was created by curtin while installing the system.', 'defaults {', @@ -593,7 +602,13 @@ mpname = "mpath0" grub_dev = "/dev/mapper/" + mpname if partno is not None: - grub_dev += "-part%s" % partno + if osfamily == DISTROS.debian: + grub_dev += "-part%s" % partno + elif osfamily == DISTROS.redhat: + grub_dev += "p%s" % partno + else: + raise ValueError( + 'Unknown grub_dev mapping for distro: %s' % osfamily) LOG.debug("configuring multipath install for root=%s wwid=%s", grub_dev, wwid) @@ -606,31 +621,54 @@ '']) util.write_file(multipath_bind_path, content=multipath_bind_content) - grub_cfg = os.path.sep.join( - [target, '/etc/default/grub.d/50-curtin-multipath.cfg']) + if osfamily == DISTROS.debian: + grub_cfg = os.path.sep.join( + [target, '/etc/default/grub.d/50-curtin-multipath.cfg']) + omode = 'w' + elif osfamily == DISTROS.redhat: + grub_cfg = os.path.sep.join([target, '/etc/default/grub']) + omode = 'a' + else: + raise ValueError( + 'Unknown grub_cfg mapping for distro: %s' % osfamily) + msg = '\n'.join([ - '# Written by curtin for multipath device wwid "%s"' % wwid, + '# Written by curtin for multipath device %s %s' % (mpname, wwid), 'GRUB_DEVICE=%s' % grub_dev, 'GRUB_DISABLE_LINUX_UUID=true', '']) - util.write_file(grub_cfg, content=msg) - + util.write_file(grub_cfg, omode=omode, content=msg) else: LOG.warn("Not sure how this will boot") - # Initrams needs to be updated to include /etc/multipath.cfg - # and /etc/multipath/bindings files. 
-    update_initramfs(target, all_kernels=True)
+    if osfamily == DISTROS.debian:
+        # Initrams needs to be updated to include /etc/multipath.cfg
+        # and /etc/multipath/bindings files.
+        update_initramfs(target, all_kernels=True)
+    elif osfamily == DISTROS.redhat:
+        # Write out initramfs/dracut config for multipath
+        dracut_conf_multipath = os.path.sep.join(
+            [target, '/etc/dracut.conf.d/10-curtin-multipath.conf'])
+        msg = '\n'.join([
+            '# Written by curtin for multipath device wwid "%s"' % wwid,
+            'force_drivers+=" dm-multipath "',
+            'add_dracutmodules+="multipath"',
+            'install_items+="/etc/multipath.conf /etc/multipath/bindings"',
+            ''])
+        util.write_file(dracut_conf_multipath, content=msg)
+    else:
+        raise ValueError(
+                'Unknown initramfs mapping for distro: %s' % osfamily)
 
 
-def detect_required_packages(cfg):
+def detect_required_packages(cfg, osfamily=DISTROS.debian):
     """
     detect packages that will be required in-target by custom config items
     """
     mapping = {
-        'storage': block.detect_required_packages_mapping(),
-        'network': net.detect_required_packages_mapping(),
+        'storage': bdeps.detect_required_packages_mapping(osfamily=osfamily),
+        'network': ndeps.detect_required_packages_mapping(osfamily=osfamily),
     }
 
     needed_packages = []
@@ -657,16 +695,16 @@
     return needed_packages
 
 
-def install_missing_packages(cfg, target):
+def install_missing_packages(cfg, target, osfamily=DISTROS.debian):
     ''' describe which operation types will require specific packages
 
    'custom_config_key': {
        'pkg1': ['op_name_1', 'op_name_2', ...]
    }
    '''
-
-    installed_packages = util.get_installed_packages(target)
-    needed_packages = set([pkg for pkg in detect_required_packages(cfg)
+    installed_packages = distro.get_installed_packages(target)
+    needed_packages = set([pkg for pkg in
+                           detect_required_packages(cfg, osfamily=osfamily)
                           if pkg not in installed_packages])
 
     arch_packages = {
@@ -678,8 +716,35 @@
             if pkg not in needed_packages:
                 needed_packages.add(pkg)
 
+    # UEFI requires grub-efi-{arch}. If a signed version of that package
+    # exists then it will be installed.
+    if util.is_uefi_bootable():
+        uefi_pkgs = []
+        if osfamily == DISTROS.redhat:
+            # centos/redhat doesn't support 32-bit?
+            uefi_pkgs.extend(['grub2-efi-x64-modules'])
+        elif osfamily == DISTROS.debian:
+            arch = util.get_architecture()
+            uefi_pkgs.append('grub-efi-%s' % arch)
+
+            # Architecture might support a signed UEFI loader
+            uefi_pkg_signed = 'grub-efi-%s-signed' % arch
+            if distro.has_pkg_available(uefi_pkg_signed):
+                uefi_pkgs.append(uefi_pkg_signed)
+
+            # AMD64 has shim-signed for SecureBoot support
+            if arch == "amd64":
+                uefi_pkgs.append("shim-signed")
+        else:
+            raise ValueError('Unknown grub2 package list for distro: %s' %
+                             osfamily)
+        needed_packages.update([pkg for pkg in uefi_pkgs
+                                if pkg not in installed_packages])
+
     # Filter out ifupdown network packages on netplan enabled systems.
-    if 'ifupdown' not in installed_packages and 'nplan' in installed_packages:
+    has_netplan = ('nplan' in installed_packages or
+                   'netplan.io' in installed_packages)
+    if 'ifupdown' not in installed_packages and has_netplan:
         drops = set(['bridge-utils', 'ifenslave', 'vlan'])
         if needed_packages.union(drops):
Not needed on netplan system.", @@ -694,10 +759,10 @@ reporting_enabled=True, level="INFO", description="Installing packages on target system: " + str(to_add)): - util.install_packages(to_add, target=target) + distro.install_packages(to_add, target=target, osfamily=osfamily) -def system_upgrade(cfg, target): +def system_upgrade(cfg, target, osfamily=DISTROS.debian): """run system-upgrade (apt-get dist-upgrade) or other in target. config: @@ -716,7 +781,7 @@ LOG.debug("system_upgrade disabled by config.") return - util.system_upgrade(target=target) + distro.system_upgrade(target=target, osfamily=osfamily) def inject_pollinate_user_agent_config(ua_cfg, target): @@ -726,7 +791,7 @@ if not isinstance(ua_cfg, dict): raise ValueError('ua_cfg is not a dictionary: %s', ua_cfg) - pollinate_cfg = util.target_path(target, '/etc/pollinate/add-user-agent') + pollinate_cfg = paths.target_path(target, '/etc/pollinate/add-user-agent') comment = "# written by curtin" content = "\n".join(["%s/%s %s" % (ua_key, ua_val, comment) for ua_key, ua_val in ua_cfg.items()]) + "\n" @@ -749,6 +814,8 @@ curtin version maas version (via endpoint URL, if present) """ + if not util.which('pollinate', target=target): + return pcfg = cfg.get('pollinate') if not isinstance(pcfg, dict): @@ -774,6 +841,63 @@ inject_pollinate_user_agent_config(uacfg, target) +def configure_iscsi(cfg, state_etcd, target, osfamily=DISTROS.debian): + # If a /etc/iscsi/nodes/... file was created by block_meta then it + # needs to be copied onto the target system + nodes = os.path.join(state_etcd, "nodes") + if not os.path.exists(nodes): + return + + LOG.info('Iscsi configuration found, enabling service') + if osfamily == DISTROS.redhat: + # copy iscsi node config to target image + LOG.debug('Copying iscsi node config to target') + copy_iscsi_conf(nodes, target, target_nodes_dir='var/lib/iscsi/nodes') + + # update in-target config + with util.ChrootableTarget(target) as in_chroot: + # enable iscsid service + LOG.debug('Enabling iscsi daemon') + in_chroot.subp(['chkconfig', 'iscsid', 'on']) + + # update selinux config for iscsi ports required + for port in [str(port) for port in + iscsi.get_iscsi_ports_from_config(cfg)]: + LOG.debug('Adding iscsi port %s to selinux iscsi_port_t list', + port) + in_chroot.subp(['semanage', 'port', '-a', '-t', + 'iscsi_port_t', '-p', 'tcp', port]) + + elif osfamily == DISTROS.debian: + copy_iscsi_conf(nodes, target) + else: + raise ValueError( + 'Unknown iscsi requirements for distro: %s' % osfamily) + + +def configure_mdadm(cfg, state_etcd, target, osfamily=DISTROS.debian): + # If a mdadm.conf file was created by block_meta than it needs + # to be copied onto the target system + mdadm_location = os.path.join(state_etcd, "mdadm.conf") + if not os.path.exists(mdadm_location): + return + + conf_map = { + DISTROS.debian: 'etc/mdadm/mdadm.conf', + DISTROS.redhat: 'etc/mdadm.conf', + } + if osfamily not in conf_map: + raise ValueError( + 'Unknown mdadm conf mapping for distro: %s' % osfamily) + LOG.info('Mdadm configuration found, enabling service') + shutil.copy(mdadm_location, paths.target_path(target, + conf_map[osfamily])) + if osfamily == DISTROS.debian: + # as per LP: #964052 reconfigure mdadm + util.subp(['dpkg-reconfigure', '--frontend=noninteractive', 'mdadm'], + data=None, target=target) + + def handle_cloudconfig(cfg, base_dir=None): """write cloud-init configuration files into base_dir. 
@@ -843,21 +967,11 @@ content=config.dump_config({'network': netconfig})) -def rpm_get_dist_id(target): - """Use rpm command to extract the '%rhel' distro macro which returns - the major os version id (6, 7, 8). This works for centos or rhel - """ - with util.ChrootableTarget(target) as in_chroot: - dist, _ = in_chroot.subp(['rpm', '-E', '%rhel'], capture=True) - return dist.rstrip() - - -def centos_apply_network_config(netcfg, target=None): +def redhat_upgrade_cloud_init(netcfg, target=None, osfamily=DISTROS.redhat): """ CentOS images execute built-in curthooks which only supports simple networking configuration. This hook enables advanced network configuration via config passthrough to the target. """ - def cloud_init_repo(version): if not version: raise ValueError('Missing required version parameter') @@ -866,9 +980,9 @@ if netcfg: LOG.info('Removing embedded network configuration (if present)') - ifcfgs = glob.glob(util.target_path(target, - 'etc/sysconfig/network-scripts') + - '/ifcfg-*') + ifcfgs = glob.glob( + paths.target_path(target, 'etc/sysconfig/network-scripts') + + '/ifcfg-*') # remove ifcfg-* (except ifcfg-lo) for ifcfg in ifcfgs: if os.path.basename(ifcfg) != "ifcfg-lo": @@ -882,29 +996,27 @@ # if in-target cloud-init is not updated, upgrade via cloud-init repo if not passthrough: cloud_init_yum_repo = ( - util.target_path(target, - 'etc/yum.repos.d/curtin-cloud-init.repo')) + paths.target_path(target, + 'etc/yum.repos.d/curtin-cloud-init.repo')) # Inject cloud-init daily yum repo util.write_file(cloud_init_yum_repo, - content=cloud_init_repo(rpm_get_dist_id(target))) + content=cloud_init_repo( + distro.rpm_get_dist_id(target))) # we separate the installation of repository packages (epel, # cloud-init-el-release) as we need a new invocation of yum # to read the newly installed repo files. 
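# Illustrative sketch (not part of the patch). The comment above captures
# the constraint that motivates splitting the installs: one package-manager
# invocation resolves its repository list once, so packages that ship *new*
# repo definitions (epel-release, cloud-init-el-release) must be installed
# in a separate call before anything served from those repos. A stand-alone
# approximation, assuming only a yum-like CLI on PATH; the names here are
# hypothetical, not curtin API:
import subprocess

def install_in_stages(stages, dry_run=True):
    """Run one 'yum install' per stage so each later stage sees repo
    files installed by the earlier ones."""
    for pkgs in stages:
        cmd = ['yum', '-y', '--quiet', 'install'] + list(pkgs)
        if dry_run:
            print(' '.join(cmd))
        else:
            subprocess.check_call(cmd)

# repos first, then packages served from them:
install_in_stages([['ca-certificates'], ['epel-release'],
                   ['cloud-init-el-release'], ['cloud-init']])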
- YUM_CMD = ['yum', '-y', '--noplugins', 'install'] - retries = [1] * 30 - with util.ChrootableTarget(target) as in_chroot: - # ensure up-to-date ca-certificates to handle https mirror - # connections - in_chroot.subp(YUM_CMD + ['ca-certificates'], capture=True, - log_captured=True, retries=retries) - in_chroot.subp(YUM_CMD + ['epel-release'], capture=True, - log_captured=True, retries=retries) - in_chroot.subp(YUM_CMD + ['cloud-init-el-release'], - log_captured=True, capture=True, - retries=retries) - in_chroot.subp(YUM_CMD + ['cloud-init'], capture=True, - log_captured=True, retries=retries) + + # ensure up-to-date ca-certificates to handle https mirror + # connections + distro.install_packages(['ca-certificates'], target=target, + osfamily=osfamily) + distro.install_packages(['epel-release'], target=target, + osfamily=osfamily) + distro.install_packages(['cloud-init-el-release'], target=target, + osfamily=osfamily) + distro.install_packages(['cloud-init'], target=target, + osfamily=osfamily) # remove cloud-init el-stable bootstrap repo config as the # cloud-init-el-release package points to the correct repo @@ -917,127 +1029,136 @@ capture=False, rcs=[0]) except util.ProcessExecutionError: LOG.debug('Image missing bridge-utils package, installing') - in_chroot.subp(YUM_CMD + ['bridge-utils'], capture=True, - log_captured=True, retries=retries) + distro.install_packages(['bridge-utils'], target=target, + osfamily=osfamily) LOG.info('Passing network configuration through to target') net.render_netconfig_passthrough(target, netconfig={'network': netcfg}) -def target_is_ubuntu_core(target): - """Check if Ubuntu-Core specific directory is present at target""" - if target: - return os.path.exists(util.target_path(target, - 'system-data/var/lib/snapd')) - return False +# Public API, maas may call this from internal curthooks +centos_apply_network_config = redhat_upgrade_cloud_init -def target_is_centos(target): - """Check if CentOS specific file is present at target""" - if target: - return os.path.exists(util.target_path(target, 'etc/centos-release')) - - return False +def redhat_apply_selinux_autorelabel(target): + """Creates file /.autorelabel. + This is used by SELinux to relabel all of the + files on the filesystem to have the correct + security context. Without this SSH login will + fail. 
+ """ + LOG.debug('enabling selinux autorelabel') + open(paths.target_path(target, '.autorelabel'), 'a').close() -def target_is_rhel(target): - """Check if RHEL specific file is present at target""" - if target: - return os.path.exists(util.target_path(target, 'etc/redhat-release')) - return False +def redhat_update_dracut_config(target, cfg): + initramfs_mapping = { + 'lvm': {'conf': 'lvmconf', 'modules': 'lvm'}, + 'raid': {'conf': 'mdadmconf', 'modules': 'mdraid'}, + } + # no need to update initramfs if no custom storage + if 'storage' not in cfg: + return False -def curthooks(args): - state = util.load_command_environment() + storage_config = cfg.get('storage', {}).get('config') + if not storage_config: + raise ValueError('Invalid storage config') + + add_conf = set() + add_modules = set() + for scfg in storage_config: + if scfg['type'] == 'raid': + add_conf.add(initramfs_mapping['raid']['conf']) + add_modules.add(initramfs_mapping['raid']['modules']) + elif scfg['type'] in ['lvm_volgroup', 'lvm_partition']: + add_conf.add(initramfs_mapping['lvm']['conf']) + add_modules.add(initramfs_mapping['lvm']['modules']) + + dconfig = ['# Written by curtin for custom storage config'] + dconfig.append('add_dracutmodules+="%s"' % (" ".join(add_modules))) + for conf in add_conf: + dconfig.append('%s="yes"' % conf) + + # Write out initramfs/dracut config for storage config + dracut_conf_storage = os.path.sep.join( + [target, '/etc/dracut.conf.d/50-curtin-storage.conf']) + msg = '\n'.join(dconfig + ['']) + LOG.debug('Updating redhat dracut config') + util.write_file(dracut_conf_storage, content=msg) + return True + + +def redhat_update_initramfs(target, cfg): + if not redhat_update_dracut_config(target, cfg): + LOG.debug('Skipping redhat initramfs update, no custom storage config') + return + kver_cmd = ['rpm', '-q', '--queryformat', + '%{VERSION}-%{RELEASE}.%{ARCH}', 'kernel'] + with util.ChrootableTarget(target) as in_chroot: + LOG.debug('Finding redhat kernel version: %s', kver_cmd) + kver, _err = in_chroot.subp(kver_cmd, capture=True) + LOG.debug('Found kver=%s' % kver) + initramfs = '/boot/initramfs-%s.img' % kver + dracut_cmd = ['dracut', '-f', initramfs, kver] + LOG.debug('Rebuilding initramfs with: %s', dracut_cmd) + in_chroot.subp(dracut_cmd, capture=True) - if args.target is not None: - target = args.target - else: - target = state['target'] - if target is None: - sys.stderr.write("Unable to find target. 
" - "Use --target or set TARGET_MOUNT_POINT\n") - sys.exit(2) - - cfg = config.load_command_config(args, state) +def builtin_curthooks(cfg, target, state): + LOG.info('Running curtin builtin curthooks') stack_prefix = state.get('report_stack_prefix', '') + state_etcd = os.path.split(state['fstab'])[0] - # if curtin-hooks hook exists in target we can defer to the in-target hooks - if util.run_hook_if_exists(target, 'curtin-hooks'): - # For vmtests to force execute centos_apply_network_config, uncomment - # the value in examples/tests/centos_defaults.yaml - if cfg.get('_ammend_centos_curthooks'): - if cfg.get('cloudconfig'): - handle_cloudconfig( - cfg['cloudconfig'], - base_dir=util.target_path(target, 'etc/cloud/cloud.cfg.d')) - - if target_is_centos(target) or target_is_rhel(target): - LOG.info('Detected RHEL/CentOS image, running extra hooks') - with events.ReportEventStack( - name=stack_prefix, reporting_enabled=True, - level="INFO", - description="Configuring CentOS for first boot"): - centos_apply_network_config(cfg.get('network', {}), target) - sys.exit(0) - - if target_is_ubuntu_core(target): - LOG.info('Detected Ubuntu-Core image, running hooks') + distro_info = distro.get_distroinfo(target=target) + if not distro_info: + raise RuntimeError('Failed to determine target distro') + osfamily = distro_info.family + LOG.info('Configuring target system for distro: %s osfamily: %s', + distro_info.variant, osfamily) + if osfamily == DISTROS.debian: with events.ReportEventStack( - name=stack_prefix, reporting_enabled=True, level="INFO", - description="Configuring Ubuntu-Core for first boot"): - ubuntu_core_curthooks(cfg, target) - sys.exit(0) - - with events.ReportEventStack( - name=stack_prefix + '/writing-config', - reporting_enabled=True, level="INFO", - description="configuring apt configuring apt"): - do_apt_config(cfg, target) - disable_overlayroot(cfg, target) - - # LP: #1742560 prevent zfs-dkms from being installed (Xenial) - if util.lsb_release(target=target)['codename'] == 'xenial': - util.apt_update(target=target) - with util.ChrootableTarget(target) as in_chroot: - in_chroot.subp(['apt-mark', 'hold', 'zfs-dkms']) + name=stack_prefix + '/writing-apt-config', + reporting_enabled=True, level="INFO", + description="configuring apt configuring apt"): + do_apt_config(cfg, target) + disable_overlayroot(cfg, target) + + # LP: #1742560 prevent zfs-dkms from being installed (Xenial) + if distro.lsb_release(target=target)['codename'] == 'xenial': + distro.apt_update(target=target) + with util.ChrootableTarget(target) as in_chroot: + in_chroot.subp(['apt-mark', 'hold', 'zfs-dkms']) # packages may be needed prior to installing kernel with events.ReportEventStack( name=stack_prefix + '/installing-missing-packages', reporting_enabled=True, level="INFO", description="installing missing packages"): - install_missing_packages(cfg, target) + install_missing_packages(cfg, target, osfamily=osfamily) - # If a /etc/iscsi/nodes/... file was created by block_meta then it - # needs to be copied onto the target system - nodes_location = os.path.join(os.path.split(state['fstab'])[0], - "nodes") - if os.path.exists(nodes_location): - copy_iscsi_conf(nodes_location, target) - # do we need to reconfigure open-iscsi? 
- - # If a mdadm.conf file was created by block_meta than it needs to be copied - # onto the target system - mdadm_location = os.path.join(os.path.split(state['fstab'])[0], - "mdadm.conf") - if os.path.exists(mdadm_location): - copy_mdadm_conf(mdadm_location, target) - # as per https://bugs.launchpad.net/ubuntu/+source/mdadm/+bug/964052 - # reconfigure mdadm - util.subp(['dpkg-reconfigure', '--frontend=noninteractive', 'mdadm'], - data=None, target=target) + with events.ReportEventStack( + name=stack_prefix + '/configuring-iscsi-service', + reporting_enabled=True, level="INFO", + description="configuring iscsi service"): + configure_iscsi(cfg, state_etcd, target, osfamily=osfamily) with events.ReportEventStack( - name=stack_prefix + '/installing-kernel', + name=stack_prefix + '/configuring-mdadm-service', reporting_enabled=True, level="INFO", - description="installing kernel"): - setup_zipl(cfg, target) - install_kernel(cfg, target) - run_zipl(cfg, target) - restore_dist_interfaces(cfg, target) + description="configuring raid (mdadm) service"): + configure_mdadm(cfg, state_etcd, target, osfamily=osfamily) + + if osfamily == DISTROS.debian: + with events.ReportEventStack( + name=stack_prefix + '/installing-kernel', + reporting_enabled=True, level="INFO", + description="installing kernel"): + setup_zipl(cfg, target) + install_kernel(cfg, target) + run_zipl(cfg, target) + restore_dist_interfaces(cfg, target) with events.ReportEventStack( name=stack_prefix + '/setting-up-swap', @@ -1045,6 +1166,23 @@ description="setting up swap"): add_swap(cfg, target, state.get('fstab')) + if osfamily == DISTROS.redhat: + # set cloud-init maas datasource for centos images + if cfg.get('cloudconfig'): + handle_cloudconfig( + cfg['cloudconfig'], + base_dir=paths.target_path(target, + 'etc/cloud/cloud.cfg.d')) + + # For vmtests to force execute redhat_upgrade_cloud_init, uncomment + # the value in examples/tests/centos_defaults.yaml + if cfg.get('_ammend_centos_curthooks'): + with events.ReportEventStack( + name=stack_prefix + '/upgrading cloud-init', + reporting_enabled=True, level="INFO", + description="Upgrading cloud-init in target"): + redhat_upgrade_cloud_init(cfg.get('network', {}), target) + with events.ReportEventStack( name=stack_prefix + '/apply-networking-config', reporting_enabled=True, level="INFO", @@ -1061,29 +1199,44 @@ name=stack_prefix + '/configuring-multipath', reporting_enabled=True, level="INFO", description="configuring multipath"): - detect_and_handle_multipath(cfg, target) + detect_and_handle_multipath(cfg, target, osfamily=osfamily) with events.ReportEventStack( name=stack_prefix + '/system-upgrade', reporting_enabled=True, level="INFO", description="updating packages on target system"): - system_upgrade(cfg, target) + system_upgrade(cfg, target, osfamily=osfamily) + + if osfamily == DISTROS.redhat: + with events.ReportEventStack( + name=stack_prefix + '/enabling-selinux-autorelabel', + reporting_enabled=True, level="INFO", + description="enabling selinux autorelabel mode"): + redhat_apply_selinux_autorelabel(target) + + with events.ReportEventStack( + name=stack_prefix + '/updating-initramfs-configuration', + reporting_enabled=True, level="INFO", + description="updating initramfs configuration"): + redhat_update_initramfs(target, cfg) with events.ReportEventStack( name=stack_prefix + '/pollinate-user-agent', reporting_enabled=True, level="INFO", - description="configuring pollinate user-agent on target system"): + description="configuring pollinate user-agent on target"): 
handle_pollinate_user_agent(cfg, target) - # If a crypttab file was created by block_meta than it needs to be copied - # onto the target system, and update_initramfs() needs to be run, so that - # the cryptsetup hooks are properly configured on the installed system and - # it will be able to open encrypted volumes at boot. - crypttab_location = os.path.join(os.path.split(state['fstab'])[0], - "crypttab") - if os.path.exists(crypttab_location): - copy_crypttab(crypttab_location, target) - update_initramfs(target) + if osfamily == DISTROS.debian: + # If a crypttab file was created by block_meta than it needs to be + # copied onto the target system, and update_initramfs() needs to be + # run, so that the cryptsetup hooks are properly configured on the + # installed system and it will be able to open encrypted volumes + # at boot. + crypttab_location = os.path.join(os.path.split(state['fstab'])[0], + "crypttab") + if os.path.exists(crypttab_location): + copy_crypttab(crypttab_location, target) + update_initramfs(target) # If udev dname rules were created, copy them to target udev_rules_d = os.path.join(state['scratch'], "rules.d") @@ -1100,8 +1253,41 @@ machine.startswith('aarch64') and not util.is_uefi_bootable()): update_initramfs(target) else: - setup_grub(cfg, target) + setup_grub(cfg, target, osfamily=osfamily) + + +def curthooks(args): + state = util.load_command_environment() + + if args.target is not None: + target = args.target + else: + target = state['target'] + + if target is None: + sys.stderr.write("Unable to find target. " + "Use --target or set TARGET_MOUNT_POINT\n") + sys.exit(2) + + cfg = config.load_command_config(args, state) + stack_prefix = state.get('report_stack_prefix', '') + curthooks_mode = cfg.get('curthooks', {}).get('mode', 'auto') + + # UC is special, handle it first. + if distro.is_ubuntu_core(target): + LOG.info('Detected Ubuntu-Core image, running hooks') + with events.ReportEventStack( + name=stack_prefix, reporting_enabled=True, level="INFO", + description="Configuring Ubuntu-Core for first boot"): + ubuntu_core_curthooks(cfg, target) + sys.exit(0) + + # user asked for target, or auto mode + if curthooks_mode in ['auto', 'target']: + if util.run_hook_if_exists(target, 'curtin-hooks'): + sys.exit(0) + builtin_curthooks(cfg, target, state) sys.exit(0) diff -Nru curtin-18.1-17-gae48e86f/curtin/commands/extract.py curtin-18.2-10-g7afd77fa/curtin/commands/extract.py --- curtin-18.1-17-gae48e86f/curtin/commands/extract.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/curtin/commands/extract.py 2019-02-15 20:42:42.000000000 +0000 @@ -1,6 +1,7 @@ # This file is part of curtin. See LICENSE file for copyright and license info. 
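# Illustrative sketch (not part of the patch). The new 'curthooks: mode'
# config key visible above selects between in-target hooks and curtin's
# builtin hooks: 'auto' and 'target' prefer an in-target 'curtin-hooks'
# script when one exists, while any other value (e.g. a hypothetical
# 'builtin') forces the builtin path. The selection logic, reduced to a
# pure function:
def choose_curthooks(mode, target_hook_exists):
    if mode in ('auto', 'target') and target_hook_exists:
        return 'target-hook'
    return 'builtin'

assert choose_curthooks('auto', True) == 'target-hook'
assert choose_curthooks('auto', False) == 'builtin'
assert choose_curthooks('builtin', True) == 'builtin'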
import os +import shutil import sys import tempfile @@ -59,12 +60,12 @@ def extract_root_fsimage_url(url, target): path = _path_from_file_url(url) if path != url or os.path.isfile(path): - return _extract_root_fsimage(path(url), target) + return _extract_root_fsimage(path, target) wfp = tempfile.NamedTemporaryFile(suffix=".img", delete=False) wfp.close() try: - url_helper.download(url, wfp.name) + url_helper.download(url, wfp.name, retries=3) return _extract_root_fsimage(wfp.name, target) finally: os.unlink(wfp.name) @@ -85,6 +86,121 @@ os.rmdir(mp) +def extract_root_layered_fsimage_url(uri, target): + ''' Build images list to consider from a layered structure + + uri: URI of the layer file + target: Target file system to provision + + return: None + ''' + path = _path_from_file_url(uri) + + image_stack = _get_image_stack(path) + LOG.debug("Considering fsimages: '%s'", ",".join(image_stack)) + + tmp_dir = None + try: + # Download every remote images if remote url + if url_helper.urlparse(path).scheme != "": + tmp_dir = tempfile.mkdtemp() + image_stack = _download_layered_images(image_stack, tmp_dir) + + # Check that all images exists on disk and are not empty + for img in image_stack: + if not os.path.isfile(img) or os.path.getsize(img) <= 0: + raise ValueError("Failed to use fsimage: '%s' doesn't exist " + + "or is invalid", img) + + return _extract_root_layered_fsimage(image_stack, target) + finally: + if tmp_dir and os.path.exists(tmp_dir): + shutil.rmtree(tmp_dir) + + +def _download_layered_images(image_stack, tmp_dir): + local_image_stack = [] + try: + for img_url in image_stack: + dest_path = os.path.join(tmp_dir, + os.path.basename(img_url)) + url_helper.download(img_url, dest_path, retries=3) + local_image_stack.append(dest_path) + except url_helper.UrlError as e: + LOG.error("Failed to download '%s'" % img_url) + raise e + return local_image_stack + + +def _extract_root_layered_fsimage(image_stack, target): + mp_base = tempfile.mkdtemp() + mps = [] + try: + # Create a mount point for each image file and mount the image + try: + for img in image_stack: + mp = os.path.join(mp_base, os.path.basename(img) + ".dir") + os.mkdir(mp) + util.subp(['mount', '-o', 'loop,ro', img, mp], capture=True) + mps.insert(0, mp) + except util.ProcessExecutionError as e: + LOG.error("Failed to mount '%s' for extraction: %s", img, e) + raise e + + # Prepare + if len(mps) == 1: + root_dir = mps[0] + else: + # Multiple image files, merge them with an overlay and do the copy + root_dir = os.path.join(mp_base, "root.dir") + os.mkdir(root_dir) + try: + util.subp(['mount', '-t', 'overlay', 'overlay', '-o', + 'lowerdir=' + ':'.join(mps), root_dir], + capture=True) + mps.append(root_dir) + except util.ProcessExecutionError as e: + LOG.error("overlay mount to %s failed: %s", root_dir, e) + raise e + + copy_to_target(root_dir, target) + finally: + umount_err_mps = [] + for mp in reversed(mps): + try: + util.subp(['umount', mp], capture=True) + except util.ProcessExecutionError as e: + LOG.error("can't unmount %s: %e", mp, e) + umount_err_mps.append(mp) + if umount_err_mps: + raise util.ProcessExecutionError( + "Failed to umount: %s", ", ".join(umount_err_mps)) + shutil.rmtree(mp_base) + + +def _get_image_stack(uri): + '''Find a list of dependent images for given layered fsimage path + + uri: URI of the layer file + return: tuple of path to dependent images + ''' + + image_stack = [] + root_dir = os.path.dirname(uri) + img_name = os.path.basename(uri) + _, img_ext = os.path.splitext(img_name) + + img_parts = 
img_name.split('.') + # Last item is the extension + for i in img_parts[:-1]: + image_stack.append( + os.path.join( + root_dir, + '.'.join(img_parts[0:img_parts.index(i)+1]) + img_ext)) + + return image_stack + + def copy_to_target(source, target): if source.startswith("cp://"): source = source[5:] @@ -133,6 +249,8 @@ copy_to_target(source['uri'], target) elif source['type'] == "fsimage": extract_root_fsimage_url(source['uri'], target=target) + elif source['type'] == "fsimage-layered": + extract_root_layered_fsimage_url(source['uri'], target=target) else: extract_root_tgz_url(source['uri'], target=target) diff -Nru curtin-18.1-17-gae48e86f/curtin/commands/features.py curtin-18.2-10-g7afd77fa/curtin/commands/features.py --- curtin-18.1-17-gae48e86f/curtin/commands/features.py 1970-01-01 00:00:00.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/curtin/commands/features.py 2019-02-15 20:42:42.000000000 +0000 @@ -0,0 +1,20 @@ +# This file is part of curtin. See LICENSE file for copyright and license info. +"""List the supported feature names to stdout.""" + +import sys +from .. import FEATURES +from . import populate_one_subcmd + +CMD_ARGUMENTS = ((tuple())) + + +def features_main(args): + sys.stdout.write("\n".join(sorted(FEATURES)) + "\n") + sys.exit(0) + + +def POPULATE_SUBCMD(parser): + populate_one_subcmd(parser, CMD_ARGUMENTS, features_main) + parser.description = __doc__ + +# vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/curtin/commands/install.py curtin-18.2-10-g7afd77fa/curtin/commands/install.py --- curtin-18.1-17-gae48e86f/curtin/commands/install.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/curtin/commands/install.py 2019-02-15 20:42:42.000000000 +0000 @@ -13,9 +13,11 @@ from curtin.block import iscsi from curtin import config +from curtin import distro from curtin import util +from curtin import paths from curtin import version -from curtin.log import LOG +from curtin.log import LOG, logged_time from curtin.reporter.legacy import load_reporter from curtin.reporter import events from . import populate_one_subcmd @@ -80,7 +82,7 @@ LOG.debug('Copying curtin install log from %s to target/%s', logfile, log_target_path) util.write_file( - filename=util.target_path(target, log_target_path), + filename=paths.target_path(target, log_target_path), content=util.load_file(logfile, decode=False), mode=0o400, omode="wb") @@ -111,12 +113,22 @@ def __init__(self, config): top_d = tempfile.mkdtemp() state_d = os.path.join(top_d, 'state') + scratch_d = os.path.join(top_d, 'scratch') + for p in (state_d, scratch_d): + os.mkdir(p) + target_d = config.get('install', {}).get('target') if not target_d: target_d = os.path.join(top_d, 'target') - scratch_d = os.path.join(top_d, 'scratch') - for p in (state_d, target_d, scratch_d): - os.mkdir(p) + try: + util.ensure_dir(target_d) + except OSError as e: + raise ValueError( + "Unable to create target directory '%s': %s" % + (target_d, e)) + if os.listdir(target_d) != []: + raise ValueError( + "Provided target dir '%s' was not empty." 
% target_d) netconf_f = os.path.join(state_d, 'network_config') netstate_f = os.path.join(state_d, 'network_state') @@ -309,7 +321,7 @@ raise TypeError("kexec is not a dict.") if not util.which('kexec'): - util.install_packages('kexec-tools') + distro.install_packages('kexec-tools') if not os.path.isfile(target_grubcfg): raise ValueError("%s does not exist in target" % grubcfg) @@ -380,6 +392,7 @@ cfg['proxy'] = proxy +@logged_time("INSTALL_COMMAND") def cmd_install(args): from .collect_logs import create_log_tarfile cfg = deepcopy(CONFIG_BUILTIN) @@ -395,10 +408,6 @@ if not len(cfg.get('sources', [])): raise util.BadUsage("no sources provided to install") - for i in cfg['sources']: - # we default to tgz for old style sources config - cfg['sources'][i] = util.sanitize_source(cfg['sources'][i]) - migrate_proxy_settings(cfg) for k in ('http_proxy', 'https_proxy', 'no_proxy'): if k in cfg['proxy']: @@ -429,6 +438,7 @@ writeline_and_stdout(logfile, INSTALL_START_MSG) args.reportstack.post_files = post_files + workingd = None try: workingd = WorkingDir(cfg) dd_images = util.get_dd_images(cfg.get('sources', {})) @@ -469,12 +479,12 @@ raise e finally: log_target_path = instcfg.get('save_install_log', SAVE_INSTALL_LOG) - if log_target_path: + if log_target_path and workingd: copy_install_log(logfile, workingd.target, log_target_path) if instcfg.get('unmount', "") == "disabled": LOG.info('Skipping unmount: config disabled target unmounting') - else: + elif workingd: # unmount everything (including iscsi disks) util.do_umount(workingd.target, recursive=True) diff -Nru curtin-18.1-17-gae48e86f/curtin/commands/in_target.py curtin-18.2-10-g7afd77fa/curtin/commands/in_target.py --- curtin-18.1-17-gae48e86f/curtin/commands/in_target.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/curtin/commands/in_target.py 2019-02-15 20:42:42.000000000 +0000 @@ -4,7 +4,7 @@ import pty import sys -from curtin import util +from curtin import paths, util from . 
import populate_one_subcmd @@ -41,7 +41,7 @@ sys.exit(2) daemons = args.allow_daemons - if util.target_path(args.target) == "/": + if paths.target_path(args.target) == "/": sys.stderr.write("WARN: Target is /, daemons are allowed.\n") daemons = True cmd = args.command_args diff -Nru curtin-18.1-17-gae48e86f/curtin/commands/__main__.py curtin-18.2-10-g7afd77fa/curtin/commands/__main__.py --- curtin-18.1-17-gae48e86f/curtin/commands/__main__.py 1970-01-01 00:00:00.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/curtin/commands/__main__.py 2019-02-15 20:42:42.000000000 +0000 @@ -0,0 +1,4 @@ +if __name__ == '__main__': + from .main import main + import sys + sys.exit(main()) diff -Nru curtin-18.1-17-gae48e86f/curtin/commands/main.py curtin-18.2-10-g7afd77fa/curtin/commands/main.py --- curtin-18.1-17-gae48e86f/curtin/commands/main.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/curtin/commands/main.py 2019-02-15 20:42:42.000000000 +0000 @@ -16,9 +16,9 @@ SUB_COMMAND_MODULES = [ 'apply_net', 'apt-config', 'block-attach-iscsi', 'block-detach-iscsi', 'block-info', 'block-meta', 'block-wipe', 'clear-holders', 'curthooks', - 'collect-logs', 'extract', 'hook', 'install', 'mkfs', 'in-target', - 'net-meta', 'pack', 'swap', 'system-install', 'system-upgrade', 'unmount', - 'version', + 'collect-logs', 'extract', 'features', + 'hook', 'install', 'mkfs', 'in-target', 'net-meta', 'pack', 'swap', + 'system-install', 'system-upgrade', 'unmount', 'version', ] diff -Nru curtin-18.1-17-gae48e86f/curtin/commands/system_install.py curtin-18.2-10-g7afd77fa/curtin/commands/system_install.py --- curtin-18.1-17-gae48e86f/curtin/commands/system_install.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/curtin/commands/system_install.py 2019-02-15 20:42:42.000000000 +0000 @@ -7,6 +7,7 @@ from . import populate_one_subcmd from curtin.log import LOG +from curtin import distro def system_install_pkgs_main(args): @@ -16,7 +17,7 @@ exit_code = 0 try: - util.install_packages( + distro.install_packages( pkglist=args.packages, target=args.target, allow_daemons=args.allow_daemons) except util.ProcessExecutionError as e: diff -Nru curtin-18.1-17-gae48e86f/curtin/commands/system_upgrade.py curtin-18.2-10-g7afd77fa/curtin/commands/system_upgrade.py --- curtin-18.1-17-gae48e86f/curtin/commands/system_upgrade.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/curtin/commands/system_upgrade.py 2019-02-15 20:42:42.000000000 +0000 @@ -7,6 +7,7 @@ from . 
import populate_one_subcmd from curtin.log import LOG +from curtin import distro def system_upgrade_main(args): @@ -16,8 +17,8 @@ exit_code = 0 try: - util.system_upgrade(target=args.target, - allow_daemons=args.allow_daemons) + distro.system_upgrade(target=args.target, + allow_daemons=args.allow_daemons) except util.ProcessExecutionError as e: LOG.warn("system upgrade failed: %s" % e) exit_code = e.exit_code diff -Nru curtin-18.1-17-gae48e86f/curtin/commands/unmount.py curtin-18.2-10-g7afd77fa/curtin/commands/unmount.py --- curtin-18.1-17-gae48e86f/curtin/commands/unmount.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/curtin/commands/unmount.py 2019-02-15 20:42:42.000000000 +0000 @@ -6,11 +6,6 @@ import os -try: - FileMissingError = FileNotFoundError -except NameError: - FileMissingError = IOError - def unmount_main(args): """ @@ -22,7 +17,7 @@ if not os.path.exists(args.target): msg = "Cannot unmount target path %s: it does not exist" % args.target - raise FileMissingError(msg) + raise util.FileMissingError(msg) LOG.info("Unmounting devices from target path: %s", args.target) recursive_mode = not args.disable_recursive_mounts diff -Nru curtin-18.1-17-gae48e86f/curtin/deps/__init__.py curtin-18.2-10-g7afd77fa/curtin/deps/__init__.py --- curtin-18.1-17-gae48e86f/curtin/deps/__init__.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/curtin/deps/__init__.py 2019-02-15 20:42:42.000000000 +0000 @@ -6,13 +6,13 @@ from curtin.util import ( ProcessExecutionError, get_architecture, - install_packages, is_uefi_bootable, - lsb_release, subp, which, ) +from curtin.distro import install_packages, lsb_release + REQUIRED_IMPORTS = [ # import string to execute, python2 package, python3 package ('import yaml', 'python-yaml', 'python3-yaml'), @@ -177,7 +177,7 @@ ret = 0 try: install_packages(missing_pkgs, allow_daemons=allow_daemons, - aptopts=["--no-install-recommends"]) + opts=["--no-install-recommends"]) except ProcessExecutionError as e: sys.stderr.write("%s\n" % e) ret = e.exit_code diff -Nru curtin-18.1-17-gae48e86f/curtin/distro.py curtin-18.2-10-g7afd77fa/curtin/distro.py --- curtin-18.1-17-gae48e86f/curtin/distro.py 1970-01-01 00:00:00.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/curtin/distro.py 2019-02-15 20:42:42.000000000 +0000 @@ -0,0 +1,512 @@ +# This file is part of curtin. See LICENSE file for copyright and license info. 
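# Illustrative note (not part of the patch). unmount.py above drops its
# private py2/py3 alias in favour of util.FileMissingError; the alias itself
# (added in the util.py hunk later in this diff) is the standard
# compatibility idiom:
try:
    FileMissingError = FileNotFoundError   # Python 3
except NameError:
    FileMissingError = IOError             # Python 2 has no FileNotFoundError

try:
    open('/nonexistent/curtin-demo')
except FileMissingError as e:
    print('caught: %s' % e)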
+import glob +from collections import namedtuple +import os +import re +import shutil +import tempfile + +from .paths import target_path +from .util import ( + ChrootableTarget, + find_newer, + load_file, + load_shell_content, + ProcessExecutionError, + set_unexecutable, + string_types, + subp, + which +) +from .log import LOG + +DistroInfo = namedtuple('DistroInfo', ('variant', 'family')) +DISTRO_NAMES = ['arch', 'centos', 'debian', 'fedora', 'freebsd', 'gentoo', + 'opensuse', 'redhat', 'rhel', 'sles', 'suse', 'ubuntu'] + + +# python2.7 lacks PEP 435, so we must make use an alternative for py2.7/3.x +# https://stackoverflow.com/questions/36932/how-can-i-represent-an-enum-in-python +def distro_enum(*distros): + return namedtuple('Distros', distros)(*distros) + + +DISTROS = distro_enum(*DISTRO_NAMES) + +OS_FAMILIES = { + DISTROS.debian: [DISTROS.debian, DISTROS.ubuntu], + DISTROS.redhat: [DISTROS.centos, DISTROS.fedora, DISTROS.redhat, + DISTROS.rhel], + DISTROS.gentoo: [DISTROS.gentoo], + DISTROS.freebsd: [DISTROS.freebsd], + DISTROS.suse: [DISTROS.opensuse, DISTROS.sles, DISTROS.suse], + DISTROS.arch: [DISTROS.arch], +} + +# invert the mapping for faster lookup of variants +DISTRO_TO_OSFAMILY = ( + {variant: family for family, variants in OS_FAMILIES.items() + for variant in variants}) + +_LSB_RELEASE = {} + + +def name_to_distro(distname): + try: + return DISTROS[DISTROS.index(distname)] + except (IndexError, AttributeError): + LOG.error('Unknown distro name: %s', distname) + + +def lsb_release(target=None): + if target_path(target) != "/": + # do not use or update cache if target is provided + return _lsb_release(target) + + global _LSB_RELEASE + if not _LSB_RELEASE: + data = _lsb_release() + _LSB_RELEASE.update(data) + return _LSB_RELEASE + + +def os_release(target=None): + data = {} + os_release = target_path(target, 'etc/os-release') + if os.path.exists(os_release): + data = load_shell_content(load_file(os_release), + add_empty=False, empty_val=None) + if not data: + for relfile in [target_path(target, rel) for rel in + ['etc/centos-release', 'etc/redhat-release']]: + data = _parse_redhat_release(release_file=relfile, target=target) + if data: + break + + return data + + +def _parse_redhat_release(release_file=None, target=None): + """Return a dictionary of distro info fields from /etc/redhat-release. 
+ + Dict keys will align with /etc/os-release keys: + ID, VERSION_ID, VERSION_CODENAME + """ + + if not release_file: + release_file = target_path('etc/redhat-release') + if not os.path.exists(release_file): + return {} + redhat_release = load_file(release_file) + redhat_regex = ( + r'(?P.+) release (?P[\d\.]+) ' + r'\((?P[^)]+)\)') + match = re.match(redhat_regex, redhat_release) + if match: + group = match.groupdict() + group['name'] = group['name'].lower().partition(' linux')[0] + if group['name'] == 'red hat enterprise': + group['name'] = 'redhat' + return {'ID': group['name'], 'VERSION_ID': group['version'], + 'VERSION_CODENAME': group['codename']} + return {} + + +def get_distroinfo(target=None): + variant_name = os_release(target=target)['ID'] + variant = name_to_distro(variant_name) + family = DISTRO_TO_OSFAMILY.get(variant) + return DistroInfo(variant, family) + + +def get_distro(target=None): + distinfo = get_distroinfo(target=target) + return distinfo.variant + + +def get_osfamily(target=None): + distinfo = get_distroinfo(target=target) + return distinfo.family + + +def is_ubuntu_core(target=None): + """Check if Ubuntu-Core specific directory is present at target""" + return os.path.exists(target_path(target, 'system-data/var/lib/snapd')) + + +def is_centos(target=None): + """Check if CentOS specific file is present at target""" + return os.path.exists(target_path(target, 'etc/centos-release')) + + +def is_rhel(target=None): + """Check if RHEL specific file is present at target""" + return os.path.exists(target_path(target, 'etc/redhat-release')) + + +def _lsb_release(target=None): + fmap = {'Codename': 'codename', 'Description': 'description', + 'Distributor ID': 'id', 'Release': 'release'} + + data = {} + try: + out, _ = subp(['lsb_release', '--all'], capture=True, target=target) + for line in out.splitlines(): + fname, _, val = line.partition(":") + if fname in fmap: + data[fmap[fname]] = val.strip() + missing = [k for k in fmap.values() if k not in data] + if len(missing): + LOG.warn("Missing fields in lsb_release --all output: %s", + ','.join(missing)) + + except ProcessExecutionError as err: + LOG.warn("Unable to get lsb_release --all: %s", err) + data = {v: "UNAVAILABLE" for v in fmap.values()} + + return data + + +def apt_update(target=None, env=None, force=False, comment=None, + retries=None): + + marker = "tmp/curtin.aptupdate" + + if env is None: + env = os.environ.copy() + + if retries is None: + # by default run apt-update up to 3 times to allow + # for transient failures + retries = (1, 2, 3) + + if comment is None: + comment = "no comment provided" + + if comment.endswith("\n"): + comment = comment[:-1] + + marker = target_path(target, marker) + # if marker exists, check if there are files that would make it obsolete + listfiles = [target_path(target, "/etc/apt/sources.list")] + listfiles += glob.glob( + target_path(target, "etc/apt/sources.list.d/*.list")) + + if os.path.exists(marker) and not force: + if len(find_newer(marker, listfiles)) == 0: + return + + restore_perms = [] + + abs_tmpdir = tempfile.mkdtemp(dir=target_path(target, "/tmp")) + try: + abs_slist = abs_tmpdir + "/sources.list" + abs_slistd = abs_tmpdir + "/sources.list.d" + ch_tmpdir = "/tmp/" + os.path.basename(abs_tmpdir) + ch_slist = ch_tmpdir + "/sources.list" + ch_slistd = ch_tmpdir + "/sources.list.d" + + # this file gets executed on apt-get update sometimes. 
(LP: #1527710) + motd_update = target_path( + target, "/usr/lib/update-notifier/update-motd-updates-available") + pmode = set_unexecutable(motd_update) + if pmode is not None: + restore_perms.append((motd_update, pmode),) + + # create tmpdir/sources.list with all lines other than deb-src + # avoid apt complaining by using existing and empty dir for sourceparts + os.mkdir(abs_slistd) + with open(abs_slist, "w") as sfp: + for sfile in listfiles: + with open(sfile, "r") as fp: + contents = fp.read() + for line in contents.splitlines(): + line = line.lstrip() + if not line.startswith("deb-src"): + sfp.write(line + "\n") + + update_cmd = [ + 'apt-get', '--quiet', + '--option=Acquire::Languages=none', + '--option=Dir::Etc::sourcelist=%s' % ch_slist, + '--option=Dir::Etc::sourceparts=%s' % ch_slistd, + 'update'] + + # do not using 'run_apt_command' so we can use 'retries' to subp + with ChrootableTarget(target, allow_daemons=True) as inchroot: + inchroot.subp(update_cmd, env=env, retries=retries) + finally: + for fname, perms in restore_perms: + os.chmod(fname, perms) + if abs_tmpdir: + shutil.rmtree(abs_tmpdir) + + with open(marker, "w") as fp: + fp.write(comment + "\n") + + +def run_apt_command(mode, args=None, opts=None, env=None, target=None, + execute=True, allow_daemons=False): + defopts = ['--quiet', '--assume-yes', + '--option=Dpkg::options::=--force-unsafe-io', + '--option=Dpkg::Options::=--force-confold'] + if args is None: + args = [] + + if opts is None: + opts = [] + + if env is None: + env = os.environ.copy() + env['DEBIAN_FRONTEND'] = 'noninteractive' + + if which('eatmydata', target=target): + emd = ['eatmydata'] + else: + emd = [] + + cmd = emd + ['apt-get'] + defopts + opts + [mode] + args + if not execute: + return env, cmd + + apt_update(target, env=env, comment=' '.join(cmd)) + with ChrootableTarget(target, allow_daemons=allow_daemons) as inchroot: + return inchroot.subp(cmd, env=env) + + +def run_yum_command(mode, args=None, opts=None, env=None, target=None, + execute=True, allow_daemons=False): + defopts = ['--assumeyes', '--quiet'] + + if args is None: + args = [] + + if opts is None: + opts = [] + + cmd = ['yum'] + defopts + opts + [mode] + args + if not execute: + return env, cmd + + if mode in ["install", "update", "upgrade"]: + return yum_install(mode, args, opts=opts, env=env, target=target, + allow_daemons=allow_daemons) + + with ChrootableTarget(target, allow_daemons=allow_daemons) as inchroot: + return inchroot.subp(cmd, env=env) + + +def yum_install(mode, packages=None, opts=None, env=None, target=None, + allow_daemons=False): + + defopts = ['--assumeyes', '--quiet'] + + if packages is None: + packages = [] + + if opts is None: + opts = [] + + if mode not in ['install', 'update', 'upgrade']: + raise ValueError( + 'Unsupported mode "%s" for yum package install/upgrade' % mode) + + # download first, then install/upgrade from cache + cmd = ['yum'] + defopts + opts + [mode] + dl_opts = ['--downloadonly', '--setopt=keepcache=1'] + inst_opts = ['--cacheonly'] + + # rpm requires /dev /sys and /proc be mounted, use ChrootableTarget + with ChrootableTarget(target, allow_daemons=allow_daemons) as inchroot: + inchroot.subp(cmd + dl_opts + packages, + env=env, retries=[1] * 10) + return inchroot.subp(cmd + inst_opts + packages, env=env) + + +def rpm_get_dist_id(target=None): + """Use rpm command to extract the '%rhel' distro macro which returns + the major os version id (6, 7, 8). 
This works for centos or rhel + """ + with ChrootableTarget(target) as in_chroot: + dist, _ = in_chroot.subp(['rpm', '-E', '%rhel'], capture=True) + return dist.rstrip() + + +def system_upgrade(opts=None, target=None, env=None, allow_daemons=False, + osfamily=None): + LOG.debug("Upgrading system in %s", target) + + distro_cfg = { + DISTROS.debian: {'function': run_apt_command, + 'subcommands': ('dist-upgrade', 'autoremove')}, + DISTROS.redhat: {'function': run_yum_command, + 'subcommands': ('upgrade',)}, + } + if osfamily not in distro_cfg: + raise ValueError('Distro "%s" does not have system_upgrade support', + osfamily) + + for mode in distro_cfg[osfamily]['subcommands']: + ret = distro_cfg[osfamily]['function']( + mode, opts=opts, target=target, + env=env, allow_daemons=allow_daemons) + return ret + + +def install_packages(pkglist, osfamily=None, opts=None, target=None, env=None, + allow_daemons=False): + if isinstance(pkglist, str): + pkglist = [pkglist] + + if not osfamily: + osfamily = get_osfamily(target=target) + + installer_map = { + DISTROS.debian: run_apt_command, + DISTROS.redhat: run_yum_command, + } + + install_cmd = installer_map.get(osfamily) + if not install_cmd: + raise ValueError('No packge install command for distro: %s' % + osfamily) + + return install_cmd('install', args=pkglist, opts=opts, target=target, + env=env, allow_daemons=allow_daemons) + + +def has_pkg_available(pkg, target=None, osfamily=None): + if not osfamily: + osfamily = get_osfamily(target=target) + + if osfamily not in [DISTROS.debian, DISTROS.redhat]: + raise ValueError('has_pkg_available: unsupported distro family: %s', + osfamily) + + if osfamily == DISTROS.debian: + out, _ = subp(['apt-cache', 'pkgnames'], capture=True, target=target) + for item in out.splitlines(): + if pkg == item.strip(): + return True + return False + + if osfamily == DISTROS.redhat: + out, _ = run_yum_command('list', opts=['--cacheonly']) + for item in out.splitlines(): + if item.lower().startswith(pkg.lower()): + return True + return False + + +def get_installed_packages(target=None): + if which('dpkg-query', target=target): + (out, _) = subp(['dpkg-query', '--list'], target=target, capture=True) + elif which('rpm', target=target): + # rpm requires /dev /sys and /proc be mounted, use ChrootableTarget + with ChrootableTarget(target) as in_chroot: + (out, _) = in_chroot.subp(['rpm', '-qa', '--queryformat', + 'ii %{NAME} %{VERSION}-%{RELEASE}\n'], + target=target, capture=True) + if not out: + raise ValueError('No package query tool') + + pkgs_inst = set() + for line in out.splitlines(): + try: + (state, pkg, other) = line.split(None, 2) + except ValueError: + continue + if state.startswith("hi") or state.startswith("ii"): + pkgs_inst.add(re.sub(":.*", "", pkg)) + + return pkgs_inst + + +def has_pkg_installed(pkg, target=None): + try: + out, _ = subp(['dpkg-query', '--show', '--showformat', + '${db:Status-Abbrev}', pkg], + capture=True, target=target) + return out.rstrip() == "ii" + except ProcessExecutionError: + return False + + +def parse_dpkg_version(raw, name=None, semx=None): + """Parse a dpkg version string into various parts and calcualate a + numerical value of the version for use in comparing package versions + + Native packages (without a '-'), will have the package version treated + as the upstream version. 
+ + returns a dictionary with fields: + 'major' (int), 'minor' (int), 'micro' (int), + 'semantic_version' (int), + 'extra' (string), 'raw' (string), 'upstream' (string), + 'name' (present only if name is not None) + """ + if not isinstance(raw, string_types): + raise TypeError( + "Invalid type %s for parse_dpkg_version" % raw.__class__) + + if semx is None: + semx = (10000, 100, 1) + + if "-" in raw: + upstream = raw.rsplit('-', 1)[0] + else: + # this is a native package, package version treated as upstream. + upstream = raw + + match = re.search(r'[^0-9.]', upstream) + if match: + extra = upstream[match.start():] + upstream_base = upstream[:match.start()] + else: + upstream_base = upstream + extra = None + + toks = upstream_base.split(".", 2) + if len(toks) == 3: + major, minor, micro = toks + elif len(toks) == 2: + major, minor, micro = (toks[0], toks[1], 0) + elif len(toks) == 1: + major, minor, micro = (toks[0], 0, 0) + + version = { + 'major': int(major), + 'minor': int(minor), + 'micro': int(micro), + 'extra': extra, + 'raw': raw, + 'upstream': upstream, + } + if name: + version['name'] = name + + if semx: + try: + version['semantic_version'] = int( + int(major) * semx[0] + int(minor) * semx[1] + + int(micro) * semx[2]) + except (ValueError, IndexError): + version['semantic_version'] = None + + return version + + +def get_package_version(pkg, target=None, semx=None): + """Use dpkg-query to extract package pkg's version string + and parse the version string into a dictionary + """ + try: + out, _ = subp(['dpkg-query', '--show', '--showformat', + '${Version}', pkg], capture=True, target=target) + raw = out.rstrip() + return parse_dpkg_version(raw, name=pkg, semx=semx) + except ProcessExecutionError: + return None + + +# vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/curtin/futil.py curtin-18.2-10-g7afd77fa/curtin/futil.py --- curtin-18.1-17-gae48e86f/curtin/futil.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/curtin/futil.py 2019-02-15 20:42:42.000000000 +0000 @@ -5,7 +5,8 @@ import os import warnings -from .util import write_file, target_path +from .util import write_file +from .paths import target_path from .log import LOG diff -Nru curtin-18.1-17-gae48e86f/curtin/__init__.py curtin-18.2-10-g7afd77fa/curtin/__init__.py --- curtin-18.1-17-gae48e86f/curtin/__init__.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/curtin/__init__.py 2019-02-15 20:42:42.000000000 +0000 @@ -10,6 +10,8 @@ FEATURES = [ # curtin can apply centos networking via centos_apply_network_config 'CENTOS_APPLY_NETWORK_CONFIG', + # curtin can configure centos storage devices and boot devices + 'CENTOS_CURTHOOK_SUPPORT', # install supports the 'network' config version 1 'NETWORK_CONFIG_V1', # reporter supports 'webhook' type @@ -28,6 +30,6 @@ 'HAS_VERSION_MODULE', ] -__version__ = "18.1" +__version__ = "18.2" # vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/curtin/log.py curtin-18.2-10-g7afd77fa/curtin/log.py --- curtin-18.1-17-gae48e86f/curtin/log.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/curtin/log.py 2019-02-15 20:42:42.000000000 +0000 @@ -1,6 +1,9 @@ # This file is part of curtin. See LICENSE file for copyright and license info. 
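# Worked examples (editor's note, illustrative only) tracing the
# parse_dpkg_version() hunk above with the default weights
# semx=(10000, 100, 1):
#   '2.02-2ubuntu8.13' -> upstream '2.02', extra None,
#       major=2, minor=2, micro=0, semantic_version 2*10000 + 2*100 = 20200
#   '18.2' (native package, no '-') -> upstream '18.2',
#       major=18, minor=2, micro=0, semantic_version 180200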
import logging +import time + +from functools import wraps # Logging items for easy access getLogger = logging.getLogger @@ -56,6 +59,46 @@ if not logging.getLogger().handlers: logging.getLogger().addHandler(NullHandler()) + +def _repr_call(name, *args, **kwargs): + return "%s(%s)" % ( + name, + ', '.join([str(repr(a)) for a in args] + + ["%s=%s" % (k, repr(v)) for k, v in kwargs.items()])) + + +def log_call(func, *args, **kwargs): + return log_time( + "TIMED %s: " % _repr_call(func.__name__, *args, **kwargs), + func, *args, **kwargs) + + +def log_time(msg, func, *args, **kwargs): + start = time.time() + try: + return func(*args, **kwargs) + finally: + LOG.debug(msg + "%.3f", (time.time() - start)) + + +def logged_call(): + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + return log_call(func, *args, **kwargs) + return wrapper + return decorator + + +def logged_time(msg): + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + return log_time("TIMED %s: " % msg, func, *args, **kwargs) + return wrapper + return decorator + + LOG = _getLogger() # vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/curtin/__main__.py curtin-18.2-10-g7afd77fa/curtin/__main__.py --- curtin-18.1-17-gae48e86f/curtin/__main__.py 1970-01-01 00:00:00.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/curtin/__main__.py 2019-02-15 20:42:42.000000000 +0000 @@ -0,0 +1,4 @@ +if __name__ == '__main__': + from .commands.main import main + import sys + sys.exit(main()) diff -Nru curtin-18.1-17-gae48e86f/curtin/net/deps.py curtin-18.2-10-g7afd77fa/curtin/net/deps.py --- curtin-18.1-17-gae48e86f/curtin/net/deps.py 1970-01-01 00:00:00.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/curtin/net/deps.py 2019-02-15 20:42:42.000000000 +0000 @@ -0,0 +1,72 @@ +# This file is part of curtin. See LICENSE file for copyright and license info. + +from curtin.distro import DISTROS + + +def network_config_required_packages(network_config, mapping=None): + + if network_config is None: + network_config = {} + + if not isinstance(network_config, dict): + raise ValueError('Invalid network configuration. Must be a dict') + + if mapping is None: + mapping = {} + + if not isinstance(mapping, dict): + raise ValueError('Invalid network mapping. Must be a dict') + + # allow top-level 'network' key + if 'network' in network_config: + network_config = network_config.get('network') + + # v1 has 'config' key and uses type: devtype elements + if 'config' in network_config: + dev_configs = set(device['type'] + for device in network_config['config']) + else: + # v2 has no config key + dev_configs = set(cfgtype for (cfgtype, cfg) in + network_config.items() if cfgtype not in ['version']) + + needed_packages = [] + for dev_type in dev_configs: + if dev_type in mapping: + needed_packages.extend(mapping[dev_type]) + + return needed_packages + + +def detect_required_packages_mapping(osfamily=DISTROS.debian): + """Return a dictionary providing a versioned configuration which maps + network configuration elements to the packages which are required + for functionality. 
+ """ + # keys ending with 's' are v2 values + distro_mapping = { + DISTROS.debian: { + 'bond': ['ifenslave'], + 'bonds': [], + 'bridge': ['bridge-utils'], + 'bridges': [], + 'vlan': ['vlan'], + 'vlans': []}, + DISTROS.redhat: { + 'bond': [], + 'bonds': [], + 'bridge': [], + 'bridges': [], + 'vlan': [], + 'vlans': []}, + } + if osfamily not in distro_mapping: + raise ValueError('No net package mapping for distro: %s' % osfamily) + + return {1: {'handler': network_config_required_packages, + 'mapping': distro_mapping.get(osfamily)}, + 2: {'handler': network_config_required_packages, + 'mapping': distro_mapping.get(osfamily)}} + + +# vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/curtin/net/__init__.py curtin-18.2-10-g7afd77fa/curtin/net/__init__.py --- curtin-18.1-17-gae48e86f/curtin/net/__init__.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/curtin/net/__init__.py 2019-02-15 20:42:42.000000000 +0000 @@ -572,63 +572,4 @@ return read_sys_net(ifname, "address", enoent=False) -def network_config_required_packages(network_config, mapping=None): - - if network_config is None: - network_config = {} - - if not isinstance(network_config, dict): - raise ValueError('Invalid network configuration. Must be a dict') - - if mapping is None: - mapping = {} - - if not isinstance(mapping, dict): - raise ValueError('Invalid network mapping. Must be a dict') - - # allow top-level 'network' key - if 'network' in network_config: - network_config = network_config.get('network') - - # v1 has 'config' key and uses type: devtype elements - if 'config' in network_config: - dev_configs = set(device['type'] - for device in network_config['config']) - else: - # v2 has no config key - dev_configs = set(cfgtype for (cfgtype, cfg) in - network_config.items() if cfgtype not in ['version']) - - needed_packages = [] - for dev_type in dev_configs: - if dev_type in mapping: - needed_packages.extend(mapping[dev_type]) - - return needed_packages - - -def detect_required_packages_mapping(): - """Return a dictionary providing a versioned configuration which maps - network configuration elements to the packages which are required - for functionality. - """ - mapping = { - 1: { - 'handler': network_config_required_packages, - 'mapping': { - 'bond': ['ifenslave'], - 'bridge': ['bridge-utils'], - 'vlan': ['vlan']}, - }, - 2: { - 'handler': network_config_required_packages, - 'mapping': { - 'bonds': ['ifenslave'], - 'bridges': ['bridge-utils'], - 'vlans': ['vlan']} - }, - } - - return mapping - # vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/curtin/paths.py curtin-18.2-10-g7afd77fa/curtin/paths.py --- curtin-18.1-17-gae48e86f/curtin/paths.py 1970-01-01 00:00:00.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/curtin/paths.py 2019-02-15 20:42:42.000000000 +0000 @@ -0,0 +1,34 @@ +# This file is part of curtin. See LICENSE file for copyright and license info. +import os + +try: + string_types = (basestring,) +except NameError: + string_types = (str,) + + +def target_path(target, path=None): + # return 'path' inside target, accepting target as None + if target in (None, ""): + target = "/" + elif not isinstance(target, string_types): + raise ValueError("Unexpected input for target: %s" % target) + else: + target = os.path.abspath(target) + # abspath("//") returns "//" specifically for 2 slashes. 
+ if target.startswith("//"): + target = target[1:] + + if not path: + return target + + if not isinstance(path, string_types): + raise ValueError("Unexpected input for path: %s" % path) + + # os.path.join("/etc", "/foo") returns "/foo". Chomp all leading /. + while len(path) and path[0] == "/": + path = path[1:] + + return os.path.join(target, path) + +# vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/curtin/swap.py curtin-18.2-10-g7afd77fa/curtin/swap.py --- curtin-18.1-17-gae48e86f/curtin/swap.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/curtin/swap.py 2019-02-15 20:42:42.000000000 +0000 @@ -103,9 +103,15 @@ https://github.com/torvalds/linux/blob/master/include/linux/swap.h#L111 """ LOG.debug('Checking if %s is a swap device', path) - swap_magic_offset = resource.getpagesize() - 10 - magic = util.load_file(path, read_len=10, offset=swap_magic_offset, - decode=False) + pagesize = resource.getpagesize() + magic_offset = pagesize - 10 + size = util.file_size(path) + if size < magic_offset: + LOG.debug("%s is to small for swap (size=%d < pagesize=%d)", + path, size, pagesize) + return False + magic = util.load_file( + path, read_len=10, offset=magic_offset, decode=False) LOG.debug('Found swap magic: %s' % magic) return magic in [b'SWAPSPACE2', b'SWAP-SPACE'] # vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/curtin/udev.py curtin-18.2-10-g7afd77fa/curtin/udev.py --- curtin-18.1-17-gae48e86f/curtin/udev.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/curtin/udev.py 2019-02-15 20:42:42.000000000 +0000 @@ -2,6 +2,7 @@ import os from curtin import util +from curtin.log import logged_call def compose_udev_equality(key, value): @@ -40,6 +41,7 @@ return '%s\n' % rule +@logged_call() def udevadm_settle(exists=None, timeout=None): settle_cmd = ["udevadm", "settle"] if exists: @@ -59,4 +61,41 @@ util.subp(['udevadm', 'trigger'] + list(devices)) udevadm_settle() + +def udevadm_info(path=None): + """ Return a dictionary populated by properties of the device specified + in the `path` variable via querying udev 'property' database. + + :params: path: path to device, either /dev or /sys + :returns: dictionary of key=value pairs as exported from the udev database + :raises: ValueError path is None, ProcessExecutionError on exec error. 
+ """ + if not path: + raise ValueError('Invalid path: "%s"' % path) + + info_cmd = ['udevadm', 'info', '--query=property', path] + output, _ = util.subp(info_cmd, capture=True) + + # strip for trailing empty line + info = {} + for line in output.splitlines(): + if not line: + continue + # maxsplit=2 gives us key and remaininng part of line is value + # py2.7 on Trusty doesn't have keyword, pass as argument + key, value = line.split('=', 2) + if not value: + value = None + if value: + # devlinks is a list of paths separated by space + # convert to a list for easy use + if key == 'DEVLINKS': + info[key] = value.split() + else: + # preserve spaces in values, to match udev database + info[key] = value + + return info + + # vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/curtin/url_helper.py curtin-18.2-10-g7afd77fa/curtin/url_helper.py --- curtin-18.1-17-gae48e86f/curtin/url_helper.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/curtin/url_helper.py 2019-02-15 20:42:42.000000000 +0000 @@ -82,39 +82,51 @@ self.close() -def download(url, path, reporthook=None, data=None): +def download(url, path, reporthook=None, data=None, retries=0, retry_delay=3): """Download url to path. reporthook is compatible with py3 urllib.request.urlretrieve. urlretrieve does not exist in py2.""" buflen = 8192 - wfp = open(path, "wb") + attempts = 0 - try: - buf = None - blocknum = 0 - fsize = 0 - start = time.time() - with UrlReader(url) as rfp: - if reporthook: - reporthook(blocknum, buflen, rfp.size) - - while True: - buf = rfp.read(buflen) - if not buf: - break - blocknum += 1 + while True: + wfp = open(path, "wb") + try: + buf = None + blocknum = 0 + fsize = 0 + start = time.time() + with UrlReader(url) as rfp: if reporthook: reporthook(blocknum, buflen, rfp.size) - wfp.write(buf) - fsize += len(buf) - timedelta = time.time() - start - LOG.debug("Downloaded %d bytes from %s to %s in %.2fs (%.2fMbps)", - fsize, url, path, timedelta, fsize / timedelta / 1024 / 1024) - return path, rfp.info - finally: - wfp.close() + + while True: + buf = rfp.read(buflen) + if not buf: + break + blocknum += 1 + if reporthook: + reporthook(blocknum, buflen, rfp.size) + wfp.write(buf) + fsize += len(buf) + timedelta = time.time() - start + LOG.debug("Downloaded %d bytes from %s to %s in %.2fs (%.2fMbps)", + fsize, url, path, timedelta, + fsize / timedelta / 1024 / 1024) + return path, rfp.info + except UrlError as e: + # retry on internal server errors up to "retries #" + if (e.code < 500 or attempts >= retries): + raise e + LOG.debug("Current download failed with error: %s. 
Retrying in" + " %d seconds.", + e.code, retry_delay) + attempts += 1 + time.sleep(retry_delay) + finally: + wfp.close() def get_maas_version(endpoint): @@ -227,7 +239,7 @@ try: return _geturl(url=url, headers=headers, headers_cb=headers_cb, exception_cb=exception_cb, data=data) - except _ReRaisedException as e: + except _ReRaisedException: raise curexc.exc except Exception as e: curexc = e @@ -380,7 +392,7 @@ if extra_exception_cb: ret = extra_exception_cb(exception) finally: - self.exception_cb(exception) + self.exception_cb(exception) return ret def _headers_cb(self, extra_headers_cb, url): diff -Nru curtin-18.1-17-gae48e86f/curtin/util.py curtin-18.2-10-g7afd77fa/curtin/util.py --- curtin-18.1-17-gae48e86f/curtin/util.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/curtin/util.py 2019-02-15 20:42:42.000000000 +0000 @@ -4,7 +4,6 @@ import collections from contextlib import contextmanager import errno -import glob import json import os import platform @@ -38,15 +37,25 @@ # python3 does not have a long type. numeric_types = (int, float) -from .log import LOG +try: + FileMissingError = FileNotFoundError +except NameError: + FileMissingError = IOError + +from . import paths +from .log import LOG, log_call + +binary_type = bytes +if sys.version_info[0] < 3: + binary_type = str _INSTALLED_HELPERS_PATH = 'usr/lib/curtin/helpers' _INSTALLED_MAIN = 'usr/bin/curtin' -_LSB_RELEASE = {} _USES_SYSTEMD = None _HAS_UNSHARE_PID = None + _DNS_REDIRECT_IP = None # matcher used in template rendering functions @@ -61,7 +70,7 @@ rcs = [0] devnull_fp = None - tpath = target_path(target) + tpath = paths.target_path(target) chroot_args = [] if tpath == "/" else ['chroot', target] sh_args = ['sh', '-c'] if shell else [] if isinstance(args, string_types): @@ -103,10 +112,11 @@ (out, err) = sp.communicate(data) # Just ensure blank instead of none. - if not out and capture: - out = b'' - if not err and capture: - err = b'' + if capture or combine_capture: + if not out: + out = b'' + if not err: + err = b'' if decode: def ldecode(data, m='utf-8'): if not isinstance(data, bytes): @@ -164,7 +174,7 @@ if euid is None: euid = os.geteuid() - tpath = target_path(target) + tpath = paths.target_path(target) unshare_pid_in = unshare_pid if unshare_pid is None: @@ -206,6 +216,8 @@ boolean indicating if stderr should be redirected to stdout. When True, interleaved stderr and stdout will be returned as the first element of a tuple. + if combine_capture is True, then output is captured independent of + the value of capture. :param log_captured: boolean indicating if output should be logged on capture. If True, then stderr and stdout will be logged at DEBUG level. If @@ -521,6 +533,8 @@ def ensure_dir(path, mode=None): + if path == "": + path = "." 
try: os.makedirs(path) except OSError as e: @@ -590,7 +604,7 @@ 'done', '']) - fpath = target_path(target, "/usr/sbin/policy-rc.d") + fpath = paths.target_path(target, "/usr/sbin/policy-rc.d") if os.path.isfile(fpath): return False @@ -601,7 +615,7 @@ def undisable_daemons_in_root(target): try: - os.unlink(target_path(target, "/usr/sbin/policy-rc.d")) + os.unlink(paths.target_path(target, "/usr/sbin/policy-rc.d")) except OSError as e: if e.errno != errno.ENOENT: raise @@ -613,7 +627,7 @@ def __init__(self, target, allow_daemons=False, sys_resolvconf=True): if target is None: target = "/" - self.target = target_path(target) + self.target = paths.target_path(target) self.mounts = ["/dev", "/proc", "/sys"] self.umounts = [] self.disabled_daemons = False @@ -623,14 +637,14 @@ def __enter__(self): for p in self.mounts: - tpath = target_path(self.target, p) + tpath = paths.target_path(self.target, p) if do_mount(p, tpath, opts='--bind'): self.umounts.append(tpath) if not self.allow_daemons: self.disabled_daemons = disable_daemons_in_root(self.target) - rconf = target_path(self.target, "/etc/resolv.conf") + rconf = paths.target_path(self.target, "/etc/resolv.conf") target_etc = os.path.dirname(rconf) if self.target != "/" and os.path.isdir(target_etc): # never muck with resolv.conf on / @@ -655,13 +669,13 @@ undisable_daemons_in_root(self.target) # if /dev is to be unmounted, udevadm settle (LP: #1462139) - if target_path(self.target, "/dev") in self.umounts: - subp(['udevadm', 'settle']) + if paths.target_path(self.target, "/dev") in self.umounts: + log_call(subp, ['udevadm', 'settle']) for p in reversed(self.umounts): do_umount(p) - rconf = target_path(self.target, "/etc/resolv.conf") + rconf = paths.target_path(self.target, "/etc/resolv.conf") if self.sys_resolvconf and self.rconf_d: os.rename(os.path.join(self.rconf_d, "resolv.conf"), rconf) shutil.rmtree(self.rconf_d) @@ -671,7 +685,7 @@ return subp(*args, **kwargs) def path(self, path): - return target_path(self.target, path) + return paths.target_path(self.target, path) def is_exe(fpath): @@ -680,29 +694,29 @@ def which(program, search=None, target=None): - target = target_path(target) + target = paths.target_path(target) if os.path.sep in program: # if program had a '/' in it, then do not search PATH # 'which' does consider cwd here. 
(cd / && which bin/ls) = bin/ls # so effectively we set cwd to / (or target) - if is_exe(target_path(target, program)): + if is_exe(paths.target_path(target, program)): return program if search is None: - paths = [p.strip('"') for p in - os.environ.get("PATH", "").split(os.pathsep)] + candpaths = [p.strip('"') for p in + os.environ.get("PATH", "").split(os.pathsep)] if target == "/": - search = paths + search = candpaths else: - search = [p for p in paths if p.startswith("/")] + search = [p for p in candpaths if p.startswith("/")] # normalize path input search = [os.path.abspath(p) for p in search] for path in search: ppath = os.path.sep.join((path, program)) - if is_exe(target_path(target, ppath)): + if is_exe(paths.target_path(target, ppath)): return ppath return None @@ -768,91 +782,6 @@ return out.strip() -def has_pkg_available(pkg, target=None): - out, _ = subp(['apt-cache', 'pkgnames'], capture=True, target=target) - for item in out.splitlines(): - if pkg == item.strip(): - return True - return False - - -def get_installed_packages(target=None): - (out, _) = subp(['dpkg-query', '--list'], target=target, capture=True) - - pkgs_inst = set() - for line in out.splitlines(): - try: - (state, pkg, other) = line.split(None, 2) - except ValueError: - continue - if state.startswith("hi") or state.startswith("ii"): - pkgs_inst.add(re.sub(":.*", "", pkg)) - - return pkgs_inst - - -def has_pkg_installed(pkg, target=None): - try: - out, _ = subp(['dpkg-query', '--show', '--showformat', - '${db:Status-Abbrev}', pkg], - capture=True, target=target) - return out.rstrip() == "ii" - except ProcessExecutionError: - return False - - -def parse_dpkg_version(raw, name=None, semx=None): - """Parse a dpkg version string into various parts and calcualate a - numerical value of the version for use in comparing package versions - - returns a dictionary with the results - """ - if semx is None: - semx = (10000, 100, 1) - - upstream = raw.split('-')[0] - toks = upstream.split(".", 2) - if len(toks) == 3: - major, minor, micro = toks - elif len(toks) == 2: - major, minor, micro = (toks[0], toks[1], 0) - elif len(toks) == 1: - major, minor, micro = (toks[0], 0, 0) - - version = { - 'major': major, - 'minor': minor, - 'micro': micro, - 'raw': raw, - 'upstream': upstream, - } - if name: - version['name'] = name - - if semx: - try: - version['semantic_version'] = int( - int(major) * semx[0] + int(minor) * semx[1] + - int(micro) * semx[2]) - except (ValueError, IndexError): - version['semantic_version'] = None - - return version - - -def get_package_version(pkg, target=None, semx=None): - """Use dpkg-query to extract package pkg's version string - and parse the version string into a dictionary - """ - try: - out, _ = subp(['dpkg-query', '--show', '--showformat', - '${Version}', pkg], capture=True, target=target) - raw = out.rstrip() - return parse_dpkg_version(raw, name=pkg, semx=semx) - except ProcessExecutionError: - return None - - def find_newer(src, files): mtime = os.stat(src).st_mtime return [f for f in files if @@ -877,134 +806,6 @@ return cur -def apt_update(target=None, env=None, force=False, comment=None, - retries=None): - - marker = "tmp/curtin.aptupdate" - if target is None: - target = "/" - - if env is None: - env = os.environ.copy() - - if retries is None: - # by default run apt-update up to 3 times to allow - # for transient failures - retries = (1, 2, 3) - - if comment is None: - comment = "no comment provided" - - if comment.endswith("\n"): - comment = comment[:-1] - - marker = target_path(target, 
marker) - # if marker exists, check if there are files that would make it obsolete - listfiles = [target_path(target, "/etc/apt/sources.list")] - listfiles += glob.glob( - target_path(target, "etc/apt/sources.list.d/*.list")) - - if os.path.exists(marker) and not force: - if len(find_newer(marker, listfiles)) == 0: - return - - restore_perms = [] - - abs_tmpdir = tempfile.mkdtemp(dir=target_path(target, "/tmp")) - try: - abs_slist = abs_tmpdir + "/sources.list" - abs_slistd = abs_tmpdir + "/sources.list.d" - ch_tmpdir = "/tmp/" + os.path.basename(abs_tmpdir) - ch_slist = ch_tmpdir + "/sources.list" - ch_slistd = ch_tmpdir + "/sources.list.d" - - # this file gets executed on apt-get update sometimes. (LP: #1527710) - motd_update = target_path( - target, "/usr/lib/update-notifier/update-motd-updates-available") - pmode = set_unexecutable(motd_update) - if pmode is not None: - restore_perms.append((motd_update, pmode),) - - # create tmpdir/sources.list with all lines other than deb-src - # avoid apt complaining by using existing and empty dir for sourceparts - os.mkdir(abs_slistd) - with open(abs_slist, "w") as sfp: - for sfile in listfiles: - with open(sfile, "r") as fp: - contents = fp.read() - for line in contents.splitlines(): - line = line.lstrip() - if not line.startswith("deb-src"): - sfp.write(line + "\n") - - update_cmd = [ - 'apt-get', '--quiet', - '--option=Acquire::Languages=none', - '--option=Dir::Etc::sourcelist=%s' % ch_slist, - '--option=Dir::Etc::sourceparts=%s' % ch_slistd, - 'update'] - - # do not using 'run_apt_command' so we can use 'retries' to subp - with ChrootableTarget(target, allow_daemons=True) as inchroot: - inchroot.subp(update_cmd, env=env, retries=retries) - finally: - for fname, perms in restore_perms: - os.chmod(fname, perms) - if abs_tmpdir: - shutil.rmtree(abs_tmpdir) - - with open(marker, "w") as fp: - fp.write(comment + "\n") - - -def run_apt_command(mode, args=None, aptopts=None, env=None, target=None, - execute=True, allow_daemons=False): - opts = ['--quiet', '--assume-yes', - '--option=Dpkg::options::=--force-unsafe-io', - '--option=Dpkg::Options::=--force-confold'] - - if args is None: - args = [] - - if aptopts is None: - aptopts = [] - - if env is None: - env = os.environ.copy() - env['DEBIAN_FRONTEND'] = 'noninteractive' - - if which('eatmydata', target=target): - emd = ['eatmydata'] - else: - emd = [] - - cmd = emd + ['apt-get'] + opts + aptopts + [mode] + args - if not execute: - return env, cmd - - apt_update(target, env=env, comment=' '.join(cmd)) - with ChrootableTarget(target, allow_daemons=allow_daemons) as inchroot: - return inchroot.subp(cmd, env=env) - - -def system_upgrade(aptopts=None, target=None, env=None, allow_daemons=False): - LOG.debug("Upgrading system in %s", target) - for mode in ('dist-upgrade', 'autoremove'): - ret = run_apt_command( - mode, aptopts=aptopts, target=target, - env=env, allow_daemons=allow_daemons) - return ret - - -def install_packages(pkglist, aptopts=None, target=None, env=None, - allow_daemons=False): - if isinstance(pkglist, str): - pkglist = [pkglist] - return run_apt_command( - 'install', args=pkglist, - aptopts=aptopts, target=target, env=env, allow_daemons=allow_daemons) - - def is_uefi_bootable(): return os.path.exists('/sys/firmware/efi') is True @@ -1076,7 +877,7 @@ """ Look for "hook" in "target" and run it """ - target_hook = target_path(target, '/curtin/' + hook) + target_hook = paths.target_path(target, '/curtin/' + hook) if os.path.isfile(target_hook): LOG.debug("running %s" % target_hook) 
subp([target_hook]) @@ -1094,7 +895,7 @@ # already sanitized? return source supported = ['tgz', 'dd-tgz', 'dd-tbz', 'dd-txz', 'dd-tar', 'dd-bz2', - 'dd-gz', 'dd-xz', 'dd-raw', 'fsimage'] + 'dd-gz', 'dd-xz', 'dd-raw', 'fsimage', 'fsimage-layered'] deftype = 'tgz' for i in supported: prefix = i + ":" @@ -1231,41 +1032,6 @@ exc.errno in (errno.ENOENT, errno.EIO, errno.ENXIO)) -def _lsb_release(target=None): - fmap = {'Codename': 'codename', 'Description': 'description', - 'Distributor ID': 'id', 'Release': 'release'} - - data = {} - try: - out, _ = subp(['lsb_release', '--all'], capture=True, target=target) - for line in out.splitlines(): - fname, _, val = line.partition(":") - if fname in fmap: - data[fmap[fname]] = val.strip() - missing = [k for k in fmap.values() if k not in data] - if len(missing): - LOG.warn("Missing fields in lsb_release --all output: %s", - ','.join(missing)) - - except ProcessExecutionError as err: - LOG.warn("Unable to get lsb_release --all: %s", err) - data = {v: "UNAVAILABLE" for v in fmap.values()} - - return data - - -def lsb_release(target=None): - if target_path(target) != "/": - # do not use or update cache if target is provided - return _lsb_release(target) - - global _LSB_RELEASE - if not _LSB_RELEASE: - data = _lsb_release() - _LSB_RELEASE.update(data) - return _LSB_RELEASE - - class MergedCmdAppend(argparse.Action): """This appends to a list in order of appearence both the option string and the value""" @@ -1400,31 +1166,6 @@ return is_resolvable(urlparse(url).hostname) -def target_path(target, path=None): - # return 'path' inside target, accepting target as None - if target in (None, ""): - target = "/" - elif not isinstance(target, string_types): - raise ValueError("Unexpected input for target: %s" % target) - else: - target = os.path.abspath(target) - # abspath("//") returns "//" specifically for 2 slashes. - if target.startswith("//"): - target = target[1:] - - if not path: - return target - - if not isinstance(path, string_types): - raise ValueError("Unexpected input for path: %s" % path) - - # os.path.join("/etc", "/foo") returns "/foo". Chomp all leading /. - while len(path) and path[0] == "/": - path = path[1:] - - return os.path.join(target, path) - - class RunInChroot(ChrootableTarget): """Backwards compatibility for RunInChroot (LP: #1617375). It needs to work like: diff -Nru curtin-18.1-17-gae48e86f/debian/changelog curtin-18.2-10-g7afd77fa/debian/changelog --- curtin-18.1-17-gae48e86f/debian/changelog 2018-05-18 19:11:34.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/debian/changelog 2019-02-28 15:54:49.000000000 +0000 @@ -1,3 +1,86 @@ +curtin (18.2-10-g7afd77fa-0ubuntu1~16.04.1) xenial; urgency=medium + + * New upstream snapshot. (LP: #1817964) + - Support for multi-layers images fsimage-layered:// URI + [Jean-Baptiste Lallement] + - dname: relax dname req for disk serial/wwn presence for compatibility + - flake8: fix some E117 over-indented issues [Paride Legovini] + - bcache: ensure partitions on bcache devices are detected as partition + - vmtest: bump skip_by_date out a year for trusty bcache bug + - Fix typo in doc/topics/integration-testing.rst. [Paride Legovini] + - flake8: Fix two issues found with new version of flake8 + - clear-holders: handle FileNotFound when probing for bcache device slaves + - vmtests: network mtu fix-by bump to post 19.04 release + - vmtest: Fix bug preventing explicit disabling of system_upgrade. + - Release 18.2 + - Adjust helpers/common to edit GRUB_CMDLINE_LINUX_DEFAULT in place. 
+ - dname: persistent names based on serial or wwn + - Fix bug in is_swap_device if a device was smaller than page_size. + - vmtest: add disco tests [Joshua Powers] + - unittest: change directory to tmpdir for testing relative files. + - Add clear-holders to meta-simple + - vmtests: check install log for Out of memory kernel messages and fail + - unittest: correctly use tmpdir for my.img [Joshua Powers] + - block_meta: use wipe config when clearing partitions + - tests: fix vmtests for apt perserve_source_list changes + - apt: Use new format apt config when writing preserve_sources_list. + - vmtests: multipath mount /home with nofail and validate in unittest + - vmtests: fix common collect scripts to not exit failure. + - vmtest: handle collect disk unpack failure + - vmtests: dont use multiple subclasses in uefi 4k tests + - vmtests: disable snapd/seeding to avoid boot hang + - jenkins-runner: fix when using --filter only + + -- Ryan Harper Thu, 28 Feb 2019 09:54:02 -0600 + +curtin (18.1-59-g0f993084-0ubuntu1~16.04.1) xenial; urgency=medium + + * New upstream snapshot. (LP: #1795712) + - distro: fix system_upgrade command using string instead of function + - Capture stdout when using lxc file push + - vmtest: boot ephemeral with 'ro' on the kernel command line. + - vmtest: Fix typo in skip-by-date. + - vmtest: kick skip-by-date for 1671951. + - tools/jenkins-runner: Error if both filters and tests are given. + - vmtests: prevent tests from modifying cls.collect_scripts + - Enable custom storage configuration for centos images + - vmtest: ensure we collect /var/log/journal only once + - Don't allow reads of /proc and modprobe zfs through + - clear-holders: handle missing zpool/zfs tools when wiping + - clear-holders: rescan for lvm devices after assembling raid arrays + - vmtest: enable persistent journal and collect at boot time + - Add timing and logging functions. + - parse_dpkg_version: support non-numeric in version string. + - Add main so that 'python3 -m curtin' does the right thing. + - Add subcommand 'features'. + - block: use uuid4 (random) when autogenerating UUIDS for filesystems + - vmtests: Increase size of root filesystems. + - clear-holders: reread ptable after wiping disks with partitions + - vmtest: Skip proposed pocket on dev release when 'proposed' in ADD_REPOS. + - tests: remove Ubuntu Artful [Joshua Powers] + - vmtests: Let a raised SkipTest go through skip_by_date. + - vmtests: Increase root fs to give upgrades to -proposed more space. + - vmtest: Order the vmtest_pollinate late_command earlier. + - vmtest: always add 'curtin/vmtest' to installed pollinate user_agent. + - vmtests: make skip_by_date a decorator that runs and reports. + - vmtests: always declare certain attributes and remove redundant tests. + - vmtests: Add Cosmic release to tests [Joshua Powers] + - vmtests: skip TrustyTestMdadmBcache until 2019-01-22. + - tox: use simplestreams from git repository rather than bzr. + - document that you can set ptable on raids [Michael Hudson-Doyle] + - vmtests: move skip-by date of xfs root and xfs boot out 1 year. + - vmtests: network_mtu move fixby date out 4 months from last value + - Fix WorkingDir class to support already existing target directory. + - Fix extraction of local filesystem image. + - Fix tip-pyflakes imported but unused call to util.get_platform_arch + - subp: update return value of subp with combine_capture=True. + - tox: add a xenial environments, default envlist changes. 
+ - tests: Fix race on utcnow during timestamped curtin-log dir creation + - curtainer: patch source version from --source. + - pyflakes: fix unused variable references identified by pyflakes 2.0.0. + + -- Scott Moser <smoser@ubuntu.com>  Fri, 12 Oct 2018 16:23:16 -0400 + curtin (18.1-17-gae48e86f-0ubuntu1~16.04.1) xenial; urgency=medium * New upstream snapshot. (LP: #1772044) diff -Nru curtin-18.1-17-gae48e86f/doc/topics/apt_source.rst curtin-18.2-10-g7afd77fa/doc/topics/apt_source.rst --- curtin-18.1-17-gae48e86f/doc/topics/apt_source.rst 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/doc/topics/apt_source.rst 2019-02-15 20:42:42.000000000 +0000 @@ -165,3 +165,60 @@ ~~~~~~~~~~~~ Cloud-init might need to resolve dependencies and install packages in the ephemeral environment to run curtin. Therefore it is recommended to not only provide an apt configuration to curtin for the target, but also one to the install environment via cloud-init. + + +apt preserve_sources_list setting +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +cloud-init and curtin treat the ``preserve_sources_list`` setting slightly differently, and thus this setting deserves its own section. + +Interpretation / Meaning +------------------------ +curtin reads ``preserve_sources_list`` to indicate whether or not it should update the target system's ``/etc/apt/sources.list``. This includes replacing the mirrors used (apt/primary...). + +cloud-init reads ``preserve_sources_list`` to indicate whether or not it should *render* ``/etc/apt/sources.list`` from its built-in template. + +defaults +-------- +Just for reference, the ``preserve_sources_list`` defaults in curtin and cloud-init are: + + * curtin: **true** + By default curtin will not modify ``/etc/apt/sources.list`` in the installed OS. It is assumed that this file is intentionally left as it is. + * cloud-init: **false** + * cloud-init in ephemeral environment: **false** + * cloud-init system installed by curtin: **true** + (curtin writes this to a file ``/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg`` in the target). It does this because we have already written the sources.list that is desired in the installer. We do not want cloud-init to overwrite it when it boots. + +preserve_sources_list in MAAS +----------------------------- +Curtin and cloud-init use the same ``apt`` configuration language. +MAAS provides apt config in four different scenarios. + + 1. To cloud-init in ephemeral environment (rescue, install or commissioning) Here MAAS **should not send a value**. If it wants to be explicit, it should send ``preserve_sources_list: false``. + + 2. To curtin in curtin config + MAAS **should send ``preserve_sources_list: false``**. curtin will correctly read and update mirrors in official Ubuntu images, so setting this to 'false' is correct. In some cases for custom images, the user might want to be able to have their /etc/apt/sources.list left untouched entirely. In such cases they may want to override this value. + + 3. To cloud-init via curtin config in debconf_selections. + MAAS should **not send a value**. Curtin will handle telling cloud-init to not update /etc/apt/sources.list. MAAS does not need to do this. + + 4. To installed system via vendor-data or user-data. + MAAS should **not send a value**. MAAS does not currently send a value. The user could send one in user-data, but if they did, presumably they did so for a reason. + +Legacy format +------------- + +Versions of cloud-init in 14.04 and older only support: + +..
code-block:: yaml + + apt_preserve_sources_list: VALUE + +Versions of cloud-init present in 16.04+ read the "new" style apt configuration, but support the old style configuration also. The new style configuration is: + +.. code-block:: yaml + + apt: + preserve_sources_list: VALUE + +**Note**: If versions of cloud-init that support the new style config receive conflicting values in old style and new style, cloud-init will raise an exception and exit with failure. It simply doesn't know which behavior is desired. diff -Nru curtin-18.1-17-gae48e86f/doc/topics/config.rst curtin-18.2-10-g7afd77fa/doc/topics/config.rst --- curtin-18.1-17-gae48e86f/doc/topics/config.rst 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/doc/topics/config.rst 2019-02-15 20:42:42.000000000 +0000 @@ -14,6 +14,7 @@ - apt_mirrors (``apt_mirrors``) - apt_proxy (``apt_proxy``) - block-meta (``block``) +- curthooks (``curthooks``) - debconf_selections (``debconf_selections``) - disable_overlayroot (``disable_overlayroot``) - grub (``grub``) @@ -110,6 +111,45 @@ label: my-boot-partition +curthooks +~~~~~~~~~ +Configure how Curtin determines what :ref:`curthooks` to run during the installation +process. + +**mode**: *<['auto', 'builtin', 'target']>* + +The default mode is ``auto``. + +In ``auto`` mode, curtin will execute curthooks within the image if present. +For images without curthooks inside, curtin will execute its built-in hooks. + +Currently the built-in curthooks support the following OS families: + +- Ubuntu +- Centos + +When specifying ``builtin``, curtin will only run the curthooks present in +Curtin, ignoring any curthooks that may be present in the target operating +system. + +When specifying ``target``, curtin will attempt to run the curthooks in the target +operating system. If the target does NOT contain any curthooks, then the +built-in curthooks will be run instead. + +Any errors during execution of curthooks (built-in or target) will fail the +installation. + +**Example**:: + + # ignore any target curthooks + curthooks: + mode: builtin + + # Only run target curthooks, fall back to built-in + curthooks: + mode: target + + debconf_selections ~~~~~~~~~~~~~~~~~~ Curtin will update the target with debconf set-selection values. Users will @@ -466,9 +506,83 @@ - **cp://**: Use ``rsync`` command to copy source directory to target. - **file://**: Use ``tar`` command to extract source to target. - **http[s]://**: Use ``wget | tar`` commands to extract source to target. -- **fsimage://**: mount filesystem image and copy contents to target. - Local file or url are supported. Filesystem can be any filesystem type +- **fsimage://** mount filesystem image and copy contents to target. + Local file or url are supported. Filesystem can be any filesystem type mountable by the running kernel. +- **fsimage-layered://** mount layered filesystem image and copy contents to target. + A ``fsimage-layered`` install source is a string representing one or more mountable + images from a single local or remote directory. The string is dot-separated where + each value between the dots represents a particular image and the location of the + name within the string encodes the order in which it is to be mounted. The resulting + list of images is downloaded (if needed), then mounted and overlaid into a single + directory which is used as the source for installation. + +**Image Name Pattern** + + [[<parent_layer>.]...]<layer_name>.<file_extension>
+ +Example:: + + 10-base.img + minimal.img + minimal.standard.live.squashfs + http://example.io/standard.squashfs + +**Layer Dependencies** + +Layers are parts of the name separated by dots. Any layer in the name will +be included as a dependency. The file extension pattern is used to find +related layers. + +Examples: + + Base use case:: + + /images + ├── main.squashfs + ├── main.upper.squashfs + └── main.upper.debug.squashfs + + source='fsimage-layered://images/main.squashfs' -> images='/images/main.squashfs' + source='fsimage-layered://images/main.upper.squashfs' -> images='/images/main.upper.squashfs, /images/main.squashfs' + source='fsimage-layered://images/main.upper.debug.squashfs' -> images='/images/main.upper.debug.squashfs, /images/main.upper.squashfs, /images/main.squashfs' + + Multiple extensions:: + + /images + ├── main.squashfs + ├── main.img + ├── main.upper.squashfs + ├── main.upper.img + └── main.upper.debug.squashfs + + source='fsimage-layered://images/main.upper.squashfs' -> images='/images/main.upper.squashfs, /images/main.squashfs' + source='fsimage-layered://images/main.upper.img' -> images='/images/main.upper.img, /images/main.img' + + Missing intermediary layer:: + + /images + ├── main.squashfs + └── main.upper.debug.squashfs + +If there is a missing image in the path to a leaf, an error will be raised + + source='fsimage-layered://images/main.squashfs' -> images='/images/main.squashfs' + source='fsimage-layered://images/main.upper.debug.squashfs' -> Raised Error + + Remote Layers:: + + http://example.io/base.extended.debug.squashfs + + +The URI passed to ``fsimage-layered`` may be on a remote system. Curtin +will parse the URI and then download each layer from the remote system. +This results in Curtin downloading the following URLs:: + +- http://example.io/base.squashfs +- http://example.io/base.extended.squashfs +- http://example.io/base.extended.debug.squashfs + **Example Cloud-image**:: diff -Nru curtin-18.1-17-gae48e86f/doc/topics/curthooks.rst curtin-18.2-10-g7afd77fa/doc/topics/curthooks.rst --- curtin-18.1-17-gae48e86f/doc/topics/curthooks.rst 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/doc/topics/curthooks.rst 2019-02-15 20:42:42.000000000 +0000 @@ -1,7 +1,13 @@ +.. _curthooks: + ======================================== -Curthooks / New OS Support +Curthooks / New OS Support ======================================== -Curtin has built-in support for installation of Ubuntu. +Curtin has built-in support for installation of: + + - Ubuntu + - Centos + Other operating systems are supported through a mechanism called 'curthooks' or 'curtin-hooks'. @@ -47,11 +53,21 @@ - ``CONFIG``: This is a path to the curtin config file. It is provided so that additional configuration could be provided through to the OS customization. + - ``WORKING_DIR``: This is a path to a temporary directory where curtin + stores state and configuration files. .. **TODO**: We should add 'PYTHON' or 'CURTIN_PYTHON' to this environment so that the hook can easily run a python program with the same python that curtin ran with (ie, python2 or python3). +Running built-in hooks +---------------------- + +Curthooks may opt to run the built-in curthooks that are already provided in +curtin itself. To do so, an in-image curthook can import the ``curthooks`` +module and invoke the ``builtin_curthooks`` function, passing in the required +parameters: config, target, and state.
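A short sketch of such an in-image ``curtin-hooks`` script, for illustration only. The call to ``builtin_curthooks`` follows the description above; the surrounding scaffolding (``load_command_environment`` and ``load_command_config`` to recover the config, target, and state parameters) is an assumption about how a hook would typically gather them, not a canonical recipe::

    #!/usr/bin/env python3
    # Sketch of an in-image curtin-hooks script that defers to curtin's
    # built-in curthooks; assumes curtin >= 18.2 is importable here.
    import sys

    from curtin import config
    from curtin.commands.curthooks import builtin_curthooks
    from curtin.util import load_command_environment


    def main():
        # state is populated from the CONFIG/TARGET_MOUNT_POINT/... env vars
        state = load_command_environment()
        cfg = config.load_command_config(None, state)
        target = state.get('target')
        if not target:
            sys.stderr.write('Unable to find target; nothing to do\n')
            return 1
        builtin_curthooks(cfg, target, state)
        return 0


    if __name__ == '__main__':
        sys.exit(main())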
+ Networking configuration ------------------------ diff -Nru curtin-18.1-17-gae48e86f/doc/topics/integration-testing.rst curtin-18.2-10-g7afd77fa/doc/topics/integration-testing.rst --- curtin-18.1-17-gae48e86f/doc/topics/integration-testing.rst 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/doc/topics/integration-testing.rst 2019-02-15 20:42:42.000000000 +0000 @@ -116,7 +116,7 @@ Or run a single test with:: - nosetests3 tests/vmtests/test_network.py::XenialTestNetworkBasic + nosetests3 tests/vmtests/test_network.py:XenialTestNetworkBasic Environment Variables @@ -314,10 +314,14 @@ setting (auto), then an upgrade will be done to make sure to include any new packages. + The string 'proposed' is handled specially. It will enable the + Ubuntu -proposed pocket for non-devel releases. If you wish to test + the -proposed pocket for a devel release, use 'PROPOSED'. + - ``CURTIN_VMTEST_SYSTEM_UPGRADE``: default 'auto' The default setting of 'auto' means to do a system upgrade if there are additional repos added. To enable this explicitly, set - to any non "0" value. + to any value other than case insensitive "false" or "0" or an empty string. - ``CURTIN_VMTEST_UPGRADE_PACKAGES``: default '' This is a comma delimited string listing packages that should have diff -Nru curtin-18.1-17-gae48e86f/doc/topics/storage.rst curtin-18.2-10-g7afd77fa/doc/topics/storage.rst --- curtin-18.1-17-gae48e86f/doc/topics/storage.rst 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/doc/topics/storage.rst 2019-02-15 20:42:42.000000000 +0000 @@ -60,9 +60,9 @@ **ptable**: *msdos, gpt* -If the ``ptable`` key is present and a valid type of partition table, curtin -will create an empty partition table of that type on the disk. At the moment, -msdos and gpt partition tables are supported. +If the ``ptable`` key is present and a valid type of partition table, curtin +will create an empty partition table of that type on the disk. Curtin +supports msdos and gpt partition tables. **serial**: *<serial number>* @@ -175,7 +175,15 @@ If the ``name`` key is present, curtin will create a udev rule that makes a symbolic link to the disk with the given name value. This makes it easy to find disks on an installed system. The links are created in -``/dev/disk/by-dname/``. +``/dev/disk/by-dname/``. The udev rules will utilize two types of disk +metadata to construct the link. For disks with ``serial`` and/or ``wwn`` values, +these will be used to ensure the name persists even if the contents of the disk +change. For legacy purposes, curtin also emits a rule utilizing metadata on +the disk contents, typically a partition UUID value. This also preserves these +links for disks which lack persistent attributes such as a ``serial`` or +``wwn``, as typically found in virtualized environments where such values are +left unset. + A link to each partition on the disk will also be created at ``/dev/disk/by-dname/<name>-part<number>``, so if ``name: maindisk`` is set, the disk will be at ``/dev/disk/by-dname/maindisk`` and the first partition on @@ -237,6 +245,14 @@ wipe command on the partition. The wipe command values are the same as for disks. +.. note:: + + Curtin will automatically wipe 1MB at the starting location of the partition + prior to creating the partition to ensure that other block layers or devices + do not enable themselves and prevent accessing the partition. Wipe + and other destructive operations only occur if the ``preserve`` value + is not set to ``True``.
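To make the note above concrete, here is a rough sketch of the equivalent explicit operation using curtin's ``block.wipe_volume`` helper; the device path is a made-up placeholder, and ``superblock`` mode is assumed to clear only the signature regions at the head of the device rather than its full contents::

    from curtin import block

    # Clear filesystem/RAID/bcache signatures from the start of the
    # (placeholder) partition so stale metadata cannot be re-detected;
    # mode='zero' or mode='random' would instead overwrite everything.
    block.wipe_volume('/dev/vdb1', mode='superblock')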
+ **flag**: *logical, extended, boot, bios_grub, swap, lvm, raid, home, prep* If the ``flag`` key is present, curtin will set the specified flag on the @@ -268,6 +284,17 @@ If the preserve flag is set to true, curtin will verify that the partition exists and will not modify the partition. +**name**: *<name>* + +If the ``name`` key is present, curtin will create a udev rule that makes a +symbolic link to the partition with the given name value. The links are created +in ``/dev/disk/by-dname/``. + +For partitions, the udev rule created relies upon disk contents, in this case +the partition entry UUID. This will remain in effect unless the underlying disk +on which the partition resides has the partition table modified or wiped. + + **Config Example**:: - id: disk0-part1 @@ -276,6 +303,7 @@ size: 8GB device: disk0 flag: boot + name: boot_partition .. _format: @@ -496,6 +524,12 @@ with ``name`` *lv1* on a ``lvm_volgroup`` named *vg1* would have the path ``/dev/disk/by-dname/vg1-lv1``. +.. note:: + + dname values for constructed devices (such as lvm) only remain persistent + as long as the device metadata does not change. If users modify the device + such that device metadata is changed, then the udev rule may no longer apply. + **volgroup**: *<volgroup id>* The ``volgroup`` key specifies the ``id`` of the Volume Group in which to @@ -592,7 +626,9 @@ .. note:: Curtin creates a udev rule to create a link to the md device in - ``/dev/disk/by-dname/`` using the specified name. + ``/dev/disk/by-dname/`` using the specified name. The dname + symbolic link is only persistent as long as the raid metadata is + not modified or destroyed. **raidlevel**: *0, 1, 5, 6, 10* @@ -613,6 +649,11 @@ spares in the raid array. Each device must be referenced by ``id`` and the device must be previously defined in the storage configuration. May be empty. +**ptable**: *msdos, gpt* + +To partition the array rather than mounting it directly, the +``ptable`` key must be present and a valid type of partition table, +i.e. msdos or gpt. **Config Example**:: @@ -664,6 +705,13 @@ If the ``name`` key is present, curtin will create a link to the device at ``/dev/disk/by-dname/``. +.. note:: + + dname values for constructed devices (such as bcache) only remain persistent + as long as the device metadata does not change. If users modify the device + such that device metadata is changed, then the udev rule may no longer apply.
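For reference, the helpers curtin uses to build such dname rules live in ``curtin.udev`` (see the ``compose_udev_equality`` definition earlier in this diff); the following is a minimal sketch of composing one rule, with a made-up serial and link name::

    from curtin.udev import compose_udev_equality

    # Compose a udev rule that symlinks a disk with a known serial into
    # /dev/disk/by-dname; the serial and 'maindisk' name are placeholders.
    rule = ', '.join([
        compose_udev_equality('SUBSYSTEM', 'block'),
        compose_udev_equality('ENV{ID_SERIAL}', 'QEMU_HARDDISK_disk-a'),
        'SYMLINK+="disk/by-dname/maindisk"',
    ])
    print(rule)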
+ + **Config Example**:: - id: bcache0 @@ -801,6 +849,7 @@ - LVM - Bcache - RAID Boot +- Partitioned RAID - RAID5 + Bcache - ZFS Root Simple - ZFS Root @@ -1045,6 +1094,76 @@ path: / device: md_root +Partitioned RAID +~~~~~~~~~~~~~~~~ + +:: + + storage: + config: + - type: disk + id: disk-0 + ptable: gpt + path: /dev/vda + wipe: superblock + grub_device: true + - type: disk + id: disk-1 + path: /dev/vdb + wipe: superblock + - type: disk + id: disk-2 + path: /dev/vdc + wipe: superblock + - type: partition + id: part-0 + device: disk-0 + size: 1048576 + flag: bios_grub + - type: partition + id: part-1 + device: disk-0 + size: 21471690752 + - id: raid-0 + type: raid + name: md0 + raidlevel: 1 + devices: [disk-2, disk-1] + ptable: gpt + - type: partition + id: part-2 + device: raid-0 + size: 10737418240 + - type: partition + id: part-3 + device: raid-0 + size: 10735321088 + - type: format + id: fs-0 + fstype: ext4 + volume: part-1 + - type: format + id: fs-1 + fstype: xfs + volume: part-2 + - type: format + id: fs-2 + fstype: ext4 + volume: part-3 + - type: mount + id: mount-0 + device: fs-0 + path: / + - type: mount + id: mount-1 + device: fs-1 + path: /srv + - type: mount + id: mount-2 + device: fs-2 + path: /home + version: 1 + RAID5 + Bcache ~~~~~~~~~~~~~~ diff -Nru curtin-18.1-17-gae48e86f/examples/tests/basic_scsi.yaml curtin-18.2-10-g7afd77fa/examples/tests/basic_scsi.yaml --- curtin-18.1-17-gae48e86f/examples/tests/basic_scsi.yaml 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/examples/tests/basic_scsi.yaml 2019-02-15 20:42:42.000000000 +0000 @@ -20,14 +20,25 @@ number: 2 size: 1GB device: sda + - id: sda3 + type: partition + number: 3 + size: 1GB + device: sda + name: swap - id: sda1_root type: format fstype: ext4 volume: sda1 + label: 'cloudimg-rootfs' - id: sda2_home type: format fstype: ext4 volume: sda2 + - id: sda3_swap + type: format + fstype: swap + volume: sda3 - id: sda1_mount type: mount path: / @@ -41,6 +52,10 @@ wwn: '0x080258d13ea95ae5' name: sparedisk wipe: superblock + - id: sparedisk_fat_fmt_id + type: format + fstype: fat32 + volume: sparedisk_id - id: btrfs_disk_id type: disk wwn: '0x22dc58dc023c7008' @@ -68,6 +83,18 @@ device: pnum_disk - id: pnum_disk_p2 type: partition + number: 2 + size: 8MB + device: pnum_disk + flag: prep + wipe: zero + name: prep + - id: pnum_disk_p3 + type: partition number: 10 size: 1GB device: pnum_disk + - id: swap_mnt + type: mount + path: "none" + device: sda3_swap diff -Nru curtin-18.1-17-gae48e86f/examples/tests/basic.yaml curtin-18.2-10-g7afd77fa/examples/tests/basic.yaml --- curtin-18.1-17-gae48e86f/examples/tests/basic.yaml 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/examples/tests/basic.yaml 2019-02-15 20:42:42.000000000 +0000 @@ -26,6 +26,7 @@ number: 3 size: 1GB device: sda + name: swap - id: sda1_root type: format fstype: ext4 @@ -83,6 +84,14 @@ device: pnum_disk - id: pnum_disk_p2 type: partition + number: 2 + size: 8MB + device: pnum_disk + flag: prep + wipe: zero + name: prep + - id: pnum_disk_p3 + type: partition number: 10 size: 1GB device: pnum_disk diff -Nru curtin-18.1-17-gae48e86f/examples/tests/bcache-partitions.yaml curtin-18.2-10-g7afd77fa/examples/tests/bcache-partitions.yaml --- curtin-18.1-17-gae48e86f/examples/tests/bcache-partitions.yaml 1970-01-01 00:00:00.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/examples/tests/bcache-partitions.yaml 2019-02-15 20:42:42.000000000 +0000 @@ -0,0 +1,97 @@ +showtrace: true + +storage: + config: + - id: id_rotary0 + type: disk + name: 
rotary0 + serial: disk-a + ptable: msdos + wipe: superblock + grub_device: true + - id: id_ssd0 + type: disk + name: ssd0 + serial: disk-b + wipe: superblock + - id: id_rotary1 + type: disk + name: rotary1 + serial: disk-c + ptable: gpt + wipe: superblock + - id: id_rotary0_part1 + type: partition + name: rotary0-part1 + device: id_rotary0 + number: 1 + offset: 1M + size: 999M + wipe: superblock + - id: id_rotary0_part2 + type: partition + name: rotary0-part2 + device: id_rotary0 + number: 2 + size: 9G + wipe: superblock + - id: id_bcache0 + type: bcache + name: bcache0 + backing_device: id_rotary0_part2 + cache_device: id_ssd0 + cache_mode: writeback + - id: id_bcache1 + type: bcache + name: bcache1 + backing_device: id_rotary1 + cache_device: id_ssd0 + cache_mode: writeback + - id: id_bcache1_disk + type: disk + path: /dev/bcache1 + ptable: gpt + - id: id_bcache1_part1 + type: partition + device: id_bcache1_disk + number: 1 + offset: 1M + size: 1G + wipe: superblock + - id: id_bcache1_part2 + type: partition + device: id_bcache1_disk + number: 2 + size: 1G + wipe: superblock + - id: id_bcache1_part3 + type: partition + device: id_bcache1_disk + number: 3 + size: 1G + wipe: superblock + - id: id_bcache1_part4 + type: partition + device: id_bcache1_disk + number: 4 + size: 1G + wipe: superblock + - id: bootfs + type: format + label: boot-fs + volume: id_rotary0_part1 + fstype: ext4 + - id: rootfs + type: format + label: root-fs + volume: id_bcache0 + fstype: ext4 + - id: rootfs_mount + type: mount + path: / + device: rootfs + - id: bootfs_mount + type: mount + path: /boot + device: bootfs + version: 1 diff -Nru curtin-18.1-17-gae48e86f/examples/tests/dirty_disks_config.yaml curtin-18.2-10-g7afd77fa/examples/tests/dirty_disks_config.yaml --- curtin-18.1-17-gae48e86f/examples/tests/dirty_disks_config.yaml 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/examples/tests/dirty_disks_config.yaml 2019-02-15 20:42:42.000000000 +0000 @@ -27,6 +27,31 @@ # disable any rpools to trigger disks with zfs_member label but inactive # pools zpool export rpool ||: + - &lvm_stop | + #!/bin/sh + # This script disables any existing lvm logical volumes that + # have been created during the early storage config stage + # and simulates the effect of booting into a system with existing + # (but inactive) lvm configuration. + for vg in `pvdisplay -C --separator = -o vg_name --noheadings`; do + vgchange -an $vg ||: + done + # disable the automatic pvscan; we want to test that curtin + # can find/enable logical volumes without this service + command -v systemctl && systemctl mask lvm2-pvscan\@.service + # remove any existing metadata written from early disk config + rm -rf /etc/lvm/archive /etc/lvm/backup + - &mdadm_stop | + #!/bin/sh + # This script disables any existing raid devices which may + # have been created during the early storage config stage + # and simulates the effect of booting into a system with existing + # but inactive mdadm configuration. + for md in /dev/md*; do + mdadm --stop $md ||: + done + # remove any existing metadata written from early disk config + rm -f /etc/mdadm/mdadm.conf early_commands: # running block-meta custom from the install environment @@ -34,9 +59,11 @@ # the disks exactly as in this config before the rest of the install # will just blow it all away. We have to clean out other environment # that could unintentionally mess things up.
- blockmeta: [env, -u, OUTPUT_FSTAB, + 01-blockmeta: [env, -u, OUTPUT_FSTAB, TARGET_MOUNT_POINT=/tmp/my.bdir/target, WORKING_DIR=/tmp/my.bdir/work.d, curtin, --showtrace, -v, block-meta, --umount, custom] - enable_swaps: [sh, -c, *swapon] - disable_rpool: [sh, -c, *zpool_export] + 02-enable_swaps: [sh, -c, *swapon] + 03-disable_rpool: [sh, -c, *zpool_export] + 04-lvm_stop: [sh, -c, *lvm_stop] + 05-mdadm_stop: [sh, -c, *mdadm_stop] diff -Nru curtin-18.1-17-gae48e86f/examples/tests/filesystem_battery.yaml curtin-18.2-10-g7afd77fa/examples/tests/filesystem_battery.yaml --- curtin-18.1-17-gae48e86f/examples/tests/filesystem_battery.yaml 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/examples/tests/filesystem_battery.yaml 2019-02-15 20:42:42.000000000 +0000 @@ -113,8 +113,8 @@ - id: bind1 fstype: "none" options: "bind" - path: "/var/lib" - spec: "/my/bind-over-var-lib" + path: "/var/cache" + spec: "/my/bind-over-var-cache" type: mount - id: bind2 fstype: "none" diff -Nru curtin-18.1-17-gae48e86f/examples/tests/install_disable_unmount.yaml curtin-18.2-10-g7afd77fa/examples/tests/install_disable_unmount.yaml --- curtin-18.1-17-gae48e86f/examples/tests/install_disable_unmount.yaml 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/examples/tests/install_disable_unmount.yaml 2019-02-15 20:42:42.000000000 +0000 @@ -14,5 +14,5 @@ late_commands: 01_get_proc_mounts: [sh, -c, *cat_proc_mounts] 02_write_out_target: [sh, -c, *echo_target_mp] - 03_unmount_target: [curtin, unmount] - 04_get_proc_mounts: [cat, /proc/mounts] + 99a_unmount_target: [curtin, unmount] + 99b_get_proc_mounts: [cat, /proc/mounts] diff -Nru curtin-18.1-17-gae48e86f/examples/tests/lvmoverraid.yaml curtin-18.2-10-g7afd77fa/examples/tests/lvmoverraid.yaml --- curtin-18.1-17-gae48e86f/examples/tests/lvmoverraid.yaml 1970-01-01 00:00:00.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/examples/tests/lvmoverraid.yaml 2019-02-15 20:42:42.000000000 +0000 @@ -0,0 +1,98 @@ +storage: + config: + - grub_device: true + id: disk-0 + model: QEMU_HARDDISK + name: 'main_disk' + serial: disk-a + preserve: false + ptable: gpt + type: disk + wipe: superblock + - grub_device: false + id: disk-2 + name: 'disk-2' + serial: disk-b + preserve: false + type: disk + wipe: superblock + - grub_device: false + id: disk-1 + name: 'disk-1' + serial: disk-c + preserve: false + type: disk + wipe: superblock + - grub_device: false + id: disk-3 + name: 'disk-3' + serial: disk-d + preserve: false + type: disk + wipe: superblock + - grub_device: false + id: disk-4 + name: 'disk-4' + serial: disk-e + preserve: false + type: disk + wipe: superblock + - device: disk-0 + flag: bios_grub + id: part-0 + preserve: false + size: 1048576 + type: partition + - device: disk-0 + flag: '' + id: part-1 + preserve: false + size: 4G + type: partition + - devices: + - disk-2 + - disk-1 + id: raid-0 + name: md0 + raidlevel: 1 + spare_devices: [] + type: raid + - devices: + - disk-3 + - disk-4 + id: raid-1 + name: md1 + raidlevel: 1 + spare_devices: [] + type: raid + - devices: + - raid-0 + - raid-1 + id: vg-0 + name: vg0 + type: lvm_volgroup + - id: lv-0 + name: lv-0 + size: 3G + type: lvm_partition + volgroup: vg-0 + - fstype: ext4 + id: fs-0 + preserve: false + type: format + volume: part-1 + - fstype: ext4 + id: fs-1 + preserve: false + type: format + volume: lv-0 + - device: fs-0 + id: mount-0 + path: / + type: mount + - device: fs-1 + id: mount-1 + path: /home + type: mount + version: 1 + diff -Nru 
curtin-18.1-17-gae48e86f/examples/tests/mirrorboot-msdos-partition.yaml curtin-18.2-10-g7afd77fa/examples/tests/mirrorboot-msdos-partition.yaml --- curtin-18.1-17-gae48e86f/examples/tests/mirrorboot-msdos-partition.yaml 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/examples/tests/mirrorboot-msdos-partition.yaml 2019-02-15 20:42:42.000000000 +0000 @@ -47,7 +47,7 @@ name: md0-part1 number: 1 offset: 4194304B - size: 2GB + size: 3GB type: partition uuid: 4f4fa336-2762-48e4-ae54-9451141665cd wipe: superblock @@ -55,7 +55,7 @@ id: md0-part2 name: md0-part2 number: 2 - size: 2GB + size: 1.5GB type: partition uuid: c2d21fd3-3cde-4432-8eab-f08594bbe76e wipe: superblock diff -Nru curtin-18.1-17-gae48e86f/examples/tests/mirrorboot-uefi.yaml curtin-18.2-10-g7afd77fa/examples/tests/mirrorboot-uefi.yaml --- curtin-18.1-17-gae48e86f/examples/tests/mirrorboot-uefi.yaml 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/examples/tests/mirrorboot-uefi.yaml 2019-02-15 20:42:42.000000000 +0000 @@ -30,7 +30,7 @@ id: sda-part2 name: sda-part2 number: 2 - size: 2G + size: 3G type: partition uuid: 47c97eae-f35d-473f-8f3d-d64161d571f1 wipe: superblock @@ -38,7 +38,7 @@ id: sda-part3 name: sda-part3 number: 3 - size: 2G + size: 1G type: partition uuid: e3202633-841c-4936-a520-b18d1f7938ea wipe: superblock @@ -56,7 +56,7 @@ id: sdb-part2 name: sdb-part2 number: 2 - size: 2G + size: 3G type: partition uuid: a33a83dd-d1bf-4940-bf3e-6d931de85dbc wipe: superblock @@ -72,7 +72,7 @@ id: sdb-part3 name: sdb-part3 number: 3 - size: 2G + size: 1G type: partition uuid: 27e29758-fdcf-4c6a-8578-c92f907a8a9d wipe: superblock diff -Nru curtin-18.1-17-gae48e86f/examples/tests/multipath.yaml curtin-18.2-10-g7afd77fa/examples/tests/multipath.yaml --- curtin-18.1-17-gae48e86f/examples/tests/multipath.yaml 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/examples/tests/multipath.yaml 2019-02-15 20:42:42.000000000 +0000 @@ -36,3 +36,4 @@ type: mount path: /home device: sda2_home + options: 'defaults,nofail' diff -Nru curtin-18.1-17-gae48e86f/examples/tests/nvme_bcache.yaml curtin-18.2-10-g7afd77fa/examples/tests/nvme_bcache.yaml --- curtin-18.1-17-gae48e86f/examples/tests/nvme_bcache.yaml 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/examples/tests/nvme_bcache.yaml 2019-02-15 20:42:42.000000000 +0000 @@ -19,7 +19,7 @@ model: INTEL SSDPEDME400G4 name: nvme0n1 ptable: gpt - path: /dev/nvme0n1 + serial: nvme-a type: disk wipe: superblock - device: sda diff -Nru curtin-18.1-17-gae48e86f/examples/tests/nvme.yaml curtin-18.2-10-g7afd77fa/examples/tests/nvme.yaml --- curtin-18.1-17-gae48e86f/examples/tests/nvme.yaml 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/examples/tests/nvme.yaml 2019-02-15 20:42:42.000000000 +0000 @@ -44,7 +44,7 @@ device: main_disk_home - id: nvme_disk type: disk - path: /dev/nvme0n1 + serial: nvme-a name: nvme_disk wipe: superblock ptable: gpt @@ -63,7 +63,7 @@ device: nvme_disk - id: nvme_disk2 type: disk - path: /dev/nvme1n1 + serial: nvme-b wipe: superblock ptable: msdos name: second_nvme diff -Nru curtin-18.1-17-gae48e86f/examples/tests/simple-storage.yaml curtin-18.2-10-g7afd77fa/examples/tests/simple-storage.yaml --- curtin-18.1-17-gae48e86f/examples/tests/simple-storage.yaml 1970-01-01 00:00:00.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/examples/tests/simple-storage.yaml 2019-02-15 20:42:42.000000000 +0000 @@ -0,0 +1,48 @@ +# MAAS will send storage config to dd and windows to help pick boot device +# this test forces 
curtin down a block-meta simple path along with storage cfg +partitioning_commands: + builtin: [curtin, block-meta, simple, --force-mode] +showtrace: true +storage: + version: 1 + config: + - id: sda + type: disk + wipe: superblock + ptable: msdos + model: QEMU HARDDISK + serial: disk-a + grub_device: true + - id: sdb + type: disk + wipe: superblock + ptable: msdos + model: QEMU HARDDISK + serial: disk-b + wipe: superblock + - id: sdc + type: disk + wipe: superblock + ptable: msdos + model: QEMU HARDDISK + serial: disk-c + wipe: superblock +# This partition config is here to "dirty" the disk + - id: sda-part1 + type: partition + device: sda + name: sda-part1 + number: 1 + size: 3G + uuid: ecc1ec63-e8d2-4719-8cee-dd7f4e2b390e + wipe: superblock + - id: sda-part1_format + type: format + fstype: ext4 + label: root + uuid: f793b242-e812-44df-91c0-c245a55ffd59 + volume: sda-part1 + - id: sda-part1_mount + type: mount + path: / + device: sda-part1_format diff -Nru curtin-18.1-17-gae48e86f/examples/tests/vmtest_defaults.yaml curtin-18.2-10-g7afd77fa/examples/tests/vmtest_defaults.yaml --- curtin-18.1-17-gae48e86f/examples/tests/vmtest_defaults.yaml 1970-01-01 00:00:00.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/examples/tests/vmtest_defaults.yaml 2019-02-15 20:42:42.000000000 +0000 @@ -0,0 +1,24 @@ +# this updates pollinate in the installed target to add a vmtest identifier. +# specifically pollinate's user-agent should contain 'curtin/vmtest'. +_vmtest_pollinate: + - &pvmtest | + cfg="/etc/pollinate/add-user-agent" + [ -d "${cfg%/*}" ] || exit 0 + echo curtin/vmtest >> "$cfg" + +# this enables a persistent journald if target system has journald +# and does not have /var/log/journal directory already +_persist_journal: + - &persist_journal | + command -v journalctl && { + jdir=/var/log/journal + [ -e ${jdir} ] || { + mkdir -p ${jdir} + systemd-tmpfiles --create --prefix ${jdir} + } + } + exit 0 + +late_commands: + 01_vmtest_pollinate: ['curtin', 'in-target', '--', 'sh', '-c', *pvmtest] + 02_persist_journal: ['curtin', 'in-target', '--', 'sh', '-c', *persist_journal] diff -Nru curtin-18.1-17-gae48e86f/helpers/common curtin-18.2-10-g7afd77fa/helpers/common --- curtin-18.1-17-gae48e86f/helpers/common 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/helpers/common 2019-02-15 20:42:42.000000000 +0000 @@ -537,22 +537,59 @@ done echo "${c# }" ) - _RET="${carry_lead:+${carry_lead} }${carry_extra}" + [ -n "${carry_lead}" -a -n "${carry_extra}" ] && + carry_lead="${carry_lead} " + _RET="${carry_lead}${carry_extra}" +} + +shell_config_update() { + # shell_config_update(file, name, value) + # update variable 'name' setting value to 'val' in shell syntax 'file'. + # if 'name' is not present, then append declaration. + local file="$1" name="$2" val="$3" + if ! [ -f "$file" ] || ! 
grep -q "^$name=" "$file"; then + debug 2 "appending to $file shell $name=\"$val\"" + echo "$name=\"$val\"" >> "$file" + return + fi + local cand="" del="" + for cand in "|" "," "/"; do + [ "${val#*${del}}" = "${val}" ] && del="$cand" && break + done + [ -n "$del" ] || { + error "Couldn't find a sed delimiter for '$val'"; + return 1; + } + + sed -i -e "s${del}^$name=.*${del}$name=\"$val\"${del}" "$file" || + { error "Failed editing '$file' to set $name=$val"; return 1; } + debug 2 "updated $file to set $name=\"$val\"" + return 0 +} + +apply_grub_cmdline_linux_default() { + local mp="$1" newargs="$2" edg="${3:-etc/default/grub}" + local gcld="GRUB_CMDLINE_LINUX_DEFAULT" + debug 1 "setting $gcld to '$newargs' in $edg" + shell_config_update "$mp/$edg" "$gcld" "$newargs" || { + error "Failed to set '$gcld=$newargs' in $edg" + return 1 + } } install_grub() { - local long_opts="uefi,update-nvram" + local long_opts="uefi,update-nvram,os-family:" local getopt_out="" mp_efi="" getopt_out=$(getopt --name "${0##*/}" \ --options "" --long "${long_opts}" -- "$@") && eval set -- "${getopt_out}" - local uefi=0 - local update_nvram=0 + local uefi=0 update_nvram=0 os_family="" while [ $# -ne 0 ]; do cur="$1"; next="$2"; case "$cur" in + --os-family) os_family=${next};; --uefi) uefi=$((${uefi}+1));; --update-nvram) update_nvram=$((${update_nvram}+1));; --) shift; break;; @@ -595,29 +632,88 @@ error "$mp_dev ($fstype) is not a block device!"; return 1; fi - # get dpkg arch - local dpkg_arch="" - dpkg_arch=$(chroot "$mp" dpkg --print-architecture) - r=$? + local os_variant="" + if [ -e "${mp}/etc/os-release" ]; then + os_variant=$(chroot "$mp" \ + /bin/sh -c 'echo $(. /etc/os-release; echo $ID)') + else + # Centos6 doesn't have os-release, so check for centos/redhat release + # looks like: CentOS release 6.9 (Final) + for rel in $(ls ${mp}/etc/*-release); do + os_variant=$(awk '{print tolower($1)}' $rel) + [ -n "$os_variant" ] && break + done + fi + [ $? != 0 ] && + { error "Failed to read ID from $mp/etc/os-release"; return 1; } + + local rhel_ver="" + case $os_variant in + debian|ubuntu) os_family="debian";; + centos|rhel) + os_family="redhat" + rhel_ver=$(chroot "$mp" rpm -E '%rhel') + ;; + esac + + # ensure we have both settings, family and variant are needed + [ -n "${os_variant}" -a -n "${os_family}" ] || + { error "Failed to determine os variant and family"; return 1; } + + # get target arch + local target_arch="" r="1" + case $os_family in + debian) + target_arch=$(chroot "$mp" dpkg --print-architecture) + r=$? + ;; + redhat) + target_arch=$(chroot "$mp" rpm -E '%_arch') + r=$? 
+ ;; + esac [ $r -eq 0 ] || { - error "failed to get dpkg architecture [$r]" + error "failed to get target architecture [$r]" return 1; } # grub is not the bootloader you are looking for - if [ "${dpkg_arch}" = "s390x" ]; then - return 0; + if [ "${target_arch}" = "s390x" ]; then + return 0; fi # set correct grub package - local grub_name="grub-pc" - local grub_target="i386-pc" - if [ "${dpkg_arch#ppc64}" != "${dpkg_arch}" ]; then + local grub_name="" + local grub_target="" + case "$target_arch" in + i386|amd64) + # debian + grub_name="grub-pc" + grub_target="i386-pc" + ;; + x86_64) + case $rhel_ver in + 6) grub_name="grub";; + 7) grub_name="grub2-pc";; + *) + error "Unknown rhel_ver [$rhel_ver]"; + return 1; + ;; + esac + grub_target="i386-pc" + ;; + esac + if [ "${target_arch#ppc64}" != "${target_arch}" ]; then grub_name="grub-ieee1275" grub_target="powerpc-ieee1275" elif [ "$uefi" -ge 1 ]; then - grub_name="grub-efi-$dpkg_arch" - case "$dpkg_arch" in + grub_name="grub-efi-$target_arch" + case "$target_arch" in + x86_64) + # centos 7+, no centos6 support + grub_name="grub2-efi-x64-modules" + grub_target="x86_64-efi" + ;; amd64) grub_target="x86_64-efi";; arm64) @@ -626,9 +722,19 @@ fi # check that the grub package is installed - tmp=$(chroot "$mp" dpkg-query --show \ - --showformat='${Status}\n' $grub_name) - r=$? + local r=$? + case $os_family in + debian) + tmp=$(chroot "$mp" dpkg-query --show \ + --showformat='${Status}\n' $grub_name) + r=$? + ;; + redhat) + tmp=$(chroot "$mp" rpm -q \ + --queryformat='install ok installed\n' $grub_name) + r=$? + ;; + esac if [ $r -ne 0 -a $r -ne 1 ]; then error "failed to check if $grub_name installed"; return 1; @@ -636,11 +742,18 @@ case "$tmp" in install\ ok\ installed) :;; *) debug 1 "$grub_name not installed, not doing anything"; - return 0;; + return 1;; esac local grub_d="etc/default/grub.d" + # ubuntu writes to /etc/default/grub.d/50-curtin-settings.cfg + # to avoid tripping prompts on upgrade LP: #564853 local mygrub_cfg="$grub_d/50-curtin-settings.cfg" + case $os_family in + redhat) + grub_d="etc/default" + mygrub_cfg="etc/default/grub";; + esac [ -d "$mp/$grub_d" ] || mkdir -p "$mp/$grub_d" || { error "Failed to create $grub_d"; return 1; } @@ -659,14 +772,29 @@ error "Failed to get carryover parrameters from cmdline"; return 1; } - debug 1 "carryover command line params: $newargs" + # always append rd.auto=1 for centos + case $os_family in + redhat) + newargs="${newargs:+${newargs} }rd.auto=1";; + esac + debug 1 "carryover command line params '$newargs'" + + case $os_family in + debian) + : > "$mp/$mygrub_cfg" || + { error "Failed to write '$mygrub_cfg'"; return 1; } + ;; + esac + + if [ "${REPLACE_GRUB_LINUX_DEFAULT:-1}" != "0" ]; then + apply_grub_cmdline_linux_default "$mp" "$newargs" || { + error "Failed to apply grub cmdline." + return 1 + } + fi - : > "$mp/$mygrub_cfg" || - { error "Failed to write '$mygrub_cfg'"; return 1; } { - [ "${REPLACE_GRUB_LINUX_DEFAULT:-1}" = "0" ] || - echo "GRUB_CMDLINE_LINUX_DEFAULT=\"$newargs\"" - echo "# disable grub os prober that might find other OS installs." + echo "# Curtin disable grub os prober that might find other OS installs." 
echo "GRUB_DISABLE_OS_PROBER=true" echo "GRUB_TERMINAL=console" } >> "$mp/$mygrub_cfg" @@ -692,30 +820,46 @@ nvram="--no-nvram" if [ "$update_nvram" -ge 1 ]; then nvram="" - fi + fi debug 1 "curtin uefi: installing ${grub_name} to: /boot/efi" chroot "$mp" env DEBIAN_FRONTEND=noninteractive sh -exc ' echo "before grub-install efiboot settings" - efibootmgr || echo "WARN: efibootmgr exited $?" - dpkg-reconfigure "$1" - update-grub + efibootmgr -v || echo "WARN: efibootmgr exited $?" + bootid="$4" + grubpost="" + case $bootid in + debian|ubuntu) + grubcmd="grub-install" + dpkg-reconfigure "$1" + update-grub + ;; + centos|redhat|rhel) + grubcmd="grub2-install" + grubpost="grub2-mkconfig -o /boot/grub2/grub.cfg" + ;; + *) + echo "Unsupported OS: $bootid" 1>&2 + exit 1 + ;; + esac # grub-install in 12.04 does not contain --no-nvram, --target, # or --efi-directory target="--target=$2" no_nvram="$3" efi_dir="--efi-directory=/boot/efi" - gi_out=$(grub-install --help 2>&1) + gi_out=$($grubcmd --help 2>&1) echo "$gi_out" | grep -q -- "$no_nvram" || no_nvram="" echo "$gi_out" | grep -q -- "--target" || target="" echo "$gi_out" | grep -q -- "--efi-directory" || efi_dir="" - grub-install $target $efi_dir \ - --bootloader-id=ubuntu --recheck $no_nvram' -- \ - "${grub_name}" "${grub_target}" "$nvram" &2 + exit 1 + ;; + esac + for d in "$@"; do + echo $grubcmd "$d"; + $grubcmd "$d" || exit; done + [ -z "$grubpost" ] || $grubpost;' \ + -- "${grub_name}" "${os_variant}" "${rhel_ver}" "${grubdevs[@]}" 1.0 with new apt format.""" + m_get_pkg_ver.return_value = distro.parse_dpkg_version('17.1-0ubuntu1') + apt_config.apply_preserve_sources_list(self.tmp) + m_get_pkg_ver.assert_has_calls( + [mock.call('cloud-init', target=self.tmp)]) + self.assertEqual( + yaml.load(util.load_file(self.tmp_cfg)), + {'apt': {'preserve_sources_list': True}}) + # vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/tests/unittests/test_apt_source.py curtin-18.2-10-g7afd77fa/tests/unittests/test_apt_source.py --- curtin-18.1-17-gae48e86f/tests/unittests/test_apt_source.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/unittests/test_apt_source.py 2019-02-15 20:42:42.000000000 +0000 @@ -12,8 +12,9 @@ import mock from mock import call -from curtin import util +from curtin import distro from curtin import gpg +from curtin import util from curtin.commands import apt_config from .helpers import CiTestCase @@ -77,7 +78,7 @@ @staticmethod def _add_apt_sources(*args, **kwargs): - with mock.patch.object(util, 'apt_update'): + with mock.patch.object(distro, 'apt_update'): apt_config.add_apt_sources(*args, **kwargs) @staticmethod @@ -86,7 +87,7 @@ Get the most basic default mrror and release info to be used in tests """ params = {} - params['RELEASE'] = util.lsb_release()['codename'] + params['RELEASE'] = distro.lsb_release()['codename'] arch = util.get_architecture() params['MIRROR'] = apt_config.get_default_mirrors(arch)["PRIMARY"] return params @@ -472,7 +473,7 @@ 'uri': 'http://testsec.ubuntu.com/%s/' % component}]} post = ("%s_dists_%s-updates_InRelease" % - (component, util.lsb_release()['codename'])) + (component, distro.lsb_release()['codename'])) fromfn = ("%s/%s_%s" % (pre, archive, post)) tofn = ("%s/test.ubuntu.com_%s" % (pre, post)) @@ -937,7 +938,7 @@ m_set_sel.assert_not_called() @mock.patch("curtin.commands.apt_config.debconf_set_selections") - @mock.patch("curtin.commands.apt_config.util.get_installed_packages") + @mock.patch("curtin.commands.apt_config.distro.get_installed_packages") def 
test_set_sel_call_has_expected_input(self, m_get_inst, m_set_sel): data = { 'set1': 'pkga pkga/q1 mybool false', @@ -960,7 +961,7 @@ @mock.patch("curtin.commands.apt_config.dpkg_reconfigure") @mock.patch("curtin.commands.apt_config.debconf_set_selections") - @mock.patch("curtin.commands.apt_config.util.get_installed_packages") + @mock.patch("curtin.commands.apt_config.distro.get_installed_packages") def test_reconfigure_if_intersection(self, m_get_inst, m_set_sel, m_dpkg_r): data = { @@ -985,7 +986,7 @@ @mock.patch("curtin.commands.apt_config.dpkg_reconfigure") @mock.patch("curtin.commands.apt_config.debconf_set_selections") - @mock.patch("curtin.commands.apt_config.util.get_installed_packages") + @mock.patch("curtin.commands.apt_config.distro.get_installed_packages") def test_reconfigure_if_no_intersection(self, m_get_inst, m_set_sel, m_dpkg_r): data = {'set1': 'pkga pkga/q1 mybool false'} diff -Nru curtin-18.1-17-gae48e86f/tests/unittests/test_block_iscsi.py curtin-18.2-10-g7afd77fa/tests/unittests/test_block_iscsi.py --- curtin-18.1-17-gae48e86f/tests/unittests/test_block_iscsi.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/unittests/test_block_iscsi.py 2019-02-15 20:42:42.000000000 +0000 @@ -588,6 +588,13 @@ # utilize IscsiDisk str method for equality check self.assertEqual(str(expected_iscsi_disk), str(iscsi_disk)) + # test with cfg.get('storage') since caller may already have + # grabbed the 'storage' value from the curtin config + iscsi_disk = iscsi.get_iscsi_disks_from_config( + cfg.get('storage')).pop() + # utilize IscsiDisk str method for equality check + self.assertEqual(str(expected_iscsi_disk), str(iscsi_disk)) + def test_parse_iscsi_disk_from_config_no_iscsi(self): """Test parsing storage config with no iscsi disks included""" cfg = { diff -Nru curtin-18.1-17-gae48e86f/tests/unittests/test_block_lvm.py curtin-18.2-10-g7afd77fa/tests/unittests/test_block_lvm.py --- curtin-18.1-17-gae48e86f/tests/unittests/test_block_lvm.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/unittests/test_block_lvm.py 2019-02-15 20:42:42.000000000 +0000 @@ -73,26 +73,27 @@ @mock.patch('curtin.block.lvm.lvmetad_running') @mock.patch('curtin.block.lvm.util') - def test_lvm_scan(self, mock_util, mock_lvmetad): + @mock.patch('curtin.block.lvm.distro') + def test_lvm_scan(self, mock_distro, mock_util, mock_lvmetad): """check that lvm_scan formats commands correctly for each release""" + cmds = [['pvscan'], ['vgscan', '--mknodes']] for (count, (codename, lvmetad_status, use_cache)) in enumerate( - [('precise', False, False), ('precise', True, False), - ('trusty', False, False), ('trusty', True, True), - ('vivid', False, False), ('vivid', True, True), - ('wily', False, False), ('wily', True, True), + [('precise', False, False), + ('trusty', False, False), ('xenial', False, False), ('xenial', True, True), - ('yakkety', True, True), ('UNAVAILABLE', True, True), (None, True, True), (None, False, False)]): - mock_util.lsb_release.return_value = {'codename': codename} + mock_distro.lsb_release.return_value = {'codename': codename} mock_lvmetad.return_value = lvmetad_status lvm.lvm_scan() - self.assertEqual( - len(mock_util.subp.call_args_list), 2 * (count + 1)) - for (expected, actual) in zip( - [['pvscan'], ['vgscan', '--mknodes']], - mock_util.subp.call_args_list[2 * count:2 * count + 2]): - if use_cache: - expected.append('--cache') - self.assertEqual(mock.call(expected, capture=True), actual) + expected = [cmd for cmd in cmds] + for cmd in expected: + if 
lvmetad_status: + cmd.append('--cache') + + calls = [mock.call(cmd, capture=True) for cmd in expected] + self.assertEqual(len(expected), len(mock_util.subp.call_args_list)) + mock_util.subp.has_calls(calls) + mock_util.subp.reset_mock() + # vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/tests/unittests/test_block_mdadm.py curtin-18.2-10-g7afd77fa/tests/unittests/test_block_mdadm.py --- curtin-18.1-17-gae48e86f/tests/unittests/test_block_mdadm.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/unittests/test_block_mdadm.py 2019-02-15 20:42:42.000000000 +0000 @@ -15,12 +15,13 @@ def setUp(self): super(TestBlockMdadmAssemble, self).setUp() self.add_patch('curtin.block.mdadm.util', 'mock_util') + self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') self.add_patch('curtin.block.mdadm.udev', 'mock_udev') # Common mock settings self.mock_valid.return_value = True - self.mock_util.lsb_release.return_value = {'codename': 'precise'} + self.mock_lsb_release.return_value = {'codename': 'precise'} self.mock_util.subp.return_value = ('', '') def test_mdadm_assemble_scan(self): @@ -88,12 +89,15 @@ def setUp(self): super(TestBlockMdadmCreate, self).setUp() self.add_patch('curtin.block.mdadm.util', 'mock_util') + self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') self.add_patch('curtin.block.mdadm.get_holders', 'mock_holders') + self.add_patch('curtin.block.mdadm.udev.udevadm_settle', + 'm_udevadm_settle') # Common mock settings self.mock_valid.return_value = True - self.mock_util.lsb_release.return_value = {'codename': 'precise'} + self.mock_lsb_release.return_value = {'codename': 'precise'} self.mock_holders.return_value = [] def prepare_mock(self, md_devname, raidlevel, devices, spares): @@ -115,8 +119,6 @@ expected_calls.append( call(["mdadm", "--zero-superblock", d], capture=True)) - side_effects.append(("", "")) # udevadm settle - expected_calls.append(call(["udevadm", "settle"])) side_effects.append(("", "")) # udevadm control --stop-exec-queue expected_calls.append(call(["udevadm", "control", "--stop-exec-queue"])) @@ -134,9 +136,6 @@ side_effects.append(("", "")) # udevadm control --start-exec-queue expected_calls.append(call(["udevadm", "control", "--start-exec-queue"])) - side_effects.append(("", "")) # udevadm settle - expected_calls.append(call(["udevadm", "settle", - "--exit-if-exists=%s" % md_devname])) return (side_effects, expected_calls) @@ -154,6 +153,8 @@ mdadm.mdadm_create(md_devname=md_devname, raidlevel=raidlevel, devices=devices, spares=spares) self.mock_util.subp.assert_has_calls(expected_calls) + self.m_udevadm_settle.assert_has_calls( + [call(), call(exists=md_devname)]) def test_mdadm_create_raid0_devshort(self): md_devname = "md0" @@ -237,14 +238,15 @@ def setUp(self): super(TestBlockMdadmExamine, self).setUp() self.add_patch('curtin.block.mdadm.util', 'mock_util') + self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') # Common mock settings self.mock_valid.return_value = True - self.mock_util.lsb_release.return_value = {'codename': 'precise'} + self.mock_lsb_release.return_value = {'codename': 'precise'} def test_mdadm_examine_export(self): - self.mock_util.lsb_release.return_value = {'codename': 'xenial'} + self.mock_lsb_release.return_value = {'codename': 'xenial'} 
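One caution on the rewritten lvm_scan test above: 'has_calls' is not a mock assertion. Arbitrary attribute access on a Mock silently succeeds and returns a child mock, so only the preceding call-count check actually verifies anything; the real assertion method is assert_has_calls. The command construction the test table describes, sketched in Python (the helper name is illustrative, not curtin's):

    def expected_lvm_commands(lvmetad_running):
        # pvscan and vgscan always run; '--cache' is appended only when
        # the lvmetad daemon is active (the xenial-and-later table rows)
        cmds = [['pvscan'], ['vgscan', '--mknodes']]
        if lvmetad_running:
            return [cmd + ['--cache'] for cmd in cmds]
        return cmds
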
self.mock_util.subp.return_value = ( """ MD_LEVEL=raid0 @@ -321,7 +323,7 @@ class TestBlockMdadmStop(CiTestCase): def setUp(self): super(TestBlockMdadmStop, self).setUp() - self.add_patch('curtin.block.mdadm.util.lsb_release', 'mock_util_lsb') + self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') self.add_patch('curtin.block.mdadm.util.subp', 'mock_util_subp') self.add_patch('curtin.block.mdadm.util.write_file', 'mock_util_write_file') @@ -334,7 +336,7 @@ # Common mock settings self.mock_valid.return_value = True - self.mock_util_lsb.return_value = {'codename': 'xenial'} + self.mock_lsb_release.return_value = {'codename': 'xenial'} self.mock_util_subp.side_effect = iter([ ("", ""), # mdadm stop device ]) @@ -489,11 +491,12 @@ def setUp(self): super(TestBlockMdadmRemove, self).setUp() self.add_patch('curtin.block.mdadm.util', 'mock_util') + self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') # Common mock settings self.mock_valid.return_value = True - self.mock_util.lsb_release.return_value = {'codename': 'xenial'} + self.mock_lsb_release.return_value = {'codename': 'xenial'} self.mock_util.subp.side_effect = [ ("", ""), # mdadm remove device ] @@ -515,14 +518,15 @@ def setUp(self): super(TestBlockMdadmQueryDetail, self).setUp() self.add_patch('curtin.block.mdadm.util', 'mock_util') + self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') # Common mock settings self.mock_valid.return_value = True - self.mock_util.lsb_release.return_value = {'codename': 'precise'} + self.mock_lsb_release.return_value = {'codename': 'precise'} def test_mdadm_query_detail_export(self): - self.mock_util.lsb_release.return_value = {'codename': 'xenial'} + self.mock_lsb_release.return_value = {'codename': 'xenial'} self.mock_util.subp.return_value = ( """ MD_LEVEL=raid1 @@ -593,13 +597,14 @@ def setUp(self): super(TestBlockMdadmDetailScan, self).setUp() self.add_patch('curtin.block.mdadm.util', 'mock_util') + self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') # Common mock settings self.scan_output = ("ARRAY /dev/md0 metadata=1.2 spares=2 name=0 " + "UUID=b1eae2ff:69b6b02e:1d63bb53:ddfa6e4a") self.mock_valid.return_value = True - self.mock_util.lsb_release.return_value = {'codename': 'xenial'} + self.mock_lsb_release.return_value = {'codename': 'xenial'} self.mock_util.subp.side_effect = [ (self.scan_output, ""), # mdadm --detail --scan ] @@ -628,10 +633,11 @@ def setUp(self): super(TestBlockMdadmMdHelpers, self).setUp() self.add_patch('curtin.block.mdadm.util', 'mock_util') + self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') self.mock_valid.return_value = True - self.mock_util.lsb_release.return_value = {'codename': 'xenial'} + self.mock_lsb_release.return_value = {'codename': 'xenial'} def test_valid_mdname(self): mdname = "/dev/md0" diff -Nru curtin-18.1-17-gae48e86f/tests/unittests/test_block_mkfs.py curtin-18.2-10-g7afd77fa/tests/unittests/test_block_mkfs.py --- curtin-18.1-17-gae48e86f/tests/unittests/test_block_mkfs.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/unittests/test_block_mkfs.py 2019-02-15 20:42:42.000000000 +0000 @@ -37,11 +37,12 @@ @mock.patch("curtin.block.mkfs.block") @mock.patch("curtin.block.mkfs.os") 
@mock.patch("curtin.block.mkfs.util") + @mock.patch("curtin.block.mkfs.distro.lsb_release") def _run_mkfs_with_config(self, config, expected_cmd, expected_flags, - mock_util, mock_os, mock_block, + mock_lsb_release, mock_util, mock_os, mock_block, release="wily", strict=False): # Pretend we are on wily as there are no known edge cases for it - mock_util.lsb_release.return_value = {"codename": release} + mock_lsb_release.return_value = {"codename": release} mock_os.path.exists.return_value = True mock_block.get_blockdev_sector_size.return_value = (512, 512) diff -Nru curtin-18.1-17-gae48e86f/tests/unittests/test_block.py curtin-18.2-10-g7afd77fa/tests/unittests/test_block.py --- curtin-18.1-17-gae48e86f/tests/unittests/test_block.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/unittests/test_block.py 2019-02-15 20:42:42.000000000 +0000 @@ -647,4 +647,39 @@ knames = block.get_device_slave_knames(device) self.assertEqual(slaves, knames) + +class TestGetSupportedFilesystems(CiTestCase): + + supported_filesystems = ['sysfs', 'rootfs', 'ramfs', 'ext4'] + + def _proc_filesystems_output(self, supported=None): + if not supported: + supported = self.supported_filesystems + + def devname(fsname): + """ in-use filesystem modules not emit the 'nodev' prefix """ + return '\t' if fsname.startswith('ext') else 'nodev\t' + + return '\n'.join([devname(fs) + fs for fs in supported]) + '\n' + + @mock.patch('curtin.block.util') + @mock.patch('curtin.block.os') + def test_get_supported_filesystems(self, mock_os, mock_util): + """ test parsing /proc/filesystems contents into a filesystem list""" + mock_os.path.exists.return_value = True + mock_util.load_file.return_value = self._proc_filesystems_output() + + result = block.get_supported_filesystems() + self.assertEqual(sorted(self.supported_filesystems), sorted(result)) + + @mock.patch('curtin.block.util') + @mock.patch('curtin.block.os') + def test_get_supported_filesystems_no_proc_path(self, mock_os, mock_util): + """ missing /proc/filesystems raises RuntimeError """ + mock_os.path.exists.return_value = False + with self.assertRaises(RuntimeError): + block.get_supported_filesystems() + self.assertEqual(0, mock_util.load_file.call_count) + + # vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/tests/unittests/test_block_zfs.py curtin-18.2-10-g7afd77fa/tests/unittests/test_block_zfs.py --- curtin-18.1-17-gae48e86f/tests/unittests/test_block_zfs.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/unittests/test_block_zfs.py 2019-02-15 20:42:42.000000000 +0000 @@ -378,15 +378,20 @@ self.mock_blkid.assert_called_with(devs=[devname]) -class TestBlockZfsZfsSupported(CiTestCase): +class TestBlockZfsAssertZfsSupported(CiTestCase): def setUp(self): - super(TestBlockZfsZfsSupported, self).setUp() + super(TestBlockZfsAssertZfsSupported, self).setUp() self.add_patch('curtin.block.zfs.util.subp', 'mock_subp') self.add_patch('curtin.block.zfs.util.get_platform_arch', 'mock_arch') - self.add_patch('curtin.block.zfs.util.lsb_release', 'mock_release') - self.mock_release.return_value = {'codename': 'xenial'} + self.add_patch('curtin.block.zfs.distro.lsb_release', 'mock_release') + self.add_patch('curtin.block.zfs.util.which', 'mock_which') + self.add_patch('curtin.block.zfs.get_supported_filesystems', + 'mock_supfs') self.mock_arch.return_value = 'x86_64' + self.mock_release.return_value = {'codename': 'xenial'} + self.mock_supfs.return_value = ['zfs'] + self.mock_which.return_value = True def 
test_supported_arch(self): self.assertTrue(zfs.zfs_supported()) @@ -394,81 +399,143 @@ def test_unsupported_arch(self): self.mock_arch.return_value = 'i386' with self.assertRaises(RuntimeError): - zfs.zfs_supported() + zfs.zfs_assert_supported() def test_unsupported_releases(self): for rel in ['precise', 'trusty']: self.mock_release.return_value = {'codename': rel} with self.assertRaises(RuntimeError): - zfs.zfs_supported() + zfs.zfs_assert_supported() - def test_missing_module(self): - missing = 'modinfo: ERROR: Module zfs not found.\n ' + @mock.patch('curtin.block.zfs.util.is_kmod_loaded') + @mock.patch('curtin.block.zfs.get_supported_filesystems') + def test_missing_module(self, mock_supfs, mock_kmod): + missing = 'modprobe: FATAL: Module zfs not found.\n ' self.mock_subp.side_effect = ProcessExecutionError(stdout='', stderr=missing, exit_code='1') + mock_supfs.return_value = ['ext4'] + mock_kmod.return_value = False with self.assertRaises(RuntimeError): - zfs.zfs_supported() + zfs.zfs_assert_supported() -class TestZfsSupported(CiTestCase): +class TestAssertZfsSupported(CiTestCase): def setUp(self): - super(TestZfsSupported, self).setUp() + super(TestAssertZfsSupported, self).setUp() + @mock.patch('curtin.block.zfs.get_supported_filesystems') + @mock.patch('curtin.block.zfs.distro') @mock.patch('curtin.block.zfs.util') - def test_zfs_supported_returns_true(self, mock_util): - """zfs_supported returns True on supported platforms""" + def test_zfs_assert_supported_returns_true(self, mock_util, mock_distro, + mock_supfs): + """zfs_assert_supported returns True on supported platforms""" mock_util.get_platform_arch.return_value = 'amd64' - mock_util.lsb_release.return_value = {'codename': 'bionic'} + mock_distro.lsb_release.return_value = {'codename': 'bionic'} mock_util.subp.return_value = ("", "") + mock_supfs.return_value = ['zfs'] + mock_util.which.side_effect = iter(['/wark/zpool', '/wark/zfs']) self.assertNotIn(mock_util.get_platform_arch.return_value, zfs.ZFS_UNSUPPORTED_ARCHES) - self.assertNotIn(mock_util.lsb_release.return_value['codename'], + self.assertNotIn(mock_distro.lsb_release.return_value['codename'], zfs.ZFS_UNSUPPORTED_RELEASES) self.assertTrue(zfs.zfs_supported()) + @mock.patch('curtin.block.zfs.distro') @mock.patch('curtin.block.zfs.util') - def test_zfs_supported_raises_exception_on_bad_arch(self, mock_util): - """zfs_supported raises RuntimeError on unspported arches""" - mock_util.lsb_release.return_value = {'codename': 'bionic'} + def test_zfs_assert_supported_raises_exception_on_bad_arch(self, + mock_util, + mock_distro): + """zfs_assert_supported raises RuntimeError on unspported arches""" + mock_distro.lsb_release.return_value = {'codename': 'bionic'} mock_util.subp.return_value = ("", "") for arch in zfs.ZFS_UNSUPPORTED_ARCHES: mock_util.get_platform_arch.return_value = arch with self.assertRaises(RuntimeError): - zfs.zfs_supported() + zfs.zfs_assert_supported() + @mock.patch('curtin.block.zfs.distro') @mock.patch('curtin.block.zfs.util') - def test_zfs_supported_raises_execption_on_bad_releases(self, mock_util): - """zfs_supported raises RuntimeError on unspported releases""" + def test_zfs_assert_supported_raises_exc_on_bad_releases(self, mock_util, + mock_distro): + """zfs_assert_supported raises RuntimeError on unspported releases""" mock_util.get_platform_arch.return_value = 'amd64' mock_util.subp.return_value = ("", "") for release in zfs.ZFS_UNSUPPORTED_RELEASES: - mock_util.lsb_release.return_value = {'codename': release} + 
mock_distro.lsb_release.return_value = {'codename': release} with self.assertRaises(RuntimeError): - zfs.zfs_supported() + zfs.zfs_assert_supported() @mock.patch('curtin.block.zfs.util.subprocess.Popen') - @mock.patch('curtin.block.zfs.util.lsb_release') + @mock.patch('curtin.block.zfs.util.is_kmod_loaded') + @mock.patch('curtin.block.zfs.get_supported_filesystems') + @mock.patch('curtin.block.zfs.distro.lsb_release') @mock.patch('curtin.block.zfs.util.get_platform_arch') - def test_zfs_supported_raises_exception_on_missing_module(self, - m_arch, - m_release, - m_popen): - """zfs_supported raises RuntimeError on missing zfs module""" + def test_zfs_assert_supported_raises_exc_on_missing_module(self, + m_arch, + m_release, + m_supfs, + m_kmod, + m_popen, + ): + """zfs_assert_supported raises RuntimeError modprobe zfs error""" m_arch.return_value = 'amd64' m_release.return_value = {'codename': 'bionic'} + m_supfs.return_value = ['ext4'] + m_kmod.return_value = False process_mock = mock.Mock() attrs = { 'returncode': 1, 'communicate.return_value': - ('output', "modinfo: ERROR: Module zfs not found."), + ('output', 'modprobe: FATAL: Module zfs not found ...'), } process_mock.configure_mock(**attrs) m_popen.return_value = process_mock with self.assertRaises(RuntimeError): - zfs.zfs_supported() + zfs.zfs_assert_supported() + + @mock.patch('curtin.block.zfs.get_supported_filesystems') + @mock.patch('curtin.block.zfs.util.lsb_release') + @mock.patch('curtin.block.zfs.util.get_platform_arch') + @mock.patch('curtin.block.zfs.util') + def test_zfs_assert_supported_raises_exc_on_missing_binaries(self, + mock_util, + m_arch, + m_release, + m_supfs): + """zfs_assert_supported raises RuntimeError if no zpool or zfs tools""" + mock_util.get_platform_arch.return_value = 'amd64' + mock_util.lsb_release.return_value = {'codename': 'bionic'} + mock_util.subp.return_value = ("", "") + m_supfs.return_value = ['zfs'] + mock_util.which.return_value = None + + with self.assertRaises(RuntimeError): + zfs.zfs_assert_supported() + + +class TestZfsSupported(CiTestCase): + + @mock.patch('curtin.block.zfs.zfs_assert_supported') + def test_zfs_supported(self, m_assert_zfs): + zfs_supported = True + m_assert_zfs.return_value = zfs_supported + + result = zfs.zfs_supported() + self.assertEqual(zfs_supported, result) + self.assertEqual(1, m_assert_zfs.call_count) + + @mock.patch('curtin.block.zfs.zfs_assert_supported') + def test_zfs_supported_returns_false_on_assert_fail(self, m_assert_zfs): + zfs_supported = False + m_assert_zfs.side_effect = RuntimeError('No zfs module') + + result = zfs.zfs_supported() + self.assertEqual(zfs_supported, result) + self.assertEqual(1, m_assert_zfs.call_count) + # vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/tests/unittests/test_clear_holders.py curtin-18.2-10-g7afd77fa/tests/unittests/test_clear_holders.py --- curtin-18.1-17-gae48e86f/tests/unittests/test_clear_holders.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/unittests/test_clear_holders.py 2019-02-15 20:42:42.000000000 +0000 @@ -6,11 +6,12 @@ import textwrap from curtin.block import clear_holders +from curtin.util import ProcessExecutionError from .helpers import CiTestCase class TestClearHolders(CiTestCase): - test_blockdev = '/dev/null' + test_blockdev = '/wark/dev/null' test_syspath = '/sys/class/block/null' remove_retries = [0.2] * 150 # clear_holders defaults to 30 seconds example_holders_trees = [ @@ -153,7 +154,7 @@ # device = self.test_syspath - 
mock_block.sys_block_path.return_value = '/dev/null' + mock_block.sys_block_path.return_value = self.test_blockdev bcache_cset_uuid = 'c08ae789-a964-46fb-a66e-650f0ae78f94' mock_os.path.exists.return_value = True @@ -189,9 +190,8 @@ def test_shutdown_bcache_non_sysfs_device(self, mock_get_bcache, mock_log, mock_os, mock_util, mock_get_bcache_block): - device = "/dev/fakenull" with self.assertRaises(ValueError): - clear_holders.shutdown_bcache(device) + clear_holders.shutdown_bcache(self.test_blockdev) self.assertEqual(0, len(mock_get_bcache.call_args_list)) self.assertEqual(0, len(mock_log.call_args_list)) @@ -208,11 +208,10 @@ def test_shutdown_bcache_no_device(self, mock_get_bcache, mock_log, mock_os, mock_util, mock_get_bcache_block, mock_block): - device = "/sys/class/block/null" - mock_block.sysfs_to_devpath.return_value = '/dev/null' + mock_block.sysfs_to_devpath.return_value = self.test_blockdev mock_os.path.exists.return_value = False - clear_holders.shutdown_bcache(device) + clear_holders.shutdown_bcache(self.test_syspath) self.assertEqual(3, len(mock_log.info.call_args_list)) self.assertEqual(1, len(mock_os.path.exists.call_args_list)) @@ -229,18 +228,17 @@ def test_shutdown_bcache_no_cset(self, mock_get_bcache, mock_log, mock_os, mock_util, mock_get_bcache_block, mock_block): - device = "/sys/class/block/null" - mock_block.sysfs_to_devpath.return_value = '/dev/null' + mock_block.sysfs_to_devpath.return_value = self.test_blockdev mock_os.path.exists.side_effect = iter([ True, # backing device exists False, # cset device not present (already removed) True, # backing device (still) exists ]) mock_get_bcache.return_value = '/sys/fs/bcache/fake' - mock_get_bcache_block.return_value = device + '/bcache' + mock_get_bcache_block.return_value = self.test_syspath + '/bcache' mock_os.path.join.side_effect = os.path.join - clear_holders.shutdown_bcache(device) + clear_holders.shutdown_bcache(self.test_syspath) self.assertEqual(4, len(mock_log.info.call_args_list)) self.assertEqual(3, len(mock_os.path.exists.call_args_list)) @@ -249,14 +247,15 @@ self.assertEqual(1, len(mock_util.write_file.call_args_list)) self.assertEqual(2, len(mock_util.wait_for_removal.call_args_list)) - mock_get_bcache.assert_called_with(device, strict=False) - mock_get_bcache_block.assert_called_with(device, strict=False) - mock_util.write_file.assert_called_with(device + '/bcache/stop', - '1', mode=None) + mock_get_bcache.assert_called_with(self.test_syspath, strict=False) + mock_get_bcache_block.assert_called_with(self.test_syspath, + strict=False) + mock_util.write_file.assert_called_with( + self.test_syspath + '/bcache/stop', '1', mode=None) retries = self.remove_retries mock_util.wait_for_removal.assert_has_calls([ - mock.call(device, retries=retries), - mock.call(device + '/bcache', retries=retries)]) + mock.call(self.test_syspath, retries=retries), + mock.call(self.test_syspath + '/bcache', retries=retries)]) @mock.patch('curtin.block.clear_holders.block') @mock.patch('curtin.block.clear_holders.udev.udevadm_settle') @@ -271,8 +270,7 @@ mock_get_bcache_block, mock_udevadm_settle, mock_block): - device = "/sys/class/block/null" - mock_block.sysfs_to_devpath.return_value = '/dev/null' + mock_block.sysfs_to_devpath.return_value = self.test_blockdev mock_os.path.exists.side_effect = iter([ True, # backing device exists True, # cset device not present (already removed) @@ -280,10 +278,10 @@ ]) cset = '/sys/fs/bcache/fake' mock_get_bcache.return_value = cset - mock_get_bcache_block.return_value = device + '/bcache' + 
mock_get_bcache_block.return_value = self.test_syspath + '/bcache' mock_os.path.join.side_effect = os.path.join - clear_holders.shutdown_bcache(device) + clear_holders.shutdown_bcache(self.test_syspath) self.assertEqual(4, len(mock_log.info.call_args_list)) self.assertEqual(3, len(mock_os.path.exists.call_args_list)) @@ -292,14 +290,15 @@ self.assertEqual(2, len(mock_util.write_file.call_args_list)) self.assertEqual(3, len(mock_util.wait_for_removal.call_args_list)) - mock_get_bcache.assert_called_with(device, strict=False) - mock_get_bcache_block.assert_called_with(device, strict=False) + mock_get_bcache.assert_called_with(self.test_syspath, strict=False) + mock_get_bcache_block.assert_called_with(self.test_syspath, + strict=False) mock_util.write_file.assert_has_calls([ mock.call(cset + '/stop', '1', mode=None), - mock.call(device + '/bcache/stop', '1', mode=None)]) + mock.call(self.test_syspath + '/bcache/stop', '1', mode=None)]) mock_util.wait_for_removal.assert_has_calls([ mock.call(cset, retries=self.remove_retries), - mock.call(device, retries=self.remove_retries) + mock.call(self.test_syspath, retries=self.remove_retries) ]) @mock.patch('curtin.block.clear_holders.block') @@ -315,8 +314,7 @@ mock_get_bcache_block, mock_udevadm_settle, mock_block): - device = "/sys/class/block/null" - mock_block.sysfs_to_devpath.return_value = '/dev/null' + mock_block.sysfs_to_devpath.return_value = self.test_blockdev mock_os.path.exists.side_effect = iter([ True, # backing device exists True, # cset device not present (already removed) @@ -324,10 +322,10 @@ ]) cset = '/sys/fs/bcache/fake' mock_get_bcache.return_value = cset - mock_get_bcache_block.return_value = device + '/bcache' + mock_get_bcache_block.return_value = self.test_syspath + '/bcache' mock_os.path.join.side_effect = os.path.join - clear_holders.shutdown_bcache(device) + clear_holders.shutdown_bcache(self.test_syspath) self.assertEqual(4, len(mock_log.info.call_args_list)) self.assertEqual(3, len(mock_os.path.exists.call_args_list)) @@ -336,7 +334,7 @@ self.assertEqual(1, len(mock_util.write_file.call_args_list)) self.assertEqual(1, len(mock_util.wait_for_removal.call_args_list)) - mock_get_bcache.assert_called_with(device, strict=False) + mock_get_bcache.assert_called_with(self.test_syspath, strict=False) mock_util.write_file.assert_has_calls([ mock.call(cset + '/stop', '1', mode=None), ]) @@ -361,8 +359,7 @@ mock_wipe, mock_block): """Test writes sysfs write failures pass if file not present""" - device = "/sys/class/block/null" - mock_block.sysfs_to_devpath.return_value = '/dev/null' + mock_block.sysfs_to_devpath.return_value = self.test_blockdev mock_os.path.exists.side_effect = iter([ True, # backing device exists True, # cset device not present (already removed) @@ -371,14 +368,14 @@ ]) cset = '/sys/fs/bcache/fake' mock_get_bcache.return_value = cset - mock_get_bcache_block.return_value = device + '/bcache' + mock_get_bcache_block.return_value = self.test_syspath + '/bcache' mock_os.path.join.side_effect = os.path.join # make writes to sysfs fail mock_util.write_file.side_effect = IOError(errno.ENOENT, "File not found") - clear_holders.shutdown_bcache(device) + clear_holders.shutdown_bcache(self.test_syspath) self.assertEqual(4, len(mock_log.info.call_args_list)) self.assertEqual(3, len(mock_os.path.exists.call_args_list)) @@ -387,7 +384,7 @@ self.assertEqual(1, len(mock_util.write_file.call_args_list)) self.assertEqual(1, len(mock_util.wait_for_removal.call_args_list)) - mock_get_bcache.assert_called_with(device, strict=False) + 
mock_get_bcache.assert_called_with(self.test_syspath, strict=False) mock_util.write_file.assert_has_calls([ mock.call(cset + '/stop', '1', mode=None), ]) @@ -528,10 +525,15 @@ self.assertTrue(mock_log.debug.called) self.assertTrue(mock_log.critical.called) + @mock.patch('curtin.block.clear_holders.is_swap_device') + @mock.patch('curtin.block.clear_holders.os.path.exists') @mock.patch('curtin.block.clear_holders.LOG') @mock.patch('curtin.block.clear_holders.block') - def test_clear_holders_wipe_superblock(self, mock_block, mock_log): + def test_clear_holders_wipe_superblock(self, mock_block, mock_log, + mock_os_path, mock_swap): """test clear_holders.wipe_superblock handles errors right""" + mock_swap.return_value = False + mock_os_path.return_value = False mock_block.sysfs_to_devpath.return_value = self.test_blockdev mock_block.is_extended_partition.return_value = True clear_holders.wipe_superblock(self.test_syspath) @@ -543,18 +545,21 @@ mock_block.wipe_volume.assert_called_with( self.test_blockdev, exclusive=True, mode='superblock') + @mock.patch('curtin.block.clear_holders.is_swap_device') @mock.patch('curtin.block.clear_holders.zfs') @mock.patch('curtin.block.clear_holders.LOG') @mock.patch('curtin.block.clear_holders.block') def test_clear_holders_wipe_superblock_zfs(self, mock_block, mock_log, - mock_zfs): + mock_zfs, mock_swap): """test clear_holders.wipe_superblock handles zfs member""" + mock_swap.return_value = False mock_block.sysfs_to_devpath.return_value = self.test_blockdev mock_block.is_extended_partition.return_value = True clear_holders.wipe_superblock(self.test_syspath) self.assertFalse(mock_block.wipe_volume.called) mock_block.is_extended_partition.return_value = False mock_block.is_zfs_member.return_value = True + mock_zfs.zfs_supported.return_value = True mock_zfs.device_to_poolname.return_value = 'fake_pool' mock_zfs.zpool_list.return_value = ['fake_pool'] clear_holders.wipe_superblock(self.test_syspath) @@ -563,6 +568,111 @@ mock_block.wipe_volume.assert_called_with( self.test_blockdev, exclusive=True, mode='superblock') + @mock.patch('curtin.block.clear_holders.is_swap_device') + @mock.patch('curtin.block.clear_holders.zfs') + @mock.patch('curtin.block.clear_holders.LOG') + @mock.patch('curtin.block.clear_holders.block') + def test_clear_holders_wipe_superblock_no_zfs(self, mock_block, mock_log, + mock_zfs, mock_swap): + """test clear_holders.wipe_superblock checks zfs supported""" + mock_swap.return_value = False + mock_block.sysfs_to_devpath.return_value = self.test_blockdev + mock_block.is_extended_partition.return_value = True + clear_holders.wipe_superblock(self.test_syspath) + self.assertFalse(mock_block.wipe_volume.called) + mock_block.is_extended_partition.return_value = False + mock_block.is_zfs_member.return_value = True + mock_zfs.zfs_supported.return_value = False + clear_holders.wipe_superblock(self.test_syspath) + mock_block.sysfs_to_devpath.assert_called_with(self.test_syspath) + self.assertEqual(1, mock_zfs.zfs_supported.call_count) + self.assertEqual(0, mock_block.is_zfs_member.call_count) + self.assertEqual(0, mock_zfs.device_to_poolname.call_count) + self.assertEqual(0, mock_zfs.zpool_list.call_count) + mock_block.wipe_volume.assert_called_with( + self.test_blockdev, exclusive=True, mode='superblock') + + @mock.patch('curtin.block.clear_holders.is_swap_device') + @mock.patch('curtin.block.clear_holders.zfs') + @mock.patch('curtin.block.clear_holders.LOG') + @mock.patch('curtin.block.clear_holders.block') + def 
test_clear_holders_wipe_superblock_zfs_no_utils(self, mock_block, + mock_log, mock_zfs, + mock_swap): + """test clear_holders.wipe_superblock handles missing zpool cmd""" + mock_swap.return_value = False + mock_block.sysfs_to_devpath.return_value = self.test_blockdev + mock_block.is_extended_partition.return_value = True + clear_holders.wipe_superblock(self.test_syspath) + self.assertFalse(mock_block.wipe_volume.called) + mock_block.is_extended_partition.return_value = False + mock_block.is_zfs_member.return_value = True + mock_zfs.zfs_supported.return_value = True + mock_zfs.device_to_poolname.return_value = 'fake_pool' + mock_zfs.zpool_list.return_value = ['fake_pool'] + mock_zfs.zpool_export.side_effect = [ + ProcessExecutionError(cmd=['zpool', 'export', 'fake_pool'], + stdout="", + stderr=("cannot open 'fake_pool': " + "no such pool"))] + clear_holders.wipe_superblock(self.test_syspath) + mock_block.sysfs_to_devpath.assert_called_with(self.test_syspath) + mock_block.wipe_volume.assert_called_with( + self.test_blockdev, exclusive=True, mode='superblock') + + @mock.patch('curtin.block.clear_holders.is_swap_device') + @mock.patch('curtin.block.clear_holders.time') + @mock.patch('curtin.block.clear_holders.LOG') + @mock.patch('curtin.block.clear_holders.block') + def test_clear_holders_wipe_superblock_rereads_pt(self, mock_block, + mock_log, m_time, + mock_swap): + """test clear_holders.wipe_superblock re-reads partition table""" + mock_swap.return_value = False + mock_block.sysfs_to_devpath.return_value = self.test_blockdev + mock_block.is_extended_partition.return_value = False + mock_block.is_zfs_member.return_value = False + mock_block.get_sysfs_partitions.side_effect = iter([ + ['p1', 'p2'], # has partitions before wipe + ['p1', 'p2'], # still has partitions after wipe + [], # partitions are now gone + ]) + clear_holders.wipe_superblock(self.test_syspath) + mock_block.sysfs_to_devpath.assert_called_with(self.test_syspath) + mock_block.wipe_volume.assert_called_with( + self.test_blockdev, exclusive=True, mode='superblock') + mock_block.get_sysfs_partitions.assert_has_calls( + [mock.call(self.test_syspath)] * 3) + mock_block.rescan_block_devices.assert_has_calls( + [mock.call(devices=[self.test_blockdev])] * 2) + + @mock.patch('curtin.block.clear_holders.is_swap_device') + @mock.patch('curtin.block.clear_holders.time') + @mock.patch('curtin.block.clear_holders.LOG') + @mock.patch('curtin.block.clear_holders.block') + def test_clear_holders_wipe_superblock_rereads_pt_oserr(self, mock_block, + mock_log, m_time, + mock_swap): + """test clear_holders.wipe_superblock re-reads ptable handles oserr""" + mock_swap.return_value = False + mock_block.sysfs_to_devpath.return_value = self.test_blockdev + mock_block.is_extended_partition.return_value = False + mock_block.is_zfs_member.return_value = False + mock_block.get_sysfs_partitions.side_effect = iter([ + ['p1', 'p2'], # has partitions before wipe + OSError('No sysfs path for partition'), + [], # partitions are now gone + ]) + clear_holders.wipe_superblock(self.test_syspath) + mock_block.sysfs_to_devpath.assert_called_with(self.test_syspath) + mock_block.wipe_volume.assert_called_with( + self.test_blockdev, exclusive=True, mode='superblock') + mock_block.get_sysfs_partitions.assert_has_calls( + [mock.call(self.test_syspath)] * 3) + mock_block.rescan_block_devices.assert_has_calls( + [mock.call(devices=[self.test_blockdev])] * 2) + self.assertEqual(1, m_time.sleep.call_count) + @mock.patch('curtin.block.clear_holders.LOG') 
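The wipe_superblock tests above drive the partition-reread retry loop by giving get_sysfs_partitions an iterable side_effect; mock consumes such an iterable one call at a time and raises any exception instances it contains. A minimal standalone illustration of that technique:

    import mock

    reader = mock.Mock(side_effect=[['p1', 'p2'], OSError('gone'), []])
    assert reader() == ['p1', 'p2']   # first call: partitions present
    try:
        reader()                      # second call: raises the OSError
    except OSError:
        pass
    assert reader() == []             # third call: partitions gone
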
@mock.patch('curtin.block.clear_holders.block') @mock.patch('curtin.block.clear_holders.os') @@ -716,29 +826,32 @@ def test_assert_clear(self, mock_gen_holders_tree, mock_syspath): mock_gen_holders_tree.return_value = self.example_holders_trees[0][0] mock_syspath.side_effect = lambda x: x - device = '/dev/null' + device = self.test_blockdev with self.assertRaises(OSError): clear_holders.assert_clear(device) mock_gen_holders_tree.assert_called_with(device) mock_gen_holders_tree.return_value = self.example_holders_trees[1][1] clear_holders.assert_clear(device) + @mock.patch('curtin.block.clear_holders.lvm') @mock.patch('curtin.block.clear_holders.zfs') @mock.patch('curtin.block.clear_holders.mdadm') @mock.patch('curtin.block.clear_holders.util') - def test_start_clear_holders_deps(self, mock_util, mock_mdadm, mock_zfs): + def test_start_clear_holders_deps(self, mock_util, mock_mdadm, mock_zfs, + mock_lvm): mock_zfs.zfs_supported.return_value = True clear_holders.start_clear_holders_deps() mock_mdadm.mdadm_assemble.assert_called_with( scan=True, ignore_errors=True) mock_util.load_kernel_module.assert_has_calls([ - mock.call('bcache'), mock.call('zfs')]) + mock.call('bcache')]) + @mock.patch('curtin.block.clear_holders.lvm') @mock.patch('curtin.block.clear_holders.zfs') @mock.patch('curtin.block.clear_holders.mdadm') @mock.patch('curtin.block.clear_holders.util') def test_start_clear_holders_deps_nozfs(self, mock_util, mock_mdadm, - mock_zfs): + mock_zfs, mock_lvm): """test that we skip zfs modprobe on unsupported platforms""" mock_zfs.zfs_supported.return_value = False clear_holders.start_clear_holders_deps() diff -Nru curtin-18.1-17-gae48e86f/tests/unittests/test_commands_apply_net.py curtin-18.2-10-g7afd77fa/tests/unittests/test_commands_apply_net.py --- curtin-18.1-17-gae48e86f/tests/unittests/test_commands_apply_net.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/unittests/test_commands_apply_net.py 2019-02-15 20:42:42.000000000 +0000 @@ -5,7 +5,7 @@ import os from curtin.commands import apply_net -from curtin import util +from curtin import paths from .helpers import CiTestCase @@ -153,8 +153,8 @@ prehookfn=prehookfn, posthookfn=posthookfn) - precfg = util.target_path(target, path=prehookfn) - postcfg = util.target_path(target, path=posthookfn) + precfg = paths.target_path(target, path=prehookfn) + postcfg = paths.target_path(target, path=posthookfn) precontents = apply_net.IFUPDOWN_IPV6_MTU_PRE_HOOK postcontents = apply_net.IFUPDOWN_IPV6_MTU_POST_HOOK @@ -231,7 +231,7 @@ apply_net._disable_ipv6_privacy_extensions(target) - cfg = util.target_path(target, path=path) + cfg = paths.target_path(target, path=path) mock_write.assert_called_with(cfg, expected_ipv6_priv_contents) @patch('curtin.util.load_file') @@ -259,7 +259,7 @@ apply_net._disable_ipv6_privacy_extensions(target, path=path) # source file not found - cfg = util.target_path(target, path) + cfg = paths.target_path(target, path) mock_ospath.exists.assert_called_with(cfg) self.assertEqual(0, mock_load.call_count) @@ -272,7 +272,7 @@ def test_remove_legacy_eth0(self, mock_ospath, mock_load, mock_del): target = 'mytarget' path = 'eth0.cfg' - cfg = util.target_path(target, path) + cfg = paths.target_path(target, path) legacy_eth0_contents = ( 'auto eth0\n' 'iface eth0 inet dhcp') @@ -330,7 +330,7 @@ apply_net._maybe_remove_legacy_eth0(target, path) # source file not found - cfg = util.target_path(target, path) + cfg = paths.target_path(target, path) mock_ospath.exists.assert_called_with(cfg) 
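The apply_net changes that follow are a mechanical rename: target_path moved from curtin.util into the new curtin.paths module. As exercised by these tests, its job is to join a (possibly absolute) path into the target root; a rough sketch of that behavior (an approximation written for this note, not the verified curtin source):

    import os

    def target_path(target, path=None):
        # no target, or '/', means operate on the live system
        if not target or target == '/':
            return path if path else '/'
        if not path:
            return target
        # drop the leading '/' so the join stays inside the target root
        return os.path.join(target, path.lstrip('/'))
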
self.assertEqual(0, mock_load.call_count) self.assertEqual(0, mock_del.call_count) diff -Nru curtin-18.1-17-gae48e86f/tests/unittests/test_commands_block_meta.py curtin-18.2-10-g7afd77fa/tests/unittests/test_commands_block_meta.py --- curtin-18.1-17-gae48e86f/tests/unittests/test_commands_block_meta.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/unittests/test_commands_block_meta.py 2019-02-15 20:42:42.000000000 +0000 @@ -7,7 +7,7 @@ import os from curtin.commands import block_meta -from curtin import util +from curtin import paths, util from .helpers import CiTestCase @@ -85,8 +85,9 @@ self.mock_block_get_root_device.assert_called_with([devname], paths=paths) + @patch('curtin.commands.block_meta.meta_clear') @patch('curtin.commands.block_meta.write_image_to_disk') - def test_meta_simple_calls_write_img(self, mock_write_image): + def test_meta_simple_calls_write_img(self, mock_write_image, mock_clear): devname = "fakedisk1p1" devnode = "/dev/" + devname sources = { @@ -104,9 +105,9 @@ mock_write_image.return_value = devname args = Namespace(target=self.target, devices=None, mode=None, - boot_fstype=None, fstype=None) + boot_fstype=None, fstype=None, force_mode=False) - block_meta.meta_simple(args) + block_meta.block_meta(args) mock_write_image.assert_called_with(sources.get('unittest'), devname) self.mock_subp.assert_has_calls( @@ -145,6 +146,8 @@ 'mock_block_get_volume_uuid') self.add_patch('curtin.block.zero_file_at_offsets', 'mock_block_zero_file') + self.add_patch('curtin.block.rescan_block_devices', + 'mock_block_rescan') self.target = "my_target" self.config = { @@ -224,9 +227,9 @@ part_offset = 2048 * 512 self.mock_block_zero_file.assert_called_with(disk_kname, [part_offset], exclusive=False) - self.mock_subp.assert_called_with(['parted', disk_kname, '--script', - 'mkpart', 'primary', '2048s', - '1001471s'], capture=True) + self.mock_subp.assert_has_calls( + [call(['parted', disk_kname, '--script', + 'mkpart', 'primary', '2048s', '1001471s'], capture=True)]) @patch('curtin.util.write_file') def test_mount_handler_defaults(self, mock_write_file): @@ -688,8 +691,9 @@ if target is None: target = self.tmp_dir() - expected = [a if a != "_T_MP" else util.target_path(target, fdata.path) - for a in expected] + expected = [ + a if a != "_T_MP" else paths.target_path(target, fdata.path) + for a in expected] with patch("curtin.util.subp") as m_subp: block_meta.mount_fstab_data(fdata, target=target) diff -Nru curtin-18.1-17-gae48e86f/tests/unittests/test_commands_collect_logs.py curtin-18.2-10-g7afd77fa/tests/unittests/test_commands_collect_logs.py --- curtin-18.1-17-gae48e86f/tests/unittests/test_commands_collect_logs.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/unittests/test_commands_collect_logs.py 2019-02-15 20:42:42.000000000 +0000 @@ -56,11 +56,12 @@ write_file(savefile, 'install:\n log_file: /tmp/savefile.log\n') packdir = self.tmp_path('configs', _dir=self.new_root) ensure_dir(packdir) - date = datetime.utcnow().strftime('%Y-%m-%d-%H-%M') unusedcfg = os.path.join(packdir, 'config-001.yaml') write_file(unusedcfg, 'install:\n log_file: /tmp/unused.log\n') - curtin_config = self.tmp_path( - 'curtin-logs-%s/curtin-config' % date, _dir=self.tmpdir) + utcnow = datetime.utcnow() + datestr = utcnow.strftime('%Y-%m-%d-%H-%M') + tardir = self.tmp_path('curtin-logs-%s' % datestr, _dir=self.tmpdir) + curtin_config = self.tmp_path('curtin-config', _dir=tardir) self.assertEqual( '/curtin/configs', collect_logs.CURTIN_PACK_CONFIG_DIR) @@ -72,8 
+73,10 @@ new=packdir, autospec=None) self.add_patch('shutil.rmtree', 'm_rmtree') with mock.patch('sys.stderr'): - with self.assertRaises(SystemExit) as context_manager: - collect_logs.collect_logs_main(FakeArgs('my.tar')) + with mock.patch('curtin.commands.collect_logs.datetime') as m_dt: + with self.assertRaises(SystemExit) as context_manager: + m_dt.utcnow.return_value = utcnow + collect_logs.collect_logs_main(FakeArgs('my.tar')) self.assertEqual('0', str(context_manager.exception)) expected_cfg = {'install': {'log_file': '/tmp/savefile.log'}} with open(curtin_config, 'r') as f: @@ -83,8 +86,9 @@ """collect_logs_main sources all configs from /curtin/configs dir.""" savefile = self.tmp_path('absentinstall.log', _dir=self.new_root) packdir = self.tmp_path('configs', _dir=self.new_root) - date = datetime.utcnow().strftime('%Y-%m-%d-%H-%M') - tardir = self.tmp_path('curtin-logs-%s' % date, _dir=self.tmpdir) + utcnow = datetime.utcnow() + datestr = utcnow.strftime('%Y-%m-%d-%H-%M') + tardir = self.tmp_path('curtin-logs-%s' % datestr, _dir=self.tmpdir) ensure_dir(packdir) cfg1 = os.path.join(packdir, 'config-001.yaml') cfg2 = os.path.join(packdir, 'config-002.yaml') @@ -101,8 +105,10 @@ new=packdir, autospec=None) self.add_patch('shutil.rmtree', 'm_rmtree') with mock.patch('sys.stderr'): - with self.assertRaises(SystemExit) as context_manager: - collect_logs.collect_logs_main(FakeArgs('my.tar')) + with mock.patch('curtin.commands.collect_logs.datetime') as m_dt: + with self.assertRaises(SystemExit) as context_manager: + m_dt.utcnow.return_value = utcnow + collect_logs.collect_logs_main(FakeArgs('my.tar')) self.assertEqual('0', str(context_manager.exception)) self.assertEqual(['config-001.yaml', 'config-002.yaml'], sorted(os.listdir(packdir))) @@ -232,8 +238,8 @@ def setUp(self): super(TestCreateTar, self).setUp() self.new_root = self.tmp_dir() - date = datetime.utcnow().strftime('%Y-%m-%d-%H-%M') - self.tardir = 'curtin-logs-%s' % date + self.utcnow = datetime.utcnow() + self.tardir = 'curtin-logs-%s' % self.utcnow.strftime('%Y-%m-%d-%H-%M') self.add_patch( 'curtin.commands.collect_logs._collect_system_info', 'm_sys_info') self.tmpdir = self.tmp_path('mytemp', _dir=self.new_root) @@ -247,7 +253,9 @@ self.add_patch('curtin.util.subp', 'mock_subp') self.mock_subp.return_value = ('', '') with mock.patch('sys.stderr'): - collect_logs.create_log_tarfile(tarfile, config={}) + with mock.patch('curtin.commands.collect_logs.datetime') as m_dt: + m_dt.utcnow.return_value = self.utcnow + collect_logs.create_log_tarfile(tarfile, config={}) self.assertIn( mock.call(['tar', '-cvf', tarfile, self.tardir], capture=True), @@ -264,7 +272,9 @@ self.add_patch('curtin.util.subp', 'mock_subp') self.mock_subp.return_value = ('', '') with mock.patch('sys.stderr'): - collect_logs.create_log_tarfile(tarfile, config={}) + with mock.patch('curtin.commands.collect_logs.datetime') as m_dt: + m_dt.utcnow.return_value = self.utcnow + collect_logs.create_log_tarfile(tarfile, config={}) self.assertIn( mock.call(['tar', '-cvf', tarfile, self.tardir], capture=True), @@ -311,7 +321,9 @@ 'maas': {'consumer_key': 'ckey', 'token_key': 'tkey', 'token_secret': 'tsecret'}}} with mock.patch('sys.stderr'): - collect_logs.create_log_tarfile(tarfile, config=config) + with mock.patch('curtin.commands.collect_logs.datetime') as m_dt: + m_dt.utcnow.return_value = self.utcnow + collect_logs.create_log_tarfile(tarfile, config=config) self.assertEqual( [mock.call(self.tardir, ['ckey', 'tkey', 'tsecret'])], self.m_redact.call_args_list) diff 
-Nru curtin-18.1-17-gae48e86f/tests/unittests/test_commands_extract.py curtin-18.2-10-g7afd77fa/tests/unittests/test_commands_extract.py --- curtin-18.1-17-gae48e86f/tests/unittests/test_commands_extract.py 1970-01-01 00:00:00.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/unittests/test_commands_extract.py 2019-02-15 20:42:42.000000000 +0000 @@ -0,0 +1,297 @@ +# This file is part of curtin. See LICENSE file for copyright and license info. +import os + +from .helpers import CiTestCase + +from curtin import util +from curtin.commands.extract import (extract_root_fsimage_url, + extract_root_layered_fsimage_url, + _get_image_stack) +from curtin.url_helper import UrlError + + +class TestExtractRootFsImageUrl(CiTestCase): + """Test extract_root_fsimage_url.""" + def _fake_download(self, url, path, retries=0): + self.downloads.append(os.path.abspath(path)) + with open(path, "w") as fp: + fp.write("fake content from " + url + "\n") + + def setUp(self): + super(TestExtractRootFsImageUrl, self).setUp() + self.downloads = [] + self.add_patch("curtin.commands.extract.url_helper.download", + "m_download", side_effect=self._fake_download) + self.add_patch("curtin.commands.extract._extract_root_fsimage", + "m__extract_root_fsimage") + + def test_relative_file_url(self): + """extract_root_fsimage_url supports relative file:// urls.""" + tmpd = self.tmp_dir() + target = self.tmp_path("target_d", tmpd) + startdir = os.getcwd() + fname = "my.img" + try: + os.chdir(tmpd) + util.write_file(fname, fname + " data\n") + extract_root_fsimage_url("file://" + fname, target) + finally: + os.chdir(startdir) + self.assertEqual(1, self.m__extract_root_fsimage.call_count) + self.assertEqual(0, self.m_download.call_count) + + def test_absolute_file_url(self): + """extract_root_fsimage_url supports absolute file:/// urls.""" + tmpd = self.tmp_dir() + target = self.tmp_path("target_d", tmpd) + fpath = self.tmp_path("my.img", tmpd) + util.write_file(fpath, fpath + " data\n") + extract_root_fsimage_url("file:///" + fpath, target) + self.assertEqual(1, self.m__extract_root_fsimage.call_count) + self.assertEqual(0, self.m_download.call_count) + + def test_http_url(self): + """extract_root_fsimage_url supports http:// urls.""" + tmpd = self.tmp_dir() + target = self.tmp_path("target_d", tmpd) + myurl = "http://bogus.example.com/my.img" + extract_root_fsimage_url(myurl, target) + self.assertEqual(1, self.m__extract_root_fsimage.call_count) + self.assertEqual(1, self.m_download.call_count) + # ensure the file got cleaned up. 
+ self.assertEqual(1, len(self.downloads)) + self.assertEqual([], [f for f in self.downloads if os.path.exists(f)]) + + def test_file_path_not_url(self): + """extract_root_fsimage_url supports normal file path without file:.""" + tmpd = self.tmp_dir() + target = self.tmp_path("target_d", tmpd) + fpath = self.tmp_path("my.img", tmpd) + util.write_file(fpath, fpath + " data\n") + extract_root_fsimage_url(os.path.abspath(fpath), target) + self.assertEqual(1, self.m__extract_root_fsimage.call_count) + self.assertEqual(0, self.m_download.call_count) + + +class TestExtractRootLayeredFsImageUrl(CiTestCase): + """Test extract_root_layared_fsimage_url.""" + def _fake_download(self, url, path, retries=0): + self.downloads.append(os.path.abspath(path)) + with open(path, "w") as fp: + fp.write("fake content from " + url + "\n") + + def setUp(self): + super(TestExtractRootLayeredFsImageUrl, self).setUp() + self.downloads = [] + self.add_patch("curtin.commands.extract.url_helper.download", + "m_download", side_effect=self._fake_download) + self.add_patch("curtin.commands.extract._extract_root_layered_fsimage", + "m__extract_root_layered_fsimage") + + def test_relative_local_file_single(self): + """extract_root_layered_fsimage_url supports relative file:// uris.""" + tmpd = self.tmp_dir() + target = self.tmp_path("target_d", tmpd) + startdir = os.getcwd() + fname = "my.img" + try: + os.chdir(tmpd) + util.write_file(fname, fname + " data\n") + extract_root_layered_fsimage_url("file://" + fname, target) + finally: + os.chdir(startdir) + self.assertEqual(1, self.m__extract_root_layered_fsimage.call_count) + self.assertEqual(0, self.m_download.call_count) + + def test_absolute_local_file_single(self): + """extract_root_layered_fsimage_url supports absolute file:/// uris.""" + tmpd = self.tmp_dir() + target = self.tmp_path("target_d", tmpd) + fpath = self.tmp_path("my.img", tmpd) + util.write_file(fpath, fpath + " data\n") + extract_root_layered_fsimage_url("file:///" + fpath, target) + self.assertEqual(1, self.m__extract_root_layered_fsimage.call_count) + self.assertEqual(0, self.m_download.call_count) + + def test_local_file_path_single(self): + """extract_root_layered_fsimage_url supports normal file path without + file:""" + tmpd = self.tmp_dir() + target = self.tmp_path("target_d", tmpd) + fpath = self.tmp_path("my.img", tmpd) + util.write_file(fpath, fpath + " data\n") + extract_root_layered_fsimage_url(os.path.abspath(fpath), target) + self.assertEqual(1, self.m__extract_root_layered_fsimage.call_count) + self.assertEqual(0, self.m_download.call_count) + + def test_local_file_path_multiple(self): + """extract_root_layered_fsimage_url supports normal hierarchy file + path""" + tmpd = self.tmp_dir() + target = self.tmp_path("target_d", tmpd) + arg = os.path.abspath(self.tmp_path("minimal.standard.debug.squashfs", + tmpd)) + for f in ["minimal.squashfs", + "minimal.standard.squashfs", + "minimal.standard.debug.squashfs"]: + fpath = self.tmp_path(f, tmpd) + util.write_file(fpath, fpath + " data\n") + extract_root_layered_fsimage_url(arg, target) + self.assertEqual(1, self.m__extract_root_layered_fsimage.call_count) + self.assertEqual(0, self.m_download.call_count) + + def test_local_file_path_multiple_one_missing(self): + """extract_root_layered_fsimage_url supports normal hierarchy file + path but intermediate layer missing""" + tmpd = self.tmp_dir() + target = self.tmp_path("target_d", tmpd) + arg = os.path.abspath(self.tmp_path("minimal.standard.debug.squashfs", + tmpd)) + for f in ["minimal.squashfs", 
+ "minimal.standard.debug.squashfs"]: + fpath = self.tmp_path(f, tmpd) + util.write_file(fpath, fpath + " data\n") + self.assertRaises(ValueError, extract_root_layered_fsimage_url, arg, + target) + self.assertEqual(0, self.m__extract_root_layered_fsimage.call_count) + self.assertEqual(0, self.m_download.call_count) + + def test_local_file_path_multiple_one_empty(self): + """extract_root_layered_fsimage_url supports normal hierarchy file + path but intermediate layer empty""" + tmpd = self.tmp_dir() + target = self.tmp_path("target_d", tmpd) + arg = os.path.abspath(self.tmp_path("minimal.standard.debug.squashfs", + tmpd)) + for f in ["minimal.squashfs", + "minimal.standard.squashfs" + "minimal.standard.debug.squashfs"]: + fpath = self.tmp_path(f, tmpd) + if f == "minimal.standard.squashfs": + util.write_file(fpath, "") + else: + util.write_file(fpath, fpath + " data\n") + self.assertRaises(ValueError, extract_root_layered_fsimage_url, arg, + target) + self.assertEqual(0, self.m__extract_root_layered_fsimage.call_count) + self.assertEqual(0, self.m_download.call_count) + + def test_remote_file_single(self): + """extract_root_layered_fsimage_url supports http:// urls.""" + tmpd = self.tmp_dir() + target = self.tmp_path("target_d", tmpd) + myurl = "http://example.io/minimal.squashfs" + extract_root_layered_fsimage_url(myurl, target) + self.assertEqual(1, self.m__extract_root_layered_fsimage.call_count) + self.assertEqual(1, self.m_download.call_count) + self.assertEqual("http://example.io/minimal.squashfs", + self.m_download.call_args_list[0][0][0]) + # ensure the file got cleaned up. + self.assertEqual([], [f for f in self.downloads if os.path.exists(f)]) + + def test_remote_file_multiple(self): + """extract_root_layered_fsimage_url supports normal hierarchy from + http:// urls.""" + tmpd = self.tmp_dir() + target = self.tmp_path("target_d", tmpd) + myurl = "http://example.io/minimal.standard.debug.squashfs" + extract_root_layered_fsimage_url(myurl, target) + self.assertEqual(1, self.m__extract_root_layered_fsimage.call_count) + self.assertEqual(3, self.m_download.call_count) + for i, image_url in enumerate(["minimal.squashfs", + "minimal.standard.squashfs", + "minimal.standard.debug.squashfs"]): + self.assertEqual("http://example.io/" + image_url, + self.m_download.call_args_list[i][0][0]) + # ensure the file got cleaned up. + self.assertEqual([], [f for f in self.downloads if os.path.exists(f)]) + + def test_remote_file_multiple_one_missing(self): + """extract_root_layered_fsimage_url supports normal hierarchy from + http:// urls with one layer missing.""" + + def fail_download_minimal_standard(url, path, retries=0): + if url == "http://example.io/minimal.standard.squashfs": + raise UrlError(url, 404, "Couldn't download", + None, None) + return self._fake_download(url, path, retries) + self.m_download.side_effect = fail_download_minimal_standard + + tmpd = self.tmp_dir() + target = self.tmp_path("target_d", tmpd) + myurl = "http://example.io/minimal.standard.debug.squashfs" + self.assertRaises(UrlError, extract_root_layered_fsimage_url, + myurl, target) + self.assertEqual(0, self.m__extract_root_layered_fsimage.call_count) + self.assertEqual(2, self.m_download.call_count) + for i, image_url in enumerate(["minimal.squashfs", + "minimal.standard.squashfs"]): + self.assertEqual("http://example.io/" + image_url, + self.m_download.call_args_list[i][0][0]) + # ensure the file got cleaned up. 
+ self.assertEqual([], [f for f in self.downloads if os.path.exists(f)]) + + def test_remote_file_multiple_one_empty(self): + """extract_root_layered_fsimage_url supports normal hierarchy from + http:// urls with one layer empty.""" + + def empty_download_minimal_standard(url, path, retries=0): + if url == "http://example.io/minimal.standard.squashfs": + self.downloads.append(os.path.abspath(path)) + with open(path, "w") as fp: + fp.write("") + return + return self._fake_download(url, path, retries) + self.m_download.side_effect = empty_download_minimal_standard + + tmpd = self.tmp_dir() + target = self.tmp_path("target_d", tmpd) + myurl = "http://example.io/minimal.standard.debug.squashfs" + self.assertRaises(ValueError, extract_root_layered_fsimage_url, + myurl, target) + self.assertEqual(0, self.m__extract_root_layered_fsimage.call_count) + self.assertEqual(3, self.m_download.call_count) + for i, image_url in enumerate(["minimal.squashfs", + "minimal.standard.squashfs", + "minimal.standard.debug.squashfs"]): + self.assertEqual("http://example.io/" + image_url, + self.m_download.call_args_list[i][0][0]) + # ensure the file got cleaned up. + self.assertEqual([], [f for f in self.downloads if os.path.exists(f)]) + + +class TestGetImageStack(CiTestCase): + """Test _get_image_stack.""" + + def test_get_image_stack(self): + """_get_image_stack returns a list of dependent fsimages + with the same extension""" + self.assertEqual( + ['/path/to/aa.fs', + '/path/to/aa.bbb.fs', + '/path/to/aa.bbb.cccc.fs'], + _get_image_stack("/path/to/aa.bbb.cccc.fs")) + + def test_get_image_stack_none(self): + """_get_image_stack returns an empty list for an empty path""" + self.assertEqual( + [], + _get_image_stack("")) + + def test_get_image_stack_no_dependency(self): + """_get_image_stack returns a list with a single element when fsimage + has no dependency""" + self.assertEqual( + ['/path/to/aa.fs'], + _get_image_stack("/path/to/aa.fs")) + + def test_get_image_stack_with_urls(self): + """_get_image_stack returns a list of dependent fsimages + with the same extension and base url""" + self.assertEqual( + ['https://path.com/to/aa.fs', + 'https://path.com/to/aa.bbb.fs', + 'https://path.com/to/aa.bbb.cccc.fs'], + _get_image_stack("https://path.com/to/aa.bbb.cccc.fs")) + +# vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/tests/unittests/test_commands_install.py curtin-18.2-10-g7afd77fa/tests/unittests/test_commands_install.py --- curtin-18.1-17-gae48e86f/tests/unittests/test_commands_install.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/unittests/test_commands_install.py 2019-02-15 20:42:42.000000000 +0000 @@ -189,3 +189,43 @@ self.assertEqual( [mock.call(self.logfile, target_dir, '/root/curtin-install.log')], self.m_copy_log.call_args_list) + + +class TestWorkingDir(CiTestCase): + def test_target_dir_may_exist(self): + """WorkingDir supports existing empty target directory.""" + tmp_d = self.tmp_dir() + work_d = self.tmp_path("work_d", tmp_d) + target_d = self.tmp_path("target_d", tmp_d) + ensure_dir(work_d) + ensure_dir(target_d) + with mock.patch("curtin.commands.install.tempfile.mkdtemp", + return_value=work_d) as m_mkdtemp: + workingdir = install.WorkingDir({'install': {'target': target_d}}) + self.assertEqual(1, m_mkdtemp.call_count) + self.assertEqual(target_d, workingdir.target) + self.assertEqual(target_d, workingdir.env().get('TARGET_MOUNT_POINT')) + + def test_target_dir_with_content_raises_error(self): + """WorkingDir raises ValueError on populated target_d.""" + 
tmp_d = self.tmp_dir() + work_d = self.tmp_path("work_d", tmp_d) + target_d = self.tmp_path("target_d", tmp_d) + ensure_dir(work_d) + ensure_dir(target_d) + write_file(self.tmp_path("somefile.txt", target_d), "sometext") + with mock.patch("curtin.commands.install.tempfile.mkdtemp", + return_value=work_d): + with self.assertRaises(ValueError): + install.WorkingDir({'install': {'target': target_d}}) + + def test_target_dir_by_default_is_under_workd(self): + """WorkingDir does not require target in config.""" + tmp_d = self.tmp_dir() + work_d = self.tmp_path("work_d", tmp_d) + ensure_dir(work_d) + with mock.patch("curtin.commands.install.tempfile.mkdtemp", + return_value=work_d) as m_mkdtemp: + wd = install.WorkingDir({}) + self.assertEqual(1, m_mkdtemp.call_count) + self.assertTrue(wd.target.startswith(work_d + "/")) diff -Nru curtin-18.1-17-gae48e86f/tests/unittests/test_commands_unmount.py curtin-18.2-10-g7afd77fa/tests/unittests/test_commands_unmount.py --- curtin-18.1-17-gae48e86f/tests/unittests/test_commands_unmount.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/unittests/test_commands_unmount.py 2019-02-15 20:42:42.000000000 +0000 @@ -1,6 +1,7 @@ # This file is part of curtin. See LICENSE file for copyright and license info. from curtin.commands import unmount +from curtin.util import FileMissingError from .helpers import CiTestCase import argparse @@ -24,11 +25,7 @@ def test_unmount_target_not_found_exception(self): """Check target path not found raises FileNotFoundError exception""" self.args.target = "catch-me-if-you-can" - try: - FileNotFoundError - except NameError: - FileNotFoundError = IOError - self.assertRaises(FileNotFoundError, unmount.unmount_main, self.args) + self.assertRaises(FileMissingError, unmount.unmount_main, self.args) @mock.patch('curtin.commands.unmount.os') def test_unmount_target_with_path(self, mock_os): diff -Nru curtin-18.1-17-gae48e86f/tests/unittests/test_curthooks.py curtin-18.2-10-g7afd77fa/tests/unittests/test_curthooks.py --- curtin-18.1-17-gae48e86f/tests/unittests/test_curthooks.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/unittests/test_curthooks.py 2019-02-15 20:42:42.000000000 +0000 @@ -4,6 +4,7 @@ from mock import call, patch, MagicMock from curtin.commands import curthooks +from curtin import distro from curtin import util from curtin import config from curtin.reporter import events @@ -47,8 +48,8 @@ class TestCurthooksInstallKernel(CiTestCase): def setUp(self): super(TestCurthooksInstallKernel, self).setUp() - self.add_patch('curtin.util.has_pkg_available', 'mock_haspkg') - self.add_patch('curtin.util.install_packages', 'mock_instpkg') + self.add_patch('curtin.distro.has_pkg_available', 'mock_haspkg') + self.add_patch('curtin.distro.install_packages', 'mock_instpkg') self.add_patch( 'curtin.commands.curthooks.get_flash_kernel_pkgs', 'mock_get_flash_kernel_pkgs') @@ -122,12 +123,21 @@ def setUp(self): super(TestInstallMissingPkgs, self).setUp() self.add_patch('platform.machine', 'mock_machine') - self.add_patch('curtin.util.get_installed_packages', + self.add_patch('curtin.util.get_architecture', 'mock_arch') + self.add_patch('curtin.distro.get_installed_packages', 'mock_get_installed_packages') self.add_patch('curtin.util.load_command_environment', 'mock_load_cmd_evn') self.add_patch('curtin.util.which', 'mock_which') - self.add_patch('curtin.util.install_packages', 'mock_install_packages') + self.add_patch('curtin.util.is_uefi_bootable', 'mock_uefi') + 
self.add_patch('curtin.distro.has_pkg_available', 'mock_haspkg') + self.add_patch('curtin.distro.install_packages', + 'mock_install_packages') + self.add_patch('curtin.distro.get_osfamily', 'mock_osfamily') + self.distro_family = distro.DISTROS.debian + self.mock_osfamily.return_value = self.distro_family + self.mock_uefi.return_value = False + self.mock_haspkg.return_value = False @patch.object(events, 'ReportEventStack') def test_install_packages_s390x(self, mock_events): @@ -137,8 +147,8 @@ target = "not-a-real-target" cfg = {} curthooks.install_missing_packages(cfg, target=target) - self.mock_install_packages.assert_called_with(['s390-tools'], - target=target) + self.mock_install_packages.assert_called_with( + ['s390-tools'], target=target, osfamily=self.distro_family) @patch.object(events, 'ReportEventStack') def test_install_packages_s390x_has_zipl(self, mock_events): @@ -159,6 +169,50 @@ curthooks.install_missing_packages(cfg, target=target) self.assertEqual([], self.mock_install_packages.call_args_list) + @patch.object(events, 'ReportEventStack') + def test_install_packages_on_uefi_amd64_shim_signed(self, mock_events): + arch = 'amd64' + self.mock_arch.return_value = arch + self.mock_machine.return_value = 'x86_64' + expected_pkgs = ['grub-efi-%s' % arch, + 'grub-efi-%s-signed' % arch, + 'shim-signed'] + self.mock_uefi.return_value = True + self.mock_haspkg.return_value = True + target = "not-a-real-target" + cfg = {} + curthooks.install_missing_packages(cfg, target=target) + self.mock_install_packages.assert_called_with( + expected_pkgs, target=target, osfamily=self.distro_family) + + @patch.object(events, 'ReportEventStack') + def test_install_packages_on_uefi_i386_noshim_nosigned(self, mock_events): + arch = 'i386' + self.mock_arch.return_value = arch + expected_pkgs = ['grub-efi-%s' % arch] + self.mock_machine.return_value = 'i686' + self.mock_uefi.return_value = True + target = "not-a-real-target" + cfg = {} + curthooks.install_missing_packages(cfg, target=target) + self.mock_install_packages.assert_called_with( + expected_pkgs, target=target, osfamily=self.distro_family) + + @patch.object(events, 'ReportEventStack') + def test_install_packages_on_uefi_arm64_nosign_noshim(self, mock_events): + arch = 'arm64' + self.mock_arch.return_value = arch + self.mock_machine.return_value = 'aarch64' + expected_pkgs = ['grub-efi-%s' % arch] + self.mock_uefi.return_value = True + target = "not-a-real-target" + cfg = {} + curthooks.install_missing_packages(cfg, target=target) + self.mock_install_packages.assert_called_with( + expected_pkgs, target=target, osfamily=self.distro_family) + class TestSetupZipl(CiTestCase): @@ -192,7 +246,8 @@ def setUp(self): super(TestSetupGrub, self).setUp() self.target = self.tmp_dir() - self.add_patch('curtin.util.lsb_release', 'mock_lsb_release') + self.distro_family = distro.DISTROS.debian + self.add_patch('curtin.distro.lsb_release', 'mock_lsb_release') self.mock_lsb_release.return_value = { 'codename': 'xenial', } @@ -219,11 +274,12 @@ 'grub_install_devices': ['/dev/vdb'] } self.subp_output.append(('', '')) - curthooks.setup_grub(cfg, self.target) + curthooks.setup_grub(cfg, self.target, osfamily=self.distro_family) self.assertEquals( ([ 'sh', '-c', 'exec "$0" "$@" 2>&1', - 'install-grub', self.target, '/dev/vdb'],), + 'install-grub', '--os-family=%s' % self.distro_family, + self.target, '/dev/vdb'],), self.mock_subp.call_args_list[0][0]) def 
test_uses_install_devices_in_grubcfg(self): @@ -233,11 +289,12 @@ }, } self.subp_output.append(('', '')) - curthooks.setup_grub(cfg, self.target) + curthooks.setup_grub(cfg, self.target, osfamily=self.distro_family) self.assertEquals( ([ 'sh', '-c', 'exec "$0" "$@" 2>&1', - 'install-grub', self.target, '/dev/vdb'],), + 'install-grub', '--os-family=%s' % self.distro_family, + self.target, '/dev/vdb'],), self.mock_subp.call_args_list[0][0]) def test_uses_grub_install_on_storage_config(self): @@ -255,11 +312,12 @@ }, } self.subp_output.append(('', '')) - curthooks.setup_grub(cfg, self.target) + curthooks.setup_grub(cfg, self.target, osfamily=self.distro_family) self.assertEquals( ([ 'sh', '-c', 'exec "$0" "$@" 2>&1', - 'install-grub', self.target, '/dev/vdb'],), + 'install-grub', '--os-family=%s' % self.distro_family, + self.target, '/dev/vdb'],), self.mock_subp.call_args_list[0][0]) def test_grub_install_installs_to_none_if_install_devices_None(self): @@ -269,62 +327,17 @@ }, } self.subp_output.append(('', '')) - curthooks.setup_grub(cfg, self.target) - self.assertEquals( - ([ - 'sh', '-c', 'exec "$0" "$@" 2>&1', - 'install-grub', self.target, 'none'],), - self.mock_subp.call_args_list[0][0]) - - def test_grub_install_uefi_installs_signed_packages_for_amd64(self): - self.add_patch('curtin.util.install_packages', 'mock_install') - self.add_patch('curtin.util.has_pkg_available', 'mock_haspkg') - self.mock_is_uefi_bootable.return_value = True - cfg = { - 'grub': { - 'install_devices': ['/dev/vdb'], - 'update_nvram': False, - }, - } - self.subp_output.append(('', '')) - self.mock_arch.return_value = 'amd64' - self.mock_haspkg.return_value = True - curthooks.setup_grub(cfg, self.target) - self.assertEquals( - (['grub-efi-amd64', 'grub-efi-amd64-signed', 'shim-signed'],), - self.mock_install.call_args_list[0][0]) - self.assertEquals( - ([ - 'sh', '-c', 'exec "$0" "$@" 2>&1', - 'install-grub', '--uefi', self.target, '/dev/vdb'],), - self.mock_subp.call_args_list[0][0]) - - def test_grub_install_uefi_installs_packages_for_arm64(self): - self.add_patch('curtin.util.install_packages', 'mock_install') - self.add_patch('curtin.util.has_pkg_available', 'mock_haspkg') - self.mock_is_uefi_bootable.return_value = True - cfg = { - 'grub': { - 'install_devices': ['/dev/vdb'], - 'update_nvram': False, - }, - } - self.subp_output.append(('', '')) - self.mock_arch.return_value = 'arm64' - self.mock_haspkg.return_value = False - curthooks.setup_grub(cfg, self.target) - self.assertEquals( - (['grub-efi-arm64'],), - self.mock_install.call_args_list[0][0]) + curthooks.setup_grub(cfg, self.target, osfamily=self.distro_family) self.assertEquals( ([ 'sh', '-c', 'exec "$0" "$@" 2>&1', - 'install-grub', '--uefi', self.target, '/dev/vdb'],), + 'install-grub', '--os-family=%s' % self.distro_family, + self.target, 'none'],), self.mock_subp.call_args_list[0][0]) def test_grub_install_uefi_updates_nvram_skips_remove_and_reorder(self): - self.add_patch('curtin.util.install_packages', 'mock_install') - self.add_patch('curtin.util.has_pkg_available', 'mock_haspkg') + self.add_patch('curtin.distro.install_packages', 'mock_install') + self.add_patch('curtin.distro.has_pkg_available', 'mock_haspkg') self.add_patch('curtin.util.get_efibootmgr', 'mock_efibootmgr') self.mock_is_uefi_bootable.return_value = True cfg = { @@ -347,17 +360,18 @@ } } } - curthooks.setup_grub(cfg, self.target) + curthooks.setup_grub(cfg, self.target, osfamily=self.distro_family) self.assertEquals( ([ 'sh', '-c', 'exec "$0" "$@" 2>&1', 'install-grub', 
'--uefi', '--update-nvram', + '--os-family=%s' % self.distro_family, self.target, '/dev/vdb'],), self.mock_subp.call_args_list[0][0]) def test_grub_install_uefi_updates_nvram_removes_old_loaders(self): - self.add_patch('curtin.util.install_packages', 'mock_install') - self.add_patch('curtin.util.has_pkg_available', 'mock_haspkg') + self.add_patch('curtin.distro.install_packages', 'mock_install') + self.add_patch('curtin.distro.has_pkg_available', 'mock_haspkg') self.add_patch('curtin.util.get_efibootmgr', 'mock_efibootmgr') self.mock_is_uefi_bootable.return_value = True cfg = { @@ -392,7 +406,7 @@ self.in_chroot_subp_output.append(('', '')) self.in_chroot_subp_output.append(('', '')) self.mock_haspkg.return_value = False - curthooks.setup_grub(cfg, self.target) + curthooks.setup_grub(cfg, self.target, osfamily=self.distro_family) self.assertEquals( ['efibootmgr', '-B', '-b'], self.mock_in_chroot_subp.call_args_list[0][0][0][:3]) @@ -406,8 +420,8 @@ self.mock_in_chroot_subp.call_args_list[1][0][0][3]])) def test_grub_install_uefi_updates_nvram_reorders_loaders(self): - self.add_patch('curtin.util.install_packages', 'mock_install') - self.add_patch('curtin.util.has_pkg_available', 'mock_haspkg') + self.add_patch('curtin.distro.install_packages', 'mock_install') + self.add_patch('curtin.distro.has_pkg_available', 'mock_haspkg') self.add_patch('curtin.util.get_efibootmgr', 'mock_efibootmgr') self.mock_is_uefi_bootable.return_value = True cfg = { @@ -436,7 +450,7 @@ } self.in_chroot_subp_output.append(('', '')) self.mock_haspkg.return_value = False - curthooks.setup_grub(cfg, self.target) + curthooks.setup_grub(cfg, self.target, osfamily=self.distro_family) self.assertEquals( (['efibootmgr', '-o', '0001,0000'],), self.mock_in_chroot_subp.call_args_list[0][0]) @@ -453,11 +467,11 @@ 'var/lib/snapd') util.ensure_dir(ubuntu_core_path) self.assertTrue(os.path.isdir(ubuntu_core_path)) - is_core = curthooks.target_is_ubuntu_core(self.target) + is_core = distro.is_ubuntu_core(self.target) self.assertTrue(is_core) def test_target_is_ubuntu_core_no_target(self): - is_core = curthooks.target_is_ubuntu_core(self.target) + is_core = distro.is_ubuntu_core(self.target) self.assertFalse(is_core) def test_target_is_ubuntu_core_noncore_target(self): @@ -465,7 +479,7 @@ non_core_path = os.path.join(self.target, 'curtin') util.ensure_dir(non_core_path) self.assertTrue(os.path.isdir(non_core_path)) - is_core = curthooks.target_is_ubuntu_core(self.target) + is_core = distro.is_ubuntu_core(self.target) self.assertFalse(is_core) @patch('curtin.util.write_file') @@ -736,15 +750,15 @@ ({'network': { 'version': 2, 'items': ('bridge',)}}, - ('bridge-utils',)), + ()), ({'network': { 'version': 2, 'items': ('vlan',)}}, - ('vlan',)), + ()), ({'network': { 'version': 2, 'items': ('vlan', 'bridge')}}, - ('vlan', 'bridge-utils')), + ()), )) def test_mixed_storage_v1_network_v2_detect(self): @@ -755,7 +769,7 @@ 'storage': { 'version': 1, 'items': ('raid', 'bcache', 'ext4')}}, - ('vlan', 'bridge-utils', 'mdadm', 'bcache-tools', 'e2fsprogs')), + ('mdadm', 'bcache-tools', 'e2fsprogs')), )) def test_invalid_version_in_config(self): @@ -782,7 +796,7 @@ dict((cfg[i]['path'], cfg[i]['content']) for i in cfg.keys()), dir2dict(tmpd, prefix=tmpd)) - @patch('curtin.commands.curthooks.futil.target_path') + @patch('curtin.commands.curthooks.paths.target_path') @patch('curtin.commands.curthooks.futil.write_finfo') def test_handle_write_files_finfo(self, mock_write_finfo, mock_tp): """ Validate that futils.write_files handles target_path 
correctly """ @@ -816,6 +830,8 @@ self.add_patch('curtin.util.write_file', 'mock_write') self.add_patch('curtin.commands.curthooks.get_maas_version', 'mock_maas_version') + self.add_patch('curtin.util.which', 'mock_which') + self.mock_which.return_value = '/usr/bin/pollinate' self.target = self.tmp_dir() def test_handle_pollinate_user_agent_disable(self): @@ -824,6 +840,15 @@ curthooks.handle_pollinate_user_agent(cfg, self.target) self.assertEqual(0, self.mock_curtin_version.call_count) self.assertEqual(0, self.mock_maas_version.call_count) + self.assertEqual(0, self.mock_write.call_count) + + def test_handle_pollinate_returns_if_no_pollinate_binary(self): + """ handle_pollinate_user_agent does nothing if no pollinate binary""" + self.mock_which.return_value = None + cfg = {'reporting': {'maas': {'endpoint': 'http://127.0.0.1/foo'}}} + curthooks.handle_pollinate_user_agent(cfg, self.target) + self.assertEqual(0, self.mock_curtin_version.call_count) + self.assertEqual(0, self.mock_maas_version.call_count) self.assertEqual(0, self.mock_write.call_count) def test_handle_pollinate_user_agent_default(self): diff -Nru curtin-18.1-17-gae48e86f/tests/unittests/test_distro.py curtin-18.2-10-g7afd77fa/tests/unittests/test_distro.py --- curtin-18.1-17-gae48e86f/tests/unittests/test_distro.py 1970-01-01 00:00:00.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/unittests/test_distro.py 2019-02-15 20:42:42.000000000 +0000 @@ -0,0 +1,373 @@ +# This file is part of curtin. See LICENSE file for copyright and license info. + +from unittest import skipIf +import mock +import sys + +from curtin import distro +from curtin import paths +from curtin import util +from .helpers import CiTestCase + + +class TestLsbRelease(CiTestCase): + + def setUp(self): + super(TestLsbRelease, self).setUp() + self._reset_cache() + + def _reset_cache(self): + keys = [k for k in distro._LSB_RELEASE.keys()] + for d in keys: + del distro._LSB_RELEASE[d] + + @mock.patch("curtin.distro.subp") + def test_lsb_release_functional(self, mock_subp): + output = '\n'.join([ + "Distributor ID: Ubuntu", + "Description: Ubuntu 14.04.2 LTS", + "Release: 14.04", + "Codename: trusty", + ]) + rdata = {'id': 'Ubuntu', 'description': 'Ubuntu 14.04.2 LTS', + 'codename': 'trusty', 'release': '14.04'} + + def fake_subp(cmd, capture=False, target=None): + return output, 'No LSB modules are available.' + + mock_subp.side_effect = fake_subp + found = distro.lsb_release() + mock_subp.assert_called_with( + ['lsb_release', '--all'], capture=True, target=None) + self.assertEqual(found, rdata) + + @mock.patch("curtin.distro.subp") + def test_lsb_release_unavailable(self, mock_subp): + def doraise(*args, **kwargs): + raise util.ProcessExecutionError("foo") + mock_subp.side_effect = doraise + + expected = {k: "UNAVAILABLE" for k in + ('id', 'description', 'codename', 'release')} + self.assertEqual(distro.lsb_release(), expected) + + +class TestParseDpkgVersion(CiTestCase): + """test parse_dpkg_version.""" + + def test_none_raises_type_error(self): + self.assertRaises(TypeError, distro.parse_dpkg_version, None) + + @skipIf(sys.version_info.major < 3, "python 2 bytes are strings.") + def test_bytes_raises_type_error(self): + self.assertRaises(TypeError, distro.parse_dpkg_version, b'1.2.3-0') + + def test_simple_native_package_version(self): + """dpkg versions must have a -. 
Native package versions lack one and must parse too.""" + self.assertEqual( + {'major': 2, 'minor': 28, 'micro': 0, 'extra': None, + 'raw': '2.28', 'upstream': '2.28', 'name': 'germinate', + 'semantic_version': 22800}, + distro.parse_dpkg_version('2.28', name='germinate')) + + def test_complex_native_package_version(self): + dver = '1.0.106ubuntu2+really1.0.97ubuntu1' + self.assertEqual( + {'major': 1, 'minor': 0, 'micro': 106, + 'extra': 'ubuntu2+really1.0.97ubuntu1', + 'raw': dver, 'upstream': dver, 'name': 'debootstrap', + 'semantic_version': 100106}, + distro.parse_dpkg_version(dver, name='debootstrap', + semx=(100000, 1000, 1))) + + def test_simple_valid(self): + self.assertEqual( + {'major': 1, 'minor': 2, 'micro': 3, 'extra': None, + 'raw': '1.2.3-0', 'upstream': '1.2.3', 'name': 'foo', + 'semantic_version': 10203}, + distro.parse_dpkg_version('1.2.3-0', name='foo')) + + def test_simple_valid_with_semx(self): + self.assertEqual( + {'major': 1, 'minor': 2, 'micro': 3, 'extra': None, + 'raw': '1.2.3-0', 'upstream': '1.2.3', + 'semantic_version': 123}, + distro.parse_dpkg_version('1.2.3-0', semx=(100, 10, 1))) + + def test_upstream_with_hyphen(self): + """upstream versions may have a hyphen.""" + cver = '18.2-14-g6d48d265-0ubuntu1' + self.assertEqual( + {'major': 18, 'minor': 2, 'micro': 0, 'extra': '-14-g6d48d265', + 'raw': cver, 'upstream': '18.2-14-g6d48d265', + 'name': 'cloud-init', 'semantic_version': 180200}, + distro.parse_dpkg_version(cver, name='cloud-init')) + + def test_upstream_with_plus(self): + """multipath-tools has a '+' in it.""" + mver = '0.5.0+git1.656f8865-5ubuntu2.5' + self.assertEqual( + {'major': 0, 'minor': 5, 'micro': 0, 'extra': '+git1.656f8865', + 'raw': mver, 'upstream': '0.5.0+git1.656f8865', + 'semantic_version': 500}, + distro.parse_dpkg_version(mver)) + + +class TestDistros(CiTestCase): + + def test_distro_names(self): + all_distros = list(distro.DISTROS) + for distro_name in distro.DISTRO_NAMES: + distro_enum = getattr(distro.DISTROS, distro_name) + self.assertIn(distro_enum, all_distros) + + def test_distro_names_unknown(self): + distro_name = "ImNotADistro" + self.assertNotIn(distro_name, distro.DISTRO_NAMES) + with self.assertRaises(AttributeError): + getattr(distro.DISTROS, distro_name) + + def test_distro_osfamily(self): + for variant, family in distro.OS_FAMILIES.items(): + self.assertNotEqual(variant, family) + self.assertIn(variant, distro.DISTROS) + for dname in family: + self.assertIn(dname, distro.DISTROS) + + def test_distro_osfamily_identity(self): + for family, variants in distro.OS_FAMILIES.items(): + self.assertIn(family, variants) + + def test_name_to_distro(self): + for distro_name in distro.DISTRO_NAMES: + dobj = distro.name_to_distro(distro_name) + self.assertEqual(dobj, getattr(distro.DISTROS, distro_name)) + + def test_name_to_distro_unknown_value(self): + with self.assertRaises(ValueError): + distro.name_to_distro(None) + + def test_name_to_distro_unknown_attr(self): + with self.assertRaises(ValueError): + distro.name_to_distro('NotADistro') + + def test_distros_unknown_attr(self): + with self.assertRaises(AttributeError): + distro.DISTROS.notadistro + + def test_distros_unknown_index(self): + with self.assertRaises(IndexError): + distro.DISTROS[len(distro.DISTROS)+1] + + +class TestDistroInfo(CiTestCase): + + def setUp(self): + super(TestDistroInfo, self).setUp() + self.add_patch('curtin.distro.os_release', 'mock_os_release') + + def test_get_distroinfo(self): + for distro_name in distro.DISTRO_NAMES: + self.mock_os_release.return_value = {'ID': 
distro_name} + variant = distro.name_to_distro(distro_name) + family = distro.DISTRO_TO_OSFAMILY[variant] + distro_info = distro.get_distroinfo() + self.assertEqual(variant, distro_info.variant) + self.assertEqual(family, distro_info.family) + + def test_get_distro(self): + for distro_name in distro.DISTRO_NAMES: + self.mock_os_release.return_value = {'ID': distro_name} + variant = distro.name_to_distro(distro_name) + distro_obj = distro.get_distro() + self.assertEqual(variant, distro_obj) + + def test_get_osfamily(self): + for distro_name in distro.DISTRO_NAMES: + self.mock_os_release.return_value = {'ID': distro_name} + variant = distro.name_to_distro(distro_name) + family = distro.DISTRO_TO_OSFAMILY[variant] + distro_obj = distro.get_osfamily() + self.assertEqual(family, distro_obj) + + +class TestDistroIdentity(CiTestCase): + + def setUp(self): + super(TestDistroIdentity, self).setUp() + self.add_patch('curtin.distro.os.path.exists', 'mock_os_path') + + def test_is_ubuntu_core(self): + for exists in [True, False]: + self.mock_os_path.return_value = exists + self.assertEqual(exists, distro.is_ubuntu_core()) + self.mock_os_path.assert_called_with('/system-data/var/lib/snapd') + + def test_is_centos(self): + for exists in [True, False]: + self.mock_os_path.return_value = exists + self.assertEqual(exists, distro.is_centos()) + self.mock_os_path.assert_called_with('/etc/centos-release') + + def test_is_rhel(self): + for exists in [True, False]: + self.mock_os_path.return_value = exists + self.assertEqual(exists, distro.is_rhel()) + self.mock_os_path.assert_called_with('/etc/redhat-release') + + +class TestYumInstall(CiTestCase): + + @mock.patch.object(util.ChrootableTarget, "__enter__", new=lambda a: a) + @mock.patch('curtin.util.subp') + def test_yum_install(self, m_subp): + pkglist = ['foobar', 'wark'] + target = 'mytarget' + mode = 'install' + expected_calls = [ + mock.call(['yum', '--assumeyes', '--quiet', 'install', + '--downloadonly', '--setopt=keepcache=1'] + pkglist, + env=None, retries=[1] * 10, + target=paths.target_path(target)), + mock.call(['yum', '--assumeyes', '--quiet', 'install', + '--cacheonly'] + pkglist, env=None, + target=paths.target_path(target)) + ] + + # call yum_install directly + self.assertFalse(m_subp.called) + distro.yum_install(mode, pkglist, target=target) + m_subp.assert_has_calls(expected_calls) + + # call yum_install through run_yum_command; expect the same calls + # so clear m_subp's call stack. + m_subp.reset_mock() + self.assertFalse(m_subp.called) + distro.run_yum_command('install', pkglist, target=target) + m_subp.assert_has_calls(expected_calls) + + # call yum_install through install_packages; expect the same calls + # so clear m_subp's call stack. 
+ m_subp.reset_mock() + self.assertFalse(m_subp.called) + osfamily = distro.DISTROS.redhat + distro.install_packages(pkglist, osfamily=osfamily, target=target) + m_subp.assert_has_calls(expected_calls) + + +class TestSystemUpgrade(CiTestCase): + + @mock.patch.object(util.ChrootableTarget, "__enter__", new=lambda a: a) + @mock.patch('curtin.util.subp') + def test_system_upgrade_redhat(self, m_subp): + """system_upgrade osfamily=redhat calls run_yum_command mode=upgrade""" + osfamily = distro.DISTROS.redhat + target = 'mytarget' + mode = 'upgrade' + pkglist = [] + expected_calls = [ + mock.call(['yum', '--assumeyes', '--quiet', mode, + '--downloadonly', '--setopt=keepcache=1'] + pkglist, + env=None, retries=[1] * 10, + target=paths.target_path(target)), + mock.call(['yum', '--assumeyes', '--quiet', mode, + '--cacheonly'] + pkglist, env=None, + target=paths.target_path(target)) + ] + + # first call run_yum_command directly and verify the expected calls + distro.run_yum_command(mode, pkglist, target=target) + m_subp.assert_has_calls(expected_calls) + + # call system_upgrade via osfamily; note that we expect the same calls + # but to prevent a false positive we clear m_subp's call stack. + m_subp.reset_mock() + self.assertFalse(m_subp.called) + distro.system_upgrade(target=target, osfamily=osfamily) + m_subp.assert_has_calls(expected_calls) + + @mock.patch.object(util.ChrootableTarget, "__enter__", new=lambda a: a) + @mock.patch('curtin.distro.os.environ') + @mock.patch('curtin.distro.apt_update') + @mock.patch('curtin.distro.which') + @mock.patch('curtin.util.subp') + def test_system_upgrade_debian(self, m_subp, m_which, m_apt_update, m_env): + """system_upgrade osfamily=debian calls run_apt_command mode=upgrade""" + osfamily = distro.DISTROS.debian + target = 'mytarget' + m_env.copy.return_value = {} + m_which.return_value = None + env = {'DEBIAN_FRONTEND': 'noninteractive'} + pkglist = [] + apt_base = [ + 'apt-get', '--quiet', '--assume-yes', + '--option=Dpkg::options::=--force-unsafe-io', + '--option=Dpkg::Options::=--force-confold'] + apt_cmd = apt_base + ['dist-upgrade'] + pkglist + auto_remove = apt_base + ['autoremove'] + expected_calls = [ + mock.call(apt_cmd, env=env, target=paths.target_path(target)), + mock.call(auto_remove, env=env, target=paths.target_path(target)), + ] + which_calls = [mock.call('eatmydata', target=target)] + apt_update_calls = [ + mock.call(target, env=env, comment=' '.join(apt_cmd))] + + distro.system_upgrade(target=target, osfamily=osfamily) + m_which.assert_has_calls(which_calls) + m_apt_update.assert_has_calls(apt_update_calls) + m_subp.assert_has_calls(expected_calls) + + +class TestHasPkgAvailable(CiTestCase): + + def setUp(self): + super(TestHasPkgAvailable, self).setUp() + self.package = 'foobar' + self.target = paths.target_path('mytarget') + + @mock.patch.object(util.ChrootableTarget, "__enter__", new=lambda a: a) + @mock.patch('curtin.distro.subp') + def test_has_pkg_available_debian(self, m_subp): + osfamily = distro.DISTROS.debian + m_subp.return_value = (self.package, '') + result = distro.has_pkg_available(self.package, self.target, osfamily) + self.assertTrue(result) + m_subp.assert_has_calls([mock.call(['apt-cache', 'pkgnames'], + capture=True, + target=self.target)]) + + @mock.patch.object(util.ChrootableTarget, "__enter__", new=lambda a: a) + @mock.patch('curtin.distro.subp') + def 
test_has_pkg_available_debian_returns_false_not_avail(self, m_subp): + pkg = 'wark' + osfamily = distro.DISTROS.debian + m_subp.return_value = (pkg, '') + result = distro.has_pkg_available(self.package, self.target, osfamily) + self.assertEqual(pkg == self.package, result) + m_subp.assert_has_calls([mock.call(['apt-cache', 'pkgnames'], + capture=True, + target=self.target)]) + + @mock.patch.object(util.ChrootableTarget, "__enter__", new=lambda a: a) + @mock.patch('curtin.distro.run_yum_command') + def test_has_pkg_available_redhat(self, m_subp): + osfamily = distro.DISTROS.redhat + m_subp.return_value = (self.package, '') + result = distro.has_pkg_available(self.package, self.target, osfamily) + self.assertTrue(result) + m_subp.assert_has_calls([mock.call('list', opts=['--cacheonly'])]) + + @mock.patch.object(util.ChrootableTarget, "__enter__", new=lambda a: a) + @mock.patch('curtin.distro.run_yum_command') + def test_has_pkg_available_redhat_returns_false_not_avail(self, m_subp): + pkg = 'wark' + osfamily = distro.DISTROS.redhat + m_subp.return_value = (pkg, '') + result = distro.has_pkg_available(self.package, self.target, osfamily) + self.assertEqual(pkg == self.package, result) + m_subp.assert_has_calls([mock.call('list', opts=['--cacheonly'])]) + +# vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/tests/unittests/test_feature.py curtin-18.2-10-g7afd77fa/tests/unittests/test_feature.py --- curtin-18.1-17-gae48e86f/tests/unittests/test_feature.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/unittests/test_feature.py 2019-02-15 20:42:42.000000000 +0000 @@ -21,4 +21,7 @@ def test_has_centos_apply_network_config(self): self.assertIn('CENTOS_APPLY_NETWORK_CONFIG', curtin.FEATURES) + def test_has_centos_curthook_support(self): + self.assertIn('CENTOS_CURTHOOK_SUPPORT', curtin.FEATURES) + # vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/tests/unittests/test_make_dname.py curtin-18.2-10-g7afd77fa/tests/unittests/test_make_dname.py --- curtin-18.1-17-gae48e86f/tests/unittests/test_make_dname.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/unittests/test_make_dname.py 2019-02-15 20:42:42.000000000 +0000 @@ -13,10 +13,16 @@ state = {'scratch': '/tmp/null'} rules_d = '/tmp/null/rules.d' rule_file = '/tmp/null/rules.d/{}.rules' + disk_serial = 'abcdefg' + disk_wwn = '0x1234567890' storage_config = { - 'disk1': {'type': 'disk', 'id': 'disk1', 'name': 'main_disk'}, + 'disk1': {'type': 'disk', 'id': 'disk1', 'name': 'main_disk', + 'serial': disk_serial}, + 'disk_noid': {'type': 'disk', 'id': 'disk_noid', 'name': 'main_disk'}, 'disk1p1': {'type': 'partition', 'id': 'disk1p1', 'device': 'disk1'}, - 'disk2': {'type': 'disk', 'id': 'disk2', + 'disk1p2': {'type': 'partition', 'id': 'disk1p2', 'device': 'disk1', + 'name': 'custom-partname'}, + 'disk2': {'type': 'disk', 'id': 'disk2', 'wwn': disk_wwn, 'name': 'in_valid/name!@#$% &*(+disk'}, 'disk2p1': {'type': 'partition', 'id': 'disk2p1', 'device': 'disk2'}, 'md_id': {'type': 'raid', 'id': 'md_id', 'name': 'mdadm_name'}, @@ -27,7 +33,8 @@ 'lpart2_id': {'type': 'lvm_partition', 'id': 'lpart2_id', 'name': 'lvm part/2', 'volgroup': 'lvol_id'}, 'bcache1_id': {'type': 'bcache', 'id': 'bcache1_id', - 'name': 'my-cached-data'} + 'name': 'my-cached-data'}, + 'iscsi1': {'type': 'disk', 'id': 'iscsi1', 'name': 'iscsi_disk1'} } bcache_super_show = { 'sb.version': '1 [backing device]', @@ -57,20 +64,37 @@ rule.append('SYMLINK+="disk/by-dname/{}"\n'.format(target)) return ', 
'.join(rule) + def _content(self, rules=[]): + return "\n".join(['# Written by curtin'] + rules) + + @mock.patch('curtin.commands.block_meta.udevadm_info') @mock.patch('curtin.commands.block_meta.LOG') @mock.patch('curtin.commands.block_meta.get_path_to_storage_volume') @mock.patch('curtin.commands.block_meta.util') - def test_make_dname_disk(self, mock_util, mock_get_path, mock_log): + def test_make_dname_disk(self, mock_util, mock_get_path, mock_log, + mock_udev): disk_ptuuid = str(uuid.uuid1()) mock_util.subp.side_effect = self._make_mock_subp_blkid( disk_ptuuid, self.disk_blkid) mock_util.load_command_environment.return_value = self.state - rule_identifiers = [ - ('DEVTYPE', 'disk'), - ('ID_PART_TABLE_UUID', disk_ptuuid) - ] + + rule_identifiers = [('ID_PART_TABLE_UUID', disk_ptuuid)] + id_rule_identifiers = [('ID_SERIAL', self.disk_serial)] + wwn_rule_identifiers = [('ID_WWN_WITH_EXTENSION', self.disk_wwn)] + + def _drule(devtype, match): + return [('DEVTYPE', devtype)] + [m for m in match] + + def drule(match): + return _drule('disk', match) + + def prule(match): + return _drule('partition', match) # simple run + mock_udev.side_effect = ( + [{'DEVTYPE': 'disk', 'ID_SERIAL': self.disk_serial}, + {'DEVTYPE': 'disk', 'ID_WWN_WITH_EXTENSION': self.disk_wwn}]) res_dname = 'main_disk' block_meta.make_dname('disk1', self.storage_config) mock_util.ensure_dir.assert_called_with(self.rules_d) @@ -78,7 +102,13 @@ self.assertFalse(mock_log.warning.called) mock_util.write_file.assert_called_with( self.rule_file.format(res_dname), - self._formatted_rule(rule_identifiers, res_dname)) + self._content( + [self._formatted_rule(drule(rule_identifiers), + res_dname), + self._formatted_rule(drule(id_rule_identifiers), + res_dname), + self._formatted_rule(prule(id_rule_identifiers), + "%s-part%%n" % res_dname)])) # run invalid dname res_dname = 'in_valid-name----------disk' @@ -86,12 +116,39 @@ self.assertTrue(mock_log.warning.called) mock_util.write_file.assert_called_with( self.rule_file.format(res_dname), - self._formatted_rule(rule_identifiers, res_dname)) + self._content( + [self._formatted_rule(drule(rule_identifiers), + res_dname), + self._formatted_rule(drule(wwn_rule_identifiers), + res_dname), + self._formatted_rule(prule(wwn_rule_identifiers), + "%s-part%%n" % res_dname)])) + + # iscsi disk with no config, but info returns serial and wwn + mock_udev.side_effect = ( + [{'DEVTYPE': 'disk', 'ID_SERIAL': self.disk_serial, + 'ID_WWN_WITH_EXTENSION': self.disk_wwn}]) + res_dname = 'iscsi_disk1' + block_meta.make_dname('iscsi1', self.storage_config) + mock_util.ensure_dir.assert_called_with(self.rules_d) + self.assertTrue(mock_log.debug.called) + both_rules = (wwn_rule_identifiers + id_rule_identifiers) + mock_util.write_file.assert_called_with( + self.rule_file.format(res_dname), + self._content( + [self._formatted_rule(drule(rule_identifiers), res_dname), + self._formatted_rule(drule(both_rules), res_dname), + self._formatted_rule(prule(both_rules), + "%s-part%%n" % res_dname)])) + @mock.patch('curtin.commands.block_meta.udevadm_info') @mock.patch('curtin.commands.block_meta.LOG') @mock.patch('curtin.commands.block_meta.get_path_to_storage_volume') @mock.patch('curtin.commands.block_meta.util') - def test_make_dname_failures(self, mock_util, mock_get_path, mock_log): + def test_make_dname_failures(self, mock_util, mock_get_path, mock_log, + mock_udev): + mock_udev.side_effect = ([{'DEVTYPE': 'disk'}, {'DEVTYPE': 'disk'}]) + mock_util.subp.side_effect = self._make_mock_subp_blkid( 
'', self.trusty_blkid) mock_util.load_command_environment.return_value = self.state @@ -99,10 +156,13 @@ warning_msg = "Can't find a uuid for volume: {}. Skipping dname." # disk with no PT_UUID - block_meta.make_dname('disk1', self.storage_config) - mock_log.warning.assert_called_with(warning_msg.format('disk1')) + disk = 'disk_noid' + block_meta.make_dname(disk, self.storage_config) + mock_log.warning.assert_called_with(warning_msg.format(disk)) self.assertFalse(mock_util.write_file.called) + mock_util.subp.side_effect = self._make_mock_subp_blkid( + '', self.trusty_blkid) # partition with no PART_UUID block_meta.make_dname('disk1p1', self.storage_config) mock_log.warning.assert_called_with(warning_msg.format('disk1p1')) @@ -123,22 +183,15 @@ ] # simple run - res_dname = 'main_disk-part1' - block_meta.make_dname('disk1p1', self.storage_config) + res_dname = 'custom-partname' + block_meta.make_dname('disk1p2', self.storage_config) mock_util.ensure_dir.assert_called_with(self.rules_d) self.assertTrue(mock_log.debug.called) self.assertFalse(mock_log.warning.called) mock_util.write_file.assert_called_with( self.rule_file.format(res_dname), - self._formatted_rule(rule_identifiers, res_dname)) - - # run invalid dname - res_dname = 'in_valid-name----------disk-part1' - block_meta.make_dname('disk2p1', self.storage_config) - self.assertTrue(mock_log.warning.called) - mock_util.write_file.assert_called_with( - self.rule_file.format(res_dname), - self._formatted_rule(rule_identifiers, res_dname)) + self._content( + [self._formatted_rule(rule_identifiers, res_dname)])) @mock.patch('curtin.commands.block_meta.mdadm') @mock.patch('curtin.commands.block_meta.LOG') @@ -158,7 +211,8 @@ self.assertFalse(mock_log.warning.called) mock_util.write_file.assert_called_with( self.rule_file.format(res_dname), - self._formatted_rule(rule_identifiers, res_dname)) + self._content( + [self._formatted_rule(rule_identifiers, res_dname)])) # invalid name res_dname = 'mdadm-name' @@ -166,7 +220,8 @@ self.assertTrue(mock_log.warning.called) mock_util.write_file.assert_called_with( self.rule_file.format(res_dname), - self._formatted_rule(rule_identifiers, res_dname)) + self._content( + [self._formatted_rule(rule_identifiers, res_dname)])) @mock.patch('curtin.commands.block_meta.LOG') @mock.patch('curtin.commands.block_meta.get_path_to_storage_volume') @@ -183,7 +238,8 @@ self.assertFalse(mock_log.warning.called) mock_util.write_file.assert_called_with( self.rule_file.format(res_dname), - self._formatted_rule(rule_identifiers, res_dname)) + self._content( + [self._formatted_rule(rule_identifiers, res_dname)])) # with invalid name res_dname = 'vg1-lvm-part-2' @@ -192,7 +248,8 @@ self.assertTrue(mock_log.warning.called) mock_util.write_file.assert_called_with( self.rule_file.format(res_dname), - self._formatted_rule(rule_identifiers, res_dname)) + self._content( + [self._formatted_rule(rule_identifiers, res_dname)])) @mock.patch('curtin.commands.block_meta.LOG') @mock.patch('curtin.commands.block_meta.bcache') @@ -213,7 +270,8 @@ self.assertFalse(mock_log.warning.called) mock_util.write_file.assert_called_with( self.rule_file.format(res_dname), - self._formatted_rule(rule_identifiers, res_dname)) + self._content( + [self._formatted_rule(rule_identifiers, res_dname)])) def test_sanitize_dname(self): unsanitized_to_sanitized = [ @@ -226,4 +284,123 @@ for (unsanitized, sanitized) in unsanitized_to_sanitized: self.assertEqual(block_meta.sanitize_dname(unsanitized), sanitized) + +class TestMakeDnameById(CiTestCase): + + 
@mock.patch('curtin.commands.block_meta.udevadm_info') + def test_bad_path(self, m_udev): + """test dname_byid raises ValueError on invalid path.""" + mypath = None + with self.assertRaises(ValueError): + block_meta.make_dname_byid(mypath) + + @mock.patch('curtin.commands.block_meta.udevadm_info') + def test_non_disk(self, m_udev): + """test dname_byid raises ValueError on DEVTYPE != 'disk'""" + mypath = "/dev/" + self.random_string() + m_udev.return_value = {'DEVTYPE': 'not_a_disk'} + with self.assertRaises(ValueError): + block_meta.make_dname_byid(mypath) + + @mock.patch('curtin.commands.block_meta.udevadm_info') + def test_disk_with_no_id_wwn(self, m_udev): + """test dname_byid returns an empty list on device without ID or WWN.""" + mypath = "/dev/" + self.random_string() + m_udev.return_value = {'DEVTYPE': 'disk'} + self.assertEqual([], block_meta.make_dname_byid(mypath)) + + @mock.patch('curtin.commands.block_meta.udevadm_info') + def test_udevinfo_not_called_if_info_provided(self, m_udev): + """dname_byid does not invoke udevadm_info if using info dict""" + myserial = self.random_string() + self.assertEqual( + [['ENV{ID_SERIAL}=="%s"' % myserial]], + block_meta.make_dname_byid( + self.random_string(), + info={'DEVTYPE': 'disk', 'ID_SERIAL': myserial})) + self.assertEqual(0, m_udev.call_count) + + @mock.patch('curtin.commands.block_meta.udevadm_info') + def test_udevinfo_called_if_info_not_provided(self, m_udev): + """dname_byid should call udevadm_info if no data given.""" + myserial = self.random_string() + mypath = "/dev/" + self.random_string() + m_udev.return_value = { + 'DEVTYPE': 'disk', 'ID_SERIAL': myserial, 'DEVNAME': mypath} + self.assertEqual( + [['ENV{ID_SERIAL}=="%s"' % myserial]], + block_meta.make_dname_byid(mypath)) + self.assertEqual( + [mock.call(path=mypath)], m_udev.call_args_list) + + def test_disk_with_only_serial(self): + """test dname_byid returns rules for ID_SERIAL""" + mypath = "/dev/" + self.random_string() + myserial = self.random_string() + info = {'DEVTYPE': 'disk', 'DEVNAME': mypath, 'ID_SERIAL': myserial} + self.assertEqual( + [['ENV{ID_SERIAL}=="%s"' % myserial]], + block_meta.make_dname_byid(mypath, info=info)) + + def test_disk_with_only_wwn(self): + """test dname_byid returns rules for ID_WWN_WITH_EXTENSION""" + mypath = "/dev/" + self.random_string() + mywwn = self.random_string() + info = {'DEVTYPE': 'disk', 'DEVNAME': mypath, + 'ID_WWN_WITH_EXTENSION': mywwn} + self.assertEqual( + [['ENV{ID_WWN_WITH_EXTENSION}=="%s"' % mywwn]], + block_meta.make_dname_byid(mypath, info=info)) + + def test_disk_with_both_id_wwn(self): + """test dname_byid returns rules with both ID_WWN_* and ID_SERIAL""" + mypath = "/dev/" + self.random_string() + myserial = self.random_string() + mywwn = self.random_string() + info = {'DEVTYPE': 'disk', 'ID_SERIAL': myserial, + 'ID_WWN_WITH_EXTENSION': mywwn, + 'DEVNAME': mypath} + self.assertEqual( + [[ + 'ENV{ID_WWN_WITH_EXTENSION}=="%s"' % mywwn, + 'ENV{ID_SERIAL}=="%s"' % myserial, + ]], + block_meta.make_dname_byid(mypath, info=info)) + + def test_disk_with_short_ids(self): + """test dname_byid returns rules w/ both ID_WWN and ID_SERIAL_SHORT.""" + mypath = "/dev/" + self.random_string() + myserial = self.random_string() + mywwn = self.random_string() + info = {'DEVTYPE': 'disk', 'ID_SERIAL_SHORT': myserial, + 'ID_WWN': mywwn, + 'DEVNAME': mypath} + self.assertEqual( + [[ + 'ENV{ID_WWN}=="%s"' % mywwn, + 'ENV{ID_SERIAL_SHORT}=="%s"' % myserial, + ]], + block_meta.make_dname_byid(mypath, info=info)) + + def 
test_disk_with_all_ids(self): + """test dname_byid returns rules w/ all WWN and SERIAL values.""" + mypath = "/dev/" + self.random_string() + myserial_short = self.random_string() + myserial = myserial_short + "_" + myserial_short + mywwn = self.random_string() + mywwn_ext = mywwn + "_" + mywwn + info = {'DEVTYPE': 'disk', 'ID_SERIAL_SHORT': myserial_short, + 'ID_SERIAL': myserial, + 'ID_WWN': mywwn, + 'ID_WWN_WITH_EXTENSION': mywwn_ext, + 'DEVNAME': mypath} + self.assertEqual( + [[ + 'ENV{ID_WWN_WITH_EXTENSION}=="%s"' % mywwn_ext, + 'ENV{ID_WWN}=="%s"' % mywwn, + 'ENV{ID_SERIAL}=="%s"' % myserial, + 'ENV{ID_SERIAL_SHORT}=="%s"' % myserial_short, + ]], + block_meta.make_dname_byid(mypath, info=info)) + # vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/tests/unittests/test_pack.py curtin-18.2-10-g7afd77fa/tests/unittests/test_pack.py --- curtin-18.1-17-gae48e86f/tests/unittests/test_pack.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/unittests/test_pack.py 2019-02-15 20:42:42.000000000 +0000 @@ -97,6 +97,8 @@ }} out, err, rc, log_contents = self.run_install(cfg) + print("out=%s" % out) + print("err=%s" % err) # the version string and users command output should be in output self.assertIn(version.version_string(), out) diff -Nru curtin-18.1-17-gae48e86f/tests/unittests/test_swap.py curtin-18.2-10-g7afd77fa/tests/unittests/test_swap.py --- curtin-18.1-17-gae48e86f/tests/unittests/test_swap.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/unittests/test_swap.py 2019-02-15 20:42:42.000000000 +0000 @@ -1,39 +1,53 @@ import mock from curtin import swap +from curtin import util from .helpers import CiTestCase class TestSwap(CiTestCase): - @mock.patch('curtin.swap.resource') - @mock.patch('curtin.swap.util') - def test_is_swap_device_read_offsets(self, mock_util, mock_resource): - """swap.is_swap_device() checks offsets based on system pagesize""" - path = '/mydev/dummydisk' + def _valid_swap_contents(self): + """Yields (pagesize, content) of things that should be considered + valid swap.""" # 4k and 64k page size for pagesize in [4096, 65536]: - magic_offset = pagesize - 10 - mock_resource.getpagesize.return_value = pagesize - swap.is_swap_device(path) - mock_util.load_file.assert_called_with(path, read_len=10, - offset=magic_offset, - decode=False) - - @mock.patch('curtin.swap.resource') - @mock.patch('curtin.swap.util') - def test_identify_swap_false(self, mock_util, mock_resource): - """swap.is_swap_device() returns false on non swap magic""" - mock_util.load_file.return_value = ( - b'\x00\x00c\x05\x00\x00\x11\x00\x19\x00') - is_swap = swap.is_swap_device('ignored') - self.assertFalse(is_swap) - - @mock.patch('curtin.swap.resource') - @mock.patch('curtin.swap.util') - def test_identify_swap_true(self, mock_util, mock_resource): - """swap.is_swap_device() returns true on swap magic strings""" - path = '/mydev/dummydisk' - for magic in [b'SWAPSPACE2', b'SWAP-SPACE']: - mock_util.load_file.return_value = magic - is_swap = swap.is_swap_device(path) - self.assertTrue(is_swap) + for magic in [b'SWAPSPACE2', b'SWAP-SPACE']: + # yield content of 2 pages to trigger/avoid fence-post errors + yield (pagesize, + ((pagesize - len(magic)) * b'\0' + + magic + pagesize * b'\0')) + + @mock.patch('curtin.swap.resource.getpagesize') + def test_is_swap_device_read_offsets(self, mock_getpagesize): + """swap.is_swap_device() correctly identifies swap content.""" + tmpd = self.tmp_dir() + for num, (pagesize, content) in 
enumerate(self._valid_swap_contents()): + path = self.tmp_path("swap-file-%02d" % num, tmpd) + util.write_file(path, content, omode="wb") + mock_getpagesize.return_value = pagesize + self.assertTrue(swap.is_swap_device(path)) + + @mock.patch('curtin.swap.resource.getpagesize', return_value=4096) + def test_identify_swap_false_if_tiny(self, mock_getpagesize): + """small files do not trip up is_swap_device().""" + path = self.tmp_path("tiny") + util.write_file(path, b'tinystuff', omode='wb') + self.assertFalse(swap.is_swap_device(path)) + + @mock.patch('curtin.swap.resource.getpagesize', return_value=4096) + def test_identify_zeros_are_swap(self, mock_getpagesize): + """swap.is_swap_device() returns false on all zeros""" + pagesize = mock_getpagesize() + path = self.tmp_path("notswap0") + util.write_file(path, pagesize * 2 * b'\0', omode="wb") + self.assertFalse(swap.is_swap_device(path)) + + @mock.patch('curtin.swap.resource.getpagesize', return_value=65536) + def test_identify_swap_false(self, mock_getpagesize): + """swap.is_swap_device() returns false on non swap content""" + pagesize = mock_getpagesize() + path = self.tmp_path("notswap1") + # this is just arbitrary content that is not swap content. + blob = b'\x00\x00c\x05\x00\x00\x11\x19' + util.write_file(path, int(pagesize * 2 / len(blob)) * blob, omode="wb") + self.assertFalse(swap.is_swap_device(path)) diff -Nru curtin-18.1-17-gae48e86f/tests/unittests/test_udev.py curtin-18.2-10-g7afd77fa/tests/unittests/test_udev.py --- curtin-18.1-17-gae48e86f/tests/unittests/test_udev.py 1970-01-01 00:00:00.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/unittests/test_udev.py 2019-02-15 20:42:42.000000000 +0000 @@ -0,0 +1,68 @@ +# This file is part of curtin. See LICENSE file for copyright and license info. 
+ +import mock + +from curtin import udev +from curtin import util +from .helpers import CiTestCase + + +UDEVADM_INFO_QUERY = """\ +DEVLINKS=/dev/disk/by-id/nvme-eui.0025388b710116a1 +DEVNAME=/dev/nvme0n1 +DEVPATH=/devices/pci0000:00/0000:00:1c.4/0000:05:00.0/nvme/nvme0/nvme0n1 +DEVTYPE=disk +ID_PART_TABLE_TYPE=gpt +ID_PART_TABLE_UUID=ea0b9ddc-a114-4e01-b257-750d86e3a944 +ID_SERIAL=SAMSUNG MZVLB1T0HALR-000L7_S3TPNY0JB00151 +ID_SERIAL_SHORT=S3TPNY0JB00151 +MAJOR=259 +MINOR=0 +SUBSYSTEM=block +TAGS=:systemd: +USEC_INITIALIZED=2026691 +""" + +INFO_DICT = { + 'DEVLINKS': ['/dev/disk/by-id/nvme-eui.0025388b710116a1'], + 'DEVNAME': '/dev/nvme0n1', + 'DEVPATH': + '/devices/pci0000:00/0000:00:1c.4/0000:05:00.0/nvme/nvme0/nvme0n1', + 'DEVTYPE': 'disk', + 'ID_PART_TABLE_TYPE': 'gpt', + 'ID_PART_TABLE_UUID': 'ea0b9ddc-a114-4e01-b257-750d86e3a944', + 'ID_SERIAL': 'SAMSUNG MZVLB1T0HALR-000L7_S3TPNY0JB00151', + 'ID_SERIAL_SHORT': 'S3TPNY0JB00151', + 'MAJOR': '259', + 'MINOR': '0', + 'SUBSYSTEM': 'block', + 'TAGS': ':systemd:', + 'USEC_INITIALIZED': '2026691' +} + + +class TestUdevInfo(CiTestCase): + + @mock.patch('curtin.util.subp') + def test_udevadm_info(self, m_subp): + """ udevadm_info returns dictionary for specified device """ + mypath = '/dev/nvme0n1' + m_subp.return_value = (UDEVADM_INFO_QUERY, "") + info = udev.udevadm_info(mypath) + m_subp.assert_called_with( + ['udevadm', 'info', '--query=property', mypath], capture=True) + self.assertEqual(sorted(INFO_DICT), sorted(info)) + + def test_udevadm_info_no_path(self): + """ udevadm_info raises ValueError for invalid path value""" + mypath = None + with self.assertRaises(ValueError): + udev.udevadm_info(mypath) + + @mock.patch('curtin.util.subp') + def test_udevadm_info_path_not_exists(self, m_subp): + """ udevadm_info raises ProcessExecutionError for a path that does not exist""" + mypath = self.random_string() + m_subp.side_effect = util.ProcessExecutionError() + with self.assertRaises(util.ProcessExecutionError): + udev.udevadm_info(mypath) diff -Nru curtin-18.1-17-gae48e86f/tests/unittests/test_url_helper.py curtin-18.2-10-g7afd77fa/tests/unittests/test_url_helper.py --- curtin-18.1-17-gae48e86f/tests/unittests/test_url_helper.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/unittests/test_url_helper.py 2019-02-15 20:42:42.000000000 +0000 @@ -10,19 +10,71 @@ class TestDownload(CiTestCase): + def setUp(self): + super(TestDownload, self).setUp() + self.tmpd = self.tmp_dir() + self.src_file = self.tmp_path("my-source", self.tmpd) + with open(self.src_file, "wb") as fp: + # Write just over 8192 bytes (the UrlReader buffer length) + fp.write(b':-)\n' * int(8200/4)) + self.target_file = self.tmp_path("my-target", self.tmpd) + def test_download_file_url(self): """Download a file to another file.""" - tmpd = self.tmp_dir() - src_file = self.tmp_path("my-source", tmpd) - target_file = self.tmp_path("my-target", tmpd) - - # Make sure we have > 8192 bytes in the file (buflen of UrlReader) - with open(src_file, "wb") as fp: - for line in range(0, 1024): - fp.write(b'Who are the people in your neighborhood.\n') + url_helper.download("file://" + self.src_file, self.target_file) + self.assertTrue(filecmp.cmp(self.src_file, self.target_file), + "Downloaded file differed from source file.") - url_helper.download("file://" + src_file, target_file) - self.assertTrue(filecmp.cmp(src_file, target_file), + @mock.patch('curtin.url_helper.UrlReader') + def test_download_file_url_retry(self, urlreader_mock): + """Retry downloading a file with server error (http 5xx).""" + 
urlreader_mock.side_effect = url_helper.UrlError(None, code=500) + + self.assertRaises(url_helper.UrlError, url_helper.download, + "file://" + self.src_file, self.target_file, + retries=3, retry_delay=0) + self.assertEquals(4, urlreader_mock.call_count, + "Didn't call UrlReader 4 times (retries=3)") + + @mock.patch('curtin.url_helper.UrlReader') + def test_download_file_url_no_retry(self, urlreader_mock): + """No retry by default on downloading a file with server error + (http 5xx).""" + urlreader_mock.side_effect = url_helper.UrlError(None, code=500) + + self.assertRaises(url_helper.UrlError, url_helper.download, + "file://" + self.src_file, self.target_file, + retry_delay=0) + self.assertEquals(1, urlreader_mock.call_count, + "Didn't call UrlReader once (retries=0)") + + @mock.patch('curtin.url_helper.UrlReader') + def test_download_file_url_no_retry_on_client_error(self, urlreader_mock): + """No retry by default on downloading a file with 4xx http error.""" + urlreader_mock.side_effect = url_helper.UrlError(None, code=404) + + self.assertRaises(url_helper.UrlError, url_helper.download, + "file://" + self.src_file, self.target_file, + retries=3, retry_delay=0) + self.assertEquals(1, urlreader_mock.call_count, + "Didn't call UrlReader once (400 class error)") + + def test_download_file_url_retry_then_success(self): + """Retry downloading a file with server error and then succeed.""" + url_reader = url_helper.UrlReader + + with mock.patch('curtin.url_helper.UrlReader') as urlreader_mock: + # return first an error, then, real object + def urlreader_download(url): + urlreader_mock.side_effect = url_reader + raise url_helper.UrlError(None, code=500) + urlreader_mock.side_effect = urlreader_download + url_helper.download("file://" + self.src_file, self.target_file, + retries=3, retry_delay=0) + self.assertEquals(2, urlreader_mock.call_count, + "Didn't call UrlReader twice (first failing," + "then success)") + self.assertTrue(filecmp.cmp(self.src_file, self.target_file), "Downloaded file differed from source file.") diff -Nru curtin-18.1-17-gae48e86f/tests/unittests/test_util.py curtin-18.2-10-g7afd77fa/tests/unittests/test_util.py --- curtin-18.1-17-gae48e86f/tests/unittests/test_util.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/unittests/test_util.py 2019-02-15 20:42:42.000000000 +0000 @@ -7,6 +7,7 @@ from textwrap import dedent from curtin import util +from curtin import paths from .helpers import CiTestCase, simple_mocked_open @@ -103,48 +104,6 @@ self.assertEqual(found, "/usr/bin2/fuzz") -class TestLsbRelease(CiTestCase): - - def setUp(self): - super(TestLsbRelease, self).setUp() - self._reset_cache() - - def _reset_cache(self): - keys = [k for k in util._LSB_RELEASE.keys()] - for d in keys: - del util._LSB_RELEASE[d] - - @mock.patch("curtin.util.subp") - def test_lsb_release_functional(self, mock_subp): - output = '\n'.join([ - "Distributor ID: Ubuntu", - "Description: Ubuntu 14.04.2 LTS", - "Release: 14.04", - "Codename: trusty", - ]) - rdata = {'id': 'Ubuntu', 'description': 'Ubuntu 14.04.2 LTS', - 'codename': 'trusty', 'release': '14.04'} - - def fake_subp(cmd, capture=False, target=None): - return output, 'No LSB modules are available.' 
- - mock_subp.side_effect = fake_subp - found = util.lsb_release() - mock_subp.assert_called_with( - ['lsb_release', '--all'], capture=True, target=None) - self.assertEqual(found, rdata) - - @mock.patch("curtin.util.subp") - def test_lsb_release_unavailable(self, mock_subp): - def doraise(*args, **kwargs): - raise util.ProcessExecutionError("foo") - mock_subp.side_effect = doraise - - expected = {k: "UNAVAILABLE" for k in - ('id', 'description', 'codename', 'release')} - self.assertEqual(util.lsb_release(), expected) - - class TestSubp(CiTestCase): stdin2err = ['bash', '-c', 'cat >&2'] @@ -260,7 +219,7 @@ data = b'hello world' (out, err) = util.subp(self.stdin2err, combine_capture=True, decode=False, data=data) - self.assertIsNone(err) + self.assertEqual(err, b'') self.assertEqual(out, data) def test_returns_none_if_no_capture(self): @@ -311,7 +270,7 @@ # if target is not provided or is /, chroot should not be used calls = m_popen.call_args_list popen_args, popen_kwargs = calls[-1] - target = util.target_path(kwargs.get('target', None)) + target = paths.target_path(kwargs.get('target', None)) unshcmd = self.mock_get_unshare_pid_args.return_value if target == "/": self.assertEqual(unshcmd + list(cmd), popen_args[0]) @@ -553,44 +512,44 @@ class TestTargetPath(CiTestCase): def test_target_empty_string(self): - self.assertEqual("/etc/passwd", util.target_path("", "/etc/passwd")) + self.assertEqual("/etc/passwd", paths.target_path("", "/etc/passwd")) def test_target_non_string_raises(self): - self.assertRaises(ValueError, util.target_path, False) - self.assertRaises(ValueError, util.target_path, 9) - self.assertRaises(ValueError, util.target_path, True) + self.assertRaises(ValueError, paths.target_path, False) + self.assertRaises(ValueError, paths.target_path, 9) + self.assertRaises(ValueError, paths.target_path, True) def test_lots_of_slashes_is_slash(self): - self.assertEqual("/", util.target_path("/")) - self.assertEqual("/", util.target_path("//")) - self.assertEqual("/", util.target_path("///")) - self.assertEqual("/", util.target_path("////")) + self.assertEqual("/", paths.target_path("/")) + self.assertEqual("/", paths.target_path("//")) + self.assertEqual("/", paths.target_path("///")) + self.assertEqual("/", paths.target_path("////")) def test_empty_string_is_slash(self): - self.assertEqual("/", util.target_path("")) + self.assertEqual("/", paths.target_path("")) def test_recognizes_relative(self): - self.assertEqual("/", util.target_path("/foo/../")) - self.assertEqual("/", util.target_path("/foo//bar/../../")) + self.assertEqual("/", paths.target_path("/foo/../")) + self.assertEqual("/", paths.target_path("/foo//bar/../../")) def test_no_path(self): - self.assertEqual("/my/target", util.target_path("/my/target")) + self.assertEqual("/my/target", paths.target_path("/my/target")) def test_no_target_no_path(self): - self.assertEqual("/", util.target_path(None)) + self.assertEqual("/", paths.target_path(None)) def test_no_target_with_path(self): - self.assertEqual("/my/path", util.target_path(None, "/my/path")) + self.assertEqual("/my/path", paths.target_path(None, "/my/path")) def test_trailing_slash(self): self.assertEqual("/my/target/my/path", - util.target_path("/my/target/", "/my/path")) + paths.target_path("/my/target/", "/my/path")) def test_bunch_of_slashes_in_path(self): self.assertEqual("/target/my/path/", - util.target_path("/target/", "//my/path/")) + paths.target_path("/target/", "//my/path/")) self.assertEqual("/target/my/path/", - util.target_path("/target/", 
"///my/path/")) + paths.target_path("/target/", "///my/path/")) class TestRunInChroot(CiTestCase): diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/helpers.py curtin-18.2-10-g7afd77fa/tests/vmtests/helpers.py --- curtin-18.1-17-gae48e86f/tests/vmtests/helpers.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/vmtests/helpers.py 2019-02-15 20:42:42.000000000 +0000 @@ -2,6 +2,7 @@ # This file is part of curtin. See LICENSE file for copyright and license info. import os +import re import subprocess import signal import threading @@ -86,7 +87,26 @@ return Command(cmd, signal).run(**kwargs) -def find_testcases(): +def find_testcases_by_attr(**kwargs): + class_match = set() + for test_case in find_testcases(**kwargs): + tc_name = str(test_case.__class__) + full_path = tc_name.split("'")[1].split(".") + class_name = full_path[-1] + if class_name in class_match: + continue + class_match.add(class_name) + filename = "/".join(full_path[0:-1]) + ".py" + yield "%s:%s" % (filename, class_name) + + +def _attr_match(pattern, value): + if not value: + return False + return re.match(pattern, str(value)) + + +def find_testcases(**kwargs): # Use the TestLoder to load all test cases defined within tests/vmtests/ # and figure out what distros and releases they are testing. Any tests # which are disabled will be excluded. @@ -97,12 +117,19 @@ root_dir = os.path.split(os.path.split(tests_dir)[0])[0] # Find all test modules defined in curtin/tests/vmtests/ module_test_suites = loader.discover(tests_dir, top_level_dir=root_dir) + filter_attrs = [attr for attr, value in kwargs.items() if value] for mts in module_test_suites: for class_test_suite in mts: for test_case in class_test_suite: # skip disabled tests if not getattr(test_case, '__test__', False): continue + # compare each filter attr with the specified value + tcmatch = [not _attr_match(kwargs[attr], + getattr(test_case, attr, False)) + for attr in filter_attrs] + if any(tcmatch): + continue yield test_case diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/image_sync.py curtin-18.2-10-g7afd77fa/tests/vmtests/image_sync.py --- curtin-18.1-17-gae48e86f/tests/vmtests/image_sync.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/vmtests/image_sync.py 2019-02-15 20:42:42.000000000 +0000 @@ -30,7 +30,9 @@ "http://maas.ubuntu.com/images/ephemeral-v3/daily/streams/v1/index.sjson") IMAGE_DIR = os.environ.get("IMAGE_DIR", "/srv/images") -KEYRING = '/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg' +KEYRING = os.environ.get( + 'IMAGE_SRC_KEYRING', + '/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg') ITEM_NAME_FILTERS = \ ['ftype~(boot-initrd|boot-kernel|root-tgz|squashfs)'] FORMAT_JSON = 'JSON' @@ -174,7 +176,7 @@ except IOError as e: if e.errno != errno.ENOENT: raise - except JSONDecodeError as e: + except JSONDecodeError: jsonfile = os.path.join(self.out_d, dpath) sys.stderr.write("Decode error in:\n " "content_id=%s\n " diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/__init__.py curtin-18.2-10-g7afd77fa/tests/vmtests/__init__.py --- curtin-18.1-17-gae48e86f/tests/vmtests/__init__.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/vmtests/__init__.py 2019-02-15 20:42:42.000000000 +0000 @@ -21,11 +21,13 @@ from .report_webhook_logger import CaptureReporting from curtin.commands.install import INSTALL_PASS_MSG +from curtin.commands.block_meta import sanitize_dname from .image_sync import query as imagesync_query from .image_sync import mirror as imagesync_mirror from .image_sync import 
(IMAGE_SRC_URL, IMAGE_DIR, ITEM_NAME_FILTERS) from .helpers import check_call, TimeoutExpired, ip_a_to_dict +from functools import wraps from unittest import TestCase, SkipTest try: @@ -36,6 +38,7 @@ DEFAULT_SSTREAM_OPTS = [ '--keyring=/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg'] +AUTO = "auto" DEVNULL = open(os.devnull, 'w') KEEP_DATA = {"pass": "none", "fail": "all"} CURTIN_VMTEST_IMAGE_SYNC = os.environ.get("CURTIN_VMTEST_IMAGE_SYNC", "1") @@ -51,10 +54,11 @@ REUSE_TOPDIR = bool(int(os.environ.get("CURTIN_VMTEST_REUSE_TOPDIR", 0))) ADD_REPOS = os.environ.get("CURTIN_VMTEST_ADD_REPOS", "") UPGRADE_PACKAGES = os.environ.get("CURTIN_VMTEST_UPGRADE_PACKAGES", "") -SYSTEM_UPGRADE = os.environ.get("CURTIN_VMTEST_SYSTEM_UPGRADE", "auto") +SYSTEM_UPGRADE = os.environ.get("CURTIN_VMTEST_SYSTEM_UPGRADE", AUTO) _UNSUPPORTED_UBUNTU = None +_DEVEL_UBUNTU = None _TOPDIR = None @@ -148,6 +152,11 @@ return logger +def is_false_env_value(val): + """These values found in environment are explicitly set to "false".""" + return str(val).lower() in ("false", "0", "") + + def get_env_var_bool(envname, default=False): """get a boolean environment variable. @@ -160,7 +169,7 @@ if val is None: return default - return val.lower() not in ("false", "0", "") + return not is_false_env_value(val) def sync_images(src_url, base_dir, filters, verbosity=0): @@ -342,18 +351,24 @@ def collect_output(self): logger.debug('extracting output disk') - subprocess.check_call(['tar', '-C', self.collect, '-xf', - self.output_disk], - stdout=DEVNULL, stderr=subprocess.STDOUT) - # make sure collect output dir is usable by non-root - subprocess.check_call(['chmod', '-R', 'u+rwX', self.collect], - stdout=DEVNULL, stderr=subprocess.STDOUT) + try: + subprocess.check_call(['tar', '-C', self.collect, '-xf', + self.output_disk], + stdout=DEVNULL, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + logger.error('Failed unpacking collect output: %s', e) + finally: + logger.debug('Fixing collect output dir permissions.') + # make sure collect output dir is usable by non-root + subprocess.check_call(['chmod', '-R', 'u+rwX', self.collect], + stdout=DEVNULL, stderr=subprocess.STDOUT) def skip_if_flag(flag): def decorator(func): """the name test_wrapper below has to start with test, or nose's filter will not run it.""" + @wraps(func) def test_wrapper(self, *args, **kwargs): val = getattr(self, flag, None) if val: @@ -364,18 +379,203 @@ return decorator +def skip_by_date(bugnum, fixby, removeby=None, skips=None, install=True): + """A decorator to skip a test or test class based on current date. + + @param bugnum: A bug number with which to identify the action. + @param fixby: string: A date string in the format of YYYY-MM-DD + tuple (YYYY,MM,DD). Raises SkipTest with testcase results until + the current date is >= fixby value. + @param removeby: string: A date string in the format of YYYY-MM-DD + or (YYYY,MM,DD). Raises RuntimeError with testcase results + if current date is >= removeby value. + Default value is 3 weeks past fixby value. + @param skips: list of test cases (string method names) to be skipped. + If None, all class testcases will be skipped per evaluation of + fixby and removeby params. This is ignored when decorating a method. + Example skips=["test_test1", "test_test2"] + @param install: boolean: When True, setUpClass raises skipTest + exception while "today" < "fixby" date. Useful for handling + testcases where the install or boot would hang. 
+ install parameter is ignored when decorating a method.""" + def as_dtdate(date): + if not isinstance(date, str): + return date + return datetime.date(*map(int, date.split("-"))) + + def as_str(dtdate): + return "%s-%02d-%02d" % (dtdate.year, dtdate.month, dtdate.day) + + d_today = datetime.date.today() + d_fixby = as_dtdate(fixby) + if removeby is None: + # give 3 weeks by default. + d_removeby = d_fixby + datetime.timedelta(21) + else: + d_removeby = as_dtdate(removeby) + + envname = 'CURTIN_VMTEST_SKIP_BY_DATE_BUGS' + envval = os.environ.get(envname, "") + skip_bugs = envval.replace(",", " ").split() + _setUpClass = "setUpClass" + + def decorator(test): + def decorate_callable(mycallable, classname=test.__name__): + funcname = mycallable.__name__ + name = '.'.join((classname, funcname)) + bmsg = ("skip_by_date({name}) LP: #{bug} " + "fixby={fixby} removeby={removeby}: ".format( + name=name, bug=bugnum, + fixby=as_str(d_fixby), removeby=as_str(d_removeby))) + + @wraps(mycallable) + def wrapper(*args, **kwargs): + skip_reason = None + if "*" in skip_bugs or bugnum in skip_bugs: + skip_reason = "skip per %s=%s" % (envname, envval) + elif (funcname == _setUpClass and + (not install and d_today < d_fixby)): + skip_reason = "skip per install=%s" % install + + if skip_reason: + logger.info(bmsg + skip_reason) + raise SkipTest(bmsg + skip_reason) + + if d_today < d_fixby: + tmsg = "NOT_YET_FIXBY" + elif d_today > d_removeby: + tmsg = "REMOVE_WORKAROUND" + else: + tmsg = "PAST_FIXBY" + + if funcname == _setUpClass: + logger.info(bmsg + "Running test (%s)", tmsg) + + exc = None + try: + mycallable(*args, **kwargs) + except SkipTest: + raise + except Exception as e: + exc = e + + if exc is None: + result = "Passed." + elif isinstance(exc, SkipTest): + result = "Skipped: %s" % exc + else: + result = "Failed: %s" % exc + + msg = (bmsg + "(%s)" % tmsg + " " + result) + + logger.info(msg) + if d_today < d_fixby: + if funcname != _setUpClass: + raise SkipTest(msg) + else: + # Expected fixed. + if d_today > d_removeby: + raise RuntimeError(msg) + elif exc: + raise RuntimeError(msg) + + return wrapper + + def decorate_test_methods(klass): + for attr in dir(klass): + attr_value = getattr(klass, attr) + if not hasattr(attr_value, "__call__"): + continue + + if attr != _setUpClass: + if not attr.startswith('test_'): + continue + elif not (skips is None or attr in skips): + continue + + setattr(klass, attr, + decorate_callable(attr_value, + classname=klass.__name__)) + return klass + + if isinstance(test, (type,)): + return decorate_test_methods(klass=test) + return decorate_callable(test) + + return decorator + + +DEFAULT_COLLECT_SCRIPTS = { + 'common': [textwrap.dedent(""" + cd OUTPUT_COLLECT_D + cp /etc/fstab ./fstab + cp -a /etc/udev/rules.d ./udev_rules.d + ifconfig -a | cat >ifconfig_a + ip a | cat >ip_a + cp -a /var/log/messages . + cp -a /var/log/syslog . + cp -a /var/log/cloud-init* . 
+ cp -a /var/lib/cloud ./var_lib_cloud + cp -a /run/cloud-init ./run_cloud-init + cp -a /proc/cmdline ./proc_cmdline + cp -a /proc/mounts ./proc_mounts + cp -a /proc/partitions ./proc_partitions + cp -a /proc/swaps ./proc-swaps + # ls -al /dev/disk/* + mkdir -p /dev/disk/by-dname + ls /dev/disk/by-dname/ | cat >ls_dname + ls -al /dev/disk/by-dname/ | cat >ls_al_bydname + ls -al /dev/disk/by-id/ | cat >ls_al_byid + ls -al /dev/disk/by-uuid/ | cat >ls_al_byuuid + ls -al /dev/disk/by-partuuid/ | cat >ls_al_bypartuuid + blkid -o export | cat >blkid.out + find /boot | cat > find_boot.out + if [ -e /sys/firmware/efi ]; then + efibootmgr -v | cat >efibootmgr.out; + fi + [ ! -d /etc/default/grub.d ] || + cp -a /etc/default/grub.d etc_default_grub_d + [ ! -f /etc/default/grub ] || cp /etc/default/grub etc_default_grub + + exit 0 + """)], + 'centos': [textwrap.dedent(""" + # XXX: command | cat >output is required for Centos under SELinux + # http://danwalsh.livejournal.com/22860.html + cd OUTPUT_COLLECT_D + rpm -qa | cat >rpm_qa + cp -a /etc/sysconfig/network-scripts . + rpm -q --queryformat '%{VERSION}\n' cloud-init |tee rpm_ci_version + rpm -E '%rhel' > rpm_dist_version_major + cp -a /etc/centos-release . + + exit 0 + """)], + 'ubuntu': [textwrap.dedent(""" + cd OUTPUT_COLLECT_D + dpkg-query --show \ + --showformat='${db:Status-Abbrev}\t${Package}\t${Version}\n' \ + > debian-packages.txt 2> debian-packages.txt.err + cp -av /etc/network/interfaces . + cp -av /etc/network/interfaces.d . + find /etc/network/interfaces.d > find_interfacesd + v="" + out=$(apt-config shell v Acquire::HTTP::Proxy) + eval "$out" + echo "$v" > apt-proxy + + exit 0 + """)] +} + + class VMBaseClass(TestCase): __test__ = False expected_failure = False arch_skip = [] boot_timeout = BOOT_TIMEOUT - collect_scripts = [textwrap.dedent(""" - cd OUTPUT_COLLECT_D - dpkg-query --show \ - --showformat='${db:Status-Abbrev}\t${Package}\t${Version}\n' \ - > debian-packages.txt 2> debian-packages.txt.err - cat /proc/swaps > proc-swaps - """)] + collect_scripts = [] + extra_collect_scripts = [] conf_file = "examples/tests/basic.yaml" nr_cpus = None dirty_disks = False @@ -399,6 +599,10 @@ conf_replace = {} uefi = False proxy = None + url_map = { + '/MAAS/api/version/': '2.0', + '/MAAS/api/2.0/version/': + json.dumps({'version': '2.5.0+curtin-vmtest'})} # these get set from base_vm_classes release = None @@ -450,8 +654,8 @@ else: target_img_verstr, target_ftypes = get_images( IMAGE_SRC_URL, IMAGE_DIR, - cls.target_distro if cls.target_distro else cls.distro, - cls.target_release if cls.target_release else cls.release, + cls.target_distro, + cls.target_release, cls.arch, subarch=cls.subarch if cls.subarch else None, kflavor=cls.kflavor if cls.kflavor else None, krel=cls.target_krel, sync=CURTIN_VMTEST_IMAGE_SYNC, @@ -566,61 +770,6 @@ return disks @classmethod - def skip_by_date(cls, bugnum, fixby, removeby=None, - release=None, name=None): - """Raise SkipTest with bug message until 'fixby'. - Raise RutimeError after removeby. - fixby and removeby support string (2018-01-01) or tuple(2018,01,01) - removeby defaults to 3 weeks after fixby. 
- """ - def as_dtdate(date): - if not isinstance(date, str): - return date - return datetime.date(*map(int, date.split("-"))) - - def as_str(dtdate): - return "%s-%02d-%02d" % (dtdate.year, dtdate.month, dtdate.day) - - if name is None: - name = cls.__name__ - - if release is None: - release = cls.release - - d_today = datetime.date.today() - d_fixby = as_dtdate(fixby) - if removeby is None: - # give 3 weeks by default. - d_removeby = d_fixby + datetime.timedelta(21) - else: - d_removeby = as_dtdate(removeby) - - bmsg = ("[{name}/{rel}] skip_by_date LP: #{bug} " - "fixby={fixby} removeby={removeby}: ".format( - name=name, rel=release, bug=bugnum, - fixby=as_str(d_fixby), removeby=as_str(d_removeby))) - - envname = 'CURTIN_VMTEST_SKIP_BY_DATE_BUGS' - envval = os.environ.get(envname, "") - skip_bugs = envval.replace(",", " ").split() - - result = None - msg = bmsg + "Not skipping." - if "*" in skip_bugs or bugnum in skip_bugs: - msg = bmsg + "skip per %s=%s" % (envname, envval) - result = SkipTest - elif d_today < d_fixby: - msg = bmsg + "skip (today < fixby)" - result = SkipTest - elif d_today > d_removeby: - msg = bmsg + "Remove workaround." - result = RuntimeError - - logger.info(msg) - if result: - raise result(msg) - - @classmethod def get_config_smp(cls): """Get number of cpus to use for guest""" @@ -670,6 +819,12 @@ return {'kernel': {'fallback-package': package}} @classmethod + def skip_by_date(cls, *args, **kwargs): + """skip_by_date wrapper. this way other modules do not have + to add an import of skip_by_date to start skipping.""" + return skip_by_date(*args, **kwargs) + + @classmethod def setUpClass(cls): # initialize global logger with class name to help make sense of # parallel vmtest runs which intermingle output. @@ -677,6 +832,13 @@ logger = _initialize_logging(name=cls.__name__) cls.logger = logger + req_attrs = ('target_distro', 'target_release', 'release', 'distro') + missing = [a for a in req_attrs if not getattr(cls, a)] + if missing: + raise ValueError( + "Class %s does not have required attrs set: %s" % + (cls.__name__, missing)) + if is_unsupported_ubuntu(cls.release): raise SkipTest('"%s" is unsupported release.' 
% cls.release) @@ -686,8 +848,28 @@ cls.arch) raise SkipTest(reason) + # assign default collect scripts + if not cls.collect_scripts: + cls.collect_scripts = ( + DEFAULT_COLLECT_SCRIPTS['common'] + + DEFAULT_COLLECT_SCRIPTS[cls.target_distro]) + else: + raise RuntimeError('cls collect scripts not empty: %s' % + cls.collect_scripts) + + # append extra from subclass + if cls.extra_collect_scripts: + cls.collect_scripts.extend(cls.extra_collect_scripts) + setup_start = time.time() - logger.info('Starting setup for testclass: {}'.format(cls.__name__)) + logger.info( + ('Starting setup for testclass: {__name__} ' + '({distro}/{release} -> ' + '{target_distro}/{target_release})').format( + **{k: getattr(cls, k) + for k in ('__name__', 'distro', 'release', + 'target_distro', 'target_release')})) + # set up tempdir cls.td = TempDir( name=cls.__name__, @@ -727,8 +909,14 @@ cmd.extend([ "--root-arg=root=%s" % root_url, "--append=overlayroot=tmpfs", + "--append=ro", ]) + # Avoid LP: #1797218 and make vms boot faster + cmd.extend(['--append=%s' % service for service in + ["systemd.mask=snapd.seeded.service", + "systemd.mask=snapd.service"]]) + # getting resolvconf configured is only fixed in bionic # the iscsi_auto handles resolvconf setup via call to # configure_networking in initramfs @@ -774,13 +962,21 @@ # build disk arguments disks = [] - sc = cls.load_conf_file() - storage_config = yaml.load(sc).get('storage', {}).get('config', {}) + storage_config = cls.get_class_storage_config() cls.disk_wwns = ["wwn=%s" % x.get('wwn') for x in storage_config if 'wwn' in x] - cls.disk_serials = ["serial=%s" % x.get('serial') - for x in storage_config if 'serial' in x] + cls.disk_serials = [] + cls.nvme_serials = [] + for x in storage_config: + if 'serial' in x: + serial = x.get('serial') + if serial.startswith('nvme'): + cls.nvme_serials.append("serial=%s" % serial) + else: + cls.disk_serials.append("serial=%s" % serial) + logger.info("disk_serials: %s", cls.disk_serials) + logger.info("nvme_serials: %s", cls.nvme_serials) target_disk = "{}:{}:{}:{}:".format(cls.td.target_disk, "", cls.disk_driver, @@ -812,18 +1008,21 @@ disks.extend(['--disk', extra_disk]) # build nvme disk args if needed + logger.info('nvme disks: %s', cls.nvme_disks) for (disk_no, disk_sz) in enumerate(cls.nvme_disks): dpath = os.path.join(cls.td.disks, 'nvme_disk_%d.img' % disk_no) + nvme_serial = cls.nvme_serials[disk_no] nvme_disk = '{}:{}:nvme:{}:{}'.format(dpath, disk_sz, cls.disk_block_size, - "serial=nvme-%d" % disk_no) + "%s" % nvme_serial) disks.extend(['--disk', nvme_disk]) # build iscsi disk args if needed disks.extend(cls.build_iscsi_disks()) + # class config file and vmtest defaults + configs = [cls.conf_file, 'examples/tests/vmtest_defaults.yaml'] # proxy config - configs = [cls.conf_file] cls.proxy = get_apt_proxy() if cls.proxy is not None and not cls.td.restored: proxy_config = os.path.join(cls.td.install, 'proxy.cfg') @@ -863,16 +1062,25 @@ add_repos = ADD_REPOS system_upgrade = SYSTEM_UPGRADE upgrade_packages = UPGRADE_PACKAGES + + if is_false_env_value(system_upgrade): + system_upgrade = False + if add_repos: - # enable if user has set a value here - if system_upgrade == "auto": - system_upgrade = True - logger.info('Adding apt repositories: %s', add_repos) - repo_cfg = os.path.join(cls.td.install, 'add_repos.cfg') - util.write_file(repo_cfg, - generate_repo_config(add_repos.split(","))) - configs.append(repo_cfg) - elif system_upgrade == "auto": + cfg_repos = generate_repo_config(add_repos.split(","), + 
release=cls.target_release)
+            if cfg_repos:
+                logger.info('Adding apt repositories: %s', add_repos)
+                # enable if user has set a value here
+                if system_upgrade == AUTO:
+                    system_upgrade = True
+                repo_cfg = os.path.join(cls.td.install, 'add_repos.cfg')
+                util.write_file(repo_cfg, cfg_repos)
+                configs.append(repo_cfg)
+            else:
+                logger.info("add_repos=%s processed to empty config.",
+                            add_repos)
+        elif system_upgrade == AUTO:
             system_upgrade = False
 
         if system_upgrade:
@@ -894,7 +1102,8 @@
 
         # set reporting logger
         cls.reporting_log = os.path.join(cls.td.logs, 'webhooks-events.json')
-        reporting_logger = CaptureReporting(cls.reporting_log)
+        reporting_logger = CaptureReporting(cls.reporting_log,
+                                            url_mapping=cls.url_map)
 
         # write reporting config
         reporting_config = os.path.join(cls.td.install, 'reporting.cfg')
@@ -1038,6 +1247,8 @@
             dpath = os.path.join(cls.td.disks, 'nvme_disk_%d.img' % disk_no)
             disk = '--disk={},driver={},format={},{}'.format(
                 dpath, disk_driver, TARGET_IMAGE_FORMAT, bsize_args)
+            if len(cls.nvme_serials):
+                disk += ",%s" % cls.nvme_serials[disk_no]
             nvme_disks.extend([disk])
 
         # unlike NVMe disks, we do not want to configure the iSCSI disks
@@ -1085,13 +1296,12 @@
             content = lfh.read().decode('utf-8', errors='replace')
             logger.debug('boot serial console output:\n%s', content)
         else:
-            logger.warn("Booting after install not produce"
-                        " a console log.")
+            logger.warn("Booting after install produced no console log.")
 
         # mount output disk
         try:
             cls.collect_output()
-        except Exception as e:
+        except Exception:
             cls.tearDownClass()
             raise
@@ -1146,7 +1356,7 @@
         """Return install uri and a list of files needed to be published."""
         # if release (install environment) is the same as target
         # target (thing to install) then install via cp://
-        if cls.target_release in (None, cls.release):
+        if cls.target_release == cls.release:
             install_src = "cp:///media/root-ro"
             return install_src, []
 
@@ -1331,17 +1541,18 @@
         fstab_entry = None
         for line in util.load_file(path).splitlines():
             for device, mntpoint in self.fstab_expected.items():
-                    if device in line:
-                        fstab_entry = line
-                        self.assertIsNotNone(fstab_entry)
-                        self.assertEqual(fstab_entry.split(' ')[1],
-                                         mntpoint)
+                if device in line:
+                    fstab_entry = line
+                    self.assertIsNotNone(fstab_entry)
+                    self.assertEqual(fstab_entry.split(' ')[1], mntpoint)
 
     @skip_if_flag('expected_failure')
     def test_dname(self, disk_to_check=None):
-        if "trusty" in [self.release, self.target_release]:
+        if self.target_release == "trusty":
             raise SkipTest(
                 "(LP: #1523037): dname does not work on trusty kernels")
+        if self.target_distro != "ubuntu":
+            raise SkipTest("dname not present in non-ubuntu releases")
 
         if not disk_to_check:
             disk_to_check = self.disk_to_check
@@ -1349,16 +1560,47 @@
             logger.debug('test_dname: no disks to check')
             return
         logger.debug('test_dname: checking disks: %s', disk_to_check)
-        path = self.collect_path("ls_dname")
-        if not os.path.exists(path):
-            logger.debug('test_dname: no "ls_dname" file: %s', path)
-            return
-        contents = util.load_file(path)
+        self.output_files_exist(["ls_dname"])
+
+        contents = self.load_collect_file("ls_dname")
         for diskname, part in self.disk_to_check:
-            if part is not 0:
+            if part != 0:
                 link = diskname + "-part" + str(part)
-                self.assertIn(link, contents)
-            self.assertIn(diskname, contents)
+                self.assertIn(link, contents.splitlines())
+            self.assertIn(diskname, contents.splitlines())
+
+    @skip_if_flag('expected_failure')
+    def test_dname_rules(self, disk_to_check=None):
+        if self.target_distro != "ubuntu":
+            raise SkipTest("dname not 
present in non-ubuntu releases") + + if not disk_to_check: + disk_to_check = self.disk_to_check + if disk_to_check is None: + logger.debug('test_dname_rules: no disks to check') + return + logger.debug('test_dname_rules: checking disks: %s', disk_to_check) + self.output_files_exist(["udev_rules.d"]) + + cfg = yaml.load(self.load_collect_file("root/curtin-install-cfg.yaml")) + stgcfg = cfg.get("storage", {}).get("config", []) + disks = [ent for ent in stgcfg if (ent.get('type') == 'disk' and + 'name' in ent)] + key_to_udev = { + 'serial': 'ID_SERIAL', + 'wwn': 'ID_WWN_WITH_EXTENSION', + } + for disk in disks: + dname_file = "%s.rules" % sanitize_dname(disk.get('name')) + contents = self.load_collect_file("udev_rules.d/%s" % dname_file) + for key, key_name in key_to_udev.items(): + value = disk.get(key) + if value: + # serials may include spaces, udev replaces them with # _ + if ' ' in value: + value = value.replace(' ', '_') + self.assertIn(key_name, contents) + self.assertIn(value, contents) @skip_if_flag('expected_failure') def test_reporting_data(self): @@ -1385,6 +1627,9 @@ """ Check that curtin has removed /etc/network/interfaces.d/eth0.cfg by examining the output of a find /etc/network > find_interfaces.d """ + # target_distro is set for non-ubuntu targets + if self.target_distro != 'ubuntu': + raise SkipTest("eni/ifupdown not present in non-ubuntu releases") interfacesd = self.load_collect_file("find_interfacesd") self.assertNotIn("/etc/network/interfaces.d/eth0.cfg", interfacesd.split("\n")) @@ -1393,12 +1638,35 @@ def test_installed_correct_kernel_package(self): """ Test curtin installs the correct kernel package. """ # target_distro is set for non-ubuntu targets - if self.target_distro is not None: + if self.target_distro != "ubuntu": raise SkipTest("Can't check non-ubuntu kernel packages") kpackage = self.get_kernel_package() self.assertIn(kpackage, self.debian_packages) + @skip_if_flag('expected_failure') + def test_clear_holders_ran(self): + """ Test curtin install runs block-meta/clear-holders. """ + if not self.has_storage_config(): + raise SkipTest("This test does not use storage config.") + + install_logfile = 'root/curtin-install.log' + self.output_files_exist([install_logfile]) + install_log = self.load_collect_file(install_logfile) + + # validate block-meta called clear-holders at least once + # We match both 'start' and 'finish' strings, so for each + # call we'll have 2 matches. 
+ clear_holders_re = 'cmd-install/.*cmd-block-meta/clear-holders' + events = re.findall(clear_holders_re, install_log) + print('Matched clear-holder events:\n%s' % events) + self.assertGreaterEqual(len(events), 2) + + # dirty_disks mode runs an early block-meta command which + # also runs clear-holders + if self.dirty_disks is True: + self.assertGreaterEqual(len(events), 4) + def run(self, result): super(VMBaseClass, self).run(result) self.record_result(result) @@ -1438,14 +1706,25 @@ self._debian_packages = pkgs return self._debian_packages + @classmethod + def get_class_storage_config(cls): + sc = cls.load_conf_file() + return yaml.load(sc).get('storage', {}).get('config', {}) + + def get_storage_config(self): + cfg = yaml.load(self.load_collect_file("root/curtin-install-cfg.yaml")) + return cfg.get("storage", {}).get("config", []) + + def has_storage_config(self): + '''check if test used storage config''' + return len(self.get_storage_config()) > 0 + @skip_if_flag('expected_failure') def test_swaps_used(self): - cfg = yaml.load(self.load_collect_file("root/curtin-install-cfg.yaml")) - stgcfg = cfg.get("storage", {}).get("config", []) - if len(stgcfg) == 0: - logger.debug("This test does not use storage config.") - return + if not self.has_storage_config(): + raise SkipTest("This test does not use storage config.") + stgcfg = self.get_storage_config() swap_ids = [d["id"] for d in stgcfg if d.get("fstype") == "swap"] swap_mounts = [d for d in stgcfg if d.get("device") in swap_ids] self.assertEqual(len(swap_ids), len(swap_mounts), @@ -1544,6 +1823,9 @@ def test_dname(self, disk_to_check=None): pass + def test_dname_rules(self, disk_to_check=None): + pass + def test_interfacesd_eth0_removed(self): pass @@ -1601,6 +1883,7 @@ install_fail = "({})".format("|".join([ 'Installation failed', 'ImportError: No module named.*', + 'Out of memory:', 'Unexpected error while running command', 'E: Unable to locate package.*', 'cloud-init.*: Traceback.*'])) @@ -1701,8 +1984,25 @@ exit 0; """) + collect_journal = textwrap.dedent("""#!/bin/sh -x + cd OUTPUT_COLLECT_D + # sync and flush journal before copying (if journald enabled) + target="./var-log-journal" + [ -e /var/log/journal ] && { + [ -e ${target} ] && { + echo "ERROR: ${target} dir exists; journal collected already"; + exit 1; + } + journalctl --sync --flush --rotate + cp -a /var/log/journal ${target} + gzip -9 ${target}/*/system*.journal + } + exit 0; + """) + + # add journal collection "last" before collect_post scripts = ([collect_prep] + [copy_rootdir] + collect_scripts + - [collect_post] + [failsafe_poweroff]) + [collect_journal] + [collect_post] + [failsafe_poweroff]) for part in scripts: if not part.startswith("#!"): @@ -1849,7 +2149,25 @@ return release in _UNSUPPORTED_UBUNTU -def generate_repo_config(repos): +def is_devel_release(release): + global _DEVEL_UBUNTU + if _DEVEL_UBUNTU is None: + udi = 'ubuntu-distro-info' + env = os.environ.get('_DEVEL_UBUNTU') + if env: + # allow it to be , or " " separated. + _DEVEL_UBUNTU = env.replace(",", " ").split() + elif util.which(udi): + _DEVEL_UBUNTU = util.subp( + [udi, '--devel'], capture=True)[0].splitlines() + else: + # no way to tell. + _DEVEL_UBUNTU = [] + + return release in _DEVEL_UBUNTU + + +def generate_repo_config(repos, release=None): """Generate apt yaml configuration to add specified repositories. @param repos: A list of add-apt-repository strings. @@ -1857,8 +2175,19 @@ pocket of a particular release. 
@returns: string: A yaml string """ - sources = {"add_repos_%02d" % idx: {'source': v} - for idx, v in enumerate(repos)} + sources = {} + for idx, v in enumerate(repos): + if v == 'proposed' and is_devel_release(release): + # lower case 'proposed' is magically handled by apt repo + # processing. But dev release's -proposed is "known broken". + # if you want to test development release, then use 'PROPOSED'. + continue + if v == 'PROPOSED': + v = 'proposed' + sources['add_repos_%02d' % idx] = {'source': v} + if not sources: + return None + return yaml.dump({'apt': {'sources': sources}}) diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/releases.py curtin-18.2-10-g7afd77fa/tests/vmtests/releases.py --- curtin-18.1-17-gae48e86f/tests/vmtests/releases.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/vmtests/releases.py 2019-02-15 20:42:42.000000000 +0000 @@ -11,6 +11,7 @@ class _UbuntuBase(_ReleaseBase): distro = "ubuntu" kflavor = "generic" + target_distro = "ubuntu" class _CentosFromUbuntuBase(_UbuntuBase): @@ -39,6 +40,7 @@ release = "xenial" # release for target target_release = "ubuntu-core-16" + target_distro = "ubuntu-core" class _Centos66FromXenialBase(_CentosFromUbuntuBase): @@ -59,59 +61,61 @@ class _TrustyBase(_UbuntuBase): release = "trusty" + target_release = "trusty" -class _TrustyHWEU(_UbuntuBase): - release = "trusty" +class _TrustyHWEU(_TrustyBase): krel = "utopic" -class _TrustyHWEV(_UbuntuBase): - release = "trusty" +class _TrustyHWEV(_TrustyBase): krel = "vivid" -class _TrustyHWEW(_UbuntuBase): - release = "trusty" +class _TrustyHWEW(_TrustyBase): krel = "wily" -class _TrustyHWEX(_UbuntuBase): - release = "trusty" +class _TrustyHWEX(_TrustyBase): krel = "xenial" -class _TrustyFromXenial(_UbuntuBase): +class _TrustyFromXenial(_TrustyBase): release = "xenial" target_release = "trusty" class _XenialBase(_UbuntuBase): release = "xenial" + target_release = "xenial" subarch = "ga-16.04" -class _XenialGA(_UbuntuBase): - release = "xenial" +class _XenialGA(_XenialBase): subarch = "ga-16.04" -class _XenialHWE(_UbuntuBase): - release = "xenial" +class _XenialHWE(_XenialBase): subarch = "hwe-16.04" -class _XenialEdge(_UbuntuBase): - release = "xenial" +class _XenialEdge(_XenialBase): subarch = "hwe-16.04-edge" -class _ArtfulBase(_UbuntuBase): - release = "artful" - - class _BionicBase(_UbuntuBase): release = "bionic" + target_release = "bionic" + + +class _CosmicBase(_UbuntuBase): + release = "cosmic" + target_release = "cosmic" + + +class _DiscoBase(_UbuntuBase): + release = "disco" + target_release = "disco" class _Releases(object): @@ -127,13 +131,14 @@ xenial_ga = _XenialGA xenial_hwe = _XenialHWE xenial_edge = _XenialEdge - artful = _ArtfulBase bionic = _BionicBase + cosmic = _CosmicBase + disco = _DiscoBase class _CentosReleases(object): - centos70fromxenial = _Centos70FromXenialBase - centos66fromxenial = _Centos66FromXenialBase + centos70_xenial = _Centos70FromXenialBase + centos66_xenial = _Centos66FromXenialBase class _UbuntuCoreReleases(object): diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/report_webhook_logger.py curtin-18.2-10-g7afd77fa/tests/vmtests/report_webhook_logger.py --- curtin-18.1-17-gae48e86f/tests/vmtests/report_webhook_logger.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/vmtests/report_webhook_logger.py 2019-02-15 20:42:42.000000000 +0000 @@ -76,7 +76,10 @@ self._message = None self.send_response(200) self.end_headers() - self.wfile.write(("content of %s\n" % self.path).encode('utf-8')) + if 
self.url_mapping and self.path in self.url_mapping: + self.wfile.write(self.url_mapping[self.path].encode('utf-8')) + else: + self.wfile.write(("content of %s\n" % self.path).encode('utf-8')) def do_POST(self): length = int(self.headers['Content-Length']) @@ -96,13 +99,14 @@ self.wfile.write(msg.encode('utf-8')) -def GenServerHandlerWithResultFile(file_path): +def GenServerHandlerWithResultFile(file_path, url_map): class ExtendedServerHandler(ServerHandler): result_log_file = file_path + url_mapping = url_map return ExtendedServerHandler -def get_httpd(port=None, result_file=None): +def get_httpd(port=None, result_file=None, url_mapping=None): # avoid 'Address already in use' after ctrl-c socketserver.TCPServer.allow_reuse_address = True @@ -111,7 +115,7 @@ port = 0 if result_file: - Handler = GenServerHandlerWithResultFile(result_file) + Handler = GenServerHandlerWithResultFile(result_file, url_mapping) else: Handler = ServerHandler httpd = HTTPServerV6(("::", port), Handler) @@ -143,10 +147,11 @@ class CaptureReporting: - def __init__(self, result_file): + def __init__(self, result_file, url_mapping=None): + self.url_mapping = url_mapping self.result_file = result_file self.httpd = get_httpd(result_file=self.result_file, - port=None) + port=None, url_mapping=self.url_mapping) self.httpd.server_activate() # socket.AF_INET6 returns # (host, port, flowinfo, scopeid) diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_apt_config_cmd.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_apt_config_cmd.py --- curtin-18.1-17-gae48e86f/tests/vmtests/test_apt_config_cmd.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_apt_config_cmd.py 2019-02-15 20:42:42.000000000 +0000 @@ -5,6 +5,7 @@ apt-config standalone command. """ import textwrap +import yaml from . import VMBaseClass from .releases import base_vm_classes as relbase @@ -12,19 +13,19 @@ class TestAptConfigCMD(VMBaseClass): """TestAptConfigCMD - test standalone command""" + test_type = 'config' conf_file = "examples/tests/apt_config_command.yaml" interactive = False extra_disks = [] fstab_expected = {} disk_to_check = [] - collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" + extra_collect_scripts = [textwrap.dedent(""" cd OUTPUT_COLLECT_D - cat /etc/fstab > fstab - ls /dev/disk/by-dname > ls_dname - find /etc/network/interfaces.d > find_interfacesd cp /etc/apt/sources.list.d/curtin-dev-ubuntu-test-archive-*.list . cp /etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg . 
apt-cache policy | grep proposed > proposed-enabled + + exit 0 """)] def test_cmd_proposed_enabled(self): @@ -46,8 +47,10 @@ def test_cmd_preserve_source(self): """check if cloud-init was prevented from overwriting""" self.output_files_exist(["curtin-preserve-sources.cfg"]) - self.check_file_regex("curtin-preserve-sources.cfg", - "apt_preserve_sources_list.*true") + # For earlier than xenial 'apt_preserve_sources_list' is expected + self.assertEqual( + {'apt': {'preserve_sources_list': True}}, + yaml.load(self.load_collect_file("curtin-preserve-sources.cfg"))) class XenialTestAptConfigCMDCMD(relbase.xenial, TestAptConfigCMD): @@ -57,11 +60,15 @@ __test__ = True -class ArtfulTestAptConfigCMDCMD(relbase.artful, TestAptConfigCMD): +class BionicTestAptConfigCMDCMD(relbase.bionic, TestAptConfigCMD): __test__ = True -class BionicTestAptConfigCMDCMD(relbase.bionic, TestAptConfigCMD): +class CosmicTestAptConfigCMDCMD(relbase.cosmic, TestAptConfigCMD): + __test__ = True + + +class DiscoTestAptConfigCMDCMD(relbase.disco, TestAptConfigCMD): __test__ = True # vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_apt_source.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_apt_source.py --- curtin-18.1-17-gae48e86f/tests/vmtests/test_apt_source.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_apt_source.py 2019-02-15 20:42:42.000000000 +0000 @@ -4,6 +4,7 @@ Collection of tests for the apt configuration features """ import textwrap +import yaml from . import VMBaseClass from .releases import base_vm_classes as relbase @@ -14,15 +15,13 @@ class TestAptSrcAbs(VMBaseClass): """TestAptSrcAbs - Basic tests for apt features of curtin""" + test_type = 'config' interactive = False extra_disks = [] fstab_expected = {} disk_to_check = [] - collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" + extra_collect_scripts = [textwrap.dedent(""" cd OUTPUT_COLLECT_D - cat /etc/fstab > fstab - ls /dev/disk/by-dname > ls_dname - find /etc/network/interfaces.d > find_interfacesd apt-key list "F430BBA5" > keyid-F430BBA5 apt-key list "0165013E" > keyppa-0165013E apt-key list "F470A0AC" > keylongid-F470A0AC @@ -36,6 +35,8 @@ apt-config dump | grep Retries > aptconf cp /etc/apt/sources.list sources.list cp /etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg . 
+ + exit 0 """)] mirror = "http://us.archive.ubuntu.com/ubuntu" secmirror = "http://security.ubuntu.com/ubuntu" @@ -63,8 +64,10 @@ def test_preserve_source(self): """test_preserve_source - no clobbering sources.list by cloud-init""" self.output_files_exist(["curtin-preserve-sources.cfg"]) - self.check_file_regex("curtin-preserve-sources.cfg", - "apt_preserve_sources_list.*true") + # For earlier than xenial 'apt_preserve_sources_list' is expected + self.assertEqual( + {'apt': {'preserve_sources_list': True}}, + yaml.load(self.load_collect_file("curtin-preserve-sources.cfg"))) def test_source_files(self): """test_source_files - Check generated .lists for correct content""" diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_basic.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_basic.py --- curtin-18.1-17-gae48e86f/tests/vmtests/test_basic.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_basic.py 2019-02-15 20:42:42.000000000 +0000 @@ -4,38 +4,46 @@ VMBaseClass, get_apt_proxy) from .releases import base_vm_classes as relbase +from .releases import centos_base_vm_classes as centos_relbase import textwrap from unittest import SkipTest class TestBasicAbs(VMBaseClass): + test_type = 'storage' interactive = False nr_cpus = 2 dirty_disks = True conf_file = "examples/tests/basic.yaml" extra_disks = ['128G', '128G', '4G'] - nvme_disks = ['4G'] - disk_to_check = [('main_disk_with_in---valid--dname', 1), - ('main_disk_with_in---valid--dname', 2)] - collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" + disk_to_check = [('btrfs_volume', 0), + ('main_disk_with_in---valid--dname', 0), + ('main_disk_with_in---valid--dname', 1), + ('main_disk_with_in---valid--dname', 2), + ('pnum_disk', 0), + ('pnum_disk', 1), + ('pnum_disk', 10), + ('sparedisk', 0)] + extra_collect_scripts = [textwrap.dedent(""" cd OUTPUT_COLLECT_D - blkid -o export /dev/vda > blkid_output_vda - blkid -o export /dev/vda1 > blkid_output_vda1 - blkid -o export /dev/vda2 > blkid_output_vda2 - f="btrfs_uuid_vdd" - btrfs-debug-tree -r /dev/vdd | awk '/^uuid/ {print $2}' | grep "-" > $f - cat /proc/partitions > proc_partitions - ls -al /dev/disk/by-uuid/ > ls_uuid - cat /etc/fstab > fstab - mkdir -p /dev/disk/by-dname - ls /dev/disk/by-dname/ > ls_dname - find /etc/network/interfaces.d > find_interfacesd - - v="" - out=$(apt-config shell v Acquire::HTTP::Proxy) - eval "$out" - echo "$v" > apt-proxy + blkid -o export /dev/vda | cat >blkid_output_vda + blkid -o export /dev/vda1 | cat >blkid_output_vda1 + blkid -o export /dev/vda2 | cat >blkid_output_vda2 + dev="/dev/vdd"; f="btrfs_uuid_${dev#/dev/*}"; + if command -v btrfs-debug-tree >/dev/null; then + btrfs-debug-tree -r $dev | awk '/^uuid/ {print $2}' | grep "-" + else + btrfs inspect-internal dump-super $dev | + awk '/^dev_item.fsid/ {print $2}' + fi | cat >$f + + # compare via /dev/zero 8MB + cmp --bytes=8388608 /dev/zero /dev/vde2; echo "$?" 
> cmp_prep.out + # extract partition info + udevadm info --export --query=property /dev/vde2 | cat >udev_info.out + + exit 0 """)] def _kname_to_uuid(self, kname): @@ -43,7 +51,7 @@ # parsing ls -al output on /dev/disk/by-uuid: # lrwxrwxrwx 1 root root 9 Dec 4 20:02 # d591e9e9-825a-4f0a-b280-3bfaf470b83c -> ../../vdg - ls_uuid = self.load_collect_file("ls_uuid") + ls_uuid = self.load_collect_file("ls_al_byuuid") uuid = [line.split()[8] for line in ls_uuid.split('\n') if ("../../" + kname) in line.split()] self.assertEqual(len(uuid), 1) @@ -52,81 +60,114 @@ self.assertEqual(len(uuid), 36) return uuid - def test_output_files_exist(self): - self.output_files_exist( - ["blkid_output_vda", "blkid_output_vda1", "blkid_output_vda2", - "btrfs_uuid_vdd", "fstab", "ls_dname", "ls_uuid", - "proc_partitions", - "root/curtin-install.log", "root/curtin-install-cfg.yaml"]) - - def test_ptable(self, disk_to_check=None): - if "trusty" in [self.release, self.target_release]: + def _test_ptable(self, blkid_output, expected): + if self.target_release == "trusty": raise SkipTest("No PTTYPE blkid output on trusty") - blkid_info = self.get_blkid_data("blkid_output_vda") - self.assertEquals(blkid_info["PTTYPE"], "dos") + if not blkid_output: + raise RuntimeError('_test_ptable requires blkid output file') - def test_partition_numbers(self): - # vde should have partitions 1 and 10 - disk = "vde" + if not expected: + raise RuntimeError('_test_ptable requires expected value') + + self.output_files_exist([blkid_output]) + blkid_info = self.get_blkid_data(blkid_output) + self.assertEquals(expected, blkid_info["PTTYPE"]) + + def _test_partition_numbers(self, disk, expected): found = [] + self.output_files_exist(["proc_partitions"]) proc_partitions = self.load_collect_file('proc_partitions') for line in proc_partitions.splitlines(): if disk in line: found.append(line.split()[3]) - # /proc/partitions should have 3 lines with 'vde' in them. - expected = [disk + s for s in ["", "1", "10"]] - self.assertEqual(found, expected) - - def test_partitions(self): - fstab_lines = self.load_collect_file('fstab').splitlines() - print("\n".join(fstab_lines)) - # Test that vda1 is on / - blkid_info = self.get_blkid_data("blkid_output_vda1") - fstab_entry = None - for line in fstab_lines: - if blkid_info['UUID'] in line: - fstab_entry = line - break - self.assertIsNotNone(fstab_entry) - self.assertEqual(fstab_entry.split(' ')[1], "/") + self.assertEqual(expected, found) - # Test that vda2 is on /home - blkid_info = self.get_blkid_data("blkid_output_vda2") - fstab_entry = None - for line in fstab_lines: - if blkid_info['UUID'] in line: - fstab_entry = line - break - self.assertIsNotNone(fstab_entry) - self.assertEqual(fstab_entry.split(' ')[1], "/home") + def _test_fstab_entries(self, fstab, byuuid, expected): + """ + expected = [ + (kname, mp, fsopts), + ... 
+ ] + """ + self.output_files_exist([fstab, byuuid]) + fstab_lines = self.load_collect_file(fstab).splitlines() + for (kname, mp, fsopts) in expected: + uuid = self._kname_to_uuid(kname) + if not uuid: + raise RuntimeError('Did not find uuid for kname: %s', kname) + for line in fstab_lines: + if uuid in line: + fstab_entry = line + break + self.assertIsNotNone(fstab_entry) + self.assertEqual(mp, fstab_entry.split(' ')[1]) + self.assertEqual(fsopts, fstab_entry.split(' ')[3]) - # Test whole disk vdd is mounted at /btrfs - uuid = self._kname_to_uuid('vdd') - fstab_entry = None - for line in fstab_lines: - if uuid in line: - fstab_entry = line - break - self.assertIsNotNone(fstab_entry) - self.assertEqual(fstab_entry.split(' ')[1], "/btrfs") - self.assertEqual(fstab_entry.split(' ')[3], "defaults,noatime") + def _test_whole_disk_uuid(self, kname, uuid_file): - def test_whole_disk_format(self): # confirm the whole disk format is the expected device - btrfs_uuid = self.load_collect_file('btrfs_uuid_vdd').strip() + self.output_files_exist([uuid_file]) + btrfs_uuid = self.load_collect_file(uuid_file).strip() # extract uuid from btrfs superblock self.assertTrue(btrfs_uuid is not None) self.assertEqual(len(btrfs_uuid), 36) # extract uuid from ls_uuid by kname - kname_uuid = self._kname_to_uuid('vdd') + kname_uuid = self._kname_to_uuid(kname) # compare them self.assertEqual(kname_uuid, btrfs_uuid) + def _test_partition_is_prep(self, info_file): + udev_info = self.load_collect_file(info_file).rstrip() + entry_type = '' + for line in udev_info.splitlines(): + if line.startswith('ID_PART_ENTRY_TYPE'): + entry_type = line.split("=", 1)[1].replace("'", "") + break + # https://en.wikipedia.org/wiki/GUID_Partition_Table + # GPT PReP boot UUID + self.assertEqual('9e1a2d38-c612-4316-aa26-8b49521e5a8b'.lower(), + entry_type.lower()) + + def _test_partition_is_zero(self, cmp_file): + self.assertEqual(0, int(self.load_collect_file(cmp_file).rstrip())) + + # class specific input + def test_output_files_exist(self): + self.output_files_exist( + ["ls_al_byuuid", + "root/curtin-install.log", "root/curtin-install-cfg.yaml"]) + + def test_ptable(self): + self._test_ptable("blkid_output_vda", "dos") + + def test_partition_numbers(self): + # vde should have partitions 1 2, and 10 + disk = "vde" + expected = [disk + s for s in ["", "1", "2", "10"]] + self._test_partition_numbers(disk, expected) + + def test_fstab_entries(self): + """" + dev=vda1 mp=/ fsopts=defaults + dev=vda2 mp=/home fsopts=defaults + dev=vdd mp=/btrfs fsopts=defaults,noatime + """ + expected = [('vda1', '/', 'defaults'), + ('vda2', '/home', 'defaults'), + ('vdd', '/btrfs', 'defaults,noatime')] + self._test_fstab_entries('fstab', 'ls_al_byuuid', expected) + + def test_whole_disk_uuid(self): + self._test_whole_disk_uuid("vdd", "btrfs_uuid_vdd") + def test_proxy_set(self): + if self.target_distro != 'ubuntu': + raise SkipTest("No apt-proxy for non-ubuntu distros") + self.output_files_exist(['apt-proxy']) expected = get_apt_proxy() apt_proxy_found = self.load_collect_file("apt-proxy").rstrip() if expected: @@ -143,6 +184,34 @@ print('Source repo version: %s' % source_version) self.assertEqual(source_version, installed_version) + def test_partition_is_prep(self): + self._test_partition_is_prep("udev_info.out") + + def test_partition_is_zero(self): + self._test_partition_is_zero("cmp_prep.out") + + +class CentosTestBasicAbs(TestBasicAbs): + def test_centos_release(self): + """Test this image is the centos release expected""" + 
self.output_files_exist(["rpm_dist_version_major", "centos-release"]) + + centos_release = self.load_collect_file("centos-release").lower() + rpm_major_version = ( + self.load_collect_file("rpm_dist_version_major").strip()) + _, os_id, os_version = self.target_release.partition("centos") + + self.assertTrue(os_version.startswith(rpm_major_version), + "%s doesn't start with %s" % (os_version, + rpm_major_version)) + self.assertTrue(centos_release.startswith(os_id), + "%s doesn't start with %s" % (centos_release, os_id)) + + +class Centos70XenialTestBasic(centos_relbase.centos70_xenial, + CentosTestBasicAbs): + __test__ = True + class TrustyTestBasic(relbase.trusty, TestBasicAbs): __test__ = True @@ -169,11 +238,15 @@ __test__ = True -class ArtfulTestBasic(relbase.artful, TestBasicAbs): +class BionicTestBasic(relbase.bionic, TestBasicAbs): __test__ = True -class BionicTestBasic(relbase.bionic, TestBasicAbs): +class CosmicTestBasic(relbase.cosmic, TestBasicAbs): + __test__ = True + + +class DiscoTestBasic(relbase.disco, TestBasicAbs): __test__ = True @@ -181,100 +254,60 @@ conf_file = "examples/tests/basic_scsi.yaml" disk_driver = 'scsi-hd' extra_disks = ['128G', '128G', '4G'] - nvme_disks = ['4G'] - collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" + extra_collect_scripts = [textwrap.dedent(""" cd OUTPUT_COLLECT_D - blkid -o export /dev/sda > blkid_output_sda - blkid -o export /dev/sda1 > blkid_output_sda1 - blkid -o export /dev/sda2 > blkid_output_sda2 - f="btrfs_uuid_sdc" - btrfs-debug-tree -r /dev/sdc | awk '/^uuid/ {print $2}' | grep "-" > $f - cat /proc/partitions > proc_partitions - ls -al /dev/disk/by-uuid/ > ls_uuid - ls -al /dev/disk/by-id/ > ls_disk_id - cat /etc/fstab > fstab - mkdir -p /dev/disk/by-dname - ls /dev/disk/by-dname/ > ls_dname - find /etc/network/interfaces.d > find_interfacesd - - v="" - out=$(apt-config shell v Acquire::HTTP::Proxy) - eval "$out" - echo "$v" > apt-proxy - """)] + blkid -o export /dev/sda | cat >blkid_output_sda + blkid -o export /dev/sda1 | cat >blkid_output_sda1 + blkid -o export /dev/sda2 | cat >blkid_output_sda2 + dev="/dev/sdc"; f="btrfs_uuid_${dev#/dev/*}"; + if command -v btrfs-debug-tree >/dev/null; then + btrfs-debug-tree -r $dev | awk '/^uuid/ {print $2}' | grep "-" + else + btrfs inspect-internal dump-super $dev | + awk '/^dev_item.fsid/ {print $2}' + fi | cat >$f + + # compare via /dev/zero 8MB + cmp --bytes=8388608 /dev/zero /dev/sdd2; echo "$?" > cmp_prep.out + # extract partition info + udevadm info --export --query=property /dev/sdd2 | cat >udev_info.out - def test_output_files_exist(self): - self.output_files_exist( - ["blkid_output_sda", "blkid_output_sda1", "blkid_output_sda2", - "btrfs_uuid_sdc", "fstab", "ls_dname", "ls_uuid", - "ls_disk_id", "proc_partitions"]) + exit 0 + """)] def test_ptable(self): - if "trusty" in [self.release, self.target_release]: - raise SkipTest("No PTTYPE blkid output on trusty") - - blkid_info = self.get_blkid_data("blkid_output_sda") - self.assertEquals(blkid_info["PTTYPE"], "dos") + self._test_ptable("blkid_output_sda", "dos") def test_partition_numbers(self): - # vde should have partitions 1 and 10 + # sdd should have partitions 1, 2, and 10 disk = "sdd" - found = [] - proc_partitions = self.load_collect_file('proc_partitions') - for line in proc_partitions.splitlines(): - if disk in line: - found.append(line.split()[3]) - # /proc/partitions should have 3 lines with 'vde' in them. 
- expected = [disk + s for s in ["", "1", "10"]] - self.assertEqual(found, expected) - - def test_partitions(self): - fstab_lines = self.load_collect_file('fstab').splitlines() - print("\n".join(fstab_lines)) - # Test that vda1 is on / - blkid_info = self.get_blkid_data("blkid_output_sda1") - fstab_entry = None - for line in fstab_lines: - if blkid_info['UUID'] in line: - fstab_entry = line - break - self.assertIsNotNone(fstab_entry) - self.assertEqual(fstab_entry.split(' ')[1], "/") + expected = [disk + s for s in ["", "1", "2", "10"]] + self._test_partition_numbers(disk, expected) - # Test that vda2 is on /home - blkid_info = self.get_blkid_data("blkid_output_sda2") - fstab_entry = None - for line in fstab_lines: - if blkid_info['UUID'] in line: - fstab_entry = line - break - self.assertIsNotNone(fstab_entry) - self.assertEqual(fstab_entry.split(' ')[1], "/home") + def test_fstab_entries(self): + """" + dev=sda1 mp=/ fsopts=defaults + dev=sda2 mp=/home fsopts=defaults + dev=sdc mp=/btrfs fsopts=defaults,noatime + """ + expected = [('sda1', '/', 'defaults'), + ('sda2', '/home', 'defaults'), + ('sdc', '/btrfs', 'defaults,noatime')] + self._test_fstab_entries('fstab', 'ls_al_byuuid', expected) + + def test_whole_disk_uuid(self): + self._test_whole_disk_uuid("sdc", "btrfs_uuid_sdc") + + def test_partition_is_prep(self): + self._test_partition_is_prep("udev_info.out") - # Test whole disk sdc is mounted at /btrfs, and uses defaults,noatime - uuid = self._kname_to_uuid('sdc') - fstab_entry = None - for line in fstab_lines: - if uuid in line: - fstab_entry = line - break - self.assertIsNotNone(fstab_entry) - self.assertEqual(fstab_entry.split(' ')[1], "/btrfs") - self.assertEqual(fstab_entry.split(' ')[3], "defaults,noatime") + def test_partition_is_zero(self): + self._test_partition_is_zero("cmp_prep.out") - def test_whole_disk_format(self): - # confirm the whole disk format is the expected device - btrfs_uuid = self.load_collect_file("btrfs_uuid_sdc").strip() - - # extract uuid from btrfs superblock - self.assertTrue(btrfs_uuid is not None) - self.assertEqual(len(btrfs_uuid), 36) - - # extract uuid from ls_uuid by kname - kname_uuid = self._kname_to_uuid('sdc') - # compare them - self.assertEqual(kname_uuid, btrfs_uuid) +class Centos70XenialTestScsiBasic(centos_relbase.centos70_xenial, + TestBasicScsiAbs, CentosTestBasicAbs): + __test__ = True class XenialGATestScsiBasic(relbase.xenial_ga, TestBasicScsiAbs): @@ -289,11 +322,15 @@ __test__ = True -class ArtfulTestScsiBasic(relbase.artful, TestBasicScsiAbs): +class BionicTestScsiBasic(relbase.bionic, TestBasicScsiAbs): + __test__ = True + + +class CosmicTestScsiBasic(relbase.cosmic, TestBasicScsiAbs): __test__ = True -class BionicTestScsiBasic(relbase.bionic, TestBasicScsiAbs): +class DiscoTestScsiBasic(relbase.disco, TestBasicScsiAbs): __test__ = True # vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_bcache_basic.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_bcache_basic.py --- curtin-18.1-17-gae48e86f/tests/vmtests/test_bcache_basic.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_bcache_basic.py 2019-02-15 20:42:42.000000000 +0000 @@ -1,6 +1,6 @@ # This file is part of curtin. See LICENSE file for copyright and license info. -from . import VMBaseClass +from . 
 from .releases import base_vm_classes as relbase
 import textwrap
@@ -10,25 +10,26 @@
     arch_skip = [
         "s390x",  # lp:1565029
     ]
+    test_type = 'storage'
     conf_file = "examples/tests/bcache_basic.yaml"
     nr_cpus = 2
     dirty_disks = True
     extra_disks = ['2G']
-    collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent("""
+    extra_collect_scripts = [textwrap.dedent("""
         cd OUTPUT_COLLECT_D
        bcache-super-show /dev/vda2 > bcache_super_vda2
         ls /sys/fs/bcache > bcache_ls
         cat /sys/block/bcache0/bcache/cache_mode > bcache_cache_mode
-        cat /proc/mounts > proc_mounts
-        cat /proc/partitions > proc_partitions
-        find /etc/network/interfaces.d > find_interfacesd
-        cat /proc/cmdline > cmdline
+
+        exit 0
         """)]

+    @skip_if_flag('expected_failure')
     def test_bcache_output_files_exist(self):
         self.output_files_exist(["bcache_super_vda2", "bcache_ls",
                                  "bcache_cache_mode"])

+    @skip_if_flag('expected_failure')
     def test_bcache_status(self):
         bcache_cset_uuid = None
         for line in self.load_collect_file("bcache_super_vda2").splitlines():
@@ -38,11 +39,13 @@
         self.assertTrue(bcache_cset_uuid in
                         self.load_collect_file("bcache_ls").splitlines())

+    @skip_if_flag('expected_failure')
     def test_bcache_cachemode(self):
         self.check_file_regex("bcache_cache_mode", r"\[writeback\]")

+    @skip_if_flag('expected_failure')
     def test_proc_cmdline_root_by_uuid(self):
-        self.check_file_regex("cmdline", r"root=UUID=")
+        self.check_file_regex("proc_cmdline", r"root=UUID=")


 class TrustyBcacheBasic(relbase.trusty, TestBcacheBasic):
@@ -65,11 +68,15 @@
     __test__ = True


-class ArtfulBcacheBasic(relbase.artful, TestBcacheBasic):
+class BionicBcacheBasic(relbase.bionic, TestBcacheBasic):
     __test__ = True


-class BionicBcacheBasic(relbase.bionic, TestBcacheBasic):
+class CosmicBcacheBasic(relbase.cosmic, TestBcacheBasic):
+    __test__ = True
+
+
+class DiscoBcacheBasic(relbase.disco, TestBcacheBasic):
     __test__ = True

 # vi: ts=4 expandtab syntax=python
diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_bcache_bug1718699.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_bcache_bug1718699.py
--- curtin-18.1-17-gae48e86f/tests/vmtests/test_bcache_bug1718699.py 2018-05-18 18:40:48.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_bcache_bug1718699.py 2019-02-15 20:42:42.000000000 +0000
@@ -15,11 +15,15 @@
     __test__ = True


-class ArtfulTestBcacheBug1718699(relbase.artful, TestBcacheBug1718699):
+class BionicTestBcacheBug1718699(relbase.bionic, TestBcacheBug1718699):
     __test__ = True


-class BionicTestBcacheBug1718699(relbase.bionic, TestBcacheBug1718699):
+class CosmicTestBcacheBug1718699(relbase.cosmic, TestBcacheBug1718699):
+    __test__ = True
+
+
+class DiscoTestBcacheBug1718699(relbase.disco, TestBcacheBug1718699):
     __test__ = True

 # vi: ts=4 expandtab syntax=python
diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_bcache_partitions.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_bcache_partitions.py
--- curtin-18.1-17-gae48e86f/tests/vmtests/test_bcache_partitions.py 1970-01-01 00:00:00.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_bcache_partitions.py 2019-02-15 20:42:42.000000000 +0000
@@ -0,0 +1,35 @@
+# This file is part of curtin. See LICENSE file for copyright and license info.
+
+from .releases import base_vm_classes as relbase
+from .test_bcache_basic import TestBcacheBasic
+
+
+class TestBcachePartitions(TestBcacheBasic):
+    conf_file = "examples/tests/bcache-partitions.yaml"
+    dirty_disks = True
+    nr_cpus = 2
+    extra_disks = ['10G', '10G']
+
+
+class XenialTestBcachePartitions(relbase.xenial, TestBcachePartitions):
+    # Xenial 4.4 kernel does not support bcache partitions
+    expected_failure = True
+    __test__ = True
+
+
+class XenialHWETestBcachePartitions(relbase.xenial_hwe, TestBcachePartitions):
+    __test__ = True
+
+
+class BionicTestBcachePartitions(relbase.bionic, TestBcachePartitions):
+    __test__ = True
+
+
+class CosmicTestBcachePartitions(relbase.cosmic, TestBcachePartitions):
+    __test__ = True
+
+
+class DiscoTestBcachePartitions(relbase.disco, TestBcachePartitions):
+    __test__ = True
+
+# vi: ts=4 expandtab syntax=python
diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_centos_basic.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_centos_basic.py
--- curtin-18.1-17-gae48e86f/tests/vmtests/test_centos_basic.py 2018-05-18 18:40:48.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_centos_basic.py 1970-01-01 00:00:00.000000000 +0000
@@ -1,96 +0,0 @@
-# This file is part of curtin. See LICENSE file for copyright and license info.
-
-from . import VMBaseClass
-from .releases import centos_base_vm_classes as relbase
-from .test_network import TestNetworkBaseTestsAbs
-
-import textwrap
-
-
-# FIXME: should eventually be integrated with the real TestBasic
-class CentosTestBasicAbs(VMBaseClass):
-    __test__ = False
-    conf_file = "examples/tests/centos_basic.yaml"
-    # XXX: command | tee output is required for Centos under SELinux
-    # http://danwalsh.livejournal.com/22860.html
-    collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(
-        """
-        cd OUTPUT_COLLECT_D
-        cat /etc/fstab > fstab
-        rpm -qa | cat >rpm_qa
-        ifconfig -a | cat >ifconfig_a
-        ip a | cat >ip_a
-        cp -a /etc/sysconfig/network-scripts .
-        cp -a /var/log/messages .
-        cp -a /var/log/cloud-init* .
-        cp -a /var/lib/cloud ./var_lib_cloud
-        cp -a /run/cloud-init ./run_cloud-init
-        rpm -E '%rhel' > rpm_dist_version_major
-        cp -a /etc/centos-release .
-        """)]
-    fstab_expected = {
-        'LABEL=cloudimg-rootfs': '/',
-    }
-
-    def test_dname(self):
-        pass
-
-    def test_interfacesd_eth0_removed(self):
-        pass
-
-    def test_output_files_exist(self):
-        self.output_files_exist(["fstab"])
-
-    def test_centos_release(self):
-        """Test this image is the centos release expected"""
-        self.output_files_exist(["rpm_dist_version_major", "centos-release"])
-
-        centos_release = self.load_collect_file("centos-release").lower()
-        rpm_major_version = (
-            self.load_collect_file("rpm_dist_version_major").strip())
-        _, os_id, os_version = self.target_release.partition("centos")
-
-        self.assertTrue(os_version.startswith(rpm_major_version),
-                        "%s doesn't start with %s" % (os_version,
-                                                      rpm_major_version))
-        self.assertTrue(centos_release.startswith(os_id),
-                        "%s doesn't start with %s" % (centos_release, os_id))
-
-
-# FIXME: this naming scheme needs to be replaced
-class Centos70FromXenialTestBasic(relbase.centos70fromxenial,
-                                  CentosTestBasicAbs):
-    __test__ = True
-
-
-class Centos66FromXenialTestBasic(relbase.centos66fromxenial,
-                                  CentosTestBasicAbs):
-    __test__ = False
-    # FIXME: test is disabled because the grub config script in target
-    # specifies drive using hd(1,0) syntax, which breaks when the
-    # installation medium is removed. other than this, the install works
-
-
-class CentosTestBasicNetworkAbs(TestNetworkBaseTestsAbs):
-    conf_file = "examples/tests/centos_basic.yaml"
-    collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [
-        textwrap.dedent("""
-            cd OUTPUT_COLLECT_D
-            cp -a /etc/sysconfig/network-scripts .
-            cp -a /var/log/cloud-init* .
-            cp -a /var/lib/cloud ./var_lib_cloud
-            cp -a /run/cloud-init ./run_cloud-init
-            """)]
-
-    def test_etc_network_interfaces(self):
-        pass
-
-    def test_etc_resolvconf(self):
-        pass
-
-
-class Centos70BasicNetworkFromXenialTestBasic(relbase.centos70fromxenial,
-                                              CentosTestBasicNetworkAbs):
-    __test__ = True
-
-# vi: ts=4 expandtab syntax=python
diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_fs_battery.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_fs_battery.py
--- curtin-18.1-17-gae48e86f/tests/vmtests/test_fs_battery.py 2018-05-18 18:40:48.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_fs_battery.py 2019-02-15 20:42:42.000000000 +0000
@@ -2,6 +2,7 @@

 from . import VMBaseClass
 from .releases import base_vm_classes as relbase
+from .releases import centos_base_vm_classes as centos_relbase

 from curtin import config

@@ -43,19 +44,14 @@

 class TestFsBattery(VMBaseClass):
     interactive = False
+    test_type = 'storage'
     conf_file = "examples/tests/filesystem_battery.yaml"
     extra_disks = ['20G']
-    collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent("""
+    extra_collect_scripts = [textwrap.dedent("""
         cd OUTPUT_COLLECT_D
-        blkid -o export > blkid.out
-        cat /proc/mounts > proc_mounts
-        cat /proc/partitions > proc_partitions
-        find /etc/network/interfaces.d > find_interfacesd
-        cat /proc/cmdline > cmdline
-        cat /etc/fstab > fstab
         cat /proc/1/mountinfo > mountinfo

-        for p in /my/bind-over-var-lib/apt /my/bind-ro-etc/passwd; do
+        for p in /my/bind-over-var-cache/man /my/bind-ro-etc/passwd; do
           [ -e "$p" ] && echo "$p: present" || echo "$p: missing"
         done > my-path-checks

@@ -104,6 +100,8 @@
           echo "$part umount: PASS" ||
           echo "$part umount: FAIL: $out"
         done >> battery-mount-umount
+
+        exit 0
         """)]

     def get_fs_entries(self):
@@ -162,7 +160,7 @@
         expected = [
             "none /my/tmpfs tmpfs size=4194304 0 0".split(),
             "none /my/ramfs ramfs defaults 0 0".split(),
-            "/my/bind-over-var-lib /var/lib none bind 0 0".split(),
+            "/my/bind-over-var-cache /var/cache none bind 0 0".split(),
             "/etc /my/bind-ro-etc none bind,ro 0 0".split(),
         ]
         fstab_found = [
@@ -185,7 +183,7 @@
             dest_src[toks[4]] = toks[3]
         self.assertTrue("/my/ramfs" in dest_src)
         self.assertTrue("/my/tmpfs" in dest_src)
-        self.assertEqual(dest_src.get("/var/lib"), "/my/bind-over-var-lib")
+        self.assertEqual(dest_src.get("/var/cache"), "/my/bind-over-var-cache")
         self.assertEqual(dest_src.get("/my/bind-ro-etc"), "/etc")

     def test_expected_files_from_bind_mounts(self):
@@ -197,10 +195,28 @@
             paths[path] = val.strip()

         self.assertEqual(
-            {'/my/bind-over-var-lib/apt': 'present',
+            {'/my/bind-over-var-cache/man': 'present',
             '/my/bind-ro-etc/passwd': 'present'},
            paths)


+class Centos70XenialTestFsBattery(centos_relbase.centos70_xenial,
+                                  TestFsBattery):
+    __test__ = True
+
+    def test_mount_umount(self):
+        """Check output of mount and unmount operations for each fs."""
+        # centos does not support: jfs, ntfs, reiserfs
+        unsupported = ['jfs', 'ntfs', 'reiserfs']
+        results = [ent for ent in
+                   self.load_collect_file("battery-mount-umount").splitlines()
+                   if ent.split()[-1].replace("'", "") not in unsupported]
+        entries = {k: v for k, v in self.get_fs_entries().items()
+                   if v['fstype'] not in unsupported}
+        expected = (["%s mount: PASS" % k for k in entries] +
PASS" % k for k in entries] + + ["%s umount: PASS" % k for k in entries]) + self.assertEqual(sorted(expected), sorted(results)) + + class TrustyTestFsBattery(relbase.trusty, TestFsBattery): __test__ = True @@ -225,4 +241,11 @@ __test__ = True +class CosmicTestFsBattery(relbase.cosmic, TestFsBattery): + __test__ = True + + +class DiscoTestFsBattery(relbase.disco, TestFsBattery): + __test__ = True + # vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_install_umount.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_install_umount.py --- curtin-18.1-17-gae48e86f/tests/vmtests/test_install_umount.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_install_umount.py 2019-02-15 20:42:42.000000000 +0000 @@ -3,32 +3,15 @@ from . import VMBaseClass from .releases import base_vm_classes as relbase -import textwrap import yaml class TestInstallUnmount(VMBaseClass): """ Test a curtin install which disabled unmonting """ conf_file = "examples/tests/install_disable_unmount.yaml""" + test_type = 'config' extra_nics = [] extra_disks = [] - collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" - cd OUTPUT_COLLECT_D - sfdisk --list > sfdisk_list - for d in /dev/[sv]d[a-z] /dev/xvd?; do - [ -b "$d" ] || continue - echo == $d == - sgdisk --print $d - done > sgdisk_list - blkid > blkid - cat /proc/partitions > proc_partitions - cp /etc/network/interfaces interfaces - if [ -f /var/log/cloud-init-output.log ]; then - cp /var/log/cloud-init-output.log . - fi - cp /var/log/cloud-init.log . - find /etc/network/interfaces.d > find_interfacesd - """)] def test_proc_mounts_before_unmount(self): """Test TARGET_MOUNT_POINT value is in ephemeral /proc/mounts""" diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_iscsi.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_iscsi.py --- curtin-18.1-17-gae48e86f/tests/vmtests/test_iscsi.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_iscsi.py 2019-02-15 20:42:42.000000000 +0000 @@ -2,12 +2,14 @@ from . 
 from .releases import base_vm_classes as relbase
+from .releases import centos_base_vm_classes as centos_relbase

 import textwrap


 class TestBasicIscsiAbs(VMBaseClass):
     interactive = False
+    test_type = 'storage'
     iscsi_disks = [
         {'size': '3G'},
         {'size': '4G', 'auth': 'user:passw0rd'},
@@ -16,14 +18,13 @@
     conf_file = "examples/tests/basic_iscsi.yaml"
     nr_testfiles = 4

-    collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(
-        """
+    extra_collect_scripts = [textwrap.dedent("""
         cd OUTPUT_COLLECT_D
-        cat /etc/fstab > fstab
-        ls /dev/disk/by-dname/ > ls_dname
-        find /etc/network/interfaces.d > find_interfacesd
+        cp -a /etc/iscsi ./etc_iscsi
         bash -c \
-        'for f in /mnt/iscsi*; do cat $f/testfile > testfile${f: -1}; done'
+        'for f in /mnt/iscsi*; do cp $f/testfile testfile${f: -1}; done'
+
+        exit 0
         """)]

     def test_fstab_has_netdev_option(self):
@@ -49,6 +50,11 @@
                          (testfile, expected_content, content))


+class Centos70XenialTestIscsiBasic(centos_relbase.centos70_xenial,
+                                   TestBasicIscsiAbs):
+    __test__ = True
+
+
 class TrustyTestIscsiBasic(relbase.trusty, TestBasicIscsiAbs):
     __test__ = True

@@ -65,11 +71,15 @@
     __test__ = True


-class ArtfulTestIscsiBasic(relbase.artful, TestBasicIscsiAbs):
+class BionicTestIscsiBasic(relbase.bionic, TestBasicIscsiAbs):
     __test__ = True


-class BionicTestIscsiBasic(relbase.bionic, TestBasicIscsiAbs):
+class CosmicTestIscsiBasic(relbase.cosmic, TestBasicIscsiAbs):
+    __test__ = True
+
+
+class DiscoTestIscsiBasic(relbase.disco, TestBasicIscsiAbs):
     __test__ = True

 # vi: ts=4 expandtab syntax=python
diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_journald_reporter.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_journald_reporter.py
--- curtin-18.1-17-gae48e86f/tests/vmtests/test_journald_reporter.py 2018-05-18 18:40:48.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_journald_reporter.py 2019-02-15 20:42:42.000000000 +0000
@@ -4,18 +4,15 @@
 from .releases import base_vm_classes as relbase

 import json
-import textwrap


 class TestJournaldReporter(VMBaseClass):
     # Test that curtin with no config does the right thing
     conf_file = "examples/tests/journald_reporter.yaml"
+    test_type = 'config'
     extra_disks = []
     extra_nics = []
-    collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent("""
-        cd OUTPUT_COLLECT_D
-        find /etc/network/interfaces.d > find_interfacesd
-        """)]
+    extra_collect_scripts = []

     def test_output_files_exist(self):
         self.output_files_exist(["root/journalctl.curtin_events.log",
@@ -35,11 +32,15 @@
     __test__ = True


-class ArtfulTestJournaldReporter(relbase.artful, TestJournaldReporter):
+class BionicTestJournaldReporter(relbase.bionic, TestJournaldReporter):
     __test__ = True


-class BionicTestJournaldReporter(relbase.bionic, TestJournaldReporter):
+class CosmicTestJournaldReporter(relbase.cosmic, TestJournaldReporter):
+    __test__ = True
+
+
+class DiscoTestJournaldReporter(relbase.disco, TestJournaldReporter):
     __test__ = True

 # vi: ts=4 expandtab syntax=python
diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_lvm_iscsi.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_lvm_iscsi.py
--- curtin-18.1-17-gae48e86f/tests/vmtests/test_lvm_iscsi.py 2018-05-18 18:40:48.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_lvm_iscsi.py 2019-02-15 20:42:42.000000000 +0000
@@ -1,6 +1,7 @@
 # This file is part of curtin. See LICENSE file for copyright and license info.

 from .releases import base_vm_classes as relbase
+from .releases import centos_base_vm_classes as centos_relbase
 from .test_lvm import TestLvmAbs
 from .test_iscsi import TestBasicIscsiAbs

@@ -16,14 +17,15 @@
     conf_file = "examples/tests/lvm_iscsi.yaml"
     nr_testfiles = 4

-    collect_scripts = TestLvmAbs.collect_scripts
-    collect_scripts += TestBasicIscsiAbs.collect_scripts + [textwrap.dedent(
-        """
-        cd OUTPUT_COLLECT_D
-        ls -al /sys/class/block/dm*/slaves/ > dm_slaves
-        cp -a /etc/udev/rules.d udev_rules_d
-        cp -a /etc/iscsi etc_iscsi
-        """)]
+    extra_collect_scripts = (
+        TestLvmAbs.extra_collect_scripts +
+        TestBasicIscsiAbs.extra_collect_scripts +
+        [textwrap.dedent("""
+            cd OUTPUT_COLLECT_D
+            ls -al /sys/class/block/dm*/slaves/ > dm_slaves
+
+            exit 0
+            """)])

     fstab_expected = {
         'UUID=6de56115-9500-424b-8151-221b270ec708': '/mnt/iscsi1',
@@ -55,6 +57,11 @@
         self.check_file_strippedline("pvs", "vg2=/dev/sdb6")


+class Centos70XenialTestLvmIscsi(centos_relbase.centos70_xenial,
+                                 TestLvmIscsiAbs):
+    __test__ = True
+
+
 class TrustyTestIscsiLvm(relbase.trusty, TestLvmIscsiAbs):
     __test__ = True

@@ -75,11 +82,15 @@
     __test__ = True


-class ArtfulTestIscsiLvm(relbase.artful, TestLvmIscsiAbs):
+class BionicTestIscsiLvm(relbase.bionic, TestLvmIscsiAbs):
     __test__ = True


-class BionicTestIscsiLvm(relbase.bionic, TestLvmIscsiAbs):
+class CosmicTestIscsiLvm(relbase.cosmic, TestLvmIscsiAbs):
+    __test__ = True
+
+
+class DiscoTestIscsiLvm(relbase.disco, TestLvmIscsiAbs):
     __test__ = True

 # vi: ts=4 expandtab syntax=python
diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_lvm.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_lvm.py
--- curtin-18.1-17-gae48e86f/tests/vmtests/test_lvm.py 2018-05-18 18:40:48.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_lvm.py 2019-02-15 20:42:42.000000000 +0000
@@ -2,26 +2,23 @@

 from . import VMBaseClass
 from .releases import base_vm_classes as relbase
+from .releases import centos_base_vm_classes as centos_relbase

 import textwrap


 class TestLvmAbs(VMBaseClass):
     conf_file = "examples/tests/lvm.yaml"
+    test_type = 'storage'
     interactive = False
     extra_disks = ['10G']
     dirty_disks = True
-    collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent("""
+    extra_collect_scripts = [textwrap.dedent("""
         cd OUTPUT_COLLECT_D
-        cat /etc/fstab > fstab
-        ls /dev/disk/by-dname > ls_dname
-        ls -al /dev/disk/by-dname > lsal_dname
-        ls -al /dev/disk/by-id/ > ls_byid
-        ls -al /dev/disk/by-uuid/ > ls_byuuid
-        cat /proc/partitions > proc_partitions
-        find /etc/network/interfaces.d > find_interfacesd
         pvdisplay -C --separator = -o vg_name,pv_name --noheadings > pvs
         lvdisplay -C --separator = -o lv_name,vg_name --noheadings > lvs
+
+        exit 0
         """)]
     fstab_expected = {
         '/dev/vg1/lv1': '/srv/data',
@@ -46,6 +43,10 @@
                                 ["fstab", "ls_dname"])


+class Centos70XenialTestLvm(centos_relbase.centos70_xenial, TestLvmAbs):
+    __test__ = True
+
+
 class TrustyTestLvm(relbase.trusty, TestLvmAbs):
     __test__ = True

@@ -66,11 +67,15 @@
     __test__ = True


-class ArtfulTestLvm(relbase.artful, TestLvmAbs):
+class BionicTestLvm(relbase.bionic, TestLvmAbs):
     __test__ = True


-class BionicTestLvm(relbase.bionic, TestLvmAbs):
+class CosmicTestLvm(relbase.cosmic, TestLvmAbs):
+    __test__ = True
+
+
+class DiscoTestLvm(relbase.disco, TestLvmAbs):
     __test__ = True

 # vi: ts=4 expandtab syntax=python
diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_lvm_raid.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_lvm_raid.py
--- curtin-18.1-17-gae48e86f/tests/vmtests/test_lvm_raid.py 1970-01-01 00:00:00.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_lvm_raid.py 2019-02-15 20:42:42.000000000 +0000
@@ -0,0 +1,59 @@
+# This file is part of curtin. See LICENSE file for copyright and license info.
+
+from .releases import base_vm_classes as relbase
+from .test_mdadm_bcache import TestMdadmAbs
+from .test_lvm import TestLvmAbs
+
+import textwrap
+
+
+class TestLvmOverRaidAbs(TestMdadmAbs, TestLvmAbs):
+    conf_file = "examples/tests/lvmoverraid.yaml"
+    active_mdadm = "2"
+    nr_cpus = 2
+    dirty_disks = True
+    extra_disks = ['10G'] * 4
+
+    extra_collect_scripts = (
+        TestLvmAbs.extra_collect_scripts +
+        TestMdadmAbs.extra_collect_scripts +
+        [textwrap.dedent("""
+            cd OUTPUT_COLLECT_D
+            ls -al /dev/md* > dev_md
+            cp -a /etc/mdadm etc_mdadm
+            cp -a /etc/lvm etc_lvm
+
+            exit 0
+            """)]
+    )
+
+    fstab_expected = {
+        '/dev/vg1/lv1': '/srv/data',
+        '/dev/vg1/lv2': '/srv/backup',
+    }
+    disk_to_check = [('main_disk', 1),
+                     ('md0', 0),
+                     ('md1', 0)]
+
+    def test_lvs(self):
+        self.check_file_strippedline("lvs", "lv-0=vg0")
+
+    def test_pvs(self):
+        self.check_file_strippedline("pvs", "vg0=/dev/md0")
+        self.check_file_strippedline("pvs", "vg0=/dev/md1")
+
+
+class DiscoTestLvmOverRaid(relbase.disco, TestLvmOverRaidAbs):
+    __test__ = True
+
+
+class CosmicTestLvmOverRaid(relbase.cosmic, TestLvmOverRaidAbs):
+    __test__ = True
+
+
+class BionicTestLvmOverRaid(relbase.bionic, TestLvmOverRaidAbs):
+    __test__ = True
+
+
+class XenialGATestLvmOverRaid(relbase.xenial_ga, TestLvmOverRaidAbs):
+    __test__ = True
diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_lvm_root.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_lvm_root.py
--- curtin-18.1-17-gae48e86f/tests/vmtests/test_lvm_root.py 2018-05-18 18:40:48.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_lvm_root.py 2019-02-15 20:42:42.000000000 +0000
@@ -2,33 +2,32 @@

 from . import VMBaseClass
 from .releases import base_vm_classes as relbase
+from .releases import centos_base_vm_classes as centos_relbase

 import json
+import os
 import textwrap


 class TestLvmRootAbs(VMBaseClass):
     conf_file = "examples/tests/lvmroot.yaml"
+    test_type = 'storage'
     interactive = False
     rootfs_uuid = '04836770-e989-460f-8774-8e277ddcb40f'
     extra_disks = []
     dirty_disks = True
-    collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent("""
+    extra_collect_scripts = [textwrap.dedent("""
         cd OUTPUT_COLLECT_D
-        cat /etc/fstab > fstab
         lsblk --json --fs -o KNAME,MOUNTPOINT,UUID,FSTYPE > lsblk.json
         lsblk --fs -P -o KNAME,MOUNTPOINT,UUID,FSTYPE > lsblk.out
-        ls -al /dev/disk/by-dname > ls_al_dname
-        ls -al /dev/disk/by-id > ls_al_byid
-        ls -al /dev/disk/by-uuid > ls_al_byuuid
-        ls -al /dev/mapper > ls_al_dev_mapper
-        find /etc/network/interfaces.d > find_interfacesd
         pvdisplay -C --separator = -o vg_name,pv_name --noheadings > pvs
         lvdisplay -C --separator = -o lv_name,vg_name --noheadings > lvs
         pvdisplay > pvdisplay
         vgdisplay > vgdisplay
         lvdisplay > lvdisplay
         ls -al /dev/root_vg/ > dev_root_vg
+
+        exit 0
         """)]
     fstab_expected = {
         'UUID=04836770-e989-460f-8774-8e277ddcb40f': '/',
@@ -39,8 +38,8 @@
         self.output_files_exist(["fstab"])

     def test_rootfs_format(self):
-        if self.release not in ['trusty']:
-            self.output_files_exist(["lsblk.json"])
+        self.output_files_exist(["lsblk.json"])
+        if os.path.getsize(self.collect_path('lsblk.json')) > 0:
             lsblk_data = json.load(open(self.collect_path('lsblk.json')))
             print(json.dumps(lsblk_data, indent=4))
             [entry] = [entry for entry in lsblk_data.get('blockdevices')
@@ -62,44 +61,46 @@
         self.assertEqual(self.conf_replace['__ROOTFS_FORMAT__'], fstype)


-class TrustyTestLvmRootExt4(relbase.trusty, TestLvmRootAbs):
+class Centos70XenialTestLvmRootExt4(centos_relbase.centos70_xenial,
+                                    TestLvmRootAbs):
     __test__ = True
     conf_replace = {
         '__ROOTFS_FORMAT__': 'ext4',
     }


-class TrustyTestLvmRootXfs(relbase.trusty, TestLvmRootAbs):
-    __test__ = True
-    # xfs on trusty can't support uuid=
-    fstab_expected = {}
+class Centos70XenialTestLvmRootXfs(centos_relbase.centos70_xenial,
+                                   TestLvmRootAbs):
+    __test__ = False
     conf_replace = {
         '__ROOTFS_FORMAT__': 'xfs',
     }


-class XenialTestLvmRootExt4(relbase.xenial, TestLvmRootAbs):
+class TrustyTestLvmRootExt4(relbase.trusty, TestLvmRootAbs):
     __test__ = True
     conf_replace = {
         '__ROOTFS_FORMAT__': 'ext4',
     }


-class XenialTestLvmRootXfs(relbase.xenial, TestLvmRootAbs):
+class TrustyTestLvmRootXfs(relbase.trusty, TestLvmRootAbs):
     __test__ = True
+    # xfs on trusty can't support uuid=
+    fstab_expected = {}
     conf_replace = {
         '__ROOTFS_FORMAT__': 'xfs',
     }


-class ArtfulTestLvmRootExt4(relbase.artful, TestLvmRootAbs):
+class XenialTestLvmRootExt4(relbase.xenial, TestLvmRootAbs):
     __test__ = True
     conf_replace = {
         '__ROOTFS_FORMAT__': 'ext4',
     }


-class ArtfulTestLvmRootXfs(relbase.artful, TestLvmRootAbs):
+class XenialTestLvmRootXfs(relbase.xenial, TestLvmRootAbs):
     __test__ = True
     conf_replace = {
         '__ROOTFS_FORMAT__': 'xfs',
@@ -111,7 +112,8 @@
     uefi = True


-class XenialTestUefiLvmRootExt4(relbase.xenial, TestUefiLvmRootAbs):
+class Centos70XenialTestUefiLvmRootExt4(centos_relbase.centos70_xenial,
+                                        TestUefiLvmRootAbs):
     __test__ = True
     conf_replace = {
         '__BOOTFS_FORMAT__': 'ext4',
@@ -119,7 +121,8 @@
     }


-class XenialTestUefiLvmRootXfs(relbase.xenial, TestUefiLvmRootAbs):
+class Centos70XenialTestUefiLvmRootXfs(centos_relbase.centos70_xenial,
+                                       TestUefiLvmRootAbs):
     __test__ = True
     conf_replace = {
         '__BOOTFS_FORMAT__': 'ext4',
@@ -127,31 +130,31 @@
     }

-class XenialTestUefiLvmRootXfsBootXfs(relbase.xenial, TestUefiLvmRootAbs):
+class XenialTestUefiLvmRootExt4(relbase.xenial, TestUefiLvmRootAbs):
     __test__ = True
     conf_replace = {
-        '__BOOTFS_FORMAT__': 'xfs',  # Expected to fail until LP: #1652822
-        '__ROOTFS_FORMAT__': 'xfs',
+        '__BOOTFS_FORMAT__': 'ext4',
+        '__ROOTFS_FORMAT__': 'ext4',
     }

-    @classmethod
-    def setUpClass(cls):
-        cls.skip_by_date("1652822", fixby="2018-05-26")
-        super().setUpClass()
-

-class ArtfulTestUefiLvmRootExt4(relbase.artful, TestUefiLvmRootAbs):
+class XenialTestUefiLvmRootXfs(relbase.xenial, TestUefiLvmRootAbs):
     __test__ = True
     conf_replace = {
         '__BOOTFS_FORMAT__': 'ext4',
-        '__ROOTFS_FORMAT__': 'ext4',
+        '__ROOTFS_FORMAT__': 'xfs',
     }


-class ArtfulTestUefiLvmRootXfs(relbase.artful, TestUefiLvmRootAbs):
+@VMBaseClass.skip_by_date("1652822", fixby="2019-06-01", install=False)
+class XenialTestUefiLvmRootXfsBootXfs(relbase.xenial, TestUefiLvmRootAbs):
+    """This tests xfs root and xfs boot with uefi.
+
+    It is known broken (LP: #1652822) and unlikely to be fixed without
+    pushing, so we skip-by for a long time."""
     __test__ = True
     conf_replace = {
-        '__BOOTFS_FORMAT__': 'ext4',
+        '__BOOTFS_FORMAT__': 'xfs',
         '__ROOTFS_FORMAT__': 'xfs',
     }
diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_mdadm_bcache.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_mdadm_bcache.py
--- curtin-18.1-17-gae48e86f/tests/vmtests/test_mdadm_bcache.py 2018-05-18 18:40:48.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_mdadm_bcache.py 2019-02-15 20:42:42.000000000 +0000
@@ -2,32 +2,29 @@

 from . import VMBaseClass
 from .releases import base_vm_classes as relbase
-
+from .releases import centos_base_vm_classes as centos_relbase
 import textwrap


 class TestMdadmAbs(VMBaseClass):
     interactive = False
+    test_type = 'storage'
     active_mdadm = "1"
     dirty_disks = True
-    collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent("""
+    extra_collect_scripts = [textwrap.dedent("""
         cd OUTPUT_COLLECT_D
-        cat /etc/fstab > fstab
         mdadm --detail --scan > mdadm_status
         mdadm --detail --scan | grep -c ubuntu > mdadm_active1
         grep -c active /proc/mdstat > mdadm_active2
         ls /dev/disk/by-dname > ls_dname
-        ls -al /dev/disk/by-dname > lsal_dname
-        ls -al /dev/disk/by-uuid > lsal_uuid
-        find /etc/network/interfaces.d > find_interfacesd
         cat /proc/mdstat | tee mdstat
-        cat /proc/partitions | tee procpartitions
         ls -1 /sys/class/block | tee sys_class_block
         ls -1 /dev/md* | tee dev_md
         ls -al /sys/fs/bcache/* > lsal_sys_fs_bcache_star
         ls -al /dev/bcache* > lsal_dev_bcache_star
         ls -al /dev/bcache/by_uuid/* > lsal_dev_bcache_byuuid_star
-        cp -a /var/log/syslog .
+
+        exit 0
         """)]

     def test_mdadm_output_files_exist(self):
@@ -58,7 +55,8 @@
                      ('cached_array_2', 0),
                      ('cached_array_3', 0)]
     extra_disks = ['4G', '4G', '4G', '4G', '4G']
-    collect_scripts = TestMdadmAbs.collect_scripts + [textwrap.dedent("""
+    extra_collect_scripts = TestMdadmAbs.extra_collect_scripts + [
+        textwrap.dedent("""
         cd OUTPUT_COLLECT_D
         bcache-super-show /dev/vda6 > bcache_super_vda6
         bcache-super-show /dev/vda7 > bcache_super_vda7
@@ -67,9 +65,8 @@
         cat /sys/block/bcache0/bcache/cache_mode > bcache_cache_mode
         cat /sys/block/bcache1/bcache/cache_mode >> bcache_cache_mode
         cat /sys/block/bcache2/bcache/cache_mode >> bcache_cache_mode
-        cat /proc/mounts > proc_mounts
-        find /etc/network/interfaces.d > find_interfacesd
-        cp -a /etc/udev/rules.d etc_udev_rules.d
+
+        exit 0
         """)]
     fstab_expected = {
         '/dev/vda1': '/media/sda1',
@@ -129,14 +126,10 @@
         self.test_dname(disk_to_check=self.bcache_dnames)


+@VMBaseClass.skip_by_date("1754581", fixby="2020-01-22", install=False)
 class TrustyTestMdadmBcache(relbase.trusty, TestMdadmBcacheAbs):
     __test__ = True

-    @classmethod
-    def setUpClass(cls):
-        cls.skip_by_date("1754581", fixby="2018-06-22")
-        super().setUpClass()
-

 class TrustyHWEXTestMdadmBcache(relbase.trusty_hwe_x, TestMdadmBcacheAbs):
     __test__ = True
@@ -154,11 +147,15 @@
     __test__ = True


-class ArtfulTestMdadmBcache(relbase.artful, TestMdadmBcacheAbs):
+class BionicTestMdadmBcache(relbase.bionic, TestMdadmBcacheAbs):
     __test__ = True


-class BionicTestMdadmBcache(relbase.bionic, TestMdadmBcacheAbs):
+class CosmicTestMdadmBcache(relbase.cosmic, TestMdadmBcacheAbs):
+    __test__ = True
+
+
+class DiscoTestMdadmBcache(relbase.disco, TestMdadmBcacheAbs):
     __test__ = True

@@ -173,6 +170,11 @@
                      ('md0', 0)]


+class Centos70TestMirrorboot(centos_relbase.centos70_xenial,
+                             TestMirrorbootAbs):
+    __test__ = True
+
+
 class TrustyTestMirrorboot(relbase.trusty, TestMirrorbootAbs):
     __test__ = True

@@ -194,11 +196,15 @@
     __test__ = True


-class ArtfulTestMirrorboot(relbase.artful, TestMirrorbootAbs):
+class BionicTestMirrorboot(relbase.bionic, TestMirrorbootAbs):
+    __test__ = True
+
+
+class CosmicTestMirrorboot(relbase.cosmic, TestMirrorbootAbs):
     __test__ = True


-class BionicTestMirrorboot(relbase.bionic, TestMirrorbootAbs):
+class DiscoTestMirrorboot(relbase.disco, TestMirrorbootAbs):
     __test__ = True

@@ -212,6 +218,11 @@
                      ('md0', 2)]


+class Centos70TestMirrorbootPartitions(centos_relbase.centos70_xenial,
+                                       TestMirrorbootPartitionsAbs):
+    __test__ = True
+
+
 class TrustyTestMirrorbootPartitions(relbase.trusty,
                                      TestMirrorbootPartitionsAbs):
     __test__ = True
@@ -238,16 +249,21 @@
     __test__ = True


-class ArtfulTestMirrorbootPartitions(relbase.artful,
+class BionicTestMirrorbootPartitions(relbase.bionic,
                                      TestMirrorbootPartitionsAbs):
     __test__ = True


-class BionicTestMirrorbootPartitions(relbase.bionic,
+class CosmicTestMirrorbootPartitions(relbase.cosmic,
                                      TestMirrorbootPartitionsAbs):
     __test__ = True


+class DiscoTestMirrorbootPartitions(relbase.disco,
+                                    TestMirrorbootPartitionsAbs):
+    __test__ = True
+
+
 class TestMirrorbootPartitionsUEFIAbs(TestMdadmAbs):
     # alternative config for more complex setup
     conf_file = "examples/tests/mirrorboot-uefi.yaml"
@@ -263,6 +279,11 @@
     dirty_disks = True


+class Centos70TestMirrorbootPartitionsUEFI(centos_relbase.centos70_xenial,
+                                           TestMirrorbootPartitionsUEFIAbs):
+    __test__ = True
+
+
 class TrustyTestMirrorbootPartitionsUEFI(relbase.trusty,
                                          TestMirrorbootPartitionsUEFIAbs):
     __test__ = True
@@ -283,16 +304,21 @@
     __test__ = True


-class ArtfulTestMirrorbootPartitionsUEFI(relbase.artful,
+class BionicTestMirrorbootPartitionsUEFI(relbase.bionic,
                                          TestMirrorbootPartitionsUEFIAbs):
     __test__ = True


-class BionicTestMirrorbootPartitionsUEFI(relbase.bionic,
+class CosmicTestMirrorbootPartitionsUEFI(relbase.cosmic,
                                          TestMirrorbootPartitionsUEFIAbs):
     __test__ = True


+class DiscoTestMirrorbootPartitionsUEFI(relbase.disco,
+                                        TestMirrorbootPartitionsUEFIAbs):
+    __test__ = True
+
+
 class TestRaid5bootAbs(TestMdadmAbs):
     # alternative config for more complex setup
     conf_file = "examples/tests/raid5boot.yaml"
@@ -305,11 +331,15 @@
                      ('md0', 0)]


-class TrustyTestRaid5Boot(relbase.trusty, TestRaid5bootAbs):
+class Centos70TestRaid5boot(centos_relbase.centos70_xenial, TestRaid5bootAbs):
     __test__ = True


-class TrustyHWEXTestRaid5Boot(relbase.trusty_hwe_x, TrustyTestRaid5Boot):
+class TrustyTestRaid5boot(relbase.trusty, TestRaid5bootAbs):
+    __test__ = True
+
+
+class TrustyHWEXTestRaid5boot(relbase.trusty_hwe_x, TrustyTestRaid5boot):
     # This tests kernel upgrade in target
     __test__ = True
@@ -326,11 +356,15 @@
     __test__ = True


-class ArtfulTestRaid5boot(relbase.artful, TestRaid5bootAbs):
+class BionicTestRaid5boot(relbase.bionic, TestRaid5bootAbs):
     __test__ = True


-class BionicTestRaid5boot(relbase.bionic, TestRaid5bootAbs):
+class CosmicTestRaid5boot(relbase.cosmic, TestRaid5bootAbs):
+    __test__ = True
+
+
+class DiscoTestRaid5boot(relbase.disco, TestRaid5bootAbs):
     __test__ = True

@@ -345,10 +379,13 @@
                      ('third_disk', 1),
                      ('fourth_disk', 1),
                      ('md0', 0)]
-    collect_scripts = TestMdadmAbs.collect_scripts + [textwrap.dedent("""
+    extra_collect_scripts = (
+        TestMdadmAbs.extra_collect_scripts + [textwrap.dedent("""
         cd OUTPUT_COLLECT_D
         mdadm --detail --scan > mdadm_detail
-        """)]
+
+        exit 0
+        """)])

     def test_raid6_output_files_exist(self):
         self.output_files_exist(
@@ -359,6 +396,10 @@
         self.check_file_regex("mdadm_detail", r"ubuntu:foobar")


+class Centos70TestRaid6boot(centos_relbase.centos70_xenial, TestRaid6bootAbs):
+    __test__ = True
+
+
 class TrustyTestRaid6boot(relbase.trusty, TestRaid6bootAbs):
     __test__ = True

@@ -379,11 +420,15 @@
     __test__ = True


-class ArtfulTestRaid6boot(relbase.artful, TestRaid6bootAbs):
+class BionicTestRaid6boot(relbase.bionic, TestRaid6bootAbs):
     __test__ = True


-class BionicTestRaid6boot(relbase.bionic, TestRaid6bootAbs):
+class CosmicTestRaid6boot(relbase.cosmic, TestRaid6bootAbs):
+    __test__ = True
+
+
+class DiscoTestRaid6boot(relbase.disco, TestRaid6bootAbs):
     __test__ = True

@@ -400,6 +445,11 @@
                      ('md0', 0)]


+class Centos70TestRaid10boot(centos_relbase.centos70_xenial,
+                             TestRaid10bootAbs):
+    __test__ = True
+
+
 class TrustyTestRaid10boot(relbase.trusty, TestRaid10bootAbs):
     __test__ = True

@@ -420,11 +470,15 @@
     __test__ = True


-class ArtfulTestRaid10boot(relbase.artful, TestRaid10bootAbs):
+class BionicTestRaid10boot(relbase.bionic, TestRaid10bootAbs):
     __test__ = True


-class BionicTestRaid10boot(relbase.bionic, TestRaid10bootAbs):
+class CosmicTestRaid10boot(relbase.cosmic, TestRaid10bootAbs):
+    __test__ = True
+
+
+class DiscoTestRaid10boot(relbase.disco, TestRaid10bootAbs):
     __test__ = True

@@ -464,7 +518,8 @@
                      ('vg1-lv1', 0),
                      ('vg1-lv2', 0)]

-    collect_scripts = TestMdadmAbs.collect_scripts + [textwrap.dedent("""
+    extra_collect_scripts = (
+        TestMdadmAbs.extra_collect_scripts + [textwrap.dedent("""
         cd OUTPUT_COLLECT_D
         pvdisplay -C --separator = -o vg_name,pv_name --noheadings > pvs
         lvdisplay -C --separator = -o lv_name,vg_name --noheadings > lvs
@@ -474,7 +529,9 @@
         mkdir -p /tmp/xfstest
         mount /dev/mapper/dmcrypt0 /tmp/xfstest
         xfs_info /tmp/xfstest/ > xfs_info
-        """)]
+
+        exit 0
+        """)])
     fstab_expected = {
         '/dev/vg1/lv1': '/srv/data',
         '/dev/vg1/lv2': '/srv/backup',
@@ -521,11 +578,15 @@
     __test__ = True


-class ArtfulTestAllindata(relbase.artful, TestAllindataAbs):
+class BionicTestAllindata(relbase.bionic, TestAllindataAbs):
+    __test__ = True
+
+
+class CosmicTestAllindata(relbase.cosmic, TestAllindataAbs):
     __test__ = True


-class BionicTestAllindata(relbase.bionic, TestAllindataAbs):
+class DiscoTestAllindata(relbase.disco, TestAllindataAbs):
     __test__ = True

 # vi: ts=4 expandtab syntax=python
diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_mdadm_iscsi.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_mdadm_iscsi.py
--- curtin-18.1-17-gae48e86f/tests/vmtests/test_mdadm_iscsi.py 2018-05-18 18:40:48.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_mdadm_iscsi.py 2019-02-15 20:42:42.000000000 +0000
@@ -1,6 +1,7 @@
 # This file is part of curtin. See LICENSE file for copyright and license info.

 from .releases import base_vm_classes as relbase
+from .releases import centos_base_vm_classes as centos_relbase
 from .test_mdadm_bcache import TestMdadmAbs
 from .test_iscsi import TestBasicIscsiAbs

@@ -17,12 +18,20 @@
     conf_file = "examples/tests/mdadm_iscsi.yaml"
     nr_testfiles = 1

-    collect_scripts = TestMdadmAbs.collect_scripts
-    collect_scripts += TestBasicIscsiAbs.collect_scripts + [textwrap.dedent(
-        """
-        cd OUTPUT_COLLECT_D
-        ls -al /sys/class/block/md*/slaves/ > md_slaves
-        """)]
+    extra_collect_scripts = (
+        TestMdadmAbs.extra_collect_scripts +
+        TestBasicIscsiAbs.extra_collect_scripts +
+        [textwrap.dedent("""
+            cd OUTPUT_COLLECT_D
+            ls -al /sys/class/block/md*/slaves/ > md_slaves
+
+            exit 0
+            """)])
+
+
+class Centos70TestIscsiMdadm(centos_relbase.centos70_xenial,
+                             TestMdadmIscsiAbs):
+    __test__ = True


 class TrustyTestIscsiMdadm(relbase.trusty, TestMdadmIscsiAbs):
@@ -41,11 +50,15 @@
     __test__ = True


-class ArtfulTestIscsiMdadm(relbase.artful, TestMdadmIscsiAbs):
+class BionicTestIscsiMdadm(relbase.bionic, TestMdadmIscsiAbs):
     __test__ = True


-class BionicTestIscsiMdadm(relbase.bionic, TestMdadmIscsiAbs):
+class CosmicTestIscsiMdadm(relbase.cosmic, TestMdadmIscsiAbs):
+    __test__ = True
+
+
+class DiscoTestIscsiMdadm(relbase.disco, TestMdadmIscsiAbs):
     __test__ = True

 # vi: ts=4 expandtab syntax=python
diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_multipath.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_multipath.py
--- curtin-18.1-17-gae48e86f/tests/vmtests/test_multipath.py 2018-05-18 18:40:48.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_multipath.py 2019-02-15 20:42:42.000000000 +0000
@@ -2,39 +2,33 @@

 from . import VMBaseClass
 from .releases import base_vm_classes as relbase
+from .releases import centos_base_vm_classes as centos_relbase
+from unittest import SkipTest
+import os
 import textwrap


 class TestMultipathBasicAbs(VMBaseClass):
     conf_file = "examples/tests/multipath.yaml"
+    test_type = 'storage'
     multipath = True
     disk_driver = 'scsi-hd'
     extra_disks = []
     nvme_disks = []
-    collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent("""
+    extra_collect_scripts = [textwrap.dedent("""
         cd OUTPUT_COLLECT_D
-        blkid -o export /dev/sda > blkid_output_sda
-        blkid -o export /dev/sda1 > blkid_output_sda1
-        blkid -o export /dev/sda2 > blkid_output_sda2
-        blkid -o export /dev/sdb > blkid_output_sdb
-        blkid -o export /dev/sdb1 > blkid_output_sdb1
-        blkid -o export /dev/sdb2 > blkid_output_sdb2
-        dmsetup ls > dmsetup_ls
-        dmsetup info > dmsetup_info
-        cat /proc/partitions > proc_partitions
         multipath -ll > multipath_ll
         multipath -v3 -ll > multipath_v3_ll
         multipath -r > multipath_r
         cp -a /etc/multipath* .
-        ls -al /dev/disk/by-uuid/ > ls_uuid
-        ls -al /dev/disk/by-id/ > ls_disk_id
         readlink -f /sys/class/block/sda/holders/dm-0 > holders_sda
         readlink -f /sys/class/block/sdb/holders/dm-0 > holders_sdb
-        cat /etc/fstab > fstab
-        mkdir -p /dev/disk/by-dname
-        ls /dev/disk/by-dname/ > ls_dname
-        find /etc/network/interfaces.d > find_interfacesd
+        command -v systemctl && {
+            systemctl show -- home.mount > systemctl_show_home.mount;
+            systemctl status --full home.mount > systemctl_status_home.mount
+        }
+        exit 0
         """)]

     def test_multipath_disks_match(self):
@@ -44,6 +38,31 @@
         print('sdb holders:\n%s' % sdb_data)
         self.assertEqual(sda_data, sdb_data)

+    def test_home_mount_unit(self):
+        unit_file = 'systemctl_show_home.mount'
+        if not os.path.exists(self.collect_path(unit_file)):
+            raise SkipTest(
+                'target_release=%s does not use systemd' % self.target_release)
+
+        # We can't use load_shell_content as systemctl show output
+        # does not quote values even though it's in Key=Value format
+        content = self.load_collect_file(unit_file)
+        expected_results = {
+            'ActiveState': 'active',
+            'Result': 'success',
+            'SubState': 'mounted',
+        }
+        show = {key: value for key, value in
+                [line.split('=') for line in content.splitlines()
+                 if line.split('=')[0] in expected_results.keys()]}
+
+        self.assertEqual(sorted(expected_results), sorted(show))
+
+
+class Centos70TestMultipathBasic(centos_relbase.centos70_xenial,
+                                 TestMultipathBasicAbs):
+    __test__ = True
+

 class TrustyTestMultipathBasic(relbase.trusty, TestMultipathBasicAbs):
     __test__ = True
@@ -66,11 +85,15 @@
     __test__ = True


-class ArtfulTestMultipathBasic(relbase.artful, TestMultipathBasicAbs):
+class BionicTestMultipathBasic(relbase.bionic, TestMultipathBasicAbs):
     __test__ = True


-class BionicTestMultipathBasic(relbase.bionic, TestMultipathBasicAbs):
+class CosmicTestMultipathBasic(relbase.cosmic, TestMultipathBasicAbs):
+    __test__ = True
+
+
+class DiscoTestMultipathBasic(relbase.disco, TestMultipathBasicAbs):
     __test__ = True

 # vi: ts=4 expandtab syntax=python
diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_network_alias.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_network_alias.py
--- curtin-18.1-17-gae48e86f/tests/vmtests/test_network_alias.py 2018-05-18 18:40:48.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_network_alias.py 2019-02-15 20:42:42.000000000 +0000
@@ -19,25 +19,27 @@

 class CentosTestNetworkAliasAbs(TestNetworkAliasAbs):
-    collect_scripts = TestNetworkAliasAbs.collect_scripts + [
+    extra_collect_scripts = TestNetworkAliasAbs.extra_collect_scripts + [
         textwrap.dedent("""
             cd OUTPUT_COLLECT_D
             cp -a /etc/sysconfig/network-scripts .
             cp -a /var/log/cloud-init* .
             cp -a /var/lib/cloud ./var_lib_cloud
             cp -a /run/cloud-init ./run_cloud-init
+
+            exit 0
             """)]

     def test_etc_resolvconf(self):
         pass


-class Centos66TestNetworkAlias(centos_relbase.centos66fromxenial,
+class Centos66TestNetworkAlias(centos_relbase.centos66_xenial,
                                CentosTestNetworkAliasAbs):
     __test__ = True


-class Centos70TestNetworkAlias(centos_relbase.centos70fromxenial,
+class Centos70TestNetworkAlias(centos_relbase.centos70_xenial,
                                CentosTestNetworkAliasAbs):
     __test__ = True

@@ -69,11 +71,15 @@
     __test__ = True


-class ArtfulTestNetworkAlias(relbase.artful, TestNetworkAliasAbs):
+class BionicTestNetworkAlias(relbase.bionic, TestNetworkAliasAbs):
     __test__ = True


-class BionicTestNetworkAlias(relbase.bionic, TestNetworkAliasAbs):
+class CosmicTestNetworkAlias(relbase.cosmic, TestNetworkAliasAbs):
+    __test__ = True
+
+
+class DiscoTestNetworkAlias(relbase.disco, TestNetworkAliasAbs):
     __test__ = True

 # vi: ts=4 expandtab syntax=python
diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_network_bonding.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_network_bonding.py
--- curtin-18.1-17-gae48e86f/tests/vmtests/test_network_bonding.py 2018-05-18 18:40:48.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_network_bonding.py 2019-02-15 20:42:42.000000000 +0000
@@ -10,13 +10,22 @@

 class TestNetworkBondingAbs(TestNetworkBaseTestsAbs):
     conf_file = "examples/tests/bonding_network.yaml"

-    def test_ifenslave_installed(self):
-        self.assertIn("ifenslave", self.debian_packages,
-                      "ifenslave deb not installed")
+    def test_ifenslave_package_status(self):
+        """ifenslave is expected installed in Ubuntu < artful."""
+        rel = self.target_release
+        pkg = "ifenslave"
+        if rel in ("precise", "trusty", "xenial"):
+            self.assertIn(
+                pkg, self.debian_packages,
+                "%s package expected in %s but not found" % (pkg, rel))
+        else:
+            self.assertNotIn(
+                pkg, self.debian_packages,
+                "%s package found but not expected in %s" % (pkg, rel))


 class CentosTestNetworkBondingAbs(TestNetworkBondingAbs):
-    collect_scripts = TestNetworkBondingAbs.collect_scripts + [
+    extra_collect_scripts = TestNetworkBondingAbs.extra_collect_scripts + [
         textwrap.dedent("""
             cd OUTPUT_COLLECT_D
             cp -a /etc/sysconfig/network-scripts .
@@ -24,9 +33,11 @@
             cp -a /var/lib/cloud ./var_lib_cloud
             cp -a /run/cloud-init ./run_cloud-init
             rpm -qf `which ifenslave` |tee ifenslave_installed
+
+            exit 0
             """)]

-    def test_ifenslave_installed(self):
+    def test_ifenslave_package_status(self):
         status = self.load_collect_file("ifenslave_installed")
         self.logger.debug('ifenslave installed: {}'.format(status))
         self.assertTrue('iputils' in status)
@@ -62,40 +73,24 @@
     __test__ = True


-class ArtfulTestBonding(relbase.artful, TestNetworkBondingAbs):
+class BionicTestBonding(relbase.bionic, TestNetworkBondingAbs):
     __test__ = True

-    def test_ifenslave_installed(self):
-        """Artful should not have ifenslave installed."""
-        pass
-
-    def test_ifenslave_not_installed(self):
-        """Confirm that ifenslave is not installed on artful"""
-        self.assertNotIn('ifenslave', self.debian_packages,
-                         "ifenslave is not expected in artful: %s" %
-                         self.debian_packages.get('ifenslave'))
-

-class BionicTestBonding(relbase.bionic, TestNetworkBondingAbs):
+class CosmicTestBonding(relbase.cosmic, TestNetworkBondingAbs):
     __test__ = True

-    def test_ifenslave_installed(self):
-        """Bionic should not have ifenslave installed."""
-        pass

-    def test_ifenslave_not_installed(self):
-        """Confirm that ifenslave is not installed on bionic"""
-        self.assertNotIn('ifenslave', self.debian_packages,
-                         "ifenslave is not expected in bionic: %s" %
-                         self.debian_packages.get('ifenslave'))
+class DiscoTestBonding(relbase.disco, TestNetworkBondingAbs):
+    __test__ = True


-class Centos66TestNetworkBonding(centos_relbase.centos66fromxenial,
+class Centos66TestNetworkBonding(centos_relbase.centos66_xenial,
                                  CentosTestNetworkBondingAbs):
     __test__ = True


-class Centos70TestNetworkBonding(centos_relbase.centos70fromxenial,
+class Centos70TestNetworkBonding(centos_relbase.centos70_xenial,
                                  CentosTestNetworkBondingAbs):
     __test__ = True

diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_network_bridging.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_network_bridging.py
--- curtin-18.1-17-gae48e86f/tests/vmtests/test_network_bridging.py 2018-05-18 18:40:48.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_network_bridging.py 2019-02-15 20:42:42.000000000 +0000
@@ -96,12 +96,14 @@

 class TestBridgeNetworkAbs(TestNetworkBaseTestsAbs):
     conf_file = "examples/tests/bridging_network.yaml"
-    collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [
+    extra_collect_scripts = TestNetworkBaseTestsAbs.extra_collect_scripts + [
         textwrap.dedent("""
             cd OUTPUT_COLLECT_D
             grep -r . /sys/class/net/br0 > sysfs_br0
             grep -r . /sys/class/net/br0/brif/eth1 > sysfs_br0_eth1
             grep -r . /sys/class/net/br0/brif/eth2 > sysfs_br0_eth2
+
+            exit 0
             """)]

     def test_output_files_exist_bridge(self):
@@ -109,9 +111,18 @@
                                  "sysfs_br0_eth1",
                                  "sysfs_br0_eth2"])

-    def test_bridge_utils_installed(self):
-        self.assertIn("bridge-utils", self.debian_packages,
-                      "bridge-utilsi deb not installed")
+    def test_bridge_package_status(self):
+        """bridge-utils is expected installed in Ubuntu < artful."""
+        rel = self.target_release
+        pkg = "bridge-utils"
+        if rel in ("precise", "trusty", "xenial"):
+            self.assertIn(
+                pkg, self.debian_packages,
+                "%s package expected in %s but not found" % (pkg, rel))
+        else:
+            self.assertNotIn(
+                pkg, self.debian_packages,
+                "%s package found but not expected in %s" % (pkg, rel))

     def test_bridge_params(self):

@@ -132,11 +143,10 @@
             return br0

         def _get_bridge_params(br):
-            release = (
-                self.target_release if self.target_release else self.release)
             bridge_params_uncheckable = default_bridge_params_uncheckable
             bridge_params_uncheckable.extend(
-                release_to_bridge_params_uncheckable.get(release, []))
+                release_to_bridge_params_uncheckable.get(
+                    self.target_release, []))
             return [p for p in br.keys()
                     if (p.startswith('bridge_') and
                         p not in bridge_params_uncheckable)]
@@ -184,7 +194,7 @@

 class CentosTestBridgeNetworkAbs(TestBridgeNetworkAbs):
-    collect_scripts = TestBridgeNetworkAbs.collect_scripts + [
+    extra_collect_scripts = TestBridgeNetworkAbs.extra_collect_scripts + [
         textwrap.dedent("""
             cd OUTPUT_COLLECT_D
             cp -a /etc/sysconfig/network-scripts .
@@ -192,6 +202,8 @@
             cp -a /var/lib/cloud ./var_lib_cloud
             cp -a /run/cloud-init ./run_cloud-init
             rpm -qf `which brctl` |tee bridge-utils_installed
+
+            exit 0
             """)]

     def test_etc_network_interfaces(self):
@@ -200,48 +212,35 @@
     def test_etc_resolvconf(self):
         pass

-    def test_bridge_utils_installed(self):
+    def test_bridge_package_status(self):
+        """bridge-utils is expected installed in centos."""
         self.output_files_exist(["bridge-utils_installed"])
         status = self.load_collect_file("bridge-utils_installed").strip()
         self.logger.debug('bridge-utils installed: {}'.format(status))
         self.assertTrue('bridge' in status)


-class Centos66TestBridgeNetwork(centos_relbase.centos66fromxenial,
+class Centos66TestBridgeNetwork(centos_relbase.centos66_xenial,
                                 CentosTestBridgeNetworkAbs):
     __test__ = True


-class Centos70TestBridgeNetwork(centos_relbase.centos70fromxenial,
+class Centos70TestBridgeNetwork(centos_relbase.centos70_xenial,
                                 CentosTestBridgeNetworkAbs):
     __test__ = True


 # only testing Yakkety or newer as older releases do not yet
 # have updated ifupdown/bridge-utils packages;
-class ArtfulTestBridging(relbase.artful, TestBridgeNetworkAbs):
+class BionicTestBridging(relbase.bionic, TestBridgeNetworkAbs):
     __test__ = True

-    def test_bridge_utils_installed(self):
-        """bridge-utils not needed in artful."""
-        pass
-
-    def test_bridge_utils_not_installed(self):
-        self.assertNotIn("bridge-utils", self.debian_packages,
-                         "bridge-utils is not expected in artful: %s" %
-                         self.debian_packages.get('bridge-utils'))
-

-class BionicTestBridging(relbase.bionic, TestBridgeNetworkAbs):
+class CosmicTestBridging(relbase.cosmic, TestBridgeNetworkAbs):
     __test__ = True

-    def test_bridge_utils_installed(self):
-        """bridge-utils not needed in bionic."""
-        pass

-    def test_bridge_utils_not_installed(self):
-        self.assertNotIn("bridge-utils", self.debian_packages,
-                         "bridge-utils is not expected in bionic: %s" %
-                         self.debian_packages.get('bridge-utils'))
+class DiscoTestBridging(relbase.disco, TestBridgeNetworkAbs):
+    __test__ = True

 # vi: ts=4 expandtab syntax=python
diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_network_ipv6.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_network_ipv6.py
--- curtin-18.1-17-gae48e86f/tests/vmtests/test_network_ipv6.py 2018-05-18 18:40:48.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_network_ipv6.py 2019-02-15 20:42:42.000000000 +0000
@@ -16,22 +16,26 @@
     - all IP is static
     """
     conf_file = "examples/network-ipv6-bond-vlan.yaml"
-    collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [
+    extra_collect_scripts = TestNetworkBaseTestsAbs.extra_collect_scripts + [
         textwrap.dedent("""
             grep . -r /sys/class/net/bond0/ > sysfs_bond0 || :
            grep . -r /sys/class/net/bond0.108/ > sysfs_bond0.108 || :
             grep . -r /sys/class/net/bond0.208/ > sysfs_bond0.208 || :
+
+            exit 0
             """)]


 class CentosTestNetworkIPV6Abs(TestNetworkIPV6Abs):
-    collect_scripts = TestNetworkIPV6Abs.collect_scripts + [
+    extra_collect_scripts = TestNetworkIPV6Abs.extra_collect_scripts + [
         textwrap.dedent("""
             cd OUTPUT_COLLECT_D
             cp -a /etc/sysconfig/network-scripts .
             cp -a /var/log/cloud-init* .
             cp -a /var/lib/cloud ./var_lib_cloud
             cp -a /run/cloud-init ./run_cloud-init
+
+            exit 0
             """)]

     def test_etc_network_interfaces(self):
@@ -64,20 +68,24 @@
     __test__ = True


-class ArtfulTestNetworkIPV6(relbase.artful, TestNetworkIPV6Abs):
+class BionicTestNetworkIPV6(relbase.bionic, TestNetworkIPV6Abs):
     __test__ = True


-class BionicTestNetworkIPV6(relbase.bionic, TestNetworkIPV6Abs):
+class CosmicTestNetworkIPV6(relbase.cosmic, TestNetworkIPV6Abs):
+    __test__ = True
+
+
+class DiscoTestNetworkIPV6(relbase.disco, TestNetworkIPV6Abs):
     __test__ = True


-class Centos66TestNetworkIPV6(centos_relbase.centos66fromxenial,
+class Centos66TestNetworkIPV6(centos_relbase.centos66_xenial,
                               CentosTestNetworkIPV6Abs):
     __test__ = True


-class Centos70TestNetworkIPV6(centos_relbase.centos70fromxenial,
+class Centos70TestNetworkIPV6(centos_relbase.centos70_xenial,
                               CentosTestNetworkIPV6Abs):
     __test__ = True

diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_network_ipv6_static.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_network_ipv6_static.py
--- curtin-18.1-17-gae48e86f/tests/vmtests/test_network_ipv6_static.py 2018-05-18 18:40:48.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_network_ipv6_static.py 2019-02-15 20:42:42.000000000 +0000
@@ -46,20 +46,24 @@
     __test__ = True


-class ArtfulTestNetworkIPV6Static(relbase.artful, TestNetworkIPV6StaticAbs):
+class BionicTestNetworkIPV6Static(relbase.bionic, TestNetworkIPV6StaticAbs):
     __test__ = True


-class BionicTestNetworkIPV6Static(relbase.bionic, TestNetworkIPV6StaticAbs):
+class CosmicTestNetworkIPV6Static(relbase.cosmic, TestNetworkIPV6StaticAbs):
+    __test__ = True
+
+
+class DiscoTestNetworkIPV6Static(relbase.disco, TestNetworkIPV6StaticAbs):
     __test__ = True


-class Centos66TestNetworkIPV6Static(centos_relbase.centos66fromxenial,
+class Centos66TestNetworkIPV6Static(centos_relbase.centos66_xenial,
                                     CentosTestNetworkIPV6StaticAbs):
     __test__ = True


-class Centos70TestNetworkIPV6Static(centos_relbase.centos70fromxenial,
+class Centos70TestNetworkIPV6Static(centos_relbase.centos70_xenial,
                                     CentosTestNetworkIPV6StaticAbs):
     __test__ = True

diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_network_ipv6_vlan.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_network_ipv6_vlan.py
--- curtin-18.1-17-gae48e86f/tests/vmtests/test_network_ipv6_vlan.py 2018-05-18 18:40:48.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_network_ipv6_vlan.py 2019-02-15 20:42:42.000000000 +0000
@@ -27,20 +27,24 @@
     __test__ = True


-class ArtfulTestNetworkIPV6Vlan(relbase.artful, TestNetworkIPV6VlanAbs):
+class BionicTestNetworkIPV6Vlan(relbase.bionic, TestNetworkIPV6VlanAbs):
     __test__ = True


-class BionicTestNetworkIPV6Vlan(relbase.bionic, TestNetworkIPV6VlanAbs):
+class CosmicTestNetworkIPV6Vlan(relbase.cosmic, TestNetworkIPV6VlanAbs):
+    __test__ = True
+
+
+class DiscoTestNetworkIPV6Vlan(relbase.disco, TestNetworkIPV6VlanAbs):
     __test__ = True


-class Centos66TestNetworkIPV6Vlan(centos_relbase.centos66fromxenial,
+class Centos66TestNetworkIPV6Vlan(centos_relbase.centos66_xenial,
                                   CentosTestNetworkIPV6VlanAbs):
     __test__ = True


-class Centos70TestNetworkIPV6Vlan(centos_relbase.centos70fromxenial,
+class Centos70TestNetworkIPV6Vlan(centos_relbase.centos70_xenial,
                                   CentosTestNetworkIPV6VlanAbs):
     __test__ = True

diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_network_mtu.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_network_mtu.py
--- curtin-18.1-17-gae48e86f/tests/vmtests/test_network_mtu.py 2018-05-18 18:40:48.000000000 +0000
+++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_network_mtu.py 2019-02-15 20:42:42.000000000 +0000
@@ -25,7 +25,8 @@
        ipv6 first, then ipv4 with mtu.
     """
     conf_file = "examples/tests/network_mtu.yaml"
-    collect_scripts = TestNetworkIPV6Abs.collect_scripts + [textwrap.dedent("""
+    extra_collect_scripts = TestNetworkIPV6Abs.extra_collect_scripts + [
+        textwrap.dedent("""
             cd OUTPUT_COLLECT_D
             proc_v6="/proc/sys/net/ipv6/conf"
             for f in `seq 0 7`; do
@@ -35,6 +36,8 @@
             if [ -e /var/log/upstart ]; then
                cp -a /var/log/upstart ./var_log_upstart
             fi
+
+            exit 0
             """)]

     def _load_mtu_data(self, ifname):
@@ -120,13 +123,15 @@

 class CentosTestNetworkMtuAbs(TestNetworkMtuAbs):
     conf_file = "examples/tests/network_mtu.yaml"
-    collect_scripts = TestNetworkMtuAbs.collect_scripts + [
+    extra_collect_scripts = TestNetworkMtuAbs.extra_collect_scripts + [
         textwrap.dedent("""
             cd OUTPUT_COLLECT_D
             cp -a /etc/sysconfig/network-scripts .
             cp -a /var/log/cloud-init* .
cp -a /var/lib/cloud ./var_lib_cloud cp -a /run/cloud-init ./run_cloud-init + + exit 0 """)] def test_etc_network_interfaces(self): @@ -189,30 +194,27 @@ __test__ = True -class ArtfulTestNetworkMtu(relbase.artful, TestNetworkMtuAbs): +@TestNetworkMtuAbs.skip_by_date("1671951", fixby="2019-05-02") +class BionicTestNetworkMtu(relbase.bionic, TestNetworkMtuAbs): __test__ = True - @classmethod - def setUpClass(cls): - cls.skip_by_date("1671951", fixby="2018-05-26") - super().setUpClass() - -class BionicTestNetworkMtu(relbase.bionic, TestNetworkMtuAbs): +@TestNetworkMtuAbs.skip_by_date("1671951", fixby="2019-05-02") +class CosmicTestNetworkMtu(relbase.cosmic, TestNetworkMtuAbs): __test__ = True - @classmethod - def setUpClass(cls): - cls.skip_by_date("1671951", fixby="2018-05-26") - super().setUpClass() + +@TestNetworkMtuAbs.skip_by_date("1671951", fixby="2019-05-02") +class DiscoTestNetworkMtu(relbase.disco, TestNetworkMtuAbs): + __test__ = True -class Centos66TestNetworkMtu(centos_relbase.centos66fromxenial, +class Centos66TestNetworkMtu(centos_relbase.centos66_xenial, CentosTestNetworkMtuAbs): __test__ = True -class Centos70TestNetworkMtu(centos_relbase.centos70fromxenial, +class Centos70TestNetworkMtu(centos_relbase.centos70_xenial, CentosTestNetworkMtuAbs): __test__ = True diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_network.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_network.py --- curtin-18.1-17-gae48e86f/tests/vmtests/test_network.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_network.py 2019-02-15 20:42:42.000000000 +0000 @@ -17,20 +17,16 @@ class TestNetworkBaseTestsAbs(VMBaseClass): interactive = False + test_type = 'network' extra_disks = [] extra_nics = [] # XXX: command | tee output is required for Centos under SELinux # http://danwalsh.livejournal.com/22860.html - collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" + extra_collect_scripts = [textwrap.dedent(""" cd OUTPUT_COLLECT_D echo "waiting for ipv6 to settle" && sleep 5 route -n | tee first_route_n - ifconfig -a | tee ifconfig_a ip link show | tee ip_link_show - ip a | tee ip_a - find /etc/network/interfaces.d > find_interfacesd - cp -av /etc/network/interfaces . - cp -av /etc/network/interfaces.d . cp /etc/resolv.conf . cp -av /etc/udev/rules.d/70-persistent-net.rules . ||: ip -o route show | tee ip_route_show @@ -41,7 +37,6 @@ cp -av /var/log/upstart ./upstart ||: cp -av /etc/cloud ./etc_cloud cp -av /var/log/cloud*.log ./ - rpm -q --queryformat '%{VERSION}\n' cloud-init |tee rpm_ci_version V=/usr/lib/python*/*-packages/cloudinit/version.py; grep -c NETWORK_CONFIG_V2 $V |tee cloudinit_passthrough_available mkdir -p etc_netplan @@ -54,12 +49,12 @@ cp -a /etc/systemd ./etc_systemd ||: journalctl --no-pager -b -x | tee journalctl_out sleep 10 && ip a | tee ip_a + + exit 0 """)] def test_output_files_exist(self): self.output_files_exist([ - "find_interfacesd", - "ifconfig_a", "ip_a", "ip_route_show", "route_6_n", @@ -437,14 +432,6 @@ class CentosTestNetworkBasicAbs(TestNetworkBaseTestsAbs): conf_file = "examples/tests/centos_basic.yaml" - collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [ - textwrap.dedent(""" - cd OUTPUT_COLLECT_D - cp -a /etc/sysconfig/network-scripts . - cp -a /var/log/cloud-init* . 
- cp -a /var/lib/cloud ./var_lib_cloud - cp -a /run/cloud-init ./run_cloud-init - """)] def test_etc_network_interfaces(self): pass @@ -480,20 +467,24 @@ __test__ = True -class ArtfulTestNetworkBasic(relbase.artful, TestNetworkBasicAbs): +class BionicTestNetworkBasic(relbase.bionic, TestNetworkBasicAbs): __test__ = True -class BionicTestNetworkBasic(relbase.bionic, TestNetworkBasicAbs): +class CosmicTestNetworkBasic(relbase.cosmic, TestNetworkBasicAbs): + __test__ = True + + +class DiscoTestNetworkBasic(relbase.disco, TestNetworkBasicAbs): __test__ = True -class Centos66TestNetworkBasic(centos_relbase.centos66fromxenial, +class Centos66TestNetworkBasic(centos_relbase.centos66_xenial, CentosTestNetworkBasicAbs): __test__ = True -class Centos70TestNetworkBasic(centos_relbase.centos70fromxenial, +class Centos70TestNetworkBasic(centos_relbase.centos70_xenial, CentosTestNetworkBasicAbs): __test__ = True diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_network_static.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_network_static.py --- curtin-18.1-17-gae48e86f/tests/vmtests/test_network_static.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_network_static.py 2019-02-15 20:42:42.000000000 +0000 @@ -3,7 +3,6 @@ from .releases import base_vm_classes as relbase from .releases import centos_base_vm_classes as centos_relbase from .test_network import TestNetworkBaseTestsAbs -import textwrap class TestNetworkStaticAbs(TestNetworkBaseTestsAbs): @@ -13,14 +12,6 @@ class CentosTestNetworkStaticAbs(TestNetworkStaticAbs): - collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [ - textwrap.dedent(""" - cd OUTPUT_COLLECT_D - cp -a /etc/sysconfig/network-scripts . - cp -a /var/log/cloud-init* . - cp -a /var/lib/cloud ./var_lib_cloud - cp -a /run/cloud-init ./run_cloud-init - """)] def test_etc_network_interfaces(self): pass @@ -60,20 +51,24 @@ __test__ = True -class ArtfulTestNetworkStatic(relbase.artful, TestNetworkStaticAbs): +class BionicTestNetworkStatic(relbase.bionic, TestNetworkStaticAbs): __test__ = True -class BionicTestNetworkStatic(relbase.bionic, TestNetworkStaticAbs): +class CosmicTestNetworkStatic(relbase.cosmic, TestNetworkStaticAbs): + __test__ = True + + +class DiscoTestNetworkStatic(relbase.disco, TestNetworkStaticAbs): __test__ = True -class Centos66TestNetworkStatic(centos_relbase.centos66fromxenial, +class Centos66TestNetworkStatic(centos_relbase.centos66_xenial, CentosTestNetworkStaticAbs): __test__ = True -class Centos70TestNetworkStatic(centos_relbase.centos70fromxenial, +class Centos70TestNetworkStatic(centos_relbase.centos70_xenial, CentosTestNetworkStaticAbs): __test__ = True diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_network_static_routes.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_network_static_routes.py --- curtin-18.1-17-gae48e86f/tests/vmtests/test_network_static_routes.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_network_static_routes.py 2019-02-15 20:42:42.000000000 +0000 @@ -49,22 +49,27 @@ __test__ = True -class ArtfulTestNetworkStaticRoutes(relbase.artful, +class BionicTestNetworkStaticRoutes(relbase.bionic, TestNetworkStaticRoutesAbs): __test__ = True -class BionicTestNetworkStaticRoutes(relbase.bionic, +class CosmicTestNetworkStaticRoutes(relbase.cosmic, TestNetworkStaticRoutesAbs): __test__ = True -class Centos66TestNetworkStaticRoutes(centos_relbase.centos66fromxenial, +class DiscoTestNetworkStaticRoutes(relbase.disco, + TestNetworkStaticRoutesAbs): + __test__ = 
True + + +class Centos66TestNetworkStaticRoutes(centos_relbase.centos66_xenial, CentosTestNetworkStaticRoutesAbs): __test__ = False -class Centos70TestNetworkStaticRoutes(centos_relbase.centos70fromxenial, +class Centos70TestNetworkStaticRoutes(centos_relbase.centos70_xenial, CentosTestNetworkStaticRoutesAbs): __test__ = False diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_network_vlan.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_network_vlan.py --- curtin-18.1-17-gae48e86f/tests/vmtests/test_network_vlan.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_network_vlan.py 2019-02-15 20:42:42.000000000 +0000 @@ -11,13 +11,15 @@ class TestNetworkVlanAbs(TestNetworkBaseTestsAbs): conf_file = "examples/tests/vlan_network.yaml" - collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [ + extra_collect_scripts = TestNetworkBaseTestsAbs.extra_collect_scripts + [ textwrap.dedent(""" cd OUTPUT_COLLECT_D ip -d link show interface1.2667 |tee ip_link_show_interface1.2667 ip -d link show interface1.2668 |tee ip_link_show_interface1.2668 ip -d link show interface1.2669 |tee ip_link_show_interface1.2669 ip -d link show interface1.2670 |tee ip_link_show_interface1.2670 + + exit 0 """)] def get_vlans(self): @@ -35,7 +37,7 @@ self.output_files_exist(link_files) def test_vlan_installed(self): - release = self.target_release if self.target_release else self.release + release = self.target_release if release not in ('precise', 'trusty', 'xenial', 'artful'): raise SkipTest("release '%s' does not need the vlan package" % release) @@ -54,14 +56,6 @@ class CentosTestNetworkVlanAbs(TestNetworkVlanAbs): - collect_scripts = TestNetworkVlanAbs.collect_scripts + [ - textwrap.dedent(""" - cd OUTPUT_COLLECT_D - cp -a /etc/sysconfig/network-scripts . - cp -a /var/log/cloud-init* . - cp -a /var/lib/cloud ./var_lib_cloud - cp -a /run/cloud-init ./run_cloud-init - """)] def test_etc_network_interfaces(self): pass @@ -86,20 +80,24 @@ __test__ = True -class ArtfulTestNetworkVlan(relbase.artful, TestNetworkVlanAbs): +class BionicTestNetworkVlan(relbase.bionic, TestNetworkVlanAbs): __test__ = True -class BionicTestNetworkVlan(relbase.bionic, TestNetworkVlanAbs): +class CosmicTestNetworkVlan(relbase.cosmic, TestNetworkVlanAbs): + __test__ = True + + +class DiscoTestNetworkVlan(relbase.disco, TestNetworkVlanAbs): __test__ = True -class Centos66TestNetworkVlan(centos_relbase.centos66fromxenial, +class Centos66TestNetworkVlan(centos_relbase.centos66_xenial, CentosTestNetworkVlanAbs): __test__ = True -class Centos70TestNetworkVlan(centos_relbase.centos70fromxenial, +class Centos70TestNetworkVlan(centos_relbase.centos70_xenial, CentosTestNetworkVlanAbs): __test__ = True diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_nvme.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_nvme.py --- curtin-18.1-17-gae48e86f/tests/vmtests/test_nvme.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_nvme.py 2019-02-15 20:42:42.000000000 +0000 @@ -2,57 +2,59 @@ from . 
import VMBaseClass from .releases import base_vm_classes as relbase +from .releases import centos_base_vm_classes as centos_relbase import os import textwrap +centos70_xenial = centos_relbase.centos70_xenial + class TestNvmeAbs(VMBaseClass): arch_skip = [ "s390x", # nvme is a pci device, no pci on s390x ] + test_type = 'storage' interactive = False conf_file = "examples/tests/nvme.yaml" extra_disks = [] nvme_disks = ['4G', '4G'] - disk_to_check = [('main_disk', 1), ('main_disk', 2), ('main_disk', 15), - ('nvme_disk', 1), ('nvme_disk', 2), ('nvme_disk', 3), - ('second_nvme', 1)] - collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" + disk_to_check = [ + ('main_disk', 1), ('main_disk', 2), ('main_disk', 15), + ('nvme_disk', 0), ('nvme_disk', 1), ('nvme_disk', 2), ('nvme_disk', 3), + ('second_nvme', 0), ('second_nvme', 1)] + extra_collect_scripts = [textwrap.dedent(""" cd OUTPUT_COLLECT_D ls /sys/class/ > sys_class ls /sys/class/nvme/ > ls_nvme ls /dev/nvme* > ls_dev_nvme - ls /dev/disk/by-dname/ > ls_dname - blkid -o export /dev/vda > blkid_output_vda - blkid -o export /dev/vda1 > blkid_output_vda1 - blkid -o export /dev/vda2 > blkid_output_vda2 - cat /proc/partitions > proc_partitions - ls -al /dev/disk/by-uuid/ > ls_uuid - cat /etc/fstab > fstab - mkdir -p /dev/disk/by-dname - ls /dev/disk/by-dname/ > ls_dname - find /etc/network/interfaces.d > find_interfacesd - - v="" - out=$(apt-config shell v Acquire::HTTP::Proxy) - eval "$out" - echo "$v" > apt-proxy + + exit 0 """)] - def test_output_files_exist(self): - self.output_files_exist(["ls_nvme", "ls_dname", "ls_dev_nvme"]) + def _test_nvme_device_names(self, expected): + self.output_files_exist(["ls_nvme", "ls_dev_nvme"]) + print('expected: %s' % expected) + if os.path.getsize(self.collect_path('ls_dev_nvme')) > 0: + print('using ls_dev_nvme') + for device in ['/dev/' + dev for dev in expected]: + print('checking device: %s' % device) + self.check_file_strippedline("ls_dev_nvme", device) - def test_nvme_device_names(self): - ls_nvme = self.collect_path('ls_nvme') # trusty and vivid do not have sys/class/nvme but # nvme devices do work - if os.path.getsize(ls_nvme) > 0: - self.check_file_strippedline("ls_nvme", "nvme0") - self.check_file_strippedline("ls_nvme", "nvme1") else: - self.check_file_strippedline("ls_dev_nvme", "/dev/nvme0") - self.check_file_strippedline("ls_dev_nvme", "/dev/nvme1") + print('using ls_nvme') + for device in expected: + print('checking device: %s' % device) + self.check_file_strippedline("ls_nvme", device) + + def test_nvme_device_names(self): + self._test_nvme_device_names(['nvme0', 'nvme1']) + + +class Centos70TestNvme(centos70_xenial, TestNvmeAbs): + __test__ = True class TrustyTestNvme(relbase.trusty, TestNvmeAbs): @@ -75,15 +77,19 @@ __test__ = True -class ArtfulTestNvme(relbase.artful, TestNvmeAbs): +class BionicTestNvme(relbase.bionic, TestNvmeAbs): __test__ = True -class BionicTestNvme(relbase.bionic, TestNvmeAbs): +class CosmicTestNvme(relbase.cosmic, TestNvmeAbs): __test__ = True -class TestNvmeBcacheAbs(VMBaseClass): +class DiscoTestNvme(relbase.disco, TestNvmeAbs): + __test__ = True + + +class TestNvmeBcacheAbs(TestNvmeAbs): arch_skip = [ "s390x", # nvme is a pci device, no pci on s390x ] @@ -92,52 +98,29 @@ extra_disks = ['10G'] nvme_disks = ['6G'] uefi = True - disk_to_check = [('sda', 1), ('sda', 2), ('sda', 3)] + disk_to_check = [('sda', 1), ('sda', 2), ('sda', 3), + ('sdb', 0), ('nvme0n1', 0)] - collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" + 
extra_collect_scripts = [textwrap.dedent(""" cd OUTPUT_COLLECT_D ls /sys/class/ > sys_class ls /sys/class/nvme/ > ls_nvme ls /dev/nvme* > ls_dev_nvme - ls /dev/disk/by-dname/ > ls_dname ls -al /dev/bcache/by-uuid/ > ls_bcache_by_uuid |: - blkid -o export /dev/vda > blkid_output_vda - blkid -o export /dev/vda1 > blkid_output_vda1 - blkid -o export /dev/vda2 > blkid_output_vda2 bcache-super-show /dev/nvme0n1p1 > bcache_super_nvme0n1p1 ls /sys/fs/bcache > bcache_ls cat /sys/block/bcache0/bcache/cache_mode > bcache_cache_mode - cat /proc/partitions > proc_partitions - ls -al /dev/disk/by-uuid/ > ls_uuid - cat /etc/fstab > fstab - mkdir -p /dev/disk/by-dname - ls /dev/disk/by-dname/ > ls_dname - find /etc/network/interfaces.d > find_interfacesd - - v="" - out=$(apt-config shell v Acquire::HTTP::Proxy) - eval "$out" - echo "$v" > apt-proxy - """)] - def test_output_files_exist(self): - self.output_files_exist(["ls_nvme", "ls_dname", "ls_dev_nvme"]) - - def test_nvme_device_names(self): - ls_nvme = self.collect_path('ls_nvme') - # trusty and vivid do not have sys/class/nvme but - # nvme devices do work - if os.path.getsize(ls_nvme) > 0: - self.check_file_strippedline("ls_nvme", "nvme0") - else: - self.check_file_strippedline("ls_dev_nvme", "/dev/nvme0") - self.check_file_strippedline("ls_dev_nvme", "/dev/nvme0n1") - self.check_file_strippedline("ls_dev_nvme", "/dev/nvme0n1p1") + exit 0 + """)] def test_bcache_output_files_exist(self): self.output_files_exist(["bcache_super_nvme0n1p1", "bcache_ls", "bcache_cache_mode"]) + def test_nvme_device_names(self): + self._test_nvme_device_names(['nvme0', 'nvme0n1', 'nvme0n1p1']) + def test_bcache_status(self): bcache_cset_uuid = None bcache_super = self.load_collect_file("bcache_super_nvme0n1p1") @@ -164,11 +147,15 @@ __test__ = True -class ArtfulTestNvmeBcache(relbase.artful, TestNvmeBcacheAbs): +class BionicTestNvmeBcache(relbase.bionic, TestNvmeBcacheAbs): __test__ = True -class BionicTestNvmeBcache(relbase.bionic, TestNvmeBcacheAbs): +class CosmicTestNvmeBcache(relbase.cosmic, TestNvmeBcacheAbs): + __test__ = True + + +class DiscoTestNvmeBcache(relbase.disco, TestNvmeBcacheAbs): __test__ = True # vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_old_apt_features.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_old_apt_features.py --- curtin-18.1-17-gae48e86f/tests/vmtests/test_old_apt_features.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_old_apt_features.py 2019-02-15 20:42:42.000000000 +0000 @@ -5,6 +5,7 @@ """ import re import textwrap +import yaml from . import VMBaseClass from .releases import base_vm_classes as relbase @@ -38,20 +39,20 @@ class TestOldAptAbs(VMBaseClass): """TestOldAptAbs - Basic tests for old apt features of curtin""" interactive = False + test_type = 'config' extra_disks = [] fstab_expected = {} disk_to_check = [] - collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" + extra_collect_scripts = [textwrap.dedent(""" cd OUTPUT_COLLECT_D - cat /etc/fstab > fstab - ls /dev/disk/by-dname > ls_dname - find /etc/network/interfaces.d > find_interfacesd grep -A 3 "Name: debconf/priority" /var/cache/debconf/config.dat > debc apt-config dump > aptconf cp /etc/apt/apt.conf.d/90curtin-aptproxy . cp /etc/apt/sources.list . cp /etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg . cp /etc/cloud/cloud.cfg.d/90_dpkg.cfg . 
+ + exit 0 """)] arch = util.get_architecture() if arch in ['amd64', 'i386']: @@ -71,8 +72,10 @@ def test_preserve_source(self): """test_preserve_source - no clobbering sources.list by cloud-init""" - self.check_file_regex("curtin-preserve-sources.cfg", - "apt_preserve_sources_list.*true") + # For earlier than xenial 'apt_preserve_sources_list' is expected + self.assertEqual( + {'apt': {'preserve_sources_list': True}}, + yaml.load(self.load_collect_file("curtin-preserve-sources.cfg"))) def test_debconf(self): """test_debconf - Check if debconf is in place""" diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_pollinate_useragent.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_pollinate_useragent.py --- curtin-18.1-17-gae48e86f/tests/vmtests/test_pollinate_useragent.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_pollinate_useragent.py 2019-02-15 20:42:42.000000000 +0000 @@ -11,13 +11,15 @@ class TestPollinateUserAgent(VMBaseClass): # Test configuring pollinate useragent conf_file = "examples/tests/pollinate-useragent.yaml" + test_type = 'config' extra_disks = [] extra_nics = [] - collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" + extra_collect_scripts = [textwrap.dedent(""" cd OUTPUT_COLLECT_D - find /etc/network/interfaces.d > find_interfacesd cp -a /etc/pollinate etc_pollinate pollinate --print-user-agent > pollinate_print_user_agent + + exit 0 """)] def test_pollinate_user_agent(self): @@ -63,4 +65,11 @@ __test__ = True +class CosmicTestPollinateUserAgent(relbase.cosmic, TestPollinateUserAgent): + __test__ = True + + +class DiscoTestPollinateUserAgent(relbase.disco, TestPollinateUserAgent): + __test__ = True + # vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_raid5_bcache.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_raid5_bcache.py --- curtin-18.1-17-gae48e86f/tests/vmtests/test_raid5_bcache.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_raid5_bcache.py 2019-02-15 20:42:42.000000000 +0000 @@ -8,22 +8,21 @@ class TestMdadmAbs(VMBaseClass): interactive = False + test_type = 'storage' extra_disks = ['10G', '10G', '10G', '10G'] active_mdadm = "1" - collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" + extra_collect_scripts = [textwrap.dedent(""" cd OUTPUT_COLLECT_D - cat /etc/fstab > fstab mdadm --detail --scan > mdadm_status mdadm --detail --scan | grep -c ubuntu > mdadm_active1 grep -c active /proc/mdstat > mdadm_active2 - ls /dev/disk/by-dname > ls_dname - find /etc/network/interfaces.d > find_interfacesd + + exit 0 """)] def test_mdadm_output_files_exist(self): - self.output_files_exist( - ["fstab", "mdadm_status", "mdadm_active1", "mdadm_active2", - "ls_dname"]) + self.output_files_exist(["mdadm_status", "mdadm_active1", + "mdadm_active2"]) def test_mdadm_status(self): # ubuntu: is the name assigned to the md array @@ -36,15 +35,15 @@ conf_file = "examples/tests/raid5bcache.yaml" disk_to_check = [('md0', 0), ('sda', 2)] - collect_scripts = TestMdadmAbs.collect_scripts + [textwrap.dedent(""" - cd OUTPUT_COLLECT_D - bcache-super-show /dev/vda2 > bcache_super_vda2 - ls /sys/fs/bcache > bcache_ls - cat /sys/block/bcache0/bcache/cache_mode > bcache_cache_mode - cat /proc/mounts > proc_mounts - cat /proc/partitions > proc_partitions - find /etc/network/interfaces.d > find_interfacesd - """)] + extra_collect_scripts = ( + TestMdadmAbs.extra_collect_scripts + + [textwrap.dedent("""\ + cd OUTPUT_COLLECT_D + bcache-super-show /dev/vda2 > 
bcache_super_vda2 + ls /sys/fs/bcache > bcache_ls + cat /sys/block/bcache0/bcache/cache_mode > bcache_cache_mode + + exit 0""")]) fstab_expected = { '/dev/bcache0': '/', '/dev/md0': '/srv/data', @@ -99,11 +98,15 @@ __test__ = True -class ArtfulTestRaid5Bcache(relbase.artful, TestMdadmBcacheAbs): +class BionicTestRaid5Bcache(relbase.bionic, TestMdadmBcacheAbs): __test__ = True -class BionicTestRaid5Bcache(relbase.bionic, TestMdadmBcacheAbs): +class CosmicTestRaid5Bcache(relbase.cosmic, TestMdadmBcacheAbs): + __test__ = True + + +class DiscoTestRaid5Bcache(relbase.disco, TestMdadmBcacheAbs): __test__ = True # vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_simple.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_simple.py --- curtin-18.1-17-gae48e86f/tests/vmtests/test_simple.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_simple.py 2019-02-15 20:42:42.000000000 +0000 @@ -2,16 +2,64 @@ from . import VMBaseClass from .releases import base_vm_classes as relbase +from .releases import centos_base_vm_classes as centos_relbase import textwrap class TestSimple(VMBaseClass): - # Test that curtin with no config does the right thing + """ Test that curtin runs block-meta simple mode correctly. """ conf_file = "examples/tests/simple.yaml" extra_disks = [] extra_nics = [] - collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" + extra_collect_scripts = [textwrap.dedent(""" + cd OUTPUT_COLLECT_D + cp /etc/netplan/50-cloud-init.yaml netplan.yaml + + exit 0 + """)] + + +class Centos70TestSimple(centos_relbase.centos70_xenial, TestSimple): + __test__ = True + + +class TrustyTestSimple(relbase.trusty, TestSimple): + __test__ = True + + +class XenialTestSimple(relbase.xenial, TestSimple): + __test__ = True + + +class BionicTestSimple(relbase.bionic, TestSimple): + __test__ = True + + def test_output_files_exist(self): + self.output_files_exist(["netplan.yaml"]) + + +class CosmicTestSimple(relbase.cosmic, TestSimple): + __test__ = True + + def test_output_files_exist(self): + self.output_files_exist(["netplan.yaml"]) + + +class DiscoTestSimple(relbase.disco, TestSimple): + __test__ = True + + def test_output_files_exist(self): + self.output_files_exist(["netplan.yaml"]) + + +class TestSimpleStorage(VMBaseClass): + """ Test curtin runs clear-holders when mode=simple with storage cfg. """ + conf_file = "examples/tests/simple-storage.yaml" + dirty_disks = True + extra_disks = ['5G', '5G'] + extra_nics = [] + extra_collect_scripts = [textwrap.dedent(""" cd OUTPUT_COLLECT_D sfdisk --list > sfdisk_list for d in /dev/[sv]d[a-z] /dev/xvd?; do @@ -28,6 +76,7 @@ fi cp /var/log/cloud-init.log . 
find /etc/network/interfaces.d > find_interfacesd + exit 0 """)] def test_output_files_exist(self): @@ -35,25 +84,29 @@ "proc_partitions"]) -class TrustyTestSimple(relbase.trusty, TestSimple): +class XenialGATestSimpleStorage(relbase.xenial, TestSimpleStorage): __test__ = True -class XenialTestSimple(relbase.xenial, TestSimple): +class BionicTestSimpleStorage(relbase.bionic, TestSimpleStorage): __test__ = True + def test_output_files_exist(self): + self.output_files_exist(["netplan.yaml"]) + -class ArtfulTestSimple(relbase.artful, TestSimple): +class CosmicTestSimpleStorage(relbase.cosmic, TestSimpleStorage): __test__ = True def test_output_files_exist(self): self.output_files_exist(["netplan.yaml"]) -class BionicTestSimple(relbase.bionic, TestSimple): +class DiscoTestSimpleStorage(relbase.disco, TestSimpleStorage): __test__ = True def test_output_files_exist(self): self.output_files_exist(["netplan.yaml"]) + # vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_ubuntu_core.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_ubuntu_core.py --- curtin-18.1-17-gae48e86f/tests/vmtests/test_ubuntu_core.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_ubuntu_core.py 2019-02-15 20:42:42.000000000 +0000 @@ -10,23 +10,20 @@ target_ftype = "root-image.xz" interactive = False conf_file = "examples/tests/ubuntu_core.yaml" - collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" + extra_collect_scripts = VMBaseClass.extra_collect_scripts + [ + textwrap.dedent(""" cd OUTPUT_COLLECT_D - cat /proc/partitions > proc_partitions - ls -al /dev/disk/by-uuid/ > ls_uuid - cat /etc/fstab > fstab - find /etc/network/interfaces.d > find_interfacesd snap list > snap_list cp -a /run/cloud-init ./run_cloud_init |: cp -a /etc/cloud ./etc_cloud |: cp -a /home . |: cp -a /var/lib/extrausers . |: - """)] - def test_output_files_exist(self): - self.output_files_exist(["snap_list"]) + exit 0 + """)] def test_ubuntu_core_snaps_installed(self): + self.output_files_exist(["snap_list"]) snap_list = self.load_collect_file('snap_list') print(snap_list) for snap in ['core', 'pc', 'pc-kernel', 'hello', diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_uefi_basic.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_uefi_basic.py --- curtin-18.1-17-gae48e86f/tests/vmtests/test_uefi_basic.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_uefi_basic.py 2019-02-15 20:42:42.000000000 +0000 @@ -1,46 +1,35 @@ # This file is part of curtin. See LICENSE file for copyright and license info. from . 
import (VMBaseClass) - from .releases import base_vm_classes as relbase +from .releases import centos_base_vm_classes as centos_relbase import textwrap class TestBasicAbs(VMBaseClass): interactive = False + test_type = 'storage' arch_skip = ["s390x"] conf_file = "examples/tests/uefi_basic.yaml" extra_disks = ['4G'] uefi = True disk_to_check = [('main_disk', 1), ('main_disk', 2)] - collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" + extra_collect_scripts = [textwrap.dedent(""" cd OUTPUT_COLLECT_D - blkid -o export /dev/vda > blkid_output_vda - blkid -o export /dev/vda1 > blkid_output_vda1 - blkid -o export /dev/vda2 > blkid_output_vda2 - cat /proc/partitions > proc_partitions - ls -al /dev/disk/by-uuid/ > ls_uuid - cat /etc/fstab > fstab - mkdir -p /dev/disk/by-dname - ls /dev/disk/by-dname/ > ls_dname - find /etc/network/interfaces.d > find_interfacesd - ls /sys/firmware/efi/ > ls_sys_firmware_efi - cat /sys/class/block/vda/queue/logical_block_size > vda_lbs - cat /sys/class/block/vda/queue/physical_block_size > vda_pbs - blockdev --getsz /dev/vda > vda_blockdev_getsz - blockdev --getss /dev/vda > vda_blockdev_getss - blockdev --getpbsz /dev/vda > vda_blockdev_getpbsz - blockdev --getbsz /dev/vda > vda_blockdev_getbsz - """)] + ls /sys/firmware/efi/ | cat >ls_sys_firmware_efi + cp /sys/class/block/vda/queue/logical_block_size vda_lbs + cp /sys/class/block/vda/queue/physical_block_size vda_pbs + blockdev --getsz /dev/vda | cat >vda_blockdev_getsz + blockdev --getss /dev/vda | cat >vda_blockdev_getss + blockdev --getpbsz /dev/vda | cat >vda_blockdev_getpbsz + blockdev --getbsz /dev/vda | cat >vda_blockdev_getbsz - def test_output_files_exist(self): - self.output_files_exist( - ["blkid_output_vda", "blkid_output_vda1", "blkid_output_vda2", - "fstab", "ls_dname", "ls_uuid", "ls_sys_firmware_efi", - "proc_partitions"]) + exit 0 + """)] def test_sys_firmware_efi(self): + self.output_files_exist(["ls_sys_firmware_efi"]) sys_efi_possible = [ 'config_table', 'efivars', @@ -62,8 +51,10 @@ """ Test disk logical and physical block sizes match the class block size. 
""" - for bs in ['lbs', 'pbs']: - size = int(self.load_collect_file('vda_' + bs)) + blocksize_files = ['vda_' + bs for bs in ['lbs', 'pbs']] + self.output_files_exist(blocksize_files) + for bs_file in blocksize_files: + size = int(self.load_collect_file(bs_file)) self.assertEqual(self.disk_block_size, size) def test_disk_block_size_with_blockdev(self): @@ -73,11 +64,17 @@ --getpbsz get physical block (sector) size --getbsz get blocksize """ - for syscall in ['getss', 'getpbsz']: - size = int(self.load_collect_file('vda_blockdev_' + syscall)) + bdev_files = ['vda_blockdev_' + sc for sc in ['getss', 'getpbsz']] + self.output_files_exist(bdev_files) + for sc_file in bdev_files: + size = int(self.load_collect_file(sc_file)) self.assertEqual(self.disk_block_size, size) +class Centos70UefiTestBasic(centos_relbase.centos70_xenial, TestBasicAbs): + __test__ = True + + class PreciseUefiTestBasic(relbase.precise, TestBasicAbs): __test__ = False @@ -96,7 +93,7 @@ __test__ = True -class TrustyHWEXUefiTestBasic(relbase.trusty_hwe_x, TrustyUefiTestBasic): +class TrustyHWEXUefiTestBasic(relbase.trusty_hwe_x, TestBasicAbs): __test__ = True @@ -112,31 +109,43 @@ __test__ = True -class ArtfulUefiTestBasic(relbase.artful, TestBasicAbs): +class BionicUefiTestBasic(relbase.bionic, TestBasicAbs): __test__ = True -class BionicUefiTestBasic(relbase.bionic, TestBasicAbs): +class CosmicUefiTestBasic(relbase.cosmic, TestBasicAbs): __test__ = True -class TrustyUefiTestBasic4k(TrustyUefiTestBasic): +class DiscoUefiTestBasic(relbase.disco, TestBasicAbs): + __test__ = True + + +class Centos70UefiTestBasic4k(centos_relbase.centos70_xenial, TestBasicAbs): disk_block_size = 4096 -class TrustyHWEXUefiTestBasic4k(relbase.trusty_hwe_x, TrustyUefiTestBasic4k): - __test__ = True +class TrustyUefiTestBasic4k(relbase.trusty, TestBasicAbs): + disk_block_size = 4096 + + +class TrustyHWEXUefiTestBasic4k(relbase.trusty_hwe_x, TestBasicAbs): + disk_block_size = 4096 + + +class XenialGAUefiTestBasic4k(relbase.xenial_ga, TestBasicAbs): + disk_block_size = 4096 -class XenialGAUefiTestBasic4k(XenialGAUefiTestBasic): +class BionicUefiTestBasic4k(relbase.bionic, TestBasicAbs): disk_block_size = 4096 -class ArtfulUefiTestBasic4k(ArtfulUefiTestBasic): +class CosmicUefiTestBasic4k(relbase.cosmic, TestBasicAbs): disk_block_size = 4096 -class BionicUefiTestBasic4k(BionicUefiTestBasic): +class DiscoUefiTestBasic4k(relbase.disco, TestBasicAbs): disk_block_size = 4096 # vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/tests/vmtests/test_zfsroot.py curtin-18.2-10-g7afd77fa/tests/vmtests/test_zfsroot.py --- curtin-18.1-17-gae48e86f/tests/vmtests/test_zfsroot.py 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tests/vmtests/test_zfsroot.py 2019-02-15 20:42:42.000000000 +0000 @@ -6,43 +6,29 @@ class TestZfsRootAbs(VMBaseClass): interactive = False + test_type = 'storage' nr_cpus = 2 dirty_disks = True conf_file = "examples/tests/zfsroot.yaml" extra_disks = [] - collect_scripts = VMBaseClass.collect_scripts + [ - textwrap.dedent(""" + extra_collect_scripts = [textwrap.dedent(""" cd OUTPUT_COLLECT_D blkid -o export /dev/vda > blkid_output_vda - blkid -o export /dev/vda1 > blkid_output_vda1 - blkid -o export /dev/vda2 > blkid_output_vda2 zfs list > zfs_list zpool list > zpool_list zpool status > zpool_status - cat /proc/partitions > proc_partitions - cat /proc/mounts > proc_mounts - cat /proc/cmdline > proc_cmdline - ls -al /dev/disk/by-uuid/ > ls_uuid - cat /etc/fstab > fstab - mkdir -p /dev/disk/by-dname - ls 
/dev/disk/by-dname/ > ls_dname - find /etc/network/interfaces.d > find_interfacesd - v="" - out=$(apt-config shell v Acquire::HTTP::Proxy) - eval "$out" - echo "$v" > apt-proxy + + exit 0 """)] @skip_if_flag('expected_failure') def test_output_files_exist(self): - self.output_files_exist( - ["blkid_output_vda", "blkid_output_vda1", "blkid_output_vda2", - "fstab", "ls_dname", "ls_uuid", - "proc_partitions", - "root/curtin-install.log", "root/curtin-install-cfg.yaml"]) + self.output_files_exist(["root/curtin-install.log", + "root/curtin-install-cfg.yaml"]) @skip_if_flag('expected_failure') def test_ptable(self): + self.output_files_exist(["blkid_output_vda"]) blkid_info = self.get_blkid_data("blkid_output_vda") self.assertEquals(blkid_info["PTTYPE"], "gpt") @@ -96,11 +82,15 @@ __test__ = True -class ArtfulTestZfsRoot(relbase.artful, TestZfsRootAbs): +class BionicTestZfsRoot(relbase.bionic, TestZfsRootAbs): __test__ = True -class BionicTestZfsRoot(relbase.bionic, TestZfsRootAbs): +class CosmicTestZfsRoot(relbase.cosmic, TestZfsRootAbs): + __test__ = True + + +class DiscoTestZfsRoot(relbase.disco, TestZfsRootAbs): __test__ = True @@ -120,3 +110,11 @@ class BionicTestZfsRootFsType(relbase.bionic, TestZfsRootFsTypeAbs): __test__ = True + + +class CosmicTestZfsRootFsType(relbase.cosmic, TestZfsRootFsTypeAbs): + __test__ = True + + +class DiscoTestZfsRootFsType(relbase.disco, TestZfsRootFsTypeAbs): + __test__ = True diff -Nru curtin-18.1-17-gae48e86f/tools/curtainer curtin-18.2-10-g7afd77fa/tools/curtainer --- curtin-18.1-17-gae48e86f/tools/curtainer 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tools/curtainer 2019-02-15 20:42:42.000000000 +0000 @@ -17,10 +17,13 @@ start a container of image (ubuntu-daily:xenial) and install curtin. options: - --proposed enable proposed - --daily enable daily curtin archive - --source D grab the source deb, unpack inside, and copy unpacked - source out to 'D' + --no-patch-version do not patch source dir/curtin/version.py + by default, --source will have version.py updated + with dpkg's version. + --proposed enable proposed + --daily enable daily curtin archive + --source D grab the source deb, unpack inside, and copy unpacked + source out to 'D' EOF } @@ -114,7 +117,7 @@ main() { local short_opts="hv" - local long_opts="help,daily,proposed,source:,verbose" + local long_opts="help,daily,no-patch-version,proposed,source:,verbose" local getopt_out="" getopt_out=$(getopt --name "${0##*/}" \ --options "${short_opts}" --long "${long_opts}" -- "$@") && @@ -123,13 +126,14 @@ local cur="" next="" local proposed=false daily=false src="" name="" maxwait=30 - local eatmydata="eatmydata" getsource="none" + local eatmydata="eatmydata" getsource="none" patch_version=true while [ $# -ne 0 ]; do cur="$1"; next="$2"; case "$cur" in -h|--help) Usage ; exit 0;; --source) getsource="$next"; shift;; + --no-patch-version) patch_version=false;; --proposed) proposed=true;; --daily) daily=true;; -v|--verbose) VERBOSITY=$((${VERBOSITY}+1));; @@ -147,6 +151,7 @@ if [ "$getsource" != "none" ]; then [ ! -e "$getsource" ] || fail "source output '$getsource' exists." 
fi + getsource="${getsource%/}" lxc launch "$src" "$name" || fail "failed lxc launch $src $name" CONTAINER=$name @@ -218,6 +223,16 @@ if [ "$src_ver" != "$pkg_ver" ]; then fail "source version ($src_ver) != package version ($pkg_ver)" fi + if "${patch_version}"; then + local verfile="$getsource/curtin/version.py" + grep -q "@@PACKAGED_VERSION@@" "$verfile" || + fail "failed patching version: " \ + "@@PACKAGED_VERSION@@ not found in $verfile" + sed -i.curtainer-dist \ + "s,@@PACKAGED_VERSION@@,${pkg_ver}," "$verfile" || + fail "failed modifying $verfile" + debug 1 "patched $verfile pkg version to $pkg_ver." + fi inside "$name" rm -Rf "$isrcd" || fail "failed removal of extract dir" debug 1 "put source for curtin at $src_ver in $getsource" diff -Nru curtin-18.1-17-gae48e86f/tools/curtin-from-container curtin-18.2-10-g7afd77fa/tools/curtin-from-container --- curtin-18.1-17-gae48e86f/tools/curtin-from-container 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tools/curtin-from-container 2019-02-15 20:42:42.000000000 +0000 @@ -63,8 +63,12 @@ return } - lxc file push "$fpath" "$name/$prefix/$afile" || { - rerror "failed: lxc file push '$fpath' '$name/$prefix/$afile'" + # 'lxc file' command may write progress info to stdout which breaks + # this program when called as 'curtin pack' as it needs to write the + # payload to stdout. LXD issue #2683 + local out="" + out=$(lxc file push "$fpath" "$name/$prefix/$afile" 2>&1) || { + rerror "failed: lxc file push '$fpath' '$name/$prefix/$afile': $out" return } _RET="$afile:$prefix/$afile" diff -Nru curtin-18.1-17-gae48e86f/tools/jenkins-runner curtin-18.2-10-g7afd77fa/tools/jenkins-runner --- curtin-18.1-17-gae48e86f/tools/jenkins-runner 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tools/jenkins-runner 2019-02-15 20:42:42.000000000 +0000 @@ -51,11 +51,16 @@ start_s=$(date +%s) parallel=${CURTIN_VMTEST_PARALLEL} +ntfilters=( ) ntargs=( ) +ntdefargs=( "-vv" "--nologcapture" ) +tests=( ) while [ $# -ne 0 ]; do case "$1" in # allow setting these environment variables on cmdline. CURTIN_VMTEST_*=*) export "$1";; + --filter=*) ntfilters[${#ntfilters[@]}]="${1#--filter=}";; + --filter) ntfilters[${#ntfilters[@]}]="$2"; shift;; -p|--parallel) parallel="$2"; shift;; --parallel=*) parallel=${1#*=};; -p[0-9]|-p-1|-p[0-9][0-9]) parallel=${1#-p};; @@ -63,15 +68,33 @@ shift break ;; - *) ntargs[${#ntargs[@]}]="$1";; + tests/vmtest*) tests[${#tests[@]}]="$1";; + *) ntargs[${#ntargs[@]}]="$1" + ntdefargs="";; esac shift; done +if [ "${#tests[@]}" -ne 0 -a "${#ntfilters[@]}" -ne 0 ]; then + error "Passing tests and --filter are incompatible." 
+ error "test arguments provided were: ${tests[*]}" + fail +elif [ "${#tests[@]}" -eq 0 -a "${#ntfilters[@]}" -eq 0 ]; then + tests=( tests/vmtests ) +elif [ "${#ntfilters[@]}" -ne 0 ]; then + tests=( $(./tools/vmtest-filter "${ntfilters[@]}") ) + if [ "${#tests[@]}" -eq 0 ]; then + error "Failed to find any tests with filter(s): \"${ntfilters[*]}\"" + fail "Try testing filters with: ./tools/vmtest-filter ${ntfilters[*]}" + fi +fi + CURTIN_VMTEST_PARALLEL=$parallel if [ ${#ntargs[@]} -eq 0 ]; then - set -- -vv --nologcapture tests/vmtests/ + set -- "${ntdefargs[@]}" "${tests[@]}" +else + set -- "${tests[@]}" fi trap cleanup EXIT @@ -120,11 +143,16 @@ # rather than python3 -m tests.vmtests.image_sync (LP: #1594465) echo "Querying synced ephemeral images/kernels in $IMAGE_DIR" printf "%.s=" {1..86}; echo -printf " %6s %8s %-10s %s/%-14s %s\n" "Release" "Codename" "ImageDate" "Arch" "SubArch" "Path" +printf " %6s %8s %-12s %-5s/%-14s %s\n" "Release" "Codename" "ImageDate" "Arch" "SubArch" "Path" +printf "%.s-" {1..86}; echo +fmt=" %(version)6s %(release)8s %(version_name)-12s %(arch)-5s/%(subarch)-14s %(path)s" +PYTHONPATH="$PWD" python3 tests/vmtests/image_sync.py query \ + --output-format="$fmt" "$IMAGE_DIR" "kflavor=generic" "ftype~(root-image.gz|squashfs|root-tgz)" || + { ret=$?; echo "FATAL: error querying images in $IMAGE_DIR" 1>&2; + exit $ret; } printf "%.s-" {1..86}; echo -fmt=" %(version)6s %(release)8s %(version_name)-10s %(arch)s/%(subarch)-14s %(path)s" PYTHONPATH="$PWD" python3 tests/vmtests/image_sync.py query \ - --output-format="$fmt" "$IMAGE_DIR" "kflavor=generic" "ftype~(root-image.gz|squashfs)" || + --output-format="$fmt" "$IMAGE_DIR" "subarch=generic" "ftype~(root-image.gz|squashfs|root-tgz)" || { ret=$?; echo "FATAL: error querying images in $IMAGE_DIR" 1>&2; exit $ret; } printf "%.s=" {1..86}; printf "\n\n" diff -Nru curtin-18.1-17-gae48e86f/tools/vmtest-filter curtin-18.2-10-g7afd77fa/tools/vmtest-filter --- curtin-18.1-17-gae48e86f/tools/vmtest-filter 1970-01-01 00:00:00.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tools/vmtest-filter 2019-02-15 20:42:42.000000000 +0000 @@ -0,0 +1,57 @@ +#!/usr/bin/python3 +# This file is part of curtin. See LICENSE file for copyright and license info. 
+ +import argparse +import logging +import os +import sys + +# Fix path so we can import helpers +sys.path.insert(1, os.path.realpath(os.path.join( + os.path.dirname(__file__), '..'))) + +# import the helper +from tests.vmtests.helpers import find_testcases_by_attr # noqa: E402 + +# don't display any logging output +logging.disable(logging.CRITICAL) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(prog='vmtest-filter', + description='Dump matching testcases', + usage='''vmtest-filter --attr=value''') + parser.add_argument('-d', '--distro', action='store', + dest='distro', default=None, + help='name of distro to match') + parser.add_argument('-D', '--target_distro', action='store', + dest='target_distro', default=None, + help='name of target_distro to match') + parser.add_argument('-r', '--release', action='store', + dest='release', default=None, + help='name of release to match') + parser.add_argument('-R', '--target_release', action='store', + dest='target_release', default=None, + help='name of target_release to match') + parser.add_argument('-a', '--arch', action='store', + dest='arch', default=None, + help='name of arch to match') + parser.add_argument('-s', '--subarch', action='store', + dest='subarch', default=None, + help='name of subarch to match') + parser.add_argument('-k', '--krel', action='store', + dest='krel', default=None, + help='name of krel to match') + parser.add_argument('-f', '--ftype', action='store', + dest='ftype', default=None, + help='name of ftype to match') + parser.add_argument('dir', nargs=argparse.REMAINDER, default=os.getcwd()) + args = parser.parse_args(sys.argv[1:]) + if args.dir: + kwargs = {k[0]: k[1] for k in [v.split('=') for v in args.dir]} + else: + kwargs = vars(args) + for tc in find_testcases_by_attr(**kwargs): + print(tc) + +# vi: ts=4 expandtab syntax=python diff -Nru curtin-18.1-17-gae48e86f/tools/vmtest-sync-images curtin-18.2-10-g7afd77fa/tools/vmtest-sync-images --- curtin-18.1-17-gae48e86f/tools/vmtest-sync-images 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tools/vmtest-sync-images 2019-02-15 20:42:42.000000000 +0000 @@ -18,7 +18,6 @@ IMAGE_DIR, IMAGE_SRC_URL, sync_images) from tests.vmtests.image_sync import ITEM_NAME_FILTERS from tests.vmtests.helpers import (find_arches, find_releases_by_distro) -from curtin.util import get_platform_arch def _fmt_list_filter(filter_name, matches): diff -Nru curtin-18.1-17-gae48e86f/tools/xkvm curtin-18.2-10-g7afd77fa/tools/xkvm --- curtin-18.1-17-gae48e86f/tools/xkvm 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tools/xkvm 2019-02-15 20:42:42.000000000 +0000 @@ -648,9 +648,13 @@ local bus_devices bus_devices=( -device "$virtio_scsi_bus,id=virtio-scsi-xkvm" ) - cmd=( "${kvmcmd[@]}" "${archopts[@]}" + local rng_devices + rng_devices=( -object "rng-random,filename=/dev/urandom,id=rng0" + -device "virtio-rng-pci,rng=rng0" ) + cmd=( "${kvmcmd[@]}" "${archopts[@]}" "${bios_opts[@]}" "${bus_devices[@]}" + "${rng_devices[@]}" "${netargs[@]}" "${diskargs[@]}" "${pt[@]}" ) local pcmd=$(quote_cmd "${cmd[@]}") diff -Nru curtin-18.1-17-gae48e86f/tox.ini curtin-18.2-10-g7afd77fa/tox.ini --- curtin-18.1-17-gae48e86f/tox.ini 2018-05-18 18:40:48.000000000 +0000 +++ curtin-18.2-10-g7afd77fa/tox.ini 2019-02-15 20:42:42.000000000 +0000 @@ -1,7 +1,14 @@ [tox] minversion = 1.6 skipsdist = True -envlist = py27, py3, py3-flake8, py3-pylint, py27-pylint, trusty-check, trusty-py27, trusty-py3 +envlist = + py3-flake8, + py27, + py3, + py3-pylint, + py27-pylint, + 
trusty-py27, + xenial-py3 [tox:jenkins] downloadcache = ~/cache/pip @@ -44,7 +51,7 @@ basepython = python3 deps = {[testenv]deps} pylint==1.8.1 - bzr+lp:simplestreams + git+https://git.launchpad.net/simplestreams commands = {envpython} -m pylint --errors-only {posargs:curtin tests/vmtests} [testenv:py27-pylint] @@ -91,6 +98,25 @@ commands = {envpython} {toxinidir}/tools/noproxy nosetests \ {posargs:tests/unittests} +[testenv:xenial] +deps = + mock==1.3.0 + nose==1.3.7 + pyyaml==3.11 + oauthlib==1.0.3 + +[testenv:xenial-py27] +basepython = python2.7 +deps = {[testenv:xenial]deps} +commands = {envpython} {toxinidir}/tools/noproxy nosetests \ + {posargs:tests/unittests} + +[testenv:xenial-py3] +basepython = python3 +deps = {[testenv:xenial]deps} +commands = {envpython} {toxinidir}/tools/noproxy nosetests \ + {posargs:tests/unittests} + [testenv:tip-pycodestyle] commands = {envpython} -m pycodestyle {posargs:curtin/ tests/ tools/} deps = pycodestyle
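Note on the tox.ini hunk above: the new shared [testenv:xenial] block pins dependencies to roughly the versions shipped in Ubuntu 16.04, and is reused by the xenial-py27 and xenial-py3 environments. Assuming a checkout carrying this tox.ini, they are invoked like any other environment; a minimal sketch:

    # run the unit tests against the xenial-pinned dependency set
    tox -e xenial-py3

    # or run everything in the trimmed default envlist
    tox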
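Note on the jenkins-runner and vmtest-filter hunks above: the new --filter plumbing resolves attribute filters into concrete test case names via tools/vmtest-filter before anything is handed to nose. A minimal usage sketch, assuming you run from the top of a curtin checkout containing both tools (invocations are illustrative; the attribute names are those defined in vmtest-filter's option parser):

    # list test cases matching attributes; bare key=value arguments are
    # split by vmtest-filter into find_testcases_by_attr() keywords
    ./tools/vmtest-filter release=bionic arch=amd64

    # jenkins-runner expands --filter through vmtest-filter, then runs
    # only the matching vmtest cases
    ./tools/jenkins-runner --filter release=bionic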