diff -Nru curtin-0.1.0~bzr532/curtin/block/clear_holders.py curtin-17.1-11-ga4c9636b/curtin/block/clear_holders.py --- curtin-0.1.0~bzr532/curtin/block/clear_holders.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/curtin/block/clear_holders.py 2018-01-18 21:10:51.000000000 +0000 @@ -93,6 +93,24 @@ return path +def maybe_stop_bcache_device(device): + """Attempt to stop the provided device_path or raise unexpected errors.""" + bcache_stop = os.path.join(device, 'stop') + try: + util.write_file(bcache_stop, '1', mode=None) + except (IOError, OSError) as e: + # Note: if we get any exceptions in the above exception classes + # it is a result of attempting to write "1" into the sysfs path + # The range of errors changes depending on when we race with + # the kernel asynchronously removing the sysfs path. Therefore + # we log the exception errno we got, but do not re-raise as + # the calling process is watching whether the same sysfs path + # is being removed; if it fails to go away then we'll have + # a log of the exceptions to debug. + LOG.debug('Error writing to bcache stop file %s, device removed: %s', + bcache_stop, e) + + def shutdown_bcache(device): """ Shut down bcache for specified bcache device @@ -132,8 +150,7 @@ os.path.basename(bcache_cache_sysfs)) else: LOG.info('stopping bcache cacheset at: %s', bcache_cache_sysfs) - util.write_file(os.path.join(bcache_cache_sysfs, 'stop'), - '1', mode=None) + maybe_stop_bcache_device(bcache_cache_sysfs) try: util.wait_for_removal(bcache_cache_sysfs, retries=removal_retries) except OSError: @@ -162,8 +179,7 @@ return else: LOG.info('stopping bcache backing device at: %s', bcache_block_sysfs) - util.write_file(os.path.join(bcache_block_sysfs, 'stop'), - '1', mode=None) + maybe_stop_bcache_device(bcache_block_sysfs) try: # wait for them all to go away for dev in [device, bcache_block_sysfs] + slave_paths: @@ -244,6 +260,19 @@ LOG.info("extended partitions do not need wiping, so skipping: '%s'", blockdev) else: + # some volumes will be claimed by the bcache layer but do not surface + # an actual /dev/bcacheN device which owns the parts (backing, cache) + # The result is that some volumes cannot be wiped while bcache claims + # the device. Resolve this by stopping bcache layer on those volumes + # if present. + for bcache_path in ['bcache', 'bcache/set']: + stop_path = os.path.join(device, bcache_path) + if os.path.exists(stop_path): + LOG.debug('Attempting to release bcache layer from device: %s', + device) + maybe_stop_bcache_device(stop_path) + continue + retries = [1, 3, 5, 7] LOG.info('wiping superblock on %s', blockdev) for attempt, wait in enumerate(retries): diff -Nru curtin-0.1.0~bzr532/curtin/block/__init__.py curtin-17.1-11-ga4c9636b/curtin/block/__init__.py --- curtin-0.1.0~bzr532/curtin/block/__init__.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/curtin/block/__init__.py 2018-01-18 21:10:51.000000000 +0000 @@ -776,17 +776,21 @@ @contextmanager -def exclusive_open(path): +def exclusive_open(path, exclusive=True): """ - Obtain an exclusive file-handle to the file/device specified + Obtain an exclusive file-handle to the file/device specified unless + caller specifics exclusive=False. 
""" mode = 'rb+' fd = None if not os.path.exists(path): raise ValueError("No such file at path: %s" % path) + flags = os.O_RDWR + if exclusive: + flags += os.O_EXCL try: - fd = os.open(path, os.O_RDWR | os.O_EXCL) + fd = os.open(path, flags) try: fd_needs_closing = True with os.fdopen(fd, mode) as fo: @@ -875,7 +879,8 @@ return zero_file_at_offsets(path, offsets, buflen=buflen, count=count) -def zero_file_at_offsets(path, offsets, buflen=1024, count=1024, strict=False): +def zero_file_at_offsets(path, offsets, buflen=1024, count=1024, strict=False, + exclusive=True): """ write zeros to file at specified offsets """ @@ -890,7 +895,8 @@ tot = buflen * count msg_vals = {'path': path, 'tot': buflen * count} - with exclusive_open(path) as fp: + # allow caller to control if we require exclusive open + with exclusive_open(path, exclusive=exclusive) as fp: # get the size by seeking to end. fp.seek(0, 2) size = fp.tell() diff -Nru curtin-0.1.0~bzr532/curtin/block/mkfs.py curtin-17.1-11-ga4c9636b/curtin/block/mkfs.py --- curtin-0.1.0~bzr532/curtin/block/mkfs.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/curtin/block/mkfs.py 2018-01-18 21:10:51.000000000 +0000 @@ -64,18 +64,8 @@ } family_flag_mappings = { - "label": {"btrfs": "--label", - "ext": "-L", - "fat": "-n", - "jfs": "-L", - "ntfs": "--label", - "reiserfs": "--label", - "swap": "--label", - "xfs": "-L"}, - "uuid": {"btrfs": "--uuid", - "ext": "-U", - "reiserfs": "--uuid", - "swap": "--uuid"}, + "fatsize": {"fat": ("-F", "{fatsize}")}, + # flag with no parameter "force": {"btrfs": "--force", "ext": "-F", "fat": "-I", @@ -83,18 +73,31 @@ "reiserfs": "-f", "swap": "--force", "xfs": "-f"}, - "fatsize": {"fat": "-F"}, + "label": {"btrfs": ("--label", "{label}"), + "ext": ("-L", "{label}"), + "fat": ("-n", "{label}"), + "jfs": ("-L", "{label}"), + "ntfs": ("--label", "{label}"), + "reiserfs": ("--label", "{label}"), + "swap": ("--label", "{label}"), + "xfs": ("-L", "{label}")}, + # flag with no parameter, N.B: this isn't used/exposed "quiet": {"ext": "-q", "ntfs": "-q", "reiserfs": "-q", "xfs": "--quiet"}, "sectorsize": { - "btrfs": "--sectorsize", - "ext": "-b", - "fat": "-S", - "xfs": "-s", - "ntfs": "--sector-size", - "reiserfs": "--block-size"} + "btrfs": ("--sectorsize", "{sectorsize}",), + "ext": ("-b", "{sectorsize}"), + "fat": ("-S", "{sectorsize}"), + "ntfs": ("--sector-size", "{sectorsize}"), + "reiserfs": ("--block-size", "{sectorsize}"), + "xfs": ("-s", "{sectorsize}")}, + "uuid": {"btrfs": ("--uuid", "{uuid}"), + "ext": ("-U", "{uuid}"), + "reiserfs": ("--uuid", "{uuid}"), + "swap": ("--uuid", "{uuid}"), + "xfs": ("-m", "uuid={uuid}")}, } release_flag_mapping_overrides = { @@ -102,7 +105,8 @@ "force": {"btrfs": None}, "uuid": {"btrfs": None}}, "trusty": { - "uuid": {"btrfs": None}}, + "uuid": {"btrfs": None, + "xfs": None}}, } @@ -126,10 +130,18 @@ if strict: raise ValueError("flag '%s' not supported by fs family '%s'" % flag_name, fs_family) + else: + return ret + + if param is None: + ret.append(flag_sym) else: - ret = [flag_sym] - if param is not None: - ret.append(param) + params = [k.format(**{flag_name: param}) for k in flag_sym] + if list(params) == list(flag_sym): + raise ValueError("Param %s not used for flag_name=%s and " + "fs_family=%s." 
% (param, flag_name, fs_family)) + + ret.extend(params) return ret diff -Nru curtin-0.1.0~bzr532/curtin/commands/apt_config.py curtin-17.1-11-ga4c9636b/curtin/commands/apt_config.py --- curtin-0.1.0~bzr532/curtin/commands/apt_config.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/curtin/commands/apt_config.py 2018-01-18 21:10:51.000000000 +0000 @@ -232,7 +232,12 @@ Checks for existance of the expected mirror and warns if not found """ if mirror not in tmpl: - LOG.warn("Expected mirror '%s' not found in: %s", mirror, tmpl) + if mirror.endswith("/") and mirror[:-1] in tmpl: + LOG.debug("mirror_to_placeholder: '%s' did not exist in tmpl, " + "did without a trailing /. Accomodating.", mirror) + mirror = mirror[:-1] + else: + LOG.warn("Expected mirror '%s' not found in: %s", mirror, tmpl) return tmpl.replace(mirror, placeholder) diff -Nru curtin-0.1.0~bzr532/curtin/commands/block_meta.py curtin-17.1-11-ga4c9636b/curtin/commands/block_meta.py --- curtin-0.1.0~bzr532/curtin/commands/block_meta.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/curtin/commands/block_meta.py 2018-01-18 21:10:51.000000000 +0000 @@ -384,7 +384,11 @@ LOG.info("labeling device: '%s' with '%s' partition table", disk, ptable) if ptable == "gpt": - util.subp(["sgdisk", "--clear", disk]) + # Wipe both MBR and GPT that may be present on the disk. + # N.B.: wipe_volume wipes 1M at front and end of the disk. + # This could destroy disk data in filesystems that lived + # there. + block.wipe_volume(disk, mode='superblock') elif ptable in _dos_names: util.subp(["parted", disk, "--script", "mklabel", "msdos"]) else: @@ -544,6 +548,24 @@ info.get('id'), device, disk_ptable) LOG.debug("partnum: %s offset_sectors: %s length_sectors: %s", partnumber, offset_sectors, length_sectors) + + # Wipe the partition if told to do so, do not wipe dos extended partitions + # as this may damage the extended partition table + if config.value_as_boolean(info.get('wipe')): + LOG.info("Preparing partition location on disk %s", disk) + if info.get('flag') == "extended": + LOG.warn("extended partitions do not need wiping, so skipping: " + "'%s'" % info.get('id')) + else: + # wipe the start of the new partition first by zeroing 1M at the + # length of the previous partition + wipe_offset = int(offset_sectors * logical_block_size_bytes) + LOG.debug('Wiping 1M on %s at offset %s', disk, wipe_offset) + # We don't require exclusive access as we're wiping data at an + # offset and the current holder maybe part of the current storage + # configuration. 
+ block.zero_file_at_offsets(disk, [wipe_offset], exclusive=False) + if disk_ptable == "msdos": if flag in ["extended", "logical", "primary"]: partition_type = flag @@ -565,25 +587,6 @@ else: raise ValueError("parent partition has invalid partition table") - # check if we've triggered hidden metadata like md, lvm or bcache - part_kname = get_path_to_storage_volume(info.get('id'), storage_config) - holders = clear_holders.get_holders(part_kname) - if len(holders) > 0: - LOG.debug('Detected block holders on partition %s: %s', part_kname, - holders) - clear_holders.clear_holders(part_kname) - clear_holders.assert_clear(part_kname) - - # Wipe the partition if told to do so, do not wipe dos extended partitions - # as this may damage the extended partition table - if config.value_as_boolean(info.get('wipe')): - if info.get('flag') == "extended": - LOG.warn("extended partitions do not need wiping, so skipping: " - "'%s'" % info.get('id')) - else: - block.wipe_volume( - get_path_to_storage_volume(info.get('id'), storage_config), - mode=info.get('wipe')) # Make the name if needed if storage_config.get(device).get('name') and partition_type != 'extended': make_dname(info.get('id'), storage_config) @@ -617,9 +620,27 @@ def mount_handler(info, storage_config): + """ Handle storage config type: mount + + info = { + 'id': 'rootfs_mount', + 'type': 'mount', + 'path': '/', + 'options': 'defaults,errors=remount-ro', + 'device': 'rootfs', + } + + Mount specified device under target at 'path' and generate + fstab entry. + """ state = util.load_command_environment() path = info.get('path') filesystem = storage_config.get(info.get('device')) + mount_options = info.get('options') + # handle unset, or empty('') strings + if not mount_options: + mount_options = 'defaults' + if not path and filesystem.get('fstype') != "swap": raise ValueError("path to mountpoint must be specified") volume = storage_config.get(filesystem.get('volume')) @@ -635,41 +656,50 @@ mount_point = os.path.sep.join([state['target'], path]) mount_point = os.path.normpath(mount_point) - # Create mount point if does not exist - util.ensure_dir(mount_point) - - # Mount volume - util.subp(['mount', volume_path, mount_point]) - - path = "/%s" % path - - options = ["defaults"] + options = mount_options.split(",") # If the volume_path's kname is backed by iSCSI or (in the case of # LVM/DM) if any of its slaves are backed by iSCSI, then we need to # append _netdev to the fstab line if iscsi.volpath_is_iscsi(volume_path): LOG.debug("Marking volume_path:%s as '_netdev'", volume_path) options.append("_netdev") + + # Create mount point if does not exist + util.ensure_dir(mount_point) + + # Mount volume, with options + try: + opts = ['-o', ','.join(options)] + util.subp(['mount', volume_path, mount_point] + opts, capture=True) + except util.ProcessExecutionError as e: + LOG.exception(e) + msg = ('Mount failed: %s @ %s with options %s' % (volume_path, + mount_point, + ",".join(opts))) + LOG.error(msg) + raise RuntimeError(msg) + + # set path + path = "/%s" % path + else: path = "none" options = ["sw"] # Add volume to fstab if state['fstab']: - with open(state['fstab'], "a") as fp: - location = get_path_to_storage_volume(volume.get('id'), - storage_config) - uuid = block.get_volume_uuid(volume_path) - if len(uuid) > 0: - location = "UUID=%s" % uuid - - if filesystem.get('fstype') in ["fat", "fat12", "fat16", "fat32", - "fat64"]: - fstype = "vfat" - else: - fstype = filesystem.get('fstype') - fp.write("%s %s %s %s 0 0\n" % (location, path, fstype, - 
",".join(options))) + uuid = block.get_volume_uuid(volume_path) + location = ("UUID=%s" % uuid) if uuid else ( + get_path_to_storage_volume(volume.get('id'), + storage_config)) + + fstype = filesystem.get('fstype') + if fstype in ["fat", "fat12", "fat16", "fat32", "fat64"]: + fstype = "vfat" + + fstab_entry = "%s %s %s %s 0 0\n" % (location, path, fstype, + ",".join(options)) + util.write_file(state['fstab'], fstab_entry, omode='a') else: LOG.info("fstab not in environment, so not writing") @@ -704,7 +734,9 @@ else: # Create vgrcreate command and run # capture output to avoid printing it to log - util.subp(['vgcreate', name] + device_paths, capture=True) + # Use zero to clear target devices of any metadata + util.subp(['vgcreate', '--force', '--zero=y', '--yes', + name] + device_paths, capture=True) # refresh lvmetad lvm.lvm_scan() @@ -735,11 +767,17 @@ "possibility of damaging lvm partitions intended to be " "preserved." % (info.get('id'), volgroup)) else: - cmd = ["lvcreate", volgroup, "-n", name] + # Use 'wipesignatures' (if available) and 'zero' to clear target lv + # of any fs metadata + cmd = ["lvcreate", volgroup, "--name", name, "--zero=y"] + release = util.lsb_release()['codename'] + if release not in ['precise', 'trusty']: + cmd.extend(["--wipesignatures=y"]) + if info.get('size'): - cmd.extend(["-L", info.get('size')]) + cmd.extend(["--size", info.get('size')]) else: - cmd.extend(["-l", "100%FREE"]) + cmd.extend(["--extents", "100%FREE"]) util.subp(cmd) diff -Nru curtin-0.1.0~bzr532/curtin/commands/block_wipe.py curtin-17.1-11-ga4c9636b/curtin/commands/block_wipe.py --- curtin-0.1.0~bzr532/curtin/commands/block_wipe.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/curtin/commands/block_wipe.py 2018-01-18 21:10:51.000000000 +0000 @@ -18,11 +18,15 @@ import sys import curtin.block as block from . import populate_one_subcmd +from .. import log + +LOG = log.LOG def wipe_main(args): for blockdev in args.devices: try: + LOG.debug('Wiping volume %s with mode=%s', blockdev, args.mode) block.wipe_volume(blockdev, mode=args.mode) except Exception as e: sys.stderr.write( diff -Nru curtin-0.1.0~bzr532/curtin/commands/clear_holders.py curtin-17.1-11-ga4c9636b/curtin/commands/clear_holders.py --- curtin-0.1.0~bzr532/curtin/commands/clear_holders.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/curtin/commands/clear_holders.py 2018-01-18 21:10:51.000000000 +0000 @@ -28,7 +28,7 @@ raise ValueError('invalid devices specified') block.clear_holders.start_clear_holders_deps() block.clear_holders.clear_holders(args.devices, try_preserve=args.preserve) - if args.try_preserve: + if args.preserve: print('ran clear_holders attempting to preserve data. however, ' 'hotplug support for some devices may cause holders to restart ') block.clear_holders.assert_clear(args.devices) diff -Nru curtin-0.1.0~bzr532/curtin/commands/curthooks.py curtin-17.1-11-ga4c9636b/curtin/commands/curthooks.py --- curtin-0.1.0~bzr532/curtin/commands/curthooks.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/curtin/commands/curthooks.py 2018-01-18 21:10:51.000000000 +0000 @@ -36,6 +36,8 @@ from . import populate_one_subcmd +write_files = futil._legacy_write_files # LP: #1731709 + CMD_ARGUMENTS = ( ((('-t', '--target'), {'help': 'operate on target. default is env[TARGET_MOUNT_POINT]', @@ -689,14 +691,11 @@ if pkg not in needed_packages: needed_packages.add(pkg) - # FIXME: This needs cleaning up. - # do not install certain packages on artful as they are no longer needed. 
- # ifenslave specifically causes issuse due to dependency on ifupdown. - codename = util.lsb_release(target=target).get('codename') - if codename == 'artful': + # Filter out ifupdown network packages on netplan enabled systems. + if 'ifupdown' not in installed_packages and 'nplan' in installed_packages: drops = set(['bridge-utils', 'ifenslave', 'vlan']) if needed_packages.union(drops): - LOG.debug("Skipping install of %s. Not needed on artful.", + LOG.debug("Skipping install of %s. Not needed on netplan system.", needed_packages.union(drops)) needed_packages = needed_packages.difference(drops) diff -Nru curtin-0.1.0~bzr532/curtin/commands/install.py curtin-17.1-11-ga4c9636b/curtin/commands/install.py --- curtin-0.1.0~bzr532/curtin/commands/install.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/curtin/commands/install.py 2018-01-18 21:10:51.000000000 +0000 @@ -120,7 +120,9 @@ def __init__(self, config): top_d = tempfile.mkdtemp() state_d = os.path.join(top_d, 'state') - target_d = os.path.join(top_d, 'target') + target_d = config.get('install', {}).get('target') + if not target_d: + target_d = os.path.join(top_d, 'target') scratch_d = os.path.join(top_d, 'scratch') for p in (state_d, target_d, scratch_d): os.mkdir(p) @@ -477,6 +479,11 @@ '/root/curtin-install.log') if log_target_path: copy_install_log(logfile, workingd.target, log_target_path) + + if instcfg.get('unmount', "") == "disabled": + LOG.info('Skipping unmount: config disabled target unmounting') + return + # unmount everything (including iscsi disks) util.do_umount(workingd.target, recursive=True) diff -Nru curtin-0.1.0~bzr532/curtin/commands/main.py curtin-17.1-11-ga4c9636b/curtin/commands/main.py --- curtin-0.1.0~bzr532/curtin/commands/main.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/curtin/commands/main.py 2018-01-18 21:10:51.000000000 +0000 @@ -29,11 +29,11 @@ VERSIONSTR = version.version_string() SUB_COMMAND_MODULES = [ - 'apply_net', 'block-attach-iscsi', 'block-detach-iscsi', - 'block-info', 'block-meta', 'block-wipe', 'curthooks', - 'clear-holders', 'extract', 'hook', 'in-target', 'install', 'mkfs', - 'net-meta', 'apt-config', 'pack', 'swap', 'system-install', - 'system-upgrade', 'version'] + 'apply_net', 'apt-config', 'block-attach-iscsi', 'block-detach-iscsi', + 'block-info', 'block-meta', 'block-wipe', 'clear-holders', 'curthooks', + 'extract', 'hook', 'install', 'mkfs', 'in-target', 'net-meta', 'pack', + 'swap', 'system-install', 'system-upgrade', 'unmount', 'version', +] def add_subcmd(subparser, subcmd): diff -Nru curtin-0.1.0~bzr532/curtin/commands/unmount.py curtin-17.1-11-ga4c9636b/curtin/commands/unmount.py --- curtin-0.1.0~bzr532/curtin/commands/unmount.py 1970-01-01 00:00:00.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/curtin/commands/unmount.py 2018-01-18 21:10:51.000000000 +0000 @@ -0,0 +1,60 @@ +# Copyright (C) 2017 Canonical Ltd. +# +# Author: Ryan Harper +# +# Curtin is free software: you can redistribute it and/or modify it under +# the terms of the GNU Affero General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for +# more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with Curtin. 
If not, see . + +from curtin.log import LOG +from curtin import util +from . import populate_one_subcmd + +import os + +try: + FileMissingError = FileNotFoundError +except NameError: + FileMissingError = IOError + + +def unmount_main(args): + """ + run util.umount(target, recursive=True) + """ + if args.target is None: + msg = "Missing target. Please provide target path parameter" + raise ValueError(msg) + + if not os.path.exists(args.target): + msg = "Cannot unmount target path %s: it does not exist" % args.target + raise FileMissingError(msg) + + LOG.info("Unmounting devices from target path: %s", args.target) + recursive_mode = not args.disable_recursive_mounts + util.do_umount(args.target, recursive=recursive_mode) + + +CMD_ARGUMENTS = ( + (('-t', '--target'), + {'help': ('Path to mountpoint to be unmounted.' + 'The default is env variable "TARGET_MOUNT_POINT"'), + 'metavar': 'TARGET', 'action': 'store', + 'default': os.environ.get('TARGET_MOUNT_POINT')}), + (('-d', '--disable-recursive-mounts'), + {'help': 'Disable unmounting recursively under target', + 'default': False, 'action': 'store_true'}), +) + + +def POPULATE_SUBCMD(parser): + populate_one_subcmd(parser, CMD_ARGUMENTS, unmount_main) diff -Nru curtin-0.1.0~bzr532/curtin/futil.py curtin-17.1-11-ga4c9636b/curtin/futil.py --- curtin-0.1.0~bzr532/curtin/futil.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/curtin/futil.py 2018-01-18 21:10:51.000000000 +0000 @@ -18,6 +18,7 @@ import grp import pwd import os +import warnings from .util import write_file, target_path from .log import LOG @@ -101,3 +102,15 @@ content=info.get('content', ''), owner=info.get('owner', "-1:-1"), perms=info.get('permissions', info.get('perms', "0644"))) + + +def _legacy_write_files(cfg, base_dir=None): + """Backwards compatibility for curthooks.write_files (LP: #1731709) + It needs to work like: + curthooks.write_files(cfg, target) + cfg is a 'cfg' dictionary with a 'write_files' entry in it. + """ + warnings.warn( + "write_files use from curtin.util is deprecated. " + "Please use curtin.futil.write_files.", DeprecationWarning) + return write_files(cfg.get('write_files', {}), base_dir=base_dir) diff -Nru curtin-0.1.0~bzr532/curtin/__init__.py curtin-17.1-11-ga4c9636b/curtin/__init__.py --- curtin-0.1.0~bzr532/curtin/__init__.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/curtin/__init__.py 2018-01-18 21:10:51.000000000 +0000 @@ -43,6 +43,6 @@ 'HAS_VERSION_MODULE', ] -__version__ = "0.1.0" +__version__ = "17.1" # vi: ts=4 expandtab syntax=python diff -Nru curtin-0.1.0~bzr532/curtin/util.py curtin-17.1-11-ga4c9636b/curtin/util.py --- curtin-0.1.0~bzr532/curtin/util.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/curtin/util.py 2018-01-18 21:10:51.000000000 +0000 @@ -54,8 +54,8 @@ from .log import LOG -_INSTALLED_HELPERS_PATH = '/usr/lib/curtin/helpers' -_INSTALLED_MAIN = '/usr/bin/curtin' +_INSTALLED_HELPERS_PATH = 'usr/lib/curtin/helpers' +_INSTALLED_MAIN = 'usr/bin/curtin' _LSB_RELEASE = {} _USES_SYSTEMD = None @@ -677,6 +677,24 @@ return None +def _installed_file_path(path, check_file=None): + # check the install root for the file 'path'. + # if 'check_file', then path is a directory that contains file. + # return absolute path or None. 
+ inst_pre = "/" + if os.environ.get('SNAP'): + inst_pre = os.path.abspath(os.environ['SNAP']) + inst_path = os.path.join(inst_pre, path) + if check_file: + check_path = os.path.sep.join((inst_path, check_file)) + else: + check_path = inst_path + + if os.path.isfile(check_path): + return os.path.abspath(inst_path) + return None + + def get_paths(curtin_exe=None, lib=None, helpers=None): # return a dictionary with paths for 'curtin_exe', 'helpers' and 'lib' # that represent where 'curtin' executable lives, where the 'curtin' module @@ -698,17 +716,17 @@ if found: curtin_exe = found - if (curtin_exe is None and os.path.exists(_INSTALLED_MAIN)): - curtin_exe = _INSTALLED_MAIN + if curtin_exe is None: + curtin_exe = _installed_file_path(_INSTALLED_MAIN) - cfile = "common" # a file in 'helpers' + # "common" is a file in helpers + cfile = "common" if (helpers is None and os.path.isfile(os.path.join(tld, "helpers", cfile))): helpers = os.path.join(tld, "helpers") - if (helpers is None and - os.path.isfile(os.path.join(_INSTALLED_HELPERS_PATH, cfile))): - helpers = _INSTALLED_HELPERS_PATH + if helpers is None: + helpers = _installed_file_path(_INSTALLED_HELPERS_PATH, cfile) return({'curtin_exe': curtin_exe, 'lib': mydir, 'helpers': helpers}) diff -Nru curtin-0.1.0~bzr532/curtin/version.py curtin-17.1-11-ga4c9636b/curtin/version.py --- curtin-0.1.0~bzr532/curtin/version.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/curtin/version.py 2018-01-18 21:10:51.000000000 +0000 @@ -15,15 +15,15 @@ if not _PACKED_VERSION.startswith('@@'): return _PACKED_VERSION - revno = None version = old_version - bzrdir = os.path.abspath(os.path.join(__file__, '..', '..', '.bzr')) - if os.path.isdir(bzrdir): + gitdir = os.path.abspath(os.path.join(__file__, '..', '..', '.git')) + if os.path.exists(gitdir): try: - out = subprocess.check_output(['bzr', 'revno'], cwd=bzrdir) - revno = out.decode('utf-8').strip() - if revno: - version += "~bzr%s" % revno + out = subprocess.check_output( + ['git', 'describe', '--long', '--abbrev=8', + "--match=[0-9][0-9]*"], + cwd=os.path.dirname(gitdir)) + version = out.decode('utf-8').strip() except subprocess.CalledProcessError: pass diff -Nru curtin-0.1.0~bzr532/debian/changelog curtin-17.1-11-ga4c9636b/debian/changelog --- curtin-0.1.0~bzr532/debian/changelog 2017-10-06 16:07:36.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/debian/changelog 2018-01-18 21:30:09.000000000 +0000 @@ -1,3 +1,53 @@ +curtin (17.1-11-ga4c9636b-0ubuntu1~16.04.1) xenial; urgency=medium + + * New upstream snapshot. (LP: #1743618) + - tests: cleanup the apt custom sources test. + - apt: Be more lenient when replacing mirrors in /etc/apt/sources.list + - vmtest: Drop Zesty release + - vmtest: initialize logger with class names for easy parsing + - packaging: Do not mention primary contributors in debian/changelog. + + -- Ryan Harper Thu, 18 Jan 2018 15:30:09 -0600 + +curtin (17.1-6-g8b145067-0ubuntu1~16.04.1) xenial; urgency=medium + + * New upstream snapshot. (LP: #1743618) + - packaging: Update new-upstream-snapshot to work with git. + - Bump pylint to 1.8.1. + - vmtests: switch to MAAS v3 streams for images and kernels [Ryan Harper] + - tests: update pack tests to clear out pyc files more completely. + - debian/control: drop conflicts that had bzr version + - Switch uses of bzr to git, borrow from cloud-init git workflow. + - Release 17.1 + - packaging: update debian/build-deb to remove ~bzrREV when using equal tag + - pack: fix packing when curtin is installed inside a snap. 
+ - tox: move to pylint 1.7.4 + - vmtests: iscsi minor cleanup. + - vmtests: exercise rootfs over an lvm logical volume [Ryan Harper] + - Switch network dep filter to test for ifupdown/nplan instead of release + name [Ryan Harper] + - Allow control of curtin install unmounting [Ryan Harper] + - vmtests: Add Bionic release to tests and update classes. + - storage: add 'options' key mount type to specify mount parameters for + filesystems [Ryan Harper] + - Re-add curthooks.write_files method for backwards compat + [Ryan Harper] + - vmtest: Remove ArtfulTestBridging skip_by_date check, bug fixed + - Drop Precise from vmtest [Ryan Harper] + - clear_holders: bcache log IO/OS exceptions but do not raise [Ryan Harper] + - vmtest: Support newer qemu and multipath. + - block: enable control over exclusive_open use when wiping volumes + [Ryan Harper] + - block: handle wiping bcache parts [Ryan Harper] + - vmtests: Defer ArtfulNetworkMtu SkipbyDate to 2018 [Ryan Harper] + - bcache: accept sysfs write failure in shutdown handler if path missing + [Ryan Harper] + - vmtest: Rephrase a message about no disks to be less scary + - block_meta: use block.wipe_volume(mode=superblock) to clear MBR/GPT + tables [Ryan Harper] + + -- Ryan Harper Tue, 16 Jan 2018 15:24:44 -0600 + curtin (0.1.0~bzr532-0ubuntu1~16.04.1) xenial; urgency=medium * New upstream snapshot. (LP: #1721808) diff -Nru curtin-0.1.0~bzr532/debian/changelog.trunk curtin-17.1-11-ga4c9636b/debian/changelog.trunk --- curtin-0.1.0~bzr532/debian/changelog.trunk 1970-01-01 00:00:00.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/debian/changelog.trunk 2018-01-18 21:30:09.000000000 +0000 @@ -0,0 +1,5 @@ +curtin (UPSTREAM_VER-0ubuntu1) UNRELEASED; urgency=low + + * Initial release + + -- Scott Moser Mon, 29 Jul 2013 16:12:09 -0400 diff -Nru curtin-0.1.0~bzr532/debian/control curtin-17.1-11-ga4c9636b/debian/control --- curtin-0.1.0~bzr532/debian/control 2017-10-06 16:07:36.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/debian/control 2018-01-18 21:30:09.000000000 +0000 @@ -50,7 +50,6 @@ Architecture: all Priority: extra Depends: ${misc:Depends} -Conflicts: curtin (<= 0.1.0~bzr54-0ubuntu1) Description: Library and tools for curtin installer This package contains utilities for the curtin installer. 
@@ -71,7 +70,6 @@ Section: python Architecture: all Priority: extra -Conflicts: curtin (<= 0.1.0~bzr54-0ubuntu1) Depends: curtin-common (= ${binary:Version}), python3-oauthlib, python3-yaml, diff -Nru curtin-0.1.0~bzr532/debian/new-upstream-snapshot curtin-17.1-11-ga4c9636b/debian/new-upstream-snapshot --- curtin-0.1.0~bzr532/debian/new-upstream-snapshot 2017-10-06 16:07:36.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/debian/new-upstream-snapshot 2018-01-18 21:30:09.000000000 +0000 @@ -1,55 +1,52 @@ #!/bin/sh +TEMP_D="" +CR=' +' +error() { echo "$@" 1>&2; } +fail() { [ $# -eq 0 ] || error "$@"; exit 1; } Usage() { - cat <&2; exit 1; } print_commit() { - local subject="$1" author="$2" bugs="$3" aname="" + local subject="$1" author="$2" bugs="$3" aname="" buf="" abugs="" + local indent=" - " indent2=" " ll=79 aname=${author% <*} - echo " - $subject${aname:+ [${aname}]}${bugs:+ (LP: ${bugs})}" + local skipname="" + case "$aname" in + Scott*Moser|Ryan*Harper) aname="";; + esac + [ "${aname}" = "Scott Moser" ] && aname="" + abugs="${aname:+ [${aname}]}${bugs:+ (LP: ${bugs})}" + if [ $((${#subject}+${#abugs})) -le $(($ll-${#indent})) ]; then + echo "${indent}${subject}${abugs}" + elif [ ${#subject} -ge $(($ll-${#indent})) ]; then + echo "${subject}${abugs}" | + fmt --width=$(($ll-${#indent})) | + sed -e "1s/^/${indent}/; 1n;" \ + -e 's/^[ ]*//' \ + -e '/^[ ]*$/d' -e "s/^/$indent2/" -e 's/[ ]\+$//' + + else + ( echo "${subject}"; echo "${abugs}" ) | + fmt --width=$(($ll-${#indent})) | + sed -e "1s/^/${indent}/; 1n;" \ + -e 's/^[ ]*//' \ + -e '/^[ ]*$/d' -e "s/^/$indent2/" -e 's/[ ]\+$//' + fi } -# unfortunately seems like no easy way to get 'Author' unless -# the committer sets it on the commit. And since curtin is doing -# bzr merge and letting committer merge it, then author doesn't -# end up correct on most commits. -# -# log messages look like: -#revno: 424 [merge] -#fixes bug: https://launchpad.net/bugs/1618429 -#committer: Ryan Harper -#branch nick: merge-wesley-lp1618429 -#timestamp: Thu 2016-09-15 08:48:13 -0500 -#message: -# block/mdadm: add option to ignore mdadm_assemble errors -# -# When wiping disks with mdadm partitions we may encounter unexpected -# return codes. In the case of wiping disks, we don't care if we -# observe any error so allow wipe to ignore the errors by explicitly -# passing in ignore errors. -bzr_log_to_dch() { - local line="" commit="" lcommit="" bugs="" bug="" +git_log_to_dch() { + local line="" commit="" lcommit="" bugs="" while :; do read line || break case "$line" in - revno:\ *) + commit\ *) if [ -n "$commit" ]; then print_commit "$subject" "$author" "$bugs" fi @@ -59,111 +56,139 @@ subject="" ;; Author:*) author="${line#Author: }";; - fixes\ bug:*) - # fixes bug: https://launchpad.net/bugs/1618429 - bug="#${line##*/}" - bugs="${bugs:+${bugs}, }${bug}";; - message:) - read subject;; + LP:*) bugs="${bugs:+${bugs}, }${line#*: }";; + "") [ -z "$subject" ] && read subject;; esac done if [ -n "$commit" ]; then print_commit "$subject" "$author" "$bugs" fi } +cleanup() { + [ ! 
-d "${TEMP_D}" ] || rm -Rf "${TEMP_D}" +} -[ $# -eq 0 ] && { Usage 1>&2; exit 1; } -[ "$1" = "-h" -o "$1" = "--help" ] && { Usage; exit 0; } -trunk=$1 -revno=${2:-tip} -uver=${3:-0.1.0} # the *next* upstream version - -[ -d "$trunk" ] || - fail "trunk dir '$trunk' not a dir" -if [ "$revno" = "tip" ]; then - revno=$(cd "$trunk" && bzr revno) || - fail "failed getting bzr revno from $trunk" +from_ref=${1:-"master"} +cur_branch=$(git rev-parse --abbrev-ref HEAD) || + fail "failed to get current branch" + +case "$cur_branch" in + ubuntu/*) :;; + *) fail "You are on branch '$cur_branch', expect to be on ubuntu/*";; +esac + +TEMP_D=$(mktemp -d) || fail "failed mktemp" +trap cleanup EXIT + +prev_pkg_ver=$(dpkg-parsechangelog --show-field Version) || + fail "failed reading package version" +pkg_name=$(dpkg-parsechangelog --show-field Source) || + fail "failed to read Source from changelog" + +# turn 0.7.7-10-gbc2c326-0ubuntu1 into 'bc2c326' +t=${prev_pkg_ver%-*} +prev_pkg_hash=${t##*-g} +case "${prev_pkg_ver}" in + *-*-g[a-z]*-*) + # use the hash. + t=${prev_pkg_ver%-*} + prev_pkg_hash=${t##*-g};; + *) + prev_tag=ubuntu/$(echo "${prev_pkg_ver}" | sed 's,~,_,g') + echo "prev_tag=$prev_tag" + out=$(git tag --list "${prev_tag}") || + fail "failed to look for a tag $prev_tag" + if [ -z "$out" ]; then + fail "no way to figure out merge-base between " \ + "${prev_pkg_ver} and ${from_ref}" + fi + prev_pkg_hash=$(git merge-base "${from_ref}" "$prev_tag") || + fail "failed git merge-base \"${from_ref}\" \"${prev_tag}\"" + [ -n "${prev_pkg_hash}" ] || + fail "cannot find merge base for ${prev_pkg_ver} and ${from_ref}" + ;; +esac + +new_pkg_debian="0ubuntu1" +new_upstream_ver=$(git describe --abbrev=8 "${from_ref}") +new_pkg_ver="${new_upstream_ver}-${new_pkg_debian}" + +prev_upstream_ver=${prev_pkg_ver%-*} +if [ "${prev_upstream_ver}" = "${new_upstream_ver}" ]; then + echo "nothing to commit. '$from_ref' is at ${new_upstream_ver}." + exit 0 fi -pversion=$(dpkg-parsechangelog --show-field Version) || - fail "failed to read previous version with dpkg-parsechangelog" -prevno=$(echo "$pversion" | sed 's,.*bzr\([0-9]\+\)-.*,\1,') || - fail "fail reading previous bzr revision from previous version '$pversion'" -version=${uver}~bzr${revno} -tarball=../curtin-${version}.tar.gz -t=../curtin_${version}.orig.tar.gz -if [ -f "$t" ]; then - echo "using '$t' as tarball" 1>&2 - tarball="$t" -else - echo "creating $tarball with bzr export" 1>&2 - bzr export --format=tgz "--revision=${revno}" "$tarball" "${trunk}" || - fail "failed exporting bzr in $trunk to $tarball" - ln -sf "${tarball##*/}" "$t" || - fail "failed linking ${tarball##*/} to $t" + +dpseries="debian/patches/series" +if [ -e $dpseries ]; then + drops="" + while read bname extra; do + case "$bname" in + cpick-*) + commit=${bname#cpick-} + commit=${commit%%-*} + echo "bname=$bname commit=${commit}" 1>&2 + if git merge-base --is-ancestor "$commit" "$from_ref"; then + drops="${drops} debian/patches/$bname" + fi + ;; + *) echo "$bname${extra:+ ${extra}}";; + esac + done < $dpseries > "${TEMP_D}/series" + drops=${drops# } + if [ -n "$drops" ]; then + cp "${TEMP_D}/series" "$dpseries" || + fail "failed copying to $dpseries" + if [ ! 
-s $dpseries ]; then + git rm --force "$dpseries" || + fail "failed removing empty $dpseries: git rm $dpseries" + fi + msg="drop cherry picks before merge from ${from_ref} at $new_upstream_ver" + msg="$msg${CR}${CR}drop the following cherry picks:" + for file in $drops; do + git rm "$file" || fail "failed to git rm $file" + msg="${msg}$CR $file" + done + git commit -m "$msg" "$dpseries" $drops + fi fi -#bzr merge-upstream "$tarball" "--version=${version}" || -# fail "failed merge-upstream of $tarball at version=$version" -tmpd=$(mktemp -d "${TMPDIR:-/tmp}/curtin.${0##*/}.XXXXXX") -trap 'rm -Rf "$tmpd"' EXIT -newflist="${tmpd}/new-files" -oldflist="${tmpd}/old-files" - -tar -tf "$tarball" \ - --strip-components=1 --exclude="*/debian" > "$newflist.full" || - fail "failed tar tf on $tarball" -sed 's,^[^/]*/,,' "$newflist.full" > "$newflist" - -bzr ls --recursive --versioned > "$oldflist.full" || - fail "failed bzr ls --recursive" -grep -v "^debian/" "$oldflist.full" > "$oldflist" - -cat "$oldflist" "$newflist" "$newflist" > "$tmpd/all-old" || - fail "failed getting all old files" -cat "$newflist" "$oldflist" "$oldflist" > "$tmpd/all-new" || - fail "failed getting all new" - -removed="${tmpd}/removed" -added="$tmpd/added" -sort "$tmpd/all-old" | uniq --uniq > "$removed" -sort "$tmpd/all-new" | uniq --uniq > "$added" - -while read rmfile; do - case "$rmfile" in - .pc/*) continue;; - */) rflag="-r";; - *) rflag="";; - esac - bzr rm $rflag "$rmfile" || fail "failed bzr rm${rflag:+ ${rflag}} $rmfile" -done < "$removed" - -for f in *; do - [ "$f" = "debian" ] && continue - rm -rf "$f" || fail "failed removing '$f'" -done - -tar --strip-components=1 --exclude "*/debian/*" -xf "$tarball" || - fail "failed extraction of $tarball" - -while read newfile; do - bzr add "$newfile" || fail "failed adding '$newfile'" -done < "$added" - -oldrev=$(($prevno+1)) -( cd "$trunk" && bzr log -r "${oldrev}..${revno}" ) > new-changes.log || - fail "failed to get changes from ${oldrev}..$revno" -bzr_log_to_dch < new-changes.log > new-dch.log -cat < "$gitlog" || + fail "failed git log ${prev_pkg_hash}..${from_ref}" + +cat >> "$clog" <> "$clog" || + fail "failed git_log_to_dch" +cat >> "$clog" < $(date -R) -then: - $ dch --edit - # read changes in new-dch.log or full changes in new-changes.log - # and write changelog - $ debcommit - $ dch --release - $ debcommit --release - $ debuild -S EOF +cat "$clog" "debian/changelog" > "$TEMP_D/newlog" && + cp "$TEMP_D/newlog" "debian/changelog" || + fail "failed replacing debian/changelog" + +dch -e || fail "dch -e exited $?" + +git diff + +echo -n "Commit this change? (Y/n): " +read answer || fail "failed to read answer" +case "$answer" in + n|[Nn][oO]) exit 1;; +esac + +msg="update changelog (new upstream snapshot $new_upstream_ver)." +git commit -m "$msg" debian/changelog || + fail "failed to commit '$msg'" diff -Nru curtin-0.1.0~bzr532/doc/topics/config.rst curtin-17.1-11-ga4c9636b/doc/topics/config.rst --- curtin-0.1.0~bzr532/doc/topics/config.rst 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/doc/topics/config.rst 2018-01-18 21:10:51.000000000 +0000 @@ -205,6 +205,23 @@ Curtin will save the merged configuration data into the target OS at the path of ``save_install_config``. This defaults to /root/curtin-install-cfg.yaml +**save_install_logs**: ** + +Curtin will copy the install log to a specific path in the target +filesystem. This defaults to /root/install.log + +**target**: ** + +Control where curtin mounts the target device for installing the OS. 
If this +value is unset, curtin picks a suitable path under a temporary directory. If +a value is set, then curtin will utilize the ``target`` value instead. + +**unmount**: *disabled* + +If this key is set to the string 'disabled' then curtin will not +unmount the target filesystem when install is complete. This +skips unmounting in all cases of install success or failure. + **Example**:: install: @@ -213,6 +230,9 @@ - /tmp/install.log - /var/log/syslog save_install_config: /root/myconf.yaml + save_install_log: /var/log/curtin-install.log + target: /my_mount_point + unmount: disabled kernel diff -Nru curtin-0.1.0~bzr532/doc/topics/development.rst curtin-17.1-11-ga4c9636b/doc/topics/development.rst --- curtin-0.1.0~bzr532/doc/topics/development.rst 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/doc/topics/development.rst 2018-01-18 21:10:51.000000000 +0000 @@ -10,7 +10,7 @@ Install some virtualization and cloud packages to get started.:: - sudo apt-get -qy install kvm libvirt-bin cloud-utils bzr + sudo apt-get -qy install kvm libvirt-bin cloud-utils git Download cloud images @@ -33,19 +33,18 @@ Getting the source ================== -Download curtin source from launchpad via `bzr` command.:: +Download curtin source from launchpad via `git` command.:: mkdir -p ~/src - bzr init-repo ~/src/curtin - ( cd ~/src/curtin && bzr branch lp:curtin trunk.dist ) - ( cd ~/src/curtin && bzr branch trunk.dist trunk ) + cd ~/src + git clone https://git.launchpad.net/curtin Using curtin ============ Use `launch` to launch a kvm instance with user data to pack up local curtin and run it inside an instance.:: - cd ~/src/curtin/trunk + cd ~/src/curtin ./tools/launch $BOOTIMG --publish $ROOTTGZ -- curtin install "PUBURL/${ROOTTGZ##*/}" diff -Nru curtin-0.1.0~bzr532/doc/topics/storage.rst curtin-17.1-11-ga4c9636b/doc/topics/storage.rst --- curtin-0.1.0~bzr532/doc/topics/storage.rst 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/doc/topics/storage.rst 2018-01-18 21:10:51.000000000 +0000 @@ -366,12 +366,30 @@ fstab entry will contain ``_netdev`` to indicate networking is required to mount this filesystem. +**options**: ** + +The ``options`` key will replace the default options value of ``defaults``. + +.. warning:: + The kernel and user-space utilities may differ between the install + environment and the runtime environment. Not all kernels and user-space + combinations will support all options. Providing options for a mount point + will have both of the following effects: + + - ``curtin`` will mount the filesystems with the provided options during the installation. + + - ``curtin`` will ensure the target OS uses the provided mount options by updating the target OS (/etc/fstab). + + If either of the environments (install or target) do not have support for + the provided options, the behavior is undefined. 
+ **Config Example**:: - id: disk0-part1-fs1-mount0 type: mount path: /home device: disk0-part1-fs1 + options: 'noatime,errors=remount-ro' Lvm Volgroup Command ~~~~~~~~~~~~~~~~~~~~ diff -Nru curtin-0.1.0~bzr532/examples/tests/allindata.yaml curtin-17.1-11-ga4c9636b/examples/tests/allindata.yaml --- curtin-0.1.0~bzr532/examples/tests/allindata.yaml 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/examples/tests/allindata.yaml 2018-01-18 21:10:51.000000000 +0000 @@ -7,7 +7,7 @@ wipe: superblock ptable: gpt model: QEMU HARDDISK - path: /dev/vdb + serial: disk-a name: main_disk grub_device: 1 - id: bios_boot_partition @@ -46,7 +46,7 @@ wipe: superblock ptable: gpt model: QEMU HARDDISK - path: /dev/vdc + serial: disk-b name: second_disk - id: sdb1 type: partition @@ -69,7 +69,7 @@ wipe: superblock ptable: gpt model: QEMU HARDDISK - path: /dev/vdd + serial: disk-c name: third_disk - id: sdc1 type: partition @@ -92,7 +92,7 @@ wipe: superblock ptable: gpt model: QEMU HARDDISK - path: /dev/vde + serial: disk-d name: fourth_disk - id: sdd1 type: partition diff -Nru curtin-0.1.0~bzr532/examples/tests/basic_iscsi.yaml curtin-17.1-11-ga4c9636b/examples/tests/basic_iscsi.yaml --- curtin-0.1.0~bzr532/examples/tests/basic_iscsi.yaml 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/examples/tests/basic_iscsi.yaml 2018-01-18 21:10:51.000000000 +0000 @@ -5,7 +5,7 @@ type: disk ptable: msdos model: QEMU HARDDISK - path: /dev/vdb + serial: disk-a name: main_disk wipe: superblock grub_device: true diff -Nru curtin-0.1.0~bzr532/examples/tests/basic_scsi.yaml curtin-17.1-11-ga4c9636b/examples/tests/basic_scsi.yaml --- curtin-0.1.0~bzr532/examples/tests/basic_scsi.yaml 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/examples/tests/basic_scsi.yaml 2018-01-18 21:10:51.000000000 +0000 @@ -53,6 +53,7 @@ - id: btrfs_disk_mnt_id type: mount path: /btrfs + options: 'defaults,noatime' device: btrfs_disk_fmt_id - id: pnum_disk type: disk diff -Nru curtin-0.1.0~bzr532/examples/tests/basic.yaml curtin-17.1-11-ga4c9636b/examples/tests/basic.yaml --- curtin-0.1.0~bzr532/examples/tests/basic.yaml 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/examples/tests/basic.yaml 2018-01-18 21:10:51.000000000 +0000 @@ -6,7 +6,7 @@ type: disk ptable: msdos model: QEMU HARDDISK - path: /dev/vdb + serial: disk-a name: main_disk_with_in/\&valid@#dname wipe: superblock grub_device: true @@ -40,7 +40,7 @@ device: sda2_home - id: sparedisk_id type: disk - path: /dev/vdc + serial: disk-b name: sparedisk wipe: superblock - id: sparedisk_fat_fmt_id @@ -49,7 +49,7 @@ volume: sparedisk_id - id: btrfs_disk_id type: disk - path: /dev/vdd + serial: disk-c name: btrfs_volume wipe: superblock - id: btrfs_disk_fmt_id @@ -59,10 +59,11 @@ - id: btrfs_disk_mnt_id type: mount path: /btrfs + options: 'defaults,noatime' device: btrfs_disk_fmt_id - id: pnum_disk type: disk - path: /dev/vde + serial: disk-d name: pnum_disk wipe: superblock ptable: gpt diff -Nru curtin-0.1.0~bzr532/examples/tests/bcache_basic.yaml curtin-17.1-11-ga4c9636b/examples/tests/bcache_basic.yaml --- curtin-0.1.0~bzr532/examples/tests/bcache_basic.yaml 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/examples/tests/bcache_basic.yaml 2018-01-18 21:10:51.000000000 +0000 @@ -5,14 +5,14 @@ - id: id_rotary0 type: disk name: rotary0 - path: /dev/vdb + serial: disk-a ptable: msdos wipe: superblock grub_device: true - id: id_ssd0 type: disk name: ssd0 - path: /dev/vdc + serial: disk-b wipe: superblock - id: 
id_rotary0_part1 type: partition diff -Nru curtin-0.1.0~bzr532/examples/tests/bcache-wipe-xfs.yaml curtin-17.1-11-ga4c9636b/examples/tests/bcache-wipe-xfs.yaml --- curtin-0.1.0~bzr532/examples/tests/bcache-wipe-xfs.yaml 1970-01-01 00:00:00.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/examples/tests/bcache-wipe-xfs.yaml 2018-01-18 21:10:51.000000000 +0000 @@ -0,0 +1,74 @@ +showtrace: true + +early_commands: + # Create a partitioned disk with bcache metadata in one of the partitions. + # Then, wipe the partition table. This leaves "buried" bcache data that + # would be seen as soon as the disk was partitioned and cause problems + # for curtin's use of the disk. + # This config recreates issue LP: #1718699 + 00_blockmeta: [env, -u, OUTPUT_FSTAB, TARGET_MOUNT_POINT=/tmp/my.bdir/target, + WORKING_DIR=/tmp/my.bdir/work.d, curtin, --showtrace, -v, + block-meta, --umount, custom] + 01_clear_holders: [curtin, clear-holders, --preserve, /dev/disk/by-id/virtio-disk-b] + 02_quick_erase: [curtin, block-wipe, --mode, superblock, /dev/disk/by-id/virtio-disk-b] + +storage: + config: + - grub_device: true + id: vdb + name: vdb + serial: disk-b + ptable: msdos + type: disk + wipe: superblock + - id: vdc + name: vdc + serial: disk-c + type: disk + wipe: superblock + - device: vdb + id: vdb-part1 + name: vdb-part1 + number: 1 + offset: 4194304B + size: 3997171712B + type: partition + uuid: 1d112703-1ff7-49fb-9655-741016e216bf + wipe: superblock + - device: vdb + id: vdb-part2 + name: vdb-part2 + number: 2 + size: 3997171712B + type: partition + uuid: ec219a2e-c4a5-4623-b66a-965da2c6c1f1 + wipe: superblock + - backing_device: vdc + cache_device: vdb-part2 + cache_mode: writeback + id: bcache0 + name: bcache0 + type: bcache + - fstype: ext4 + id: vdb-part1_format + label: '' + type: format + uuid: 0687dc8f-c089-4f30-8603-0ddf646a5dd7 + volume: vdb-part1 + - fstype: xfs + id: bcache0_format + label: '' + type: format + uuid: c40a45b2-1f12-454e-a0ec-784eb4ded4e6 + volume: bcache0 + - device: vdb-part1_format + id: vdb-part1_mount + options: '' + path: / + type: mount + - device: bcache0_format + id: bcache0_mount + options: '' + path: /var/lib/ceph-bcache/test-disk + type: mount + version: 1 diff -Nru curtin-0.1.0~bzr532/examples/tests/dirty_disks_config.yaml curtin-17.1-11-ga4c9636b/examples/tests/dirty_disks_config.yaml --- curtin-0.1.0~bzr532/examples/tests/dirty_disks_config.yaml 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/examples/tests/dirty_disks_config.yaml 2018-01-18 21:10:51.000000000 +0000 @@ -1,4 +1,14 @@ +bucket: + - &run_if_systemd | + #!/bin/sh + command -v systemctl && systemctl mask media-root\\x2dro.mount + exit 0 + early_commands: + # systemd sucks; automatic mount and fsck services clash with curtin + # Once LP: #1723183 has landed (cloud-initramfs-tools 0.40ubuntu1) + # on all releases we can drop this workaround. 
+ 01-unsuckify-systemd-mounts: [sh, -c, *run_if_systemd] # running block-meta custom from the install environment # inherits the CONFIG environment, so this works to actually prepare # the disks exactly as in this config before the rest of the install diff -Nru curtin-0.1.0~bzr532/examples/tests/install_disable_unmount.yaml curtin-17.1-11-ga4c9636b/examples/tests/install_disable_unmount.yaml --- curtin-0.1.0~bzr532/examples/tests/install_disable_unmount.yaml 1970-01-01 00:00:00.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/examples/tests/install_disable_unmount.yaml 2018-01-18 21:10:51.000000000 +0000 @@ -0,0 +1,18 @@ +install: + unmount: disabled + +showtrace: true + +post_cmds: + - &cat_proc_mounts |- + cat /proc/mounts | tee ${TARGET_MOUNT_POINT}/root/postinst_mounts.out + + - &echo_target_mp |- + echo ${TARGET_MOUNT_POINT} | tee ${TARGET_MOUNT_POINT}/root/target.out + +# capture proc/mounts, unmount, capture proc/mounts +late_commands: + 01_get_proc_mounts: [sh, -c, *cat_proc_mounts] + 02_write_out_target: [sh, -c, *echo_target_mp] + 03_unmount_target: [curtin, unmount] + 04_get_proc_mounts: [cat, /proc/mounts] diff -Nru curtin-0.1.0~bzr532/examples/tests/lvm_iscsi.yaml curtin-17.1-11-ga4c9636b/examples/tests/lvm_iscsi.yaml --- curtin-0.1.0~bzr532/examples/tests/lvm_iscsi.yaml 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/examples/tests/lvm_iscsi.yaml 2018-01-18 21:10:51.000000000 +0000 @@ -5,7 +5,7 @@ type: disk ptable: msdos model: QEMU HARDDISK - path: /dev/vdb + serial: disk-a name: main_disk wipe: superblock grub_device: true diff -Nru curtin-0.1.0~bzr532/examples/tests/lvmroot.yaml curtin-17.1-11-ga4c9636b/examples/tests/lvmroot.yaml --- curtin-0.1.0~bzr532/examples/tests/lvmroot.yaml 1970-01-01 00:00:00.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/examples/tests/lvmroot.yaml 2018-01-18 21:10:51.000000000 +0000 @@ -0,0 +1,42 @@ +showtrace: true +storage: + version: 1 + config: + - id: main_disk + type: disk + ptable: gpt + name: root_disk + wipe: superblock + serial: '4cd98c5c-7761-4179' + grub_device: true + - id: main_disk_p1 + type: partition + number: 1 + size: 4GB + device: main_disk + flag: boot + - id: bios_boot + type: partition + size: 1MB + number: 15 + device: main_disk + flag: bios_grub + - id: root_vg + type: lvm_volgroup + name: root_vg + devices: + - main_disk_p1 + - id: root_vg_lv1 + type: lvm_partition + name: lv1_root + size: 3.5G + volgroup: root_vg + - id: lv1_root_fs + type: format + fstype: __ROOTFS_FORMAT__ + uuid: 04836770-e989-460f-8774-8e277ddcb40f + volume: root_vg_lv1 + - id: lvroot_mount + path: / + type: mount + device: lv1_root_fs diff -Nru curtin-0.1.0~bzr532/examples/tests/lvm.yaml curtin-17.1-11-ga4c9636b/examples/tests/lvm.yaml --- curtin-0.1.0~bzr532/examples/tests/lvm.yaml 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/examples/tests/lvm.yaml 2018-01-18 21:10:51.000000000 +0000 @@ -7,7 +7,7 @@ wipe: superblock ptable: msdos model: QEMU HARDDISK - path: /dev/vdb + serial: disk-a name: main_disk - id: sda1 type: partition diff -Nru curtin-0.1.0~bzr532/examples/tests/mdadm_bcache_complex.yaml curtin-17.1-11-ga4c9636b/examples/tests/mdadm_bcache_complex.yaml --- curtin-0.1.0~bzr532/examples/tests/mdadm_bcache_complex.yaml 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/examples/tests/mdadm_bcache_complex.yaml 2018-01-18 21:10:51.000000000 +0000 @@ -7,7 +7,7 @@ wipe: superblock ptable: gpt model: QEMU HARDDISK - path: /dev/vdb + serial: disk-a name: main_disk - id: bios_boot_partition type: 
partition @@ -46,14 +46,14 @@ type: disk wipe: superblock model: QEMU HARDDISK - path: /dev/vdc + serial: disk-b name: second_disk - id: sdc type: disk wipe: superblock ptable: gpt model: QEMU HARDDISK - path: /dev/vdd + serial: disk-c name: third_disk - id: sdc1 type: partition diff -Nru curtin-0.1.0~bzr532/examples/tests/mdadm_bcache.yaml curtin-17.1-11-ga4c9636b/examples/tests/mdadm_bcache.yaml --- curtin-0.1.0~bzr532/examples/tests/mdadm_bcache.yaml 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/examples/tests/mdadm_bcache.yaml 2018-01-18 21:10:51.000000000 +0000 @@ -8,7 +8,7 @@ wipe: superblock ptable: gpt model: QEMU HARDDISK - path: /dev/vdb + serial: disk-a name: main_disk - id: bios_boot_partition type: partition @@ -56,14 +56,14 @@ type: disk wipe: superblock model: QEMU HARDDISK - path: /dev/vdc + serial: disk-b name: second_disk - id: sdc type: disk wipe: superblock ptable: gpt model: QEMU HARDDISK - path: /dev/vdd + serial: disk-c name: third_disk - id: sdc1 type: partition diff -Nru curtin-0.1.0~bzr532/examples/tests/mdadm_iscsi.yaml curtin-17.1-11-ga4c9636b/examples/tests/mdadm_iscsi.yaml --- curtin-0.1.0~bzr532/examples/tests/mdadm_iscsi.yaml 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/examples/tests/mdadm_iscsi.yaml 2018-01-18 21:10:51.000000000 +0000 @@ -5,7 +5,7 @@ type: disk ptable: msdos model: QEMU HARDDISK - path: /dev/vdb + serial: disk-a name: main_disk wipe: superblock grub_device: true diff -Nru curtin-0.1.0~bzr532/examples/tests/mirrorboot-msdos-partition.yaml curtin-17.1-11-ga4c9636b/examples/tests/mirrorboot-msdos-partition.yaml --- curtin-0.1.0~bzr532/examples/tests/mirrorboot-msdos-partition.yaml 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/examples/tests/mirrorboot-msdos-partition.yaml 2018-01-18 21:10:51.000000000 +0000 @@ -6,7 +6,7 @@ type: disk ptable: msdos model: QEMU HARDDISK - path: /dev/vdb + serial: disk-a name: main_disk grub_device: true wipe: superblock @@ -14,7 +14,7 @@ type: disk ptable: msdos model: QEMU HARDDISK - path: /dev/vdc + serial: disk-b name: second_disk wipe: superblock - id: sda-part1 diff -Nru curtin-0.1.0~bzr532/examples/tests/mirrorboot-uefi.yaml curtin-17.1-11-ga4c9636b/examples/tests/mirrorboot-uefi.yaml --- curtin-0.1.0~bzr532/examples/tests/mirrorboot-uefi.yaml 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/examples/tests/mirrorboot-uefi.yaml 2018-01-18 21:10:51.000000000 +0000 @@ -7,14 +7,14 @@ ptable: msdos type: disk wipe: superblock - path: /dev/vdb + serial: disk-a name: main_disk - id: sdb name: sdb ptable: gpt type: disk wipe: superblock - path: /dev/vdc + serial: disk-b name: second_disk - device: sda flag: boot diff -Nru curtin-0.1.0~bzr532/examples/tests/mirrorboot.yaml curtin-17.1-11-ga4c9636b/examples/tests/mirrorboot.yaml --- curtin-0.1.0~bzr532/examples/tests/mirrorboot.yaml 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/examples/tests/mirrorboot.yaml 2018-01-18 21:10:51.000000000 +0000 @@ -7,7 +7,7 @@ wipe: superblock ptable: gpt model: QEMU HARDDISK - path: /dev/vdb + serial: disk-a name: main_disk grub_device: 1 - id: bios_boot_partition @@ -24,7 +24,7 @@ wipe: superblock ptable: gpt model: QEMU HARDDISK - path: /dev/vdc + serial: disk-b name: second_disk - id: sdb1 type: partition diff -Nru curtin-0.1.0~bzr532/examples/tests/nvme_bcache.yaml curtin-17.1-11-ga4c9636b/examples/tests/nvme_bcache.yaml --- curtin-0.1.0~bzr532/examples/tests/nvme_bcache.yaml 2017-10-06 02:20:05.000000000 +0000 +++ 
curtin-17.1-11-ga4c9636b/examples/tests/nvme_bcache.yaml 2018-01-18 21:10:51.000000000 +0000 @@ -6,13 +6,13 @@ model: LOGICAL VOLUME name: sda ptable: gpt - path: /dev/vdb + serial: disk-a type: disk wipe: superblock - id: sdb model: LOGICAL VOLUME name: sdb - path: /dev/vdc + serial: disk-b type: disk wipe: superblock - id: nvme0n1 diff -Nru curtin-0.1.0~bzr532/examples/tests/nvme.yaml curtin-17.1-11-ga4c9636b/examples/tests/nvme.yaml --- curtin-0.1.0~bzr532/examples/tests/nvme.yaml 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/examples/tests/nvme.yaml 2018-01-18 21:10:51.000000000 +0000 @@ -5,7 +5,7 @@ - id: main_disk type: disk ptable: gpt - path: /dev/vdb + serial: disk-a name: main_disk wipe: superblock grub_device: true diff -Nru curtin-0.1.0~bzr532/examples/tests/raid10boot.yaml curtin-17.1-11-ga4c9636b/examples/tests/raid10boot.yaml --- curtin-0.1.0~bzr532/examples/tests/raid10boot.yaml 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/examples/tests/raid10boot.yaml 2018-01-18 21:10:51.000000000 +0000 @@ -7,7 +7,7 @@ wipe: superblock ptable: gpt model: QEMU HARDDISK - path: /dev/vdb + serial: disk-a name: main_disk grub_device: 1 - id: bios_boot_partition @@ -24,7 +24,7 @@ wipe: superblock ptable: gpt model: QEMU HARDDISK - path: /dev/vdc + serial: disk-b name: second_disk - id: sdb1 type: partition @@ -35,7 +35,7 @@ wipe: superblock ptable: gpt model: QEMU HARDDISK - path: /dev/vdd + serial: disk-c name: third_disk - id: sdc1 type: partition @@ -46,7 +46,7 @@ wipe: superblock ptable: gpt model: QEMU HARDDISK - path: /dev/vde + serial: disk-d name: fourth_disk - id: sdd1 type: partition diff -Nru curtin-0.1.0~bzr532/examples/tests/raid5bcache.yaml curtin-17.1-11-ga4c9636b/examples/tests/raid5bcache.yaml --- curtin-0.1.0~bzr532/examples/tests/raid5bcache.yaml 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/examples/tests/raid5bcache.yaml 2018-01-18 21:10:51.000000000 +0000 @@ -6,31 +6,31 @@ model: QEMU HARDDISK name: sda ptable: msdos - path: /dev/vdb + serial: disk-a type: disk wipe: superblock - id: sdb model: QEMU HARDDISK name: sdb - path: /dev/vdc + serial: disk-b type: disk wipe: superblock - id: sdc model: QEMU HARDDISK name: sdc - path: /dev/vdd + serial: disk-c type: disk wipe: superblock - id: sdd model: QEMU HARDDISK name: sdd - path: /dev/vde + serial: disk-d type: disk wipe: superblock - id: sde model: QEMU HARDDISK name: sde - path: /dev/vdf + serial: disk-e type: disk wipe: superblock - devices: diff -Nru curtin-0.1.0~bzr532/examples/tests/raid5boot.yaml curtin-17.1-11-ga4c9636b/examples/tests/raid5boot.yaml --- curtin-0.1.0~bzr532/examples/tests/raid5boot.yaml 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/examples/tests/raid5boot.yaml 2018-01-18 21:10:51.000000000 +0000 @@ -7,7 +7,7 @@ wipe: superblock ptable: gpt model: QEMU HARDDISK - path: /dev/vdb + serial: disk-a name: main_disk grub_device: 1 - id: bios_boot_partition @@ -24,7 +24,7 @@ wipe: superblock ptable: gpt model: QEMU HARDDISK - path: /dev/vdc + serial: disk-b name: second_disk - id: sdb1 type: partition @@ -35,7 +35,7 @@ wipe: superblock ptable: gpt model: QEMU HARDDISK - path: /dev/vdd + serial: disk-c name: third_disk - id: sdc1 type: partition diff -Nru curtin-0.1.0~bzr532/examples/tests/raid6boot.yaml curtin-17.1-11-ga4c9636b/examples/tests/raid6boot.yaml --- curtin-0.1.0~bzr532/examples/tests/raid6boot.yaml 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/examples/tests/raid6boot.yaml 2018-01-18 
21:10:51.000000000 +0000 @@ -7,7 +7,7 @@ wipe: superblock ptable: gpt model: QEMU HARDDISK - path: /dev/vdb + serial: disk-a name: main_disk grub_device: 1 - id: bios_boot_partition @@ -24,7 +24,7 @@ wipe: superblock ptable: gpt model: QEMU HARDDISK - path: /dev/vdc + serial: disk-b name: second_disk - id: sdb1 type: partition @@ -35,7 +35,7 @@ wipe: superblock ptable: gpt model: QEMU HARDDISK - path: /dev/vdd + serial: disk-c name: third_disk - id: sdc1 type: partition @@ -46,7 +46,7 @@ wipe: superblock ptable: gpt model: QEMU HARDDISK - path: /dev/vde + serial: disk-d name: fourth_disk - id: sdd1 type: partition diff -Nru curtin-0.1.0~bzr532/examples/tests/uefi_basic.yaml curtin-17.1-11-ga4c9636b/examples/tests/uefi_basic.yaml --- curtin-0.1.0~bzr532/examples/tests/uefi_basic.yaml 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/examples/tests/uefi_basic.yaml 2018-01-18 21:10:51.000000000 +0000 @@ -1,10 +1,18 @@ showtrace: true + +early_commands: + # Recreate and test LP:1722322 + # Make one disk dirty with an MBR and a storage configuration + # GPT and don't supply wipe: superblock. This will exercise + # curtin use of sgdisk --zap-all instead of --clear (GPT only) + blockmeta: ["parted", /dev/disk/by-id/virtio-disk-b, "--script", "mklabel", "msdos"] + storage: config: - id: id_disk0 type: disk name: main_disk - path: /dev/vdb + serial: disk-a ptable: gpt wipe: superblock grub_device: true @@ -55,4 +63,19 @@ id: id_efi_mount path: /boot/efi type: mount + - id: pnum_disk + type: disk + serial: disk-b + name: pnum_disk + ptable: gpt + - id: pnum_disk_p1 + type: partition + number: 1 + size: 1GB + device: pnum_disk + - id: pnum_disk_p2 + type: partition + number: 10 + size: 1GB + device: pnum_disk version: 1 diff -Nru curtin-0.1.0~bzr532/examples/tests/uefi_lvmroot.yaml curtin-17.1-11-ga4c9636b/examples/tests/uefi_lvmroot.yaml --- curtin-0.1.0~bzr532/examples/tests/uefi_lvmroot.yaml 1970-01-01 00:00:00.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/examples/tests/uefi_lvmroot.yaml 2018-01-18 21:10:51.000000000 +0000 @@ -0,0 +1,82 @@ +showtrace: true +storage: + config: + - grub_device: true + id: sda + model: PERC 6/i + name: sda + ptable: gpt + serial: '4cd98c5c-7761-4179' + type: disk + wipe: superblock + - device: sda + id: sda-part1 + name: sda-part1 + number: 1 + offset: 4194304B + size: 96468992B + type: partition + uuid: 88a0df20-c224-4637-ad93-1d932e065a77 + wipe: superblock + - device: sda + id: sda-part2 + name: sda-part2 + number: 2 + size: 499122176B + type: partition + uuid: 1df4c42e-a94c-41d9-887f-9f2139425029 + wipe: superblock + - device: sda + id: sda-part3 + name: sda-part3 + number: 3 + size: 8G + type: partition + uuid: 14870e21-4ef7-4058-baf1-8ae1148bb1b0 + wipe: superblock + - devices: + - sda-part3 + id: vg0 + name: vg0 + type: lvm_volgroup + uuid: 1cae2f4d-ea47-45df-9d33-d1b77f23ee3f + - id: vg0-lv0 + name: lv0 + size: 7.5G + type: lvm_partition + volgroup: vg0 + - fstype: vfat + id: sda-part1_format + label: '' + type: format + uuid: 0bea118f-558e-4235-8547-644c76078066 + volume: sda-part1 + - fstype: __BOOTFS_FORMAT__ + id: sda-part2_format + label: '' + type: format + uuid: a45a16b8-018c-4a24-b9d8-aee19ca4566e + volume: sda-part2 + - fstype: __ROOTFS_FORMAT__ + id: vg0-lv0_format + label: '' + type: format + uuid: 48691520-0025-4e6c-a7c0-50bb0ac30713 + volume: vg0-lv0 + - device: vg0-lv0_format + id: vg0-lv0_mount + options: '' + path: / + type: mount + - device: sda-part2_format + id: sda-part2_mount + options: '' + path: /boot + type: mount + - 
device: sda-part1_format + id: sda-part1_mount + options: '' + path: /boot/efi + type: mount + version: 1 + diff -Nru curtin-0.1.0~bzr532/.gitignore curtin-17.1-11-ga4c9636b/.gitignore --- curtin-0.1.0~bzr532/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/.gitignore 2018-01-18 21:10:51.000000000 +0000 @@ -0,0 +1,5 @@ +*.pyc +__pycache__ +.tox +.coverage +curtin.egg-info/ diff -Nru curtin-0.1.0~bzr532/tests/unittests/helpers.py curtin-17.1-11-ga4c9636b/tests/unittests/helpers.py --- curtin-0.1.0~bzr532/tests/unittests/helpers.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/unittests/helpers.py 2018-01-18 21:10:51.000000000 +0000 @@ -79,3 +79,16 @@ if _dir is None: _dir = self.tmp_dir() return os.path.normpath(os.path.abspath(os.path.join(_dir, path))) + + +def dir2dict(startdir, prefix=None): + flist = {} + if prefix is None: + prefix = startdir + for root, dirs, files in os.walk(startdir): + for fname in files: + fpath = os.path.join(root, fname) + key = fpath[len(prefix):] + with open(fpath, "r") as fp: + flist[key] = fp.read() + return flist diff -Nru curtin-0.1.0~bzr532/tests/unittests/test_apt_custom_sources_list.py curtin-17.1-11-ga4c9636b/tests/unittests/test_apt_custom_sources_list.py --- curtin-0.1.0~bzr532/tests/unittests/test_apt_custom_sources_list.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/unittests/test_apt_custom_sources_list.py 2018-01-18 21:10:51.000000000 +0000 @@ -4,10 +4,10 @@ import logging import os - -import yaml import mock from mock import call +import textwrap +import yaml from curtin import util from curtin.commands import apt_config @@ -137,31 +137,88 @@ self._apt_source_list(cfg, EXPECTED_PRIMSEC_CONTENT) - @staticmethod - def test_apt_srcl_custom(): + def test_apt_srcl_custom(self): """test_apt_srcl_custom - Test rendering a custom source template""" cfg = yaml.safe_load(YAML_TEXT_CUSTOM_SL) + target = self.new_root arch = util.get_architecture() # would fail inside the unittest context - with mock.patch.object(util, 'get_architecture', - return_value=arch) as mockga: - with mock.patch.object(util, 'write_file') as mockwrite: - # keep it side effect free and avoid permission errors - with mock.patch.object(os, 'rename'): - with mock.patch.object(util, 'lsb_release', - return_value={'codename': - 'fakerel'}): - apt_config.handle_apt(cfg, TARGET) - - mockga.assert_called_with("/") - cloudfile = '/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg' - cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1) - calls = [call(util.target_path(TARGET, '/etc/apt/sources.list'), - EXPECTED_CONVERTED_CONTENT, mode=0o644), - call(util.target_path(TARGET, cloudfile), cloudconf, - mode=0o644)] - mockwrite.assert_has_calls(calls) + with mock.patch.object(util, 'get_architecture', return_value=arch): + with mock.patch.object(util, 'lsb_release', + return_value={'codename': 'fakerel'}): + apt_config.handle_apt(cfg, target) + + self.assertEqual( + EXPECTED_CONVERTED_CONTENT, + util.load_file(util.target_path(target, "/etc/apt/sources.list"))) + cloudfile = util.target_path( + target, '/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg') + self.assertEqual({'apt_preserve_sources_list': True}, + yaml.load(util.load_file(cloudfile))) + + @mock.patch("curtin.util.lsb_release") + @mock.patch("curtin.util.get_architecture", return_value="amd64") + def test_trusty_source_lists(self, m_get_arch, m_lsb_release): + """Support mirror equivalency with and without trailing /. 
+ + Trusty official images do not have a trailing slash on + http://archive.ubuntu.com/ubuntu .""" + + orig_primary = apt_config.PRIMARY_ARCH_MIRRORS['PRIMARY'] + orig_security = apt_config.PRIMARY_ARCH_MIRRORS['SECURITY'] + msg = "Test is invalid. %s mirror does not end in a /." + self.assertEqual(orig_primary[-1], "/", msg % "primary") + self.assertEqual(orig_security[-1], "/", msg % "security") + orig_primary = orig_primary[:-1] + orig_security = orig_security[:-1] + + m_lsb_release.return_value = { + 'codename': 'trusty', 'description': 'Ubuntu 14.04.5 LTS', + 'id': 'Ubuntu', 'release': '14.04'} + + target = self.new_root + my_primary = 'http://fixed-primary.ubuntu.com/ubuntu' + my_security = 'http://fixed-security.ubuntu.com/ubuntu' + cfg = { + 'preserve_sources_list': False, + 'primary': [{'arches': ['amd64'], 'uri': my_primary}], + 'security': [{'arches': ['amd64'], 'uri': my_security}]} + + # this is taken from a trusty image /etc/apt/sources.list + tmpl = textwrap.dedent("""\ + deb {mirror} {release} {comps} + deb {mirror} {release}-updates {comps} + deb {mirror} {release}-backports {comps} + deb {security} {release}-security {comps} + # not modified + deb http://my.example.com/updates testing main + """) + + release = 'trusty' + comps = 'main universe multiverse restricted' + easl = util.target_path(target, 'etc/apt/sources.list') + + orig_content = tmpl.format( + mirror=orig_primary, security=orig_security, + release=release, comps=comps) + orig_content_slash = tmpl.format( + mirror=orig_primary + "/", security=orig_security + "/", + release=release, comps=comps) + expected = tmpl.format( + mirror=my_primary, security=my_security, + release=release, comps=comps) + + # Avoid useless test. Make sure the strings don't start out equal. + self.assertNotEqual(expected, orig_content) + + util.write_file(easl, orig_content) + apt_config.handle_apt(cfg, target) + self.assertEqual(expected, util.load_file(easl)) + + util.write_file(easl, orig_content_slash) + apt_config.handle_apt(cfg, target) + self.assertEqual(expected, util.load_file(easl)) # vi: ts=4 expandtab diff -Nru curtin-0.1.0~bzr532/tests/unittests/test_block_iscsi.py curtin-17.1-11-ga4c9636b/tests/unittests/test_block_iscsi.py --- curtin-0.1.0~bzr532/tests/unittests/test_block_iscsi.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/unittests/test_block_iscsi.py 2018-01-18 21:10:51.000000000 +0000 @@ -527,7 +527,7 @@ def test_volpath_is_iscsi_layered_true(self): volume_path = '/dev/wark' - slaves = ['wark', 'bzr', 'super-iscsi-lun-27'] + slaves = ['wark', 'bzoink', 'super-iscsi-lun-27'] self.mock_get_device_slave_knames.return_value = slaves self.mock_path_to_kname.side_effect = lambda x: x self.mock_kname_is_iscsi.side_effect = lambda x: 'iscsi' in x @@ -542,7 +542,7 @@ def test_volpath_is_iscsi_layered_false(self): volume_path = '/dev/wark' - slaves = ['wark', 'bzr', 'nvmen27p47'] + slaves = ['wark', 'bzoink', 'nvmen27p47'] self.mock_get_device_slave_knames.return_value = slaves self.mock_path_to_kname.side_effect = lambda x: x self.mock_kname_is_iscsi.side_effect = lambda x: 'iscsi' in x diff -Nru curtin-0.1.0~bzr532/tests/unittests/test_block_mkfs.py curtin-17.1-11-ga4c9636b/tests/unittests/test_block_mkfs.py --- curtin-0.1.0~bzr532/tests/unittests/test_block_mkfs.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/unittests/test_block_mkfs.py 2018-01-18 21:10:51.000000000 +0000 @@ -66,6 +66,13 @@ ["--uuid", self.test_uuid]] self._run_mkfs_with_config(conf, "mkfs.btrfs", 
expected_flags) + def test_mkfs_xfs(self): + """ mkfs.xfs passes uuid parameter """ + conf = self._get_config("xfs") + expected_flags = ['-f', ['-L', 'format1'], + ['-m', 'uuid=%s' % self.test_uuid]] + self._run_mkfs_with_config(conf, "mkfs.xfs", expected_flags) + def test_mkfs_btrfs_on_precise(self): # Test precise+btrfs where there is no force or uuid conf = self._get_config("btrfs") diff -Nru curtin-0.1.0~bzr532/tests/unittests/test_clear_holders.py curtin-17.1-11-ga4c9636b/tests/unittests/test_clear_holders.py --- curtin-0.1.0~bzr532/tests/unittests/test_clear_holders.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/unittests/test_clear_holders.py 2018-01-18 21:10:51.000000000 +0000 @@ -1,3 +1,4 @@ +import errno import mock import os import textwrap @@ -329,6 +330,52 @@ mock.call(cset, retries=self.remove_retries) ]) + # test bcache shutdown with 'stop' sysfs write failure + @mock.patch('curtin.block.clear_holders.udev.udevadm_settle') + @mock.patch('curtin.block.clear_holders.get_bcache_sys_path') + @mock.patch('curtin.block.clear_holders.util') + @mock.patch('curtin.block.clear_holders.os') + @mock.patch('curtin.block.clear_holders.LOG') + @mock.patch('curtin.block.clear_holders.get_bcache_using_dev') + def test_shutdown_bcache_stop_sysfs_write_fails(self, mock_get_bcache, + mock_log, mock_os, + mock_util, + mock_get_bcache_block, + mock_udevadm_settle): + """Test writes sysfs write failures pass if file not present""" + device = "/sys/class/block/null" + mock_os.path.exists.side_effect = iter([ + True, # backing device exists + True, # cset device not present (already removed) + False, # backing device is removed with cset + False, # bcache/stop sysfs is missing (already removed) + ]) + cset = '/sys/fs/bcache/fake' + mock_get_bcache.return_value = cset + mock_get_bcache_block.return_value = device + '/bcache' + mock_os.path.join.side_effect = os.path.join + + # make writes to sysfs fail + mock_util.write_file.side_effect = IOError(errno.ENOENT, + "File not found") + + clear_holders.shutdown_bcache(device) + + self.assertEqual(2, len(mock_log.info.call_args_list)) + self.assertEqual(3, len(mock_os.path.exists.call_args_list)) + self.assertEqual(1, len(mock_get_bcache.call_args_list)) + self.assertEqual(1, len(mock_get_bcache_block.call_args_list)) + self.assertEqual(1, len(mock_util.write_file.call_args_list)) + self.assertEqual(1, len(mock_util.wait_for_removal.call_args_list)) + + mock_get_bcache.assert_called_with(device, strict=False) + mock_util.write_file.assert_has_calls([ + mock.call(cset + '/stop', '1', mode=None), + ]) + mock_util.wait_for_removal.assert_has_calls([ + mock.call(cset, retries=self.remove_retries) + ]) + @mock.patch('curtin.block.clear_holders.LOG') @mock.patch('curtin.block.clear_holders.block.sys_block_path') @mock.patch('curtin.block.clear_holders.lvm') @@ -531,6 +578,41 @@ for tree, result in test_trees_and_results: self.assertEqual(clear_holders.format_holders_tree(tree), result) + @mock.patch('curtin.block.clear_holders.util.write_file') + def test_maybe_stop_bcache_device_raises_errors(self, m_write_file): + """Non-IO/OS exceptions are raised by maybe_stop_bcache_device.""" + m_write_file.side_effect = ValueError('Crazy Value Error') + with self.assertRaises(ValueError) as cm: + clear_holders.maybe_stop_bcache_device('does/not/matter') + self.assertEqual('Crazy Value Error', str(cm.exception)) + self.assertEqual( + mock.call('does/not/matter/stop', '1', mode=None), + m_write_file.call_args) + + 
@mock.patch('curtin.block.clear_holders.LOG') + @mock.patch('curtin.block.clear_holders.util.write_file') + def test_maybe_stop_bcache_device_handles_oserror(self, m_write_file, + m_log): + """When OSError.NOENT is raised, log the condition and move on.""" + m_write_file.side_effect = OSError(errno.ENOENT, 'Expected oserror') + clear_holders.maybe_stop_bcache_device('does/not/matter') + self.assertEqual( + 'Error writing to bcache stop file %s, device removed: %s', + m_log.debug.call_args[0][0]) + self.assertEqual('does/not/matter/stop', m_log.debug.call_args[0][1]) + + @mock.patch('curtin.block.clear_holders.LOG') + @mock.patch('curtin.block.clear_holders.util.write_file') + def test_maybe_stop_bcache_device_handles_ioerror(self, m_write_file, + m_log): + """When IOError.NOENT is raised, log the condition and move on.""" + m_write_file.side_effect = IOError(errno.ENOENT, 'Expected ioerror') + clear_holders.maybe_stop_bcache_device('does/not/matter') + self.assertEqual( + 'Error writing to bcache stop file %s, device removed: %s', + m_log.debug.call_args[0][0]) + self.assertEqual('does/not/matter/stop', m_log.debug.call_args[0][1]) + def test_get_holder_types(self): """test clear_holders.get_holder_types""" test_trees_and_results = [ diff -Nru curtin-0.1.0~bzr532/tests/unittests/test_commands_block_meta.py curtin-17.1-11-ga4c9636b/tests/unittests/test_commands_block_meta.py --- curtin-0.1.0~bzr532/tests/unittests/test_commands_block_meta.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/unittests/test_commands_block_meta.py 2018-01-18 21:10:51.000000000 +0000 @@ -115,7 +115,10 @@ basepath = 'curtin.commands.block_meta.' self.add_patch(basepath + 'get_path_to_storage_volume', 'mock_getpath') self.add_patch(basepath + 'make_dname', 'mock_make_dname') + self.add_patch('curtin.util.load_command_environment', + 'mock_load_env') self.add_patch('curtin.util.subp', 'mock_subp') + self.add_patch('curtin.util.ensure_dir', 'mock_ensure_dir') self.add_patch('curtin.block.get_part_table_type', 'mock_block_get_part_table_type') self.add_patch('curtin.block.wipe_volume', @@ -130,6 +133,12 @@ 'mock_clear_holders') self.add_patch('curtin.block.clear_holders.assert_clear', 'mock_assert_clear') + self.add_patch('curtin.block.iscsi.volpath_is_iscsi', + 'mock_volpath_is_iscsi') + self.add_patch('curtin.block.get_volume_uuid', + 'mock_block_get_volume_uuid') + self.add_patch('curtin.block.zero_file_at_offsets', + 'mock_block_zero_file') self.target = "my_target" self.config = { @@ -152,7 +161,20 @@ 'size': '511705088B', 'type': 'partition', 'uuid': 'fc7ab24c-b6bf-460f-8446-d3ac362c0625', - 'wipe': 'superblock'} + 'wipe': 'superblock'}, + {'id': 'sda1-root', + 'type': 'format', + 'fstype': 'xfs', + 'volume': 'sda-part1'}, + {'id': 'sda-part1-mnt-root', + 'type': 'mount', + 'path': '/', + 'device': 'sda1-root'}, + {'id': 'sda-part1-mnt-root-ro', + 'type': 'mount', + 'path': '/readonly', + 'options': 'ro', + 'device': 'sda1-root'} ], } } @@ -177,32 +199,122 @@ self.mock_clear_holders.assert_called_with(disk) self.mock_assert_clear.assert_called_with(disk) - def test_partition_handler_calls_clear_holder(self): + def test_partition_handler_wipes_at_partition_offset(self): + """ Test wiping partition at offset prior to creating partition""" disk_info = self.storage_config.get('sda') part_info = self.storage_config.get('sda-part1') disk_kname = disk_info.get('path') part_kname = disk_kname + '1' self.mock_getpath.side_effect = iter([ - disk_info.get('id'), - part_kname, - part_kname, + 
disk_kname, part_kname, ]) - self.mock_block_get_part_table_type.return_value = 'dos' kname = 'xxx' self.mock_block_path_to_kname.return_value = kname self.mock_block_sys_block_path.return_value = '/sys/class/block/xxx' - self.mock_subp.side_effect = iter([ - ("", 0), # parted mkpart - ("", 0), # ?? - ]) - holders = ['md1'] - self.mock_get_holders.return_value = holders block_meta.partition_handler(part_info, self.storage_config) + part_offset = 2048 * 512 + self.mock_block_zero_file.assert_called_with(disk_kname, [part_offset], + exclusive=False) + self.mock_subp.assert_called_with(['parted', disk_kname, '--script', + 'mkpart', 'primary', '2048s', + '1001471s'], capture=True) + + @patch('curtin.util.write_file') + def test_mount_handler_defaults(self, mock_write_file): + """Test mount_handler has defaults to 'defaults' for mount options""" + fstab = self.tmp_path('fstab') + self.mock_load_env.return_value = {'fstab': fstab, + 'target': self.target} + disk_info = self.storage_config.get('sda') + fs_info = self.storage_config.get('sda1-root') + mount_info = self.storage_config.get('sda-part1-mnt-root') - print("clear_holders: %s" % self.mock_clear_holders.call_args_list) - print("assert_clear: %s" % self.mock_assert_clear.call_args_list) - self.mock_clear_holders.assert_called_with(part_kname) - self.mock_assert_clear.assert_called_with(part_kname) + self.mock_getpath.return_value = '/wark/xxx' + self.mock_volpath_is_iscsi.return_value = False + self.mock_block_get_volume_uuid.return_value = None + + block_meta.mount_handler(mount_info, self.storage_config) + options = 'defaults' + expected = "%s %s %s %s 0 0\n" % (disk_info['path'], + mount_info['path'], + fs_info['fstype'], options) + + mock_write_file.assert_called_with(fstab, expected, omode='a') + + @patch('curtin.util.write_file') + def test_mount_handler_uses_mount_options(self, mock_write_file): + """Test mount_handler 'options' string is present in fstab entry""" + fstab = self.tmp_path('fstab') + self.mock_load_env.return_value = {'fstab': fstab, + 'target': self.target} + disk_info = self.storage_config.get('sda') + fs_info = self.storage_config.get('sda1-root') + mount_info = self.storage_config.get('sda-part1-mnt-root-ro') + + self.mock_getpath.return_value = '/wark/xxx' + self.mock_volpath_is_iscsi.return_value = False + self.mock_block_get_volume_uuid.return_value = None + + block_meta.mount_handler(mount_info, self.storage_config) + options = 'ro' + expected = "%s %s %s %s 0 0\n" % (disk_info['path'], + mount_info['path'], + fs_info['fstype'], options) + + mock_write_file.assert_called_with(fstab, expected, omode='a') + + @patch('curtin.util.write_file') + def test_mount_handler_empty_options_string(self, mock_write_file): + """Test mount_handler with empty 'options' string, selects defaults""" + fstab = self.tmp_path('fstab') + self.mock_load_env.return_value = {'fstab': fstab, + 'target': self.target} + disk_info = self.storage_config.get('sda') + fs_info = self.storage_config.get('sda1-root') + mount_info = self.storage_config.get('sda-part1-mnt-root-ro') + mount_info['options'] = '' + + self.mock_getpath.return_value = '/wark/xxx' + self.mock_volpath_is_iscsi.return_value = False + self.mock_block_get_volume_uuid.return_value = None + + block_meta.mount_handler(mount_info, self.storage_config) + options = 'defaults' + expected = "%s %s %s %s 0 0\n" % (disk_info['path'], + mount_info['path'], + fs_info['fstype'], options) + + mock_write_file.assert_called_with(fstab, expected, omode='a') + + def 
test_mount_handler_appends_to_fstab(self): + """Test mount_handler appends fstab lines to existing file""" + fstab = self.tmp_path('fstab') + with open(fstab, 'w') as fh: + fh.write("#curtin-test\n") + + self.mock_load_env.return_value = {'fstab': fstab, + 'target': self.target} + disk_info = self.storage_config.get('sda') + fs_info = self.storage_config.get('sda1-root') + mount_info = self.storage_config.get('sda-part1-mnt-root-ro') + mount_info['options'] = '' + + self.mock_getpath.return_value = '/wark/xxx' + self.mock_volpath_is_iscsi.return_value = False + self.mock_block_get_volume_uuid.return_value = None + + block_meta.mount_handler(mount_info, self.storage_config) + options = 'defaults' + expected = "#curtin-test\n%s %s %s %s 0 0\n" % (disk_info['path'], + mount_info['path'], + fs_info['fstype'], + options) + + with open(fstab, 'r') as fh: + rendered_fstab = fh.read() + + print(rendered_fstab) + self.assertEqual(rendered_fstab, expected) diff -Nru curtin-0.1.0~bzr532/tests/unittests/test_commands_unmount.py curtin-17.1-11-ga4c9636b/tests/unittests/test_commands_unmount.py --- curtin-0.1.0~bzr532/tests/unittests/test_commands_unmount.py 1970-01-01 00:00:00.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/unittests/test_commands_unmount.py 2018-01-18 21:10:51.000000000 +0000 @@ -0,0 +1,47 @@ + +from curtin.commands import unmount +from .helpers import CiTestCase + +import argparse +import mock +import os + + +class TestUnmount(CiTestCase): + + def setUp(self): + super(TestUnmount, self).setUp() + self.args = argparse.Namespace() + self.args.target = os.environ.get('TEST_CURTIN_TARGET_MOUNT_POINT') + self.args.disable_recursive_mounts = False + self.add_patch('curtin.util.do_umount', 'm_umount') + + def test_unmount_notarget_raises_exception(self): + """Check missing target raises ValueError exception""" + self.assertRaises(ValueError, unmount.unmount_main, self.args) + + def test_unmount_target_not_found_exception(self): + """Check target path not found raises FileNotFoundError exception""" + self.args.target = "catch-me-if-you-can" + try: + FileNotFoundError + except NameError: + FileNotFoundError = IOError + self.assertRaises(FileNotFoundError, unmount.unmount_main, self.args) + + @mock.patch('curtin.commands.unmount.os') + def test_unmount_target_with_path(self, mock_os): + """Assert do_umount is called with args.target and recursive=True""" + self.args.target = "test/path/to/my/path" + mock_os.path.exists.return_value = True + unmount.unmount_main(self.args) + self.m_umount.assert_called_with(self.args.target, recursive=True) + + @mock.patch('curtin.commands.unmount.os') + def test_unmount_target_with_path_no_recursive(self, mock_os): + """Assert args.disable_recursive_mounts True sends recursive=False""" + self.args.target = "test/path/to/my/path" + self.args.disable_recursive_mounts = True + mock_os.path.exists.return_value = True + unmount.unmount_main(self.args) + self.m_umount.assert_called_with(self.args.target, recursive=False) diff -Nru curtin-0.1.0~bzr532/tests/unittests/test_curthooks.py curtin-17.1-11-ga4c9636b/tests/unittests/test_curthooks.py --- curtin-0.1.0~bzr532/tests/unittests/test_curthooks.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/unittests/test_curthooks.py 2018-01-18 21:10:51.000000000 +0000 @@ -5,7 +5,7 @@ from curtin import util from curtin import config from curtin.reporter import events -from .helpers import CiTestCase +from .helpers import CiTestCase, dir2dict class TestGetFlashKernelPkgs(CiTestCase): @@ -761,4 +761,50 
@@ curthooks.detect_required_packages({'network': {'version': 3}}) +class TestCurthooksWriteFiles(CiTestCase): + def test_handle_write_files_empty(self): + """ Test curthooks.write_files returns for empty config """ + tmpd = self.tmp_dir() + ret = curthooks.write_files({}, tmpd) + self.assertEqual({}, dir2dict(tmpd, prefix=tmpd)) + self.assertIsNone(ret) + + def test_handle_write_files(self): + """ Test curthooks.write_files works as it used to """ + tmpd = self.tmp_dir() + cfg = {'file1': {'path': '/etc/default/hello.txt', + 'content': "Hello World!\n"}, + 'foobar': {'path': '/sys/wark', 'content': "Engauge!\n"}} + curthooks.write_files({'write_files': cfg}, tmpd) + self.assertEqual( + dict((cfg[i]['path'], cfg[i]['content']) for i in cfg.keys()), + dir2dict(tmpd, prefix=tmpd)) + + @patch('curtin.commands.curthooks.futil.target_path') + @patch('curtin.commands.curthooks.futil.write_finfo') + def test_handle_write_files_finfo(self, mock_write_finfo, mock_tp): + """ Validate that futils.write_files handles target_path correctly """ + cc_target = "/tmpXXXX/random/dir/used/by/maas" + cfg = { + 'file1': { + 'path': '/etc/default/hello.txt', + 'content': "Hello World!\n", + }, + } + mock_tp.side_effect = [ + cc_target + cfg['file1']['path'], + ] + + expected_cfg = { + 'file1': { + 'path': '/etc/default/hello.txt', + 'content': cfg['file1']['content']}, + } + curthooks.write_files({'write_files': cfg}, cc_target) + mock_write_finfo.assert_called_with( + content=expected_cfg['file1']['content'], owner='-1:-1', + path=cc_target + expected_cfg['file1']['path'], + perms='0644') + + # vi: ts=4 expandtab syntax=python diff -Nru curtin-0.1.0~bzr532/tests/unittests/test_pack.py curtin-17.1-11-ga4c9636b/tests/unittests/test_pack.py --- curtin-0.1.0~bzr532/tests/unittests/test_pack.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/unittests/test_pack.py 2018-01-18 21:10:51.000000000 +0000 @@ -4,6 +4,7 @@ from curtin import util from curtin.commands.install import INSTALL_PASS_MSG, INSTALL_START_MSG +import glob import json import os import shutil @@ -148,7 +149,6 @@ # has, and verify that python --help has that changed string, then # change it back for other tests. version_py = os.path.join(self.extract_dir, 'curtin', 'version.py') - version_pyc = version_py + "c" hack_version_str = "MY_VERSION_STRING" orig_contents = util.load_file(version_py) hacked_contents = orig_contents.replace( @@ -156,11 +156,11 @@ self.assertIn(hack_version_str, hacked_contents) try: util.write_file(version_py, hacked_contents) - util.del_file(version_pyc) + remove_pyc_for_file(version_py) out, err = self.run_main(['--help']) finally: - util.del_file(version_pyc) util.write_file(version_py, orig_contents) + remove_pyc_for_file(version_py) self.assertIn(hack_version_str, out) @@ -171,4 +171,20 @@ self.assertTrue(os.path.isdir(os.path.join(tld, 'curtin'))) self.assertTrue(os.path.isdir(os.path.join(tld, 'bin'))) + +def remove_pyc_for_file(py_path): + """Remove any .pyc files that have been created by running py_path. 
+ + Different versions of python create different .pyc files for a given .py: + my_path/my.py -> my_path/my.pyc + my_path/__pycache__/my..pyc""" + without_py = py_path.rpartition(".")[0] + pycache_wildcard = os.path.join( + os.path.dirname(without_py), "__pycache__", + os.path.basename(without_py)) + ".*.pyc" + for pyc in [without_py + ".pyc"] + glob.glob(pycache_wildcard): + if os.path.exists(pyc): + os.unlink(pyc) + + # vi: ts=4 expandtab syntax=python diff -Nru curtin-0.1.0~bzr532/tests/unittests/test_version.py curtin-17.1-11-ga4c9636b/tests/unittests/test_version.py --- curtin-0.1.0~bzr532/tests/unittests/test_version.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/unittests/test_version.py 2018-01-18 21:10:51.000000000 +0000 @@ -31,16 +31,16 @@ version._PACKAGED_VERSION = original_pkg_string - def test_bzr_revno_version(self): + def test_git_describe_version(self): self.mock_path.exists.return_value = True - bzr_revno = "999" - self.mock_subp.return_value = bzr_revno.encode("utf-8") + git_describe = old_version + "-13-g90fa654f" + self.mock_subp.return_value = git_describe.encode("utf-8") ver_string = version.version_string() - self.assertEqual(old_version + "~bzr" + bzr_revno, ver_string) + self.assertEqual(git_describe, ver_string) @mock.patch.object(os, 'getcwd') - def test_bzr_revno_version_exception(self, mock_getcwd): + def test_git_describe_version_exception(self, mock_getcwd): self.mock_path.exists.return_value = True mock_getcwd.return_value = "/tmp/foo" self.mock_subp.side_effect = subprocess.CalledProcessError(1, 'foo') diff -Nru curtin-0.1.0~bzr532/tests/vmtests/image_sync.py curtin-17.1-11-ga4c9636b/tests/vmtests/image_sync.py --- curtin-0.1.0~bzr532/tests/vmtests/image_sync.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/image_sync.py 2018-01-18 21:10:51.000000000 +0000 @@ -28,20 +28,21 @@ IMAGE_SRC_URL = os.environ.get( 'IMAGE_SRC_URL', - "http://maas.ubuntu.com/images/ephemeral-v2/daily/streams/v1/index.sjson") + "http://maas.ubuntu.com/images/ephemeral-v3/daily/streams/v1/index.sjson") IMAGE_DIR = os.environ.get("IMAGE_DIR", "/srv/images") KEYRING = '/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg' -ITEM_NAME_FILTERS = ['ftype~(root-image.gz|boot-initrd|boot-kernel|root-tgz)'] +ITEM_NAME_FILTERS = \ + ['ftype~(root-image.gz|boot-initrd|boot-kernel|root-tgz|squashfs)'] FORMAT_JSON = 'JSON' STREAM_BASE = 'com.ubuntu.maas:daily' VMTEST_CONTENT_ID_PATH_MAP = { - STREAM_BASE + ":v2:download": "streams/v1/vmtest.json", + STREAM_BASE + ":v3:download": "streams/v1/vmtest.json", STREAM_BASE + ":centos-bases-download": "streams/v1/vmtest-centos.json", } -DEFAULT_OUTPUT_FORMAT = ( - "%(release)-7s %(arch)s/%(subarch)s %(version_name)-10s %(item_name)s") +DEFAULT_OUTPUT_FORMAT = ("%(release)-7s %(arch)s/%(subarch)s/%(kflavor)s " + "%(version_name)-10s %(item_name)s") DEFAULT_ARCHES = { 'i386': ['i386'], diff -Nru curtin-0.1.0~bzr532/tests/vmtests/__init__.py curtin-17.1-11-ga4c9636b/tests/vmtests/__init__.py --- curtin-0.1.0~bzr532/tests/vmtests/__init__.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/__init__.py 2018-01-18 21:10:51.000000000 +0000 @@ -11,6 +11,7 @@ import tempfile import textwrap import time +import uuid import yaml import curtin.net as curtin_net import curtin.util as util @@ -107,11 +108,13 @@ return _TOPDIR -def _initialize_logging(): +def _initialize_logging(name=None): # Configure logging module to save output to disk and present it on # sys.stderr + if not 
name: + name = __name__ - logger = logging.getLogger(__name__) + logger = logging.getLogger(name) logger.setLevel(logging.DEBUG) formatter = logging.Formatter( @@ -174,16 +177,19 @@ def get_images(src_url, local_d, distro, release, arch, krel=None, sync="1", - ftypes=None): + kflavor=None, subarch=None, ftypes=None): # ensure that the image items (roottar, kernel, initrd) # we need for release and arch are available in base_dir. # # returns ftype dictionary with path to each ftype as values # {ftype: item_url} + logger.debug("distro=%s release=%s krel=%s subarch=%s ftypes=%s", + distro, release, krel, subarch, ftypes) if not ftypes: ftypes = { 'vmtest.root-image': '', 'vmtest.root-tgz': '', + 'squashfs': '', 'boot-kernel': '', 'boot-initrd': '' } @@ -192,8 +198,12 @@ common_filters = ['release=%s' % release, 'arch=%s' % arch, 'os=%s' % distro] + if kflavor: + common_filters.append('kflavor=%s' % kflavor) if krel: common_filters.append('krel=%s' % krel) + if subarch: + common_filters.append('subarch=%s' % subarch) filters = ['ftype~(%s)' % ("|".join(ftypes.keys()))] + common_filters if sync == "1": @@ -225,7 +235,8 @@ logger.info(fail_msg + " Attempting to fix with an image sync. (%s)", query_str) return get_images(src_url, local_d, distro, release, arch, - krel=krel, sync="1", ftypes=ftypes) + krel=krel, sync="1", kflavor=kflavor, + subarch=subarch, ftypes=ftypes) elif not results: raise ValueError("Required images not found and " "syncing disabled:\n%s" % query_str) @@ -355,24 +366,29 @@ boot_cloudconf = None install_timeout = INSTALL_TIMEOUT interactive = False + logger = None multipath = False multipath_num_paths = 2 nvme_disks = [] iscsi_disks = [] recorded_errors = 0 recorded_failures = 0 + conf_replace = {} uefi = False proxy = None # these get set from base_vm_classes release = None arch = None + kflavor = None krel = None distro = None + subarch = None + ephemeral_ftype = "squashfs" target_distro = None target_release = None target_krel = None - target_ftype = "vmtest.root-tgz" + target_ftype = "squashfs" _debian_packages = None @@ -386,35 +402,66 @@ @classmethod def get_test_files(cls): # get local absolute filesystem paths for each of the needed file types - img_verstr, ftypes = get_images( + logger.debug('cls.ephemeral_ftype: %s', cls.ephemeral_ftype) + logger.debug('cls.target_ftype: %s', cls.target_ftype) + eph_img_verstr, ftypes = get_images( IMAGE_SRC_URL, IMAGE_DIR, cls.distro, cls.release, cls.arch, krel=cls.krel if cls.krel else cls.release, + kflavor=cls.kflavor if cls.kflavor else None, + subarch=cls.subarch if cls.subarch else None, sync=CURTIN_VMTEST_IMAGE_SYNC, - ftypes=('boot-initrd', 'boot-kernel', 'vmtest.root-image')) - logger.debug("Install Image %s\n, ftypes: %s\n", img_verstr, ftypes) - logger.info("Install Image: %s", img_verstr) + ftypes=('boot-initrd', 'boot-kernel', cls.ephemeral_ftype)) + logger.debug("Install Image %s\n, ftypes: %s\n", + eph_img_verstr, ftypes) if not cls.target_krel and cls.krel: cls.target_krel = cls.krel # get local absolute filesystem paths for the OS tarball to be # installed - if cls.target_ftype == "vmtest.root-tgz": - img_verstr, found = get_images( + if cls.target_ftype in ["vmtest.root-tgz", "squashfs"]: + target_img_verstr, found = get_images( IMAGE_SRC_URL, IMAGE_DIR, cls.target_distro if cls.target_distro else cls.distro, cls.target_release if cls.target_release else cls.release, - cls.arch, krel=cls.target_krel, sync=CURTIN_VMTEST_IMAGE_SYNC, - ftypes=('vmtest.root-tgz',)) + cls.arch, subarch=cls.subarch if cls.subarch else 
None, + kflavor=cls.kflavor if cls.kflavor else None, + krel=cls.target_krel, sync=CURTIN_VMTEST_IMAGE_SYNC, + ftypes=(cls.target_ftype,)) logger.debug("Target Tarball %s\n, ftypes: %s\n", - img_verstr, found) - logger.info("Target Tarball: %s", img_verstr) - else: + target_img_verstr, found) + elif cls.target_ftype in ["root-image.xz"]: logger.info('get-testfiles UC16 hack!') found = {'root-image.xz': UC16_IMAGE} + target_img_verstr = "UbuntuCore 16" + logger.info("Ephemeral Image Version:[%s] Target Image Version:[%s]", + eph_img_verstr, target_img_verstr) ftypes.update(found) return ftypes @classmethod + def load_conf_file(cls): + logger.info('Loading testcase config file: %s', cls.conf_file) + confdata = util.load_file(cls.conf_file) + # replace rootfs file system format with class value + if cls.conf_replace: + logger.debug('Rendering conf template: %s', cls.conf_replace) + for k, v in cls.conf_replace.items(): + confdata = confdata.replace(k, v) + suffix = ".yaml" + prefix = (cls.td.tmpdir + '/' + + os.path.basename(cls.conf_file).replace(suffix, "-")) + temp_yaml = tempfile.NamedTemporaryFile(prefix=prefix, + suffix=suffix, mode='w+t', + delete=False) + shutil.copyfile(cls.conf_file, temp_yaml.name) + cls.conf_file = temp_yaml.name + logger.info('Updating class conf file %s', cls.conf_file) + with open(cls.conf_file, 'w+t') as fh: + fh.write(confdata) + + return confdata + + @classmethod def build_iscsi_disks(cls): cls._iscsi_disks = list() disks = [] @@ -470,10 +517,7 @@ disk_iuser = '' disk_ipassword = '' - uuid, _ = util.subp(['uuidgen'], capture=True, - decode='replace') - uuid = uuid.rstrip() - target = 'curtin-%s' % uuid + target = 'curtin-%s' % uuid.uuid1() cls._iscsi_disks.append(target) dpath = os.path.join(cls.td.disks, '%s.img' % (target)) iscsi_disk = '{}:{}:iscsi:{}:{}:{}:{}:{}:{}'.format( @@ -482,30 +526,16 @@ disks.extend(['--disk', iscsi_disk]) # replace next __RFC4173__ placeholder in YAML + rfc4173 = get_rfc4173(cls.tgtd_ip, cls.tgtd_port, target, + user=disk_user, pword=disk_password, + iuser=disk_iuser, ipword=disk_ipassword) with tempfile.NamedTemporaryFile(mode='w+t') as temp_yaml: shutil.copyfile(cls.conf_file, temp_yaml.name) with open(cls.conf_file, 'w+t') as conf: replaced = False for line in temp_yaml: if not replaced and '__RFC4173__' in line: - actual_rfc4173 = '' - if len(disk_user) > 0: - actual_rfc4173 += '%s:%s' % (disk_user, - disk_password) - if len(disk_iuser) > 0: - # empty target user/password - if len(actual_rfc4173) == 0: - actual_rfc4173 += ':' - actual_rfc4173 += ':%s:%s' % (disk_iuser, - disk_ipassword) - # any auth specified? 
- if len(actual_rfc4173) > 0: - actual_rfc4173 += '@' - # assumes LUN 1 - actual_rfc4173 += '%s::%s:1:%s' % ( - cls.tgtd_ip, - cls.tgtd_port, target) - line = line.replace('__RFC4173__', actual_rfc4173) + line = line.replace('__RFC4173__', rfc4173) replaced = True conf.write(line) return disks @@ -542,7 +572,36 @@ return str(nr_cpus) @classmethod + def get_kernel_package(cls): + """ Return the kernel package name for this class """ + package = 'linux-image-' + cls.kflavor + if cls.subarch is None or cls.subarch.startswith('ga'): + return package + + return package + '-' + cls.subarch + + @classmethod + def get_kernel_config(cls): + """Return a kernel config dictionary to install the same + kernel subarch into the target for hwe, hwe-edge classes + + Returns: config dictionary only for hwe, or edge subclass + """ + if cls.subarch is None or cls.subarch.startswith('ga'): + return None + + # -hwe-version or -hwe-version-edge packages + package = cls.get_kernel_package() + return {'kernel': {'fallback-package': package}} + + @classmethod def setUpClass(cls): + # initialize global logger with class name to help make sense of + # parallel vmtest runs which intermingle output. + global logger + logger = _initialize_logging(name=cls.__name__) + cls.logger = logger + # check if we should skip due to host arch if cls.arch in cls.arch_skip: reason = "{} is not supported on arch {}".format(cls.__name__, @@ -579,14 +638,38 @@ if cls.extra_kern_args: cmd.extend(["--append=" + cls.extra_kern_args]) - # publish the root tarball ftypes = cls.get_test_files() - install_src = "PUBURL/" + os.path.basename(ftypes[cls.target_ftype]) - if cls.target_ftype == 'vmtest.root-tgz': - cmd.append("--publish=%s" % ftypes['vmtest.root-tgz']) + # trusty can't yet use root=URL due to LP:#1735046 + if cls.release in ['trusty']: + root_url = "/dev/disk/by-id/virtio-boot-disk" else: - cmd.append("--publish=%s::dd-xz" % ftypes[cls.target_ftype]) - logger.info("Publishing src as %s", cmd[-1]) + root_url = "squash:PUBURL/%s" % ( + os.path.basename(ftypes[cls.ephemeral_ftype])) + # configure ephemeral boot environment + cmd.extend([ + "--root-arg=root=%s" % root_url, + "--append=overlayroot=tmpfs", + "--append=ip=dhcp", # enable networking + "--append=rc-initrd-dns", # carry DNS from initramfs into + # real-root (LP: #1735225) + ]) + # getting resolvconf configured is only fixed in bionic + # the iscsi_auto handles resolvconf setup via call to + # configure_networking in initramfs + if cls.release in ('precise', 'trusty', 'xenial', 'zesty', 'artful'): + cmd.extend(["--append=iscsi_auto"]) + + # publish the ephemeral image (used in root=URL) + cmd.append("--publish=%s" % ftypes[cls.ephemeral_ftype]) + logger.info("Publishing ephemeral image as %s", cmd[-1]) + # publish the target image + cmd.append("--publish=%s" % ftypes[cls.target_ftype]) + logger.info("Publishing target image as %s", cmd[-1]) + + # set curtin install source + install_src = cls.get_install_source(ftypes) + + logger.info("Curtin install source URI: %s", install_src) # check for network configuration cls.network_state = curtin_net.parse_net_config(cls.conf_file) @@ -605,6 +688,9 @@ macs.append(hwaddr) netdevs = [] if len(macs) > 0: + # take first mac and mark it as the boot interface to prevent DHCP + # on multiple interfaces which can hang the install. 
+ cmd.extend(["--append=BOOTIF=01-%s" % macs[0].replace(":", "-")]) for mac in macs: netdevs.extend(["--netdev=" + DEFAULT_BRIDGE + ",mac={}".format(mac)]) @@ -613,7 +699,7 @@ # build disk arguments disks = [] - sc = util.load_file(cls.conf_file) + sc = cls.load_conf_file() storage_config = yaml.load(sc).get('storage', {}).get('config', {}) cls.disk_wwns = ["wwn=%s" % x.get('wwn') for x in storage_config if 'wwn' in x] @@ -679,7 +765,7 @@ # always attempt to update target nvram (via grub) grub_config = os.path.join(cls.td.install, 'grub.cfg') - if not os.path.exists(grub_config): + if not os.path.exists(grub_config) and not cls.td.restored: with open(grub_config, "w") as fp: fp.write(json.dumps({'grub': {'update_nvram': True}})) configs.append(grub_config) @@ -699,9 +785,6 @@ logger.info('Detected centos, adding default config %s', centos_default) - if cls.multipath: - disks = disks * cls.multipath_num_paths - # set reporting logger cls.reporting_log = os.path.join(cls.td.logs, 'webhooks-events.json') reporting_logger = CaptureReporting(cls.reporting_log) @@ -727,10 +810,34 @@ })) configs.append(reporting_config) - cmd.extend(uefi_flags + netdevs + disks + - [ftypes['vmtest.root-image'], "--kernel=%s" % - ftypes['boot-kernel'], "--initrd=%s" % - ftypes['boot-initrd'], "--", "curtin", "-vv", "install"] + + # For HWE/Edge subarch tests, add a kernel config specifying + # the same kernel package to install to validate subarch kernel is + # installable and bootable. + kconfig = cls.get_kernel_config() + if kconfig: + kconf = os.path.join(cls.td.logs, 'kernel.cfg') + if not os.path.exists(kconf) and not cls.td.restored: + logger.debug('Injecting kernel cfg for hwe/edge subarch: %s', + kconfig) + with open(kconf, 'w') as fp: + fp.write(json.dumps(kconfig)) + configs.append(kconf) + + # pass the ephemeral_ftype image as the boot-able image if trusty + # until LP: #1735046 is fixed. + root_disk = [] + if cls.release in ["trusty"]: + root_disk = ['--boot-image', ftypes.get(cls.ephemeral_ftype)] + if not root_disk[1]: + raise ValueError('No root disk for %s: ephemeral_ftype "%s"', + cls.__name__, cls.ephemeral_ftype) + logger.info('Using root_disk: %s', root_disk) + + cmd.extend(uefi_flags + netdevs + cls.mpath_diskargs(disks) + + root_disk + [ + "--kernel=%s" % ftypes['boot-kernel'], + "--initrd=%s" % ftypes['boot-initrd'], + "--", "curtin", "-vv", "install"] + ["--config=%s" % f for f in configs] + [install_src]) @@ -825,11 +932,6 @@ # unlike NVMe disks, we do not want to configure the iSCSI disks # via KVM, which would use qemu's iSCSI target layer. 
- if cls.multipath: - target_disks = target_disks * cls.multipath_num_paths - extra_disks = extra_disks * cls.multipath_num_paths - nvme_disks = nvme_disks * cls.multipath_num_paths - # output disk is always virtio-blk, with serial of output_disk.img output_disk = '--disk={},driver={},format={},{},{}'.format( cls.td.output_disk, 'virtio-blk', @@ -840,11 +942,9 @@ # create xkvm cmd cmd = (["tools/xkvm", "-v", dowait] + uefi_flags + netdevs + - target_disks + extra_disks + nvme_disks + - ["--", "-drive", - "file=%s,if=virtio,media=cdrom" % cls.td.seed_disk, - "-smp", cls.get_config_smp(), - "-m", "1024"]) + cls.mpath_diskargs(target_disks + extra_disks + nvme_disks) + + ["--disk=file=%s,if=virtio,media=cdrom" % cls.td.seed_disk] + + ["--", "-smp", cls.get_config_smp(), "-m", "1024"]) if not cls.interactive: if cls.arch == 's390x': @@ -931,6 +1031,22 @@ 'removed.', target) @classmethod + def get_install_source(cls, ftypes): + if cls.target_ftype == 'squashfs': + # If we're installing from squashfs source then direct + # curtin to install from the read-only undermount + install_src = "cp:///media/root-ro" + else: + if cls.target_ftype == 'root-image.xz': + stype = "dd-xz" + else: + stype = "tgz" + src = os.path.basename(ftypes[cls.target_ftype]) + install_src = "%s:PUBURL/%s" % (stype, src) + + return install_src + + @classmethod def tearDownClass(cls): success = False sfile = os.path.exists(cls.td.success_file) @@ -1007,6 +1123,15 @@ # prepending ./ makes '/root/file' or 'root/file' work as expected. return os.path.normpath(os.path.join(cls.td.collect, "./" + path)) + @classmethod + def mpath_diskargs(cls, disks): + """make multipath versions of --disk args in disks.""" + if not cls.multipath: + return disks + opt = ",file.locking=off" + return ([d if d == "--disk" else d + opt for d in disks] * + cls.multipath_num_paths) + # Misc functions that are useful for many tests def output_files_exist(self, files): logger.debug('checking files exist: %s', files) @@ -1028,6 +1153,10 @@ with open(self.collect_path(filename), mode) as fp: return fp.read() + def load_collect_file_shell_content(self, filename, mode="r"): + with open(self.collect_path(filename), mode) as fp: + return util.load_shell_content(content=fp.read()) + def load_log_file(self, filename): with open(filename, 'rb') as fp: return fp.read().decode('utf-8', errors='replace') @@ -1134,6 +1263,16 @@ self.assertNotIn("/etc/network/interfaces.d/eth0.cfg", interfacesd.split("\n")) + def test_installed_correct_kernel_package(self): + """ Test curtin installs the correct kernel package. 
""" + + # target_distro is set for non-ubuntu targets + if self.target_distro is not None: + raise SkipTest("Can't check non-ubuntu kernel packages") + + kpackage = self.get_kernel_package() + self.assertIn(kpackage, self.debian_packages) + def run(self, result): super(VMBaseClass, self).run(result) self.record_result(result) @@ -1219,6 +1358,7 @@ return { 'vmtest.root-image': get_psuedo_path('psuedo-root-image'), + 'squashfs': get_psuedo_path('psuedo-squashfs'), 'boot-kernel': get_psuedo_path('psuedo-kernel'), 'boot-initrd': get_psuedo_path('psuedo-initrd'), 'vmtest.root-tgz': get_psuedo_path('psuedo-root-tgz') @@ -1259,11 +1399,32 @@ def test_reporting_data(self): pass + def test_installed_correct_kernel_package(self): + pass + def _maybe_raise(self, exc): if self.allow_test_fails: raise exc +def get_rfc4173(ip, port, target, user=None, pword=None, + iuser=None, ipword=None, lun=1): + + rfc4173 = '' + if user: + rfc4173 += '%s:%s' % (user, pword) + if iuser: + # empty target user/password + if not rfc4173: + rfc4173 += ':' + rfc4173 += ':%s:%s' % (iuser, ipword) + # any auth specified? + if rfc4173: + rfc4173 += '@' + rfc4173 += '%s::%s:%d:%s' % (ip, port, lun, target) + return rfc4173 + + def find_error_context(err_match, contents, nrchars=200): context_start = err_match.start() - nrchars context_end = err_match.end() + nrchars @@ -1459,7 +1620,7 @@ logger.info('Taring %s disks sparsely to %s', len(disks), outfile) util.subp(cmd, capture=True) else: - logger.error('Failed to find "disks" dir under tmpdir: %s', tmpdir) + logger.debug('No "disks" dir found in tmpdir: %s', tmpdir) def boot_log_wrap(name, func, cmd, console_log, timeout, purpose): diff -Nru curtin-0.1.0~bzr532/tests/vmtests/releases.py curtin-17.1-11-ga4c9636b/tests/vmtests/releases.py --- curtin-0.1.0~bzr532/tests/vmtests/releases.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/releases.py 2018-01-18 21:10:51.000000000 +0000 @@ -8,16 +8,21 @@ class _UbuntuBase(_ReleaseBase): distro = "ubuntu" + kflavor = "generic" class _CentosFromUbuntuBase(_UbuntuBase): # base for installing centos tarballs from ubuntu base target_distro = "centos" + target_ftype = "vmtest.root-tgz" + kflavor = None class _UbuntuCoreUbuntuBase(_UbuntuBase): # base for installing UbuntuCore root-image.xz from ubuntu base target_distro = "ubuntu-core-16" + target_ftype = "root-image.xz" + kflavor = None class _Centos70FromXenialBase(_CentosFromUbuntuBase): @@ -39,15 +44,6 @@ target_release = "centos66" -class _PreciseBase(_UbuntuBase): - release = "precise" - - -class _PreciseHWET(_UbuntuBase): - release = "precise" - krel = "trusty" - - class _TrustyBase(_UbuntuBase): release = "trusty" @@ -79,19 +75,33 @@ class _XenialBase(_UbuntuBase): release = "xenial" + subarch = "ga-16.04" -class _ZestyBase(_UbuntuBase): - release = "zesty" +class _XenialGA(_UbuntuBase): + release = "xenial" + subarch = "ga-16.04" + + +class _XenialHWE(_UbuntuBase): + release = "xenial" + subarch = "hwe-16.04" + + +class _XenialEdge(_UbuntuBase): + release = "xenial" + subarch = "hwe-16.04-edge" class _ArtfulBase(_UbuntuBase): release = "artful" +class _BionicBase(_UbuntuBase): + release = "bionic" + + class _Releases(object): - precise = _PreciseBase - precise_hwe_t = _PreciseHWET trusty = _TrustyBase trusty_hwe_u = _TrustyHWEU trusty_hwe_v = _TrustyHWEV @@ -99,8 +109,11 @@ trusty_hwe_x = _TrustyHWEX trustyfromxenial = _TrustyFromXenial xenial = _XenialBase - zesty = _ZestyBase + xenial_ga = _XenialGA + xenial_hwe = _XenialHWE + xenial_edge = 
_XenialEdge artful = _ArtfulBase + bionic = _BionicBase class _CentosReleases(object): diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_apt_config_cmd.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_apt_config_cmd.py --- curtin-0.1.0~bzr532/tests/vmtests/test_apt_config_cmd.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_apt_config_cmd.py 2018-01-18 21:10:51.000000000 +0000 @@ -15,7 +15,7 @@ extra_disks = [] fstab_expected = {} disk_to_check = [] - collect_scripts = [textwrap.dedent(""" + collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" cd OUTPUT_COLLECT_D cat /etc/fstab > fstab ls /dev/disk/by-dname > ls_dname @@ -55,9 +55,9 @@ __test__ = True -class ZestyTestAptConfigCMDCMD(relbase.zesty, TestAptConfigCMD): +class ArtfulTestAptConfigCMDCMD(relbase.artful, TestAptConfigCMD): __test__ = True -class ArtfulTestAptConfigCMDCMD(relbase.artful, TestAptConfigCMD): +class BionicTestAptConfigCMDCMD(relbase.bionic, TestAptConfigCMD): __test__ = True diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_apt_source.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_apt_source.py --- curtin-0.1.0~bzr532/tests/vmtests/test_apt_source.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_apt_source.py 2018-01-18 21:10:51.000000000 +0000 @@ -16,7 +16,7 @@ extra_disks = [] fstab_expected = {} disk_to_check = [] - collect_scripts = [textwrap.dedent(""" + collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" cd OUTPUT_COLLECT_D cat /etc/fstab > fstab ls /dev/disk/by-dname > ls_dname diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_basic.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_basic.py --- curtin-0.1.0~bzr532/tests/vmtests/test_basic.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_basic.py 2018-01-18 21:10:51.000000000 +0000 @@ -15,7 +15,7 @@ nvme_disks = ['4G'] disk_to_check = [('main_disk_with_in---valid--dname', 1), ('main_disk_with_in---valid--dname', 2)] - collect_scripts = [textwrap.dedent(""" + collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" cd OUTPUT_COLLECT_D blkid -o export /dev/vda > blkid_output_vda blkid -o export /dev/vda1 > blkid_output_vda1 @@ -104,6 +104,7 @@ break self.assertIsNotNone(fstab_entry) self.assertEqual(fstab_entry.split(' ')[1], "/btrfs") + self.assertEqual(fstab_entry.split(' ')[3], "defaults,noatime") def test_whole_disk_format(self): # confirm the whole disk format is the expected device @@ -137,49 +138,6 @@ self.assertEqual(source_version, installed_version) -class PreciseTestBasic(relbase.precise, TestBasicAbs): - __test__ = True - - collect_scripts = [textwrap.dedent(""" - cd OUTPUT_COLLECT_D - blkid -o export /dev/vda > blkid_output_vda - blkid -o export /dev/vda1 > blkid_output_vda1 - blkid -o export /dev/vda2 > blkid_output_vda2 - f="btrfs_uuid_vdd" - btrfs-show /dev/vdd | awk '/uuid/ {print $4}' > $f - cat /proc/partitions > proc_partitions - ls -al /dev/disk/by-uuid/ > ls_uuid - cat /etc/fstab > fstab - mkdir -p /dev/disk/by-dname - ls /dev/disk/by-dname/ > ls_dname - find /etc/network/interfaces.d > find_interfacesd - - v="" - out=$(apt-config shell v Acquire::HTTP::Proxy) - eval "$out" - echo "$v" > apt-proxy - """)] - - def test_whole_disk_format(self): - # confirm the whole disk format is the expected device - btrfs_uuid = self.load_collect_file("btrfs_uuid_vdd").strip() - - self.assertTrue(btrfs_uuid is not None) - self.assertEqual(len(btrfs_uuid), 36) - - # extract uuid from ls_uuid by kname - 
kname_uuid = self._kname_to_uuid('vdd') - - # compare them - self.assertEqual(kname_uuid, btrfs_uuid) - - def test_ptable(self): - print("test_ptable does not work for Precise") - - def test_dname(self): - print("test_dname does not work for Precise") - - class TrustyTestBasic(relbase.trusty, TestBasicAbs): __test__ = True @@ -193,20 +151,19 @@ print("test_ptable does not work for Trusty") -class PreciseHWETTestBasic(relbase.precise_hwe_t, PreciseTestBasic): - # FIXME: off due to test_whole_disk_format failing - __test__ = False +class TrustyHWEXTestBasic(relbase.trusty_hwe_x, TrustyTestBasic): + __test__ = True -class TrustyHWEXTestBasic(relbase.trusty_hwe_x, TrustyTestBasic): +class XenialGATestBasic(relbase.xenial_ga, TestBasicAbs): __test__ = True -class XenialTestBasic(relbase.xenial, TestBasicAbs): +class XenialHWETestBasic(relbase.xenial_hwe, TestBasicAbs): __test__ = True -class ZestyTestBasic(relbase.zesty, TestBasicAbs): +class XenialEdgeTestBasic(relbase.xenial_edge, TestBasicAbs): __test__ = True @@ -214,12 +171,16 @@ __test__ = True +class BionicTestBasic(relbase.bionic, TestBasicAbs): + __test__ = True + + class TestBasicScsiAbs(TestBasicAbs): conf_file = "examples/tests/basic_scsi.yaml" disk_driver = 'scsi-hd' extra_disks = ['128G', '128G', '4G'] nvme_disks = ['4G'] - collect_scripts = [textwrap.dedent(""" + collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" cd OUTPUT_COLLECT_D blkid -o export /dev/sda > blkid_output_sda blkid -o export /dev/sda1 > blkid_output_sda1 @@ -285,7 +246,7 @@ self.assertIsNotNone(fstab_entry) self.assertEqual(fstab_entry.split(' ')[1], "/home") - # Test whole disk sdc is mounted at /btrfs + # Test whole disk sdc is mounted at /btrfs, and uses defaults,noatime uuid = self._kname_to_uuid('sdc') fstab_entry = None for line in fstab_lines: @@ -294,6 +255,7 @@ break self.assertIsNotNone(fstab_entry) self.assertEqual(fstab_entry.split(' ')[1], "/btrfs") + self.assertEqual(fstab_entry.split(' ')[3], "defaults,noatime") def test_whole_disk_format(self): # confirm the whole disk format is the expected device @@ -310,13 +272,21 @@ self.assertEqual(kname_uuid, btrfs_uuid) -class XenialTestScsiBasic(relbase.xenial, TestBasicScsiAbs): +class XenialGATestScsiBasic(relbase.xenial_ga, TestBasicScsiAbs): __test__ = True -class ZestyTestScsiBasic(relbase.zesty, TestBasicScsiAbs): +class XenialHWETestScsiBasic(relbase.xenial_hwe, TestBasicScsiAbs): + __test__ = True + + +class XenialEdgeTestScsiBasic(relbase.xenial_edge, TestBasicScsiAbs): __test__ = True class ArtfulTestScsiBasic(relbase.artful, TestBasicScsiAbs): __test__ = True + + +class BionicTestScsiBasic(relbase.bionic, TestBasicScsiAbs): + __test__ = True diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_bcache_basic.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_bcache_basic.py --- curtin-0.1.0~bzr532/tests/vmtests/test_bcache_basic.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_bcache_basic.py 2018-01-18 21:10:51.000000000 +0000 @@ -12,7 +12,7 @@ nr_cpus = 2 dirty_disks = True extra_disks = ['2G'] - collect_scripts = [textwrap.dedent(""" + collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" cd OUTPUT_COLLECT_D bcache-super-show /dev/vda2 > bcache_super_vda2 ls /sys/fs/bcache > bcache_ls @@ -43,10 +43,6 @@ self.check_file_regex("cmdline", r"root=UUID=") -class PreciseHWETBcacheBasic(relbase.precise_hwe_t, TestBcacheBasic): - __test__ = True - - class TrustyBcacheBasic(relbase.trusty, TestBcacheBasic): __test__ = False # covered by 
test_raid5_bcache @@ -55,13 +51,21 @@ __test__ = False # covered by test_raid5_bcache -class XenialBcacheBasic(relbase.xenial, TestBcacheBasic): +class XenialGABcacheBasic(relbase.xenial_ga, TestBcacheBasic): __test__ = True -class ZestyBcacheBasic(relbase.zesty, TestBcacheBasic): +class XenialHWEBcacheBasic(relbase.xenial_hwe, TestBcacheBasic): + __test__ = True + + +class XenialEdgeBcacheBasic(relbase.xenial_edge, TestBcacheBasic): __test__ = True class ArtfulBcacheBasic(relbase.artful, TestBcacheBasic): __test__ = True + + +class BionicBcacheBasic(relbase.bionic, TestBcacheBasic): + __test__ = True diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_bcache_bug1718699.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_bcache_bug1718699.py --- curtin-0.1.0~bzr532/tests/vmtests/test_bcache_bug1718699.py 1970-01-01 00:00:00.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_bcache_bug1718699.py 2018-01-18 21:10:51.000000000 +0000 @@ -0,0 +1,21 @@ +from .releases import base_vm_classes as relbase +from .test_bcache_basic import TestBcacheBasic + + +class TestBcacheBug1718699(TestBcacheBasic): + conf_file = "examples/tests/bcache-wipe-xfs.yaml" + dirty_disks = False + nr_cpus = 2 + extra_disks = ['10G'] + + +class XenialTestBcacheBug1718699(relbase.xenial, TestBcacheBug1718699): + __test__ = True + + +class ArtfulTestBcacheBug1718699(relbase.artful, TestBcacheBug1718699): + __test__ = True + + +class BionicTestBcacheBug1718699(relbase.bionic, TestBcacheBug1718699): + __test__ = True diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_centos_basic.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_centos_basic.py --- curtin-0.1.0~bzr532/tests/vmtests/test_centos_basic.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_centos_basic.py 2018-01-18 21:10:51.000000000 +0000 @@ -12,7 +12,7 @@ extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00" # XXX: command | tee output is required for Centos under SELinux # http://danwalsh.livejournal.com/22860.html - collect_scripts = [textwrap.dedent( + collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent( """ cd OUTPUT_COLLECT_D cat /etc/fstab > fstab @@ -24,6 +24,8 @@ cp -a /var/log/cloud-init* . cp -a /var/lib/cloud ./var_lib_cloud cp -a /run/cloud-init ./run_cloud-init + rpm -E '%rhel' > rpm_dist_version_major + cp -a /etc/centos-release . 
""")] fstab_expected = { 'LABEL=cloudimg-rootfs': '/', @@ -38,6 +40,21 @@ def test_output_files_exist(self): self.output_files_exist(["fstab"]) + def test_centos_release(self): + """Test this image is the centos release expected""" + self.output_files_exist(["rpm_dist_version_major", "centos-release"]) + + centos_release = self.load_collect_file("centos-release").lower() + rpm_major_version = ( + self.load_collect_file("rpm_dist_version_major").strip()) + _, os_id, os_version = self.target_release.partition("centos") + + self.assertTrue(os_version.startswith(rpm_major_version), + "%s doesn't start with %s" % (os_version, + rpm_major_version)) + self.assertTrue(centos_release.startswith(os_id), + "%s doesn't start with %s" % (centos_release, os_id)) + # FIXME: this naming scheme needs to be replaced class Centos70FromXenialTestBasic(relbase.centos70fromxenial, diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_install_umount.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_install_umount.py --- curtin-0.1.0~bzr532/tests/vmtests/test_install_umount.py 1970-01-01 00:00:00.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_install_umount.py 2018-01-18 21:10:51.000000000 +0000 @@ -0,0 +1,56 @@ +from . import VMBaseClass +from .releases import base_vm_classes as relbase + +import textwrap +import yaml + + +class TestInstallUnmount(VMBaseClass): + """ Test a curtin install which disables unmounting """ + conf_file = "examples/tests/install_disable_unmount.yaml" + extra_nics = [] + extra_disks = [] + collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" + cd OUTPUT_COLLECT_D + sfdisk --list > sfdisk_list + for d in /dev/[sv]d[a-z] /dev/xvd?; do + [ -b "$d" ] || continue + echo == $d == + sgdisk --print $d + done > sgdisk_list + blkid > blkid + cat /proc/partitions > proc_partitions + cp /etc/network/interfaces interfaces + if [ -f /var/log/cloud-init-output.log ]; then + cp /var/log/cloud-init-output.log . + fi + cp /var/log/cloud-init.log . 
+ find /etc/network/interfaces.d > find_interfacesd + """)] + + def test_proc_mounts_before_unmount(self): + """Test TARGET_MOUNT_POINT value is in ephemeral /proc/mounts""" + self.output_files_exist([ + 'root/postinst_mounts.out', + 'root/target.out']) + + # read target mp and mounts + target_mp = self.load_collect_file('root/target.out').strip() + curtin_mounts = self.load_collect_file('root/postinst_mounts.out') + self.assertIn(target_mp, curtin_mounts) + + def test_install_config_has_unmount_disabled(self): + """Test that install ran with unmount: disabled""" + collect_curtin_cfg = 'root/curtin-install-cfg.yaml' + self.output_files_exist([collect_curtin_cfg]) + curtin_cfg = yaml.load(self.load_collect_file(collect_curtin_cfg)) + + # check that we have + # install: + # unmount: disabled + install_unmount = curtin_cfg.get('install', {}).get('unmount') + self.assertEqual(install_unmount, "disabled") + + +class XenialTestInstallUnmount(relbase.xenial, TestInstallUnmount): + __test__ = True diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_iscsi.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_iscsi.py --- curtin-0.1.0~bzr532/tests/vmtests/test_iscsi.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_iscsi.py 2018-01-18 21:10:51.000000000 +0000 @@ -14,7 +14,7 @@ conf_file = "examples/tests/basic_iscsi.yaml" nr_testfiles = 4 - collect_scripts = [textwrap.dedent( + collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent( """ cd OUTPUT_COLLECT_D cat /etc/fstab > fstab @@ -47,21 +47,25 @@ (testfile, expected_content, content)) -class PreciseTestIscsiBasic(relbase.precise, TestBasicIscsiAbs): +class TrustyTestIscsiBasic(relbase.trusty, TestBasicIscsiAbs): __test__ = True -class TrustyTestIscsiBasic(relbase.trusty, TestBasicIscsiAbs): +class XenialGATestIscsiBasic(relbase.xenial_ga, TestBasicIscsiAbs): __test__ = True -class XenialTestIscsiBasic(relbase.xenial, TestBasicIscsiAbs): +class XenialHWETestIscsiBasic(relbase.xenial_hwe, TestBasicIscsiAbs): __test__ = True -class ZestyTestIscsiBasic(relbase.zesty, TestBasicIscsiAbs): +class XenialEdgeTestIscsiBasic(relbase.xenial_edge, TestBasicIscsiAbs): __test__ = True class ArtfulTestIscsiBasic(relbase.artful, TestBasicIscsiAbs): __test__ = True + + +class BionicTestIscsiBasic(relbase.bionic, TestBasicIscsiAbs): + __test__ = True diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_journald_reporter.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_journald_reporter.py --- curtin-0.1.0~bzr532/tests/vmtests/test_journald_reporter.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_journald_reporter.py 2018-01-18 21:10:51.000000000 +0000 @@ -10,28 +10,13 @@ conf_file = "examples/tests/journald_reporter.yaml" extra_disks = [] extra_nics = [] - collect_scripts = [textwrap.dedent(""" + collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" cd OUTPUT_COLLECT_D - sfdisk --list > sfdisk_list - for d in /dev/[sv]d[a-z] /dev/xvd?; do - [ -b "$d" ] || continue - echo == $d == - sgdisk --print $d - done > sgdisk_list - blkid > blkid - cat /proc/partitions > proc_partitions - cp /etc/network/interfaces interfaces - if [ -f /var/log/cloud-init-output.log ]; then - cp /var/log/cloud-init-output.log . - fi - cp /var/log/cloud-init.log . 
find /etc/network/interfaces.d > find_interfacesd """)] def test_output_files_exist(self): - self.output_files_exist(["sfdisk_list", "blkid", - "proc_partitions", "interfaces", - "root/journalctl.curtin_events.log", + self.output_files_exist(["root/journalctl.curtin_events.log", "root/journalctl.curtin_events.json"]) def test_journal_reporter_events(self): @@ -50,3 +35,7 @@ class ArtfulTestJournaldReporter(relbase.artful, TestJournaldReporter): __test__ = True + + +class BionicTestJournaldReporter(relbase.bionic, TestJournaldReporter): + __test__ = True diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_lvm_iscsi.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_lvm_iscsi.py --- curtin-0.1.0~bzr532/tests/vmtests/test_lvm_iscsi.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_lvm_iscsi.py 2018-01-18 21:10:51.000000000 +0000 @@ -47,21 +47,29 @@ self.check_file_strippedline("pvs", "vg2=/dev/sdb6") -class PreciseTestIscsiLvm(relbase.precise, TestLvmIscsiAbs): +class TrustyTestIscsiLvm(relbase.trusty, TestLvmIscsiAbs): __test__ = True -class TrustyTestIscsiLvm(relbase.trusty, TestLvmIscsiAbs): +class XenialTestIscsiLvm(relbase.xenial, TestLvmIscsiAbs): __test__ = True -class XenialTestIscsiLvm(relbase.xenial, TestLvmIscsiAbs): +class XenialGATestIscsiLvm(relbase.xenial_ga, TestLvmIscsiAbs): + __test__ = True + + +class XenialHWETestIscsiLvm(relbase.xenial_hwe, TestLvmIscsiAbs): __test__ = True -class ZestyTestIscsiLvm(relbase.zesty, TestLvmIscsiAbs): +class XenialEdgeTestIscsiLvm(relbase.xenial_edge, TestLvmIscsiAbs): __test__ = True class ArtfulTestIscsiLvm(relbase.artful, TestLvmIscsiAbs): __test__ = True + + +class BionicTestIscsiLvm(relbase.bionic, TestLvmIscsiAbs): + __test__ = True diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_lvm.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_lvm.py --- curtin-0.1.0~bzr532/tests/vmtests/test_lvm.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_lvm.py 2018-01-18 21:10:51.000000000 +0000 @@ -9,7 +9,7 @@ conf_file = "examples/tests/lvm.yaml" interactive = False extra_disks = [] - collect_scripts = [textwrap.dedent(""" + collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" cd OUTPUT_COLLECT_D cat /etc/fstab > fstab ls /dev/disk/by-dname > ls_dname @@ -48,29 +48,29 @@ raise SkipTest("test_dname does not work for %s" % self.release) -class PreciseTestLvm(relbase.precise, TestLvmAbs): +class TrustyTestLvm(relbase.trusty, TestLvmAbs): __test__ = True -class PreciseHWETTestLvm(relbase.precise_hwe_t, PreciseTestLvm): +class TrustyHWEXTestLvm(relbase.trusty_hwe_x, TestLvmAbs): __test__ = True -class TrustyTestLvm(relbase.trusty, TestLvmAbs): +class XenialGATestLvm(relbase.xenial_ga, TestLvmAbs): __test__ = True -class TrustyHWEXTestLvm(relbase.trusty_hwe_x, TestLvmAbs): +class XenialHWETestLvm(relbase.xenial_hwe, TestLvmAbs): __test__ = True -class XenialTestLvm(relbase.xenial, TestLvmAbs): +class XenialEdgeTestLvm(relbase.xenial_edge, TestLvmAbs): __test__ = True -class ZestyTestLvm(relbase.zesty, TestLvmAbs): +class ArtfulTestLvm(relbase.artful, TestLvmAbs): __test__ = True -class ArtfulTestLvm(relbase.artful, TestLvmAbs): +class BionicTestLvm(relbase.bionic, TestLvmAbs): __test__ = True diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_lvm_root.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_lvm_root.py --- curtin-0.1.0~bzr532/tests/vmtests/test_lvm_root.py 1970-01-01 00:00:00.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_lvm_root.py 2018-01-18 
21:10:51.000000000 +0000 @@ -0,0 +1,155 @@ +from . import VMBaseClass +from .releases import base_vm_classes as relbase + +import json +import textwrap + + +class TestLvmRootAbs(VMBaseClass): + conf_file = "examples/tests/lvmroot.yaml" + interactive = False + rootfs_uuid = '04836770-e989-460f-8774-8e277ddcb40f' + extra_disks = [] + dirty_disks = True + collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" + cd OUTPUT_COLLECT_D + cat /etc/fstab > fstab + lsblk --json --fs -o KNAME,MOUNTPOINT,UUID,FSTYPE > lsblk.json + lsblk --fs -P -o KNAME,MOUNTPOINT,UUID,FSTYPE > lsblk.out + ls -al /dev/disk/by-dname > ls_al_dname + ls -al /dev/disk/by-id > ls_al_byid + ls -al /dev/disk/by-uuid > ls_al_byuuid + ls -al /dev/mapper > ls_al_dev_mapper + find /etc/network/interfaces.d > find_interfacesd + pvdisplay -C --separator = -o vg_name,pv_name --noheadings > pvs + lvdisplay -C --separator = -o lv_name,vg_name --noheadings > lvs + pvdisplay > pvdisplay + vgdisplay > vgdisplay + lvdisplay > lvdisplay + ls -al /dev/root_vg/ > dev_root_vg + """)] + fstab_expected = { + 'UUID=04836770-e989-460f-8774-8e277ddcb40f': '/', + } + conf_replace = {} + + def test_output_files_exist(self): + self.output_files_exist(["fstab"]) + + def test_rootfs_format(self): + if self.release not in ['trusty']: + self.output_files_exist(["lsblk.json"]) + lsblk_data = json.load(open(self.collect_path('lsblk.json'))) + print(json.dumps(lsblk_data, indent=4)) + [entry] = [entry for entry in lsblk_data.get('blockdevices') + if entry['mountpoint'] == '/'] + print(entry) + self.assertEqual(self.conf_replace['__ROOTFS_FORMAT__'], + entry['fstype']) + else: + # no json output on trusty + self.output_files_exist(["lsblk.out"]) + lsblk_data = open(self.collect_path('lsblk.out')).readlines() + print(lsblk_data) + [root] = [line.strip() for line in lsblk_data + if 'MOUNTPOINT="/"' in line] + print(root) + [fstype] = [val.replace('"', '').split("=")[1] + for val in root.split() if 'FSTYPE' in val] + print(fstype) + self.assertEqual(self.conf_replace['__ROOTFS_FORMAT__'], fstype) + + +class TrustyTestLvmRootExt4(relbase.trusty, TestLvmRootAbs): + __test__ = True + conf_replace = { + '__ROOTFS_FORMAT__': 'ext4', + } + + +class TrustyTestLvmRootXfs(relbase.trusty, TestLvmRootAbs): + __test__ = True + # xfs on trusty can't support uuid= + fstab_expected = {} + conf_replace = { + '__ROOTFS_FORMAT__': 'xfs', + } + + +class XenialTestLvmRootExt4(relbase.xenial, TestLvmRootAbs): + __test__ = True + conf_replace = { + '__ROOTFS_FORMAT__': 'ext4', + } + + +class XenialTestLvmRootXfs(relbase.xenial, TestLvmRootAbs): + __test__ = True + conf_replace = { + '__ROOTFS_FORMAT__': 'xfs', + } + + +class ArtfulTestLvmRootExt4(relbase.artful, TestLvmRootAbs): + __test__ = True + conf_replace = { + '__ROOTFS_FORMAT__': 'ext4', + } + + +class ArtfulTestLvmRootXfs(relbase.artful, TestLvmRootAbs): + __test__ = True + conf_replace = { + '__ROOTFS_FORMAT__': 'xfs', + } + + +class TestUefiLvmRootAbs(TestLvmRootAbs): + conf_file = "examples/tests/uefi_lvmroot.yaml" + uefi = True + + +class XenialTestUefiLvmRootExt4(relbase.xenial, TestUefiLvmRootAbs): + __test__ = True + conf_replace = { + '__BOOTFS_FORMAT__': 'ext4', + '__ROOTFS_FORMAT__': 'ext4', + } + + +class XenialTestUefiLvmRootXfs(relbase.xenial, TestUefiLvmRootAbs): + __test__ = True + conf_replace = { + '__BOOTFS_FORMAT__': 'ext4', + '__ROOTFS_FORMAT__': 'xfs', + } + + +class XenialTestUefiLvmRootXfsBootXfs(relbase.xenial, TestUefiLvmRootAbs): + __test__ = True + conf_replace = { + 
'__BOOTFS_FORMAT__': 'xfs', # Expected to fail until LP: #1652822 + '__ROOTFS_FORMAT__': 'xfs', + } + + @classmethod + def setUpClass(cls): + cls.skip_by_date(cls.__name__, cls.release, "1652822", + fixby=(2018, 1, 20), removeby=(2018, 2, 23)) + super().setUpClass() + + +class ArtfulTestUefiLvmRootExt4(relbase.artful, TestUefiLvmRootAbs): + __test__ = True + conf_replace = { + '__BOOTFS_FORMAT__': 'ext4', + '__ROOTFS_FORMAT__': 'ext4', + } + + +class ArtfulTestUefiLvmRootXfs(relbase.artful, TestUefiLvmRootAbs): + __test__ = True + conf_replace = { + '__BOOTFS_FORMAT__': 'ext4', + '__ROOTFS_FORMAT__': 'xfs', + } diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_mdadm_bcache.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_mdadm_bcache.py --- curtin-0.1.0~bzr532/tests/vmtests/test_mdadm_bcache.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_mdadm_bcache.py 2018-01-18 21:10:51.000000000 +0000 @@ -8,7 +8,7 @@ interactive = False extra_disks = [] active_mdadm = "1" - collect_scripts = [textwrap.dedent(""" + collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" cd OUTPUT_COLLECT_D cat /etc/fstab > fstab mdadm --detail --scan > mdadm_status @@ -132,11 +132,15 @@ __test__ = True -class XenialTestMdadmBcache(relbase.xenial, TestMdadmBcacheAbs): +class XenialGATestMdadmBcache(relbase.xenial_ga, TestMdadmBcacheAbs): __test__ = True -class ZestyTestMdadmBcache(relbase.zesty, TestMdadmBcacheAbs): +class XenialHWETestMdadmBcache(relbase.xenial_hwe, TestMdadmBcacheAbs): + __test__ = True + + +class XenialEdgeTestMdadmBcache(relbase.xenial_edge, TestMdadmBcacheAbs): __test__ = True @@ -144,6 +148,10 @@ __test__ = True +class BionicTestMdadmBcache(relbase.bionic, TestMdadmBcacheAbs): + __test__ = True + + class TestMirrorbootAbs(TestMdadmAbs): # alternative config for more complex setup conf_file = "examples/tests/mirrorboot.yaml" @@ -172,11 +180,15 @@ __test__ = True -class XenialTestMirrorboot(relbase.xenial, TestMirrorbootAbs): +class XenialGATestMirrorboot(relbase.xenial_ga, TestMirrorbootAbs): __test__ = True -class ZestyTestMirrorboot(relbase.zesty, TestMirrorbootAbs): +class XenialHWETestMirrorboot(relbase.xenial_hwe, TestMirrorbootAbs): + __test__ = True + + +class XenialEdgeTestMirrorboot(relbase.xenial_edge, TestMirrorbootAbs): __test__ = True @@ -184,6 +196,10 @@ __test__ = True +class BionicTestMirrorboot(relbase.bionic, TestMirrorbootAbs): + __test__ = True + + class TestMirrorbootPartitionsAbs(TestMdadmAbs): # alternative config for more complex setup conf_file = "examples/tests/mirrorboot-msdos-partition.yaml" @@ -213,13 +229,18 @@ __test__ = True -class XenialTestMirrorbootPartitions(relbase.xenial, - TestMirrorbootPartitionsAbs): +class XenialGATestMirrorbootPartitions(relbase.xenial_ga, + TestMirrorbootPartitionsAbs): __test__ = True -class ZestyTestMirrorbootPartitions(relbase.zesty, - TestMirrorbootPartitionsAbs): +class XenialHWETestMirrorbootPartitions(relbase.xenial_hwe, + TestMirrorbootPartitionsAbs): + __test__ = True + + +class XenialEdgeTestMirrorbootPartitions(relbase.xenial_edge, + TestMirrorbootPartitionsAbs): __test__ = True @@ -228,6 +249,11 @@ __test__ = True +class BionicTestMirrorbootPartitions(relbase.bionic, + TestMirrorbootPartitionsAbs): + __test__ = True + + class TestMirrorbootPartitionsUEFIAbs(TestMdadmAbs): # alternative config for more complex setup conf_file = "examples/tests/mirrorboot-uefi.yaml" @@ -256,13 +282,18 @@ print("test_ptable does not work for Trusty") -class 
XenialTestMirrorbootPartitionsUEFI(relbase.xenial, - TestMirrorbootPartitionsUEFIAbs): +class XenialGATestMirrorbootPartitionsUEFI(relbase.xenial_ga, + TestMirrorbootPartitionsUEFIAbs): + __test__ = True + + +class XenialHWETestMirrorbootPartitionsUEFI(relbase.xenial_hwe, + TestMirrorbootPartitionsUEFIAbs): __test__ = True -class ZestyTestMirrorbootPartitionsUEFI(relbase.zesty, - TestMirrorbootPartitionsUEFIAbs): +class XenialEdgeTestMirrorbootPartitionsUEFI(relbase.xenial_edge, + TestMirrorbootPartitionsUEFIAbs): __test__ = True @@ -271,6 +302,11 @@ __test__ = True +class BionicTestMirrorbootPartitionsUEFI(relbase.bionic, + TestMirrorbootPartitionsUEFIAbs): + __test__ = True + + class TestRaid5bootAbs(TestMdadmAbs): # alternative config for more complex setup conf_file = "examples/tests/raid5boot.yaml" @@ -300,11 +336,15 @@ __test__ = True -class XenialTestRaid5boot(relbase.xenial, TestRaid5bootAbs): +class XenialGATestRaid5boot(relbase.xenial_ga, TestRaid5bootAbs): __test__ = True -class ZestyTestRaid5boot(relbase.zesty, TestRaid5bootAbs): +class XenialHWETestRaid5boot(relbase.xenial_hwe, TestRaid5bootAbs): + __test__ = True + + +class XenialEdgeTestRaid5boot(relbase.xenial_edge, TestRaid5bootAbs): __test__ = True @@ -312,6 +352,10 @@ __test__ = True +class BionicTestRaid5boot(relbase.bionic, TestRaid5bootAbs): + __test__ = True + + class TestRaid6bootAbs(TestMdadmAbs): # alternative config for more complex setup conf_file = "examples/tests/raid6boot.yaml" @@ -353,11 +397,15 @@ __test__ = True -class XenialTestRaid6boot(relbase.xenial, TestRaid6bootAbs): +class XenialGATestRaid6boot(relbase.xenial_ga, TestRaid6bootAbs): + __test__ = True + + +class XenialHWETestRaid6boot(relbase.xenial_hwe, TestRaid6bootAbs): __test__ = True -class ZestyTestRaid6boot(relbase.zesty, TestRaid6bootAbs): +class XenialEdgeTestRaid6boot(relbase.xenial_edge, TestRaid6bootAbs): __test__ = True @@ -365,6 +413,10 @@ __test__ = True +class BionicTestRaid6boot(relbase.bionic, TestRaid6bootAbs): + __test__ = True + + class TestRaid10bootAbs(TestMdadmAbs): # alternative config for more complex setup conf_file = "examples/tests/raid10boot.yaml" @@ -394,11 +446,15 @@ __test__ = True -class XenialTestRaid10boot(relbase.xenial, TestRaid10bootAbs): +class XenialGATestRaid10boot(relbase.xenial_ga, TestRaid10bootAbs): + __test__ = True + + +class XenialHWETestRaid10boot(relbase.xenial_hwe, TestRaid10bootAbs): __test__ = True -class ZestyTestRaid10boot(relbase.zesty, TestRaid10bootAbs): +class XenialEdgeTestRaid10boot(relbase.xenial_edge, TestRaid10bootAbs): __test__ = True @@ -406,6 +462,10 @@ __test__ = True +class BionicTestRaid10boot(relbase.bionic, TestRaid10bootAbs): + __test__ = True + + class TestAllindataAbs(TestMdadmAbs): # more complex, needs more time # alternative config for more complex setup @@ -495,13 +555,21 @@ __test__ = False # lukes=no does not disable mounting of device -class XenialTestAllindata(relbase.xenial, TestAllindataAbs): +class XenialGATestAllindata(relbase.xenial_ga, TestAllindataAbs): __test__ = True -class ZestyTestAllindata(relbase.zesty, TestAllindataAbs): +class XenialHWETestAllindata(relbase.xenial_hwe, TestAllindataAbs): + __test__ = True + + +class XenialEdgeTestAllindata(relbase.xenial_edge, TestAllindataAbs): __test__ = True class ArtfulTestAllindata(relbase.artful, TestAllindataAbs): __test__ = True + + +class BionicTestAllindata(relbase.bionic, TestAllindataAbs): + __test__ = True diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_mdadm_iscsi.py 
curtin-17.1-11-ga4c9636b/tests/vmtests/test_mdadm_iscsi.py --- curtin-0.1.0~bzr532/tests/vmtests/test_mdadm_iscsi.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_mdadm_iscsi.py 2018-01-18 21:10:51.000000000 +0000 @@ -22,21 +22,25 @@ """)] -class PreciseTestIscsiMdadm(relbase.precise, TestMdadmIscsiAbs): +class TrustyTestIscsiMdadm(relbase.trusty, TestMdadmIscsiAbs): __test__ = True -class TrustyTestIscsiMdadm(relbase.trusty, TestMdadmIscsiAbs): +class XenialGATestIscsiMdadm(relbase.xenial_ga, TestMdadmIscsiAbs): __test__ = True -class XenialTestIscsiMdadm(relbase.xenial, TestMdadmIscsiAbs): +class XenialHWETestIscsiMdadm(relbase.xenial_hwe, TestMdadmIscsiAbs): __test__ = True -class ZestyTestIscsiMdadm(relbase.zesty, TestMdadmIscsiAbs): +class XenialEdgeTestIscsiMdadm(relbase.xenial_edge, TestMdadmIscsiAbs): __test__ = True class ArtfulTestIscsiMdadm(relbase.artful, TestMdadmIscsiAbs): __test__ = True + + +class BionicTestIscsiMdadm(relbase.bionic, TestMdadmIscsiAbs): + __test__ = True diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_multipath.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_multipath.py --- curtin-0.1.0~bzr532/tests/vmtests/test_multipath.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_multipath.py 2018-01-18 21:10:51.000000000 +0000 @@ -10,7 +10,7 @@ disk_driver = 'scsi-hd' extra_disks = [] nvme_disks = [] - collect_scripts = [textwrap.dedent(""" + collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" cd OUTPUT_COLLECT_D blkid -o export /dev/sda > blkid_output_sda blkid -o export /dev/sda1 > blkid_output_sda1 @@ -52,13 +52,21 @@ __test__ = True -class XenialTestMultipathBasic(relbase.xenial, TestMultipathBasicAbs): +class XenialGATestMultipathBasic(relbase.xenial_ga, TestMultipathBasicAbs): __test__ = True -class ZestyTestMultipathBasic(relbase.zesty, TestMultipathBasicAbs): +class XenialHWETestMultipathBasic(relbase.xenial_hwe, TestMultipathBasicAbs): + __test__ = True + + +class XenialEdgeTestMultipathBasic(relbase.xenial_edge, TestMultipathBasicAbs): __test__ = True class ArtfulTestMultipathBasic(relbase.artful, TestMultipathBasicAbs): __test__ = True + + +class BionicTestMultipathBasic(relbase.bionic, TestMultipathBasicAbs): + __test__ = True diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_network_alias.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_network_alias.py --- curtin-0.1.0~bzr532/tests/vmtests/test_network_alias.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_network_alias.py 2018-01-18 21:10:51.000000000 +0000 @@ -41,11 +41,6 @@ __test__ = True -class PreciseHWETTestNetworkAlias(relbase.precise_hwe_t, TestNetworkAliasAbs): - # FIXME: off due to hang at test: Starting execute cloud user/final scripts - __test__ = True - - class TrustyTestNetworkAlias(relbase.trusty, TestNetworkAliasAbs): __test__ = True @@ -73,9 +68,9 @@ __test__ = True -class ZestyTestNetworkAlias(relbase.zesty, TestNetworkAliasAbs): +class ArtfulTestNetworkAlias(relbase.artful, TestNetworkAliasAbs): __test__ = True -class ArtfulTestNetworkAlias(relbase.artful, TestNetworkAliasAbs): +class BionicTestNetworkAlias(relbase.bionic, TestNetworkAliasAbs): __test__ = True diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_network_bonding.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_network_bonding.py --- curtin-0.1.0~bzr532/tests/vmtests/test_network_bonding.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_network_bonding.py 
2018-01-18 21:10:51.000000000 +0000 @@ -1,4 +1,3 @@ -from . import logger from .releases import base_vm_classes as relbase from .test_network import TestNetworkBaseTestsAbs from .releases import centos_base_vm_classes as centos_relbase @@ -28,7 +27,7 @@ def test_ifenslave_installed(self): status = self.load_collect_file("ifenslave_installed") - logger.debug('ifenslave installed: {}'.format(status)) + self.logger.debug('ifenslave installed: {}'.format(status)) self.assertTrue('iputils' in status) def test_etc_network_interfaces(self): @@ -38,14 +37,6 @@ pass -class PreciseHWETTestBonding(relbase.precise_hwe_t, TestNetworkBondingAbs): - __test__ = True - - def test_ifenslave_installed(self): - self.assertIn("ifenslave-2.6", self.debian_packages, - "ifenslave deb not installed") - - class TrustyTestBonding(relbase.trusty, TestNetworkBondingAbs): __test__ = False @@ -70,10 +61,6 @@ __test__ = True -class ZestyTestBonding(relbase.zesty, TestNetworkBondingAbs): - __test__ = True - - class ArtfulTestBonding(relbase.artful, TestNetworkBondingAbs): __test__ = True @@ -88,6 +75,20 @@ self.debian_packages.get('ifenslave')) +class BionicTestBonding(relbase.bionic, TestNetworkBondingAbs): + __test__ = True + + def test_ifenslave_installed(self): + """Bionic should not have ifenslave installed.""" + pass + + def test_ifenslave_not_installed(self): + """Confirm that ifenslave is not installed on bionic""" + self.assertNotIn('ifenslave', self.debian_packages, + "ifenslave is not expected in bionic: %s" % + self.debian_packages.get('ifenslave')) + + class Centos66TestNetworkBonding(centos_relbase.centos66fromxenial, CentosTestNetworkBondingAbs): __test__ = True diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_network_bridging.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_network_bridging.py --- curtin-0.1.0~bzr532/tests/vmtests/test_network_bridging.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_network_bridging.py 2018-01-18 21:10:51.000000000 +0000 @@ -1,4 +1,3 @@ -from . 
import logger from .releases import base_vm_classes as relbase from .releases import centos_base_vm_classes as centos_relbase from .test_network import TestNetworkBaseTestsAbs @@ -154,7 +153,7 @@ if self._network_renderer() == "systemd-networkd": reason = ("%s: skip until lp#1668347" " is fixed" % self.__class__) - logger.warn('Skipping: %s', reason) + self.logger.warn('Skipping: %s', reason) print(reason) return @@ -203,7 +202,7 @@ def test_bridge_utils_installed(self): self.output_files_exist(["bridge-utils_installed"]) status = self.load_collect_file("bridge-utils_installed").strip() - logger.debug('bridge-utils installed: {}'.format(status)) + self.logger.debug('bridge-utils installed: {}'.format(status)) self.assertTrue('bridge' in status) @@ -219,19 +218,9 @@ # only testing Yakkety or newer as older releases do not yet # have updated ifupdown/bridge-utils packages; -class ZestyTestBridging(relbase.zesty, TestBridgeNetworkAbs): - __test__ = True - - class ArtfulTestBridging(relbase.artful, TestBridgeNetworkAbs): __test__ = True - @classmethod - def setUpClass(cls): - cls.skip_by_date(cls.__name__, cls.release, "1721157", - fixby=(2017, 10, 16), removeby=(2017, 11, 16)) - super().setUpClass() - def test_bridge_utils_installed(self): """bridge-utils not needed in artful.""" pass @@ -240,3 +229,16 @@ self.assertNotIn("bridge-utils", self.debian_packages, "bridge-utils is not expected in artful: %s" % self.debian_packages.get('bridge-utils')) + + +class BionicTestBridging(relbase.bionic, TestBridgeNetworkAbs): + __test__ = True + + def test_bridge_utils_installed(self): + """bridge-utils not needed in bionic.""" + pass + + def test_bridge_utils_not_installed(self): + self.assertNotIn("bridge-utils", self.debian_packages, + "bridge-utils is not expected in bionic: %s" % + self.debian_packages.get('bridge-utils')) diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_network_enisource.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_network_enisource.py --- curtin-0.1.0~bzr532/tests/vmtests/test_network_enisource.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_network_enisource.py 2018-01-18 21:10:51.000000000 +0000 @@ -1,4 +1,4 @@ -from . import logger, helpers +from . import helpers from .releases import base_vm_classes as relbase from .test_network import TestNetworkBaseTestsAbs @@ -27,7 +27,8 @@ extract information about what curtin wrote and compare that with what was actually configured (which we capture via ifconfig) - Note: This test is *not* valid for Artful as it has no ENI. + Note: This test is *not* valid for Artful and later as they do not + have ENI. 
""" conf_file = "examples/tests/network_source.yaml" @@ -50,16 +51,16 @@ subprocess.check_call(cmd, stderr=subprocess.STDOUT) curtin_ifaces = self.parse_deb_config(interfaces) - logger.debug('parsed eni dict:\n{}'.format( + self.logger.debug('parsed eni dict:\n{}'.format( yaml.dump(curtin_ifaces, default_flow_style=False, indent=4))) print('parsed eni dict:\n{}'.format( yaml.dump(curtin_ifaces, default_flow_style=False, indent=4))) ip_a = self.load_collect_file("ip_a") - logger.debug('ip a:\n{}'.format(ip_a)) + self.logger.debug('ip a:\n{}'.format(ip_a)) ip_a_dict = helpers.ip_a_to_dict(ip_a) - logger.debug('parsed ip_a dict:\n{}'.format( + self.logger.debug('parsed ip_a dict:\n{}'.format( yaml.dump(ip_a_dict, default_flow_style=False, indent=4))) print('parsed ip_a dict:\n{}'.format( yaml.dump(ip_a_dict, default_flow_style=False, indent=4))) @@ -79,12 +80,6 @@ self.assertEqual(_nocidr(expected_address), _nocidr(actual_address)) -class PreciseTestNetworkENISource(relbase.precise, TestNetworkENISource): - __test__ = False - # not working, still debugging though; possible older ifupdown doesn't - # like the multiple iface method. - - class TrustyTestNetworkENISource(relbase.trusty, TestNetworkENISource): __test__ = True @@ -98,5 +93,4 @@ __test__ = True -class ZestyTestNetworkENISource(relbase.zesty, TestNetworkENISource): - __test__ = True +# Artful and later are deliberately not present. They do not have ifupdown. diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_network_ipv6_enisource.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_network_ipv6_enisource.py --- curtin-0.1.0~bzr532/tests/vmtests/test_network_ipv6_enisource.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_network_ipv6_enisource.py 2018-01-18 21:10:51.000000000 +0000 @@ -12,13 +12,6 @@ pass -class PreciseTestNetworkIPV6ENISource(relbase.precise, - TestNetworkIPV6ENISource): - __test__ = False - # not working, still debugging though; possible older ifupdown doesn't - # like the multiple iface method. - - class TrustyTestNetworkIPV6ENISource(relbase.trusty, TestNetworkIPV6ENISource): __test__ = True @@ -32,10 +25,4 @@ __test__ = True -class ZestyTestNetworkIPV6ENISource(relbase.zesty, TestNetworkIPV6ENISource): - __test__ = True - - -# Artful no longer has eni/ifupdown -class ArtfulTestNetworkIPV6ENISource(relbase.artful, TestNetworkIPV6ENISource): - __test__ = False +# Artful and later are deliberately not present. They do not have ifupdown. 
diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_network_ipv6.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_network_ipv6.py --- curtin-0.1.0~bzr532/tests/vmtests/test_network_ipv6.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_network_ipv6.py 2018-01-18 21:10:51.000000000 +0000 @@ -40,11 +40,6 @@ pass -class PreciseHWETTestNetwork(relbase.precise_hwe_t, TestNetworkIPV6Abs): - # FIXME: off due to hang at test: Starting execute cloud user/final scripts - __test__ = False - - class TrustyTestNetworkIPV6(relbase.trusty, TestNetworkIPV6Abs): __test__ = True @@ -68,11 +63,11 @@ __test__ = True -class ZestyTestNetworkIPV6(relbase.zesty, TestNetworkIPV6Abs): +class ArtfulTestNetworkIPV6(relbase.artful, TestNetworkIPV6Abs): __test__ = True -class ArtfulTestNetworkIPV6(relbase.artful, TestNetworkIPV6Abs): +class BionicTestNetworkIPV6(relbase.bionic, TestNetworkIPV6Abs): __test__ = True diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_network_ipv6_static.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_network_ipv6_static.py --- curtin-0.1.0~bzr532/tests/vmtests/test_network_ipv6_static.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_network_ipv6_static.py 2018-01-18 21:10:51.000000000 +0000 @@ -13,11 +13,6 @@ conf_file = "examples/tests/basic_network_static_ipv6.yaml" -class PreciseHWETTestNetworkIPV6Static(relbase.precise_hwe_t, - TestNetworkIPV6StaticAbs): - __test__ = True - - class TrustyTestNetworkIPV6Static(relbase.trusty, TestNetworkIPV6StaticAbs): __test__ = True @@ -49,11 +44,11 @@ __test__ = True -class ZestyTestNetworkIPV6Static(relbase.zesty, TestNetworkIPV6StaticAbs): +class ArtfulTestNetworkIPV6Static(relbase.artful, TestNetworkIPV6StaticAbs): __test__ = True -class ArtfulTestNetworkIPV6Static(relbase.artful, TestNetworkIPV6StaticAbs): +class BionicTestNetworkIPV6Static(relbase.bionic, TestNetworkIPV6StaticAbs): __test__ = True diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_network_ipv6_vlan.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_network_ipv6_vlan.py --- curtin-0.1.0~bzr532/tests/vmtests/test_network_ipv6_vlan.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_network_ipv6_vlan.py 2018-01-18 21:10:51.000000000 +0000 @@ -12,22 +12,6 @@ conf_file = "examples/tests/vlan_network_ipv6.yaml" -class PreciseTestNetworkIPV6Vlan(relbase.precise, TestNetworkIPV6VlanAbs): - __test__ = True - - # precise ip -d link show output is different (of course) - def test_vlan_enabled(self): - - # we must have at least one - self.assertGreaterEqual(len(self.get_vlans()), 1) - - # did they get configured? 
- for vlan in self.get_vlans(): - link_file = "ip_link_show_" + vlan['name'] - vlan_msg = "vlan id " + str(vlan['vlan_id']) - self.check_file_regex(link_file, vlan_msg) - - class TrustyTestNetworkIPV6Vlan(relbase.trusty, TestNetworkIPV6VlanAbs): __test__ = True @@ -41,11 +25,11 @@ __test__ = True -class ZestyTestNetworkIPV6Vlan(relbase.zesty, TestNetworkIPV6VlanAbs): +class ArtfulTestNetworkIPV6Vlan(relbase.artful, TestNetworkIPV6VlanAbs): __test__ = True -class ArtfulTestNetworkIPV6Vlan(relbase.artful, TestNetworkIPV6VlanAbs): +class BionicTestNetworkIPV6Vlan(relbase.bionic, TestNetworkIPV6VlanAbs): __test__ = True diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_network_mtu.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_network_mtu.py --- curtin-0.1.0~bzr532/tests/vmtests/test_network_mtu.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_network_mtu.py 2018-01-18 21:10:51.000000000 +0000 @@ -155,11 +155,6 @@ pass -class PreciseHWETTestNetworkMtu(relbase.precise_hwe_t, TestNetworkMtuAbs): - # FIXME: Precise mtu / ipv6 is buggy - __test__ = False - - class TrustyTestNetworkMtu(relbase.trusty, TestNetworkMtuAbs): __test__ = True @@ -193,17 +188,23 @@ __test__ = True -class ZestyTestNetworkMtu(relbase.zesty, TestNetworkMtuAbs): +class ArtfulTestNetworkMtu(relbase.artful, TestNetworkMtuAbs): __test__ = True + @classmethod + def setUpClass(cls): + cls.skip_by_date(cls.__name__, cls.release, "1671951", + fixby=(2018, 1, 20), removeby=(2018, 2, 23)) + super().setUpClass() + -class ArtfulTestNetworkMtu(relbase.artful, TestNetworkMtuAbs): +class BionicTestNetworkMtu(relbase.bionic, TestNetworkMtuAbs): __test__ = True @classmethod def setUpClass(cls): cls.skip_by_date(cls.__name__, cls.release, "1671951", - fixby=(2017, 10, 20), removeby=(2018, 1, 23)) + fixby=(2018, 1, 20), removeby=(2018, 2, 23)) super().setUpClass() diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_network.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_network.py --- curtin-0.1.0~bzr532/tests/vmtests/test_network.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_network.py 2018-01-18 21:10:51.000000000 +0000 @@ -1,4 +1,4 @@ -from . import VMBaseClass, logger, helpers +from . 
import VMBaseClass, helpers from .releases import base_vm_classes as relbase from .releases import centos_base_vm_classes as centos_relbase @@ -69,7 +69,7 @@ eni_cfg = "" eni = self.load_collect_file("interfaces") - logger.debug('etc/network/interfaces:\n{}'.format(eni)) + self.logger.debug('etc/network/interfaces:\n{}'.format(eni)) # we don't use collect_path as we're building a glob eni_dir = os.path.join(self.td.collect, "interfaces.d", "*.cfg") @@ -106,7 +106,7 @@ 'network passthrough not available on %s' % self.__class__) eni, eni_cfg = self.read_eni() - logger.debug('etc/network/interfaces:\n{}'.format(eni)) + self.logger.debug('etc/network/interfaces:\n{}'.format(eni)) expected_eni = self.get_expected_etc_network_interfaces() eni_lines = eni.split('\n') + eni_cfg.split('\n') @@ -172,13 +172,13 @@ } resolvconfpath = render2resolvconf.get(self._network_renderer(), None) self.assertTrue(resolvconfpath is not None) - logger.debug('Selected path to resolvconf: %s', resolvconfpath) + self.logger.debug('Selected path to resolvconf: %s', resolvconfpath) resolvconf = self.load_collect_file(resolvconfpath) - logger.debug('etc/resolv.conf:\n{}'.format(resolvconf)) + self.logger.debug('etc/resolv.conf:\n{}'.format(resolvconf)) resolv_lines = resolvconf.split('\n') - logger.debug('resolv.conf lines:\n{}'.format(resolv_lines)) + self.logger.debug('resolv.conf lines:\n{}'.format(resolv_lines)) # resolv.conf ''' nameserver X.Y.Z.A @@ -202,7 +202,7 @@ search: foo.bar ''' expected_ifaces = self.get_expected_etc_resolvconf() - logger.debug('parsed eni ifaces:\n{}'.format(expected_ifaces)) + self.logger.debug('parsed eni ifaces:\n{}'.format(expected_ifaces)) def _mk_dns_lines(dns_type, config): """ nameservers get a line per ns @@ -225,7 +225,7 @@ for k, v in iface.get('dns', {}).items(): print('k=%s v=%s' % (k, v)) for dns_line in _mk_dns_lines(k, v): - logger.debug('dns_line:%s', dns_line) + self.logger.debug('dns_line:%s', dns_line) self.assertTrue(dns_line in resolv_lines) def test_static_routes(self): @@ -263,24 +263,24 @@ def test_ip_output(self): '''check iproute2 'ip a' output with test input''' network_state = self.get_network_state() - logger.debug('expected_network_state:\n{}'.format( + self.logger.debug('expected_network_state:\n{}'.format( yaml.dump(network_state, default_flow_style=False, indent=4))) ip_a = self.load_collect_file("ip_a") - logger.debug('ip a:\n{}'.format(ip_a)) + self.logger.debug('ip a:\n{}'.format(ip_a)) ip_dict = helpers.ip_a_to_dict(ip_a) print('parsed ip_a dict:\n{}'.format( yaml.dump(ip_dict, default_flow_style=False, indent=4))) route_n = self.load_collect_file("route_n") - logger.debug("route -n:\n{}".format(route_n)) + self.logger.debug("route -n:\n{}".format(route_n)) route_6_n = self.load_collect_file("route_6_n") - logger.debug("route -6 -n:\n{}".format(route_6_n)) + self.logger.debug("route -6 -n:\n{}".format(route_6_n)) ip_route_show = self.load_collect_file("ip_route_show") - logger.debug("ip route show:\n{}".format(ip_route_show)) + self.logger.debug("ip route show:\n{}".format(ip_route_show)) for line in [line for line in ip_route_show.split('\n') if 'src' in line and not line.startswith('default')]: print('ip_route_show: line: %s' % line) @@ -292,7 +292,7 @@ line) if m: route_info = m.groupdict('') - logger.debug(route_info) + self.logger.debug(route_info) else: raise ValueError('Failed match ip_route_show line: %s' % line) @@ -399,7 +399,8 @@ configured_gws = __find_gw_config(subnet) print('iface:%s configured_gws: %s' % (ifname, configured_gws)) for 
gw_ip in configured_gws: - logger.debug('found a gateway in subnet config: %s', gw_ip) + self.logger.debug('found a gateway in subnet config: %s', + gw_ip) if ":" in gw_ip: route_d = routes['6'] else: @@ -407,7 +408,8 @@ found_gws = [line for line in route_d.split('\n') if 'UG' in line and gw_ip in line] - logger.debug('found gateways in guest output:\n%s', found_gws) + self.logger.debug('found gateways in guest output:\n%s', + found_gws) print('found_gws: %s\nexpected: %s' % (found_gws, configured_gws)) @@ -420,7 +422,8 @@ else: (dest, gw, genmask, flags, metric, ref, use, iface) = \ fgw.split() - logger.debug('configured gw:%s found gw:%s', gw_ip, gw) + self.logger.debug('configured gw:%s found gw:%s', + gw_ip, gw) self.assertEqual(gw_ip, gw) @@ -449,11 +452,6 @@ pass -class PreciseHWETTestNetworkBasic(relbase.precise_hwe_t, TestNetworkBasicAbs): - # FIXME: off due to hang at test: Starting execute cloud user/final scripts - __test__ = False - - class TrustyTestNetworkBasic(relbase.trusty, TestNetworkBasicAbs): __test__ = True @@ -481,11 +479,11 @@ __test__ = True -class ZestyTestNetworkBasic(relbase.zesty, TestNetworkBasicAbs): +class ArtfulTestNetworkBasic(relbase.artful, TestNetworkBasicAbs): __test__ = True -class ArtfulTestNetworkBasic(relbase.artful, TestNetworkBasicAbs): +class BionicTestNetworkBasic(relbase.bionic, TestNetworkBasicAbs): __test__ = True diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_network_static.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_network_static.py --- curtin-0.1.0~bzr532/tests/vmtests/test_network_static.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_network_static.py 2018-01-18 21:10:51.000000000 +0000 @@ -28,12 +28,6 @@ pass -class PreciseHWETTestNetworkStatic(relbase.precise_hwe_t, - TestNetworkStaticAbs): - # FIXME: off due to hang at test: Starting execute cloud user/final scripts - __test__ = False - - class TrustyTestNetworkStatic(relbase.trusty, TestNetworkStaticAbs): __test__ = True @@ -65,11 +59,11 @@ __test__ = True -class ZestyTestNetworkStatic(relbase.zesty, TestNetworkStaticAbs): +class ArtfulTestNetworkStatic(relbase.artful, TestNetworkStaticAbs): __test__ = True -class ArtfulTestNetworkStatic(relbase.artful, TestNetworkStaticAbs): +class BionicTestNetworkStatic(relbase.bionic, TestNetworkStaticAbs): __test__ = True diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_network_static_routes.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_network_static_routes.py --- curtin-0.1.0~bzr532/tests/vmtests/test_network_static_routes.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_network_static_routes.py 2018-01-18 21:10:51.000000000 +0000 @@ -16,12 +16,6 @@ conf_file = "examples/tests/network_static_routes.yaml" -class PreciseHWETTestNetworkStaticRoutes(relbase.precise_hwe_t, - TestNetworkStaticRoutesAbs): - # FIXME: off due to hang at test: Starting execute cloud user/final scripts - __test__ = False - - class TrustyTestNetworkStaticRoutes(relbase.trusty, TestNetworkStaticRoutesAbs): __test__ = True @@ -53,11 +47,12 @@ __test__ = True -class ZestyTestNetworkStaticRoutes(relbase.zesty, TestNetworkStaticRoutesAbs): +class ArtfulTestNetworkStaticRoutes(relbase.artful, + TestNetworkStaticRoutesAbs): __test__ = True -class ArtfulTestNetworkStaticRoutes(relbase.artful, +class BionicTestNetworkStaticRoutes(relbase.bionic, TestNetworkStaticRoutesAbs): __test__ = True diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_network_vlan.py 
curtin-17.1-11-ga4c9636b/tests/vmtests/test_network_vlan.py --- curtin-0.1.0~bzr532/tests/vmtests/test_network_vlan.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_network_vlan.py 2018-01-18 21:10:51.000000000 +0000 @@ -1,4 +1,3 @@ -from . import logger from .releases import base_vm_classes as relbase from .releases import centos_base_vm_classes as centos_relbase from .test_network import TestNetworkBaseTestsAbs @@ -20,9 +19,9 @@ def get_vlans(self): network_state = self.get_network_state() - logger.debug('get_vlans ns:\n%s', yaml.dump(network_state, - default_flow_style=False, - indent=4)) + self.logger.debug('get_vlans ns:\n%s', + yaml.dump(network_state, default_flow_style=False, + indent=4)) interfaces = network_state.get('interfaces') return [iface for iface in interfaces.values() if iface['type'] == 'vlan'] @@ -68,22 +67,6 @@ pass -class PreciseTestNetworkVlan(relbase.precise, TestNetworkVlanAbs): - __test__ = True - - # precise ip -d link show output is different (of course) - def test_vlan_enabled(self): - - # we must have at least one - self.assertGreaterEqual(len(self.get_vlans()), 1) - - # did they get configured? - for vlan in self.get_vlans(): - link_file = "ip_link_show_" + vlan['name'] - vlan_msg = "vlan id " + str(vlan['vlan_id']) - self.check_file_regex(link_file, vlan_msg) - - class TrustyTestNetworkVlan(relbase.trusty, TestNetworkVlanAbs): __test__ = True @@ -96,11 +79,11 @@ __test__ = True -class ZestyTestNetworkVlan(relbase.zesty, TestNetworkVlanAbs): +class ArtfulTestNetworkVlan(relbase.artful, TestNetworkVlanAbs): __test__ = True -class ArtfulTestNetworkVlan(relbase.artful, TestNetworkVlanAbs): +class BionicTestNetworkVlan(relbase.bionic, TestNetworkVlanAbs): __test__ = True diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_nvme.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_nvme.py --- curtin-0.1.0~bzr532/tests/vmtests/test_nvme.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_nvme.py 2018-01-18 21:10:51.000000000 +0000 @@ -16,7 +16,7 @@ disk_to_check = [('main_disk', 1), ('main_disk', 2), ('main_disk', 15), ('nvme_disk', 1), ('nvme_disk', 2), ('nvme_disk', 3), ('second_nvme', 1)] - collect_scripts = [textwrap.dedent(""" + collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" cd OUTPUT_COLLECT_D ls /sys/class/ > sys_class ls /sys/class/nvme/ > ls_nvme @@ -53,11 +53,6 @@ self.check_file_strippedline("ls_dev_nvme", "/dev/nvme1") -class PreciseTestNvme(relbase.precise, TestNvmeAbs): - __test__ = False - # Precise kernel doesn't have NVME support, with TrustyHWE it would - - class TrustyTestNvme(relbase.trusty, TestNvmeAbs): __test__ = True @@ -84,11 +79,15 @@ print("test_ptable does not work for Trusty") -class XenialTestNvme(relbase.xenial, TestNvmeAbs): +class XenialGATestNvme(relbase.xenial_ga, TestNvmeAbs): __test__ = True -class ZestyTestNvme(relbase.zesty, TestNvmeAbs): +class XenialHWETestNvme(relbase.xenial_hwe, TestNvmeAbs): + __test__ = True + + +class XenialEdgeTestNvme(relbase.xenial_edge, TestNvmeAbs): __test__ = True @@ -96,6 +95,10 @@ __test__ = True +class BionicTestNvme(relbase.bionic, TestNvmeAbs): + __test__ = True + + class TestNvmeBcacheAbs(VMBaseClass): arch_skip = [ "s390x", # nvme is a pci device, no pci on s390x @@ -107,7 +110,7 @@ uefi = True disk_to_check = [('sda', 1), ('sda', 2), ('sda', 3)] - collect_scripts = [textwrap.dedent(""" + collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" cd OUTPUT_COLLECT_D ls /sys/class/ > sys_class ls 
/sys/class/nvme/ > ls_nvme @@ -165,13 +168,21 @@ self.check_file_regex("bcache_cache_mode", r"\[writeback\]") -class XenialTestNvmeBcache(relbase.xenial, TestNvmeBcacheAbs): +class XenialGATestNvmeBcache(relbase.xenial_ga, TestNvmeBcacheAbs): + __test__ = True + + +class XenialHWETestNvmeBcache(relbase.xenial_hwe, TestNvmeBcacheAbs): __test__ = True -class ZestyTestNvmeBcache(relbase.zesty, TestNvmeBcacheAbs): +class XenialEdgeTestNvmeBcache(relbase.xenial_edge, TestNvmeBcacheAbs): __test__ = True class ArtfulTestNvmeBcache(relbase.artful, TestNvmeBcacheAbs): __test__ = True + + +class BionicTestNvmeBcache(relbase.bionic, TestNvmeBcacheAbs): + __test__ = True diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_old_apt_features.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_old_apt_features.py --- curtin-0.1.0~bzr532/tests/vmtests/test_old_apt_features.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_old_apt_features.py 2018-01-18 21:10:51.000000000 +0000 @@ -39,7 +39,7 @@ extra_disks = [] fstab_expected = {} disk_to_check = [] - collect_scripts = [textwrap.dedent(""" + collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" cd OUTPUT_COLLECT_D cat /etc/fstab > fstab ls /dev/disk/by-dname > ls_dname diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_raid5_bcache.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_raid5_bcache.py --- curtin-0.1.0~bzr532/tests/vmtests/test_raid5_bcache.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_raid5_bcache.py 2018-01-18 21:10:51.000000000 +0000 @@ -8,7 +8,7 @@ interactive = False extra_disks = ['10G', '10G', '10G', '10G'] active_mdadm = "1" - collect_scripts = [textwrap.dedent(""" + collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" cd OUTPUT_COLLECT_D cat /etc/fstab > fstab mdadm --detail --scan > mdadm_status @@ -65,11 +65,6 @@ self.check_file_regex("bcache_cache_mode", r"\[writeback\]") -class PreciseHWETTestRaid5Bcache(relbase.precise_hwe_t, TestMdadmBcacheAbs): - # FIXME: off due to failing install: RUN_ARRAY failed: Invalid argument - __test__ = False - - class TrustyTestRaid5Bcache(relbase.trusty, TestMdadmBcacheAbs): __test__ = True # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect @@ -94,13 +89,21 @@ __test__ = True -class XenialTestRaid5Bcache(relbase.xenial, TestMdadmBcacheAbs): +class XenialGATestRaid5Bcache(relbase.xenial_ga, TestMdadmBcacheAbs): __test__ = True -class ZestyTestRaid5Bcache(relbase.zesty, TestMdadmBcacheAbs): +class XenialHWETestRaid5Bcache(relbase.xenial_hwe, TestMdadmBcacheAbs): + __test__ = True + + +class XenialEdgeTestRaid5Bcache(relbase.xenial_edge, TestMdadmBcacheAbs): __test__ = True class ArtfulTestRaid5Bcache(relbase.artful, TestMdadmBcacheAbs): __test__ = True + + +class BionicTestRaid5Bcache(relbase.bionic, TestMdadmBcacheAbs): + __test__ = True diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_simple.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_simple.py --- curtin-0.1.0~bzr532/tests/vmtests/test_simple.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_simple.py 2018-01-18 21:10:51.000000000 +0000 @@ -9,7 +9,7 @@ conf_file = "examples/tests/simple.yaml" extra_disks = [] extra_nics = [] - collect_scripts = [textwrap.dedent(""" + collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" cd OUTPUT_COLLECT_D sfdisk --list > sfdisk_list for d in /dev/[sv]d[a-z] /dev/xvd?; do @@ -20,6 +20,7 @@ blkid > blkid cat /proc/partitions > proc_partitions cp 
/etc/network/interfaces interfaces + cp /etc/netplan/50-cloud-init.yaml netplan.yaml if [ -f /var/log/cloud-init-output.log ]; then cp /var/log/cloud-init-output.log . fi @@ -29,7 +30,7 @@ def test_output_files_exist(self): self.output_files_exist(["sfdisk_list", "blkid", - "proc_partitions", "interfaces"]) + "proc_partitions"]) class TrustyTestSimple(relbase.trusty, TestSimple): @@ -40,9 +41,15 @@ __test__ = True -class ZestyTestSimple(relbase.zesty, TestSimple): +class ArtfulTestSimple(relbase.artful, TestSimple): __test__ = True + def test_output_files_exist(self): + self.output_files_exist(["netplan.yaml"]) -class ArtfulTestSimple(relbase.artful, TestSimple): + +class BionicTestSimple(relbase.bionic, TestSimple): __test__ = True + + def test_output_files_exist(self): + self.output_files_exist(["netplan.yaml"]) diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_ubuntu_core.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_ubuntu_core.py --- curtin-0.1.0~bzr532/tests/vmtests/test_ubuntu_core.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_ubuntu_core.py 2018-01-18 21:10:51.000000000 +0000 @@ -8,7 +8,7 @@ target_ftype = "root-image.xz" interactive = False conf_file = "examples/tests/ubuntu_core.yaml" - collect_scripts = [textwrap.dedent(""" + collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" cd OUTPUT_COLLECT_D cat /proc/partitions > proc_partitions ls -al /dev/disk/by-uuid/ > ls_uuid diff -Nru curtin-0.1.0~bzr532/tests/vmtests/test_uefi_basic.py curtin-17.1-11-ga4c9636b/tests/vmtests/test_uefi_basic.py --- curtin-0.1.0~bzr532/tests/vmtests/test_uefi_basic.py 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tests/vmtests/test_uefi_basic.py 2018-01-18 21:10:51.000000000 +0000 @@ -9,10 +9,10 @@ interactive = False arch_skip = ["s390x"] conf_file = "examples/tests/uefi_basic.yaml" - extra_disks = [] + extra_disks = ['4G'] uefi = True disk_to_check = [('main_disk', 1), ('main_disk', 2), ('main_disk', 3)] - collect_scripts = [textwrap.dedent(""" + collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" cd OUTPUT_COLLECT_D blkid -o export /dev/vda > blkid_output_vda blkid -o export /dev/vda1 > blkid_output_vda1 @@ -76,16 +76,6 @@ self.assertEqual(self.disk_block_size, size) -class PreciseUefiTestBasic(relbase.precise, TestBasicAbs): - __test__ = True - - def test_ptable(self): - print("test_ptable does not work for Precise") - - def test_dname(self): - print("test_dname does not work for Precise") - - class TrustyUefiTestBasic(relbase.trusty, TestBasicAbs): __test__ = True @@ -103,11 +93,15 @@ __test__ = True -class XenialUefiTestBasic(relbase.xenial, TestBasicAbs): +class XenialGAUefiTestBasic(relbase.xenial_ga, TestBasicAbs): __test__ = True -class ZestyUefiTestBasic(relbase.zesty, TestBasicAbs): +class XenialHWEUefiTestBasic(relbase.xenial_hwe, TestBasicAbs): + __test__ = True + + +class XenialEdgeUefiTestBasic(relbase.xenial_edge, TestBasicAbs): __test__ = True @@ -115,8 +109,8 @@ __test__ = True -class PreciseUefiTestBasic4k(PreciseUefiTestBasic): - disk_block_size = 4096 +class BionicUefiTestBasic(relbase.bionic, TestBasicAbs): + __test__ = True class TrustyUefiTestBasic4k(TrustyUefiTestBasic): @@ -127,13 +121,13 @@ __test__ = True -class XenialUefiTestBasic4k(XenialUefiTestBasic): +class XenialGAUefiTestBasic4k(XenialGAUefiTestBasic): disk_block_size = 4096 -class ZestyUefiTestBasic4k(ZestyUefiTestBasic): +class ArtfulUefiTestBasic4k(ArtfulUefiTestBasic): disk_block_size = 4096 -class 
ArtfulUefiTestBasic4k(ArtfulUefiTestBasic): +class BionicUefiTestBasic4k(BionicUefiTestBasic): disk_block_size = 4096 diff -Nru curtin-0.1.0~bzr532/tools/build-deb curtin-17.1-11-ga4c9636b/tools/build-deb --- curtin-0.1.0~bzr532/tools/build-deb 2017-10-06 02:20:05.000000000 +0000 +++ curtin-17.1-11-ga4c9636b/tools/build-deb 2018-01-18 21:10:51.000000000 +0000 @@ -32,34 +32,64 @@ start_d=$PWD top_d=$(cd "$(dirname "${0}")"/.. && pwd) +ref=HEAD + +if [ $# -eq 0 ]; then + # if no opts given, build source, without depends, and not signed. + set -- -S -d -us -uc +fi # grab the first line in the changelog # hopefully this pulls the version info there -# resulting in something like: 0.1.0~bzrREVNO-1~trunk1 +# resulting in something like: UPSTREAM_VER-0ubuntu1 clogver_o=$(sed -n '1s,.*(\([^)]*\)).*,\1,p' debian/changelog.trunk) -revno=$(bzr revno) || fail "failed to get revno" -clogver_upstream=${clogver_o%%-*} -clogver_debian=${clogver_o#*-} - -uver=$(echo "${clogver_upstream}" | sed "s,REVNO,$revno,") +# uver gets 17.1-3-gc85e2562 '17.1' if this is a tag. +uver=$(git describe --long --abbrev=8 "--match=[0-9][0-9]*" "$ref") +clogver_debian=${clogver_o##*-} clogver_new="${uver}-${clogver_debian}" +# uver_base_rel rel gets '17.1' +uver_base_rel=${uver%%-*} +if [ "${uver_base_rel}" = "${uver}" ]; then + echo "building from a tagged version ($uver)" 1>&2 +fi + TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${bname}.XXXXXX") trap cleanup EXIT -echo "building upstream version $uver, debian ver=${clogver_debian}" +echo "building version ${uver}, debian_ver=${clogver_debian}" dir="${sourcename}-$uver" tarball="${sourcename}_$uver.orig.tar.gz" myd=$(dirname "$0") -"$myd/export-tarball" "${revno}" "${TEMP_D}/$tarball" +"$myd/make-tarball" --output="$TEMP_D/$tarball" --long "$ref" echo "created ${tarball}" cd "${TEMP_D}" tar xzf "$tarball" || fail "failed extract tarball" + +if [ ! -d "$dir" ]; then + # make-tarball will create the directory name based on the + # contents of debian/changelog.trunk in the version provided. + # if that differs from what is here, then user has changes. + for d in ${sourcename}*; do + [ -d "$d" ] && break + done + if [ -d "$d" ]; then + { + echo "WARNING: git at '${uver}' had different version" + echo " in debian/changelog.trunk than your tree. version there" + echo " is '$d' working directory had $uver" + } 1>&2 + dir=$d + else + echo "did not find a directory created by make-tarball. sorry." 
1>&2
+        exit
+    fi
+fi
 cd "$dir" || fail "failed cd $dir"
 
 # move files ending in .trunk to name without .trunk
@@ -68,7 +98,10 @@
     mv "$f" "${f%.trunk}"
 done
-sed -i -e "1s,${clogver_o},${clogver_new}," \
+# first line of debian/changelog looks like
+#    curtin () UNRELEASED; urgency=low
+# fix the version and UNRELEASED
+sed -i -e "1s,([^)]*),(${clogver_new})," \
     -e "1s,UNRELEASED,${RELEASE}," debian/changelog ||
     fail "failed to write debian/changelog"
 
 debuild "$@" || fail "debuild failed"
diff -Nru curtin-0.1.0~bzr532/tools/export-tarball curtin-17.1-11-ga4c9636b/tools/export-tarball
--- curtin-0.1.0~bzr532/tools/export-tarball	2017-10-06 02:20:05.000000000 +0000
+++ curtin-17.1-11-ga4c9636b/tools/export-tarball	1970-01-01 00:00:00.000000000 +0000
@@ -1,61 +0,0 @@
-#!/bin/sh
-
-set -e
-sourcename="curtin"
-
-Usage() {
-    cat <&2; exit 1; }
-cleanup() {
-    [ -z "$TEMP_D" ] || rm -Rf "$TEMP_D"
-}
-
-export_uncommitted=""
-if [ "${UNCOMMITTED:-0}" != "0" ]; then
-    export_uncommitted="--uncommitted"
-fi
-
-[ "$1" = "-h" -o "$1" = "--help" ] && { Usage; exit 0; }
-
-TEMP_D=$(mktemp -d)
-trap cleanup EXIT
-
-case "${1:-HEAD}" in
-    tag:*) version="${1#tag:}";;
-    HEAD) revno="$(bzr revno)"; revargs="-r $revno";;
-    [0-9]*) revno="$1" ; revargs="-r $1";;
-esac
-output="$2"
-
-if [ -z "$version" ]; then
-    bzr cat $revargs debian/changelog.trunk > "$TEMP_D/clog" ||
-        fail "failed to extract debian/change.log.trunk at $revargs"
-
-    clogver_o=$(sed -n '1s,.*(\([^)]*\)).*,\1,p' $TEMP_D/clog)
-    clogver_upstream=${clogver_o%%-*}
-    mmm=${clogver_o%%~*}
-    version="$mmm~bzr$revno"
-fi
-
-if [ -z "$output" ]; then
-    output="$sourcename-$version.tar.gz"
-fi
-
-bzr export ${export_uncommitted} \
-    --format=tgz --root="$sourcename-${version}" $revargs $output
-echo "wrote $output"
diff -Nru curtin-0.1.0~bzr532/tools/jenkins-runner curtin-17.1-11-ga4c9636b/tools/jenkins-runner
--- curtin-0.1.0~bzr532/tools/jenkins-runner	2017-10-06 02:20:05.000000000 +0000
+++ curtin-17.1-11-ga4c9636b/tools/jenkins-runner	2018-01-18 21:10:51.000000000 +0000
@@ -105,12 +105,16 @@
 # avoid LOG info by running python3 tests/vmtests/image_sync.py
 # rather than python3 -m tests.vmtests.image_sync (LP: #1594465)
-echo "Working with images in $IMAGE_DIR"
-fmt=" %(release)-7s %(arch)s/%(subarch)s %(version_name)-10s"
+echo "Quering synced ephemeral images/kernels in $IMAGE_DIR"
+printf "%.s=" {1..86}; echo
+printf " %6s %8s %-10s %s/%-14s %s\n" "Release" "Codename" "ImageDate" "Arch" "SubArch" "Path"
+printf "%.s-" {1..86}; echo
+fmt=" %(version)6s %(release)8s %(version_name)-10s %(arch)s/%(subarch)-14s %(path)s"
 PYTHONPATH="$PWD" python3 tests/vmtests/image_sync.py query \
-    --output-format="$fmt" "$IMAGE_DIR" ftype=root-image.gz ||
+    --output-format="$fmt" "$IMAGE_DIR" "kflavor=generic" "ftype~(root-image.gz|squashfs)" ||
     { ret=$?; echo "FATAL: error querying images in $IMAGE_DIR" 1>&2; exit $ret; }
+printf "%.s=" {1..86}; printf "\n\n"
 
 echo "$(date -R): vmtest start: nosetests3 ${pargs[*]} ${ntargs[*]}"
 nosetests3 "${pargs[@]}" "${ntargs[@]}"
diff -Nru curtin-0.1.0~bzr532/tools/launch curtin-17.1-11-ga4c9636b/tools/launch
--- curtin-0.1.0~bzr532/tools/launch	2017-10-06 02:20:05.000000000 +0000
+++ curtin-17.1-11-ga4c9636b/tools/launch	2018-01-18 21:10:51.000000000 +0000
@@ -5,15 +5,17 @@
 HTTP_PID=""
 XKVM_PID=""
 MY_D=$(dirname "$0")
-DEFAULT_ROOT_ARG="root=LABEL=cloudimg-rootfs"
+DEFAULT_ROOT_ARG="root=/dev/disk/by-id/virtio-boot-disk"
 error() { echo "$@" 1>&2; }
 
 Usage() {
     cat < "$udata" || { error "failed to write user-data"; return 1; }
-        bootimg="${TEMP_D}/boot.img"
-        qemu-img create -f qcow2 -b "${bootimg_dist}" "$bootimg" 2G ||
-            { error "failed create from ${bootimg_dist}"; return 1; }
-
         local seedargs=""
         seedargs=()
         if [ -n "$kernel" ]; then
             local append="" root=""
-            # Note: root_arg is by default done by LABEL. This assumes
-            #  a.) our root device is not multipath
-            #  b.) no other disks attached will have this LABEL
-            #  c.) the LABEL is in fact correct.
-            # all of these assumptions are true under vmtest.
+            # Note: root_arg is by default done by serial. This assumes
+            #  the root device is not multipath
             if [ -z "$root_arg" ]; then
                 debug 1 "WARN: root_arg is empty with kernel."
             fi
-            append="${root_arg:+${root_arg} }ds=nocloud-net;seedfrom=$burl"
+            root_arg="${root_arg//PUBURL/$burl}"
+            # Note: cloud-init requires seedfrom= to have a trailing-slash
+            append="${root_arg:+${root_arg} }ds=nocloud-net;seedfrom=${burl}/"
 
             local console_name=""
             case "${arch_hint}" in
@@ -754,7 +767,7 @@
         seed="${TEMP_D}/seed.img"
         cloud-localds "$seed" "$udata" "$mdata" ||
             { error "failed cloud-localds"; return 1; }
-        seedargs=( "-drive" "file=${seed},if=virtio,media=cdrom" )
+        seedargs=( "--disk=file=${seed},if=virtio,media=cdrom" )
     fi
 
     local netargs
@@ -778,14 +791,14 @@
             #debug mode
             serial_args="-serial ${serial_log} -monitor stdio"
         fi
     fi
 
-    # -monitor stdio
     cmd=(
-        xkvm "${pt[@]}" "${netargs[@]}" --
+        xkvm
+        ${bootimg:+"--disk=$bootimg,if=virtio,serial=boot-disk"}
+        "${pt[@]}" "${netargs[@]}"
+        "${disk_args[@]}"
+        --
         -smp ${smp} -m ${mem} ${serial_args} ${video}
-        -drive "file=$bootimg,if=none,cache=unsafe,format=qcow2,id=boot,index=0"
-        -device "virtio-blk,drive=boot"
-        "${disk_args[@]}"
         "${seedargs[@]}"
     )
@@ -829,7 +842,6 @@
     echo $size
 }
 
-
 test_start_http() {
     # run this like:
     #  HTTP_PORT_MIN=59000 HTTP_PORT_MAX=63001 ./tools/launch \
diff -Nru curtin-0.1.0~bzr532/tools/make-tarball curtin-17.1-11-ga4c9636b/tools/make-tarball
--- curtin-0.1.0~bzr532/tools/make-tarball	1970-01-01 00:00:00.000000000 +0000
+++ curtin-17.1-11-ga4c9636b/tools/make-tarball	2018-01-18 21:10:51.000000000 +0000
@@ -0,0 +1,50 @@
+#!/bin/sh
+set -e
+
+TEMP_D=""
+cleanup() {
+    [ -z "$TEMP_D" ] || rm -Rf "${TEMP_D}"
+}
+trap cleanup EXIT
+
+Usage() {
+    cat <&2; exit 1; }
+
+long_opt=""
+while [ $# -ne 0 ]; do
+    cur=$1; next=$2
+    case "$cur" in
+        -o|--output) output=$next; shift;;
+        --long) long_opt="--long";;
+        --) shift; break;;
+    esac
+    shift;
+done
+
+rev=${1:-HEAD}
+version=$(git describe --abbrev=8 "--match=[0-9][0-9]*" ${long_opt} $rev)
+
+archive_base="curtin-$version"
+if [ -z "$output" ]; then
+    output="$archive_base.tar.gz"
+fi
+
+TEMP_D=$(mktemp -d)
+tar=${output##*/}
+tar="$TEMP_D/${tar%.gz}"
+git archive --format=tar --prefix="$archive_base/" "$rev" > "$tar"
+gzip -9 -c "$tar" > "$output"
+echo "$output"
diff -Nru curtin-0.1.0~bzr532/tools/new-upstream-snapshot curtin-17.1-11-ga4c9636b/tools/new-upstream-snapshot
--- curtin-0.1.0~bzr532/tools/new-upstream-snapshot	1970-01-01 00:00:00.000000000 +0000
+++ curtin-17.1-11-ga4c9636b/tools/new-upstream-snapshot	2018-01-18 21:10:51.000000000 +0000
@@ -0,0 +1,169 @@
+#!/bin/sh
+
+TEMP_D=""
+CR='
+'
+error() { echo "$@" 1>&2; }
+fail() { [ $# -eq 0 ] || error "$@"; exit 1; }
+Usage() {
+cat <&2
+                if git merge-base --is-ancestor "$commit" "$from_ref"; then
+                    drops="${drops} debian/patches/$bname"
+                fi
+                ;;
+            *) echo "$bname${extra:+ ${extra}}";;
+        esac
+    done < $dpseries > "${TEMP_D}/series"
+    drops=${drops# }
+    if [ -n "$drops" ]; then
+        cp "${TEMP_D}/series" "$dpseries" ||
+            fail "failed copying to $dpseries"
+        if [ ! -s $dpseries ]; then
+            git rm --force "$dpseries" ||
+                fail "failed removing empty $dpseries: git rm $dpseries"
+        fi
+        msg="drop cherry picks before merge from ${from_ref} at $new_upstream_ver"
+        msg="$msg${CR}${CR}drop the following cherry picks:"
+        for file in $drops; do
+            git rm "$file" || fail "failed to git rm $file"
+            msg="${msg}$CR $file"
+        done
+        git commit -m "$msg" "$dpseries" $drops
+    fi
+fi
+
+git merge "${from_ref}" -m "merge from $from_ref at $new_upstream_ver" ||
+    fail "failed: git merge ${from_ref} -m 'merge from $from_ref ..'"
+clog="${TEMP_D}/changelog"
+gitlog="${TEMP_D}/gitlog"
+
+git log --first-parent --no-decorate --format=full \
+    "${merge_base}..${from_ref}" > "$gitlog" ||
+    fail "failed git log ${merge_base}..${from_ref}"
+
+cat >> "$clog" <> "$clog" ||
+    fail "failed git_log_to_dch"
+cat >> "$clog" < $(date -R)
+
+EOF
+
+cat "$clog" "debian/changelog" > "$TEMP_D/newlog" &&
+    cp "$TEMP_D/newlog" "debian/changelog" ||
+    fail "failed replacing debian/changelog"
+
+dch -e || fail "dch -e exited $?"
+
+git diff
+
+echo -n "Commit this change? (Y/n): "
+read answer || fail "failed to read answer"
+case "$answer" in
+    n|[Nn][oO]) exit 1;;
+esac
+
+msg="update changelog (new upstream snapshot $new_upstream_ver)."
+git commit -m "$msg" debian/changelog ||
+    fail "failed to commit '$msg'"
diff -Nru curtin-0.1.0~bzr532/tools/xkvm curtin-17.1-11-ga4c9636b/tools/xkvm
--- curtin-0.1.0~bzr532/tools/xkvm	2017-10-06 02:20:05.000000000 +0000
+++ curtin-17.1-11-ga4c9636b/tools/xkvm	2018-01-18 21:10:51.000000000 +0000
@@ -11,6 +11,8 @@
 # OVS_CLEANUP gets populated with bridge:devname pairs used with ovs
 OVS_CLEANUP=( )
 MAC_PREFIX="52:54:00:12:34"
+# allow this to be set externally.
+_QEMU_SUPPORTS_FILE_LOCKING="${_QEMU_SUPPORTS_FILE_LOCKING}"
 KVM="kvm"
 declare -A KVM_DEVOPTS
 
@@ -119,6 +121,21 @@
     return 1
 }
 
+qemu_supports_file_locking() {
+    # hackily check if qemu has file.locking in -drive params (LP: #1716028)
+    if [ -z "$_QEMU_SUPPORTS_FILE_LOCKING" ]; then
+        # The only way we could find to check presense of file.locking is
+        # qmp (query-qmp-schema). Simply checking if the virtio-blk driver
+        # supports 'share-rw' is expected to be equivalent and simpler.
+        isdevopt virtio-blk share-rw &&
+            _QEMU_SUPPORTS_FILE_LOCKING=true ||
+            _QEMU_SUPPORTS_FILE_LOCKING=false
+        debug 1 "qemu supports file locking = ${_QEMU_SUPPORTS_FILE_LOCKING}"
+    fi
+    [ "$_QEMU_SUPPORTS_FILE_LOCKING" = "true" ]
+    return
+}
+
 padmac() {
     # return a full mac, given a subset.
     # assume whatever is input is the last portion to be
@@ -443,7 +460,7 @@
         out=$(LANG=C qemu-img info "$file") &&
             fmt=$(echo "$out" | awk '$0 ~ /^file format:/ { print $3 }') ||
             { error "failed to determine format of $file"; return 1; }
-    else
+    elif [ -z "$fmt" ]; then
         fmt=raw
     fi
     if [ -z "$driver" ]; then
@@ -470,6 +487,12 @@
             id=*|if=*|driver=*|$file|file=*) continue;;
             fmt=*|format=*) continue;;
             serial=*|bus=*|unit=*|index=*) continue;;
+            file.locking=*)
+                qemu_supports_file_locking || {
+                    debug 2 "qemu has no file locking." \
+                        "Dropping '$tok' from: $cur"
+                    continue
+                };;
         esac
         isdevopt "$driver" "$tok" && devopts="${devopts},$tok" ||
             diskopts="${diskopts},${tok}"
diff -Nru curtin-0.1.0~bzr532/tox.ini curtin-17.1-11-ga4c9636b/tox.ini
--- curtin-0.1.0~bzr532/tox.ini	2017-10-06 02:20:05.000000000 +0000
+++ curtin-17.1-11-ga4c9636b/tox.ini	2018-01-18 21:10:51.000000000 +0000
@@ -43,7 +43,7 @@
 # set basepython because tox 1.6 (trusty) does not support generated environments
 basepython = python3
 deps = {[testenv]deps}
-    pylint==1.5.4
+    pylint==1.8.1
     bzr+lp:simplestreams
 commands = {envpython} -m pylint --errors-only {posargs:curtin tests/vmtests}
 
@@ -51,7 +51,7 @@
 # set basepython because tox 1.6 (trusty) does not support generated environments
 basepython = python2.7
 deps = {[testenv]deps}
-    pylint==1.5.4
+    pylint==1.8.1
 commands = {envpython} -m pylint --errors-only {posargs:curtin}
 
 [testenv:docs]
@@ -101,4 +101,4 @@
 
 [flake8]
 builtins = _
-exclude = .venv,.bzr,.tox,dist,doc,*lib/python*,*egg,build
+exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build