diff -Nru curtin-0.1.0~bzr276/bin/curtin curtin-0.1.0~bzr314/bin/curtin --- curtin-0.1.0~bzr276/bin/curtin 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/bin/curtin 2015-11-23 16:22:09.000000000 +0000 @@ -1,8 +1,7 @@ #!/bin/sh PY3OR2_MAIN="curtin.commands.main" PY3OR2_MCHECK="curtin.deps.check" -PY3OR2_MINSTALL="curtin.deps.install" -PY3OR2_PYTHONS=${PY3OR2_PYTHONS:-"python3:python2:python"} +PY3OR2_PYTHONS=${PY3OR2_PYTHONS:-"python3:python"} PYTHON=${PY3OR2_PYTHON} debug() { @@ -27,25 +26,28 @@ if [ ! -n "$PYTHON" ]; then first_exe="" oifs="$IFS"; IFS=":" + best=0 + best_exe="" for p in $PY3OR2_PYTHONS; do command -v "$p" >/dev/null 2>&1 || { debug "$p: not in path"; continue; } - [ -n "$first_exe" ] || first_exe="$p" [ -z "$PY3OR2_MCHECK" ] && PYTHON=$p && break - out=$($p -m "$PY3OR2_MCHECK" 2>&1) && PYTHON="$p" && break - debug "$p: $out" + out=$($p -m "$PY3OR2_MCHECK" "$@" 2>&1) && PYTHON="$p" && + { debug "$p passed check [$p -m $PY3OR2_MCHECK $*]"; break; } + ret=$? + debug "$p [$ret]: $out" + # exit code of 1 is unuseable + [ $ret -eq 1 ] && continue + [ -n "$first_exe" ] || first_exe="$p" + # higher non-zero exit values indicate more plausible usability + [ $best -lt $ret ] && best_exe="$p" && best=$ret && + debug "current best: $best_exe" done IFS="$oifs" - if [ -z "$PYTHON" ]; then - if [ -n "$first_exe" -a -n "$PY3OR2_MINSTALL" ]; then - debug "attempting deps install with $first_exe $PY3OR2_MINSTALL" - "$first_exe" -m "$PY3OR2_MINSTALL" || - fail "failed to install deps!" - PYTHON="$first_exe" - fi - fi + [ -z "$best_exe" -a -n "$first_exe" ] && best_exe="$first_exe" + [ -n "$PYTHON" ] || PYTHON="$best_exe" [ -n "$PYTHON" ] || fail "no availble python? 
[PY3OR2_DEBUG=1 for more info]" fi -debug "executing: $PYTHON -m "$PY3OR2_MAIN" $*" +debug "executing: $PYTHON -m \"$PY3OR2_MAIN\" $*" exec $PYTHON -m "$PY3OR2_MAIN" "$@" diff -Nru curtin-0.1.0~bzr276/curtin/commands/block_meta.py curtin-0.1.0~bzr314/curtin/commands/block_meta.py --- curtin-0.1.0~bzr276/curtin/commands/block_meta.py 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/curtin/commands/block_meta.py 2015-11-23 16:22:09.000000000 +0000 @@ -31,13 +31,12 @@ import sys import tempfile import time +import re SIMPLE = 'simple' SIMPLE_BOOT = 'simple-boot' CUSTOM = 'custom' -CUSTOM_REQUIRED_PACKAGES = ['mdadm', 'lvm2', 'bcache-tools'] - CMD_ARGUMENTS = ( ((('-D', '--devices'), {'help': 'which devices to operate on', 'action': 'append', @@ -154,6 +153,45 @@ util.subp(cmd, rcs=[0, 1, 2, 5], capture=True) +def block_find_sysfs_path(devname): + # Look up any block device holders. Handle devices and partitions + # as devnames (vdb, md0, vdb7) + if not devname: + return [] + + sys_class_block = '/sys/class/block/' + basename = os.path.basename(devname) + # try without parent blockdevice, then prepend parent + paths = [ + os.path.join(sys_class_block, basename), + os.path.join(sys_class_block, + re.split('[\d+]', basename)[0], basename), + ] + + # find path to devname directory in sysfs + devname_sysfs = None + for path in paths: + if os.path.exists(path): + devname_sysfs = path + + if devname_sysfs is None: + err = ('No sysfs path to device:' + ' {}'.format(devname_sysfs)) + LOG.error(err) + raise ValueError(err) + + return devname_sysfs + + +def get_holders(devname): + devname_sysfs = block_find_sysfs_path(devname) + if devname_sysfs: + LOG.debug('Getting blockdev holders: {}'.format(devname_sysfs)) + return os.listdir(os.path.join(devname_sysfs, 'holders')) + + return [] + + def clear_holders(sys_block_path): holders = os.listdir(os.path.join(sys_block_path, "holders")) LOG.info("clear_holders running on '%s', with holders '%s'" % @@ -463,6 +501,14 @@ 
make_dname(info.get('id'), storage_config) +def getnumberoflogicaldisks(device, storage_config): + logicaldisks = 0 + for key, item in storage_config.items(): + if item.get('device') == device and item.get('flag') == "logical": + logicaldisks = logicaldisks + 1 + return logicaldisks + + def partition_handler(info, storage_config): device = info.get('device') size = info.get('size') @@ -477,11 +523,18 @@ disk = get_path_to_storage_volume(device, storage_config) partnumber = determine_partition_number(info.get('id'), storage_config) - # Offset is either 1 sector after last partition, or near the beginning if - # this is the first partition + disk_kname = os.path.split( + get_path_to_storage_volume(device, storage_config))[-1] + # consider the disks logical sector size when calculating sectors + try: + prefix = "/sys/block/%s/queue/" % disk_kname + with open(prefix + "logical_block_size", "r") as f: + l = f.readline() + logical_block_size_bytes = int(l) + except: + logical_block_size_bytes = 512 + if partnumber > 1: - disk_kname = os.path.split( - get_path_to_storage_volume(device, storage_config))[-1] if partnumber == 5 and disk_ptable == "msdos": for key, item in storage_config.items(): if item.get('type') == "partition" and \ @@ -498,12 +551,41 @@ with open(os.path.join(previous_partition, "size"), "r") as fp: previous_size = int(fp.read()) with open(os.path.join(previous_partition, "start"), "r") as fp: - offset_sectors = previous_size + int(fp.read()) + 1 + previous_start = int(fp.read()) + + # Align to 1M at the beginning of the disk and at logical partitions + alignment_offset = (1 << 20) / logical_block_size_bytes + if partnumber == 1: + # start of disk + offset_sectors = alignment_offset else: - offset_sectors = 2048 + # further partitions + if disk_ptable == "gpt" or flag != "logical": + # msdos primary and any gpt part start after former partition end + offset_sectors = previous_start + previous_size + else: + # msdos extended/logical partitions + if flag 
== "logical": + if partnumber == 5: + # First logical partition + # start at extended partition start + alignment_offset + offset_sectors = previous_start + alignment_offset + else: + # Further logical partitions + # start at former logical partition end + alignment_offset + offset_sectors = (previous_start + previous_size + + alignment_offset) length_bytes = util.human2bytes(size) - length_sectors = int(length_bytes / 512) + # start sector is part of the sectors that define the partitions size + # so length has to be "size in sectors - 1" + length_sectors = int(length_bytes / logical_block_size_bytes) - 1 + # logical partitions can't share their start sector with the extended + # partition and logical partitions can't go head-to-head, so we have to + # realign and for that increase size as required + if info.get('flag') == "extended": + logdisks = getnumberoflogicaldisks(device, storage_config) + length_sectors = length_sectors + (logdisks * alignment_offset) # Handle preserve flag if info.get('preserve'): @@ -577,7 +659,7 @@ # Generate mkfs command and run if fstype in ["ext4", "ext3"]: - cmd = ['mkfs.%s' % fstype, '-q'] + cmd = ['mkfs.%s' % fstype, '-F', '-q'] if part_label: if len(part_label) > 16: raise ValueError( @@ -812,8 +894,12 @@ spare_devices = info.get('spare_devices') if not devices: raise ValueError("devices for raid must be specified") - if raidlevel not in [0, 1, 5]: + if raidlevel not in ['linear', 'raid0', 0, 'stripe', 'raid1', 1, 'mirror', + 'raid4', 4, 'raid5', 5, 'raid6', 6, 'raid10', 10]: raise ValueError("invalid raidlevel '%s'" % raidlevel) + if raidlevel in ['linear', 'raid0', 0, 'stripe']: + if spare_devices: + raise ValueError("spareunsupported in raidlevel '%s'" % raidlevel) device_paths = list(get_path_to_storage_volume(dev, storage_config) for dev in devices) @@ -822,8 +908,14 @@ spare_device_paths = list(get_path_to_storage_volume(dev, storage_config) for dev in spare_devices) - cmd = ["yes", "|", "mdadm", "--create", "/dev/%s" % 
info.get('name'), - "--level=%s" % raidlevel, "--raid-devices=%s" % len(device_paths)] + mdnameparm = "" + mdname = info.get('mdname') + if mdname: + mdnameparm = "--name=%s" % info.get('mdname') + + cmd = ["mdadm", "--create", "/dev/%s" % info.get('name'), "--run", + "--level=%s" % raidlevel, "--raid-devices=%s" % len(device_paths), + mdnameparm] for device in device_paths: # Zero out device superblock just in case device has been used for raid @@ -840,7 +932,11 @@ cmd.append(device) # Create the raid device + util.subp(["udevadm", "settle"]) + util.subp(["udevadm", "control", "--stop-exec-queue"]) util.subp(" ".join(cmd), shell=True) + util.subp(["udevadm", "control", "--start-exec-queue"]) + util.subp(["udevadm", "settle"]) # Make dname rule for this dev make_dname(info.get('id'), storage_config) @@ -871,18 +967,40 @@ storage_config) cache_device = get_path_to_storage_volume(info.get('cache_device'), storage_config) + cache_mode = info.get('cache_mode', None) + if not backing_device or not cache_device: - raise ValueError("backing device and cache device for bcache must be \ - specified") + raise ValueError("backing device and cache device for bcache" + " must be specified") # The bcache module is not loaded when bcache is installed by apt-get, so # we will load it now util.subp(["modprobe", "bcache"]) - # If both the backing device and cache device are specified at the same - # time than it is not necessary to attach the cache device manually, as - # bcache will do this automatically. 
- util.subp(["make-bcache", "-B", backing_device, "-C", cache_device]) + if cache_device: + # /sys/class/block/XXX/YYY/ + cache_device_sysfs = block_find_sysfs_path(cache_device) + + if os.path.exists(os.path.join(cache_device_sysfs, "bcache")): + # read in cset uuid from cache device + (out, err) = util.subp(["bcache-super-show", cache_device], + capture=True) + LOG.debug('out=[{}]'.format(out)) + [cset_uuid] = [line.split()[-1] for line in out.split("\n") + if line.startswith('cset.uuid')] + + else: + # make the cache device, extracting cacheset uuid + (out, err) = util.subp(["make-bcache", "-C", cache_device], + capture=True) + LOG.debug('out=[{}]'.format(out)) + [cset_uuid] = [line.split()[-1] for line in out.split("\n") + if line.startswith('Set UUID:')] + + if backing_device: + backing_device_sysfs = block_find_sysfs_path(backing_device) + if not os.path.exists(os.path.join(backing_device_sysfs, "bcache")): + util.subp(["make-bcache", "-B", backing_device]) # Some versions of bcache-tools will register the bcache device as soon as # we run make-bcache using udev rules, so wait for udev to settle, then try @@ -898,6 +1016,41 @@ fp.write(path) fp.close() + # if we specify both then we need to attach backing to cache + if cache_device and backing_device: + if cset_uuid: + LOG.info("Attaching backing device to cacheset: " + "{} -> {} cset.uuid: {}".format(backing_device, + cache_device, + cset_uuid)) + attach = os.path.join(backing_device_sysfs, + "bcache", + "attach") + with open(attach, "w") as fp: + fp.write(cset_uuid) + else: + LOG.error("Invalid cset_uuid: {}".format(cset_uuid)) + raise + + if cache_mode: + # find the actual bcache device name via sysfs using the + # backing device's holders directory. 
+ holders = get_holders(backing_device) + + if len(holders) != 1: + err = ('Invalid number of holding devices:' + ' {}'.format(holders)) + LOG.error(err) + raise ValueError(err) + + [bcache_dev] = holders + LOG.info("Setting cache_mode on {} to {}".format(bcache_dev, + cache_mode)) + cache_mode_file = \ + '/sys/block/{}/bcache/cache_mode'.format(info.get('id')) + with open(cache_mode_file, "w") as fp: + fp.write(cache_mode) + if info.get('name'): # Make dname rule for this dev make_dname(info.get('id'), storage_config) @@ -907,19 +1060,6 @@ not supported") -def install_missing_packages_for_meta_custom(): - """Install all the missing package that `meta_custom` requires to - function properly.""" - missing_packages = [ - package - for package in CUSTOM_REQUIRED_PACKAGES - if not util.has_pkg_installed(package) - ] - if len(missing_packages) > 0: - util.apt_update() - util.install_packages(missing_packages) - - def meta_custom(args): """Does custom partitioning based on the layout provided in the config file. 
Section with the name storage contains information on which @@ -942,9 +1082,6 @@ state = util.load_command_environment() cfg = config.load_command_config(args, state) - # make sure the required packages are installed - install_missing_packages_for_meta_custom() - storage_config = cfg.get('storage', {}) if not storage_config: raise Exception("storage configuration is required by mode '%s' " diff -Nru curtin-0.1.0~bzr276/curtin/commands/curthooks.py curtin-0.1.0~bzr314/curtin/commands/curthooks.py --- curtin-0.1.0~bzr276/curtin/commands/curthooks.py 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/curtin/commands/curthooks.py 2015-11-23 16:22:09.000000000 +0000 @@ -591,6 +591,12 @@ 'bridge-utils': ['bridge']}, } + format_configs = { + 'xfsprogs': ['xfs'], + 'e2fsprogs': ['ext2', 'ext3', 'ext4'], + 'brtfs-tools': ['btrfs'], + } + needed_packages = [] installed_packages = get_installed_packages(target) for cust_cfg, pkg_reqs in custom_configs.items(): @@ -606,6 +612,15 @@ pkg not in installed_packages: needed_packages.append(pkg) + format_types = set( + [operation['fstype'] + for operation in cfg[cust_cfg]['config'] + if operation['type'] == 'format']) + for pkg, fstypes in format_configs.items(): + if set(fstypes).intersection(format_types) and \ + pkg not in installed_packages: + needed_packages.append(pkg) + if needed_packages: util.install_packages(needed_packages, target=target) @@ -687,6 +702,10 @@ "mdadm.conf") if os.path.exists(mdadm_location): copy_mdadm_conf(mdadm_location, target) + # as per https://bugs.launchpad.net/ubuntu/+source/mdadm/+bug/964052 + # reconfigure mdadm + util.subp(['chroot', target, 'dpkg-reconfigure', + '--frontend=noninteractive', 'mdadm'], data=None) # If udev dname rules were created, copy them to target udev_rules_d = os.path.join(state['scratch'], "rules.d") diff -Nru curtin-0.1.0~bzr276/curtin/commands/extract.py curtin-0.1.0~bzr314/curtin/commands/extract.py --- curtin-0.1.0~bzr276/curtin/commands/extract.py 2015-10-07 
13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/curtin/commands/extract.py 2015-11-23 16:22:09.000000000 +0000 @@ -52,10 +52,13 @@ def extract_root_tgz_url(source, target): + # extract a -root.tar.gz url in the 'target' directory + # + # Uses smtar to avoid specifying the compression type curtin.util.subp(args=['sh', '-cf', ('wget "$1" --progress=dot:mega -O - |' - 'tar -C "$2" ' + ' '.join(tar_xattr_opts()) + - ' ' + '-Sxpzf - --numeric-owner'), + 'smtar -C "$2" ' + ' '.join(tar_xattr_opts()) + + ' ' + '-Sxpf - --numeric-owner'), '--', source, target]) diff -Nru curtin-0.1.0~bzr276/curtin/commands/main.py curtin-0.1.0~bzr314/curtin/commands/main.py --- curtin-0.1.0~bzr276/curtin/commands/main.py 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/curtin/commands/main.py 2015-11-23 16:22:09.000000000 +0000 @@ -23,12 +23,11 @@ from .. import log from .. import util -from .. import config -from ..reporter import (events, update_configuration) +from ..deps import install_deps SUB_COMMAND_MODULES = ['apply_net', 'block-meta', 'curthooks', 'extract', 'hook', 'in-target', 'install', 'mkfs', 'net-meta', - 'pack', 'swap', 'system-upgrade'] + 'pack', 'swap', 'system-install', 'system-upgrade'] def add_subcmd(subparser, subcmd): @@ -43,20 +42,20 @@ popfunc(subparser.add_parser(subcmd)) -def main(args=None): - if args is None: - args = sys.argv +class NoHelpParser(argparse.ArgumentParser): + # ArgumentParser with forced 'add_help=False' + def __init__(self, *args, **kwargs): + kwargs.update({'add_help': False}) + super(NoHelpParser, self).__init__(*args, **kwargs) + + def error(self, message): + # without overriding this, argparse exits with bad usage + raise ValueError("failed parsing arguments: %s" % message) - parser = argparse.ArgumentParser() - - stacktrace = (os.environ.get('CURTIN_STACKTRACE', "0").lower() - not in ("0", "false", "")) - - try: - verbosity = int(os.environ.get('CURTIN_VERBOSITY', "0")) - except ValueError: - verbosity = 1 +def 
get_main_parser(stacktrace=False, verbosity=0, + parser_class=argparse.ArgumentParser): + parser = parser_class(prog='curtin') parser.add_argument('--showtrace', action='store_true', default=stacktrace) parser.add_argument('-v', '--verbose', action='count', default=verbosity, dest='verbosity') @@ -66,6 +65,9 @@ help='read configuration from cfg', metavar='FILE', type=argparse.FileType("rb"), dest='main_cfgopts', default=[]) + parser.add_argument('--install-deps', action='store_true', + help='install dependencies as necessary', + default=False) parser.add_argument('--set', action=util.MergedCmdAppend, help=('define a config variable. key can be a "/" ' 'delimited path ("early_commands/cmd1=a"). if ' @@ -75,6 +77,66 @@ parser.set_defaults(config={}) parser.set_defaults(reportstack=None) + return parser + + +def maybe_install_deps(args, stacktrace=True, verbosity=0): + parser = get_main_parser(stacktrace=stacktrace, verbosity=verbosity, + parser_class=NoHelpParser) + subps = parser.add_subparsers(dest="subcmd", parser_class=NoHelpParser) + for subcmd in SUB_COMMAND_MODULES: + subps.add_parser(subcmd) + + install_only_args = [ + ['-v', '--install-deps'], + ['-vv', '--install-deps'], + ['--install-deps', '-v'], + ['--install-deps', '-vv'], + ['--install-deps'], + ] + + install_only = args in install_only_args + + if install_only: + verbosity = 1 + else: + try: + ns, unknown = parser.parse_known_args(args) + verbosity = ns.verbosity + if not ns.install_deps: + return + except ValueError: + # bad usage will be reported by the real reporter + return + + ret = install_deps(verbosity=verbosity) + + if ret != 0 or install_only: + sys.exit(ret) + + return + + +def main(args=None): + if args is None: + args = sys.argv + + stacktrace = (os.environ.get('CURTIN_STACKTRACE', "0").lower() + not in ("0", "false", "")) + + try: + verbosity = int(os.environ.get('CURTIN_VERBOSITY', "0")) + except ValueError: + verbosity = 1 + + maybe_install_deps(sys.argv[1:], stacktrace=stacktrace, + 
verbosity=verbosity) + + # Above here, only standard library modules can be assumed. + from .. import config + from ..reporter import (events, update_configuration) + + parser = get_main_parser(stacktrace=stacktrace, verbosity=verbosity) subps = parser.add_subparsers(dest="subcmd") for subcmd in SUB_COMMAND_MODULES: add_subcmd(subps, subcmd) diff -Nru curtin-0.1.0~bzr276/curtin/commands/mkfs.py curtin-0.1.0~bzr314/curtin/commands/mkfs.py --- curtin-0.1.0~bzr276/curtin/commands/mkfs.py 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/curtin/commands/mkfs.py 2015-11-23 16:22:09.000000000 +0000 @@ -38,7 +38,7 @@ 'default': 'ext4', 'action': 'store'}), (('-c', '--config'), {'help': 'read configuration from cfg', 'action': 'append', - 'metavar': 'CONFIG', 'dest': 'cfgopts'}) + 'metavar': 'CONFIG', 'dest': 'cfgopts', 'default': []}) ) ) @@ -52,7 +52,7 @@ if len(part_id) > 16: raise ValueError("ext3/4 partition labels cannot be longer than \ 16 characters") - cmd = ['mkfs.%s' % fstype, '-q', '-L', part_id, volume_path] + cmd = ['mkfs.%s' % fstype, '-F', '-q', '-L', part_id, volume_path] elif fstype in ["fat12", "fat16", "fat32", "fat"]: cmd = ["mkfs.fat"] fat_size = fstype.strip(string.ascii_letters) @@ -111,8 +111,8 @@ format_blockdev(device, args.fstype) else: # Bad argument - raise ValueError("device '%s' is neither an item in storage \ - config nor a block device" % device) + raise ValueError("device '%s' is neither an item in storage " + "config nor a block device" % device) sys.exit(0) diff -Nru curtin-0.1.0~bzr276/curtin/commands/system_install.py curtin-0.1.0~bzr314/curtin/commands/system_install.py --- curtin-0.1.0~bzr276/curtin/commands/system_install.py 1970-01-01 00:00:00.000000000 +0000 +++ curtin-0.1.0~bzr314/curtin/commands/system_install.py 2015-11-23 16:22:09.000000000 +0000 @@ -0,0 +1,63 @@ +# Copyright (C) 2015 Canonical Ltd. 
+# +# Author: Scott Moser +# +# Curtin is free software: you can redistribute it and/or modify it under +# the terms of the GNU Affero General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for +# more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with Curtin. If not, see . + +import os +import sys + +import curtin.util as util + +from . import populate_one_subcmd +from curtin.log import LOG + + +def system_install_pkgs_main(args): + # curtin system-install [--target=/] [pkg, [pkg...]] + if args.target is None: + args.target = "/" + + exit_code = 0 + try: + util.install_packages( + pkglist=args.packages, target=args.target, + allow_daemons=args.allow_daemons) + except util.ProcessExecutionError as e: + LOG.warn("system install failed for %s: %s" % (args.packages, e)) + exit_code = e.exit_code + + sys.exit(exit_code) + + +CMD_ARGUMENTS = ( + ((('--allow-daemons',), + {'help': ('do not disable running of daemons during upgrade.'), + 'action': 'store_true', 'default': False}), + (('-t', '--target'), + {'help': ('target root to upgrade. 
' + 'default is env[TARGET_MOUNT_POINT]'), + 'action': 'store', 'metavar': 'TARGET', + 'default': os.environ.get('TARGET_MOUNT_POINT')}), + ('packages', + {'help': 'the list of packages to install', + 'metavar': 'PACKAGES', 'action': 'store', 'nargs': '+'}), + ) +) + + +def POPULATE_SUBCMD(parser): + populate_one_subcmd(parser, CMD_ARGUMENTS, system_install_pkgs_main) + +# vi: ts=4 expandtab syntax=python diff -Nru curtin-0.1.0~bzr276/curtin/deps/check.py curtin-0.1.0~bzr314/curtin/deps/check.py --- curtin-0.1.0~bzr276/curtin/deps/check.py 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/curtin/deps/check.py 2015-11-23 16:22:09.000000000 +0000 @@ -20,29 +20,57 @@ and exit success or fail, indicating that deps should be there. python -m curtin.deps.check [-v] """ -_imports = ( - "from ..commands import main", - "import yaml", -) +import argparse +import sys +from . import find_missing_deps -def _check_imports(imports=_imports): - errors = [] - for istr in _imports: - try: - exec(istr) - except ImportError as e: - errors.append("failed '%s': %s" % (istr, e)) - return errors +def debug(level, msg_level, msg): + if level >= msg_level: + if msg[-1] != "\n": + msg += "\n" + sys.stderr.write(msg) + + +def main(): + parser = argparse.ArgumentParser( + prog='curtin-check-deps', + description='check dependencies for curtin.') + parser.add_argument('-v', '--verbose', action='count', default=0, + dest='verbosity') + args, extra = parser.parse_known_args(sys.argv[1:]) + + errors = find_missing_deps() + + if len(errors) == 0: + # exit 0 means all dependencies are available. 
+ debug(args.verbosity, 1, "No missing dependencies") + sys.exit(0) + + missing_pkgs = [] + fatal = [] + for e in errors: + if e.fatal: + fatal.append(e) + debug(args.verbosity, 2, str(e)) + missing_pkgs += e.deps + + if len(fatal): + for e in fatal: + debug(args.verbosity, 1, str(e)) + sys.exit(1) + + debug(args.verbosity, 1, + "Fix with:\n apt-get -qy install %s\n" % + ' '.join(sorted(missing_pkgs))) + + # we exit higher with less deps needed. + # exiting 99 means just 1 dep needed. + sys.exit(100-len(missing_pkgs)) + if __name__ == '__main__': - import sys - verbose = False - if len(sys.argv) > 1 and sys.argv[1] in ("-v", "--verbose"): - verbose = True - errors = _check_imports() - if verbose: - for emsg in errors: - sys.stderr.write("%s\n" % emsg) - sys.exit(len(errors)) + main() + +# vi: ts=4 expandtab syntax=python diff -Nru curtin-0.1.0~bzr276/curtin/deps/__init__.py curtin-0.1.0~bzr314/curtin/deps/__init__.py --- curtin-0.1.0~bzr276/curtin/deps/__init__.py 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/curtin/deps/__init__.py 2015-11-23 16:22:09.000000000 +0000 @@ -1 +1,164 @@ +# Copyright (C) 2015 Canonical Ltd. +# +# Author: Scott Moser +# +# Curtin is free software: you can redistribute it and/or modify it under +# the terms of the GNU Affero General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for +# more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with Curtin. If not, see . 
+import os +import sys + +from curtin.util import (which, install_packages, lsb_release, + ProcessExecutionError) + +REQUIRED_IMPORTS = [ + # import string to execute, python2 package, python3 package + ('import yaml', 'python-yaml', 'python3-yaml'), +] + +REQUIRED_EXECUTABLES = [ + # executable in PATH, package + ('file', 'file'), + ('lvcreate', 'lvm2'), + ('mdadm', 'mdadm'), + ('mkfs.btrfs', 'btrfs-tools'), + ('mkfs.ext4', 'e2fsprogs'), + ('mkfs.xfs', 'xfsprogs'), + ('partprobe', 'parted'), + ('sgdisk', 'gdisk'), + ('udevadm', 'udev'), +] + +if lsb_release()['codename'] == "precise": + REQUIRED_IMPORTS.append( + ('import oauth.oauth', 'python-oauth', None),) +else: + REQUIRED_EXECUTABLES.append(('make-bcache', 'bcache-tools',)) + REQUIRED_IMPORTS.append( + ('import oauthlib.oauth1', 'python-oauthlib', 'python3-oauthlib'),) + + +class MissingDeps(Exception): + def __init__(self, message, deps): + self.message = message + if isinstance(deps, str) or deps is None: + deps = [deps] + self.deps = [d for d in deps if d is not None] + self.fatal = None in deps + + def __str__(self): + if self.fatal: + if not len(self.deps): + return self.message + " Unresolvable." + return (self.message + + " Unresolvable. Partially resolvable with packages: %s" % + ' '.join(self.deps)) + else: + return self.message + " Install packages: %s" % ' '.join(self.deps) + + +def check_import(imports, py2pkgs, py3pkgs, message=None): + import_group = imports + if isinstance(import_group, str): + import_group = [import_group] + + for istr in import_group: + try: + exec(istr) + return + except ImportError: + pass + + if not message: + if isinstance(imports, str): + message = "Failed '%s'." % imports + else: + message = "Unable to do any of %s." % import_group + + if sys.version_info[0] == 2: + pkgs = py2pkgs + else: + pkgs = py3pkgs + + raise MissingDeps(message, pkgs) + + +def check_executable(cmdname, pkg): + if not which(cmdname): + raise MissingDeps("Missing program '%s'." 
% cmdname, pkg) + + +def check_executables(executables=None): + if executables is None: + executables = REQUIRED_EXECUTABLES + mdeps = [] + for exe, pkg in executables: + try: + check_executable(exe, pkg) + except MissingDeps as e: + mdeps.append(e) + return mdeps + + +def check_imports(imports=None): + if imports is None: + imports = REQUIRED_IMPORTS + + mdeps = [] + for import_str, py2pkg, py3pkg in imports: + try: + check_import(import_str, py2pkg, py3pkg) + except MissingDeps as e: + mdeps.append(e) + return mdeps + + +def find_missing_deps(): + return check_executables() + check_imports() + + +def install_deps(verbosity=False, dry_run=False, allow_daemons=True): + errors = find_missing_deps() + if len(errors) == 0: + if verbosity: + sys.stderr.write("No missing dependencies\n") + return 0 + + missing_pkgs = [] + for e in errors: + missing_pkgs += e.deps + + deps_string = ' '.join(sorted(missing_pkgs)) + + if dry_run: + sys.stderr.write("Missing dependencies: %s\n" % deps_string) + return 0 + + if os.geteuid() != 0: + sys.stderr.write("Missing dependencies: %s\n" % deps_string) + sys.stderr.write("Package installation is not possible as non-root.\n") + return 2 + + if verbosity: + sys.stderr.write("Installing %s\n" % deps_string) + + ret = 0 + try: + install_packages(missing_pkgs, allow_daemons=allow_daemons) + except ProcessExecutionError as e: + sys.stderr.write("%s\n" % e) + ret = e.exit_code + + return ret + + # vi: ts=4 expandtab syntax=python diff -Nru curtin-0.1.0~bzr276/curtin/deps/install.py curtin-0.1.0~bzr314/curtin/deps/install.py --- curtin-0.1.0~bzr276/curtin/deps/install.py 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/curtin/deps/install.py 2015-11-23 16:22:09.000000000 +0000 @@ -20,33 +20,29 @@ python -m curtin.deps.install [-v] """ -import subprocess +import argparse import sys -import time +from . 
import install_deps -def runcmd(cmd, retries=[]): - for wait in retries: - try: - subprocess.check_call(cmd) - return 0 - except subprocess.CalledProcessError as e: - sys.stderr.write("%s failed. sleeping %s\n" % (cmd, wait)) - time.sleep(wait) - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError as e: - return e.returncode -if __name__ == '__main__': - if sys.version_info[0] == 2: - pkgs = ['python-yaml'] - else: - pkgs = ['python3-yaml'] - - ret = runcmd(['apt-get', '--quiet', 'update'], retries=[2, 4]) - if ret != 0: - sys.exit(ret) +def main(): + parser = argparse.ArgumentParser( + prog='curtin-install-deps', + description='install dependencies for curtin.') + parser.add_argument('-v', '--verbose', action='count', default=0, + dest='verbosity') + parser.add_argument('--dry-run', action='store_true', default=False) + parser.add_argument('--no-allow-daemons', action='store_false', + default=True) + args = parser.parse_args(sys.argv[1:]) - ret = runcmd(['apt-get', 'install', '--quiet', '--assume-yes'] + pkgs) + ret = install_deps(verbosity=args.verbosity, dry_run=args.dry_run, + allow_daemons=True) sys.exit(ret) + + +if __name__ == '__main__': + main() + +# vi: ts=4 expandtab syntax=python diff -Nru curtin-0.1.0~bzr276/curtin/__init__.py curtin-0.1.0~bzr314/curtin/__init__.py --- curtin-0.1.0~bzr276/curtin/__init__.py 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/curtin/__init__.py 2015-11-23 16:22:09.000000000 +0000 @@ -29,6 +29,10 @@ 'REPORTING_EVENTS_WEBHOOK', # install supports the 'storage' config version 1 'STORAGE_CONFIG_V1', + # subcommand 'system-install' is present + 'SUBCOMMAND_SYSTEM_INSTALL', + # subcommand 'system-upgrade' is present + 'SUBCOMMAND_SYSTEM_UPGRADE', ] # vi: ts=4 expandtab syntax=python diff -Nru curtin-0.1.0~bzr276/curtin/pack.py curtin-0.1.0~bzr314/curtin/pack.py --- curtin-0.1.0~bzr276/curtin/pack.py 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/curtin/pack.py 2015-11-23 
16:22:09.000000000 +0000 @@ -26,7 +26,6 @@ #!/bin/sh PY3OR2_MAIN="%(ep_main)s" PY3OR2_MCHECK="%(ep_mcheck)s" -PY3OR2_MINSTALL="%(ep_minstall)s" PY3OR2_PYTHONS=${PY3OR2_PYTHONS:-"%(python_exe_list)s"} PYTHON=${PY3OR2_PYTHON} """.strip() @@ -54,41 +53,42 @@ if [ ! -n "$PYTHON" ]; then first_exe="" oifs="$IFS"; IFS=":" + best=0 + best_exe="" for p in $PY3OR2_PYTHONS; do command -v "$p" >/dev/null 2>&1 || { debug "$p: not in path"; continue; } - [ -n "$first_exe" ] || first_exe="$p" [ -z "$PY3OR2_MCHECK" ] && PYTHON=$p && break - out=$($p -m "$PY3OR2_MCHECK" 2>&1) && PYTHON="$p" && break - debug "$p: $out" + out=$($p -m "$PY3OR2_MCHECK" "$@" 2>&1) && PYTHON="$p" && + { debug "$p passed check [$p -m $PY3OR2_MCHECK $*]"; break; } + ret=$? + debug "$p [$ret]: $out" + # exit code of 1 is unuseable + [ $ret -eq 1 ] && continue + [ -n "$first_exe" ] || first_exe="$p" + # higher non-zero exit values indicate more plausible usability + [ $best -lt $ret ] && best_exe="$p" && best=$ret && + debug "current best: $best_exe" done IFS="$oifs" - if [ -z "$PYTHON" ]; then - if [ -n "$first_exe" -a -n "$PY3OR2_MINSTALL" ]; then - debug "attempting deps install with $first_exe $PY3OR2_MINSTALL" - "$first_exe" -m "$PY3OR2_MINSTALL" || - fail "failed to install deps!" - PYTHON="$first_exe" - fi - fi + [ -z "$best_exe" -a -n "$first_exe" ] && best_exe="$first_exe" + [ -n "$PYTHON" ] || PYTHON="$best_exe" [ -n "$PYTHON" ] || fail "no availble python? 
[PY3OR2_DEBUG=1 for more info]" fi -debug "executing: $PYTHON -m \"$PY3OR2_MAIN\" $*" +debug "executing: $PYTHON -m \\"$PY3OR2_MAIN\\" $*" exec $PYTHON -m "$PY3OR2_MAIN" "$@" """ def write_exe_wrapper(entrypoint, path=None, interpreter=None, - deps_check_entry=None, deps_install_entry=None, - mode=0o755): + deps_check_entry=None, mode=0o755): if not interpreter: - interpreter = "python3:python2:python" + interpreter = "python3:python" subs = { 'ep_main': entrypoint, 'ep_mcheck': deps_check_entry if deps_check_entry else "", - 'ep_minstall': deps_install_entry if deps_install_entry else "", 'python_exe_list': interpreter, } @@ -140,9 +140,7 @@ ignore=not_dot_py) write_exe_wrapper(entrypoint='curtin.commands.main', path=os.path.join(bindir, 'curtin'), - deps_check_entry="curtin.deps.check", - deps_install_entry="curtin.deps.install" - ) + deps_check_entry="curtin.deps.check") for archpath, filepath in copy_files: target = os.path.abspath(os.path.join(exdir, archpath)) @@ -208,7 +206,8 @@ def pack_install(fdout=None, configs=None, paths=None, - add_files=None, copy_files=None, args=None): + add_files=None, copy_files=None, args=None, + install_deps=True): if configs is None: configs = [] @@ -219,7 +218,12 @@ if args is None: args = [] - command = ["curtin", "install"] + if install_deps: + dep_flags = ["--install-deps"] + else: + dep_flags = [] + + command = ["curtin"] + dep_flags + ["install"] my_files = [] for n, config in enumerate(configs): diff -Nru curtin-0.1.0~bzr276/curtin/util.py curtin-0.1.0~bzr314/curtin/util.py --- curtin-0.1.0~bzr276/curtin/util.py 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/curtin/util.py 2015-11-23 16:22:09.000000000 +0000 @@ -30,6 +30,8 @@ _INSTALLED_HELPERS_PATH = '/usr/lib/curtin/helpers' _INSTALLED_MAIN = '/usr/bin/curtin' +_LSB_RELEASE = {} + def _subp(args, data=None, rcs=None, env=None, capture=False, shell=False, logstring=False): @@ -473,17 +475,47 @@ marker = os.path.join(target, marker) # if marker exists, 
check if there are files that would make it obsolete + listfiles = [os.path.join(target, "etc/apt/sources.list")] + listfiles += glob.glob( + os.path.join(target, "etc/apt/sources.list.d/*.list")) + if os.path.exists(marker) and not force: - listfiles = [os.path.join(target, "/etc/apt/sources.list")] - listfiles += glob.glob( - os.path.join(target, "/etc/apt/sources.list.d/*.list")) if len(find_newer(marker, listfiles)) == 0: return - # we're not using 'run_apt_command' so we can use 'retries' to subp - apt_update = ['apt-get', 'update', '--quiet'] - with RunInChroot(target, allow_daemons=True) as inchroot: - inchroot(apt_update, env=env, retries=retries) + abs_tmpdir = tempfile.mkdtemp(dir=os.path.join(target, 'tmp')) + try: + abs_slist = abs_tmpdir + "/sources.list" + abs_slistd = abs_tmpdir + "/sources.list.d" + ch_tmpdir = "/tmp/" + os.path.basename(abs_tmpdir) + ch_slist = ch_tmpdir + "/sources.list" + ch_slistd = ch_tmpdir + "/sources.list.d" + + # create tmpdir/sources.list with all lines other than deb-src + # avoid apt complaining by using existing and empty dir for sourceparts + os.mkdir(abs_slistd) + with open(abs_slist, "w") as sfp: + for sfile in listfiles: + with open(sfile, "r") as fp: + contents = fp.read() + for line in contents.splitlines(): + line = line.lstrip() + if not line.startswith("deb-src"): + sfp.write(line + "\n") + + update_cmd = [ + 'apt-get', '--quiet', + '--option=Acquire::Languages=none', + '--option=Dir::Etc::sourcelist=%s' % ch_slist, + '--option=Dir::Etc::sourceparts=%s' % ch_slistd, + 'update'] + + # do not using 'run_apt_command' so we can use 'retries' to subp + with RunInChroot(target, allow_daemons=True) as inchroot: + inchroot(update_cmd, env=env, retries=retries) + finally: + if abs_tmpdir: + shutil.rmtree(abs_tmpdir) with open(marker, "w") as fp: fp.write(comment + "\n") @@ -661,6 +693,31 @@ return (isinstance(exc, IOError) and exc.errno == errno.ENOENT) +def lsb_release(): + fmap = {'Codename': 'codename', 
'Description': 'description', + 'Distributor ID': 'id', 'Release': 'release'} + global _LSB_RELEASE + if not _LSB_RELEASE: + data = {} + try: + out, err = subp(['lsb_release', '--all'], capture=True) + for line in out.splitlines(): + fname, tok, val = line.partition(":") + if fname in fmap: + data[fmap[fname]] = val.strip() + missing = [k for k in fmap.values() if k not in data] + if len(missing): + LOG.warn("Missing fields in lsb_release --all output: %s", + ','.join(missing)) + + except ProcessExecutionError as e: + LOG.warn("Unable to get lsb_release --all: %s", e) + data = {v: "UNAVAILABLE" for v in fmap.values()} + + _LSB_RELEASE.update(data) + return _LSB_RELEASE + + class MergedCmdAppend(argparse.Action): """This appends to a list in order of appearence both the option string and the value""" diff -Nru curtin-0.1.0~bzr276/debian/changelog curtin-0.1.0~bzr314/debian/changelog --- curtin-0.1.0~bzr276/debian/changelog 2015-10-21 13:39:13.000000000 +0000 +++ curtin-0.1.0~bzr314/debian/changelog 2015-11-23 16:28:52.000000000 +0000 @@ -1,4 +1,22 @@ -curtin (0.1.0~bzr276-0ubuntu1~vivid1) vivid; urgency=medium +curtin (0.1.0~bzr314-0ubuntu1) xenial; urgency=medium + + * New upstream snapshot. + * support installing .tar.xz, .tar.gz, .tar.bz2 or .tar via smtar helper + * improve dependency checking (LP: #1514888) + * Allow re-use of bcache cache devices with separate backing devices + (LP: #1514094) + * call mkfs.ext3 or mkfs.ext4 with -F (force) + * Partition alignment and sizing fixes (LP: #1513085) + * Set bcache cache_mode if present in storage config (LP: #1510334) + * apt-get update: do not download translation files and source files + during installation. + * new sub-commands 'system-upgrade' and 'system-install' for package + upgrade and installation. + * debian/control: do not list unversioned e2fsprogs as it is essential. + + -- Scott Moser Mon, 23 Nov 2015 11:28:52 -0500 + +curtin (0.1.0~bzr276-0ubuntu1) wily; urgency=medium * New upstream snapshot. 
* debian/control: add python3-curtin depends to curtin (LP: #1503507) diff -Nru curtin-0.1.0~bzr276/debian/control curtin-0.1.0~bzr314/debian/control --- curtin-0.1.0~bzr276/debian/control 2015-10-07 13:50:18.000000000 +0000 +++ curtin-0.1.0~bzr314/debian/control 2015-11-23 16:22:10.000000000 +0000 @@ -25,7 +25,15 @@ Package: curtin Architecture: all Priority: extra -Depends: python3-curtin (= ${binary:Version}), +Depends: btrfs-tools, + file, + gdisk, + lvm2, + mdadm, + parted, + python3-curtin (= ${binary:Version}), + udev, + xfsprogs, ${misc:Depends} Description: Library and tools for the curtin installer This package provides the curtin installer. diff -Nru curtin-0.1.0~bzr276/doc/devel/README-vmtest.txt curtin-0.1.0~bzr314/doc/devel/README-vmtest.txt --- curtin-0.1.0~bzr276/doc/devel/README-vmtest.txt 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/doc/devel/README-vmtest.txt 2015-11-23 16:22:09.000000000 +0000 @@ -3,14 +3,66 @@ do installs and validate a number of configurations. The general flow of the vmtests is: - * install a system using 'tools/launch'. The install environment is - booted from a maas ephemeral image. - * configure the installed image to contain a 'NoCloud' datasource seed - that provides scripts that will run on first boot. - * power off the system. - * boot the system with 'tools/xkvm'. This boot will run the provided - scripts, write their output to a "data" disk and then shut itself down. - * extract the data from the data disk and verify the output is as expected. + 1. each test has an associated yaml config file for curtin in examples/tests + 2. uses curtin-pack to create the user-data for cloud-init to trigger install + 3. create and install a system using 'tools/launch'. + 3.1 The install environment is booted from a maas ephemeral image. 
+ 3.2 kernel & initrd used are from maas images (not part of the image) + 3.3 network by default is handled via user networking + 3.4 It creates all empty disks required + 3.5 cloud-init datasource is provided by launch + a) like: ds=nocloud-net;seedfrom=http://10.7.0.41:41518/ + provided by python webserver start_http + b) via -drive file=/tmp/launch.8VOiOn/seed.img,if=virtio,media=cdrom + as a seed disk (if booted without external kernel) + 3.6 dependencies and other preparations are installed at the beginning by + curtin inside the ephemeral image prior to configuring the target + 4. power off the system. + 5. configure a 'NoCloud' datasource seed image that provides scripts that + will run on first boot. + 5.1 this will contain all our code to gather health data on the install + 5.2 by cloud-init design this runs only once per instance, if you start + the system again this won't be called again + 6. boot the installed system with 'tools/xkvm'. + 6.1 reuses the disks that were installed/configured in the former steps + 6.2 also adds an output disk + 6.3 additionally the seed image for the data gathering is added + 6.4 On this boot it will run the provided scripts, write their output to a + "data" disk and then shut itself down. + 7. extract the data from the output disk + 8. vmtest python code now verifies if the output is as expected. + +== Debugging == +At 3.1 + - one can pull data out of the maas image with + sudo mount-image-callback your.img -- sh -c 'COMMAND' + e.g. sudo mount-image-callback your.img -- sh -c 'cp $MOUNTPOINT/boot/* .' +At step 3.6 -> 4. + - tools/launch can be called in a way to give you console access + to do so just call tools/launch but drop the -serial=x parameter. 
+ One might want to change "'power_state': {'mode': 'poweroff'}" to avoid + the auto reboot before getting control + Replace the directory usually seen in the launch calls with a clean fresh + directory + - In /curtin curtin and its config can be found + - if the system gets that far cloud-init will create a user ubuntu/passw0rd + - otherwise one can use a cloud-image from https://cloud-images.ubuntu.com/ + and add a backdoor user via + bzr branch lp:~maas-maintainers/maas/backdoor-image backdoor-image + sudo ./backdoor-image -v --user= --password-auth --password= IMG +At step 6 -> 7 + - You might want to keep all the temporary images around. + To do so you can set CURTIN_VMTEST_KEEP_DATA to true: + export CURTIN_VMTEST_KEEP_DATA=true + That will keep the /tmp/tmpXXXXX directories and all files in there for + further execution. +At step 7 + - You might want to take a look at the output disk yourself. + It is a normal qcow image, so one can use mount-image-callback as described + above + - to invoke xkvm on your own take the command you see in the output and + remove the "-serial ..." but add -nographic instead + For graphical console one can add --vnc 127.0.0.1:1 == Setup == In order to run vmtest you'll need some dependencies. To get them, you @@ -24,8 +76,8 @@ make vmtest -If you wish to run a single test, you can run all tests in test_network.py with: - sudo PATH=$PWD/tools:$PATH nosetests3 tests/vmtests/test_network.py:WilyTestBasic +If you wish to all tests in test_network.py, do so with: + sudo PATH=$PWD/tools:$PATH nosetests3 tests/vmtests/test_network.py Or run a single test with: sudo PATH=$PWD/tools:$PATH nosetests3 tests/vmtests/test_network.py:WilyTestBasic @@ -38,3 +90,11 @@ The tests themselves don't actually have to run as root, but the test setup does. * the 'tools' directory must be in your path. + * test will set apt_proxy in the guests to the value of + 'apt_proxy' environment variable. 
If that is not set it will + look at the host's apt config and read 'Acquire::HTTP::Proxy' + +If required for further debugging one can set the environment variable +"CURTIN_VMTEST_KEEP_DATA" like: +export CURTIN_VMTEST_KEEP_DATA=true +This will keep the temporary disk files around by skipping __init__.py:__del__ diff -Nru curtin-0.1.0~bzr276/examples/tests/allindata.yaml curtin-0.1.0~bzr314/examples/tests/allindata.yaml --- curtin-0.1.0~bzr276/examples/tests/allindata.yaml 1970-01-01 00:00:00.000000000 +0000 +++ curtin-0.1.0~bzr314/examples/tests/allindata.yaml 2015-11-23 16:22:09.000000000 +0000 @@ -0,0 +1,197 @@ +storage: + version: 1 + config: + - id: sda + type: disk + ptable: gpt + model: QEMU HARDDISK + path: /dev/vdb + name: main_disk + grub_device: 1 + - id: bios_boot_partition + type: partition + size: 1MB + device: sda + flag: bios_grub + - id: sda1 + type: partition + size: 1GB + device: sda + - id: sda2 + type: partition + size: 1GB + device: sda + - id: sda3 + type: partition + size: 1GB + device: sda + - id: sda4 + type: partition + size: 1GB + device: sda + - id: sda5 + type: partition + size: 3GB + device: sda + - id: sdb + type: disk + ptable: gpt + model: QEMU HARDDISK + path: /dev/vdc + name: second_disk + - id: sdb1 + type: partition + size: 1GB + device: sdb + - id: sdb2 + type: partition + size: 1GB + device: sdb + - id: sdb3 + type: partition + size: 1GB + device: sdb + - id: sdb4 + type: partition + size: 1GB + device: sdb + - id: sdc + type: disk + ptable: gpt + model: QEMU HARDDISK + path: /dev/vdd + name: third_disk + - id: sdc1 + type: partition + size: 1GB + device: sdc + - id: sdc2 + type: partition + size: 1GB + device: sdc + - id: sdc3 + type: partition + size: 1GB + device: sdc + - id: sdc4 + type: partition + size: 1GB + device: sdc + - id: sdd + type: disk + ptable: gpt + model: QEMU HARDDISK + path: /dev/vde + name: fourth_disk + - id: sdd1 + type: partition + size: 1GB + device: sdd + - id: sdd2 + type: partition + size: 1GB + 
device: sdd + - id: sdd3 + type: partition + size: 1GB + device: sdd + - id: sdd4 + type: partition + size: 1GB + device: sdd + - id: mddevice0 + name: md0 + type: raid + raidlevel: 5 + devices: + - sda1 + - sdb1 + - sdc1 + spare_devices: + - sdd1 + - id: mddevice1 + name: md1 + type: raid + raidlevel: raid6 + devices: + - sda2 + - sdb2 + - sdc2 + - sdd2 + spare_devices: + - sda3 + - id: mddevice2 + name: md2 + type: raid + raidlevel: 1 + devices: + - sda4 + - sdb3 + spare_devices: + - sdc3 + - sdb4 + - id: mddevice3 + name: md3 + type: raid + raidlevel: raid0 + devices: + - sdc4 + - sdd3 + - id: volgroup1 + name: vg1 + type: lvm_volgroup + devices: + - mddevice0 + - mddevice1 + - mddevice2 + - mddevice3 + - id: lvmpart1 + name: lv1 + size: 1G + type: lvm_partition + volgroup: volgroup1 + - id: lvmpart2 + name: lv2 + size: 1G + type: lvm_partition + volgroup: volgroup1 + - id: lvmpart3 + name: lv3 + type: lvm_partition + volgroup: volgroup1 + - id: dmcrypt0 + type: dm_crypt + volume: lvmpart3 + key: testkey + dm_name: dmcrypt0 + - id: lv1_fs + name: storage + type: format + fstype: ext3 + volume: lvmpart1 + - id: lv2_fs + name: storage + type: format + fstype: ext4 + volume: lvmpart2 + - id: dmcrypt_fs + name: storage + type: format + fstype: xfs + volume: dmcrypt0 + - id: sda5_root + type: format + fstype: ext4 + volume: sda5 + - id: sda5_mount + type: mount + path: / + device: sda5_root + - id: lv1_mount + type: mount + path: /srv/data + device: lv1_fs + - id: lv2_mount + type: mount + path: /srv/backup + device: lv2_fs diff -Nru curtin-0.1.0~bzr276/examples/tests/lvm.yaml curtin-0.1.0~bzr314/examples/tests/lvm.yaml --- curtin-0.1.0~bzr276/examples/tests/lvm.yaml 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/examples/tests/lvm.yaml 2015-11-23 16:22:09.000000000 +0000 @@ -24,7 +24,7 @@ device: sda - id: sda3 type: partition - size: 2G + size: 3G flag: logical device: sda - id: volgroup1 diff -Nru curtin-0.1.0~bzr276/examples/tests/mdadm_bcache.yaml 
curtin-0.1.0~bzr314/examples/tests/mdadm_bcache.yaml --- curtin-0.1.0~bzr276/examples/tests/mdadm_bcache.yaml 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/examples/tests/mdadm_bcache.yaml 2015-11-23 16:22:09.000000000 +0000 @@ -32,6 +32,10 @@ type: partition size: 1GB device: sda + - id: sda6 + type: partition + size: 1GB + device: sda - id: mddevice name: md0 type: raid @@ -46,6 +50,13 @@ name: cached_array backing_device: mddevice cache_device: sda5 + cache_mode: writeback + - id: bcache1 + type: bcache + name: cached_array_2 + backing_device: sda6 + cache_device: sda5 + cache_mode: writeback - id: sda1_root type: format fstype: ext4 @@ -54,6 +65,10 @@ type: format fstype: ext4 volume: bcache0 + - id: bcache_storage + type: format + fstype: ext4 + volume: bcache1 - id: sda1_mount type: mount path: / @@ -62,3 +77,7 @@ type: mount path: /media/data device: raid_storage + - id: bcache1_mount + type: mount + path: /media/bcache1 + device: bcache_storage diff -Nru curtin-0.1.0~bzr276/examples/tests/mirrorboot.yaml curtin-0.1.0~bzr314/examples/tests/mirrorboot.yaml --- curtin-0.1.0~bzr276/examples/tests/mirrorboot.yaml 1970-01-01 00:00:00.000000000 +0000 +++ curtin-0.1.0~bzr314/examples/tests/mirrorboot.yaml 2015-11-23 16:22:09.000000000 +0000 @@ -0,0 +1,44 @@ +storage: + version: 1 + config: + - id: sda + type: disk + ptable: gpt + model: QEMU HARDDISK + path: /dev/vdb + name: main_disk + grub_device: 1 + - id: bios_boot_partition + type: partition + size: 1MB + device: sda + flag: bios_grub + - id: sda1 + type: partition + size: 3GB + device: sda + - id: sdb + type: disk + ptable: gpt + model: QEMU HARDDISK + path: /dev/vdc + name: second_disk + - id: sdb1 + type: partition + size: 3GB + device: sdb + - id: mddevice + name: md0 + type: raid + raidlevel: 1 + devices: + - sda1 + - sdb1 + - id: md_root + type: format + fstype: ext4 + volume: mddevice + - id: md_mount + type: mount + path: / + device: md_root diff -Nru 
curtin-0.1.0~bzr276/examples/tests/raid10boot.yaml curtin-0.1.0~bzr314/examples/tests/raid10boot.yaml --- curtin-0.1.0~bzr276/examples/tests/raid10boot.yaml 1970-01-01 00:00:00.000000000 +0000 +++ curtin-0.1.0~bzr314/examples/tests/raid10boot.yaml 2015-11-23 16:22:09.000000000 +0000 @@ -0,0 +1,66 @@ +storage: + version: 1 + config: + - id: sda + type: disk + ptable: gpt + model: QEMU HARDDISK + path: /dev/vdb + name: main_disk + grub_device: 1 + - id: bios_boot_partition + type: partition + size: 1MB + device: sda + flag: bios_grub + - id: sda1 + type: partition + size: 3GB + device: sda + - id: sdb + type: disk + ptable: gpt + model: QEMU HARDDISK + path: /dev/vdc + name: second_disk + - id: sdb1 + type: partition + size: 3GB + device: sdb + - id: sdc + type: disk + ptable: gpt + model: QEMU HARDDISK + path: /dev/vdd + name: third_disk + - id: sdc1 + type: partition + size: 3GB + device: sdc + - id: sdd + type: disk + ptable: gpt + model: QEMU HARDDISK + path: /dev/vde + name: fourth_disk + - id: sdd1 + type: partition + size: 3GB + device: sdd + - id: mddevice0 + name: md0 + type: raid + raidlevel: 10 + devices: + - sda1 + - sdb1 + - sdc1 + - sdd1 + - id: md_root + type: format + fstype: ext4 + volume: mddevice0 + - id: md_mount + type: mount + path: / + device: md_root diff -Nru curtin-0.1.0~bzr276/examples/tests/raid5bcache.yaml curtin-0.1.0~bzr314/examples/tests/raid5bcache.yaml --- curtin-0.1.0~bzr276/examples/tests/raid5bcache.yaml 1970-01-01 00:00:00.000000000 +0000 +++ curtin-0.1.0~bzr314/examples/tests/raid5bcache.yaml 2015-11-23 16:22:09.000000000 +0000 @@ -0,0 +1,97 @@ +storage: + config: + - grub_device: true + id: sda + model: QEMU HARDDISK + name: sda + ptable: msdos + path: /dev/vdb + type: disk + wipe: superblock + - id: sdb + model: QEMU HARDDISK + name: sdb + path: /dev/vdc + type: disk + wipe: superblock + - id: sdc + model: QEMU HARDDISK + name: sdc + path: /dev/vdd + type: disk + wipe: superblock + - id: sdd + model: QEMU HARDDISK + name: sdd 
+ path: /dev/vde + type: disk + wipe: superblock + - id: sde + model: QEMU HARDDISK + name: sde + path: /dev/vdf + type: disk + wipe: superblock + - devices: + - sdc + - sdd + - sde + id: md0 + name: md0 + raidlevel: 5 + spare_devices: [] + type: raid + - device: sda + id: sda-part1 + name: sda-part1 + number: 1 + offset: 2097152B + size: 1000001536B + type: partition + uuid: 3a38820c-d675-4069-b060-509a3d9d13cc + wipe: superblock + - device: sda + id: sda-part2 + name: sda-part2 + number: 2 + size: 7586787328B + type: partition + uuid: 17747faa-4b9e-4411-97e5-12fd3d199fb8 + wipe: superblock + - backing_device: sda-part2 + cache_device: sdb + cache_mode: writeback + id: bcache0 + name: bcache0 + type: bcache + - fstype: ext4 + id: sda-part1_format + label: '' + type: format + uuid: 71b1ef6f-5cab-4a77-b4c8-5a209ec11d7c + volume: sda-part1 + - fstype: ext4 + id: md0_format + label: '' + type: format + uuid: b031f0a0-adb3-43be-bb43-ce0fc8a224a4 + volume: md0 + - fstype: ext4 + id: bcache0_format + label: '' + type: format + uuid: ce45bbaf-5a44-4487-b89e-035c2dd40657 + volume: bcache0 + - device: bcache0_format + id: bcache0_mount + path: / + type: mount + - device: sda-part1_format + id: sda-part1_mount + path: /boot + type: mount + - device: md0_format + id: md0_mount + path: /srv/data + type: mount + version: 1 diff -Nru curtin-0.1.0~bzr276/examples/tests/raid5boot.yaml curtin-0.1.0~bzr314/examples/tests/raid5boot.yaml --- curtin-0.1.0~bzr276/examples/tests/raid5boot.yaml 1970-01-01 00:00:00.000000000 +0000 +++ curtin-0.1.0~bzr314/examples/tests/raid5boot.yaml 2015-11-23 16:22:09.000000000 +0000 @@ -0,0 +1,55 @@ +storage: + version: 1 + config: + - id: sda + type: disk + ptable: gpt + model: QEMU HARDDISK + path: /dev/vdb + name: main_disk + grub_device: 1 + - id: bios_boot_partition + type: partition + size: 1MB + device: sda + flag: bios_grub + - id: sda1 + type: partition + size: 3GB + device: sda + - id: sdb + type: disk + ptable: gpt + model: QEMU HARDDISK + 
path: /dev/vdc + name: second_disk + - id: sdb1 + type: partition + size: 3GB + device: sdb + - id: sdc + type: disk + ptable: gpt + model: QEMU HARDDISK + path: /dev/vdd + name: third_disk + - id: sdc1 + type: partition + size: 3GB + device: sdc + - id: mddevice + name: md0 + type: raid + raidlevel: 5 + devices: + - sda1 + - sdb1 + - sdc1 + - id: md_root + type: format + fstype: ext4 + volume: mddevice + - id: md_mount + type: mount + path: / + device: md_root diff -Nru curtin-0.1.0~bzr276/examples/tests/raid6boot.yaml curtin-0.1.0~bzr314/examples/tests/raid6boot.yaml --- curtin-0.1.0~bzr276/examples/tests/raid6boot.yaml 1970-01-01 00:00:00.000000000 +0000 +++ curtin-0.1.0~bzr314/examples/tests/raid6boot.yaml 2015-11-23 16:22:09.000000000 +0000 @@ -0,0 +1,67 @@ +storage: + version: 1 + config: + - id: sda + type: disk + ptable: gpt + model: QEMU HARDDISK + path: /dev/vdb + name: main_disk + grub_device: 1 + - id: bios_boot_partition + type: partition + size: 1MB + device: sda + flag: bios_grub + - id: sda1 + type: partition + size: 3GB + device: sda + - id: sdb + type: disk + ptable: gpt + model: QEMU HARDDISK + path: /dev/vdc + name: second_disk + - id: sdb1 + type: partition + size: 3GB + device: sdb + - id: sdc + type: disk + ptable: gpt + model: QEMU HARDDISK + path: /dev/vdd + name: third_disk + - id: sdc1 + type: partition + size: 3GB + device: sdc + - id: sdd + type: disk + ptable: gpt + model: QEMU HARDDISK + path: /dev/vde + name: fourth_disk + - id: sdd1 + type: partition + size: 3GB + device: sdd + - id: mddevice + name: md0 + mdname: foobar + type: raid + raidlevel: 6 + devices: + - sda1 + - sdb1 + - sdc1 + - sdd1 + - id: md_root + type: format + fstype: ext4 + volume: mddevice + - id: md_mount + type: mount + path: / + device: md_root diff -Nru curtin-0.1.0~bzr276/helpers/smtar curtin-0.1.0~bzr314/helpers/smtar --- curtin-0.1.0~bzr276/helpers/smtar 1970-01-01 00:00:00.000000000 +0000 +++ curtin-0.1.0~bzr314/helpers/smtar 2015-11-23 16:22:09.000000000 
+0000 @@ -0,0 +1,116 @@ +#!/bin/sh +# smtar (smart tar) +# GNU Tar can only determine the compression type if input is a local file. +# If input is a pipe, it will not even attempt. +# +# This works around that limitation by using 'file' to determine +# the compression format via a local temp file of BUFLEN (1024) bytes. +# After determining format, it passes the correct flag to tar. +# +# Compression format determination is done with 'file' via use of a temp file +# +# The following are supported: +# +# # compression option provided explicitly: just exec tar +# $ ./smtar -tvz < my.tar.gz +# +# # file argument provided, tar can determine: just exec tar +# $ ./smtar -tvf my.tar.gz +# $ ./smtar -tvf my.tar +# +# # input from stdin. determine the appropriate compress flag and execute +# $ ./smtar -tv < my.tar.bz2 +# $ ./smtar -tv < my.tar.bz2 +# $ cat my.tar.xz | ./smtar -tv -f - +# $ wget http://some.tar | ./smtar -tv -f - +# +# +TEMPF="" +BUFLEN="1024" +cleanup() { [ -z "$TEMPF" ] || rm -f "$TEMPF"; } +error() { echo "$@" 1>&2; } +fail() { [ $# -eq 0 ] || error "$@"; exit 1; } +find_tar_filearg() { + # walk through list of args, return the 'file' argument in _RET + local cur="" next="" + while [ $# -ne 0 ]; do + cur="$1" + next="$2" + case "$cur" in + --file=*) _RET=${cur#*=}; return 0;; + --file) _RET=$next; return 0;; + --*=*) :;; + *-f) _RET="$next"; return 0;; + --) _RET=""; return 0;; + esac + shift + done + return 1 +} + +tar_has_compress_opt() { + # this isnt perfect, but catch common ways + # without fully parsing the args, we risk interpreting + # tar -xvf -J + # as bzip2 compression, where in reality its a file name '-J' + local cur="" next="" + while [ $# -ne 0 ]; do + cur="$1" + next="$2" + case "$cur" in + -z|--gzip|--gunzip|--ungzip) return 0;; + -j|--bzip2) return 0;; + -J|--xz) return 0;; + -Z|--compress|--uncompress) return 0;; + --) return 1;; + esac + shift + done + return 1 +} + +# see if we can get out without reading anything +if [ -t 0 ] || 
tar_has_compress_opt; then + # input is a terminal, or args contain a compress option + exec tar "$@" +fi + +# if there was a compression arg in input, then let it be +find_tar_filearg "$@" +if ! [ "$_RET" = "/dev/stdin" -o "$_RET" = "-" -o -z "$_RET" ]; then + exec "tar" "$@" +fi + +# now we have work to do +zopt="" +TEMPF=$(mktemp) || fail "mktemp failed" +trap cleanup EXIT +head -c "$BUFLEN" > "$TEMPF" || fail "FAILED: head -c '$BUFLEN'" +size=$(stat --format="%s" "$TEMPF") +file_out=$(LANG=C file --mime-type "$TEMPF") +# my.tar: application/x-tar +# my.tar.bz2: application/x-bzip2 +# my.tar.gz: application/gzip +# my.tar.xz: application/x-xz +# my.tar.Z: application/x-compress +if [ $? -eq 0 ]; then + case "$file_out" in + */x-bzip2|*/bzip2) zopt="--bzip2";; + */x-gzip|*/gzip) zopt="--gzip";; + */x-xz|*/xz) zopt="--xz";; + */x-compress|*/compress) zopt="--compress";; + *) zopt="";; + esac +else + error "WARN: 'file' failed on input" +fi +if [ "$size" -lt "$BUFLEN" ]; then + # input was less than BUFLEN chars, so we just exec tar with input from it + exec < "$TEMPF" + rm -f "$TEMPF" + exec tar $zopt "$@" +else + ( cat "$TEMPF" && rm "$TEMPF" && exec cat ) | exec tar $zopt "$@" +fi + +# vi: ts=4 expandtab syntax=sh diff -Nru curtin-0.1.0~bzr276/Makefile curtin-0.1.0~bzr314/Makefile --- curtin-0.1.0~bzr276/Makefile 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/Makefile 2015-11-23 16:22:09.000000000 +0000 @@ -1,6 +1,8 @@ TOP := $(abspath $(dir $(lastword $(MAKEFILE_LIST)))) CWD := $(shell pwd) PYTHON ?= python3 +CURTIN_VMTEST_IMAGE_SYNC ?= False +export CURTIN_VMTEST_IMAGE_SYNC build: @@ -28,11 +30,15 @@ echo " apt-get install -qy python3-sphinx"; exit 1; } 1>&2 make -C doc html +# By default don't sync images when running all tests. 
vmtest: nosetests3 $(noseopts) tests/vmtests vmtest-deps: @$(CWD)/tools/vmtest-system-setup +sync-images: + @$(CWD)/tools/vmtest-sync-images + .PHONY: all test pyflakes pyflakes3 pep8 build diff -Nru curtin-0.1.0~bzr276/tests/unittests/test_util.py curtin-0.1.0~bzr314/tests/unittests/test_util.py --- curtin-0.1.0~bzr276/tests/unittests/test_util.py 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/tests/unittests/test_util.py 2015-11-23 16:22:09.000000000 +0000 @@ -1,4 +1,5 @@ from unittest import TestCase +import mock import os import shutil import tempfile @@ -99,6 +100,45 @@ self.assertEqual(found, "/usr/bin2/fuzz") +class TestLsbRelease(TestCase): + def setUp(self): + self._reset_cache() + + def _reset_cache(self): + keys = [k for k in util._LSB_RELEASE.keys()] + for d in keys: + del util._LSB_RELEASE[d] + + @mock.patch("curtin.util.subp") + def test_lsb_release_functional(self, mock_subp): + output = '\n'.join([ + "Distributor ID: Ubuntu", + "Description: Ubuntu 14.04.2 LTS", + "Release: 14.04", + "Codename: trusty", + ]) + rdata = {'id': 'Ubuntu', 'description': 'Ubuntu 14.04.2 LTS', + 'codename': 'trusty', 'release': '14.04'} + + def fake_subp(cmd, capture=False): + return output, 'No LSB modules are available.' 
+ + mock_subp.side_effect = fake_subp + found = util.lsb_release() + mock_subp.assert_called_with(['lsb_release', '--all'], capture=True) + self.assertEqual(found, rdata) + + @mock.patch("curtin.util.subp") + def test_lsb_release_unavailable(self, mock_subp): + def doraise(*args, **kwargs): + raise util.ProcessExecutionError("foo") + mock_subp.side_effect = doraise + + expected = {k: "UNAVAILABLE" for k in + ('id', 'description', 'codename', 'release')} + self.assertEqual(util.lsb_release(), expected) + + class TestSubp(TestCase): def test_subp_handles_utf8(self): diff -Nru curtin-0.1.0~bzr276/tests/vmtests/helpers.py curtin-0.1.0~bzr314/tests/vmtests/helpers.py --- curtin-0.1.0~bzr276/tests/vmtests/helpers.py 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/tests/vmtests/helpers.py 2015-11-23 16:22:09.000000000 +0000 @@ -15,9 +15,11 @@ # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . -import threading +import os import subprocess import signal +import threading +from unittest import TestLoader class Command(object): @@ -93,3 +95,23 @@ def check_call(cmd, signal=signal.SIGTERM, **kwargs): # provide a 'check_call' like interface, but kill with a nice signal return Command(cmd, signal).run(**kwargs) + + +def find_releases(): + """Return a sorted list of releases defined in test cases.""" + # Use the TestLoader to load all tests cases defined within + # tests/vmtests/ and figure out which releases they are testing. + loader = TestLoader() + # dir with the vmtest modules (i.e. tests/vmtests/) + tests_dir = os.path.dirname(__file__) + # The root_dir for the curtin branch. (i.e. 
curtin/) + root_dir = os.path.split(os.path.split(tests_dir)[0])[0] + # Find all test modules defined in curtin/tests/vmtests/ + module_test_suites = loader.discover(tests_dir, top_level_dir=root_dir) + releases = set() + for mts in module_test_suites: + for class_test_suite in mts: + for test_case in class_test_suite: + if getattr(test_case, 'release', ''): + releases.add(getattr(test_case, 'release')) + return sorted(releases) diff -Nru curtin-0.1.0~bzr276/tests/vmtests/__init__.py curtin-0.1.0~bzr314/tests/vmtests/__init__.py --- curtin-0.1.0~bzr276/tests/vmtests/__init__.py 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/tests/vmtests/__init__.py 2015-11-23 16:22:09.000000000 +0000 @@ -2,16 +2,28 @@ import datetime import hashlib import logging +import json import os +import pathlib import re import shutil import subprocess import tempfile +import textwrap +import urllib import curtin.net as curtin_net from .helpers import check_call +IMAGE_SRC_URL = os.environ.get( + 'IMAGE_SRC_URL', + "http://maas.ubuntu.com/images/ephemeral-v2/daily/streams/v1/index.sjson") + IMAGE_DIR = os.environ.get("IMAGE_DIR", "/srv/images") +DEFAULT_SSTREAM_OPTS = [ + '--max=1', '--keyring=/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg'] +DEFAULT_FILTERS = ['arch=amd64', 'item_name=root-image.gz'] + DEVNULL = open(os.devnull, 'w') @@ -37,53 +49,95 @@ class ImageStore: - def __init__(self, base_dir): + """Local mirror of MAAS images simplestreams data.""" + + # By default sync on demand. + sync = True + + def __init__(self, source_url, base_dir): + """Initialize the ImageStore. + + source_url is the simplestreams source from where the images will be + downloaded. + base_dir is the target dir in the filesystem to keep the mirror. 
+ """ + self.source_url = source_url self.base_dir = base_dir if not os.path.isdir(self.base_dir): os.makedirs(self.base_dir) + self.url = pathlib.Path(self.base_dir).as_uri() + + def convert_image(self, root_image_path): + """Use maas2roottar to unpack root-image, kernel and initrd.""" + logger.debug('Converting image format') + subprocess.check_call(["tools/maas2roottar", root_image_path]) + + def sync_images(self, filters=DEFAULT_FILTERS): + """Sync MAAS images from source_url simplestreams.""" + # Verify sstream-mirror supports --progress option. + progress = [] + try: + out = subprocess.check_output( + ['sstream-mirror', '--help'], stderr=subprocess.STDOUT) + if isinstance(out, bytes): + out = out.decode() + if '--progress' in out: + progress = ['--progress'] + except subprocess.CalledProcessError: + # swallow the error here as if sstream-mirror --help failed + # then the real sstream-mirror call below will also fail. + pass + + cmd = ['sstream-mirror'] + DEFAULT_SSTREAM_OPTS + progress + [ + self.source_url, self.base_dir] + filters + logger.debug('Syncing images {}'.format(cmd)) + out = subprocess.check_output(cmd) + logger.debug(out) - def get_image(self, repo, release, arch): - # query sstream for root image + def get_image(self, release, arch, filters=None): + """Return local path for root image, kernel and initrd.""" + if filters is None: + filters = [ + 'release=%s' % release, 'krel=%s' % release, 'arch=%s' % arch] + # Query local sstream for compressed root image. 
logger.debug( - 'Query simplestreams for root image: ' - 'release={release} arch={arch}'.format(release=release, arch=arch)) - cmd = ["tools/usquery", "--max=1", repo, "release=%s" % release, - "krel=%s" % release, "arch=%s" % arch, - "item_name=root-image.gz"] + 'Query simplestreams for root image: %s', filters) + cmd = ['sstream-query'] + DEFAULT_SSTREAM_OPTS + [ + self.url, 'item_name=root-image.gz'] + filters logger.debug(" ".join(cmd)) out = subprocess.check_output(cmd) logger.debug(out) + # No output means we have no images locally, so try to sync. + # If there's output, ImageStore.sync decides if images should be + # synced or not. + if not out or self.sync: + self.sync_images(filters=filters) + out = subprocess.check_output(cmd) sstream_data = ast.literal_eval(bytes.decode(out)) - - # Check if we already have the image - logger.debug('Checking cache for image') - checksum = "" - release_dir = os.path.join(self.base_dir, repo, release, arch, - sstream_data['version_name']) - - def p(x): - return os.path.join(release_dir, x) - - if os.path.isdir(release_dir) and os.path.exists(p("root-image.gz")): - # get checksum of cached image - with open(p("root-image.gz"), "rb") as fp: - checksum = hashlib.sha256(fp.read()).hexdigest() - if not os.path.exists(p("root-image.gz")) or \ - checksum != sstream_data['sha256'] or \ - not os.path.exists(p("root-image")) or \ - not os.path.exists(p("root-image-kernel")) or \ - not os.path.exists(p("root-image-initrd")): - # clear dir, download, and extract image - logger.debug('Image not found, downloading...') - shutil.rmtree(release_dir, ignore_errors=True) - os.makedirs(release_dir) - subprocess.check_call( - ["wget", "-c", sstream_data['item_url'], "-O", - p("root-image.gz")]) - logger.debug('Converting image format') - subprocess.check_call(["tools/maas2roottar", p("root-image.gz")]) - return (p("root-image"), p("root-image-kernel"), - p("root-image-initrd")) + root_image_gz = 
urllib.parse.urlsplit(sstream_data['item_url']).path + # Make sure the image checksums correctly. Ideally + # sstream-[query,mirror] would make the verification for us. + # See https://bugs.launchpad.net/simplestreams/+bug/1513625 + with open(root_image_gz, "rb") as fp: + checksum = hashlib.sha256(fp.read()).hexdigest() + if checksum != sstream_data['sha256']: + self.sync_images(filters=filters) + out = subprocess.check_output(cmd) + sstream_data = ast.literal_eval(bytes.decode(out)) + root_image_gz = urllib.parse.urlsplit( + sstream_data['item_url']).path + + image_dir = os.path.dirname(root_image_gz) + root_image_path = os.path.join(image_dir, 'root-image') + tarball = os.path.join(image_dir, 'root-image.tar.gz') + kernel_path = os.path.join(image_dir, 'root-image-kernel') + initrd_path = os.path.join(image_dir, 'root-image-initrd') + # Check if we need to unpack things from the compressed image. + if (not os.path.exists(root_image_path) or + not os.path.exists(kernel_path) or + not os.path.exists(initrd_path)): + self.convert_image(root_image_gz) + return (root_image_path, kernel_path, initrd_path, tarball) class TempDir: @@ -131,42 +185,58 @@ subprocess.check_call(['tar', '-C', self.mnt, '-xf', self.output_disk], stdout=DEVNULL, stderr=subprocess.STDOUT) - def __del__(self): + def remove_tmpdir(self): # remove tempdir - shutil.rmtree(self.tmpdir) + if os.path.exists(self.tmpdir): + shutil.rmtree(self.tmpdir) class VMBaseClass: + disk_to_check = {} + fstab_expected = {} + extra_kern_args = None + @classmethod - def setUpClass(self): + def setUpClass(cls): logger.debug('Acquiring boot image') # get boot img - image_store = ImageStore(IMAGE_DIR) - (boot_img, boot_kernel, boot_initrd) = image_store.get_image( - self.repo, self.release, self.arch) + image_store = ImageStore(IMAGE_SRC_URL, IMAGE_DIR) + # Disable sync if env var is set. 
+ image_store.sync = get_env_var_bool('CURTIN_VMTEST_IMAGE_SYNC', False) + logger.debug("Image sync = %s", image_store.sync) + (boot_img, boot_kernel, boot_initrd, tarball) = image_store.get_image( + cls.release, cls.arch) # set up tempdir logger.debug('Setting up tempdir') - self.td = TempDir(self.user_data) - self.install_log = os.path.join(self.td.tmpdir, 'install-serial.log') - self.boot_log = os.path.join(self.td.tmpdir, 'boot-serial.log') + cls.td = TempDir( + generate_user_data(collect_scripts=cls.collect_scripts)) + cls.install_log = os.path.join(cls.td.tmpdir, 'install-serial.log') + cls.boot_log = os.path.join(cls.td.tmpdir, 'boot-serial.log') # create launch cmd cmd = ["tools/launch", "-v"] - if not self.interactive: + if not cls.interactive: cmd.extend(["--silent", "--power=off"]) - cmd.extend(["--serial-log=" + self.install_log]) + cmd.extend(["--serial-log=" + cls.install_log]) + + if cls.extra_kern_args: + cmd.extend(["--append=" + cls.extra_kern_args]) + + # publish the root tarball + install_src = "PUBURL/" + os.path.basename(tarball) + cmd.append("--publish=%s" % tarball) # check for network configuration - self.network_state = curtin_net.parse_net_config(self.conf_file) - logger.debug("Network state: {}".format(self.network_state)) + cls.network_state = curtin_net.parse_net_config(cls.conf_file) + logger.debug("Network state: {}".format(cls.network_state)) # build -n arg list with macaddrs from net_config physical config macs = [] interfaces = {} - if self.network_state: - interfaces = self.network_state.get('interfaces') + if cls.network_state: + interfaces = cls.network_state.get('interfaces') for ifname in interfaces: logger.debug("Interface name: {}".format(ifname)) iface = interfaces.get(ifname) @@ -182,46 +252,55 @@ # build disk arguments extra_disks = [] - for (disk_no, disk_sz) in enumerate(self.extra_disks): - dpath = os.path.join(self.td.tmpdir, 'extra_disk_%d.img' % disk_no) + for (disk_no, disk_sz) in enumerate(cls.extra_disks): + 
dpath = os.path.join(cls.td.tmpdir, 'extra_disk_%d.img' % disk_no) extra_disks.extend(['--disk', '{}:{}'.format(dpath, disk_sz)]) - cmd.extend(netdevs + ["--disk", self.td.target_disk] + extra_disks + + # proxy config + configs = [cls.conf_file] + proxy = get_apt_proxy() + if get_apt_proxy is not None: + proxy_config = os.path.join(cls.td.tmpdir, 'proxy.cfg') + with open(proxy_config, "w") as fp: + fp.write(json.dumps({'apt_proxy': proxy}) + "\n") + configs.append(proxy_config) + + cmd.extend(netdevs + ["--disk", cls.td.target_disk] + extra_disks + [boot_img, "--kernel=%s" % boot_kernel, "--initrd=%s" % - boot_initrd, "--", "curtin", "-vv", - "install", "--config=%s" % self.conf_file, "cp:///"]) + boot_initrd, "--", "curtin", "-vv", "install"] + + ["--config=%s" % f for f in configs] + + [install_src]) # run vm with installer - lout_path = os.path.join(self.td.tmpdir, "launch-install.out") + lout_path = os.path.join(cls.td.tmpdir, "launch-install.out") try: logger.debug('Running curtin installer') logger.debug('{}'.format(" ".join(cmd))) with open(lout_path, "wb") as fpout: - check_call(cmd, timeout=self.install_timeout, + check_call(cmd, timeout=cls.install_timeout, stdout=fpout, stderr=subprocess.STDOUT) except subprocess.TimeoutExpired: logger.debug('Curtin installer failed') raise finally: - if os.path.exists(self.install_log): - with open(self.install_log, 'r', encoding='utf-8') as l: + if os.path.exists(cls.install_log): + with open(cls.install_log, 'r', encoding='utf-8') as l: logger.debug( u'Serial console output:\n{}'.format(l.read())) else: logger.warn("Did not have a serial log file from launch.") logger.debug('') - if os.path.exists(self.install_log): + if os.path.exists(cls.install_log): logger.debug('Checking curtin install output for errors') - with open(self.install_log) as l: + with open(cls.install_log) as l: install_log = l.read() - errors = re.findall( - '\[.*\]\ cloud-init.*:.*Installation\ failed', install_log) - if len(errors) > 0: + errmsg, 
errors = check_install_log(install_log) + if errmsg: for e in errors: - logger.debug(e) - logger.debug('Errors during curtin installer') - raise Exception('Errors during curtin installer') + logger.error(e) + logger.error(errmsg) + raise Exception(cls.__name__ + ":" + errmsg) else: logger.debug('Install OK') else: @@ -231,45 +310,47 @@ extra_disks = [x if ":" not in x else x.split(':')[0] for x in extra_disks] # create xkvm cmd - cmd = (["tools/xkvm", "-v"] + netdevs + ["--disk", self.td.target_disk, - "--disk", self.td.output_disk] + extra_disks + + cmd = (["tools/xkvm", "-v"] + netdevs + ["--disk", cls.td.target_disk, + "--disk", cls.td.output_disk] + extra_disks + ["--", "-drive", - "file=%s,if=virtio,media=cdrom" % self.td.seed_disk, + "file=%s,if=virtio,media=cdrom" % cls.td.seed_disk, "-m", "1024"]) - if not self.interactive: - cmd.extend(["-nographic", "-serial", "file:" + self.boot_log]) + if not cls.interactive: + cmd.extend(["-nographic", "-serial", "file:" + cls.boot_log]) # run vm with installed system, fail if timeout expires try: logger.debug('Booting target image') logger.debug('{}'.format(" ".join(cmd))) - xout_path = os.path.join(self.td.tmpdir, "xkvm-boot.out") + xout_path = os.path.join(cls.td.tmpdir, "xkvm-boot.out") with open(xout_path, "wb") as fpout: - check_call(cmd, timeout=self.boot_timeout, + check_call(cmd, timeout=cls.boot_timeout, stdout=fpout, stderr=subprocess.STDOUT) except subprocess.TimeoutExpired: logger.debug('Booting after install failed') raise finally: - if os.path.exists(self.boot_log): - with open(self.boot_log, 'r', encoding='utf-8') as l: + if os.path.exists(cls.boot_log): + with open(cls.boot_log, 'r', encoding='utf-8') as l: logger.debug( u'Serial console output:\n{}'.format(l.read())) # mount output disk - self.td.mount_output_disk() + cls.td.mount_output_disk() logger.debug('Ready for testcases') @classmethod - def tearDownClass(self): - logger.debug('Removing launch logfile') + def tearDownClass(cls): + if not 
get_env_var_bool('CURTIN_VMTEST_KEEP_DATA', False): + logger.debug('Removing tmpdir: {}'.format(cls.td.tmpdir)) + cls.td.remove_tmpdir() @classmethod - def expected_interfaces(self): + def expected_interfaces(cls): expected = [] interfaces = {} - if self.network_state: - interfaces = self.network_state.get('interfaces') + if cls.network_state: + interfaces = cls.network_state.get('interfaces') # handle interface aliases when subnets have multiple entries for iface in interfaces.values(): subnets = iface.get('subnets', {}) @@ -284,17 +365,17 @@ return expected @classmethod - def get_network_state(self): - return self.network_state + def get_network_state(cls): + return cls.network_state @classmethod - def get_expected_etc_network_interfaces(self): - return curtin_net.render_interfaces(self.network_state) + def get_expected_etc_network_interfaces(cls): + return curtin_net.render_interfaces(cls.network_state) @classmethod - def get_expected_etc_resolvconf(self): + def get_expected_etc_resolvconf(cls): ifaces = {} - eni = curtin_net.render_interfaces(self.network_state) + eni = curtin_net.render_interfaces(cls.network_state) curtin_net.parse_deb_config_data(ifaces, eni, None) return ifaces @@ -303,6 +384,16 @@ for f in files: self.assertTrue(os.path.exists(os.path.join(self.td.mnt, f))) + def check_file_strippedline(self, filename, search): + with open(os.path.join(self.td.mnt, filename), "r") as fp: + data = list(i.strip() for i in fp.readlines()) + self.assertIn(search, data) + + def check_file_regex(self, filename, regex): + with open(os.path.join(self.td.mnt, filename), "r") as fp: + data = fp.read() + self.assertRegex(data, regex) + def get_blkid_data(self, blkid_file): with open(os.path.join(self.td.mnt, blkid_file)) as fp: data = fp.read() @@ -313,3 +404,141 @@ val = line.split('=') ret[val[0]] = val[1] return ret + + def test_fstab(self): + if (os.path.exists(self.td.mnt + "fstab") and + self.fstab_expected is not None): + with open(os.path.join(self.td.mnt, 
"fstab")) as fp: + fstab_lines = fp.readlines() + fstab_entry = None + for line in fstab_lines: + for device, mntpoint in self.fstab_expected.items(): + if device in line: + fstab_entry = line + self.assertIsNotNone(fstab_entry) + self.assertEqual(fstab_entry.split(' ')[1], + mntpoint) + + def test_dname(self): + if (os.path.exists(self.td.mnt + "ls_dname") and + self.disk_to_check is not None): + with open(os.path.join(self.td.mnt, "ls_dname"), "r") as fp: + contents = fp.read().splitlines() + for diskname, part in self.disk_to_check.items(): + if part is not 0: + link = diskname + "-part" + str(part) + self.assertIn(link, contents) + self.assertIn(diskname, contents) + + +def check_install_log(install_log): + # look if install is OK via curtin 'Installation ok" + # if we dont find that, scan for known error messages and report + # if we don't see any errors, fail with general error + errors = [] + errmsg = None + + # regexps expected in curtin output + install_pass = "Installation finished. No error reported." + install_fail = "({})".format("|".join([ + 'Installation\ failed', + 'ImportError: No module named.*', + 'Unexpected error while running command', + 'E: Unable to locate package.*'])) + + install_is_ok = re.findall(install_pass, install_log) + if len(install_is_ok) == 0: + errors = re.findall(install_fail, install_log) + if len(errors) > 0: + for e in errors: + logger.error(e) + errmsg = ('Errors during curtin installer') + else: + errmsg = ('Failed to verify Installation is OK') + + return errmsg, errors + + +def get_apt_proxy(): + # get setting for proxy. 
should have same result as in tools/launch + apt_proxy = os.environ.get('apt_proxy') + if apt_proxy: + return apt_proxy + + get_apt_config = textwrap.dedent(""" + command -v apt-config >/dev/null 2>&1 + out=$(apt-config shell x Acquire::HTTP::Proxy) + out=$(sh -c 'eval $1 && echo $x' -- "$out") + [ -n "$out" ] + echo "$out" + """) + + try: + out = subprocess.check_output(['sh', '-c', get_apt_config]) + if isinstance(out, bytes): + out = out.decode() + out = out.rstrip() + return out + except subprocess.CalledProcessError: + pass + + return None + + +def generate_user_data(collect_scripts=None, apt_proxy=None): + # this returns the user data for the *booted* system + # its a cloud-config-archive type, which is + # just a list of parts. the 'x-shellscript' parts + # will be executed in the order they're put in + if collect_scripts is None: + collect_scripts = [] + parts = [] + base_cloudconfig = { + 'password': 'passw0rd', + 'chpasswd': {'expire': False}, + 'power_state': {'mode': 'poweroff'}, + } + + parts = [{'type': 'text/cloud-config', + 'content': json.dumps(base_cloudconfig, indent=1)}] + + output_dir_macro = 'OUTPUT_COLLECT_D' + output_dir = '/mnt/output' + output_device = '/dev/vdb' + + collect_prep = textwrap.dedent("mkdir -p " + output_dir) + collect_post = textwrap.dedent( + 'tar -C "%s" -cf "%s" .' % (output_dir, output_device)) + + # failsafe poweroff runs on precise only, where power_state does + # not exist. 
+ precise_poweroff = textwrap.dedent("""#!/bin/sh + [ "$(lsb_release -sc)" = "precise" ] || exit 0; + shutdown -P now "Shutting down on precise" + """) + + scripts = ([collect_prep] + collect_scripts + [collect_post] + + [precise_poweroff]) + + for part in scripts: + if not part.startswith("#!"): + part = "#!/bin/sh\n" + part + part = part.replace(output_dir_macro, output_dir) + logger.debug('Cloud config archive content (pre-json):' + part) + parts.append({'content': part, 'type': 'text/x-shellscript'}) + return '#cloud-config-archive\n' + json.dumps(parts, indent=1) + + +def get_env_var_bool(envname, default=False): + """get a boolean environment variable. + + If environment variable is not set, use default. + False values are case insensitive 'false', '0', ''.""" + if not isinstance(default, bool): + raise ValueError("default '%s' for '%s' is not a boolean" % + (default, envname)) + val = os.environ.get(envname) + if val is None: + return default + + return val.lower() not in ("false", "0", "") diff -Nru curtin-0.1.0~bzr276/tests/vmtests/test_basic.py curtin-0.1.0~bzr314/tests/vmtests/test_basic.py --- curtin-0.1.0~bzr276/tests/vmtests/test_basic.py 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/tests/vmtests/test_basic.py 2015-11-23 16:22:09.000000000 +0000 @@ -1,4 +1,7 @@ -from . import VMBaseClass +from . import ( + VMBaseClass, + get_apt_proxy) + from unittest import TestCase import os @@ -12,22 +15,19 @@ install_timeout = 600 boot_timeout = 120 extra_disks = ['128G'] - user_data = textwrap.dedent("""\ - #cloud-config - password: passw0rd - chpasswd: { expire: False } - bootcmd: - - mkdir /media/output - runcmd: - - blkid -o export /dev/vda > /media/output/blkid_output_vda - - blkid -o export /dev/vda1 > /media/output/blkid_output_vda1 - - blkid -o export /dev/vda2 > /media/output/blkid_output_vda2 - - cat /etc/fstab > /media/output/fstab - - ls /dev/disk/by-dname/ > /media/output/ls_dname - - [tar, -C, /media/output, -cf, /dev/vdb, .] 
- power_state: - mode: poweroff - """) + collect_scripts = [textwrap.dedent(""" + cd OUTPUT_COLLECT_D + blkid -o export /dev/vda > blkid_output_vda + blkid -o export /dev/vda1 > blkid_output_vda1 + blkid -o export /dev/vda2 > blkid_output_vda2 + cat /etc/fstab > fstab + ls /dev/disk/by-dname/ > ls_dname + + v="" + out=$(apt-config shell v Acquire::HTTP::Proxy) + eval "$out" + echo "$v" > apt-proxy + """)] def test_output_files_exist(self): self.output_files_exist( @@ -67,6 +67,17 @@ self.assertIsNotNone(fstab_entry) self.assertEqual(fstab_entry.split(' ')[1], "/home") + def test_proxy_set(self): + expected = get_apt_proxy() + with open(os.path.join(self.td.mnt, "apt-proxy")) as fp: + apt_proxy_found = fp.read().rstrip() + if expected: + # the proxy should have gotten set through + self.assertIn(expected, apt_proxy_found) + else: + # no proxy, so the output of apt-config dump should be empty + self.assertEqual("", apt_proxy_found) + class WilyTestBasic(TestBasicAbs, TestCase): __test__ = True @@ -80,3 +91,16 @@ repo = "maas-daily" release = "vivid" arch = "amd64" + + +class PreciseTestBasic(TestBasicAbs, TestCase): + __test__ = True + repo = "maas-daily" + release = "precise" + arch = "amd64" + + def test_ptable(self): + print("test_ptable does not work for Precise") + + def test_dname(self): + print("test_dname does not work for Precise") diff -Nru curtin-0.1.0~bzr276/tests/vmtests/test_bonding.py curtin-0.1.0~bzr314/tests/vmtests/test_bonding.py --- curtin-0.1.0~bzr276/tests/vmtests/test_bonding.py 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/tests/vmtests/test_bonding.py 2015-11-23 16:22:09.000000000 +0000 @@ -54,24 +54,15 @@ boot_timeout = 600 extra_disks = [] extra_nics = [] - user_data = textwrap.dedent("""\ - #cloud-config - password: passw0rd - chpasswd: { expire: False } - bootcmd: - - mkdir -p /media/output - runcmd: - - ifconfig -a > /media/output/ifconfig_a - - cp -av /etc/network/interfaces /media/output - - cp -av 
/etc/udev/rules.d/70-persistent-net.rules /media/output - - ip -o route show > /media/output/ip_route_show - - route -n > /media/output/route_n - - dpkg-query -W -f '${Status}' ifenslave > \ - /media/output/ifenslave_installed - - [tar, -C, /media/output, -cf, /dev/vdb, .] - power_state: - mode: poweroff - """) + collect_scripts = [textwrap.dedent(""" + cd OUTPUT_COLLECT_D + ifconfig -a > ifconfig_a + cp -av /etc/network/interfaces . + cp -av /etc/udev/rules.d/70-persistent-net.rules . + ip -o route show > ip_route_show + route -n > route_n + dpkg-query -W -f '${Status}' ifenslave > ifenslave_installed + """)] def test_output_files_exist(self): self.output_files_exist(["ifconfig_a", @@ -214,21 +205,21 @@ self.assertEqual(gw_ip, gw) -class TrustyTestBasic(TestNetworkAbs, TestCase): +class TrustyTestBonding(TestNetworkAbs, TestCase): __test__ = False repo = "maas-daily" release = "trusty" arch = "amd64" -class WilyTestBasic(TestNetworkAbs, TestCase): +class WilyTestBonding(TestNetworkAbs, TestCase): __test__ = True repo = "maas-daily" release = "wily" arch = "amd64" -class VividTestBasic(TestNetworkAbs, TestCase): +class VividTestBonding(TestNetworkAbs, TestCase): __test__ = True repo = "maas-daily" release = "vivid" diff -Nru curtin-0.1.0~bzr276/tests/vmtests/test_lvm.py curtin-0.1.0~bzr314/tests/vmtests/test_lvm.py --- curtin-0.1.0~bzr276/tests/vmtests/test_lvm.py 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/tests/vmtests/test_lvm.py 2015-11-23 16:22:09.000000000 +0000 @@ -2,80 +2,52 @@ from unittest import TestCase import textwrap -import os -class TestMdadmBcacheAbs(VMBaseClass): +class TestLvmAbs(VMBaseClass, TestCase): __test__ = False conf_file = "examples/tests/lvm.yaml" + repo = "maas-daily" + arch = "amd64" install_timeout = 600 boot_timeout = 100 interactive = False extra_disks = [] - user_data = textwrap.dedent("""\ - #cloud-config - password: passw0rd - chpasswd: { expire: False } - bootcmd: - - mkdir -p /media/output - runcmd: - - cat 
/etc/fstab > /media/output/fstab - - ls /dev/disk/by-dname > /media/output/ls_dname - - pvdisplay -C --separator = -o vg_name,pv_name --noheadings > \ - /media/output/pvs - - lvdisplay -C --separator = -o lv_name,vg_name --noheadings > \ - /media/output/lvs - - [tar, -C, /media/output, -cf, /dev/vdb, .] - power_state: - mode: poweroff - """) - - def test_fstab(self): - with open(os.path.join(self.td.mnt, "fstab")) as fp: - fstab_lines = fp.readlines() - fstab_entry = None - for line in fstab_lines: - if "/dev/vg1/lv1" in line: - fstab_entry = line - break - self.assertIsNotNone(fstab_entry) - self.assertEqual(fstab_entry.split(' ')[1], "/srv/data") + collect_scripts = [textwrap.dedent(""" + cd OUTPUT_COLLECT_D + cat /etc/fstab > fstab + ls /dev/disk/by-dname > ls_dname + pvdisplay -C --separator = -o vg_name,pv_name --noheadings > pvs + lvdisplay -C --separator = -o lv_name,vg_name --noheadings > lvs + """)] + fstab_expected = { + '/dev/vg1/lv1': '/srv/data', + '/dev/vg1/lv2': '/srv/backup', + } + disk_to_check = {'main_disk': 1, + 'main_disk': 5, + 'main_disk': 6, + 'vg1-lv1': 0, + 'vg1-lv2': 0} def test_lvs(self): - with open(os.path.join(self.td.mnt, "lvs"), "r") as fp: - lv_data = list(i.strip() for i in fp.readlines()) - self.assertIn("lv1=vg1", lv_data) - self.assertIn("lv2=vg1", lv_data) + self.check_file_strippedline("lvs", "lv1=vg1") + self.check_file_strippedline("lvs", "lv2=vg1") def test_pvs(self): - with open(os.path.join(self.td.mnt, "pvs"), "r") as fp: - lv_data = list(i.strip() for i in fp.readlines()) - self.assertIn("vg1=/dev/vda5", lv_data) - self.assertIn("vg1=/dev/vda6", lv_data) + self.check_file_strippedline("pvs", "vg1=/dev/vda5") + self.check_file_strippedline("pvs", "vg1=/dev/vda6") def test_output_files_exist(self): self.output_files_exist( ["fstab", "ls_dname"]) - def test_dname(self): - with open(os.path.join(self.td.mnt, "ls_dname"), "r") as fp: - contents = fp.read().splitlines() - for link in list(("main_disk-part%s" % i for i in (1, 
5, 6))): - self.assertIn(link, contents) - self.assertIn("main_disk", contents) - self.assertIn("vg1-lv1", contents) - self.assertIn("vg1-lv2", contents) - -class WilyTestLvm(TestMdadmBcacheAbs, TestCase): +class WilyTestLvm(TestLvmAbs): __test__ = True - repo = "maas-daily" release = "wily" - arch = "amd64" -class VividTestLvm(TestMdadmBcacheAbs, TestCase): +class VividTestLvm(TestLvmAbs): __test__ = True - repo = "maas-daily" release = "vivid" - arch = "amd64" diff -Nru curtin-0.1.0~bzr276/tests/vmtests/test_mdadm_bcache.py curtin-0.1.0~bzr314/tests/vmtests/test_mdadm_bcache.py --- curtin-0.1.0~bzr276/tests/vmtests/test_mdadm_bcache.py 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/tests/vmtests/test_mdadm_bcache.py 2015-11-23 16:22:09.000000000 +0000 @@ -5,79 +5,291 @@ import os -class TestMdadmBcacheAbs(VMBaseClass): +class TestMdadmAbs(VMBaseClass, TestCase): __test__ = False - conf_file = "examples/tests/mdadm_bcache.yaml" + repo = "maas-daily" + arch = "amd64" install_timeout = 600 boot_timeout = 100 interactive = False extra_disks = [] - user_data = textwrap.dedent("""\ - #cloud-config - password: passw0rd - chpasswd: { expire: False } - bootcmd: - - mkdir -p /media/output - runcmd: - - cat /etc/fstab > /media/output/fstab - - mdadm --detail --scan > /media/output/mdadm_status - - bcache-super-show /dev/vda6 > /media/output/bcache_super_vda6 - - ls /sys/fs/bcache > /media/output/bcache_ls - - ls /dev/disk/by-dname > /media/output/ls_dname - - [tar, -C, /media/output, -cf, /dev/vdb, .] 
- power_state: - mode: poweroff - """) - - def test_fstab(self): - with open(os.path.join(self.td.mnt, "fstab")) as fp: - fstab_lines = fp.readlines() - fstab_entry = None - for line in fstab_lines: - if "/dev/bcache0" in line: - fstab_entry = line - break - self.assertIsNotNone(fstab_entry) - self.assertEqual(fstab_entry.split(' ')[1], "/media/data") + active_mdadm = "1" + collect_scripts = [textwrap.dedent(""" + cd OUTPUT_COLLECT_D + cat /etc/fstab > fstab + mdadm --detail --scan > mdadm_status + mdadm --detail --scan | grep -c ubuntu > mdadm_active1 + grep -c active /proc/mdstat > mdadm_active2 + ls /dev/disk/by-dname > ls_dname + """)] + + def test_mdadm_output_files_exist(self): + self.output_files_exist( + ["fstab", "mdadm_status", "mdadm_active1", "mdadm_active2", + "ls_dname"]) def test_mdadm_status(self): - with open(os.path.join(self.td.mnt, "mdadm_status"), "r") as fp: - mdadm_status = fp.read() - self.assertTrue("/dev/md/ubuntu" in mdadm_status) + # ubuntu: is the name assigned to the md array + self.check_file_regex("mdadm_status", r"ubuntu:[0-9]*") + self.check_file_strippedline("mdadm_active1", self.active_mdadm) + self.check_file_strippedline("mdadm_active2", self.active_mdadm) - def test_output_files_exist(self): - self.output_files_exist( - ["fstab", "mdadm_status", "bcache_super_vda6", "bcache_ls"]) - def test_dname(self): - with open(os.path.join(self.td.mnt, "ls_dname"), "r") as fp: - contents = fp.read().splitlines() - for link in list(("main_disk-part%s" % i for i in range(1, 6))): - self.assertIn(link, contents) - self.assertIn("md0", contents) - self.assertIn("cached_array", contents) - self.assertIn("main_disk", contents) +class TestMdadmBcacheAbs(TestMdadmAbs): + conf_file = "examples/tests/mdadm_bcache.yaml" + disk_to_check = {'main_disk': 1, + 'main_disk': 2, + 'main_disk': 3, + 'main_disk': 4, + 'main_disk': 5, + 'main_disk': 6, + 'md0': 0, + 'cached_array': 0, + 'cached_array_2': 0} + + collect_scripts = TestMdadmAbs.collect_scripts + 
[textwrap.dedent(""" + cd OUTPUT_COLLECT_D + bcache-super-show /dev/vda6 > bcache_super_vda6 + bcache-super-show /dev/vda7 > bcache_super_vda7 + bcache-super-show /dev/md0 > bcache_super_md0 + ls /sys/fs/bcache > bcache_ls + cat /sys/block/bcache0/bcache/cache_mode > bcache_cache_mode + cat /proc/mounts > proc_mounts + """)] + fstab_expected = { + '/dev/bcache0': '/media/data', + '/dev/bcache1': '/media/bcache1', + } + + def test_bcache_output_files_exist(self): + self.output_files_exist(["bcache_super_vda6", + "bcache_super_vda7", + "bcache_super_md0", + "bcache_ls", + "bcache_cache_mode"]) def test_bcache_status(self): + bcache_supers = [ + "bcache_super_vda6", + "bcache_super_vda7", + "bcache_super_md0", + ] bcache_cset_uuid = None - with open(os.path.join(self.td.mnt, "bcache_super_vda6"), "r") as fp: - for line in fp.read().splitlines(): - if line != "" and line.split()[0] == "cset.uuid": - bcache_cset_uuid = line.split()[-1].rstrip() - self.assertIsNotNone(bcache_cset_uuid) - with open(os.path.join(self.td.mnt, "bcache_ls"), "r") as fp: - self.assertTrue(bcache_cset_uuid in fp.read().splitlines()) + found = {} + for bcache_super in bcache_supers: + with open(os.path.join(self.td.mnt, bcache_super), "r") as fp: + for line in fp.read().splitlines(): + if line != "" and line.split()[0] == "cset.uuid": + bcache_cset_uuid = line.split()[-1].rstrip() + if bcache_cset_uuid in found: + found[bcache_cset_uuid].append(bcache_super) + else: + found[bcache_cset_uuid] = [bcache_super] + self.assertIsNotNone(bcache_cset_uuid) + with open(os.path.join(self.td.mnt, "bcache_ls"), "r") as fp: + self.assertTrue(bcache_cset_uuid in fp.read().splitlines()) + + # one cset.uuid for all devices + self.assertEqual(len(found), 1) + + # three devices with same cset.uuid + self.assertEqual(len(found[bcache_cset_uuid]), 3) + + # check the cset.uuid in the dict + self.assertEqual(list(found.keys()).pop(), + bcache_cset_uuid) + + def test_bcache_cachemode(self): + 
self.check_file_regex("bcache_cache_mode", r"\[writeback\]") -class WilyTestMdadmBcache(TestMdadmBcacheAbs, TestCase): +class WilyTestMdadmBcache(TestMdadmBcacheAbs): __test__ = True - repo = "maas-daily" release = "wily" - arch = "amd64" -class VividTestMdadmBcache(TestMdadmBcacheAbs, TestCase): +class VividTestMdadmBcache(TestMdadmBcacheAbs): + __test__ = True + release = "vivid" + + +class TestMirrorbootAbs(TestMdadmAbs): + # alternative config for more complex setup + conf_file = "examples/tests/mirrorboot.yaml" + # initialize secondary disk + extra_disks = ['4G'] + disk_to_check = {'main_disk': 1, + 'main_disk': 2, + 'second_disk': 1, + 'md0': 0} + + +class WilyTestMirrorboot(TestMirrorbootAbs): + __test__ = True + release = "wily" + + +class VividTestMirrorboot(TestMirrorbootAbs): + __test__ = True + release = "vivid" + + +class TestRaid5bootAbs(TestMdadmAbs): + # alternative config for more complex setup + conf_file = "examples/tests/raid5boot.yaml" + # initialize secondary disk + extra_disks = ['4G', '4G'] + disk_to_check = {'main_disk': 1, + 'main_disk': 2, + 'second_disk': 1, + 'third_disk': 1, + 'md0': 0} + + +class WilyTestRaid5boot(TestRaid5bootAbs): + __test__ = True + release = "wily" + + +class VividTestRaid5boot(TestRaid5bootAbs): + __test__ = True + release = "vivid" + + +class TestRaid6bootAbs(TestMdadmAbs): + # alternative config for more complex setup + conf_file = "examples/tests/raid6boot.yaml" + # initialize secondary disk + extra_disks = ['4G', '4G', '4G'] + disk_to_check = {'main_disk': 1, + 'main_disk': 2, + 'second_disk': 1, + 'third_disk': 1, + 'fourth_disk': 1, + 'md0': 0} + collect_scripts = TestMdadmAbs.collect_scripts + [textwrap.dedent(""" + cd OUTPUT_COLLECT_D + mdadm --detail --scan > mdadm_detail + """)] + + def test_raid6_output_files_exist(self): + self.output_files_exist( + ["mdadm_detail"]) + + def test_mdadm_custom_name(self): + # the raid6boot.yaml sets this name, check if it was set + self.check_file_regex("mdadm_detail", 
r"ubuntu:foobar") + + +class WilyTestRaid6boot(TestRaid6bootAbs): + __test__ = True + release = "wily" + + +class VividTestRaid6boot(TestRaid6bootAbs): + __test__ = True + release = "vivid" + + +class TestRaid10bootAbs(TestMdadmAbs): + # alternative config for more complex setup + conf_file = "examples/tests/raid10boot.yaml" + # initialize secondary disk + extra_disks = ['4G', '4G', '4G'] + disk_to_check = {'main_disk': 1, + 'main_disk': 2, + 'second_disk': 1, + 'third_disk': 1, + 'fourth_disk': 1, + 'md0': 0} + + +class WilyTestRaid10boot(TestRaid10bootAbs): + __test__ = True + release = "wily" + + +class VividTestRaid10boot(TestRaid10bootAbs): + __test__ = True + release = "vivid" + + +class TestAllindataAbs(TestMdadmAbs): + # more complex, needs more time + install_timeout = 900 + boot_timeout = 200 + # alternative config for more complex setup + conf_file = "examples/tests/allindata.yaml" + # we have to avoid a systemd hang due to the way it handles dmcrypt + extra_kern_args = "--- luks=no" + active_mdadm = "4" + # initialize secondary disk + extra_disks = ['5G', '5G', '5G'] + disk_to_check = {'main_disk': 1, + 'main_disk': 2, + 'main_disk': 3, + 'main_disk': 4, + 'main_disk': 5, + 'second_disk': 1, + 'second_disk': 2, + 'second_disk': 3, + 'second_disk': 4, + 'third_disk': 1, + 'third_disk': 2, + 'third_disk': 3, + 'third_disk': 4, + 'fourth_disk': 1, + 'fourth_disk': 2, + 'fourth_disk': 3, + 'fourth_disk': 4, + 'md0': 0, + 'md1': 0, + 'md2': 0, + 'md3': 0, + 'vg1-lv1': 0, + 'vg1-lv2': 0} + collect_scripts = TestMdadmAbs.collect_scripts + [textwrap.dedent(""" + cd OUTPUT_COLLECT_D + pvdisplay -C --separator = -o vg_name,pv_name --noheadings > pvs + lvdisplay -C --separator = -o lv_name,vg_name --noheadings > lvs + cat /etc/crypttab > crypttab + yes "testkey" | cryptsetup open /dev/vg1/lv3 dmcrypt0 --type luks + ls -laF /dev/mapper/dmcrypt0 > mapper + mkdir -p /tmp/xfstest + mount /dev/mapper/dmcrypt0 /tmp/xfstest + xfs_info /tmp/xfstest/ > xfs_info + """)] + 
fstab_expected = { + '/dev/vg1/lv1': '/srv/data', + '/dev/vg1/lv2': '/srv/backup', + } + + def test_output_files_exist(self): + self.output_files_exist(["pvs", "lvs", "crypttab", "mapper", + "xfs_info"]) + + def test_lvs(self): + self.check_file_strippedline("lvs", "lv1=vg1") + self.check_file_strippedline("lvs", "lv2=vg1") + self.check_file_strippedline("lvs", "lv3=vg1") + + def test_pvs(self): + self.check_file_strippedline("pvs", "vg1=/dev/md0") + self.check_file_strippedline("pvs", "vg1=/dev/md1") + self.check_file_strippedline("pvs", "vg1=/dev/md2") + self.check_file_strippedline("pvs", "vg1=/dev/md3") + + def test_dmcrypt(self): + self.check_file_regex("crypttab", r"dmcrypt0.*luks") + self.check_file_regex("mapper", r"^lrwxrwxrwx.*/dev/mapper/dmcrypt0") + self.check_file_regex("xfs_info", r"^meta-data=/dev/mapper/dmcrypt0") + + +class WilyTestAllindata(TestAllindataAbs): + __test__ = True + release = "wily" + + +class VividTestAllindata(TestAllindataAbs): __test__ = True - repo = "maas-daily" release = "vivid" - arch = "amd64" diff -Nru curtin-0.1.0~bzr276/tests/vmtests/test_network.py curtin-0.1.0~bzr314/tests/vmtests/test_network.py --- curtin-0.1.0~bzr276/tests/vmtests/test_network.py 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/tests/vmtests/test_network.py 2015-11-23 16:22:09.000000000 +0000 @@ -54,23 +54,15 @@ boot_timeout = 600 extra_disks = [] extra_nics = [] - user_data = textwrap.dedent("""\ - #cloud-config - password: passw0rd - chpasswd: { expire: False } - bootcmd: - - mkdir -p /media/output - runcmd: - - ifconfig -a > /media/output/ifconfig_a - - cp -av /etc/network/interfaces /media/output - - cp /etc/resolv.conf /media/output - - cp -av /etc/udev/rules.d/70-persistent-net.rules /media/output - - ip -o route show > /media/output/ip_route_show - - route -n > /media/output/route_n - - [tar, -C, /media/output, -cf, /dev/vdb, .] 
- power_state: - mode: poweroff - """) + collect_scripts = [textwrap.dedent(""" + cd OUTPUT_COLLECT_D + ifconfig -a > ifconfig_a + cp -av /etc/network/interfaces . + cp /etc/resolv.conf . + cp -av /etc/udev/rules.d/70-persistent-net.rules . + ip -o route show > ip_route_show + route -n > route_n + """)] def test_output_files_exist(self): self.output_files_exist(["ifconfig_a", @@ -244,21 +236,21 @@ self.assertEqual(gw_ip, gw) -class TrustyTestBasic(TestNetworkAbs, TestCase): +class TrustyTestNetwork(TestNetworkAbs, TestCase): __test__ = True repo = "maas-daily" release = "trusty" arch = "amd64" -class WilyTestBasic(TestNetworkAbs, TestCase): +class WilyTestNetwork(TestNetworkAbs, TestCase): __test__ = True repo = "maas-daily" release = "wily" arch = "amd64" -class VividTestBasic(TestNetworkAbs, TestCase): +class VividTestNetwork(TestNetworkAbs, TestCase): __test__ = True repo = "maas-daily" release = "vivid" diff -Nru curtin-0.1.0~bzr276/tests/vmtests/test_raid5_bcache.py curtin-0.1.0~bzr314/tests/vmtests/test_raid5_bcache.py --- curtin-0.1.0~bzr276/tests/vmtests/test_raid5_bcache.py 1970-01-01 00:00:00.000000000 +0000 +++ curtin-0.1.0~bzr314/tests/vmtests/test_raid5_bcache.py 2015-11-23 16:22:09.000000000 +0000 @@ -0,0 +1,86 @@ +from . 
import VMBaseClass +from unittest import TestCase + +import textwrap +import os + + +class TestMdadmAbs(VMBaseClass, TestCase): + __test__ = False + repo = "maas-daily" + arch = "amd64" + install_timeout = 600 + boot_timeout = 100 + interactive = False + extra_disks = ['10G', '10G', '10G', '10G'] + active_mdadm = "1" + collect_scripts = [textwrap.dedent(""" + cd OUTPUT_COLLECT_D + cat /etc/fstab > fstab + mdadm --detail --scan > mdadm_status + mdadm --detail --scan | grep -c ubuntu > mdadm_active1 + grep -c active /proc/mdstat > mdadm_active2 + ls /dev/disk/by-dname > ls_dname + """)] + + def test_mdadm_output_files_exist(self): + self.output_files_exist( + ["fstab", "mdadm_status", "mdadm_active1", "mdadm_active2", + "ls_dname"]) + + def test_mdadm_status(self): + # ubuntu: is the name assigned to the md array + self.check_file_regex("mdadm_status", r"ubuntu:[0-9]*") + self.check_file_strippedline("mdadm_active1", self.active_mdadm) + self.check_file_strippedline("mdadm_active2", self.active_mdadm) + + +class TestMdadmBcacheAbs(TestMdadmAbs): + conf_file = "examples/tests/raid5bcache.yaml" + disk_to_check = {'sda': 2, + 'md0': 0} + + collect_scripts = TestMdadmAbs.collect_scripts + [textwrap.dedent(""" + cd OUTPUT_COLLECT_D + bcache-super-show /dev/vda2 > bcache_super_vda2 + ls /sys/fs/bcache > bcache_ls + cat /sys/block/bcache0/bcache/cache_mode > bcache_cache_mode + cat /proc/mounts > proc_mounts + cat /proc/partitions > proc_partitions + """)] + fstab_expected = { + '/dev/bcache0': '/', + '/dev/md0': '/srv/data', + } + + def test_bcache_output_files_exist(self): + self.output_files_exist(["bcache_super_vda2", "bcache_ls", + "bcache_cache_mode"]) + + def test_bcache_status(self): + bcache_cset_uuid = None + with open(os.path.join(self.td.mnt, "bcache_super_vda2"), "r") as fp: + for line in fp.read().splitlines(): + if line != "" and line.split()[0] == "cset.uuid": + bcache_cset_uuid = line.split()[-1].rstrip() + self.assertIsNotNone(bcache_cset_uuid) + with 
open(os.path.join(self.td.mnt, "bcache_ls"), "r") as fp: + self.assertTrue(bcache_cset_uuid in fp.read().splitlines()) + + def test_bcache_cachemode(self): + self.check_file_regex("bcache_cache_mode", r"\[writeback\]") + + +class WilyTestRaid5Bcache(TestMdadmBcacheAbs): + __test__ = True + release = "wily" + + +class VividTestRaid5Bcache(TestMdadmBcacheAbs): + __test__ = True + release = "vivid" + + +class TrustyTestRaid5Bcache(TestMdadmBcacheAbs): + __test__ = True + release = "trusty" diff -Nru curtin-0.1.0~bzr276/tools/launch curtin-0.1.0~bzr314/tools/launch --- curtin-0.1.0~bzr276/tools/launch 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/tools/launch 2015-11-23 16:22:09.000000000 +0000 @@ -29,9 +29,14 @@ --mem K memory in Kb -n | --netdev netdev can be 'user' or a bridge -p | --publish F make file 'F' available in web server - --silent use -vga none + --silent use -nographic + --vnc D use -vnc D (mutually exclusive with --silent) + D is passed directly through to qemu-system. + Note, qemu adds 5900 to port numbers. 
(:0 = port 5900) --serial-log F : log to F (default 'serial.log') -v | --verbose be more verbose + --no-install-deps do not insert '--install-deps' + on curtin command invocations use of --kernel/--initrd will seed cloud-init via cmdline rather than the local datasource @@ -96,13 +101,42 @@ EOF } +write_pstate_config() { + local pstate="$1" config1="$2" config2="$3" + cat > "$config1" < "$config2" <<"EOF" +#upstart-job +# precise does not do cloud-config poweroff +description "power-state for precise" +start on stopped cloud-final +console output +task +script + [ "$(lsb_release -sc)" = "precise" ] || exit 0 + target="PSTATE" + msg="precise-powerstate: $target" + case "$target" in + on) exit 0;; + off|poweroff) shutdown -P now "$msg";; + reboot) shutdown -r now "$msg";; + *) echo "$msg : unknown target"; exit 1;; + esac + echo "$msg" + exit 0 +end script +EOF +} + write_userdata() { local x cat </dev/null 2>&1; then + out=$(apt-config shell x Acquire::HTTP::Proxy) && + out=$(sh -c 'eval $1 && echo $x' -- "$out") && [ -n "$out" ] && + echo "$out" && return + fi + + return 1 +} + main() { local short_opts="a:d:h:i:k:n:p:v" - local long_opts="add:,append:,disk:,dowait,help,initrd:,kernel:,mem:,netdev:,no-dowait,power:,publish:,silent,serial-log:,uefi,verbose" + local long_opts="add:,append:,disk:,dowait,help,initrd:,kernel:,mem:,netdev:,no-dowait,power:,publish:,silent,serial-log:,uefi,verbose,vnc:" local getopt_out="" getopt_out=$(getopt --name "${0##*/}" \ --options "${short_opts}" --long "${long_opts}" -- "$@") && @@ -200,8 +251,8 @@ local initrd="" kernel="" uappend="" iargs="" disk_args="" local pubs="" disks="" pstate="null" local uefi=0 - local netdevs="" - local video="-curses" serial_log="file:serial.log" + local netdevs="" install_deps="--install-deps" + local video="-curses -vga std" serial_log="file:serial.log" # dowait: run xkvm with a '&' and then 'wait' on the pid. 
# the reason to do this or not do this has to do with interactivity # if detached with &, then user input will not go to xkvm. @@ -227,6 +278,7 @@ -d|--disk) disks[${#disks[@]}]="$next"; shift;; -h|--help) Usage ; exit 0;; -i|--initrd) initrd="$next"; shift;; + --no-install-deps) install_deps="";; -k|--kernel) kernel="$next"; shift;; --mem) mem="$next"; shift;; --power) @@ -241,6 +293,10 @@ -p|--publish) pubs[${#pub[@]}]="$next"; shift;; --uefi) uefi=1;; --silent) video="-nographic";; + --vnc) + video="-vnc $next" + debug 1 "VNC requested - $next" + shift;; --serial-log) serial_log="file:$next"; shift;; -v|--verbose) VERBOSITY=$((${VERBOSITY}+1));; --dowait) pt[${#pt[@]}]="$cur"; dowait=true;; @@ -297,7 +353,7 @@ tmp=$(dirname "$0") && top_d=$(cd "$tmp" && cd .. && pwd) || { error "failed to get dir for $0"; return 1; } - local disk="" src="" size="" + local disk="" src="" size="" fmt="" out="" disk_args=( ) for disk in "${disks[@]}"; do src=${disk} @@ -309,9 +365,14 @@ if [ ! -f "$src" ]; then qemu-img create -f raw "${src}" "$size" || { error "failed create $src of size $size"; return 1; } + fmt="raw" + else + out=$(LANG=C qemu-img info "$src") && + fmt=$(echo "$out" | awk '$0 ~ /^file format:/ { print $3 }') || + { error "failed to determine format of $src"; return 1; } fi disk_args=( "${disk_args[@]}" - "-drive" "file=${src},if=virtio,cache=unsafe" ) + "-drive" "file=${src},if=virtio,cache=unsafe,format=$fmt" ) done get_my_ip || { error "failed to get your ip. 
set IP_ADDR"; return 1; } @@ -333,6 +394,10 @@ ln -sf "$fpath" "${TEMP_D}/${pub}" done + start_http "${TEMP_D}" "$ip" "$http_port" "${HTTP_TRIES}" "${TEMP_D}/install-cmd" || { error "failed to pack"; return 1; } - start_http "${TEMP_D}" "$ip" "$http_port" "${HTTP_TRIES}" "${TEMP_D}/pstate" - ccfiles[${#ccfiles[@]}]="${TEMP_D}/pstate" + write_pstate_config "$pstate" "${TEMP_D}/pstate.1" "${TEMP_D}/pstate.2" + ccfiles[${#ccfiles[@]}]="${TEMP_D}/pstate.1" + ccfiles[${#ccfiles[@]}]="${TEMP_D}/pstate.2" + fi + + if tmp=$(find_apt_proxy); then + debug 1 "using $tmp for proxy" + printf '#cloud-config\napt_proxy: "%s"\n' "$tmp" > "${TEMP_D}/cc-proxy" + ccfiles[${#ccfiles[@]}]="${TEMP_D}/cc-proxy" fi write_metadata > "$mdata" || { error "failed to write meta-data"; return 1; } diff -Nru curtin-0.1.0~bzr276/tools/report-webhook-logger curtin-0.1.0~bzr314/tools/report-webhook-logger --- curtin-0.1.0~bzr276/tools/report-webhook-logger 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/tools/report-webhook-logger 2015-11-23 16:22:09.000000000 +0000 @@ -1,6 +1,12 @@ -#!/usr/bin/python -import SimpleHTTPServer -import SocketServer +#!/usr/bin/python3 +try: + # python2 + import SimpleHTTPServer as http_server + import SocketServer as socketserver +except ImportError: + import http.server as http_server + import socketserver + import json import sys @@ -12,7 +18,16 @@ endpoint: %(endpoint)s install: log_file: /tmp/foo - post_files: [/tmp/foo]""" + post_files: [/tmp/foo] + +# example python: +from curtin.reporter import events, update_configuration +cfg = {'mypost': {'type': 'webhook', 'endpoint': '%(endpoint)s'}} +update_configuration(cfg) +with events.ReportEventStack(name="myname", description="mydesc", + reporting_enabled=True): + print("do something") +""" if len(sys.argv) > 2: PORT = int(sys.argv[2]) @@ -29,7 +44,7 @@ return json.dumps(json.loads(event_str), indent=1) -class ServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): +class 
ServerHandler(http_server.SimpleHTTPRequestHandler): def log_request(self, code, size=None): lines = [ @@ -55,13 +70,17 @@ self._message = '\n'.join( ["failed printing event: %s" % e, post_data]) - self.send_response(200, "received post to %s" % self.path) + msg = "received post to %s" % self.path + self.send_response(200) + self.send_header("Content-type", "text/plain") + self.end_headers() + self.wfile.write(msg.encode('utf-8')) # avoid 'Address already in use' after ctrl-c -SocketServer.TCPServer.allow_reuse_address = True +socketserver.TCPServer.allow_reuse_address = True Handler = ServerHandler -httpd = SocketServer.TCPServer(("", PORT), Handler) +httpd = socketserver.TCPServer(("", PORT), Handler) httpd.allow_reuse_address = True info = { diff -Nru curtin-0.1.0~bzr276/tools/vmtest-sync-images curtin-0.1.0~bzr314/tools/vmtest-sync-images --- curtin-0.1.0~bzr276/tools/vmtest-sync-images 1970-01-01 00:00:00.000000000 +0000 +++ curtin-0.1.0~bzr314/tools/vmtest-sync-images 2015-11-23 16:22:09.000000000 +0000 @@ -0,0 +1,21 @@ +#!/usr/bin/python3 +# This tool keeps a local copy of the maas images used by vmtests. +# It keeps only the latest copy of the available images. +import os +import sys + +# Fix path so we can import ImageStore class. +sys.path.append(os.path.join(os.path.dirname(__file__), '..')) + +from tests.vmtests import ( + ImageStore, IMAGE_DIR, IMAGE_SRC_URL, DEFAULT_FILTERS) +from tests.vmtests.helpers import find_releases + + +if __name__ == '__main__': + # Instantiate the ImageStore object. + store = ImageStore(IMAGE_SRC_URL, IMAGE_DIR) + release_filter = 'release~{}'.format('|'.join(find_releases())) + DEFAULT_FILTERS.append(release_filter) + # Sync images. 
+ store.sync_images(filters=DEFAULT_FILTERS) diff -Nru curtin-0.1.0~bzr276/tools/vmtest-system-setup curtin-0.1.0~bzr314/tools/vmtest-system-setup --- curtin-0.1.0~bzr276/tools/vmtest-system-setup 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/tools/vmtest-system-setup 2015-11-23 16:22:09.000000000 +0000 @@ -5,6 +5,10 @@ fail() { [ $# -eq 0 ] || error "$@"; exit 2; } rel="$(lsb_release -sc)" +case "$(uname -m)" in + i?86|x86_64) qemu="qemu-system-x86";; + ppc*) qemu="qemu-system-ppc";; +esac DEPS=( cloud-image-utils make @@ -12,6 +16,7 @@ python3-nose python3-yaml simplestreams + $qemu ubuntu-cloudimage-keyring ) diff -Nru curtin-0.1.0~bzr276/tools/write-curtin curtin-0.1.0~bzr314/tools/write-curtin --- curtin-0.1.0~bzr276/tools/write-curtin 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/tools/write-curtin 2015-11-23 16:22:09.000000000 +0000 @@ -16,8 +16,7 @@ parg = path ret = write_exe_wrapper(entrypoint="curtin.commands.main", path=parg, - deps_check_entry="curtin.deps.check", - deps_install_entry="curtin.deps.install") + deps_check_entry="curtin.deps.check") if path == "-": sys.stdout.write(ret) diff -Nru curtin-0.1.0~bzr276/tools/xkvm curtin-0.1.0~bzr314/tools/xkvm --- curtin-0.1.0~bzr276/tools/xkvm 2015-10-07 13:46:23.000000000 +0000 +++ curtin-0.1.0~bzr314/tools/xkvm 2015-11-23 16:22:09.000000000 +0000 @@ -298,9 +298,15 @@ esac kvmcmd=( $kvm -enable-kvm ) + local out="" fmt="" for i in "${diskdevs[@]}"; do + if [ -f "$i" ]; then + out=$(LANG=C qemu-img info "$i") && + fmt=$(echo "$out" | awk '$0 ~ /^file format:/ { print $3 }') || + { error "failed to determine format of $i"; return 1; } + fi diskargs[${#diskargs[@]}]="-drive"; - diskargs[${#diskargs[@]}]="if=${def_diskif},file=$i"; + diskargs[${#diskargs[@]}]="if=${def_diskif},file=$i,format=$fmt"; done local mnics_vflag=""