diff -Nru cloud-init-0.7.8-49-g9e904bb/cloudinit/cmd/main.py cloud-init-0.7.8-68-gca3ae67/cloudinit/cmd/main.py --- cloud-init-0.7.8-49-g9e904bb/cloudinit/cmd/main.py 2016-11-18 21:33:51.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/cloudinit/cmd/main.py 2016-12-19 16:10:50.000000000 +0000 @@ -694,4 +694,6 @@ if __name__ == '__main__': + if 'TZ' not in os.environ: + os.environ['TZ'] = ":/etc/localtime" main(sys.argv) diff -Nru cloud-init-0.7.8-49-g9e904bb/cloudinit/config/cc_apt_configure.py cloud-init-0.7.8-68-gca3ae67/cloudinit/config/cc_apt_configure.py --- cloud-init-0.7.8-49-g9e904bb/cloudinit/config/cc_apt_configure.py 2016-11-18 21:33:51.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/cloudinit/config/cc_apt_configure.py 2016-12-19 16:10:50.000000000 +0000 @@ -199,6 +199,8 @@ deb-src $MIRROR $RELEASE main restricted deb $PRIMARY $RELEASE universe restricted deb $SECURITY $RELEASE-security multiverse + debconf_selections: + set1: the-package the-package/some-flag boolean true conf: | APT { Get { @@ -216,9 +218,9 @@ keyserver: "keyserverurl" source: "deb http:/// xenial main" source2: - source "ppa:" + source: "ppa:" source3: - source "deb $MIRROR $RELEASE multiverse" + source: "deb $MIRROR $RELEASE multiverse" key: | ------BEGIN PGP PUBLIC KEY BLOCK------- diff -Nru cloud-init-0.7.8-49-g9e904bb/cloudinit/config/cc_bootcmd.py cloud-init-0.7.8-68-gca3ae67/cloudinit/config/cc_bootcmd.py --- cloud-init-0.7.8-49-g9e904bb/cloudinit/config/cc_bootcmd.py 2016-11-18 21:33:51.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/cloudinit/config/cc_bootcmd.py 2016-12-19 16:10:50.000000000 +0000 @@ -43,7 +43,7 @@ bootcmd: - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts - - [ cloud-nit-per, once, mymkfs, mkfs, /dev/vdb ] + - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ] """ import os diff -Nru cloud-init-0.7.8-49-g9e904bb/cloudinit/config/cc_disk_setup.py cloud-init-0.7.8-68-gca3ae67/cloudinit/config/cc_disk_setup.py --- cloud-init-0.7.8-49-g9e904bb/cloudinit/config/cc_disk_setup.py 2016-11-18 21:33:51.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/cloudinit/config/cc_disk_setup.py 2016-12-19 16:10:50.000000000 +0000 @@ -436,14 +436,13 @@ def get_mbr_hdd_size(device): - size_cmd = [SFDISK_CMD, '--show-size', device] - size = None try: - size, _err = util.subp(size_cmd) + size_in_bytes, _ = util.subp([BLKDEV_CMD, '--getsize64', device]) + sector_size, _ = util.subp([BLKDEV_CMD, '--getss', device]) except Exception as e: raise Exception("Failed to get %s size\n%s" % (device, e)) - return int(size.strip()) + return int(size_in_bytes) / int(sector_size) def get_gpt_hdd_size(device): @@ -588,7 +587,7 @@ raise Exception("Partition was incorrectly defined: %s" % part) percent, part_type = part - part_size = int((float(size) * (float(percent) / 100)) / 1024) + part_size = int(float(size) * (float(percent) / 100)) if part_num == last_part_num: part_definition.append(",,%s" % part_type) @@ -692,7 +691,7 @@ types, i.e. 
gpt """ # Create the partitions - prt_cmd = [SFDISK_CMD, "--Linux", "-uM", device] + prt_cmd = [SFDISK_CMD, "--Linux", "--unit=S", "--force", device] try: util.subp(prt_cmd, data="%s\n" % layout) except Exception as e: @@ -909,7 +908,8 @@ LOG.debug("Error in device identification handling.") return - LOG.debug("File system %s will be created on %s", label, device) + LOG.debug("File system type '%s' with label '%s' will be created on %s", + fs_type, label, device) # Make sure the device is defined if not device: diff -Nru cloud-init-0.7.8-49-g9e904bb/cloudinit/config/cc_mounts.py cloud-init-0.7.8-68-gca3ae67/cloudinit/config/cc_mounts.py --- cloud-init-0.7.8-49-g9e904bb/cloudinit/config/cc_mounts.py 2016-11-18 21:33:51.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/cloudinit/config/cc_mounts.py 2016-12-19 16:10:50.000000000 +0000 @@ -327,6 +327,8 @@ if "mounts" in cfg: cfgmnt = cfg["mounts"] + LOG.debug("mounts configuration is %s", cfgmnt) + for i in range(len(cfgmnt)): # skip something that wasn't a list if not isinstance(cfgmnt[i], list): @@ -423,37 +425,51 @@ cc_lines.append('\t'.join(line)) fstab_lines = [] + removed = [] for line in util.load_file(FSTAB_PATH).splitlines(): try: toks = WS.split(line) if toks[3].find(comment) != -1: + removed.append(line) continue except Exception: pass fstab_lines.append(line) + for d in dirs: + try: + util.ensure_dir(d) + except Exception: + util.logexc(log, "Failed to make '%s' config-mount", d) + + sadds = [WS.sub(" ", n) for n in cc_lines] + sdrops = [WS.sub(" ", n) for n in removed] + + sops = (["- " + drop for drop in sdrops if drop not in sadds] + + ["+ " + add for add in sadds if add not in sdrops]) + fstab_lines.extend(cc_lines) contents = "%s\n" % ('\n'.join(fstab_lines)) util.write_file(FSTAB_PATH, contents) + activate_cmds = [] if needswap: - try: - util.subp(("swapon", "-a")) - except Exception: - util.logexc(log, "Activating swap via 'swapon -a' failed") + activate_cmds.append(["swapon", "-a"]) - for d in dirs: + if len(sops) == 0: + log.debug("No changes to /etc/fstab made.") + else: + log.debug("Changes to fstab: %s", sops) + activate_cmds.append(["mount", "-a"]) + if uses_systemd: + activate_cmds.append(["systemctl", "daemon-reload"]) + + fmt = "Activating swap and mounts with: %s" + for cmd in activate_cmds: + fmt = "Activate mounts: %s:" + ' '.join(cmd) try: - util.ensure_dir(d) - except Exception: - util.logexc(log, "Failed to make '%s' config-mount", d) - - activate_cmd = ["mount", "-a"] - if uses_systemd: - activate_cmd = ["systemctl", "daemon-reload"] - fmt = "Activate mounts: %s:" + ' '.join(activate_cmd) - try: - util.subp(activate_cmd) - LOG.debug(fmt, "PASS") - except util.ProcessExecutionError: - util.logexc(log, fmt, "FAIL") + util.subp(cmd) + log.debug(fmt, "PASS") + except util.ProcessExecutionError: + log.warn(fmt, "FAIL") + util.logexc(log, fmt, "FAIL") diff -Nru cloud-init-0.7.8-49-g9e904bb/cloudinit/config/cc_rh_subscription.py cloud-init-0.7.8-68-gca3ae67/cloudinit/config/cc_rh_subscription.py --- cloud-init-0.7.8-49-g9e904bb/cloudinit/config/cc_rh_subscription.py 2016-11-18 21:33:51.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/cloudinit/config/cc_rh_subscription.py 2016-12-19 16:10:50.000000000 +0000 @@ -421,11 +421,12 @@ "because it is not enabled".format(fail)) cmd = ['repos'] - if len(enable_list) > 0: - cmd.extend(enable_list) if len(disable_list) > 0: cmd.extend(disable_list) + if len(enable_list) > 0: + cmd.extend(enable_list) + try: self._sub_man_cli(cmd) except util.ProcessExecutionError as e: diff 
-Nru cloud-init-0.7.8-49-g9e904bb/cloudinit/distros/__init__.py cloud-init-0.7.8-68-gca3ae67/cloudinit/distros/__init__.py --- cloud-init-0.7.8-49-g9e904bb/cloudinit/distros/__init__.py 2016-11-18 21:33:51.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/cloudinit/distros/__init__.py 2016-12-19 16:10:50.000000000 +0000 @@ -398,13 +398,17 @@ # support kwargs having groups=[list] or groups="g1,g2" groups = kwargs.get('groups') if groups: - if isinstance(groups, (list, tuple)): - # kwargs.items loop below wants a comma delimeted string - # that can go right through to the command. - kwargs['groups'] = ",".join(groups) - else: + if isinstance(groups, six.string_types): groups = groups.split(",") + # remove any white spaces in group names, most likely + # that came in as a string like: groups: group1, group2 + groups = [g.strip() for g in groups] + + # kwargs.items loop below wants a comma delimeted string + # that can go right through to the command. + kwargs['groups'] = ",".join(groups) + primary_group = kwargs.get('primary_group') if primary_group: groups.append(primary_group) @@ -413,10 +417,10 @@ for group in groups: if not util.is_group(group): self.create_group(group) - LOG.debug("created group %s for user %s", name, group) + LOG.debug("created group '%s' for user '%s'", group, name) # Check the values and create the command - for key, val in kwargs.items(): + for key, val in sorted(kwargs.items()): if key in adduser_opts and val and isinstance(val, str): adduser_cmd.extend([adduser_opts[key], val]) @@ -433,7 +437,7 @@ # Don't create the home directory if directed so or if the user is a # system user - if 'no_create_home' in kwargs or 'system' in kwargs: + if kwargs.get('no_create_home') or kwargs.get('system'): adduser_cmd.append('-M') log_adduser_cmd.append('-M') else: diff -Nru cloud-init-0.7.8-49-g9e904bb/cloudinit/net/cmdline.py cloud-init-0.7.8-68-gca3ae67/cloudinit/net/cmdline.py --- cloud-init-0.7.8-49-g9e904bb/cloudinit/net/cmdline.py 2016-11-18 21:33:51.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/cloudinit/net/cmdline.py 2016-12-19 16:10:50.000000000 +0000 @@ -26,7 +26,7 @@ import six from . import get_devicelist -from . import sys_netdev_info +from . import read_sys_net_safe from cloudinit import util @@ -57,7 +57,7 @@ def _klibc_to_config_entry(content, mac_addrs=None): - """Convert a klibc writtent shell content file to a 'config' entry + """Convert a klibc written shell content file to a 'config' entry When ip= is seen on the kernel command line in debian initramfs and networking is brought up, ipconfig will populate /run/net-.cfg. 
@@ -140,7 +140,7 @@ def config_from_klibc_net_cfg(files=None, mac_addrs=None): if files is None: - files = glob.glob('/run/net*.conf') + files = glob.glob('/run/net-*.conf') + glob.glob('/run/net6-*.conf') entries = [] names = {} @@ -148,12 +148,19 @@ name, entry = _klibc_to_config_entry(util.load_file(cfg_file), mac_addrs=mac_addrs) if name in names: - raise ValueError( - "device '%s' defined multiple times: %s and %s" % ( - name, names[name], cfg_file)) + prev = names[name]['entry'] + if prev.get('mac_address') != entry.get('mac_address'): + raise ValueError( + "device '%s' was defined multiple times (%s)" + " but had differing mac addresses: %s -> %s.", + (name, ' '.join(names[name]['files']), + prev.get('mac_address'), entry.get('mac_address'))) + prev['subnets'].extend(entry['subnets']) + names[name]['files'].append(cfg_file) + else: + names[name] = {'files': [cfg_file], 'entry': entry} + entries.append(entry) - names[name] = cfg_file - entries.append(entry) return {'config': entries, 'version': 1} @@ -199,11 +206,14 @@ if data64: return util.load_yaml(_b64dgz(data64)) - if 'ip=' not in cmdline: + if 'ip=' not in cmdline and 'ip6=' not in cmdline: return None if mac_addrs is None: - mac_addrs = dict((k, sys_netdev_info(k, 'address')) - for k in get_devicelist()) + mac_addrs = {} + for k in get_devicelist(): + mac_addr = read_sys_net_safe(k, 'address') + if mac_addr: + mac_addrs[k] = mac_addr return config_from_klibc_net_cfg(files=files, mac_addrs=mac_addrs) diff -Nru cloud-init-0.7.8-49-g9e904bb/cloudinit/net/__init__.py cloud-init-0.7.8-68-gca3ae67/cloudinit/net/__init__.py --- cloud-init-0.7.8-49-g9e904bb/cloudinit/net/__init__.py 2016-11-18 21:33:51.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/cloudinit/net/__init__.py 2016-12-19 16:10:50.000000000 +0000 @@ -32,25 +32,53 @@ return SYS_CLASS_NET + devname + "/" + path -def read_sys_net(devname, path, translate=None, enoent=None, keyerror=None): +def read_sys_net(devname, path, translate=None, + on_enoent=None, on_keyerror=None, + on_einval=None): + dev_path = sys_dev_path(devname, path) try: - contents = util.load_file(sys_dev_path(devname, path)) + contents = util.load_file(dev_path) except (OSError, IOError) as e: - if getattr(e, 'errno', None) in (errno.ENOENT, errno.ENOTDIR): - if enoent is not None: - return enoent + e_errno = getattr(e, 'errno', None) + if e_errno in (errno.ENOENT, errno.ENOTDIR): + if on_enoent is not None: + return on_enoent(e) + if e_errno in (errno.EINVAL,): + if on_einval is not None: + return on_einval(e) raise contents = contents.strip() if translate is None: return contents try: - return translate.get(contents) - except KeyError: - LOG.debug("found unexpected value '%s' in '%s/%s'", contents, - devname, path) - if keyerror is not None: - return keyerror - raise + return translate[contents] + except KeyError as e: + if on_keyerror is not None: + return on_keyerror(e) + else: + LOG.debug("Found unexpected (not translatable) value" + " '%s' in '%s", contents, dev_path) + raise + + +def read_sys_net_safe(iface, field, translate=None): + def on_excp_false(e): + return False + return read_sys_net(iface, field, + on_keyerror=on_excp_false, + on_enoent=on_excp_false, + on_einval=on_excp_false, + translate=translate) + + +def read_sys_net_int(iface, field): + val = read_sys_net_safe(iface, field) + if val is False: + return None + try: + return int(val) + except TypeError: + return None def is_up(devname): @@ -58,8 +86,7 @@ # operstate as up for the purposes of network configuration. 
See # Documentation/networking/operstates.txt in the kernel source. translate = {'up': True, 'unknown': True, 'down': False} - return read_sys_net(devname, "operstate", enoent=False, keyerror=False, - translate=translate) + return read_sys_net_safe(devname, "operstate", translate=translate) def is_wireless(devname): @@ -70,21 +97,14 @@ # is_connected isn't really as simple as that. 2 is # 'physically connected'. 3 is 'not connected'. but a wlan interface will # always show 3. - try: - iflink = read_sys_net(devname, "iflink", enoent=False) - if iflink == "2": - return True - if not is_wireless(devname): - return False - LOG.debug("'%s' is wireless, basing 'connected' on carrier", devname) - - return read_sys_net(devname, "carrier", enoent=False, keyerror=False, - translate={'0': False, '1': True}) - - except IOError as e: - if e.errno == errno.EINVAL: - return False - raise + iflink = read_sys_net_safe(devname, "iflink") + if iflink == "2": + return True + if not is_wireless(devname): + return False + LOG.debug("'%s' is wireless, basing 'connected' on carrier", devname) + return read_sys_net_safe(devname, "carrier", + translate={'0': False, '1': True}) def is_physical(devname): @@ -109,25 +129,9 @@ return cfg.get('config') == "disabled" -def sys_netdev_info(name, field): - if not os.path.exists(os.path.join(SYS_CLASS_NET, name)): - raise OSError("%s: interface does not exist in %s" % - (name, SYS_CLASS_NET)) - fname = os.path.join(SYS_CLASS_NET, name, field) - if not os.path.exists(fname): - raise OSError("%s: could not find sysfs entry: %s" % (name, fname)) - data = util.load_file(fname) - if data[-1] == '\n': - data = data[:-1] - return data - - def generate_fallback_config(): """Determine which attached net dev is most likely to have a connection and generate network state to run dhcp on that interface""" - # by default use eth0 as primary interface - nconf = {'config': [], 'version': 1} - # get list of interfaces that could have connections invalid_interfaces = set(['lo']) potential_interfaces = set(get_devicelist()) @@ -142,30 +146,21 @@ if os.path.exists(sys_dev_path(interface, "bridge")): # skip any bridges continue - try: - carrier = int(sys_netdev_info(interface, 'carrier')) - if carrier: - connected.append(interface) - continue - except OSError: - pass + carrier = read_sys_net_int(interface, 'carrier') + if carrier: + connected.append(interface) + continue # check if nic is dormant or down, as this may make a nick appear to # not have a carrier even though it could acquire one when brought # online by dhclient - try: - dormant = int(sys_netdev_info(interface, 'dormant')) - if dormant: - possibly_connected.append(interface) - continue - except OSError: - pass - try: - operstate = sys_netdev_info(interface, 'operstate') - if operstate in ['dormant', 'down', 'lowerlayerdown', 'unknown']: - possibly_connected.append(interface) - continue - except OSError: - pass + dormant = read_sys_net_int(interface, 'dormant') + if dormant: + possibly_connected.append(interface) + continue + operstate = read_sys_net_safe(interface, 'operstate') + if operstate in ['dormant', 'down', 'lowerlayerdown', 'unknown']: + possibly_connected.append(interface) + continue # don't bother with interfaces that might not be connected if there are # some that definitely are @@ -173,23 +168,30 @@ potential_interfaces = connected else: potential_interfaces = possibly_connected - # if there are no interfaces, give up - if not potential_interfaces: - return + # if eth0 exists use it above anything else, otherwise get 
the interface - # that looks 'first' - if DEFAULT_PRIMARY_INTERFACE in potential_interfaces: - name = DEFAULT_PRIMARY_INTERFACE + # that we can read 'first' (using the sorted defintion of first). + names = list(sorted(potential_interfaces)) + if DEFAULT_PRIMARY_INTERFACE in names: + names.remove(DEFAULT_PRIMARY_INTERFACE) + names.insert(0, DEFAULT_PRIMARY_INTERFACE) + target_name = None + target_mac = None + for name in names: + mac = read_sys_net_safe(name, 'address') + if mac: + target_name = name + target_mac = mac + break + if target_mac and target_name: + nconf = {'config': [], 'version': 1} + nconf['config'].append( + {'type': 'physical', 'name': target_name, + 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]}) + return nconf else: - name = sorted(potential_interfaces)[0] - - mac = sys_netdev_info(name, 'address') - target_name = name - - nconf['config'].append( - {'type': 'physical', 'name': target_name, - 'mac_address': mac, 'subnets': [{'type': 'dhcp'}]}) - return nconf + # can't read any interfaces addresses (or there are none); give up + return None def apply_network_config_names(netcfg, strict_present=True, strict_busy=True): @@ -352,7 +354,7 @@ # for a bond slave, get the nic's hwaddress, not the address it # is using because its part of a bond. path = "bonding_slave/perm_hwaddr" - return read_sys_net(ifname, path, enoent=False) + return read_sys_net_safe(ifname, path) def get_interfaces_by_mac(devs=None): diff -Nru cloud-init-0.7.8-49-g9e904bb/cloudinit/sources/DataSourceAzure.py cloud-init-0.7.8-68-gca3ae67/cloudinit/sources/DataSourceAzure.py --- cloud-init-0.7.8-49-g9e904bb/cloudinit/sources/DataSourceAzure.py 2016-11-18 21:33:51.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/cloudinit/sources/DataSourceAzure.py 2016-12-19 16:10:50.000000000 +0000 @@ -36,6 +36,7 @@ DS_NAME = 'Azure' DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} AGENT_START = ['service', 'walinuxagent', 'start'] +AGENT_START_BUILTIN = "__builtin__" BOUNCE_COMMAND = [ 'sh', '-xc', "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x" @@ -45,7 +46,7 @@ RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource' BUILTIN_DS_CONFIG = { - 'agent_command': AGENT_START, + 'agent_command': AGENT_START_BUILTIN, 'data_dir': "/var/lib/waagent", 'set_hostname': True, 'hostname_bounce': { @@ -230,7 +231,7 @@ # the directory to be protected. write_files(ddir, files, dirmode=0o700) - if self.ds_cfg['agent_command'] == '__builtin__': + if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN: metadata_func = partial(get_metadata_from_fabric, fallback_lease_file=self. dhclient_lease_file) @@ -304,7 +305,7 @@ return False, msg def count_files(mp): - ignored = {'dataloss_warning_readme.txt'} + ignored = set(['dataloss_warning_readme.txt']) return len([f for f in os.listdir(mp) if f.lower() not in ignored]) bmsg = ('partition 1 (%s -> %s) on device %s was ntfs formatted' % diff -Nru cloud-init-0.7.8-49-g9e904bb/cloudinit/sources/DataSourceCloudSigma.py cloud-init-0.7.8-68-gca3ae67/cloudinit/sources/DataSourceCloudSigma.py --- cloud-init-0.7.8-49-g9e904bb/cloudinit/sources/DataSourceCloudSigma.py 2016-11-18 21:33:51.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/cloudinit/sources/DataSourceCloudSigma.py 2016-12-19 16:10:50.000000000 +0000 @@ -115,7 +115,7 @@ # Used to match classes to dependencies. Since this datasource uses the serial # port network is not really required, so it's okay to load without it, too. 
datasources = [ - (DataSourceCloudSigma, (sources.DEP_FILESYSTEM)), + (DataSourceCloudSigma, (sources.DEP_FILESYSTEM, )), ] diff -Nru cloud-init-0.7.8-49-g9e904bb/cloudinit/sources/helpers/openstack.py cloud-init-0.7.8-68-gca3ae67/cloudinit/sources/helpers/openstack.py --- cloud-init-0.7.8-49-g9e904bb/cloudinit/sources/helpers/openstack.py 2016-11-18 21:33:51.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/cloudinit/sources/helpers/openstack.py 2016-12-19 16:10:50.000000000 +0000 @@ -61,6 +61,19 @@ OS_LIBERTY, ) +PHYSICAL_TYPES = ( + None, + 'bridge', + 'ethernet', + 'hw_veb', + 'hyperv', + 'ovs', + 'phy', + 'tap', + 'vhostuser', + 'vif', +) + class NonReadable(IOError): pass @@ -583,8 +596,7 @@ subnet['ipv6'] = True subnets.append(subnet) cfg.update({'subnets': subnets}) - if link['type'] in [None, 'ethernet', 'vif', 'ovs', 'phy', - 'bridge', 'tap']: + if link['type'] in PHYSICAL_TYPES: cfg.update({'type': 'physical', 'mac_address': link_mac_addr}) elif link['type'] in ['bond']: params = {} diff -Nru cloud-init-0.7.8-49-g9e904bb/cloudinit/util.py cloud-init-0.7.8-68-gca3ae67/cloudinit/util.py --- cloud-init-0.7.8-49-g9e904bb/cloudinit/util.py 2016-11-18 21:33:51.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/cloudinit/util.py 2016-12-19 16:10:50.000000000 +0000 @@ -235,15 +235,16 @@ 'Command: %(cmd)s\n' 'Exit code: %(exit_code)s\n' 'Reason: %(reason)s\n' - 'Stdout: %(stdout)r\n' - 'Stderr: %(stderr)r') + 'Stdout: %(stdout)s\n' + 'Stderr: %(stderr)s') + empty_attr = '-' def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, description=None, reason=None, errno=None): if not cmd: - self.cmd = '-' + self.cmd = self.empty_attr else: self.cmd = cmd @@ -253,36 +254,56 @@ self.description = description if not isinstance(exit_code, six.integer_types): - self.exit_code = '-' + self.exit_code = self.empty_attr else: self.exit_code = exit_code if not stderr: - self.stderr = '' + self.stderr = self.empty_attr else: - self.stderr = stderr + self.stderr = self._indent_text(stderr) if not stdout: - self.stdout = '' + self.stdout = self.empty_attr else: - self.stdout = stdout + self.stdout = self._indent_text(stdout) if reason: self.reason = reason else: - self.reason = '-' + self.reason = self.empty_attr self.errno = errno message = self.MESSAGE_TMPL % { - 'description': self.description, - 'cmd': self.cmd, - 'exit_code': self.exit_code, - 'stdout': self.stdout, - 'stderr': self.stderr, - 'reason': self.reason, + 'description': self._ensure_string(self.description), + 'cmd': self._ensure_string(self.cmd), + 'exit_code': self._ensure_string(self.exit_code), + 'stdout': self._ensure_string(self.stdout), + 'stderr': self._ensure_string(self.stderr), + 'reason': self._ensure_string(self.reason), } IOError.__init__(self, message) + def _ensure_string(self, text): + """ + if data is bytes object, decode + """ + return text.decode() if isinstance(text, six.binary_type) else text + + def _indent_text(self, text, indent_level=8): + """ + indent text on all but the first line, allowing for easy to read output + """ + cr = '\n' + indent = ' ' * indent_level + # if input is bytes, return bytes + if isinstance(text, six.binary_type): + cr = cr.encode() + indent = indent.encode() + # remove any newlines at end of text first to prevent unneeded blank + # line in output + return text.rstrip(cr).replace(cr, cr + indent) + class SeLinuxGuard(object): def __init__(self, path, recursive=False): diff -Nru cloud-init-0.7.8-49-g9e904bb/config/cloud.cfg.d/05_logging.cfg 
cloud-init-0.7.8-68-gca3ae67/config/cloud.cfg.d/05_logging.cfg --- cloud-init-0.7.8-49-g9e904bb/config/cloud.cfg.d/05_logging.cfg 2016-11-18 21:33:51.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/config/cloud.cfg.d/05_logging.cfg 2016-12-19 16:10:50.000000000 +0000 @@ -53,14 +53,19 @@ args=("/dev/log", handlers.SysLogHandler.LOG_USER) log_cfgs: -# These will be joined into a string that defines the configuration - - [ *log_base, *log_syslog ] -# These will be joined into a string that defines the configuration +# Array entries in this list will be joined into a string +# that defines the configuration. +# +# If you want logs to go to syslog, uncomment the following line. +# - [ *log_base, *log_syslog ] +# +# The default behavior is to just log to a file. +# This mechanism that does not depend on a system service to operate. - [ *log_base, *log_file ] -# A file path can also be used +# A file path can also be used. # - /etc/log.conf -# this tells cloud-init to redirect its stdout and stderr to +# This tells cloud-init to redirect its stdout and stderr to # 'tee -a /var/log/cloud-init-output.log' so the user can see output # there without needing to look on the console. output: {all: '| tee -a /var/log/cloud-init-output.log'} diff -Nru cloud-init-0.7.8-49-g9e904bb/debian/changelog cloud-init-0.7.8-68-gca3ae67/debian/changelog --- cloud-init-0.7.8-49-g9e904bb/debian/changelog 2016-11-22 22:04:36.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/debian/changelog 2016-12-19 20:07:12.000000000 +0000 @@ -1,3 +1,34 @@ +cloud-init (0.7.8-68-gca3ae67-0ubuntu1~16.10.1) yakkety; urgency=medium + + * debian/cherry-pick: add utility for cherry picking commits from upstream + into patches in debian/patches. + * New upstream snapshot. + - mounts: use mount -a again to accomplish mounts (LP: #1647708) + - CloudSigma: Fix bug where datasource was not loaded in local search. + (LP: #1648380) + - when adding a user, strip whitespace from group list + [Lars Kellogg-Stedman] (LP: #1354694) + - fix decoding of utf-8 chars in yaml test + - Replace usage of sys_netdev_info with read_sys_net (LP: #1625766) + - fix problems found in python2.6 test. + - OpenStack: extend physical types to include hyperv, hw_veb, vhost_user. + (LP: #1642679) + - tests: fix assumptions that expected no eth0 in system. (LP: #1644043) + - net/cmdline: Consider ip= or ip6= on command line not only ip= + (LP: #1639930) + - Just use file logging by default [Joshua Harlow] (LP: #1643990) + - Improve formatting for ProcessExecutionError [Wesley Wiedenmeier] + - flake8: fix trailing white space + - Doc: various documentation fixes [Sean Bright] + - cloudinit/config/cc_rh_subscription.py: Remove repos before adding + [Brent Baude] + - packages/redhat: fix rpm spec file. + - main: set TZ in environment if not already set. [Ryan Harper] + - disk_setup: Use sectors as unit when formatting MBR disks with sfdisk. 
+ [Daniel Watkins] (LP: #1460715) + + -- Scott Moser Mon, 19 Dec 2016 15:07:12 -0500 + cloud-init (0.7.8-49-g9e904bb-0ubuntu1~16.10.1) yakkety; urgency=medium * debian/cloud-init.templates: enable DigitalOcean by default [Ben Howard] diff -Nru cloud-init-0.7.8-49-g9e904bb/debian/cherry-pick cloud-init-0.7.8-68-gca3ae67/debian/cherry-pick --- cloud-init-0.7.8-49-g9e904bb/debian/cherry-pick 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/debian/cherry-pick 2016-12-19 20:07:12.000000000 +0000 @@ -0,0 +1,194 @@ +#!/bin/bash + +VERBOSITY=0 +TEMP_D="" +CR=$'\n' + +error() { echo "$@" 1>&2; } +fail() { [ $# -eq 0 ] || error "$@"; exit 1; } + +Usage() { + cat <> + + Cherry pick a patch into debian/patches. + Useful to grab an upstream commit to the current packaging branch. + + options: + -h | --help show help +EOF +} + +bad_Usage() { Usage 1>&2; [ $# -eq 0 ] || error "$@"; return 1; } +cleanup() { + [ -z "${TEMP_D}" -o ! -d "${TEMP_D}" ] || rm -Rf "${TEMP_D}" +} + +debug() { + local level=${1}; shift; + [ "${level}" -gt "${VERBOSITY}" ] && return + error "${@}" +} + +shorten() { + local name="$1" len="70" + while [ "${#name}" -gt "$len" ]; do + name="${name%-*}" + done + _RET="$name" +} + +print_commit() { + local subject="$1" author="$2" bugs="$3" aname="" + aname=${author% <*} + echo "$subject${bugs:+ (LP: ${bugs})}" +} + +print_bugs() { + local subject="$1" author="$2" bugs="$3" aname="" + echo "$bugs" +} + +git_log_to_dch() { + # call printer with subject, author and bugs as extracted + # from either git format-patch output or git show output. + local line="" commit="" lcommit="" bugs="" + local printer="${1:-print_commit}" + while :; do + read line || break + case "$line" in + commit\ *|From\ *) + if [ -n "$commit" ]; then + "$printer" "$subject" "$author" "$bugs" + fi + commit=${line#* } + commit=${commit%% *} + bugs="" + author="" + subject="" + ;; + Author:\ *|From:\ *) author="${line#*: }";; + LP:*) bugs="${bugs:+${bugs}, }${line#*: }";; + "") [ -z "$subject" ] && read subject;; + Subject:\ *) + subject="${line#Subject: }" + subject="${subject#\[PATCH\] }" + ;; + esac + done + if [ -n "$commit" ]; then + "$printer" "$subject" "$author" "$bugs" + fi +} + +main() { + local short_opts="ho:v" + local long_opts="help,verbose" + local getopt_out="" + getopt_out=$(getopt --name "${0##*/}" \ + --options "${short_opts}" --long "${long_opts}" -- "$@") && + eval set -- "${getopt_out}" || + { bad_Usage; return; } + + local cur="" next="" + + while [ $# -ne 0 ]; do + cur="$1"; next="$2"; + case "$cur" in + -h|--help) Usage ; exit 0;; + -v|--verbose) VERBOSITY=$((${VERBOSITY}+1));; + --) shift; break;; + esac + shift; + done + + [ -n "$TEMP_D" ] || + TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX") || + { error "failed to make tempdir"; return 1; } + trap cleanup EXIT + + [ $# -gt 0 ] || { bad_Usage "must provide commit-ish"; return; } + + local r="" commit_in="$1" chash="" shash="" sname="" fname="" cur_br="" + cur_br=$(git rev-parse --abbrev-ref HEAD) || + { error "failed to get current branch"; return 1; } + chash=$(git show --quiet "--pretty=format:%H" "${commit_in}") || + { error "failed git show $commit_in"; return 1; } + + if git merge-base --is-ancestor "$chash" HEAD; then + error "current branch '$cur_br' already contains $commit_in ($chash)" + return 1 + fi + + out=$(git show --quiet "--pretty=format:%h %f" "$chash") || + { error "failed git show $chash"; return 1; } + + shash=${out% *} + sname=${out#* } + longname="cpick-$shash-$sname" + shorten "$longname" + 
fname="$_RET" + + [ -d debian/patches ] || mkdir -p debian/patches || + { error "failed to make debian/patches"; return 1; } + + local series="debian/patches/series" fpath="debian/patches/$fname" + if [ -e "$series" ] && out=$(grep -- "-${shash}-" "$series"); then + error "$chash already exists in $series" + error " $out" + return 1 + fi + + if [ -e "$series" ]; then + if out=$(quilt applied 2>&1); then + error "there are quilt patches applied!" + error "$out" + return 1 + fi + fi + + git format-patch --stdout -1 "$chash" > "$fpath" || + { error "failed git format-patch -1 $chash > $fpath"; return 1; } + + echo "$fname" >> "$series" || + { error "failed to write to $series"; return 1; } + + quilt push "$fname" || + { error "patches do not cleanly apply"; return 1; } + quilt refresh && quilt pop -a || + { error "failed to refresh or pop quilt"; return 1; } + + local message="" + message=$(git_log_to_dch < "$fpath") || + { error "failed getting log entry from $fpath"; return 1; } + dch -i "cherry-pick $shash: $message" + + dch -e || { + r=$?; + error "dch -e exited $r"; + return $r; + } + + local commit_files="" + commit_files=( debian/changelog "$series" "$fpath" ) + git diff HEAD "${commit_files[@]}" + + echo -n "Commit this change? (Y/n): " + read answer || fail "failed to read answer" + case "$answer" in + n|[Nn][oO]) exit 1;; + esac + + bugs=$(git_log_to_dch print_bugs < "$fpath") + msg="cherry pick $shash${bugs:+${CR}${CR}LP: ${bugs}}" + git add "$series" "$fpath" || + { error "failed to git add $series $fpath"; return 1; } + + git commit -m "$msg" "${commit_files[@]}" || + fail "failed to commit '$msg'" + + return 0 +} + +main "$@" +# vi: ts=4 expandtab diff -Nru cloud-init-0.7.8-49-g9e904bb/debian/cherry-pick-rev cloud-init-0.7.8-68-gca3ae67/debian/cherry-pick-rev --- cloud-init-0.7.8-49-g9e904bb/debian/cherry-pick-rev 2016-11-22 22:04:36.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/debian/cherry-pick-rev 1970-01-01 00:00:00.000000000 +0000 @@ -1,45 +0,0 @@ -#!/bin/sh -Usage() { - cat <&2; exit 1; } -[ "$1" = "--help" -o "$1" = "-h" ] && { Usage; exit 0; } - -repo=${1} -revno=${2} -name=${3} - -name=${name%.patch} -name=${name%.diff} - -fname="${name}.patch" - -( cd "${repo}" && bzr log -r${revno}..${revno} && - bzr diff -p1 -r$((${revno}-1))..${revno} ) | - filterdiff --exclude "*/ChangeLog" | - quilt import -P "${fname}" /dev/stdin - -[ $? -eq 0 ] || { echo "failed"; exit 1; } - -cat <&2; } fail() { [ $# -eq 0 ] || error "$@"; exit 1; } Usage() { @@ -53,6 +55,7 @@ *) fail "You are on branch '$cur_branch', expect to be on ubuntu/*";; esac +TEMP_D=$(mktemp -d) || fail "failed mktemp" trap cleanup EXIT prev_pkg_ver=$(dpkg-parsechangelog --show-field Version) || @@ -75,9 +78,42 @@ exit 0 fi +dpseries="debian/patches/series" +if [ -e $dpseries ]; then + drops="" + while read bname extra; do + case "$bname" in + cpick-*) + commit=${bname#cpick-} + commit=${commit%%-*} + echo "bname=$bname commit=${commit}" 1>&2 + if git merge-base --is-ancestor "$commit" "$from_ref"; then + drops="${drops} debian/patches/$bname" + fi + ;; + *) echo "$bname${extra:+ ${extra}}";; + esac + done < $dpseries > "${TEMP_D}/series" + drops=${drops# } + if [ -n "$drops" ]; then + cp "${TEMP_D}/series" "$dpseries" || + fail "failed copying to $dpseries" + if [ ! 
-s $dpseries ]; then + git rm --force "$dpseries" || + fail "failed removing empty $dpseries: git rm $dpseries" + fi + msg="drop cherry picks before merge from ${from_ref} at $new_upstream_ver" + msg="$msg${CR}${CR}drop the following cherry picks:" + for file in $drops; do + git rm "$file" || fail "failed to git rm $file" + msg="${msg}$CR $file" + done + git commit -m "$msg" "$dpseries" $drops + fi +fi + git merge "${from_ref}" -m "merge from $from_ref at $new_upstream_ver" || fail "failed: git merge ${from_ref} -m 'merge from $from_ref ..'" -TEMP_D=$(mktemp -d) || fail "failed mktemp" clog="${TEMP_D}/changelog" gitlog="${TEMP_D}/gitlog" diff -Nru cloud-init-0.7.8-49-g9e904bb/debian/patches/azure-use-walinux-agent.patch cloud-init-0.7.8-68-gca3ae67/debian/patches/azure-use-walinux-agent.patch --- cloud-init-0.7.8-49-g9e904bb/debian/patches/azure-use-walinux-agent.patch 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/debian/patches/azure-use-walinux-agent.patch 2016-12-19 20:07:12.000000000 +0000 @@ -0,0 +1,17 @@ +Description: Use walinux-agent rather than builtin fabric support + Upstream now uses the built-in support for instance initialization on Azure. + On a stable release, we want to continue to use the walinux-agent integration. + Upstream made this change under bug 1538522. +Forwarded: not-needed +Author: Scott Moser +--- a/cloudinit/sources/DataSourceAzure.py ++++ b/cloudinit/sources/DataSourceAzure.py +@@ -46,7 +46,7 @@ BOUNCE_COMMAND = [ + RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource' + + BUILTIN_DS_CONFIG = { +- 'agent_command': AGENT_START_BUILTIN, ++ 'agent_command': AGENT_START, + 'data_dir': "/var/lib/waagent", + 'set_hostname': True, + 'hostname_bounce': { diff -Nru cloud-init-0.7.8-49-g9e904bb/debian/patches/series cloud-init-0.7.8-68-gca3ae67/debian/patches/series --- cloud-init-0.7.8-49-g9e904bb/debian/patches/series 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/debian/patches/series 2016-12-19 20:07:12.000000000 +0000 @@ -0,0 +1 @@ +azure-use-walinux-agent.patch diff -Nru cloud-init-0.7.8-49-g9e904bb/debian/README.source cloud-init-0.7.8-68-gca3ae67/debian/README.source --- cloud-init-0.7.8-49-g9e904bb/debian/README.source 2016-11-22 22:04:36.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/debian/README.source 2016-12-19 20:07:12.000000000 +0000 @@ -21,3 +21,9 @@ To pull a new upstream snapshot: ./debian/new-upstream-snapshot ../trunk + +== Cherry Pick == +To cherry pick an upstream commit: + ./debian/cherry-pick + +That will add a patch to debian/patches/ and debian/patches/series. diff -Nru cloud-init-0.7.8-49-g9e904bb/doc/examples/cloud-config-apt.txt cloud-init-0.7.8-68-gca3ae67/doc/examples/cloud-config-apt.txt --- cloud-init-0.7.8-49-g9e904bb/doc/examples/cloud-config-apt.txt 2016-11-18 21:33:51.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/doc/examples/cloud-config-apt.txt 2016-12-19 16:10:50.000000000 +0000 @@ -70,7 +70,7 @@ # modifications have been made. # Suites are even disabled if no other modification was made, # but not if is preserve_sources_list is active. - # There is a special alias “$RELEASE” as in the sources that will be replace + # There is a special alias "$RELEASE" as in the sources that will be replace # by the matching release. 
# # To ease configuration and improve readability the following common ubuntu @@ -84,7 +84,7 @@ # There is no harm in specifying a suite to be disabled that is not found in # the source.list file (just a no-op then) # - # Note: Lines don’t get deleted, but disabled by being converted to a comment. + # Note: Lines don't get deleted, but disabled by being converted to a comment. # The following example disables all usual defaults except $RELEASE-security. # On top it disables a custom suite called "mysuite" disable_suites: [$RELEASE-updates, backports, $RELEASE, mysuite] diff -Nru cloud-init-0.7.8-49-g9e904bb/doc/examples/cloud-config-boot-cmds.txt cloud-init-0.7.8-68-gca3ae67/doc/examples/cloud-config-boot-cmds.txt --- cloud-init-0.7.8-49-g9e904bb/doc/examples/cloud-config-boot-cmds.txt 2016-11-18 21:33:51.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/doc/examples/cloud-config-boot-cmds.txt 2016-12-19 16:10:50.000000000 +0000 @@ -9,7 +9,7 @@ # boothook, but possibly with more friendly. # - bootcmd will run on every boot # - the INSTANCE_ID variable will be set to the current instance id. -# - you can use 'cloud-init-boot-per' command to help only run once +# - you can use 'cloud-init-per' command to help only run once bootcmd: - echo 192.168.1.130 us.archive.ubuntu.com >> /etc/hosts - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ] diff -Nru cloud-init-0.7.8-49-g9e904bb/doc/rtd/topics/boot.rst cloud-init-0.7.8-68-gca3ae67/doc/rtd/topics/boot.rst --- cloud-init-0.7.8-49-g9e904bb/doc/rtd/topics/boot.rst 2016-11-18 21:33:51.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/doc/rtd/topics/boot.rst 2016-12-19 16:10:50.000000000 +0000 @@ -21,7 +21,7 @@ not enable cloud-init if either: * A file exists: ``/etc/cloud/cloud-init.disabled`` - * The kernel command line as fond in /proc/cmdline contains ``cloud-init=disabled``. + * The kernel command line as found in /proc/cmdline contains ``cloud-init=disabled``. When running in a container, the kernel command line is not honored, but cloud-init will read an environment variable named ``KERNEL_CMDLINE`` in its place. @@ -56,7 +56,7 @@ including persistent device naming with old mac addresses. This stage must block network bring-up or any stale configuration might -already have been applied. That could have negative effects such as DCHP +already have been applied. That could have negative effects such as DHCP hooks or broadcast of an old hostname. It would also put the system in an odd state to recover from as it may then have to restart network devices. diff -Nru cloud-init-0.7.8-49-g9e904bb/doc/rtd/topics/format.rst cloud-init-0.7.8-68-gca3ae67/doc/rtd/topics/format.rst --- cloud-init-0.7.8-49-g9e904bb/doc/rtd/topics/format.rst 2016-11-18 21:33:51.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/doc/rtd/topics/format.rst 2016-12-19 16:10:50.000000000 +0000 @@ -9,7 +9,7 @@ Content found to be gzip compressed will be uncompressed. The uncompressed data will then be used as if it were not compressed. -This is typically is useful because user-data is limited to ~16384 [#]_ bytes. +This is typically useful because user-data is limited to ~16384 [#]_ bytes. 
Mime Multi Part Archive ======================= diff -Nru cloud-init-0.7.8-49-g9e904bb/doc/rtd/topics/logging.rst cloud-init-0.7.8-68-gca3ae67/doc/rtd/topics/logging.rst --- cloud-init-0.7.8-49-g9e904bb/doc/rtd/topics/logging.rst 2016-11-18 21:33:51.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/doc/rtd/topics/logging.rst 2016-12-19 16:10:50.000000000 +0000 @@ -22,7 +22,7 @@ will append its output to the file as though ``>>`` was specified. By default, cloud-init loads its output configuration from -``/etc/cloud/coud.cfg.d/05_logging.cfg``. The default config directs both +``/etc/cloud/cloud.cfg.d/05_logging.cfg``. The default config directs both stdout and stderr from all cloud-init stages to ``/var/log/cloud-init-output.log``. The default config is given as :: diff -Nru cloud-init-0.7.8-49-g9e904bb/packages/redhat/cloud-init.spec.in cloud-init-0.7.8-68-gca3ae67/packages/redhat/cloud-init.spec.in --- cloud-init-0.7.8-49-g9e904bb/packages/redhat/cloud-init.spec.in 2016-11-18 21:33:51.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/packages/redhat/cloud-init.spec.in 2016-12-19 16:10:50.000000000 +0000 @@ -171,10 +171,11 @@ %{_unitdir}/cloud-* #end if +%{_sysconfdir}/NetworkManager/dispatcher.d/hook-network-manager +%{_sysconfdir}/dhcp/dhclient-exit-hooks.d/hook-dhclient + # Program binaries %{_bindir}/cloud-init* -%{_libexecdir}/%{name}/uncloud-init -%{_libexecdir}/%{name}/write-ssh-key-fingerprints # Docs %doc LICENSE ChangeLog TODO.rst requirements.txt diff -Nru cloud-init-0.7.8-49-g9e904bb/tests/unittests/test_datasource/test_azure.py cloud-init-0.7.8-68-gca3ae67/tests/unittests/test_datasource/test_azure.py --- cloud-init-0.7.8-49-g9e904bb/tests/unittests/test_datasource/test_azure.py 2016-11-18 21:33:51.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/tests/unittests/test_datasource/test_azure.py 2016-12-19 16:10:50.000000000 +0000 @@ -93,7 +93,7 @@ for module, name, new in patches: self.patches.enter_context(mock.patch.object(module, name, new)) - def _get_ds(self, data): + def _get_ds(self, data, agent_command=None): def dsdevs(): return data.get('dsdevs', []) @@ -137,6 +137,8 @@ dsrc = mod.DataSourceAzureNet( data.get('sys_cfg', {}), distro=None, paths=self.paths) + if agent_command is not None: + dsrc.ds_cfg['agent_command'] = agent_command return dsrc @@ -299,7 +301,7 @@ data = {'ovfcontent': construct_valid_ovf_env(data=odata, pubkeys=pubkeys)} - dsrc = self._get_ds(data) + dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) ret = dsrc.get_data() self.assertTrue(ret) for mypk in mypklist: @@ -314,7 +316,7 @@ data = {'ovfcontent': construct_valid_ovf_env(data=odata, pubkeys=pubkeys)} - dsrc = self._get_ds(data) + dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) ret = dsrc.get_data() self.assertTrue(ret) @@ -330,7 +332,7 @@ data = {'ovfcontent': construct_valid_ovf_env(data=odata, pubkeys=pubkeys)} - dsrc = self._get_ds(data) + dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) ret = dsrc.get_data() self.assertTrue(ret) @@ -487,12 +489,15 @@ def tearDown(self): self.patches.close() - def _get_ds(self, ovfcontent=None): + def _get_ds(self, ovfcontent=None, agent_command=None): if ovfcontent is not None: populate_dir(os.path.join(self.paths.seed_dir, "azure"), {'ovf-env.xml': ovfcontent}) - return DataSourceAzure.DataSourceAzureNet( + dsrc = DataSourceAzure.DataSourceAzureNet( {}, distro=None, paths=self.paths) + if agent_command is not None: + dsrc.ds_cfg['agent_command'] = agent_command + return dsrc def get_ovf_env_with_dscfg(self, 
hostname, cfg): odata = { @@ -537,14 +542,17 @@ host_name = 'unchanged-host-name' self.get_hostname.return_value = host_name cfg = {'hostname_bounce': {'policy': 'force'}} - self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)).get_data() + self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg), + agent_command=['not', '__builtin__']).get_data() self.assertEqual(1, perform_hostname_bounce.call_count) def test_different_hostnames_sets_hostname(self): expected_hostname = 'azure-expected-host-name' self.get_hostname.return_value = 'default-host-name' self._get_ds( - self.get_ovf_env_with_dscfg(expected_hostname, {})).get_data() + self.get_ovf_env_with_dscfg(expected_hostname, {}), + agent_command=['not', '__builtin__'], + ).get_data() self.assertEqual(expected_hostname, self.set_hostname.call_args_list[0][0][0]) @@ -554,14 +562,18 @@ expected_hostname = 'azure-expected-host-name' self.get_hostname.return_value = 'default-host-name' self._get_ds( - self.get_ovf_env_with_dscfg(expected_hostname, {})).get_data() + self.get_ovf_env_with_dscfg(expected_hostname, {}), + agent_command=['not', '__builtin__'], + ).get_data() self.assertEqual(1, perform_hostname_bounce.call_count) def test_different_hostnames_sets_hostname_back(self): initial_host_name = 'default-host-name' self.get_hostname.return_value = initial_host_name self._get_ds( - self.get_ovf_env_with_dscfg('some-host-name', {})).get_data() + self.get_ovf_env_with_dscfg('some-host-name', {}), + agent_command=['not', '__builtin__'], + ).get_data() self.assertEqual(initial_host_name, self.set_hostname.call_args_list[-1][0][0]) @@ -572,7 +584,9 @@ initial_host_name = 'default-host-name' self.get_hostname.return_value = initial_host_name self._get_ds( - self.get_ovf_env_with_dscfg('some-host-name', {})).get_data() + self.get_ovf_env_with_dscfg('some-host-name', {}), + agent_command=['not', '__builtin__'], + ).get_data() self.assertEqual(initial_host_name, self.set_hostname.call_args_list[-1][0][0]) @@ -583,7 +597,7 @@ self.get_hostname.return_value = old_hostname cfg = {'hostname_bounce': {'interface': interface, 'policy': 'force'}} data = self.get_ovf_env_with_dscfg(hostname, cfg) - self._get_ds(data).get_data() + self._get_ds(data, agent_command=['not', '__builtin__']).get_data() self.assertEqual(1, self.subp.call_count) bounce_env = self.subp.call_args[1]['env'] self.assertEqual(interface, bounce_env['interface']) @@ -595,7 +609,7 @@ DataSourceAzure.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd cfg = {'hostname_bounce': {'policy': 'force'}} data = self.get_ovf_env_with_dscfg('some-hostname', cfg) - self._get_ds(data).get_data() + self._get_ds(data, agent_command=['not', '__builtin__']).get_data() self.assertEqual(1, self.subp.call_count) bounce_args = self.subp.call_args[1]['args'] self.assertEqual(cmd, bounce_args) diff -Nru cloud-init-0.7.8-49-g9e904bb/tests/unittests/test_datasource/test_cloudsigma.py cloud-init-0.7.8-68-gca3ae67/tests/unittests/test_datasource/test_cloudsigma.py --- cloud-init-0.7.8-49-g9e904bb/tests/unittests/test_datasource/test_cloudsigma.py 2016-11-18 21:33:51.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/tests/unittests/test_datasource/test_cloudsigma.py 2016-12-19 16:10:50.000000000 +0000 @@ -3,6 +3,7 @@ import copy from cloudinit.cs_utils import Cepko +from cloudinit import sources from cloudinit.sources import DataSourceCloudSigma from .. 
import helpers as test_helpers @@ -97,3 +98,17 @@ self.datasource.get_data() self.assertIsNone(self.datasource.vendordata_raw) + + +class DsLoads(test_helpers.TestCase): + def test_get_datasource_list_returns_in_local(self): + deps = (sources.DEP_FILESYSTEM,) + ds_list = DataSourceCloudSigma.get_datasource_list(deps) + self.assertEqual(ds_list, + [DataSourceCloudSigma.DataSourceCloudSigma]) + + def test_list_sources_finds_ds(self): + found = sources.list_sources( + ['CloudSigma'], (sources.DEP_FILESYSTEM,), ['cloudinit.sources']) + self.assertEqual([DataSourceCloudSigma.DataSourceCloudSigma], + found) diff -Nru cloud-init-0.7.8-49-g9e904bb/tests/unittests/test_datasource/test_common.py cloud-init-0.7.8-68-gca3ae67/tests/unittests/test_datasource/test_common.py --- cloud-init-0.7.8-49-g9e904bb/tests/unittests/test_datasource/test_common.py 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/tests/unittests/test_datasource/test_common.py 2016-12-19 16:10:50.000000000 +0000 @@ -0,0 +1,73 @@ +from cloudinit import settings +from cloudinit import sources +from cloudinit import type_utils +from cloudinit.sources import ( + DataSourceAliYun as AliYun, + DataSourceAltCloud as AltCloud, + DataSourceAzure as Azure, + DataSourceBigstep as Bigstep, + DataSourceCloudSigma as CloudSigma, + DataSourceCloudStack as CloudStack, + DataSourceConfigDrive as ConfigDrive, + DataSourceDigitalOcean as DigitalOcean, + DataSourceEc2 as Ec2, + DataSourceGCE as GCE, + DataSourceMAAS as MAAS, + DataSourceNoCloud as NoCloud, + DataSourceOpenNebula as OpenNebula, + DataSourceOpenStack as OpenStack, + DataSourceOVF as OVF, + DataSourceSmartOS as SmartOS, +) +from cloudinit.sources import DataSourceNone as DSNone + +from .. import helpers as test_helpers + +DEFAULT_LOCAL = [ + CloudSigma.DataSourceCloudSigma, + ConfigDrive.DataSourceConfigDrive, + DigitalOcean.DataSourceDigitalOcean, + NoCloud.DataSourceNoCloud, + OpenNebula.DataSourceOpenNebula, + OVF.DataSourceOVF, + SmartOS.DataSourceSmartOS, +] + +DEFAULT_NETWORK = [ + AltCloud.DataSourceAltCloud, + Azure.DataSourceAzureNet, + Bigstep.DataSourceBigstep, + CloudStack.DataSourceCloudStack, + DSNone.DataSourceNone, + Ec2.DataSourceEc2, + GCE.DataSourceGCE, + MAAS.DataSourceMAAS, + NoCloud.DataSourceNoCloudNet, + OpenStack.DataSourceOpenStack, + OVF.DataSourceOVFNet, +] + + +class ExpectedDataSources(test_helpers.TestCase): + builtin_list = settings.CFG_BUILTIN['datasource_list'] + deps_local = [sources.DEP_FILESYSTEM] + deps_network = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK] + pkg_list = [type_utils.obj_name(sources)] + + def test_expected_default_local_sources_found(self): + found = sources.list_sources( + self.builtin_list, self.deps_local, self.pkg_list) + self.assertEqual(set(DEFAULT_LOCAL), set(found)) + + def test_expected_default_network_sources_found(self): + found = sources.list_sources( + self.builtin_list, self.deps_network, self.pkg_list) + self.assertEqual(set(DEFAULT_NETWORK), set(found)) + + def test_expected_nondefault_network_sources_found(self): + found = sources.list_sources( + ['AliYun'], self.deps_network, self.pkg_list) + self.assertEqual(set([AliYun.DataSourceAliYun]), set(found)) + + +# vi: ts=4 expandtab diff -Nru cloud-init-0.7.8-49-g9e904bb/tests/unittests/test_distros/test_create_users.py cloud-init-0.7.8-68-gca3ae67/tests/unittests/test_distros/test_create_users.py --- cloud-init-0.7.8-49-g9e904bb/tests/unittests/test_distros/test_create_users.py 1970-01-01 00:00:00.000000000 +0000 +++ 
cloud-init-0.7.8-68-gca3ae67/tests/unittests/test_distros/test_create_users.py 2016-12-19 16:10:50.000000000 +0000 @@ -0,0 +1,147 @@ +from cloudinit import distros +from ..helpers import (TestCase, mock) + + +class MyBaseDistro(distros.Distro): + # MyBaseDistro is here to test base Distro class implementations + + def __init__(self, name="basedistro", cfg={}, paths={}): + super(MyBaseDistro, self).__init__(name, cfg, paths) + + def install_packages(self, pkglist): + raise NotImplementedError() + + def _write_network(self, settings): + raise NotImplementedError() + + def package_command(self, cmd, args=None, pkgs=None): + raise NotImplementedError() + + def update_package_sources(self): + raise NotImplementedError() + + def apply_locale(self, locale, out_fn=None): + raise NotImplementedError() + + def set_timezone(self, tz): + raise NotImplementedError() + + def _read_hostname(self, filename, default=None): + raise NotImplementedError() + + def _write_hostname(self, hostname, filename): + raise NotImplementedError() + + def _read_system_hostname(self): + raise NotImplementedError() + + +class TestCreateUser(TestCase): + def setUp(self): + super(TestCase, self).setUp() + self.dist = MyBaseDistro() + + def _useradd2call(self, args): + # return a mock call for the useradd command in args + # with expected 'logstring'. + args = ['useradd'] + args + logcmd = [a for a in args] + for i in range(len(args)): + if args[i] in ('--password',): + logcmd[i + 1] = 'REDACTED' + return mock.call(args, logstring=logcmd) + + @mock.patch("cloudinit.distros.util.subp") + def test_basic(self, m_subp): + user = 'foouser' + self.dist.create_user(user) + self.assertEqual( + m_subp.call_args_list, + [self._useradd2call([user, '-m']), + mock.call(['passwd', '-l', user])]) + + @mock.patch("cloudinit.distros.util.subp") + def test_no_home(self, m_subp): + user = 'foouser' + self.dist.create_user(user, no_create_home=True) + self.assertEqual( + m_subp.call_args_list, + [self._useradd2call([user, '-M']), + mock.call(['passwd', '-l', user])]) + + @mock.patch("cloudinit.distros.util.subp") + def test_system_user(self, m_subp): + # system user should have no home and get --system + user = 'foouser' + self.dist.create_user(user, system=True) + self.assertEqual( + m_subp.call_args_list, + [self._useradd2call([user, '--system', '-M']), + mock.call(['passwd', '-l', user])]) + + @mock.patch("cloudinit.distros.util.subp") + def test_explicit_no_home_false(self, m_subp): + user = 'foouser' + self.dist.create_user(user, no_create_home=False) + self.assertEqual( + m_subp.call_args_list, + [self._useradd2call([user, '-m']), + mock.call(['passwd', '-l', user])]) + + @mock.patch("cloudinit.distros.util.subp") + def test_unlocked(self, m_subp): + user = 'foouser' + self.dist.create_user(user, lock_passwd=False) + self.assertEqual( + m_subp.call_args_list, + [self._useradd2call([user, '-m'])]) + + @mock.patch("cloudinit.distros.util.subp") + def test_set_password(self, m_subp): + user = 'foouser' + password = 'passfoo' + self.dist.create_user(user, passwd=password) + self.assertEqual( + m_subp.call_args_list, + [self._useradd2call([user, '--password', password, '-m']), + mock.call(['passwd', '-l', user])]) + + @mock.patch("cloudinit.distros.util.is_group") + @mock.patch("cloudinit.distros.util.subp") + def test_group_added(self, m_subp, m_is_group): + m_is_group.return_value = False + user = 'foouser' + self.dist.create_user(user, groups=['group1']) + expected = [ + mock.call(['groupadd', 'group1']), + self._useradd2call([user, 
'--groups', 'group1', '-m']), + mock.call(['passwd', '-l', user])] + self.assertEqual(m_subp.call_args_list, expected) + + @mock.patch("cloudinit.distros.util.is_group") + @mock.patch("cloudinit.distros.util.subp") + def test_only_new_group_added(self, m_subp, m_is_group): + ex_groups = ['existing_group'] + groups = ['group1', ex_groups[0]] + m_is_group.side_effect = lambda m: m in ex_groups + user = 'foouser' + self.dist.create_user(user, groups=groups) + expected = [ + mock.call(['groupadd', 'group1']), + self._useradd2call([user, '--groups', ','.join(groups), '-m']), + mock.call(['passwd', '-l', user])] + self.assertEqual(m_subp.call_args_list, expected) + + @mock.patch("cloudinit.distros.util.is_group") + @mock.patch("cloudinit.distros.util.subp") + def test_create_groups_with_whitespace_string(self, m_subp, m_is_group): + # groups supported as a comma delimeted string even with white space + m_is_group.return_value = False + user = 'foouser' + self.dist.create_user(user, groups='group1, group2') + expected = [ + mock.call(['groupadd', 'group1']), + mock.call(['groupadd', 'group2']), + self._useradd2call([user, '--groups', 'group1,group2', '-m']), + mock.call(['passwd', '-l', user])] + self.assertEqual(m_subp.call_args_list, expected) diff -Nru cloud-init-0.7.8-49-g9e904bb/tests/unittests/test_handler/test_handler_disk_setup.py cloud-init-0.7.8-68-gca3ae67/tests/unittests/test_handler/test_handler_disk_setup.py --- cloud-init-0.7.8-49-g9e904bb/tests/unittests/test_handler/test_handler_disk_setup.py 2016-11-18 21:33:51.000000000 +0000 +++ cloud-init-0.7.8-68-gca3ae67/tests/unittests/test_handler/test_handler_disk_setup.py 2016-12-19 16:10:50.000000000 +0000 @@ -1,3 +1,5 @@ +import random + from cloudinit.config import cc_disk_setup from ..helpers import ExitStack, mock, TestCase @@ -28,3 +30,73 @@ self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(1)) self.check_fs.return_value = (mock.MagicMock(), None, mock.MagicMock()) self.assertFalse(cc_disk_setup.is_disk_used(mock.MagicMock())) + + +class TestGetMbrHddSize(TestCase): + + def setUp(self): + super(TestGetMbrHddSize, self).setUp() + self.patches = ExitStack() + self.subp = self.patches.enter_context( + mock.patch.object(cc_disk_setup.util, 'subp')) + + def tearDown(self): + super(TestGetMbrHddSize, self).tearDown() + self.patches.close() + + def _configure_subp_mock(self, hdd_size_in_bytes, sector_size_in_bytes): + def _subp(cmd, *args, **kwargs): + self.assertEqual(3, len(cmd)) + if '--getsize64' in cmd: + return hdd_size_in_bytes, None + elif '--getss' in cmd: + return sector_size_in_bytes, None + raise Exception('Unexpected blockdev command called') + + self.subp.side_effect = _subp + + def _test_for_sector_size(self, sector_size): + size_in_bytes = random.randint(10000, 10000000) * 512 + size_in_sectors = size_in_bytes / sector_size + self._configure_subp_mock(size_in_bytes, sector_size) + self.assertEqual(size_in_sectors, + cc_disk_setup.get_mbr_hdd_size('/dev/sda1')) + + def test_size_for_512_byte_sectors(self): + self._test_for_sector_size(512) + + def test_size_for_1024_byte_sectors(self): + self._test_for_sector_size(1024) + + def test_size_for_2048_byte_sectors(self): + self._test_for_sector_size(2048) + + def test_size_for_4096_byte_sectors(self): + self._test_for_sector_size(4096) + + +class TestGetPartitionMbrLayout(TestCase): + + def test_single_partition_using_boolean(self): + self.assertEqual('0,', + cc_disk_setup.get_partition_mbr_layout(1000, True)) + + def test_single_partition_using_list(self): + 
+        disk_size = random.randint(1000000, 1000000000000)
+        self.assertEqual(
+            ',,83',
+            cc_disk_setup.get_partition_mbr_layout(disk_size, [100]))
+
+    def test_half_and_half(self):
+        disk_size = random.randint(1000000, 1000000000000)
+        expected_partition_size = int(float(disk_size) / 2)
+        self.assertEqual(
+            ',{0},83\n,,83'.format(expected_partition_size),
+            cc_disk_setup.get_partition_mbr_layout(disk_size, [50, 50]))
+
+    def test_thirds_with_different_partition_type(self):
+        disk_size = random.randint(1000000, 1000000000000)
+        expected_partition_size = int(float(disk_size) * 0.33)
+        self.assertEqual(
+            ',{0},83\n,,82'.format(expected_partition_size),
+            cc_disk_setup.get_partition_mbr_layout(disk_size, [33, [66, 82]]))
diff -Nru cloud-init-0.7.8-49-g9e904bb/tests/unittests/test_net.py cloud-init-0.7.8-68-gca3ae67/tests/unittests/test_net.py
--- cloud-init-0.7.8-49-g9e904bb/tests/unittests/test_net.py 2016-11-18 21:33:51.000000000 +0000
+++ cloud-init-0.7.8-68-gca3ae67/tests/unittests/test_net.py 2016-12-19 16:10:50.000000000 +0000
@@ -8,6 +8,8 @@
 from .helpers import dir2dict
 from .helpers import mock
+from .helpers import populate_dir
+from .helpers import TempDirTestCase
 from .helpers import TestCase
 import base64
@@ -54,22 +56,9 @@
 }
 DHCP6_CONTENT_1 = """
-DEVICE=eno1
+DEVICE6=eno1
 HOSTNAME=
 DNSDOMAIN=
-reason='PREINIT'
-interface='eno1'
-DEVICE=eno1
-HOSTNAME=
-DNSDOMAIN=
-reason='FAIL'
-interface='eno1'
-DEVICE=eno1
-HOSTNAME=
-DNSDOMAIN=
-reason='PREINIT6'
-interface='eno1'
-DEVICE=eno1
 IPV6PROTO=dhcp6
 IPV6ADDR=2001:67c:1562:8010:0:1::
 IPV6NETMASK=64
@@ -77,11 +66,6 @@
 IPV6DOMAINSEARCH=
 HOSTNAME=
 DNSDOMAIN=
-reason='BOUND6'
-interface='eno1'
-new_ip6_address='2001:67c:1562:8010:0:1::'
-new_ip6_prefixlen='64'
-new_dhcp6_name_servers='2001:67c:1562:8010::2:1'
 """
 DHCP6_EXPECTED_1 = {
@@ -461,7 +445,7 @@
 }
-def _setup_test(tmp_dir, mock_get_devicelist, mock_sys_netdev_info,
+def _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net,
                 mock_sys_dev_path):
     mock_get_devicelist.return_value = ['eth1000']
     dev_characteristics = {
@@ -474,10 +458,12 @@
         }
     }
-    def netdev_info(name, field):
-        return dev_characteristics[name][field]
+    def fake_read(devname, path, translate=None,
+                  on_enoent=None, on_keyerror=None,
+                  on_einval=None):
+        return dev_characteristics[devname][path]
-    mock_sys_netdev_info.side_effect = netdev_info
+    mock_read_sys_net.side_effect = fake_read
     def sys_dev_path(devname, path=""):
         return tmp_dir + devname + "/" + path
@@ -493,15 +479,15 @@
 class TestSysConfigRendering(TestCase):
     @mock.patch("cloudinit.net.sys_dev_path")
-    @mock.patch("cloudinit.net.sys_netdev_info")
+    @mock.patch("cloudinit.net.read_sys_net")
     @mock.patch("cloudinit.net.get_devicelist")
     def test_default_generation(self, mock_get_devicelist,
-                                mock_sys_netdev_info,
+                                mock_read_sys_net,
                                 mock_sys_dev_path):
         tmp_dir = tempfile.mkdtemp()
         self.addCleanup(shutil.rmtree, tmp_dir)
         _setup_test(tmp_dir, mock_get_devicelist,
-                    mock_sys_netdev_info, mock_sys_dev_path)
+                    mock_read_sys_net, mock_sys_dev_path)
         network_cfg = net.generate_fallback_config()
         ns = network_state.parse_net_config_data(network_cfg,
@@ -550,15 +536,15 @@
 class TestEniNetRendering(TestCase):
     @mock.patch("cloudinit.net.sys_dev_path")
-    @mock.patch("cloudinit.net.sys_netdev_info")
+    @mock.patch("cloudinit.net.read_sys_net")
    @mock.patch("cloudinit.net.get_devicelist")
     def test_default_generation(self, mock_get_devicelist,
-                                mock_sys_netdev_info,
+                                mock_read_sys_net,
                                 mock_sys_dev_path):
         tmp_dir = tempfile.mkdtemp()
         self.addCleanup(shutil.rmtree, tmp_dir)
         _setup_test(tmp_dir, mock_get_devicelist,
-                    mock_sys_netdev_info, mock_sys_dev_path)
+                    mock_read_sys_net, mock_sys_dev_path)
         network_cfg = net.generate_fallback_config()
         ns = network_state.parse_net_config_data(network_cfg,
@@ -677,6 +663,66 @@
         self.assertEqual(found, self.simple_cfg)
+
+class TestCmdlineReadKernelConfig(TempDirTestCase):
+    macs = {
+        'eth0': '14:02:ec:42:48:00',
+        'eno1': '14:02:ec:42:48:01',
+    }
+
+    def test_ip_cmdline_read_kernel_cmdline_ip(self):
+        content = {'net-eth0.conf': DHCP_CONTENT_1}
+        populate_dir(self.tmp, content)
+        files = [os.path.join(self.tmp, k) for k in content.keys()]
+        found = cmdline.read_kernel_cmdline_config(
+            files=files, cmdline='foo ip=dhcp', mac_addrs=self.macs)
+        exp1 = copy.deepcopy(DHCP_EXPECTED_1)
+        exp1['mac_address'] = self.macs['eth0']
+        self.assertEqual(found['version'], 1)
+        self.assertEqual(found['config'], [exp1])
+
+    def test_ip_cmdline_read_kernel_cmdline_ip6(self):
+        content = {'net6-eno1.conf': DHCP6_CONTENT_1}
+        populate_dir(self.tmp, content)
+        files = [os.path.join(self.tmp, k) for k in content.keys()]
+        found = cmdline.read_kernel_cmdline_config(
+            files=files, cmdline='foo ip6=dhcp root=/dev/sda',
+            mac_addrs=self.macs)
+        self.assertEqual(
+            found,
+            {'version': 1, 'config': [
+                {'type': 'physical', 'name': 'eno1',
+                 'mac_address': self.macs['eno1'],
+                 'subnets': [
+                     {'dns_nameservers': ['2001:67c:1562:8010::2:1'],
+                      'control': 'manual', 'type': 'dhcp6', 'netmask': '64'}]}]})
+
+    def test_ip_cmdline_read_kernel_cmdline_none(self):
+        # if there is no ip= or ip6= on cmdline, return value should be None
+        content = {'net6-eno1.conf': DHCP6_CONTENT_1}
+        populate_dir(self.tmp, content)
+        files = [os.path.join(self.tmp, k) for k in content.keys()]
+        found = cmdline.read_kernel_cmdline_config(
+            files=files, cmdline='foo root=/dev/sda', mac_addrs=self.macs)
+        self.assertEqual(found, None)
+
+    def test_ip_cmdline_both_ip_ip6(self):
+        content = {'net-eth0.conf': DHCP_CONTENT_1,
+                   'net6-eth0.conf': DHCP6_CONTENT_1.replace('eno1', 'eth0')}
+        populate_dir(self.tmp, content)
+        files = [os.path.join(self.tmp, k) for k in sorted(content.keys())]
+        found = cmdline.read_kernel_cmdline_config(
+            files=files, cmdline='foo ip=dhcp ip6=dhcp', mac_addrs=self.macs)
+
+        eth0 = copy.deepcopy(DHCP_EXPECTED_1)
+        eth0['mac_address'] = self.macs['eth0']
+        eth0['subnets'].append(
+            {'control': 'manual', 'type': 'dhcp6',
+             'netmask': '64', 'dns_nameservers': ['2001:67c:1562:8010::2:1']})
+        expected = [eth0]
+        self.assertEqual(found['version'], 1)
+        self.assertEqual(found['config'], expected)
+
+
 class TestEniRoundTrip(TestCase):
     def setUp(self):
         super(TestCase, self).setUp()
diff -Nru cloud-init-0.7.8-49-g9e904bb/tests/unittests/test_rh_subscription.py cloud-init-0.7.8-68-gca3ae67/tests/unittests/test_rh_subscription.py
--- cloud-init-0.7.8-49-g9e904bb/tests/unittests/test_rh_subscription.py 2016-11-18 21:33:51.000000000 +0000
+++ cloud-init-0.7.8-68-gca3ae67/tests/unittests/test_rh_subscription.py 2016-12-19 16:10:50.000000000 +0000
@@ -83,8 +83,8 @@
         '''
         call_lists = []
         call_lists.append(['attach', '--pool=pool1', '--pool=pool3'])
-        call_lists.append(['repos', '--enable=repo2', '--enable=repo3',
-                           '--disable=repo5'])
+        call_lists.append(['repos', '--disable=repo5', '--enable=repo2',
+                           '--enable=repo3'])
         call_lists.append(['attach', '--auto', '--servicelevel=self-support'])
         self.SM.log_success = mock.MagicMock()
         reg = "The system has been registered with ID:" \
diff -Nru cloud-init-0.7.8-49-g9e904bb/tests/unittests/test_util.py cloud-init-0.7.8-68-gca3ae67/tests/unittests/test_util.py
--- cloud-init-0.7.8-49-g9e904bb/tests/unittests/test_util.py 2016-11-18 21:33:51.000000000 +0000
+++ cloud-init-0.7.8-68-gca3ae67/tests/unittests/test_util.py 2016-12-19 16:10:50.000000000 +0000
@@ -611,4 +611,73 @@
         text = util.decode_binary(blob)
         self.assertEqual(text, blob)
+
+class TestProcessExecutionError(helpers.TestCase):
+
+    template = ('{description}\n'
+                'Command: {cmd}\n'
+                'Exit code: {exit_code}\n'
+                'Reason: {reason}\n'
+                'Stdout: {stdout}\n'
+                'Stderr: {stderr}')
+    empty_attr = '-'
+    empty_description = 'Unexpected error while running command.'
+
+    def test_pexec_error_indent_text(self):
+        error = util.ProcessExecutionError()
+        msg = 'abc\ndef'
+        formatted = 'abc\n{0}def'.format(' ' * 4)
+        self.assertEqual(error._indent_text(msg, indent_level=4), formatted)
+        self.assertEqual(error._indent_text(msg.encode(), indent_level=4),
+                         formatted.encode())
+        self.assertIsInstance(
+            error._indent_text(msg.encode()), type(msg.encode()))
+
+    def test_pexec_error_type(self):
+        self.assertIsInstance(util.ProcessExecutionError(), IOError)
+
+    def test_pexec_error_empty_msgs(self):
+        error = util.ProcessExecutionError()
+        self.assertTrue(all(attr == self.empty_attr for attr in
+                            (error.stderr, error.stdout, error.reason)))
+        self.assertEqual(error.description, self.empty_description)
+        self.assertEqual(str(error), self.template.format(
+            description=self.empty_description, exit_code=self.empty_attr,
+            reason=self.empty_attr, stdout=self.empty_attr,
+            stderr=self.empty_attr, cmd=self.empty_attr))
+
+    def test_pexec_error_single_line_msgs(self):
+        stdout_msg = 'out out'
+        stderr_msg = 'error error'
+        cmd = 'test command'
+        exit_code = 3
+        error = util.ProcessExecutionError(
+            stdout=stdout_msg, stderr=stderr_msg, exit_code=3, cmd=cmd)
+        self.assertEqual(str(error), self.template.format(
+            description=self.empty_description, stdout=stdout_msg,
+            stderr=stderr_msg, exit_code=str(exit_code),
+            reason=self.empty_attr, cmd=cmd))
+
+    def test_pexec_error_multi_line_msgs(self):
+        # make sure bytes are converted and handled properly when formatting
+        stdout_msg = 'multi\nline\noutput message'.encode()
+        stderr_msg = 'multi\nline\nerror message\n\n\n'
+        error = util.ProcessExecutionError(
+            stdout=stdout_msg, stderr=stderr_msg)
+        self.assertEqual(
+            str(error),
+            '\n'.join((
+                '{description}',
+                'Command: {empty_attr}',
+                'Exit code: {empty_attr}',
+                'Reason: {empty_attr}',
+                'Stdout: multi',
+                '        line',
+                '        output message',
+                'Stderr: multi',
+                '        line',
+                '        error message',
+            )).format(description=self.empty_description,
+                      empty_attr=self.empty_attr))
+
 # vi: ts=4 expandtab
diff -Nru cloud-init-0.7.8-49-g9e904bb/tools/validate-yaml.py cloud-init-0.7.8-68-gca3ae67/tools/validate-yaml.py
--- cloud-init-0.7.8-49-g9e904bb/tools/validate-yaml.py 2016-11-18 21:33:51.000000000 +0000
+++ cloud-init-0.7.8-68-gca3ae67/tools/validate-yaml.py 2016-12-19 16:10:50.000000000 +0000
@@ -12,8 +12,8 @@
 for fn in sys.argv[1:]:
     sys.stdout.write("%s" % (fn))
     try:
-        fh = open(fn, 'r')
-        yaml.safe_load(fh.read())
+        fh = open(fn, 'rb')
+        yaml.safe_load(fh.read().decode('utf-8'))
         fh.close()
         sys.stdout.write(" - ok\n")
     except Exception as e: