diff -Nru cloud-init-18.3-9-g2e62cb8a/bash_completion/cloud-init cloud-init-18.3-24-gf6249277/bash_completion/cloud-init
--- cloud-init-18.3-9-g2e62cb8a/bash_completion/cloud-init 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/bash_completion/cloud-init 2018-08-07 14:35:29.000000000 +0000
@@ -28,7 +28,7 @@
COMPREPLY=($(compgen -W "--help --tarfile --include-userdata" -- $cur_word))
;;
devel)
- COMPREPLY=($(compgen -W "--help schema" -- $cur_word))
+ COMPREPLY=($(compgen -W "--help schema net-convert" -- $cur_word))
;;
dhclient-hook|features)
COMPREPLY=($(compgen -W "--help" -- $cur_word))
@@ -59,6 +59,9 @@
--frequency)
COMPREPLY=($(compgen -W "--help instance always once" -- $cur_word))
;;
+ net-convert)
+ COMPREPLY=($(compgen -W "--help --network-data --kind --directory --output-kind" -- $cur_word))
+ ;;
schema)
COMPREPLY=($(compgen -W "--help --config-file --doc --annotate" -- $cur_word))
;;
@@ -74,4 +77,4 @@
}
complete -F _cloudinit_complete cloud-init
-# vi: syntax=bash expandtab
+# vi: syntax=sh expandtab
diff -Nru cloud-init-18.3-9-g2e62cb8a/cloudinit/cmd/devel/net_convert.py cloud-init-18.3-24-gf6249277/cloudinit/cmd/devel/net_convert.py
--- cloud-init-18.3-9-g2e62cb8a/cloudinit/cmd/devel/net_convert.py 1970-01-01 00:00:00.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/cloudinit/cmd/devel/net_convert.py 2018-08-07 14:35:29.000000000 +0000
@@ -0,0 +1,115 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Debug network config format conversions."""
+import argparse
+import json
+import os
+import sys
+import yaml
+
+from cloudinit.sources.helpers import openstack
+
+from cloudinit.net import eni, netplan, network_state, sysconfig
+from cloudinit import log
+
+NAME = 'net-convert'
+
+
+def get_parser(parser=None):
+ """Build or extend and arg parser for net-convert utility.
+
+ @param parser: Optional existing ArgumentParser instance representing the
+ subcommand which will be extended to support the args of this utility.
+
+ @returns: ArgumentParser with proper argument configuration.
+ """
+ if not parser:
+ parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
+ parser.add_argument("-p", "--network-data", type=open,
+ metavar="PATH", required=True)
+ parser.add_argument("-k", "--kind",
+ choices=['eni', 'network_data.json', 'yaml'],
+ required=True)
+ parser.add_argument("-d", "--directory",
+ metavar="PATH",
+ help="directory to place output in",
+ required=True)
+ parser.add_argument("-m", "--mac",
+ metavar="name,mac",
+ action='append',
+ help="interface name to mac mapping")
+ parser.add_argument("--debug", action='store_true',
+ help='enable debug logging to stderr.')
+ parser.add_argument("-O", "--output-kind",
+ choices=['eni', 'netplan', 'sysconfig'],
+ required=True)
+ return parser
+
+
+def handle_args(name, args):
+ if not args.directory.endswith("/"):
+ args.directory += "/"
+
+ if not os.path.isdir(args.directory):
+ os.makedirs(args.directory)
+
+ if args.debug:
+ log.setupBasicLogging(level=log.DEBUG)
+ else:
+ log.setupBasicLogging(level=log.WARN)
+ if args.mac:
+ known_macs = {}
+ for item in args.mac:
+ iface_name, iface_mac = item.split(",", 1)
+ known_macs[iface_mac] = iface_name
+ else:
+ known_macs = None
+
+ net_data = args.network_data.read()
+ if args.kind == "eni":
+ pre_ns = eni.convert_eni_data(net_data)
+ ns = network_state.parse_net_config_data(pre_ns)
+ elif args.kind == "yaml":
+ pre_ns = yaml.load(net_data)
+ if 'network' in pre_ns:
+ pre_ns = pre_ns.get('network')
+ if args.debug:
+ sys.stderr.write('\n'.join(
+ ["Input YAML",
+ yaml.dump(pre_ns, default_flow_style=False, indent=4), ""]))
+ ns = network_state.parse_net_config_data(pre_ns)
+ else:
+ pre_ns = openstack.convert_net_json(
+ json.loads(net_data), known_macs=known_macs)
+ ns = network_state.parse_net_config_data(pre_ns)
+
+ if not ns:
+ raise RuntimeError("No valid network_state object created from"
+ "input data")
+
+ if args.debug:
+ sys.stderr.write('\n'.join([
+ "", "Internal State",
+ yaml.dump(ns, default_flow_style=False, indent=4), ""]))
+ if args.output_kind == "eni":
+ r_cls = eni.Renderer
+ elif args.output_kind == "netplan":
+ r_cls = netplan.Renderer
+ else:
+ r_cls = sysconfig.Renderer
+
+ r = r_cls()
+ sys.stderr.write(''.join([
+ "Read input format '%s' from '%s'.\n" % (
+ args.kind, args.network_data.name),
+ "Wrote output format '%s' to '%s'\n" % (
+ args.output_kind, args.directory)]) + "\n")
+ r.render_network_state(network_state=ns, target=args.directory)
+
+
+if __name__ == '__main__':
+ args = get_parser().parse_args()
+ handle_args(NAME, args)
+
+
+# vi: ts=4 expandtab
diff -Nru cloud-init-18.3-9-g2e62cb8a/cloudinit/cmd/devel/parser.py cloud-init-18.3-24-gf6249277/cloudinit/cmd/devel/parser.py
--- cloud-init-18.3-9-g2e62cb8a/cloudinit/cmd/devel/parser.py 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/cloudinit/cmd/devel/parser.py 2018-08-07 14:35:29.000000000 +0000
@@ -5,8 +5,9 @@
"""Define 'devel' subcommand argument parsers to include in cloud-init cmd."""
import argparse
-from cloudinit.config.schema import (
- get_parser as schema_parser, handle_schema_args)
+from cloudinit.config import schema
+
+from . import net_convert
def get_parser(parser=None):
@@ -17,10 +18,15 @@
subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
subparsers.required = True
- parser_schema = subparsers.add_parser(
- 'schema', help='Validate cloud-config files or document schema')
- # Construct schema subcommand parser
- schema_parser(parser_schema)
- parser_schema.set_defaults(action=('schema', handle_schema_args))
+ subcmds = [
+ ('schema', 'Validate cloud-config files for document schema',
+ schema.get_parser, schema.handle_schema_args),
+ (net_convert.NAME, net_convert.__doc__,
+ net_convert.get_parser, net_convert.handle_args)
+ ]
+ for (subcmd, helpmsg, get_parser, handler) in subcmds:
+ parser = subparsers.add_parser(subcmd, help=helpmsg)
+ get_parser(parser)
+ parser.set_defaults(action=(subcmd, handler))
return parser
diff -Nru cloud-init-18.3-9-g2e62cb8a/cloudinit/config/cc_lxd.py cloud-init-18.3-24-gf6249277/cloudinit/config/cc_lxd.py
--- cloud-init-18.3-9-g2e62cb8a/cloudinit/config/cc_lxd.py 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/cloudinit/config/cc_lxd.py 2018-08-07 14:35:29.000000000 +0000
@@ -276,27 +276,27 @@
if net_name != _DEFAULT_NETWORK_NAME or not did_init:
return
- fail_assume_enoent = " failed. Assuming it did not exist."
- succeeded = " succeeded."
+ fail_assume_enoent = "failed. Assuming it did not exist."
+ succeeded = "succeeded."
if create:
- msg = "Deletion of lxd network '%s'" % net_name
+ msg = "Deletion of lxd network '%s' %s"
try:
_lxc(["network", "delete", net_name])
- LOG.debug(msg + succeeded)
+ LOG.debug(msg, net_name, succeeded)
except util.ProcessExecutionError as e:
if e.exit_code != 1:
raise e
- LOG.debug(msg + fail_assume_enoent)
+ LOG.debug(msg, net_name, fail_assume_enoent)
if attach:
- msg = "Removal of device '%s' from profile '%s'" % (nic_name, profile)
+ msg = "Removal of device '%s' from profile '%s' %s"
try:
_lxc(["profile", "device", "remove", profile, nic_name])
- LOG.debug(msg + succeeded)
+ LOG.debug(msg, nic_name, profile, succeeded)
except util.ProcessExecutionError as e:
if e.exit_code != 1:
raise e
- LOG.debug(msg + fail_assume_enoent)
+ LOG.debug(msg, nic_name, profile, fail_assume_enoent)
# vi: ts=4 expandtab
diff -Nru cloud-init-18.3-9-g2e62cb8a/cloudinit/config/cc_rh_subscription.py cloud-init-18.3-24-gf6249277/cloudinit/config/cc_rh_subscription.py
--- cloud-init-18.3-9-g2e62cb8a/cloudinit/config/cc_rh_subscription.py 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/cloudinit/config/cc_rh_subscription.py 2018-08-07 14:35:29.000000000 +0000
@@ -126,7 +126,6 @@
self.enable_repo = self.rhel_cfg.get('enable-repo')
self.disable_repo = self.rhel_cfg.get('disable-repo')
self.servicelevel = self.rhel_cfg.get('service-level')
- self.subman = ['subscription-manager']
def log_success(self, msg):
'''Simple wrapper for logging info messages. Useful for unittests'''
@@ -173,21 +172,12 @@
cmd = ['identity']
try:
- self._sub_man_cli(cmd)
+ _sub_man_cli(cmd)
except util.ProcessExecutionError:
return False
return True
- def _sub_man_cli(self, cmd, logstring_val=False):
- '''
- Uses the prefered cloud-init subprocess def of util.subp
- and runs subscription-manager. Breaking this to a
- separate function for later use in mocking and unittests
- '''
- cmd = self.subman + cmd
- return util.subp(cmd, logstring=logstring_val)
-
def rhn_register(self):
'''
Registers the system by userid and password or activation key
@@ -209,7 +199,7 @@
cmd.append("--serverurl={0}".format(self.server_hostname))
try:
- return_out = self._sub_man_cli(cmd, logstring_val=True)[0]
+ return_out = _sub_man_cli(cmd, logstring_val=True)[0]
except util.ProcessExecutionError as e:
if e.stdout == "":
self.log_warn("Registration failed due "
@@ -232,7 +222,7 @@
# Attempting to register the system only
try:
- return_out = self._sub_man_cli(cmd, logstring_val=True)[0]
+ return_out = _sub_man_cli(cmd, logstring_val=True)[0]
except util.ProcessExecutionError as e:
if e.stdout == "":
self.log_warn("Registration failed due "
@@ -255,7 +245,7 @@
.format(self.servicelevel)]
try:
- return_out = self._sub_man_cli(cmd)[0]
+ return_out = _sub_man_cli(cmd)[0]
except util.ProcessExecutionError as e:
if e.stdout.rstrip() != '':
for line in e.stdout.split("\n"):
@@ -273,7 +263,7 @@
def _set_auto_attach(self):
cmd = ['attach', '--auto']
try:
- return_out = self._sub_man_cli(cmd)[0]
+ return_out = _sub_man_cli(cmd)[0]
except util.ProcessExecutionError as e:
self.log_warn("Auto-attach failed with: {0}".format(e))
return False
@@ -292,12 +282,12 @@
# Get all available pools
cmd = ['list', '--available', '--pool-only']
- results = self._sub_man_cli(cmd)[0]
+ results = _sub_man_cli(cmd)[0]
available = (results.rstrip()).split("\n")
# Get all consumed pools
cmd = ['list', '--consumed', '--pool-only']
- results = self._sub_man_cli(cmd)[0]
+ results = _sub_man_cli(cmd)[0]
consumed = (results.rstrip()).split("\n")
return available, consumed
@@ -309,14 +299,14 @@
'''
cmd = ['repos', '--list-enabled']
- return_out = self._sub_man_cli(cmd)[0]
+ return_out = _sub_man_cli(cmd)[0]
active_repos = []
for repo in return_out.split("\n"):
if "Repo ID:" in repo:
active_repos.append((repo.split(':')[1]).strip())
cmd = ['repos', '--list-disabled']
- return_out = self._sub_man_cli(cmd)[0]
+ return_out = _sub_man_cli(cmd)[0]
inactive_repos = []
for repo in return_out.split("\n"):
@@ -346,7 +336,7 @@
if len(pool_list) > 0:
cmd.extend(pool_list)
try:
- self._sub_man_cli(cmd)
+ _sub_man_cli(cmd)
self.log.debug("Attached the following pools to your "
"system: %s", (", ".join(pool_list))
.replace('--pool=', ''))
@@ -423,7 +413,7 @@
cmd.extend(enable_list)
try:
- self._sub_man_cli(cmd)
+ _sub_man_cli(cmd)
except util.ProcessExecutionError as e:
self.log_warn("Unable to alter repos due to {0}".format(e))
return False
@@ -439,4 +429,15 @@
def is_configured(self):
return bool((self.userid and self.password) or self.activation_key)
+
+def _sub_man_cli(cmd, logstring_val=False):
+ '''
+ Uses the preferred cloud-init subprocess def of util.subp
+ and runs subscription-manager. Breaking this to a
+ separate function for later use in mocking and unittests
+ '''
+ return util.subp(['subscription-manager'] + cmd,
+ logstring=logstring_val)
+
+
# vi: ts=4 expandtab
diff -Nru cloud-init-18.3-9-g2e62cb8a/cloudinit/net/eni.py cloud-init-18.3-24-gf6249277/cloudinit/net/eni.py
--- cloud-init-18.3-9-g2e62cb8a/cloudinit/net/eni.py 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/cloudinit/net/eni.py 2018-08-07 14:35:29.000000000 +0000
@@ -247,8 +247,15 @@
ifaces[currif]['bridge']['ports'] = []
for iface in split[1:]:
ifaces[currif]['bridge']['ports'].append(iface)
- elif option == "bridge_hw" and split[1].lower() == "mac":
- ifaces[currif]['bridge']['mac'] = split[2]
+ elif option == "bridge_hw":
+ # doc is confusing and thus some may put literal 'MAC'
+ # bridge_hw MAC <address>
+ # but correct is:
+ # bridge_hw <address>
+ if split[1].lower() == "mac":
+ ifaces[currif]['bridge']['mac'] = split[2]
+ else:
+ ifaces[currif]['bridge']['mac'] = split[1]
elif option == "bridge_pathcost":
if 'pathcost' not in ifaces[currif]['bridge']:
ifaces[currif]['bridge']['pathcost'] = {}
diff -Nru cloud-init-18.3-9-g2e62cb8a/cloudinit/net/netplan.py cloud-init-18.3-24-gf6249277/cloudinit/net/netplan.py
--- cloud-init-18.3-9-g2e62cb8a/cloudinit/net/netplan.py 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/cloudinit/net/netplan.py 2018-08-07 14:35:29.000000000 +0000
@@ -291,6 +291,8 @@
if len(bond_config) > 0:
bond.update({'parameters': bond_config})
+ if ifcfg.get('mac_address'):
+ bond['macaddress'] = ifcfg.get('mac_address').lower()
slave_interfaces = ifcfg.get('bond-slaves')
if slave_interfaces == 'none':
_extract_bond_slaves_by_name(interfaces, bond, ifname)
@@ -327,6 +329,8 @@
if len(br_config) > 0:
bridge.update({'parameters': br_config})
+ if ifcfg.get('mac_address'):
+ bridge['macaddress'] = ifcfg.get('mac_address').lower()
_extract_addresses(ifcfg, bridge, ifname)
bridges.update({ifname: bridge})
diff -Nru cloud-init-18.3-9-g2e62cb8a/cloudinit/sources/DataSourceOpenNebula.py cloud-init-18.3-24-gf6249277/cloudinit/sources/DataSourceOpenNebula.py
--- cloud-init-18.3-9-g2e62cb8a/cloudinit/sources/DataSourceOpenNebula.py 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/cloudinit/sources/DataSourceOpenNebula.py 2018-08-07 14:35:29.000000000 +0000
@@ -232,7 +232,7 @@
# Set IPv6 default gateway
gateway6 = self.get_gateway6(c_dev)
- if gateway:
+ if gateway6:
devconf['gateway6'] = gateway6
# Set DNS servers and search domains
diff -Nru cloud-init-18.3-9-g2e62cb8a/cloudinit/sources/DataSourceOpenStack.py cloud-init-18.3-24-gf6249277/cloudinit/sources/DataSourceOpenStack.py
--- cloud-init-18.3-9-g2e62cb8a/cloudinit/sources/DataSourceOpenStack.py 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/cloudinit/sources/DataSourceOpenStack.py 2018-08-07 14:35:29.000000000 +0000
@@ -28,7 +28,8 @@
DMI_PRODUCT_COMPUTE = 'OpenStack Compute'
VALID_DMI_PRODUCT_NAMES = [DMI_PRODUCT_NOVA, DMI_PRODUCT_COMPUTE]
DMI_ASSET_TAG_OPENTELEKOM = 'OpenTelekomCloud'
-VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM]
+DMI_ASSET_TAG_ORACLE_CLOUD = 'OracleCloud.com'
+VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM, DMI_ASSET_TAG_ORACLE_CLOUD]
class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
diff -Nru cloud-init-18.3-9-g2e62cb8a/cloudinit/sources/DataSourceSmartOS.py cloud-init-18.3-24-gf6249277/cloudinit/sources/DataSourceSmartOS.py
--- cloud-init-18.3-9-g2e62cb8a/cloudinit/sources/DataSourceSmartOS.py 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/cloudinit/sources/DataSourceSmartOS.py 2018-08-07 14:35:29.000000000 +0000
@@ -564,7 +564,7 @@
continue
LOG.warning('Unexpected response "%s" during flush', response)
except JoyentMetadataTimeoutException:
- LOG.warning('Timeout while initializing metadata client. ' +
+ LOG.warning('Timeout while initializing metadata client. '
'Is the host metadata service running?')
LOG.debug('Got "invalid command". Flush complete.')
self.fp.timeout = timeout
diff -Nru cloud-init-18.3-9-g2e62cb8a/cloudinit/sources/__init__.py cloud-init-18.3-24-gf6249277/cloudinit/sources/__init__.py
--- cloud-init-18.3-9-g2e62cb8a/cloudinit/sources/__init__.py 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/cloudinit/sources/__init__.py 2018-08-07 14:35:29.000000000 +0000
@@ -103,14 +103,14 @@
url_timeout = 10 # timeout for each metadata url read attempt
url_retries = 5 # number of times to retry url upon 404
- # The datasource defines a list of supported EventTypes during which
+ # The datasource defines a set of supported EventTypes during which
# the datasource can react to changes in metadata and regenerate
# network configuration on metadata changes.
# A datasource which supports writing network config on each system boot
- # would set update_events = {'network': [EventType.BOOT]}
+ # would call update_events['network'].add(EventType.BOOT).
# Default: generate network config on new instance id (first boot).
- update_events = {'network': [EventType.BOOT_NEW_INSTANCE]}
+ update_events = {'network': set([EventType.BOOT_NEW_INSTANCE])}
# N-tuple listing default values for any metadata-related class
# attributes cached on an instance by a process_data runs. These attribute
@@ -475,8 +475,8 @@
for update_scope, update_events in self.update_events.items():
if event in update_events:
if not supported_events.get(update_scope):
- supported_events[update_scope] = []
- supported_events[update_scope].append(event)
+ supported_events[update_scope] = set()
+ supported_events[update_scope].add(event)
for scope, matched_events in supported_events.items():
LOG.debug(
"Update datasource metadata and %s config due to events: %s",
@@ -490,6 +490,8 @@
result = self.get_data()
if result:
return True
+ LOG.debug("Datasource %s not updated for events: %s", self,
+ ', '.join(source_event_types))
return False
def check_instance_id(self, sys_cfg):
diff -Nru cloud-init-18.3-9-g2e62cb8a/cloudinit/sources/tests/test_init.py cloud-init-18.3-24-gf6249277/cloudinit/sources/tests/test_init.py
--- cloud-init-18.3-9-g2e62cb8a/cloudinit/sources/tests/test_init.py 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/cloudinit/sources/tests/test_init.py 2018-08-07 14:35:29.000000000 +0000
@@ -429,8 +429,9 @@
def test_update_metadata_only_acts_on_supported_update_events(self):
"""update_metadata won't get_data on unsupported update events."""
+ self.datasource.update_events['network'].discard(EventType.BOOT)
self.assertEqual(
- {'network': [EventType.BOOT_NEW_INSTANCE]},
+ {'network': set([EventType.BOOT_NEW_INSTANCE])},
self.datasource.update_events)
def fake_get_data():
diff -Nru cloud-init-18.3-9-g2e62cb8a/cloudinit/tests/test_util.py cloud-init-18.3-24-gf6249277/cloudinit/tests/test_util.py
--- cloud-init-18.3-9-g2e62cb8a/cloudinit/tests/test_util.py 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/cloudinit/tests/test_util.py 2018-08-07 14:35:29.000000000 +0000
@@ -57,6 +57,34 @@
REDHAT_SUPPORT_PRODUCT_VERSION="7"
""")
+OS_RELEASE_REDHAT_7 = dedent("""\
+ NAME="Red Hat Enterprise Linux Server"
+ VERSION="7.5 (Maipo)"
+ ID="rhel"
+ ID_LIKE="fedora"
+ VARIANT="Server"
+ VARIANT_ID="server"
+ VERSION_ID="7.5"
+ PRETTY_NAME="Red Hat"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:redhat:enterprise_linux:7.5:GA:server"
+ HOME_URL="https://www.redhat.com/"
+ BUG_REPORT_URL="https://bugzilla.redhat.com/"
+
+ REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 7"
+ REDHAT_BUGZILLA_PRODUCT_VERSION=7.5
+ REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux"
+ REDHAT_SUPPORT_PRODUCT_VERSION="7.5"
+""")
+
+REDHAT_RELEASE_CENTOS_6 = "CentOS release 6.10 (Final)"
+REDHAT_RELEASE_CENTOS_7 = "CentOS Linux release 7.5.1804 (Core)"
+REDHAT_RELEASE_REDHAT_6 = (
+ "Red Hat Enterprise Linux Server release 6.10 (Santiago)")
+REDHAT_RELEASE_REDHAT_7 = (
+ "Red Hat Enterprise Linux Server release 7.5 (Maipo)")
+
+
OS_RELEASE_DEBIAN = dedent("""\
PRETTY_NAME="Debian GNU/Linux 9 (stretch)"
NAME="Debian GNU/Linux"
@@ -337,6 +365,12 @@
if path == '/etc/os-release':
return 1
+ @classmethod
+ def redhat_release_exists(self, path):
+ """Side effect function """
+ if path == '/etc/redhat-release':
+ return 1
+
@mock.patch('cloudinit.util.load_file')
def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists):
"""Verify we get the correct name if the os-release file has
@@ -356,8 +390,48 @@
self.assertEqual(('ubuntu', '16.04', 'xenial'), dist)
@mock.patch('cloudinit.util.load_file')
- def test_get_linux_centos(self, m_os_release, m_path_exists):
- """Verify we get the correct name and release name on CentOS."""
+ def test_get_linux_centos6(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and release name on CentOS 6."""
+ m_os_release.return_value = REDHAT_RELEASE_CENTOS_6
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('centos', '6.10', 'Final'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_centos7_redhat_release(self, m_os_release, m_exists):
+ """Verify the correct release info on CentOS 7 without os-release."""
+ m_os_release.return_value = REDHAT_RELEASE_CENTOS_7
+ m_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('centos', '7.5.1804', 'Core'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_redhat7_osrelease(self, m_os_release, m_path_exists):
+ """Verify redhat 7 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_REDHAT_7
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('redhat', '7.5', 'Maipo'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_redhat7_rhrelease(self, m_os_release, m_path_exists):
+ """Verify redhat 7 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_REDHAT_7
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('redhat', '7.5', 'Maipo'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_redhat6_rhrelease(self, m_os_release, m_path_exists):
+ """Verify redhat 6 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_REDHAT_6
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('redhat', '6.10', 'Santiago'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_copr_centos(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and release name on COPR CentOS."""
m_os_release.return_value = OS_RELEASE_CENTOS
m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
dist = util.get_linux_distro()
diff -Nru cloud-init-18.3-9-g2e62cb8a/cloudinit/util.py cloud-init-18.3-24-gf6249277/cloudinit/util.py
--- cloud-init-18.3-9-g2e62cb8a/cloudinit/util.py 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/cloudinit/util.py 2018-08-07 14:35:29.000000000 +0000
@@ -576,12 +576,42 @@
return int(get_cfg_option_str(yobj, key, default=default))
+def _parse_redhat_release(release_file=None):
+ """Return a dictionary of distro info fields from /etc/redhat-release.
+
+ Dict keys will align with /etc/os-release keys:
+ ID, VERSION_ID, VERSION_CODENAME
+ """
+
+ if not release_file:
+ release_file = '/etc/redhat-release'
+ if not os.path.exists(release_file):
+ return {}
+ redhat_release = load_file(release_file)
+ redhat_regex = (
+ r'(?P<name>.+) release (?P<version>[\d\.]+) '
+ r'\((?P<codename>[^)]+)\)')
+ match = re.match(redhat_regex, redhat_release)
+ if match:
+ group = match.groupdict()
+ group['name'] = group['name'].lower().partition(' linux')[0]
+ if group['name'] == 'red hat enterprise':
+ group['name'] = 'redhat'
+ return {'ID': group['name'], 'VERSION_ID': group['version'],
+ 'VERSION_CODENAME': group['codename']}
+ return {}
+
+
def get_linux_distro():
distro_name = ''
distro_version = ''
flavor = ''
+ os_release = {}
if os.path.exists('/etc/os-release'):
os_release = load_shell_content(load_file('/etc/os-release'))
+ if not os_release:
+ os_release = _parse_redhat_release()
+ if os_release:
distro_name = os_release.get('ID', '')
distro_version = os_release.get('VERSION_ID', '')
if 'sles' in distro_name or 'suse' in distro_name:
@@ -594,9 +624,11 @@
flavor = os_release.get('VERSION_CODENAME', '')
if not flavor:
match = re.match(r'[^ ]+ \((?P<codename>[^)]+)\)',
- os_release.get('VERSION'))
+ os_release.get('VERSION', ''))
if match:
flavor = match.groupdict()['codename']
+ if distro_name == 'rhel':
+ distro_name = 'redhat'
else:
dist = ('', '', '')
try:
diff -Nru cloud-init-18.3-9-g2e62cb8a/cloudinit/warnings.py cloud-init-18.3-24-gf6249277/cloudinit/warnings.py
--- cloud-init-18.3-9-g2e62cb8a/cloudinit/warnings.py 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/cloudinit/warnings.py 2018-08-07 14:35:29.000000000 +0000
@@ -130,7 +130,7 @@
os.path.join(_get_warn_dir(cfg), name),
topline + "\n".join(fmtlines) + "\n" + topline)
- LOG.warning(topline + "\n".join(fmtlines) + "\n" + closeline)
+ LOG.warning("%s%s\n%s", topline, "\n".join(fmtlines), closeline)
if sleep:
LOG.debug("sleeping %d seconds for warning '%s'", sleep, name)
diff -Nru cloud-init-18.3-9-g2e62cb8a/config/cloud.cfg.tmpl cloud-init-18.3-24-gf6249277/config/cloud.cfg.tmpl
--- cloud-init-18.3-9-g2e62cb8a/config/cloud.cfg.tmpl 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/config/cloud.cfg.tmpl 2018-08-07 14:35:29.000000000 +0000
@@ -24,8 +24,6 @@
{% if variant in ["centos", "fedora", "rhel"] %}
mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2']
resize_rootfs_tmp: /dev
-ssh_deletekeys: 0
-ssh_genkeytypes: ~
ssh_pwauth: 0
{% endif %}
diff -Nru cloud-init-18.3-9-g2e62cb8a/debian/changelog cloud-init-18.3-24-gf6249277/debian/changelog
--- cloud-init-18.3-9-g2e62cb8a/debian/changelog 2018-07-23 09:56:39.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/debian/changelog 2018-08-24 08:45:57.000000000 +0000
@@ -1,4 +1,4 @@
-cloud-init (18.3-9-g2e62cb8a-0ubuntu2~scaleway) bionic; urgency=medium
+cloud-init (18.3-24-gf6249277-0ubuntu2~scaleway1) cosmic; urgency=medium
* cloudinit/sources/DataSourceScaleway.py: Add IPv4/IPv6 configuration
Remove DEP_NETWORK dependancy and have the DataSourceScaleway class
@@ -7,10 +7,39 @@
Use the newly introduced EventType.BOOT event to trigger network
configuration on every boot.
- /usr/share/cloud-init: Install debian-specific cloud.cfg file
- Modify postinst script to handle distro-specific cloud.cfg files
+ -- Louis Bouchard Fri, 24 Aug 2018 08:45:57 +0000
- -- Louis Bouchard Mon, 23 Jul 2018 09:56:39 +0000
+cloud-init (18.3-24-gf6249277-0ubuntu1) cosmic; urgency=medium
+
+ * debian/cloud-init.templates: enable Oracle Compute Infrastructure by
+ default
+ * New upstream snapshot.
+ - docs: Fix example cloud-init analyze command to match output.
+ [Wesley Gao]
+ - netplan: Correctly render macaddress on a bonds and bridges when
+ provided.
+ - tools: Add 'net-convert' subcommand command to 'cloud-init devel'.
+ - redhat: remove ssh keys on new instance.
+ - Use typeset or local in profile.d scripts.
+ - OpenNebula: Fix null gateway6 [Akihiko Ota]
+
+ -- Chad Smith <chad.smith@canonical.com>  Thu, 09 Aug 2018 10:27:29 -0600
+
+cloud-init (18.3-18-g3cee0bf8-0ubuntu1) cosmic; urgency=medium
+
+ * New upstream snapshot.
+ - oracle: fix detect_openstack to report True on OracleCloud.com DMI data
+ - tests: improve LXDInstance trying to workaround or catch bug.
+ - update_metadata re-config on every boot comments and tests not quite
+ right [Mike Gerdts]
+ - tests: Collect build_info from system if available.
+ - pylint: Fix pylint warnings reported in pylint 2.0.0.
+ - get_linux_distro: add support for rhel via redhat-release.
+ - get_linux_distro: add support for centos6 and rawhide flavors of redhat
+ - tools: add '--debug' to tools/net-convert.py
+ - tests: bump the version of paramiko to 2.4.1.
+
+ -- Chad Smith <chad.smith@canonical.com>  Tue, 31 Jul 2018 12:50:28 -0600
cloud-init (18.3-9-g2e62cb8a-0ubuntu1) cosmic; urgency=medium
diff -Nru cloud-init-18.3-9-g2e62cb8a/debian/cloud.cfg.Debian cloud-init-18.3-24-gf6249277/debian/cloud.cfg.Debian
--- cloud-init-18.3-9-g2e62cb8a/debian/cloud.cfg.Debian 2018-07-23 08:31:00.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/debian/cloud.cfg.Debian 2018-08-24 08:45:57.000000000 +0000
@@ -7,16 +7,16 @@
users:
- default
-# If this is set, 'root' will not be able to ssh in and they
-# will get a message to login instead as the default $user
-disable_root: false
+# If this is set, 'root' will not be able to ssh in and they
+# will get a message to login instead as the above $user (debian)
+disable_root: true
# This will cause the set+update hostname module to not operate (if true)
preserve_hostname: false
# Example datasource config
-# datasource:
-# Ec2:
+# datasource:
+# Ec2:
# metadata_urls: [ 'blah.com' ]
# timeout: 5 # (defaults to 50 seconds)
# max_wait: 10 # (defaults to 120 seconds)
@@ -44,15 +44,12 @@
# Emit the cloud config ready event
# this can be used by upstart jobs for 'start on cloud-config'.
- emit_upstart
- - snap
- - snap_config # DEPRECATED- Drop in version 18.2
- ssh-import-id
- locale
- set-passwords
- grub-dpkg
- apt-pipelining
- apt-configure
- - ubuntu-advantage
- ntp
- timezone
- disable-ec2-metadata
@@ -61,15 +58,12 @@
# The modules that run in the 'final' stage
cloud_final_modules:
- - snappy # DEPRECATED- Drop in version 18.2
- package-update-upgrade-install
- fan
- - landscape
- - lxd
- puppet
- chef
- - mcollective
- salt-minion
+ - mcollective
- rightscale_userdata
- scripts-vendor
- scripts-per-once
@@ -92,7 +86,9 @@
name: debian
lock_passwd: True
gecos: Debian
- groups: [adm, audio, cdrom, dialout, floppy, video, plugdev, dip]
+ groups: [adm, audio, cdrom, dialout, dip, floppy, netdev, plugdev, sudo, video]
+ sudo: ["ALL=(ALL) NOPASSWD:ALL"]
+ shell: /bin/bash
# Other config here will be given to the distro class and/or path classes
paths:
cloud_dir: /var/lib/cloud/
@@ -101,5 +97,6 @@
package_mirrors:
- arches: [default]
failsafe:
- primary: http://ftp.debian.org/debian
- security: http://security.debian.org
+ primary: http://deb.debian.org/debian
+ security: http://security.debian.org/
+ ssh_svcname: ssh
diff -Nru cloud-init-18.3-9-g2e62cb8a/debian/cloud-init.postinst cloud-init-18.3-24-gf6249277/debian/cloud-init.postinst
--- cloud-init-18.3-9-g2e62cb8a/debian/cloud-init.postinst 2018-07-23 09:56:39.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/debian/cloud-init.postinst 2018-08-24 08:45:57.000000000 +0000
@@ -330,9 +330,9 @@
DISTRO=`lsb_release -is`
INITCONFFILE=`mktemp /etc/cloud/cloud.cfg.XXXXXX`
if [ "${DISTRO}" = "Ubuntu" ]; then
- sed 's/disable_root: true/disable_root: false/' /etc/cloud/cloud.cfg > ${INITCONFFILE}
+ sed 's/disable_root: true/disable_root: false/' /etc/cloud/cloud.cfg > ${INITCONFFILE}
else
- cp -pf /usr/share/cloud-init/cloud.cfg.${DISTRO} $INITCONFFILE
+ cp -pf /usr/share/cloud-init/cloud.cfg.${DISTRO} $INITCONFFILE
fi
cp ${INITCONFFILE} /etc/cloud/cloud.cfg
ucf --debconf-ok $INITCONFFILE /etc/cloud/cloud.cfg
diff -Nru cloud-init-18.3-9-g2e62cb8a/debian/cloud-init.templates cloud-init-18.3-24-gf6249277/debian/cloud-init.templates
--- cloud-init-18.3-9-g2e62cb8a/debian/cloud-init.templates 2018-07-09 20:16:03.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/debian/cloud-init.templates 2018-08-09 16:27:29.000000000 +0000
@@ -1,8 +1,8 @@
Template: cloud-init/datasources
Type: multiselect
-Default: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, None
-Choices-C: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, None
-Choices: NoCloud: Reads info from /var/lib/cloud/seed only, ConfigDrive: Reads data from Openstack Config Drive, OpenNebula: read from OpenNebula context disk, DigitalOcean: reads data from Droplet datasource, Azure: read from MS Azure cdrom. Requires walinux-agent, AltCloud: config disks for RHEVm and vSphere, OVF: Reads data from OVF Transports, MAAS: Reads data from Ubuntu MAAS, GCE: google compute metadata service, OpenStack: native openstack metadata service, CloudSigma: metadata over serial for cloudsigma.com, SmartOS: Read from SmartOS metadata service, Bigstep: Bigstep metadata service, Scaleway: Scaleway metadata service, AliYun: Alibaba metadata service, Ec2: reads data from EC2 Metadata service, CloudStack: Read from CloudStack metadata service, Hetzner: Hetzner Cloud, IBMCloud: IBM Cloud. Previously softlayer or bluemix., None: Failsafe datasource
+Default: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, None
+Choices-C: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, None
+Choices: NoCloud: Reads info from /var/lib/cloud/seed only, ConfigDrive: Reads data from Openstack Config Drive, OpenNebula: read from OpenNebula context disk, DigitalOcean: reads data from Droplet datasource, Azure: read from MS Azure cdrom. Requires walinux-agent, AltCloud: config disks for RHEVm and vSphere, OVF: Reads data from OVF Transports, MAAS: Reads data from Ubuntu MAAS, GCE: google compute metadata service, OpenStack: native openstack metadata service, CloudSigma: metadata over serial for cloudsigma.com, SmartOS: Read from SmartOS metadata service, Bigstep: Bigstep metadata service, Scaleway: Scaleway metadata service, AliYun: Alibaba metadata service, Ec2: reads data from EC2 Metadata service, CloudStack: Read from CloudStack metadata service, Hetzner: Hetzner Cloud, IBMCloud: IBM Cloud. Previously softlayer or bluemix., Oracle: Oracle Compute Infrastructure, None: Failsafe datasource
Description: Which data sources should be searched?
Cloud-init supports searching different "Data Sources" for information
that it uses to configure a cloud instance.
diff -Nru cloud-init-18.3-9-g2e62cb8a/debian/patches/add_network_config_to_DataSourceScaleway.patch cloud-init-18.3-24-gf6249277/debian/patches/add_network_config_to_DataSourceScaleway.patch
--- cloud-init-18.3-9-g2e62cb8a/debian/patches/add_network_config_to_DataSourceScaleway.patch 2018-07-20 13:53:32.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/debian/patches/add_network_config_to_DataSourceScaleway.patch 1970-01-01 00:00:00.000000000 +0000
@@ -1,232 +0,0 @@
-Index: cloud-init-18.3-9-g2e62cb8a/cloudinit/sources/DataSourceScaleway.py
-===================================================================
---- cloud-init-18.3-9-g2e62cb8a.orig/cloudinit/sources/DataSourceScaleway.py
-+++ cloud-init-18.3-9-g2e62cb8a/cloudinit/sources/DataSourceScaleway.py
-@@ -29,7 +29,9 @@ from cloudinit import log as logging
- from cloudinit import sources
- from cloudinit import url_helper
- from cloudinit import util
--
-+from cloudinit import net
-+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
-+from cloudinit.event import EventType
-
- LOG = logging.getLogger(__name__)
-
-@@ -168,8 +170,8 @@ def query_data_api(api_type, api_address
-
-
- class DataSourceScaleway(sources.DataSource):
--
- dsname = "Scaleway"
-+ update_events = {'network': [EventType.BOOT_NEW_INSTANCE, EventType.BOOT]}
-
- def __init__(self, sys_cfg, distro, paths):
- super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths)
-@@ -185,11 +187,10 @@ class DataSourceScaleway(sources.DataSou
-
- self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES))
- self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT))
-+ self._fallback_interface = None
-+ self._network_config = None
-
-- def _get_data(self):
-- if not on_scaleway():
-- return False
--
-+ def _crawl_metadata(self):
- resp = url_helper.readurl(self.metadata_address,
- timeout=self.timeout,
- retries=self.retries)
-@@ -203,9 +204,48 @@ class DataSourceScaleway(sources.DataSou
- 'vendor-data', self.vendordata_address,
- self.retries, self.timeout
- )
-+
-+ def _get_data(self):
-+ if not on_scaleway():
-+ return False
-+
-+ if self._fallback_interface is None:
-+ self._fallback_interface = net.find_fallback_nic()
-+ try:
-+ with EphemeralDHCPv4(self._fallback_interface):
-+ util.log_time(
-+ logfunc=LOG.debug, msg='Crawl of metadata service',
-+ func=self._crawl_metadata)
-+ except (NoDHCPLeaseError) as e:
-+ util.logexc(LOG, str(e))
-+ return False
- return True
-
- @property
-+ def network_config(self):
-+ """
-+ Configure networking according to data received from the
-+ metadata API.
-+ """
-+ if self._network_config:
-+ return self._network_config
-+
-+ if self._fallback_interface is None:
-+ self._fallback_interface = net.find_fallback_nic()
-+
-+ netcfg = {'type': 'physical', 'name': '%s' % self._fallback_interface}
-+ subnets = [{'type': 'dhcp4'}]
-+ if self.metadata['ipv6']:
-+ subnets += [{'type': 'static',
-+ 'address': '%s' % self.metadata['ipv6']['address'],
-+ 'gateway': '%s' % self.metadata['ipv6']['gateway'],
-+ 'netmask': '%s' % self.metadata['ipv6']['netmask'],
-+ }]
-+ netcfg['subnets'] = subnets
-+ self._network_config = {'version': 1, 'config': [netcfg]}
-+ return self._network_config
-+
-+ @property
- def launch_index(self):
- return None
-
-@@ -228,7 +268,7 @@ class DataSourceScaleway(sources.DataSou
-
-
- datasources = [
-- (DataSourceScaleway, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-+ (DataSourceScaleway, (sources.DEP_FILESYSTEM,)),
- ]
-
-
-Index: cloud-init-18.3-9-g2e62cb8a/tests/unittests/test_datasource/test_common.py
-===================================================================
---- cloud-init-18.3-9-g2e62cb8a.orig/tests/unittests/test_datasource/test_common.py
-+++ cloud-init-18.3-9-g2e62cb8a/tests/unittests/test_datasource/test_common.py
-@@ -41,6 +41,7 @@ DEFAULT_LOCAL = [
- SmartOS.DataSourceSmartOS,
- Ec2.DataSourceEc2Local,
- OpenStack.DataSourceOpenStackLocal,
-+ Scaleway.DataSourceScaleway,
- ]
-
- DEFAULT_NETWORK = [
-@@ -55,7 +56,6 @@ DEFAULT_NETWORK = [
- NoCloud.DataSourceNoCloudNet,
- OpenStack.DataSourceOpenStack,
- OVF.DataSourceOVFNet,
-- Scaleway.DataSourceScaleway,
- ]
-
-
-Index: cloud-init-18.3-9-g2e62cb8a/tests/unittests/test_datasource/test_scaleway.py
-===================================================================
---- cloud-init-18.3-9-g2e62cb8a.orig/tests/unittests/test_datasource/test_scaleway.py
-+++ cloud-init-18.3-9-g2e62cb8a/tests/unittests/test_datasource/test_scaleway.py
-@@ -176,11 +176,12 @@ class TestDataSourceScaleway(HttprettyTe
- self.vendordata_url = \
- DataSourceScaleway.BUILTIN_DS_CONFIG['vendordata_url']
-
-+ @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4')
- @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter',
- get_source_address_adapter)
- @mock.patch('cloudinit.util.get_cmdline')
- @mock.patch('time.sleep', return_value=None)
-- def test_metadata_ok(self, sleep, m_get_cmdline):
-+ def test_metadata_ok(self, sleep, m_get_cmdline, dhcpv4):
- """
- get_data() returns metadata, user data and vendor data.
- """
-@@ -211,11 +212,12 @@ class TestDataSourceScaleway(HttprettyTe
- self.assertIsNone(self.datasource.region)
- self.assertEqual(sleep.call_count, 0)
-
-+ @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4')
- @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter',
- get_source_address_adapter)
- @mock.patch('cloudinit.util.get_cmdline')
- @mock.patch('time.sleep', return_value=None)
-- def test_metadata_404(self, sleep, m_get_cmdline):
-+ def test_metadata_404(self, sleep, m_get_cmdline, dhcpv4):
- """
- get_data() returns metadata, but no user data nor vendor data.
- """
-@@ -234,11 +236,12 @@ class TestDataSourceScaleway(HttprettyTe
- self.assertIsNone(self.datasource.get_vendordata_raw())
- self.assertEqual(sleep.call_count, 0)
-
-+ @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4')
- @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter',
- get_source_address_adapter)
- @mock.patch('cloudinit.util.get_cmdline')
- @mock.patch('time.sleep', return_value=None)
-- def test_metadata_rate_limit(self, sleep, m_get_cmdline):
-+ def test_metadata_rate_limit(self, sleep, m_get_cmdline, dhcpv4):
- """
- get_data() is rate limited two times by the metadata API when fetching
- user data.
-@@ -262,3 +265,67 @@ class TestDataSourceScaleway(HttprettyTe
- self.assertEqual(self.datasource.get_userdata_raw(),
- DataResponses.FAKE_USER_DATA)
- self.assertEqual(sleep.call_count, 2)
-+
-+ @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
-+ @mock.patch('cloudinit.util.get_cmdline')
-+ def test_network_config_ok(self, m_get_cmdline, fallback_nic):
-+ """
-+ network_config will only generate IPv4 config if no ipv6 data is
-+ available in the metadata
-+ """
-+ m_get_cmdline.return_value = 'scaleway'
-+ fallback_nic.return_value = 'ens2'
-+ self.datasource.metadata['ipv6'] = None
-+
-+ netcfg = self.datasource.network_config
-+ resp = {'version': 1,
-+ 'config': [{
-+ 'type': 'physical',
-+ 'name': 'ens2',
-+ 'subnets': [{'type': 'dhcp4'}]}]
-+ }
-+ self.assertEqual(netcfg, resp)
-+
-+ @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
-+ @mock.patch('cloudinit.util.get_cmdline')
-+ def test_network_config_ipv6_ok(self, m_get_cmdline, fallback_nic):
-+ """
-+ network_config will only generate IPv4/v6 configs if ipv6 data is
-+ available in the metadata
-+ """
-+ m_get_cmdline.return_value = 'scaleway'
-+ fallback_nic.return_value = 'ens2'
-+ self.datasource.metadata['ipv6'] = {
-+ 'address': '2000:abc:4444:9876::42:999',
-+ 'gateway': '2000:abc:4444:9876::42:000',
-+ 'netmask': '127',
-+ }
-+
-+ netcfg = self.datasource.network_config
-+ resp = {'version': 1,
-+ 'config': [{
-+ 'type': 'physical',
-+ 'name': 'ens2',
-+ 'subnets': [{'type': 'dhcp4'},
-+ {'type': 'static',
-+ 'address': '2000:abc:4444:9876::42:999',
-+ 'gateway': '2000:abc:4444:9876::42:000',
-+ 'netmask': '127', }
-+ ]
-+
-+ }]
-+ }
-+ self.assertEqual(netcfg, resp)
-+
-+ @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
-+ @mock.patch('cloudinit.util.get_cmdline')
-+ def test_network_config_existing(self, m_get_cmdline, fallback_nic):
-+ """
-+ network_config() should return the same data if a network config
-+ already exists
-+ """
-+ m_get_cmdline.return_value = 'scaleway'
-+ self.datasource._network_config = '0xdeadbeef'
-+
-+ netcfg = self.datasource.network_config
-+ self.assertEqual(netcfg, '0xdeadbeef')
diff -Nru cloud-init-18.3-9-g2e62cb8a/debian/patches/Scaleway-Add-network-configuration-to-the-DataSource.patch cloud-init-18.3-24-gf6249277/debian/patches/Scaleway-Add-network-configuration-to-the-DataSource.patch
--- cloud-init-18.3-9-g2e62cb8a/debian/patches/Scaleway-Add-network-configuration-to-the-DataSource.patch 1970-01-01 00:00:00.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/debian/patches/Scaleway-Add-network-configuration-to-the-DataSource.patch 2018-08-24 08:45:57.000000000 +0000
@@ -0,0 +1,238 @@
+Index: cloud-init-18.3-24-gf6249277/cloudinit/sources/DataSourceScaleway.py
+===================================================================
+--- cloud-init-18.3-24-gf6249277.orig/cloudinit/sources/DataSourceScaleway.py
++++ cloud-init-18.3-24-gf6249277/cloudinit/sources/DataSourceScaleway.py
+@@ -29,7 +29,9 @@ from cloudinit import log as logging
+ from cloudinit import sources
+ from cloudinit import url_helper
+ from cloudinit import util
+-
++from cloudinit import net
++from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
++from cloudinit.event import EventType
+
+ LOG = logging.getLogger(__name__)
+
+@@ -168,8 +170,8 @@ def query_data_api(api_type, api_address
+
+
+ class DataSourceScaleway(sources.DataSource):
+-
+ dsname = "Scaleway"
++ update_events = {'network': [EventType.BOOT_NEW_INSTANCE, EventType.BOOT]}
+
+ def __init__(self, sys_cfg, distro, paths):
+ super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths)
+@@ -185,11 +187,10 @@ class DataSourceScaleway(sources.DataSou
+
+ self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES))
+ self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT))
++ self._fallback_interface = None
++ self._network_config = None
+
+- def _get_data(self):
+- if not on_scaleway():
+- return False
+-
++ def _crawl_metadata(self):
+ resp = url_helper.readurl(self.metadata_address,
+ timeout=self.timeout,
+ retries=self.retries)
+@@ -203,9 +204,48 @@ class DataSourceScaleway(sources.DataSou
+ 'vendor-data', self.vendordata_address,
+ self.retries, self.timeout
+ )
++
++ def _get_data(self):
++ if not on_scaleway():
++ return False
++
++ if self._fallback_interface is None:
++ self._fallback_interface = net.find_fallback_nic()
++ try:
++ with EphemeralDHCPv4(self._fallback_interface):
++ util.log_time(
++ logfunc=LOG.debug, msg='Crawl of metadata service',
++ func=self._crawl_metadata)
++ except (NoDHCPLeaseError) as e:
++ util.logexc(LOG, str(e))
++ return False
+ return True
+
+ @property
++ def network_config(self):
++ """
++ Configure networking according to data received from the
++ metadata API.
++ """
++ if self._network_config:
++ return self._network_config
++
++ if self._fallback_interface is None:
++ self._fallback_interface = net.find_fallback_nic()
++
++ netcfg = {'type': 'physical', 'name': '%s' % self._fallback_interface}
++ subnets = [{'type': 'dhcp4'}]
++ if self.metadata['ipv6']:
++ subnets += [{'type': 'static',
++ 'address': '%s' % self.metadata['ipv6']['address'],
++ 'gateway': '%s' % self.metadata['ipv6']['gateway'],
++ 'netmask': '%s' % self.metadata['ipv6']['netmask'],
++ }]
++ netcfg['subnets'] = subnets
++ self._network_config = {'version': 1, 'config': [netcfg]}
++ return self._network_config
++
++ @property
+ def launch_index(self):
+ return None
+
+@@ -228,7 +268,7 @@ class DataSourceScaleway(sources.DataSou
+
+
+ datasources = [
+- (DataSourceScaleway, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
++ (DataSourceScaleway, (sources.DEP_FILESYSTEM,)),
+ ]
+
+
+Index: cloud-init-18.3-24-gf6249277/tests/unittests/test_datasource/test_common.py
+===================================================================
+--- cloud-init-18.3-24-gf6249277.orig/tests/unittests/test_datasource/test_common.py
++++ cloud-init-18.3-24-gf6249277/tests/unittests/test_datasource/test_common.py
+@@ -41,6 +41,7 @@ DEFAULT_LOCAL = [
+ SmartOS.DataSourceSmartOS,
+ Ec2.DataSourceEc2Local,
+ OpenStack.DataSourceOpenStackLocal,
++ Scaleway.DataSourceScaleway,
+ ]
+
+ DEFAULT_NETWORK = [
+@@ -55,7 +56,6 @@ DEFAULT_NETWORK = [
+ NoCloud.DataSourceNoCloudNet,
+ OpenStack.DataSourceOpenStack,
+ OVF.DataSourceOVFNet,
+- Scaleway.DataSourceScaleway,
+ ]
+
+
+Index: cloud-init-18.3-24-gf6249277/tests/unittests/test_datasource/test_scaleway.py
+===================================================================
+--- cloud-init-18.3-24-gf6249277.orig/tests/unittests/test_datasource/test_scaleway.py
++++ cloud-init-18.3-24-gf6249277/tests/unittests/test_datasource/test_scaleway.py
+@@ -176,11 +176,18 @@ class TestDataSourceScaleway(HttprettyTe
+ self.vendordata_url = \
+ DataSourceScaleway.BUILTIN_DS_CONFIG['vendordata_url']
+
++ self.add_patch('cloudinit.sources.DataSourceScaleway.on_scaleway',
++ '_m_on_scaleway', return_value=True)
++ self.add_patch(
++ 'cloudinit.sources.DataSourceScaleway.net.find_fallback_nic',
++ '_m_find_fallback_nic', return_value='scalewaynic0')
++
++ @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4')
+ @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter',
+ get_source_address_adapter)
+ @mock.patch('cloudinit.util.get_cmdline')
+ @mock.patch('time.sleep', return_value=None)
+- def test_metadata_ok(self, sleep, m_get_cmdline):
++ def test_metadata_ok(self, sleep, m_get_cmdline, dhcpv4):
+ """
+ get_data() returns metadata, user data and vendor data.
+ """
+@@ -211,11 +218,12 @@ class TestDataSourceScaleway(HttprettyTe
+ self.assertIsNone(self.datasource.region)
+ self.assertEqual(sleep.call_count, 0)
+
++ @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4')
+ @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter',
+ get_source_address_adapter)
+ @mock.patch('cloudinit.util.get_cmdline')
+ @mock.patch('time.sleep', return_value=None)
+- def test_metadata_404(self, sleep, m_get_cmdline):
++ def test_metadata_404(self, sleep, m_get_cmdline, dhcpv4):
+ """
+ get_data() returns metadata, but no user data nor vendor data.
+ """
+@@ -234,11 +242,12 @@ class TestDataSourceScaleway(HttprettyTe
+ self.assertIsNone(self.datasource.get_vendordata_raw())
+ self.assertEqual(sleep.call_count, 0)
+
++ @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4')
+ @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter',
+ get_source_address_adapter)
+ @mock.patch('cloudinit.util.get_cmdline')
+ @mock.patch('time.sleep', return_value=None)
+- def test_metadata_rate_limit(self, sleep, m_get_cmdline):
++ def test_metadata_rate_limit(self, sleep, m_get_cmdline, dhcpv4):
+ """
+ get_data() is rate limited two times by the metadata API when fetching
+ user data.
+@@ -262,3 +271,67 @@ class TestDataSourceScaleway(HttprettyTe
+ self.assertEqual(self.datasource.get_userdata_raw(),
+ DataResponses.FAKE_USER_DATA)
+ self.assertEqual(sleep.call_count, 2)
++
++ @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
++ @mock.patch('cloudinit.util.get_cmdline')
++ def test_network_config_ok(self, m_get_cmdline, fallback_nic):
++ """
++ network_config will only generate IPv4 config if no ipv6 data is
++ available in the metadata
++ """
++ m_get_cmdline.return_value = 'scaleway'
++ fallback_nic.return_value = 'ens2'
++ self.datasource.metadata['ipv6'] = None
++
++ netcfg = self.datasource.network_config
++ resp = {'version': 1,
++ 'config': [{
++ 'type': 'physical',
++ 'name': 'ens2',
++ 'subnets': [{'type': 'dhcp4'}]}]
++ }
++ self.assertEqual(netcfg, resp)
++
++ @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
++ @mock.patch('cloudinit.util.get_cmdline')
++ def test_network_config_ipv6_ok(self, m_get_cmdline, fallback_nic):
++ """
++ network_config will only generate IPv4/v6 configs if ipv6 data is
++ available in the metadata
++ """
++ m_get_cmdline.return_value = 'scaleway'
++ fallback_nic.return_value = 'ens2'
++ self.datasource.metadata['ipv6'] = {
++ 'address': '2000:abc:4444:9876::42:999',
++ 'gateway': '2000:abc:4444:9876::42:000',
++ 'netmask': '127',
++ }
++
++ netcfg = self.datasource.network_config
++ resp = {'version': 1,
++ 'config': [{
++ 'type': 'physical',
++ 'name': 'ens2',
++ 'subnets': [{'type': 'dhcp4'},
++ {'type': 'static',
++ 'address': '2000:abc:4444:9876::42:999',
++ 'gateway': '2000:abc:4444:9876::42:000',
++ 'netmask': '127', }
++ ]
++
++ }]
++ }
++ self.assertEqual(netcfg, resp)
++
++ @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
++ @mock.patch('cloudinit.util.get_cmdline')
++ def test_network_config_existing(self, m_get_cmdline, fallback_nic):
++ """
++ network_config() should return the same data if a network config
++ already exists
++ """
++ m_get_cmdline.return_value = 'scaleway'
++ self.datasource._network_config = '0xdeadbeef'
++
++ netcfg = self.datasource.network_config
++ self.assertEqual(netcfg, '0xdeadbeef')
diff -Nru cloud-init-18.3-9-g2e62cb8a/debian/patches/series cloud-init-18.3-24-gf6249277/debian/patches/series
--- cloud-init-18.3-9-g2e62cb8a/debian/patches/series 2018-07-20 13:52:14.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/debian/patches/series 2018-08-24 08:45:57.000000000 +0000
@@ -1 +1 @@
-add_network_config_to_DataSourceScaleway.patch
+Scaleway-Add-network-configuration-to-the-DataSource.patch
diff -Nru cloud-init-18.3-9-g2e62cb8a/doc/rtd/topics/debugging.rst cloud-init-18.3-24-gf6249277/doc/rtd/topics/debugging.rst
--- cloud-init-18.3-9-g2e62cb8a/doc/rtd/topics/debugging.rst 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/doc/rtd/topics/debugging.rst 2018-08-07 14:35:29.000000000 +0000
@@ -45,7 +45,7 @@
.. code-block:: shell-session
- $ cloud-init analyze blame -i my-cloud-init.log
+ $ cloud-init analyze dump -i my-cloud-init.log
[
{
"description": "running config modules",
diff -Nru cloud-init-18.3-9-g2e62cb8a/integration-requirements.txt cloud-init-18.3-24-gf6249277/integration-requirements.txt
--- cloud-init-18.3-9-g2e62cb8a/integration-requirements.txt 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/integration-requirements.txt 2018-08-07 14:35:29.000000000 +0000
@@ -9,7 +9,7 @@
boto3==1.5.9
# ssh communication
-paramiko==2.4.0
+paramiko==2.4.1
# lxd backend
# 04/03/2018: enables use of lxd 3.0
diff -Nru cloud-init-18.3-9-g2e62cb8a/tests/cloud_tests/platforms/instances.py cloud-init-18.3-24-gf6249277/tests/cloud_tests/platforms/instances.py
--- cloud-init-18.3-9-g2e62cb8a/tests/cloud_tests/platforms/instances.py 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/tests/cloud_tests/platforms/instances.py 2018-08-07 14:35:29.000000000 +0000
@@ -97,7 +97,8 @@
return self._ssh_client
if not self.ssh_ip or not self.ssh_port:
- raise ValueError
+ raise ValueError("Cannot ssh_connect, ssh_ip=%s ssh_port=%s" %
+ (self.ssh_ip, self.ssh_port))
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
diff -Nru cloud-init-18.3-9-g2e62cb8a/tests/cloud_tests/platforms/lxd/instance.py cloud-init-18.3-24-gf6249277/tests/cloud_tests/platforms/lxd/instance.py
--- cloud-init-18.3-9-g2e62cb8a/tests/cloud_tests/platforms/lxd/instance.py 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/tests/cloud_tests/platforms/lxd/instance.py 2018-08-07 14:35:29.000000000 +0000
@@ -12,6 +12,8 @@
from ..instances import Instance
+from pylxd import exceptions as pylxd_exc
+
class LXDInstance(Instance):
"""LXD container backed instance."""
@@ -30,6 +32,9 @@
@param config: image config
@param features: supported feature flags
"""
+ if not pylxd_container:
+ raise ValueError("Invalid value pylxd_container: %s" %
+ pylxd_container)
self._pylxd_container = pylxd_container
super(LXDInstance, self).__init__(
platform, name, properties, config, features)
@@ -40,9 +45,19 @@
@property
def pylxd_container(self):
"""Property function."""
+ if self._pylxd_container is None:
+ raise RuntimeError(
+ "%s: Attempted use of pylxd_container after deletion." % self)
self._pylxd_container.sync()
return self._pylxd_container
+ def __str__(self):
+ return (
+ '%s(name=%s) status=%s' %
+ (self.__class__.__name__, self.name,
+ ("deleted" if self._pylxd_container is None else
+ self.pylxd_container.status)))
+
def _execute(self, command, stdin=None, env=None):
if env is None:
env = {}
@@ -165,10 +180,27 @@
self.shutdown(wait=wait)
self.start(wait=wait)
- def shutdown(self, wait=True):
+ def shutdown(self, wait=True, retry=1):
"""Shutdown instance."""
- if self.pylxd_container.status != 'Stopped':
+ if self.pylxd_container.status == 'Stopped':
+ return
+
+ try:
+ LOG.debug("%s: shutting down (wait=%s)", self, wait)
self.pylxd_container.stop(wait=wait)
+ except (pylxd_exc.LXDAPIException, pylxd_exc.NotFound) as e:
+ # An exception happens here sometimes (LP: #1783198)
+ # LOG it, and try again.
+ LOG.warning(
+ ("%s: shutdown(retry=%d) caught %s in shutdown "
+ "(response=%s): %s"),
+ self, retry, e.__class__.__name__, e.response, e)
+ if isinstance(e, pylxd_exc.NotFound):
+ LOG.debug("container_exists(%s) == %s",
+ self.name, self.platform.container_exists(self.name))
+ if retry == 0:
+ raise e
+ return self.shutdown(wait=wait, retry=retry - 1)
def start(self, wait=True, wait_for_cloud_init=False):
"""Start instance."""
@@ -189,12 +221,14 @@
def destroy(self):
"""Clean up instance."""
+ LOG.debug("%s: deleting container.", self)
self.unfreeze()
self.shutdown()
self.pylxd_container.delete(wait=True)
+ self._pylxd_container = None
+
if self.platform.container_exists(self.name):
- raise OSError('container {} was not properly removed'
- .format(self.name))
+ raise OSError('%s: container was not properly removed' % self)
if self._console_log_file and os.path.exists(self._console_log_file):
os.unlink(self._console_log_file)
shutil.rmtree(self.tmpd)
diff -Nru cloud-init-18.3-9-g2e62cb8a/tests/cloud_tests/setup_image.py cloud-init-18.3-24-gf6249277/tests/cloud_tests/setup_image.py
--- cloud-init-18.3-9-g2e62cb8a/tests/cloud_tests/setup_image.py 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/tests/cloud_tests/setup_image.py 2018-08-07 14:35:29.000000000 +0000
@@ -4,6 +4,7 @@
from functools import partial
import os
+import yaml
from tests.cloud_tests import LOG
from tests.cloud_tests import stage, util
@@ -220,7 +221,14 @@
calls = [partial(stage.run_single, desc, partial(func, args, image))
for name, func, desc in handlers if getattr(args, name, None)]
- LOG.info('setting up %s', image)
+ try:
+ data = yaml.load(image.read_data("/etc/cloud/build.info", decode=True))
+ info = ' '.join(["%s=%s" % (k, data.get(k))
+ for k in ("build_name", "serial") if k in data])
+ except Exception as e:
+ info = "N/A (%s)" % e
+
+ LOG.info('setting up %s (%s)', image, info)
res = stage.run_stage(
'set up for {}'.format(image), calls, continue_after_error=False)
return res
diff -Nru cloud-init-18.3-9-g2e62cb8a/tests/cloud_tests/testcases.yaml cloud-init-18.3-24-gf6249277/tests/cloud_tests/testcases.yaml
--- cloud-init-18.3-9-g2e62cb8a/tests/cloud_tests/testcases.yaml 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/tests/cloud_tests/testcases.yaml 2018-08-07 14:35:29.000000000 +0000
@@ -27,6 +27,10 @@
package-versions: |
#!/bin/sh
dpkg-query --show
+ build.info: |
+ #!/bin/sh
+ binfo=/etc/cloud/build.info
+ [ -f "$binfo" ] && cat "$binfo" || echo "N/A"
system.journal.gz: |
#!/bin/sh
[ -d /run/systemd ] || { echo "not systemd."; exit 0; }
diff -Nru cloud-init-18.3-9-g2e62cb8a/tests/unittests/test_cli.py cloud-init-18.3-24-gf6249277/tests/unittests/test_cli.py
--- cloud-init-18.3-9-g2e62cb8a/tests/unittests/test_cli.py 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/tests/unittests/test_cli.py 2018-08-07 14:35:29.000000000 +0000
@@ -208,8 +208,7 @@
for subcommand in expected_subcommands:
self.assertIn(subcommand, error)
- @mock.patch('cloudinit.config.schema.handle_schema_args')
- def test_wb_devel_schema_subcommand_parser(self, m_schema):
+ def test_wb_devel_schema_subcommand_parser(self):
"""The subcommand cloud-init schema calls the correct subparser."""
exit_code = self._call_main(['cloud-init', 'devel', 'schema'])
self.assertEqual(1, exit_code)
diff -Nru cloud-init-18.3-9-g2e62cb8a/tests/unittests/test_datasource/test_opennebula.py cloud-init-18.3-24-gf6249277/tests/unittests/test_datasource/test_opennebula.py
--- cloud-init-18.3-9-g2e62cb8a/tests/unittests/test_datasource/test_opennebula.py 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/tests/unittests/test_datasource/test_opennebula.py 2018-08-07 14:35:29.000000000 +0000
@@ -354,6 +354,412 @@
system_nics = ('eth0', 'ens3')
+ def test_context_devname(self):
+ """Verify context_devname correctly returns mac and name."""
+ context = {
+ 'ETH0_MAC': '02:00:0a:12:01:01',
+ 'ETH1_MAC': '02:00:0a:12:0f:0f', }
+ expected = {
+ '02:00:0a:12:01:01': 'ETH0',
+ '02:00:0a:12:0f:0f': 'ETH1', }
+ net = ds.OpenNebulaNetwork(context)
+ self.assertEqual(expected, net.context_devname)
+
+ def test_get_nameservers(self):
+ """
+ Verify get_nameservers('device') correctly returns DNS server addresses
+ and search domains.
+ """
+ context = {
+ 'DNS': '1.2.3.8',
+ 'ETH0_DNS': '1.2.3.6 1.2.3.7',
+ 'ETH0_SEARCH_DOMAIN': 'example.com example.org', }
+ expected = {
+ 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'],
+ 'search': ['example.com', 'example.org']}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_nameservers('eth0')
+ self.assertEqual(expected, val)
+
+ def test_get_mtu(self):
+ """Verify get_mtu('device') correctly returns MTU size."""
+ context = {'ETH0_MTU': '1280'}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_mtu('eth0')
+ self.assertEqual('1280', val)
+
+ def test_get_ip(self):
+ """Verify get_ip('device') correctly returns IPv4 address."""
+ context = {'ETH0_IP': PUBLIC_IP}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_ip('eth0', MACADDR)
+ self.assertEqual(PUBLIC_IP, val)
+
+ def test_get_ip_emptystring(self):
+ """
+ Verify get_ip('device') correctly returns IPv4 address.
+ It returns IP address created by MAC address if ETH0_IP has empty
+ string.
+ """
+ context = {'ETH0_IP': ''}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_ip('eth0', MACADDR)
+ self.assertEqual(IP_BY_MACADDR, val)
+
+ def test_get_ip6(self):
+ """
+ Verify get_ip6('device') correctly returns IPv6 address.
+ In this case, IPv6 address is given by ETH0_IP6.
+ """
+ context = {
+ 'ETH0_IP6': IP6_GLOBAL,
+ 'ETH0_IP6_ULA': '', }
+ expected = [IP6_GLOBAL]
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_ip6('eth0')
+ self.assertEqual(expected, val)
+
+ def test_get_ip6_ula(self):
+ """
+ Verify get_ip6('device') correctly returns IPv6 address.
+ In this case, IPv6 address is given by ETH0_IP6_ULA.
+ """
+ context = {
+ 'ETH0_IP6': '',
+ 'ETH0_IP6_ULA': IP6_ULA, }
+ expected = [IP6_ULA]
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_ip6('eth0')
+ self.assertEqual(expected, val)
+
+ def test_get_ip6_dual(self):
+ """
+ Verify get_ip6('device') correctly returns IPv6 address.
+ In this case, IPv6 addresses are given by ETH0_IP6 and ETH0_IP6_ULA.
+ """
+ context = {
+ 'ETH0_IP6': IP6_GLOBAL,
+ 'ETH0_IP6_ULA': IP6_ULA, }
+ expected = [IP6_GLOBAL, IP6_ULA]
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_ip6('eth0')
+ self.assertEqual(expected, val)
+
+ def test_get_ip6_prefix(self):
+ """
+ Verify get_ip6_prefix('device') correctly returns IPv6 prefix.
+ """
+ context = {'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_ip6_prefix('eth0')
+ self.assertEqual(IP6_PREFIX, val)
+
+ def test_get_ip6_prefix_emptystring(self):
+ """
+ Verify get_ip6_prefix('device') correctly returns IPv6 prefix.
+ It returns default value '64' if ETH0_IP6_PREFIX_LENGTH has empty
+ string.
+ """
+ context = {'ETH0_IP6_PREFIX_LENGTH': ''}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_ip6_prefix('eth0')
+ self.assertEqual('64', val)
+
+ def test_get_gateway(self):
+ """
+ Verify get_gateway('device') correctly returns IPv4 default gateway
+ address.
+ """
+ context = {'ETH0_GATEWAY': '1.2.3.5'}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_gateway('eth0')
+ self.assertEqual('1.2.3.5', val)
+
+ def test_get_gateway6(self):
+ """
+ Verify get_gateway6('device') correctly returns IPv6 default gateway
+ address.
+ """
+ context = {'ETH0_GATEWAY6': IP6_GW}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_gateway6('eth0')
+ self.assertEqual(IP6_GW, val)
+
+ def test_get_mask(self):
+ """
+ Verify get_mask('device') correctly returns IPv4 subnet mask.
+ """
+ context = {'ETH0_MASK': '255.255.0.0'}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_mask('eth0')
+ self.assertEqual('255.255.0.0', val)
+
+ def test_get_mask_emptystring(self):
+ """
+ Verify get_mask('device') correctly returns IPv4 subnet mask.
+ It returns default value '255.255.255.0' if ETH0_MASK has empty string.
+ """
+ context = {'ETH0_MASK': ''}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_mask('eth0')
+ self.assertEqual('255.255.255.0', val)
+
+ def test_get_network(self):
+ """
+ Verify get_network('device') correctly returns IPv4 network address.
+ """
+ context = {'ETH0_NETWORK': '1.2.3.0'}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_network('eth0', MACADDR)
+ self.assertEqual('1.2.3.0', val)
+
+ def test_get_network_emptystring(self):
+ """
+ Verify get_network('device') correctly returns IPv4 network address.
+ It returns network address created by MAC address if ETH0_NETWORK has
+ empty string.
+ """
+ context = {'ETH0_NETWORK': ''}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_network('eth0', MACADDR)
+ self.assertEqual('10.18.1.0', val)
+
+ def test_get_field(self):
+ """
+ Verify get_field('device', 'name') returns *context* value.
+ """
+ context = {'ETH9_DUMMY': 'DUMMY_VALUE'}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_field('eth9', 'dummy')
+ self.assertEqual('DUMMY_VALUE', val)
+
+ def test_get_field_withdefaultvalue(self):
+ """
+ Verify get_field('device', 'name', 'default value') returns *context*
+ value.
+ """
+ context = {'ETH9_DUMMY': 'DUMMY_VALUE'}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE')
+ self.assertEqual('DUMMY_VALUE', val)
+
+ def test_get_field_withdefaultvalue_emptycontext(self):
+ """
+ Verify get_field('device', 'name', 'default value') returns *default*
+ value if context value is empty string.
+ """
+ context = {'ETH9_DUMMY': ''}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE')
+ self.assertEqual('DEFAULT_VALUE', val)
+
+ def test_get_field_emptycontext(self):
+ """
+ Verify get_field('device', 'name') returns None if context value is
+ empty string.
+ """
+ context = {'ETH9_DUMMY': ''}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_field('eth9', 'dummy')
+ self.assertEqual(None, val)
+
+ def test_get_field_nonecontext(self):
+ """
+ Verify get_field('device', 'name') returns None if context value is
+ None.
+ """
+ context = {'ETH9_DUMMY': None}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_field('eth9', 'dummy')
+ self.assertEqual(None, val)
+
+ @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+ def test_gen_conf_gateway(self, m_get_phys_by_mac):
+ """Test rendering with/without IPv4 gateway"""
+ self.maxDiff = None
+ # empty ETH0_GATEWAY
+ context = {
+ 'ETH0_MAC': '02:00:0a:12:01:01',
+ 'ETH0_GATEWAY': '', }
+ for nic in self.system_nics:
+ expected = {
+ 'version': 2,
+ 'ethernets': {
+ nic: {
+ 'match': {'macaddress': MACADDR},
+ 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context)
+ self.assertEqual(net.gen_conf(), expected)
+
+ # set ETH0_GATEWAY
+ context = {
+ 'ETH0_MAC': '02:00:0a:12:01:01',
+ 'ETH0_GATEWAY': '1.2.3.5', }
+ for nic in self.system_nics:
+ expected = {
+ 'version': 2,
+ 'ethernets': {
+ nic: {
+ 'gateway4': '1.2.3.5',
+ 'match': {'macaddress': MACADDR},
+ 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context)
+ self.assertEqual(net.gen_conf(), expected)
+
+ @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+ def test_gen_conf_gateway6(self, m_get_phys_by_mac):
+ """Test rendering with/without IPv6 gateway"""
+ self.maxDiff = None
+ # empty ETH0_GATEWAY6
+ context = {
+ 'ETH0_MAC': '02:00:0a:12:01:01',
+ 'ETH0_GATEWAY6': '', }
+ for nic in self.system_nics:
+ expected = {
+ 'version': 2,
+ 'ethernets': {
+ nic: {
+ 'match': {'macaddress': MACADDR},
+ 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context)
+ self.assertEqual(net.gen_conf(), expected)
+
+ # set ETH0_GATEWAY6
+ context = {
+ 'ETH0_MAC': '02:00:0a:12:01:01',
+ 'ETH0_GATEWAY6': IP6_GW, }
+ for nic in self.system_nics:
+ expected = {
+ 'version': 2,
+ 'ethernets': {
+ nic: {
+ 'gateway6': IP6_GW,
+ 'match': {'macaddress': MACADDR},
+ 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context)
+ self.assertEqual(net.gen_conf(), expected)
+
+ @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+ def test_gen_conf_ipv6address(self, m_get_phys_by_mac):
+ """Test rendering with/without IPv6 address"""
+ self.maxDiff = None
+ # empty ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH
+ context = {
+ 'ETH0_MAC': '02:00:0a:12:01:01',
+ 'ETH0_IP6': '',
+ 'ETH0_IP6_ULA': '',
+ 'ETH0_IP6_PREFIX_LENGTH': '', }
+ for nic in self.system_nics:
+ expected = {
+ 'version': 2,
+ 'ethernets': {
+ nic: {
+ 'match': {'macaddress': MACADDR},
+ 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context)
+ self.assertEqual(net.gen_conf(), expected)
+
+ # set ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH
+ context = {
+ 'ETH0_MAC': '02:00:0a:12:01:01',
+ 'ETH0_IP6': IP6_GLOBAL,
+ 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX,
+ 'ETH0_IP6_ULA': IP6_ULA, }
+ for nic in self.system_nics:
+ expected = {
+ 'version': 2,
+ 'ethernets': {
+ nic: {
+ 'match': {'macaddress': MACADDR},
+ 'addresses': [
+ IP_BY_MACADDR + '/' + IP4_PREFIX,
+ IP6_GLOBAL + '/' + IP6_PREFIX,
+ IP6_ULA + '/' + IP6_PREFIX]}}}
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context)
+ self.assertEqual(net.gen_conf(), expected)
+
+ @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+ def test_gen_conf_dns(self, m_get_phys_by_mac):
+ """Test rendering with/without DNS server, search domain"""
+ self.maxDiff = None
+ # empty DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN
+ context = {
+ 'ETH0_MAC': '02:00:0a:12:01:01',
+ 'DNS': '',
+ 'ETH0_DNS': '',
+ 'ETH0_SEARCH_DOMAIN': '', }
+ for nic in self.system_nics:
+ expected = {
+ 'version': 2,
+ 'ethernets': {
+ nic: {
+ 'match': {'macaddress': MACADDR},
+ 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context)
+ self.assertEqual(net.gen_conf(), expected)
+
+ # set DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN
+ context = {
+ 'ETH0_MAC': '02:00:0a:12:01:01',
+ 'DNS': '1.2.3.8',
+ 'ETH0_DNS': '1.2.3.6 1.2.3.7',
+ 'ETH0_SEARCH_DOMAIN': 'example.com example.org', }
+ for nic in self.system_nics:
+ expected = {
+ 'version': 2,
+ 'ethernets': {
+ nic: {
+ 'nameservers': {
+ 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'],
+ 'search': ['example.com', 'example.org']},
+ 'match': {'macaddress': MACADDR},
+ 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context)
+ self.assertEqual(net.gen_conf(), expected)
+
+ @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+ def test_gen_conf_mtu(self, m_get_phys_by_mac):
+ """Test rendering with/without MTU"""
+ self.maxDiff = None
+ # empty ETH0_MTU
+ context = {
+ 'ETH0_MAC': '02:00:0a:12:01:01',
+ 'ETH0_MTU': '', }
+ for nic in self.system_nics:
+ expected = {
+ 'version': 2,
+ 'ethernets': {
+ nic: {
+ 'match': {'macaddress': MACADDR},
+ 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context)
+ self.assertEqual(net.gen_conf(), expected)
+
+ # set ETH0_MTU
+ context = {
+ 'ETH0_MAC': '02:00:0a:12:01:01',
+ 'ETH0_MTU': '1280', }
+ for nic in self.system_nics:
+ expected = {
+ 'version': 2,
+ 'ethernets': {
+ nic: {
+ 'mtu': '1280',
+ 'match': {'macaddress': MACADDR},
+ 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context)
+ self.assertEqual(net.gen_conf(), expected)
+
@mock.patch(DS_PATH + ".get_physical_nics_by_mac")
def test_eth0(self, m_get_phys_by_mac):
for nic in self.system_nics:
@@ -395,7 +801,6 @@
'match': {'macaddress': MACADDR},
'addresses': [IP_BY_MACADDR + '/16'],
'gateway4': '1.2.3.5',
- 'gateway6': None,
'nameservers': {
'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8']}}}}
@@ -494,7 +899,6 @@
'match': {'macaddress': MAC_1},
'addresses': ['10.3.1.3/16'],
'gateway4': '10.3.0.1',
- 'gateway6': None,
'nameservers': {
'addresses': ['10.3.1.2', '1.2.3.8'],
'search': [
diff -Nru cloud-init-18.3-9-g2e62cb8a/tests/unittests/test_datasource/test_openstack.py cloud-init-18.3-24-gf6249277/tests/unittests/test_datasource/test_openstack.py
--- cloud-init-18.3-9-g2e62cb8a/tests/unittests/test_datasource/test_openstack.py 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/tests/unittests/test_datasource/test_openstack.py 2018-08-07 14:35:29.000000000 +0000
@@ -510,6 +510,24 @@
ds.detect_openstack(),
'Expected detect_openstack == True on OpenTelekomCloud')
+ @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data')
+ def test_detect_openstack_oraclecloud_chassis_asset_tag(self, m_dmi,
+ m_is_x86):
+ """Return True on OpenStack reporting Oracle cloud asset-tag."""
+ m_is_x86.return_value = True
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == 'system-product-name':
+ return 'Standard PC (i440FX + PIIX, 1996)' # No match
+ if dmi_key == 'chassis-asset-tag':
+ return 'OracleCloud.com'
+ assert False, 'Unexpected dmi read of %s' % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertTrue(
+ ds.detect_openstack(),
+ 'Expected detect_openstack == True on OracleCloud.com')
+
@test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env')
@test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data')
def test_detect_openstack_by_proc_1_environ(self, m_dmi, m_proc_env,
diff -Nru cloud-init-18.3-9-g2e62cb8a/tests/unittests/test_net.py cloud-init-18.3-24-gf6249277/tests/unittests/test_net.py
--- cloud-init-18.3-9-g2e62cb8a/tests/unittests/test_net.py 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/tests/unittests/test_net.py 2018-08-07 14:35:29.000000000 +0000
@@ -643,6 +643,7 @@
bridge_stp off
bridge_waitport 1 eth3
bridge_waitport 2 eth4
+ hwaddress bb:bb:bb:bb:bb:aa
# control-alias br0
iface br0 inet6 static
@@ -708,6 +709,7 @@
interfaces:
- eth1
- eth2
+ macaddress: aa:bb:cc:dd:ee:ff
parameters:
mii-monitor-interval: 100
mode: active-backup
@@ -720,6 +722,7 @@
interfaces:
- eth3
- eth4
+ macaddress: bb:bb:bb:bb:bb:aa
nameservers:
addresses:
- 8.8.8.8
@@ -803,6 +806,7 @@
IPV6ADDR=2001:1::1/64
IPV6INIT=yes
IPV6_DEFAULTGW=2001:4800:78ff:1b::1
+ MACADDR=bb:bb:bb:bb:bb:aa
NETMASK=255.255.255.0
NM_CONTROLLED=no
ONBOOT=yes
@@ -973,6 +977,7 @@
use_tempaddr: 1
forwarding: 1
# basically anything in /proc/sys/net/ipv6/conf/.../
+ mac_address: bb:bb:bb:bb:bb:aa
params:
bridge_ageing: 250
bridge_bridgeprio: 22
@@ -1075,6 +1080,7 @@
interfaces:
- bond0s0
- bond0s1
+ macaddress: aa:bb:cc:dd:e8:ff
mtu: 9000
parameters:
mii-monitor-interval: 100
diff -Nru cloud-init-18.3-9-g2e62cb8a/tests/unittests/test_rh_subscription.py cloud-init-18.3-24-gf6249277/tests/unittests/test_rh_subscription.py
--- cloud-init-18.3-9-g2e62cb8a/tests/unittests/test_rh_subscription.py 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/tests/unittests/test_rh_subscription.py 2018-08-07 14:35:29.000000000 +0000
@@ -8,10 +8,16 @@
from cloudinit.config import cc_rh_subscription
from cloudinit import util
-from cloudinit.tests.helpers import TestCase, mock
+from cloudinit.tests.helpers import CiTestCase, mock
+SUBMGR = cc_rh_subscription.SubscriptionManager
+SUB_MAN_CLI = 'cloudinit.config.cc_rh_subscription._sub_man_cli'
+
+
+@mock.patch(SUB_MAN_CLI)
+class GoodTests(CiTestCase):
+ with_logs = True
-class GoodTests(TestCase):
def setUp(self):
super(GoodTests, self).setUp()
self.name = "cc_rh_subscription"
@@ -19,7 +25,6 @@
self.log = logging.getLogger("good_tests")
self.args = []
self.handle = cc_rh_subscription.handle
- self.SM = cc_rh_subscription.SubscriptionManager
self.config = {'rh_subscription':
{'username': 'scooby@do.com',
@@ -35,55 +40,47 @@
'disable-repo': ['repo4', 'repo5']
}}
- def test_already_registered(self):
+ def test_already_registered(self, m_sman_cli):
'''
Emulates a system that is already registered. Ensure it gets
a non-ProcessExecution error from is_registered()
'''
- with mock.patch.object(cc_rh_subscription.SubscriptionManager,
- '_sub_man_cli') as mockobj:
- self.SM.log_success = mock.MagicMock()
- self.handle(self.name, self.config, self.cloud_init,
- self.log, self.args)
- self.assertEqual(self.SM.log_success.call_count, 1)
- self.assertEqual(mockobj.call_count, 1)
+ self.handle(self.name, self.config, self.cloud_init,
+ self.log, self.args)
+ self.assertEqual(m_sman_cli.call_count, 1)
+ self.assertIn('System is already registered', self.logs.getvalue())
- def test_simple_registration(self):
+ def test_simple_registration(self, m_sman_cli):
'''
Simple registration with username and password
'''
- self.SM.log_success = mock.MagicMock()
reg = "The system has been registered with ID:" \
" 12345678-abde-abcde-1234-1234567890abc"
- self.SM._sub_man_cli = mock.MagicMock(
- side_effect=[util.ProcessExecutionError, (reg, 'bar')])
+ m_sman_cli.side_effect = [util.ProcessExecutionError, (reg, 'bar')]
self.handle(self.name, self.config, self.cloud_init,
self.log, self.args)
- self.assertIn(mock.call(['identity']),
- self.SM._sub_man_cli.call_args_list)
+ self.assertIn(mock.call(['identity']), m_sman_cli.call_args_list)
self.assertIn(mock.call(['register', '--username=scooby@do.com',
'--password=scooby-snacks'],
logstring_val=True),
- self.SM._sub_man_cli.call_args_list)
-
- self.assertEqual(self.SM.log_success.call_count, 1)
- self.assertEqual(self.SM._sub_man_cli.call_count, 2)
+ m_sman_cli.call_args_list)
+ self.assertIn('rh_subscription plugin completed successfully',
+ self.logs.getvalue())
+ self.assertEqual(m_sman_cli.call_count, 2)
@mock.patch.object(cc_rh_subscription.SubscriptionManager, "_getRepos")
- @mock.patch.object(cc_rh_subscription.SubscriptionManager, "_sub_man_cli")
- def test_update_repos_disable_with_none(self, m_sub_man_cli, m_get_repos):
+ def test_update_repos_disable_with_none(self, m_get_repos, m_sman_cli):
cfg = copy.deepcopy(self.config)
m_get_repos.return_value = ([], ['repo1'])
- m_sub_man_cli.return_value = (b'', b'')
cfg['rh_subscription'].update(
{'enable-repo': ['repo1'], 'disable-repo': None})
mysm = cc_rh_subscription.SubscriptionManager(cfg)
self.assertEqual(True, mysm.update_repos())
m_get_repos.assert_called_with()
- self.assertEqual(m_sub_man_cli.call_args_list,
+ self.assertEqual(m_sman_cli.call_args_list,
[mock.call(['repos', '--enable=repo1'])])
- def test_full_registration(self):
+ def test_full_registration(self, m_sman_cli):
'''
Registration with auto-attach, service-level, adding pools,
and enabling and disabling yum repos
@@ -93,26 +90,28 @@
call_lists.append(['repos', '--disable=repo5', '--enable=repo2',
'--enable=repo3'])
call_lists.append(['attach', '--auto', '--servicelevel=self-support'])
- self.SM.log_success = mock.MagicMock()
reg = "The system has been registered with ID:" \
" 12345678-abde-abcde-1234-1234567890abc"
- self.SM._sub_man_cli = mock.MagicMock(
- side_effect=[util.ProcessExecutionError, (reg, 'bar'),
- ('Service level set to: self-support', ''),
- ('pool1\npool3\n', ''), ('pool2\n', ''), ('', ''),
- ('Repo ID: repo1\nRepo ID: repo5\n', ''),
- ('Repo ID: repo2\nRepo ID: repo3\nRepo ID: '
- 'repo4', ''),
- ('', '')])
+ m_sman_cli.side_effect = [
+ util.ProcessExecutionError,
+ (reg, 'bar'),
+ ('Service level set to: self-support', ''),
+ ('pool1\npool3\n', ''), ('pool2\n', ''), ('', ''),
+ ('Repo ID: repo1\nRepo ID: repo5\n', ''),
+ ('Repo ID: repo2\nRepo ID: repo3\nRepo ID: repo4', ''),
+ ('', '')]
self.handle(self.name, self.config_full, self.cloud_init,
self.log, self.args)
+ self.assertEqual(m_sman_cli.call_count, 9)
for call in call_lists:
- self.assertIn(mock.call(call), self.SM._sub_man_cli.call_args_list)
- self.assertEqual(self.SM.log_success.call_count, 1)
- self.assertEqual(self.SM._sub_man_cli.call_count, 9)
+ self.assertIn(mock.call(call), m_sman_cli.call_args_list)
+ self.assertIn("rh_subscription plugin completed successfully",
+ self.logs.getvalue())
-class TestBadInput(TestCase):
+@mock.patch(SUB_MAN_CLI)
+class TestBadInput(CiTestCase):
+ with_logs = True
name = "cc_rh_subscription"
cloud_init = None
log = logging.getLogger("bad_tests")
@@ -155,81 +154,81 @@
super(TestBadInput, self).setUp()
self.handle = cc_rh_subscription.handle
- def test_no_password(self):
- '''
- Attempt to register without the password key/value
- '''
- self.SM._sub_man_cli = mock.MagicMock(
- side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
+ def assert_logged_warnings(self, warnings):
+ logs = self.logs.getvalue()
+ missing = [w for w in warnings if "WARNING: " + w not in logs]
+ self.assertEqual([], missing, "Missing expected warnings.")
+
+ def test_no_password(self, m_sman_cli):
+ '''Attempt to register without the password key/value.'''
+ m_sman_cli.side_effect = [util.ProcessExecutionError,
+ (self.reg, 'bar')]
self.handle(self.name, self.config_no_password, self.cloud_init,
self.log, self.args)
- self.assertEqual(self.SM._sub_man_cli.call_count, 0)
-
- def test_no_org(self):
- '''
- Attempt to register without the org key/value
- '''
- self.input_is_missing_data(self.config_no_key)
+ self.assertEqual(m_sman_cli.call_count, 0)
- def test_service_level_without_auto(self):
- '''
- Attempt to register using service-level without the auto-attach key
- '''
- self.SM.log_warn = mock.MagicMock()
- self.SM._sub_man_cli = mock.MagicMock(
- side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
+ def test_no_org(self, m_sman_cli):
+ '''Attempt to register without the org key/value.'''
+ m_sman_cli.side_effect = [util.ProcessExecutionError]
+ self.handle(self.name, self.config_no_key, self.cloud_init,
+ self.log, self.args)
+ m_sman_cli.assert_called_with(['identity'])
+ self.assertEqual(m_sman_cli.call_count, 1)
+ self.assert_logged_warnings((
+ 'Unable to register system due to incomplete information.',
+ 'Use either activationkey and org *or* userid and password',
+ 'Registration failed or did not run completely',
+ 'rh_subscription plugin did not complete successfully'))
+
+ def test_service_level_without_auto(self, m_sman_cli):
+ '''Attempt to register using service-level without auto-attach key.'''
+ m_sman_cli.side_effect = [util.ProcessExecutionError,
+ (self.reg, 'bar')]
self.handle(self.name, self.config_service, self.cloud_init,
self.log, self.args)
- self.assertEqual(self.SM._sub_man_cli.call_count, 1)
- self.assertEqual(self.SM.log_warn.call_count, 2)
+ self.assertEqual(m_sman_cli.call_count, 1)
+ self.assert_logged_warnings((
+ 'The service-level key must be used in conjunction with ',
+ 'rh_subscription plugin did not complete successfully'))
- def test_pool_not_a_list(self):
+ def test_pool_not_a_list(self, m_sman_cli):
'''
Register with pools that are not in the format of a list
'''
- self.SM.log_warn = mock.MagicMock()
- self.SM._sub_man_cli = mock.MagicMock(
- side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
+ m_sman_cli.side_effect = [util.ProcessExecutionError,
+ (self.reg, 'bar')]
self.handle(self.name, self.config_badpool, self.cloud_init,
self.log, self.args)
- self.assertEqual(self.SM._sub_man_cli.call_count, 2)
- self.assertEqual(self.SM.log_warn.call_count, 2)
+ self.assertEqual(m_sman_cli.call_count, 2)
+ self.assert_logged_warnings((
+ 'Pools must in the format of a list',
+ 'rh_subscription plugin did not complete successfully'))
- def test_repo_not_a_list(self):
+ def test_repo_not_a_list(self, m_sman_cli):
'''
Register with repos that are not in the format of a list
'''
- self.SM.log_warn = mock.MagicMock()
- self.SM._sub_man_cli = mock.MagicMock(
- side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
+ m_sman_cli.side_effect = [util.ProcessExecutionError,
+ (self.reg, 'bar')]
self.handle(self.name, self.config_badrepo, self.cloud_init,
self.log, self.args)
- self.assertEqual(self.SM.log_warn.call_count, 3)
- self.assertEqual(self.SM._sub_man_cli.call_count, 2)
+ self.assertEqual(m_sman_cli.call_count, 2)
+ self.assert_logged_warnings((
+ 'Repo IDs must in the format of a list.',
+ 'Unable to add or remove repos',
+ 'rh_subscription plugin did not complete successfully'))
- def test_bad_key_value(self):
+ def test_bad_key_value(self, m_sman_cli):
'''
Attempt to register with a key that we don't know
'''
- self.SM.log_warn = mock.MagicMock()
- self.SM._sub_man_cli = mock.MagicMock(
- side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
+ m_sman_cli.side_effect = [util.ProcessExecutionError,
+ (self.reg, 'bar')]
self.handle(self.name, self.config_badkey, self.cloud_init,
self.log, self.args)
- self.assertEqual(self.SM.log_warn.call_count, 2)
- self.assertEqual(self.SM._sub_man_cli.call_count, 1)
-
- def input_is_missing_data(self, config):
- '''
- Helper def for tests that having missing information
- '''
- self.SM.log_warn = mock.MagicMock()
- self.SM._sub_man_cli = mock.MagicMock(
- side_effect=[util.ProcessExecutionError])
- self.handle(self.name, config, self.cloud_init,
- self.log, self.args)
- self.SM._sub_man_cli.assert_called_with(['identity'])
- self.assertEqual(self.SM.log_warn.call_count, 4)
- self.assertEqual(self.SM._sub_man_cli.call_count, 1)
+ self.assertEqual(m_sman_cli.call_count, 1)
+ self.assert_logged_warnings((
+ 'fookey is not a valid key for rh_subscription. Valid keys are:',
+ 'rh_subscription plugin did not complete successfully'))
# vi: ts=4 expandtab
diff -Nru cloud-init-18.3-9-g2e62cb8a/tools/net-convert.py cloud-init-18.3-24-gf6249277/tools/net-convert.py
--- cloud-init-18.3-9-g2e62cb8a/tools/net-convert.py 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/tools/net-convert.py 1970-01-01 00:00:00.000000000 +0000
@@ -1,84 +0,0 @@
-#!/usr/bin/python3
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import argparse
-import json
-import os
-import yaml
-
-from cloudinit.sources.helpers import openstack
-
-from cloudinit.net import eni
-from cloudinit.net import netplan
-from cloudinit.net import network_state
-from cloudinit.net import sysconfig
-
-
-def main():
- parser = argparse.ArgumentParser()
- parser.add_argument("--network-data", "-p", type=open,
- metavar="PATH", required=True)
- parser.add_argument("--kind", "-k",
- choices=['eni', 'network_data.json', 'yaml'],
- required=True)
- parser.add_argument("-d", "--directory",
- metavar="PATH",
- help="directory to place output in",
- required=True)
- parser.add_argument("-m", "--mac",
- metavar="name,mac",
- action='append',
- help="interface name to mac mapping")
- parser.add_argument("--output-kind", "-ok",
- choices=['eni', 'netplan', 'sysconfig'],
- required=True)
- args = parser.parse_args()
-
- if not os.path.isdir(args.directory):
- os.makedirs(args.directory)
-
- if args.mac:
- known_macs = {}
- for item in args.mac:
- iface_name, iface_mac = item.split(",", 1)
- known_macs[iface_mac] = iface_name
- else:
- known_macs = None
-
- net_data = args.network_data.read()
- if args.kind == "eni":
- pre_ns = eni.convert_eni_data(net_data)
- ns = network_state.parse_net_config_data(pre_ns)
- elif args.kind == "yaml":
- pre_ns = yaml.load(net_data)
- if 'network' in pre_ns:
- pre_ns = pre_ns.get('network')
- print("Input YAML")
- print(yaml.dump(pre_ns, default_flow_style=False, indent=4))
- ns = network_state.parse_net_config_data(pre_ns)
- else:
- pre_ns = openstack.convert_net_json(
- json.loads(net_data), known_macs=known_macs)
- ns = network_state.parse_net_config_data(pre_ns)
-
- if not ns:
- raise RuntimeError("No valid network_state object created from"
- "input data")
-
- print("\nInternal State")
- print(yaml.dump(ns, default_flow_style=False, indent=4))
- if args.output_kind == "eni":
- r_cls = eni.Renderer
- elif args.output_kind == "netplan":
- r_cls = netplan.Renderer
- else:
- r_cls = sysconfig.Renderer
-
- r = r_cls()
- r.render_network_state(network_state=ns, target=args.directory)
-
-
-if __name__ == '__main__':
- main()
-
-# vi: ts=4 expandtab
diff -Nru cloud-init-18.3-9-g2e62cb8a/tools/Z99-cloudinit-warnings.sh cloud-init-18.3-24-gf6249277/tools/Z99-cloudinit-warnings.sh
--- cloud-init-18.3-9-g2e62cb8a/tools/Z99-cloudinit-warnings.sh 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/tools/Z99-cloudinit-warnings.sh 2018-08-07 14:35:29.000000000 +0000
@@ -4,9 +4,11 @@
# Purpose: show user warnings on login.
cloud_init_warnings() {
- local warning="" idir="/var/lib/cloud/instance" n=0
- local warndir="$idir/warnings"
- local ufile="$HOME/.cloud-warnings.skip" sfile="$warndir/.skip"
+ command -v local >/dev/null && local _local="local" ||
+ typeset _local="typeset"
+ $_local warning="" idir="/var/lib/cloud/instance" n=0
+ $_local warndir="$idir/warnings"
+ $_local ufile="$HOME/.cloud-warnings.skip" sfile="$warndir/.skip"
[ -d "$warndir" ] || return 0
[ ! -f "$ufile" ] || return 0
[ ! -f "$sfile" ] || return 0
diff -Nru cloud-init-18.3-9-g2e62cb8a/tools/Z99-cloud-locale-test.sh cloud-init-18.3-24-gf6249277/tools/Z99-cloud-locale-test.sh
--- cloud-init-18.3-9-g2e62cb8a/tools/Z99-cloud-locale-test.sh 2018-07-09 20:13:47.000000000 +0000
+++ cloud-init-18.3-24-gf6249277/tools/Z99-cloud-locale-test.sh 2018-08-07 14:35:29.000000000 +0000
@@ -11,8 +11,11 @@
# of how to fix them.
locale_warn() {
- local bad_names="" bad_lcs="" key="" val="" var="" vars="" bad_kv=""
- local w1 w2 w3 w4 remain
+ command -v local >/dev/null && local _local="local" ||
+ typeset _local="typeset"
+
+ $_local bad_names="" bad_lcs="" key="" val="" var="" vars="" bad_kv=""
+ $_local w1 w2 w3 w4 remain
# if shell is zsh, act like sh only for this function (-L).
# The behavior change will not permenently affect user's shell.
@@ -53,8 +56,8 @@
printf " This can affect your user experience significantly, including the\n"
printf " ability to manage packages. You may install the locales by running:\n\n"
- local bad invalid="" to_gen="" sfile="/usr/share/i18n/SUPPORTED"
- local pkgs=""
+ $_local bad invalid="" to_gen="" sfile="/usr/share/i18n/SUPPORTED"
+ $_local pkgs=""
if [ -e "$sfile" ]; then
for bad in ${bad_lcs}; do
grep -q -i "${bad}" "$sfile" &&
@@ -67,7 +70,7 @@
fi
to_gen=${to_gen# }
- local pkgs=""
+ $_local pkgs=""
for bad in ${to_gen}; do
pkgs="${pkgs} language-pack-${bad%%_*}"
done