diff -Nru cloud-init-18.4/bash_completion/cloud-init cloud-init-18.5-21-g8ee294d5/bash_completion/cloud-init --- cloud-init-18.4/bash_completion/cloud-init 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/bash_completion/cloud-init 2019-01-28 20:07:03.000000000 +0000 @@ -30,7 +30,10 @@ devel) COMPREPLY=($(compgen -W "--help schema net-convert" -- $cur_word)) ;; - dhclient-hook|features) + dhclient-hook) + COMPREPLY=($(compgen -W "--help up down" -- $cur_word)) + ;; + features) COMPREPLY=($(compgen -W "--help" -- $cur_word)) ;; init) diff -Nru cloud-init-18.4/ChangeLog cloud-init-18.5-21-g8ee294d5/ChangeLog --- cloud-init-18.4/ChangeLog 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/ChangeLog 2019-01-28 20:07:03.000000000 +0000 @@ -1,3 +1,57 @@ +18.5: + - tests: add Disco release [Joshua Powers] + - net: render 'metric' values in per-subnet routes (LP: #1805871) + - write_files: add support for appending to files. [James Baxter] + - config: On ubuntu select cloud archive mirrors for armel, armhf, arm64. + (LP: #1805854) + - dhclient-hook: cleanups, tests and fix a bug on 'down' event. + - NoCloud: Allow top level 'network' key in network-config. (LP: #1798117) + - ovf: Fix ovf network config generation gateway/routes (LP: #1806103) + - azure: detect vnet migration via netlink media change event + [Tamilmani Manoharan] + - Azure: fix copy/paste error in error handling when reading azure ovf. + [Adam DePue] + - tests: fix incorrect order of mocks in test_handle_zfs_root. + - doc: Change dns_nameserver property to dns_nameservers. [Tomer Cohen] + - OVF: identify label iso9660 filesystems with label 'OVF ENV'. + - logs: collect-logs ignore instance-data-sensitive.json on non-root user + (LP: #1805201) + - net: Ephemeral*Network: add connectivity check via URL + - azure: _poll_imds only retry on 404. Fail on Timeout (LP: #1803598) + - resizefs: Prefix discovered devpath with '/dev/' when path does not + exist [Igor Galić] + - azure: retry imds polling on requests.Timeout (LP: #1800223) + - azure: Accept variation in error msg from mount for ntfs volumes + [Jason Zions] (LP: #1799338) + - azure: fix regression introduced when persisting ephemeral dhcp lease + [asakkurr] + - azure: add udev rules to create cloud-init Gen2 disk name symlinks + (LP: #1797480) + - tests: ec2 mock missing httpretty user-data and instance-identity routes + - azure: remove /etc/netplan/90-hotplug-azure.yaml when net from IMDS + - azure: report ready to fabric after reprovision and reduce logging + [asakkurr] (LP: #1799594) + - query: better error when missing read permission on instance-data + - instance-data: fallback to instance-data.json if sensitive is absent. + (LP: #1798189) + - docs: remove colon from network v1 config example. [Tomer Cohen] + - Add cloud-id binary to packages for SUSE [Jason Zions] + - systemd: On SUSE ensure cloud-init.service runs before wicked + [Robert Schweikert] (LP: #1799709) + - update detection of openSUSE variants [Robert Schweikert] + - azure: Add apply_network_config option to disable network from IMDS + (LP: #1798424) + - Correct spelling in an error message (udevadm). [Katie McLaughlin] + - tests: meta_data key changed to meta-data in ec2 instance-data.json + (LP: #1797231) + - tests: fix kvm integration test to assert flexible config-disk path + (LP: #1797199) + - tools: Add cloud-id command line utility + - instance-data: Add standard keys platform and subplatform. Refactor ec2. + - net: ignore nics that have "zero" mac address. 
(LP: #1796917) + - tests: fix apt_configure_primary to be more flexible + - Ubuntu: update sources.list to comment out deb-src entries. (LP: #74747) + 18.4: - add rtd example docs about new standardized keys - use ds._crawled_metadata instance attribute if set when writing diff -Nru cloud-init-18.4/cloudinit/cmd/cloud_id.py cloud-init-18.5-21-g8ee294d5/cloudinit/cmd/cloud_id.py --- cloud-init-18.4/cloudinit/cmd/cloud_id.py 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/cmd/cloud_id.py 2019-01-28 20:07:03.000000000 +0000 @@ -0,0 +1,90 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Commandline utility to list the canonical cloud-id for an instance.""" + +import argparse +import json +import sys + +from cloudinit.sources import ( + INSTANCE_JSON_FILE, METADATA_UNKNOWN, canonical_cloud_id) + +DEFAULT_INSTANCE_JSON = '/run/cloud-init/%s' % INSTANCE_JSON_FILE + +NAME = 'cloud-id' + + +def get_parser(parser=None): + """Build or extend an arg parser for the cloud-id utility. + + @param parser: Optional existing ArgumentParser instance representing the + query subcommand which will be extended to support the args of + this utility. + + @returns: ArgumentParser with proper argument configuration. + """ + if not parser: + parser = argparse.ArgumentParser( + prog=NAME, + description='Report the canonical cloud-id for this instance') + parser.add_argument( + '-j', '--json', action='store_true', default=False, + help='Report all standardized cloud-id information as json.') + parser.add_argument( + '-l', '--long', action='store_true', default=False, + help='Report extended cloud-id information as tab-delimited string.') + parser.add_argument( + '-i', '--instance-data', type=str, default=DEFAULT_INSTANCE_JSON, + help=('Path to instance-data.json file. Default is %s' % + DEFAULT_INSTANCE_JSON)) + return parser + + +def error(msg): + sys.stderr.write('ERROR: %s\n' % msg) + return 1 + + +def handle_args(name, args): + """Handle calls to 'cloud-id' cli. + + Print the canonical cloud-id on which the instance is running. + + @return: 0 on success, 1 otherwise. + """ + try: + instance_data = json.load(open(args.instance_data)) + except IOError: + return error( + "File not found '%s'. Provide a path to instance data json file" + ' using --instance-data' % args.instance_data) + except ValueError as e: + return error( + "File '%s' is not valid json. 
%s" % (args.instance_data, e)) + v1 = instance_data.get('v1', {}) + cloud_id = canonical_cloud_id( + v1.get('cloud_name', METADATA_UNKNOWN), + v1.get('region', METADATA_UNKNOWN), + v1.get('platform', METADATA_UNKNOWN)) + if args.json: + v1['cloud_id'] = cloud_id + response = json.dumps( # Pretty, sorted json + v1, indent=1, sort_keys=True, separators=(',', ': ')) + elif args.long: + response = '%s\t%s' % (cloud_id, v1.get('region', METADATA_UNKNOWN)) + else: + response = cloud_id + sys.stdout.write('%s\n' % response) + return 0 + + +def main(): + """Tool to query specific instance-data values.""" + parser = get_parser() + sys.exit(handle_args(NAME, parser.parse_args())) + + +if __name__ == '__main__': + main() + +# vi: ts=4 expandtab diff -Nru cloud-init-18.4/cloudinit/cmd/devel/logs.py cloud-init-18.5-21-g8ee294d5/cloudinit/cmd/devel/logs.py --- cloud-init-18.4/cloudinit/cmd/devel/logs.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/cmd/devel/logs.py 2019-01-28 20:07:03.000000000 +0000 @@ -5,14 +5,16 @@ """Define 'collect-logs' utility and handler to include in cloud-init cmd.""" import argparse -from cloudinit.util import ( - ProcessExecutionError, chdir, copy, ensure_dir, subp, write_file) -from cloudinit.temp_utils import tempdir from datetime import datetime import os import shutil import sys +from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE +from cloudinit.temp_utils import tempdir +from cloudinit.util import ( + ProcessExecutionError, chdir, copy, ensure_dir, subp, write_file) + CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log'] CLOUDINIT_RUN_DIR = '/run/cloud-init' @@ -46,6 +48,13 @@ return parser +def _copytree_ignore_sensitive_files(curdir, files): + """Return a list of files to ignore if we are non-root""" + if os.getuid() == 0: + return () + return (INSTANCE_JSON_SENSITIVE_FILE,) # Ignore root-permissioned files + + def _write_command_output_to_file(cmd, filename, msg, verbosity): """Helper which runs a command and writes output or error to filename.""" try: @@ -78,6 +87,11 @@ @param tarfile: The path of the tar-gzipped file to create. @param include_userdata: Boolean, true means include user-data. """ + if include_userdata and os.getuid() != 0: + sys.stderr.write( + "To include userdata, root user is required." 
+ " Try sudo cloud-init collect-logs\n") + return 1 tarfile = os.path.abspath(tarfile) date = datetime.utcnow().date().strftime('%Y-%m-%d') log_dir = 'cloud-init-logs-{0}'.format(date) @@ -110,7 +124,8 @@ ensure_dir(run_dir) if os.path.exists(CLOUDINIT_RUN_DIR): shutil.copytree(CLOUDINIT_RUN_DIR, - os.path.join(run_dir, 'cloud-init')) + os.path.join(run_dir, 'cloud-init'), + ignore=_copytree_ignore_sensitive_files) _debug("collected dir %s\n" % CLOUDINIT_RUN_DIR, 1, verbosity) else: _debug("directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, 1, @@ -118,21 +133,21 @@ with chdir(tmp_dir): subp(['tar', 'czvf', tarfile, log_dir.replace(tmp_dir + '/', '')]) sys.stderr.write("Wrote %s\n" % tarfile) + return 0 def handle_collect_logs_args(name, args): """Handle calls to 'cloud-init collect-logs' as a subcommand.""" - collect_logs(args.tarfile, args.userdata, args.verbosity) + return collect_logs(args.tarfile, args.userdata, args.verbosity) def main(): """Tool to collect and tar all cloud-init related logs.""" parser = get_parser() - handle_collect_logs_args('collect-logs', parser.parse_args()) - return 0 + return handle_collect_logs_args('collect-logs', parser.parse_args()) if __name__ == '__main__': - main() + sys.exit(main()) # vi: ts=4 expandtab diff -Nru cloud-init-18.4/cloudinit/cmd/devel/net_convert.py cloud-init-18.5-21-g8ee294d5/cloudinit/cmd/devel/net_convert.py --- cloud-init-18.4/cloudinit/cmd/devel/net_convert.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/cmd/devel/net_convert.py 2019-01-28 20:07:03.000000000 +0000 @@ -9,6 +9,7 @@ from cloudinit.sources.helpers import openstack from cloudinit.sources import DataSourceAzure as azure +from cloudinit.sources import DataSourceOVF as ovf from cloudinit import distros from cloudinit.net import eni, netplan, network_state, sysconfig @@ -31,7 +32,7 @@ metavar="PATH", required=True) parser.add_argument("-k", "--kind", choices=['eni', 'network_data.json', 'yaml', - 'azure-imds'], + 'azure-imds', 'vmware-imc'], required=True) parser.add_argument("-d", "--directory", metavar="PATH", @@ -76,7 +77,6 @@ net_data = args.network_data.read() if args.kind == "eni": pre_ns = eni.convert_eni_data(net_data) - ns = network_state.parse_net_config_data(pre_ns) elif args.kind == "yaml": pre_ns = yaml.load(net_data) if 'network' in pre_ns: @@ -85,15 +85,16 @@ sys.stderr.write('\n'.join( ["Input YAML", yaml.dump(pre_ns, default_flow_style=False, indent=4), ""])) - ns = network_state.parse_net_config_data(pre_ns) elif args.kind == 'network_data.json': pre_ns = openstack.convert_net_json( json.loads(net_data), known_macs=known_macs) - ns = network_state.parse_net_config_data(pre_ns) elif args.kind == 'azure-imds': pre_ns = azure.parse_network_config(json.loads(net_data)) - ns = network_state.parse_net_config_data(pre_ns) + elif args.kind == 'vmware-imc': + config = ovf.Config(ovf.ConfigFile(args.network_data.name)) + pre_ns = ovf.get_network_config_from_conf(config, False) + ns = network_state.parse_net_config_data(pre_ns) if not ns: raise RuntimeError("No valid network_state object created from" "input data") @@ -111,6 +112,10 @@ elif args.output_kind == "netplan": r_cls = netplan.Renderer config = distro.renderer_configs.get('netplan') + # don't run netplan generate/apply + config['postcmds'] = False + # trim leading slash + config['netplan_path'] = config['netplan_path'][1:] else: r_cls = sysconfig.Renderer config = distro.renderer_configs.get('sysconfig') diff -Nru cloud-init-18.4/cloudinit/cmd/devel/render.py 
cloud-init-18.5-21-g8ee294d5/cloudinit/cmd/devel/render.py --- cloud-init-18.4/cloudinit/cmd/devel/render.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/cmd/devel/render.py 2019-01-28 20:07:03.000000000 +0000 @@ -8,11 +8,10 @@ from cloudinit.handlers.jinja_template import render_jinja_payload_from_file from cloudinit import log -from cloudinit.sources import INSTANCE_JSON_FILE +from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE from . import addLogHandlerCLI, read_cfg_paths NAME = 'render' -DEFAULT_INSTANCE_DATA = '/run/cloud-init/instance-data.json' LOG = log.getLogger(NAME) @@ -47,12 +46,22 @@ @return 0 on success, 1 on failure. """ addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING) - if not args.instance_data: - paths = read_cfg_paths() - instance_data_fn = os.path.join( - paths.run_dir, INSTANCE_JSON_FILE) - else: + if args.instance_data: instance_data_fn = args.instance_data + else: + paths = read_cfg_paths() + uid = os.getuid() + redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE) + if uid == 0: + instance_data_fn = os.path.join( + paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE) + if not os.path.exists(instance_data_fn): + LOG.warning( + 'Missing root-readable %s. Using redacted %s instead.', + instance_data_fn, redacted_data_fn) + instance_data_fn = redacted_data_fn + else: + instance_data_fn = redacted_data_fn if not os.path.exists(instance_data_fn): LOG.error('Missing instance-data.json file: %s', instance_data_fn) return 1 @@ -62,10 +71,14 @@ except IOError: LOG.error('Missing user-data file: %s', args.user_data) return 1 - rendered_payload = render_jinja_payload_from_file( - payload=user_data, payload_fn=args.user_data, - instance_data_file=instance_data_fn, - debug=True if args.debug else False) + try: + rendered_payload = render_jinja_payload_from_file( + payload=user_data, payload_fn=args.user_data, + instance_data_file=instance_data_fn, + debug=True if args.debug else False) + except RuntimeError as e: + LOG.error('Cannot render from instance data: %s', str(e)) + return 1 if not rendered_payload: LOG.error('Unable to render user-data file: %s', args.user_data) return 1 diff -Nru cloud-init-18.4/cloudinit/cmd/devel/tests/test_logs.py cloud-init-18.5-21-g8ee294d5/cloudinit/cmd/devel/tests/test_logs.py --- cloud-init-18.4/cloudinit/cmd/devel/tests/test_logs.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/cmd/devel/tests/test_logs.py 2019-01-28 20:07:03.000000000 +0000 @@ -1,13 +1,17 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-from cloudinit.cmd.devel import logs -from cloudinit.util import ensure_dir, load_file, subp, write_file -from cloudinit.tests.helpers import FilesystemMockingTestCase, wrap_and_call from datetime import datetime -import mock import os +from six import StringIO + +from cloudinit.cmd.devel import logs +from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE +from cloudinit.tests.helpers import ( + FilesystemMockingTestCase, mock, wrap_and_call) +from cloudinit.util import ensure_dir, load_file, subp, write_file +@mock.patch('cloudinit.cmd.devel.logs.os.getuid') class TestCollectLogs(FilesystemMockingTestCase): def setUp(self): @@ -15,14 +19,29 @@ self.new_root = self.tmp_dir() self.run_dir = self.tmp_path('run', self.new_root) - def test_collect_logs_creates_tarfile(self): + def test_collect_logs_with_userdata_requires_root_user(self, m_getuid): + """collect-logs errors when non-root user collects userdata.""" + m_getuid.return_value = 100 # non-root + output_tarfile = self.tmp_path('logs.tgz') + with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: + self.assertEqual( + 1, logs.collect_logs(output_tarfile, include_userdata=True)) + self.assertEqual( + 'To include userdata, root user is required.' + ' Try sudo cloud-init collect-logs\n', + m_stderr.getvalue()) + + def test_collect_logs_creates_tarfile(self, m_getuid): """collect-logs creates a tarfile with all related cloud-init info.""" + m_getuid.return_value = 100 log1 = self.tmp_path('cloud-init.log', self.new_root) write_file(log1, 'cloud-init-log') log2 = self.tmp_path('cloud-init-output.log', self.new_root) write_file(log2, 'cloud-init-output-log') ensure_dir(self.run_dir) write_file(self.tmp_path('results.json', self.run_dir), 'results') + write_file(self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, self.run_dir), + 'sensitive') output_tarfile = self.tmp_path('logs.tgz') date = datetime.utcnow().date().strftime('%Y-%m-%d') @@ -59,6 +78,11 @@ # unpack the tarfile and check file contents subp(['tar', 'zxvf', output_tarfile, '-C', self.new_root]) out_logdir = self.tmp_path(date_logdir, self.new_root) + self.assertFalse( + os.path.exists( + os.path.join(out_logdir, 'run', 'cloud-init', + INSTANCE_JSON_SENSITIVE_FILE)), + 'Unexpected file found: %s' % INSTANCE_JSON_SENSITIVE_FILE) self.assertEqual( '0.7fake\n', load_file(os.path.join(out_logdir, 'dpkg-version'))) @@ -82,8 +106,9 @@ os.path.join(out_logdir, 'run', 'cloud-init', 'results.json'))) fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile) - def test_collect_logs_includes_optional_userdata(self): + def test_collect_logs_includes_optional_userdata(self, m_getuid): """collect-logs includes userdata when --include-userdata is set.""" + m_getuid.return_value = 0 log1 = self.tmp_path('cloud-init.log', self.new_root) write_file(log1, 'cloud-init-log') log2 = self.tmp_path('cloud-init-output.log', self.new_root) write_file(log2, 'cloud-init-output-log') userdata = self.tmp_path('user-data.txt', self.new_root) write_file(userdata, 'user-data') ensure_dir(self.run_dir) write_file(self.tmp_path('results.json', self.run_dir), 'results') + write_file(self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, self.run_dir), + 'sensitive') output_tarfile = self.tmp_path('logs.tgz') date = datetime.utcnow().date().strftime('%Y-%m-%d') @@ -132,4 +159,8 @@ self.assertEqual( 'user-data', load_file(os.path.join(out_logdir, 'user-data.txt'))) + self.assertEqual( + 'sensitive', + load_file(os.path.join(out_logdir, 'run', 'cloud-init', + INSTANCE_JSON_SENSITIVE_FILE))) fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile) diff -Nru 
cloud-init-18.4/cloudinit/cmd/devel/tests/test_render.py cloud-init-18.5-21-g8ee294d5/cloudinit/cmd/devel/tests/test_render.py --- cloud-init-18.4/cloudinit/cmd/devel/tests/test_render.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/cmd/devel/tests/test_render.py 2019-01-28 20:07:03.000000000 +0000 @@ -6,7 +6,7 @@ from collections import namedtuple from cloudinit.cmd.devel import render from cloudinit.helpers import Paths -from cloudinit.sources import INSTANCE_JSON_FILE +from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJinja from cloudinit.util import ensure_dir, write_file @@ -63,6 +63,49 @@ 'Missing instance-data.json file: %s' % json_file, self.logs.getvalue()) + def test_handle_args_root_fallback_from_sensitive_instance_data(self): + """When root user defaults to sensitive.json.""" + user_data = self.tmp_path('user-data', dir=self.tmp) + run_dir = self.tmp_path('run_dir', dir=self.tmp) + ensure_dir(run_dir) + paths = Paths({'run_dir': run_dir}) + self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths') + self.m_paths.return_value = paths + args = self.args( + user_data=user_data, instance_data=None, debug=False) + with mock.patch('sys.stderr', new_callable=StringIO): + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 0 + self.assertEqual(1, render.handle_args('anyname', args)) + json_file = os.path.join(run_dir, INSTANCE_JSON_FILE) + json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE) + self.assertIn( + 'WARNING: Missing root-readable %s. Using redacted %s' % ( + json_sensitive, json_file), self.logs.getvalue()) + self.assertIn( + 'ERROR: Missing instance-data.json file: %s' % json_file, + self.logs.getvalue()) + + def test_handle_args_root_uses_sensitive_instance_data(self): + """When root user, and no instance-data arg, use sensitive.json.""" + user_data = self.tmp_path('user-data', dir=self.tmp) + write_file(user_data, '##template: jinja\nrendering: {{ my_var }}') + run_dir = self.tmp_path('run_dir', dir=self.tmp) + ensure_dir(run_dir) + json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE) + write_file(json_sensitive, '{"my-var": "jinja worked"}') + paths = Paths({'run_dir': run_dir}) + self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths') + self.m_paths.return_value = paths + args = self.args( + user_data=user_data, instance_data=None, debug=False) + with mock.patch('sys.stderr', new_callable=StringIO): + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 0 + self.assertEqual(0, render.handle_args('anyname', args)) + self.assertIn('rendering: jinja worked', m_stdout.getvalue()) + @skipUnlessJinja() def test_handle_args_renders_instance_data_vars_in_template(self): """If user_data file is a jinja template render instance-data vars.""" diff -Nru cloud-init-18.4/cloudinit/cmd/main.py cloud-init-18.5-21-g8ee294d5/cloudinit/cmd/main.py --- cloud-init-18.4/cloudinit/cmd/main.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/cmd/main.py 2019-01-28 20:07:03.000000000 +0000 @@ -41,7 +41,7 @@ from cloudinit import atomic_helper from cloudinit.config import cc_set_hostname -from cloudinit.dhclient_hook import LogDhclient +from cloudinit import dhclient_hook # Welcome message template @@ -586,12 +586,6 @@ return 0 -def dhclient_hook(name, args): - record = 
LogDhclient(args) - record.check_hooks_dir() - record.record() - - def status_wrapper(name, args, data_d=None, link_d=None): if data_d is None: data_d = os.path.normpath("/var/lib/cloud/data") @@ -795,15 +789,9 @@ 'query', help='Query standardized instance metadata from the command line.') - parser_dhclient = subparsers.add_parser('dhclient-hook', - help=('run the dhclient hook' - 'to record network info')) - parser_dhclient.add_argument("net_action", - help=('action taken on the interface')) - parser_dhclient.add_argument("net_interface", - help=('the network interface being acted' - ' upon')) - parser_dhclient.set_defaults(action=('dhclient_hook', dhclient_hook)) + parser_dhclient = subparsers.add_parser( + dhclient_hook.NAME, help=dhclient_hook.__doc__) + dhclient_hook.get_parser(parser_dhclient) parser_features = subparsers.add_parser('features', help=('list defined features')) diff -Nru cloud-init-18.4/cloudinit/cmd/query.py cloud-init-18.5-21-g8ee294d5/cloudinit/cmd/query.py --- cloud-init-18.4/cloudinit/cmd/query.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/cmd/query.py 2019-01-28 20:07:03.000000000 +0000 @@ -3,6 +3,7 @@ """Query standardized instance metadata from the command line.""" import argparse +from errno import EACCES import os import six import sys @@ -79,27 +80,38 @@ uid = os.getuid() if not all([args.instance_data, args.user_data, args.vendor_data]): paths = read_cfg_paths() - if not args.instance_data: + if args.instance_data: + instance_data_fn = args.instance_data + else: + redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE) if uid == 0: - default_json_fn = INSTANCE_JSON_SENSITIVE_FILE + sensitive_data_fn = os.path.join( + paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE) + if os.path.exists(sensitive_data_fn): + instance_data_fn = sensitive_data_fn + else: + LOG.warning( + 'Missing root-readable %s. Using redacted %s instead.', + sensitive_data_fn, redacted_data_fn) + instance_data_fn = redacted_data_fn else: - default_json_fn = INSTANCE_JSON_FILE # World readable - instance_data_fn = os.path.join(paths.run_dir, default_json_fn) + instance_data_fn = redacted_data_fn + if args.user_data: + user_data_fn = args.user_data else: - instance_data_fn = args.instance_data - if not args.user_data: user_data_fn = os.path.join(paths.instance_link, 'user-data.txt') + if args.vendor_data: + vendor_data_fn = args.vendor_data else: - user_data_fn = args.user_data - if not args.vendor_data: vendor_data_fn = os.path.join(paths.instance_link, 'vendor-data.txt') - else: - vendor_data_fn = args.vendor_data try: instance_json = util.load_file(instance_data_fn) - except IOError: - LOG.error('Missing instance-data.json file: %s', instance_data_fn) + except (IOError, OSError) as e: + if e.errno == EACCES: + LOG.error("No read permission on '%s'. Try sudo", instance_data_fn) + else: + LOG.error('Missing instance-data file: %s', instance_data_fn) return 1 instance_data = util.load_json(instance_json) diff -Nru cloud-init-18.4/cloudinit/cmd/tests/test_cloud_id.py cloud-init-18.5-21-g8ee294d5/cloudinit/cmd/tests/test_cloud_id.py --- cloud-init-18.4/cloudinit/cmd/tests/test_cloud_id.py 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/cmd/tests/test_cloud_id.py 2019-01-28 20:07:03.000000000 +0000 @@ -0,0 +1,127 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +"""Tests for cloud-id command line utility.""" + +from cloudinit import util +from collections import namedtuple +from six import StringIO + +from cloudinit.cmd import cloud_id + +from cloudinit.tests.helpers import CiTestCase, mock + + +class TestCloudId(CiTestCase): + + args = namedtuple('cloudidargs', ('instance_data json long')) + + def setUp(self): + super(TestCloudId, self).setUp() + self.tmp = self.tmp_dir() + self.instance_data = self.tmp_path('instance-data.json', dir=self.tmp) + + def test_cloud_id_arg_parser_defaults(self): + """Validate the argument defaults when not provided by the end-user.""" + cmd = ['cloud-id'] + with mock.patch('sys.argv', cmd): + args = cloud_id.get_parser().parse_args() + self.assertEqual( + '/run/cloud-init/instance-data.json', + args.instance_data) + self.assertEqual(False, args.long) + self.assertEqual(False, args.json) + + def test_cloud_id_arg_parse_overrides(self): + """Override argument defaults by specifying values for each param.""" + util.write_file(self.instance_data, '{}') + cmd = ['cloud-id', '--instance-data', self.instance_data, '--long', + '--json'] + with mock.patch('sys.argv', cmd): + args = cloud_id.get_parser().parse_args() + self.assertEqual(self.instance_data, args.instance_data) + self.assertEqual(True, args.long) + self.assertEqual(True, args.json) + + def test_cloud_id_missing_instance_data_json(self): + """Exit error when the provided instance-data.json does not exist.""" + cmd = ['cloud-id', '--instance-data', self.instance_data] + with mock.patch('sys.argv', cmd): + with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: + with self.assertRaises(SystemExit) as context_manager: + cloud_id.main() + self.assertEqual(1, context_manager.exception.code) + self.assertIn( + "ERROR: File not found '%s'" % self.instance_data, + m_stderr.getvalue()) + + def test_cloud_id_non_json_instance_data(self): + """Exit error when the provided instance-data.json is not json.""" + cmd = ['cloud-id', '--instance-data', self.instance_data] + util.write_file(self.instance_data, '{') + with mock.patch('sys.argv', cmd): + with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: + with self.assertRaises(SystemExit) as context_manager: + cloud_id.main() + self.assertEqual(1, context_manager.exception.code) + self.assertIn( + "ERROR: File '%s' is not valid json." 
% self.instance_data, + m_stderr.getvalue()) + + def test_cloud_id_from_cloud_name_in_instance_data(self): + """Report canonical cloud-id from cloud_name in instance-data.""" + util.write_file( + self.instance_data, + '{"v1": {"cloud_name": "mycloud", "region": "somereg"}}') + cmd = ['cloud-id', '--instance-data', self.instance_data] + with mock.patch('sys.argv', cmd): + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with self.assertRaises(SystemExit) as context_manager: + cloud_id.main() + self.assertEqual(0, context_manager.exception.code) + self.assertEqual("mycloud\n", m_stdout.getvalue()) + + def test_cloud_id_long_name_from_instance_data(self): + """Report long cloud-id format from cloud_name and region.""" + util.write_file( + self.instance_data, + '{"v1": {"cloud_name": "mycloud", "region": "somereg"}}') + cmd = ['cloud-id', '--instance-data', self.instance_data, '--long'] + with mock.patch('sys.argv', cmd): + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with self.assertRaises(SystemExit) as context_manager: + cloud_id.main() + self.assertEqual(0, context_manager.exception.code) + self.assertEqual("mycloud\tsomereg\n", m_stdout.getvalue()) + + def test_cloud_id_lookup_from_instance_data_region(self): + """Report discovered canonical cloud_id when region lookup matches.""" + util.write_file( + self.instance_data, + '{"v1": {"cloud_name": "aws", "region": "cn-north-1",' + ' "platform": "ec2"}}') + cmd = ['cloud-id', '--instance-data', self.instance_data, '--long'] + with mock.patch('sys.argv', cmd): + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with self.assertRaises(SystemExit) as context_manager: + cloud_id.main() + self.assertEqual(0, context_manager.exception.code) + self.assertEqual("aws-china\tcn-north-1\n", m_stdout.getvalue()) + + def test_cloud_id_lookup_json_instance_data_adds_cloud_id_to_json(self): + """Report v1 instance-data content with cloud_id when --json set.""" + util.write_file( + self.instance_data, + '{"v1": {"cloud_name": "unknown", "region": "dfw",' + ' "platform": "openstack", "public_ssh_keys": []}}') + expected = util.json_dumps({ + 'cloud_id': 'openstack', 'cloud_name': 'unknown', + 'platform': 'openstack', 'public_ssh_keys': [], 'region': 'dfw'}) + cmd = ['cloud-id', '--instance-data', self.instance_data, '--json'] + with mock.patch('sys.argv', cmd): + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with self.assertRaises(SystemExit) as context_manager: + cloud_id.main() + self.assertEqual(0, context_manager.exception.code) + self.assertEqual(expected + '\n', m_stdout.getvalue()) + +# vi: ts=4 expandtab diff -Nru cloud-init-18.4/cloudinit/cmd/tests/test_query.py cloud-init-18.5-21-g8ee294d5/cloudinit/cmd/tests/test_query.py --- cloud-init-18.4/cloudinit/cmd/tests/test_query.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/cmd/tests/test_query.py 2019-01-28 20:07:03.000000000 +0000 @@ -1,5 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+import errno from six import StringIO from textwrap import dedent import os @@ -7,7 +8,8 @@ from collections import namedtuple from cloudinit.cmd import query from cloudinit.helpers import Paths -from cloudinit.sources import REDACT_SENSITIVE_VALUE, INSTANCE_JSON_FILE +from cloudinit.sources import ( + REDACT_SENSITIVE_VALUE, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE) from cloudinit.tests.helpers import CiTestCase, mock from cloudinit.util import ensure_dir, write_file @@ -50,10 +52,28 @@ with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: self.assertEqual(1, query.handle_args('anyname', args)) self.assertIn( - 'ERROR: Missing instance-data.json file: %s' % absent_fn, + 'ERROR: Missing instance-data file: %s' % absent_fn, self.logs.getvalue()) self.assertIn( - 'ERROR: Missing instance-data.json file: %s' % absent_fn, + 'ERROR: Missing instance-data file: %s' % absent_fn, + m_stderr.getvalue()) + + def test_handle_args_error_when_no_read_permission_instance_data(self): + """When instance_data file is unreadable, log an error.""" + noread_fn = self.tmp_path('unreadable', dir=self.tmp) + write_file(noread_fn, 'thou shall not pass') + args = self.args( + debug=False, dump_all=True, format=None, instance_data=noread_fn, + list_keys=False, user_data='ud', vendor_data='vd', varname=None) + with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: + with mock.patch('cloudinit.cmd.query.util.load_file') as m_load: + m_load.side_effect = OSError(errno.EACCES, 'Not allowed') + self.assertEqual(1, query.handle_args('anyname', args)) + self.assertIn( + "ERROR: No read permission on '%s'. Try sudo" % noread_fn, + self.logs.getvalue()) + self.assertIn( + "ERROR: No read permission on '%s'. Try sudo" % noread_fn, m_stderr.getvalue()) def test_handle_args_defaults_instance_data(self): @@ -70,12 +90,58 @@ self.assertEqual(1, query.handle_args('anyname', args)) json_file = os.path.join(run_dir, INSTANCE_JSON_FILE) self.assertIn( - 'ERROR: Missing instance-data.json file: %s' % json_file, + 'ERROR: Missing instance-data file: %s' % json_file, self.logs.getvalue()) self.assertIn( - 'ERROR: Missing instance-data.json file: %s' % json_file, + 'ERROR: Missing instance-data file: %s' % json_file, m_stderr.getvalue()) + def test_handle_args_root_fallsback_to_instance_data(self): + """When no instance_data argument, root falls back to redacted json.""" + args = self.args( + debug=False, dump_all=True, format=None, instance_data=None, + list_keys=False, user_data=None, vendor_data=None, varname=None) + run_dir = self.tmp_path('run_dir', dir=self.tmp) + ensure_dir(run_dir) + paths = Paths({'run_dir': run_dir}) + self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths') + self.m_paths.return_value = paths + with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 0 + self.assertEqual(1, query.handle_args('anyname', args)) + json_file = os.path.join(run_dir, INSTANCE_JSON_FILE) + sensitive_file = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE) + self.assertIn( + 'WARNING: Missing root-readable %s. Using redacted %s instead.' 
% ( + sensitive_file, json_file), + m_stderr.getvalue()) + + def test_handle_args_root_uses_instance_sensitive_data(self): + """When no instance_data argument, root uses sensitive json.""" + user_data = self.tmp_path('user-data', dir=self.tmp) + vendor_data = self.tmp_path('vendor-data', dir=self.tmp) + write_file(user_data, 'ud') + write_file(vendor_data, 'vd') + run_dir = self.tmp_path('run_dir', dir=self.tmp) + sensitive_file = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE) + write_file(sensitive_file, '{"my-var": "it worked"}') + ensure_dir(run_dir) + paths = Paths({'run_dir': run_dir}) + self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths') + self.m_paths.return_value = paths + args = self.args( + debug=False, dump_all=True, format=None, instance_data=None, + list_keys=False, user_data=vendor_data, vendor_data=vendor_data, + varname=None) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 0 + self.assertEqual(0, query.handle_args('anyname', args)) + self.assertEqual( + '{\n "my_var": "it worked",\n "userdata": "vd",\n ' + '"vendordata": "vd"\n}\n', m_stdout.getvalue()) + def test_handle_args_dumps_all_instance_data(self): """When --all is specified query will dump all instance data vars.""" write_file(self.instance_data, '{"my-var": "it worked"}') diff -Nru cloud-init-18.4/cloudinit/config/cc_disk_setup.py cloud-init-18.5-21-g8ee294d5/cloudinit/config/cc_disk_setup.py --- cloud-init-18.4/cloudinit/config/cc_disk_setup.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/config/cc_disk_setup.py 2019-01-28 20:07:03.000000000 +0000 @@ -743,7 +743,7 @@ util.udevadm_settle() if not os.path.exists(device): raise RuntimeError("Device %s did not exist and was not created " - "with a udevamd settle." % device) + "with a udevadm settle." % device) # Whether or not the device existed above, it is possible that udev # events that would populate udev database (for reading by lsdname) have diff -Nru cloud-init-18.4/cloudinit/config/cc_lxd.py cloud-init-18.5-21-g8ee294d5/cloudinit/config/cc_lxd.py --- cloud-init-18.4/cloudinit/config/cc_lxd.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/config/cc_lxd.py 2019-01-28 20:07:03.000000000 +0000 @@ -89,7 +89,7 @@ packages.append('lxd') if init_cfg.get("storage_backend") == "zfs" and not util.which('zfs'): - packages.append('zfs') + packages.append('zfsutils-linux') if len(packages): try: diff -Nru cloud-init-18.4/cloudinit/config/cc_resizefs.py cloud-init-18.5-21-g8ee294d5/cloudinit/config/cc_resizefs.py --- cloud-init-18.4/cloudinit/config/cc_resizefs.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/config/cc_resizefs.py 2019-01-28 20:07:03.000000000 +0000 @@ -197,6 +197,13 @@ if devpath.startswith('gpt/'): log.debug('We have a gpt label - just go ahead') return devpath + # Alternatively, our device could simply be a name as returned by gpart, + # such as da0p3 + if not devpath.startswith('/dev/') and not os.path.exists(devpath): + fulldevpath = '/dev/' + devpath.lstrip('/') + log.debug("'%s' doesn't appear to be a valid device path. 
Trying '%s'", + devpath, fulldevpath) + devpath = fulldevpath try: statret = os.stat(devpath) diff -Nru cloud-init-18.4/cloudinit/config/cc_rh_subscription.py cloud-init-18.5-21-g8ee294d5/cloudinit/config/cc_rh_subscription.py --- cloud-init-18.4/cloudinit/config/cc_rh_subscription.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/config/cc_rh_subscription.py 2019-01-28 20:07:03.000000000 +0000 @@ -249,14 +249,14 @@ except util.ProcessExecutionError as e: if e.stdout.rstrip() != '': for line in e.stdout.split("\n"): - if line is not '': + if line != '': self.log_warn(line) else: self.log_warn("Setting the service level failed with: " "{0}".format(e.stderr.strip())) return False for line in return_out.split("\n"): - if line is not "": + if line != "": self.log.debug(line) return True @@ -268,7 +268,7 @@ self.log_warn("Auto-attach failed with: {0}".format(e)) return False for line in return_out.split("\n"): - if line is not "": + if line != "": self.log.debug(line) return True diff -Nru cloud-init-18.4/cloudinit/config/cc_set_passwords.py cloud-init-18.5-21-g8ee294d5/cloudinit/config/cc_set_passwords.py --- cloud-init-18.4/cloudinit/config/cc_set_passwords.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/config/cc_set_passwords.py 2019-01-28 20:07:03.000000000 +0000 @@ -160,7 +160,7 @@ hashed_users = [] randlist = [] users = [] - prog = re.compile(r'\$[1,2a,2y,5,6](\$.+){2}') + prog = re.compile(r'\$(1|2a|2y|5|6)(\$.+){2}') for line in plist: u, p = line.split(':', 1) if prog.match(p) is not None and ":" not in p: diff -Nru cloud-init-18.4/cloudinit/config/cc_write_files.py cloud-init-18.5-21-g8ee294d5/cloudinit/config/cc_write_files.py --- cloud-init-18.4/cloudinit/config/cc_write_files.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/config/cc_write_files.py 2019-01-28 20:07:03.000000000 +0000 @@ -49,6 +49,10 @@ ... path: /bin/arch permissions: '0555' + - content: | + 15 * * * * root ship_logs + path: /etc/crontab + append: true """ import base64 @@ -113,7 +117,8 @@ contents = extract_contents(f_info.get('content', ''), extractions) (u, g) = util.extract_usergroup(f_info.get('owner', DEFAULT_OWNER)) perms = decode_perms(f_info.get('permissions'), DEFAULT_PERMS) - util.write_file(path, contents, mode=perms) + omode = 'ab' if util.get_cfg_option_bool(f_info, 'append') else 'wb' + util.write_file(path, contents, omode=omode, mode=perms) util.chownbyname(path, u, g) diff -Nru cloud-init-18.4/cloudinit/config/tests/test_set_passwords.py cloud-init-18.5-21-g8ee294d5/cloudinit/config/tests/test_set_passwords.py --- cloud-init-18.4/cloudinit/config/tests/test_set_passwords.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/config/tests/test_set_passwords.py 2019-01-28 20:07:03.000000000 +0000 @@ -68,4 +68,44 @@ m_update.assert_called_with({optname: optval}) m_subp.assert_not_called() + +class TestSetPasswordsHandle(CiTestCase): + """Test cc_set_passwords.handle""" + + with_logs = True + + def test_handle_on_empty_config(self): + """handle logs that no password has changed when config is empty.""" + cloud = self.tmp_cloud(distro='ubuntu') + setpass.handle( + 'IGNORED', cfg={}, cloud=cloud, log=self.logger, args=[]) + self.assertEqual( + "DEBUG: Leaving ssh config 'PasswordAuthentication' unchanged. 
" + 'ssh_pwauth=None\n', + self.logs.getvalue()) + + @mock.patch(MODPATH + "util.subp") + def test_handle_on_chpasswd_list_parses_common_hashes(self, m_subp): + """handle parses command password hashes.""" + cloud = self.tmp_cloud(distro='ubuntu') + valid_hashed_pwds = [ + 'root:$2y$10$8BQjxjVByHA/Ee.O1bCXtO8S7Y5WojbXWqnqYpUW.BrPx/' + 'Dlew1Va', + 'ubuntu:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoakMMC7dR52q' + 'SDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXazGGx3oo1'] + cfg = {'chpasswd': {'list': valid_hashed_pwds}} + with mock.patch(MODPATH + 'util.subp') as m_subp: + setpass.handle( + 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[]) + self.assertIn( + 'DEBUG: Handling input for chpasswd as list.', + self.logs.getvalue()) + self.assertIn( + "DEBUG: Setting hashed password for ['root', 'ubuntu']", + self.logs.getvalue()) + self.assertEqual( + [mock.call(['chpasswd', '-e'], + '\n'.join(valid_hashed_pwds) + '\n')], + m_subp.call_args_list) + # vi: ts=4 expandtab diff -Nru cloud-init-18.4/cloudinit/dhclient_hook.py cloud-init-18.5-21-g8ee294d5/cloudinit/dhclient_hook.py --- cloud-init-18.4/cloudinit/dhclient_hook.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/dhclient_hook.py 2019-01-28 20:07:03.000000000 +0000 @@ -1,5 +1,8 @@ # This file is part of cloud-init. See LICENSE file for license information. +"""Run the dhclient hook to record network info.""" + +import argparse import os from cloudinit import atomic_helper @@ -8,44 +11,75 @@ LOG = logging.getLogger(__name__) +NAME = "dhclient-hook" +UP = "up" +DOWN = "down" +EVENTS = (UP, DOWN) + + +def _get_hooks_dir(): + i = stages.Init() + return os.path.join(i.paths.get_runpath(), 'dhclient.hooks') + + +def _filter_env_vals(info): + """Given info (os.environ), return a dictionary with + lower case keys for each entry starting with DHCP4_ or new_.""" + new_info = {} + for k, v in info.items(): + if k.startswith("DHCP4_") or k.startswith("new_"): + key = (k.replace('DHCP4_', '').replace('new_', '')).lower() + new_info[key] = v + return new_info + + +def run_hook(interface, event, data_d=None, env=None): + if event not in EVENTS: + raise ValueError("Unexpected event '%s'. Expected one of: %s" % + (event, EVENTS)) + if data_d is None: + data_d = _get_hooks_dir() + if env is None: + env = os.environ + hook_file = os.path.join(data_d, interface + ".json") + + if event == UP: + if not os.path.exists(data_d): + os.makedirs(data_d) + atomic_helper.write_json(hook_file, _filter_env_vals(env)) + LOG.debug("Wrote dhclient options in %s", hook_file) + elif event == DOWN: + if os.path.exists(hook_file): + os.remove(hook_file) + LOG.debug("Removed dhclient options file %s", hook_file) + + +def get_parser(parser=None): + if parser is None: + parser = argparse.ArgumentParser(prog=NAME, description=__doc__) + parser.add_argument( + "event", help='event taken on the interface', choices=EVENTS) + parser.add_argument( + "interface", help='the network interface being acted upon') + # cloud-init main uses 'action' + parser.set_defaults(action=(NAME, handle_args)) + return parser + + +def handle_args(name, args, data_d=None): + """Handle the Namespace args. + Takes 'name' as passed by cloud-init main. 
not used here.""" + return run_hook(interface=args.interface, event=args.event, data_d=data_d) + + +if __name__ == '__main__': + import sys + parser = get_parser() + args = parser.parse_args(args=sys.argv[1:]) + return_value = handle_args( + NAME, args, data_d=os.environ.get('_CI_DHCP_HOOK_DATA_D')) + if return_value: + sys.exit(return_value) -class LogDhclient(object): - - def __init__(self, cli_args): - self.hooks_dir = self._get_hooks_dir() - self.net_interface = cli_args.net_interface - self.net_action = cli_args.net_action - self.hook_file = os.path.join(self.hooks_dir, - self.net_interface + ".json") - - @staticmethod - def _get_hooks_dir(): - i = stages.Init() - return os.path.join(i.paths.get_runpath(), 'dhclient.hooks') - - def check_hooks_dir(self): - if not os.path.exists(self.hooks_dir): - os.makedirs(self.hooks_dir) - else: - # If the action is down and the json file exists, we need to - # delete the file - if self.net_action is 'down' and os.path.exists(self.hook_file): - os.remove(self.hook_file) - - @staticmethod - def get_vals(info): - new_info = {} - for k, v in info.items(): - if k.startswith("DHCP4_") or k.startswith("new_"): - key = (k.replace('DHCP4_', '').replace('new_', '')).lower() - new_info[key] = v - return new_info - - def record(self): - envs = os.environ - if self.hook_file is None: - return - atomic_helper.write_json(self.hook_file, self.get_vals(envs)) - LOG.debug("Wrote dhclient options in %s", self.hook_file) # vi: ts=4 expandtab diff -Nru cloud-init-18.4/cloudinit/handlers/jinja_template.py cloud-init-18.5-21-g8ee294d5/cloudinit/handlers/jinja_template.py --- cloud-init-18.4/cloudinit/handlers/jinja_template.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/handlers/jinja_template.py 2019-01-28 20:07:03.000000000 +0000 @@ -1,5 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. +from errno import EACCES import os import re @@ -76,7 +77,14 @@ raise RuntimeError( 'Cannot render jinja template vars. Instance data not yet' ' present at %s' % instance_data_file) - instance_data = load_json(load_file(instance_data_file)) + try: + instance_data = load_json(load_file(instance_data_file)) + except (IOError, OSError) as e: + if e.errno == EACCES: + raise RuntimeError( + 'Cannot render jinja template vars. No read permission on' + " '%s'. 
Try sudo" % instance_data_file) + rendered_payload = render_jinja_payload( payload, payload_fn, instance_data, debug) if not rendered_payload: diff -Nru cloud-init-18.4/cloudinit/net/dhcp.py cloud-init-18.5-21-g8ee294d5/cloudinit/net/dhcp.py --- cloud-init-18.4/cloudinit/net/dhcp.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/net/dhcp.py 2019-01-28 20:07:03.000000000 +0000 @@ -9,9 +9,11 @@ import os import re import signal +import time from cloudinit.net import ( - EphemeralIPv4Network, find_fallback_nic, get_devicelist) + EphemeralIPv4Network, find_fallback_nic, get_devicelist, + has_url_connectivity) from cloudinit.net.network_state import mask_and_ipv4_to_bcast_addr as bcip from cloudinit import temp_utils from cloudinit import util @@ -37,37 +39,69 @@ class EphemeralDHCPv4(object): - def __init__(self, iface=None): + def __init__(self, iface=None, connectivity_url=None): self.iface = iface self._ephipv4 = None + self.lease = None + self.connectivity_url = connectivity_url def __enter__(self): + """Setup sandboxed dhcp context, unless connectivity_url can already be + reached.""" + if self.connectivity_url: + if has_url_connectivity(self.connectivity_url): + LOG.debug( + 'Skip ephemeral DHCP setup, instance has connectivity' + ' to %s', self.connectivity_url) + return + return self.obtain_lease() + + def __exit__(self, excp_type, excp_value, excp_traceback): + """Teardown sandboxed dhcp context.""" + self.clean_network() + + def clean_network(self): + """Exit _ephipv4 context to teardown of ip configuration performed.""" + if self.lease: + self.lease = None + if not self._ephipv4: + return + self._ephipv4.__exit__(None, None, None) + + def obtain_lease(self): + """Perform dhcp discovery in a sandboxed environment if possible. + + @return: A dict representing dhcp options on the most recent lease + obtained from the dhclient discovery if run, otherwise an error + is raised. + + @raises: NoDHCPLeaseError if no leases could be obtained. 
+ """ + if self.lease: + return self.lease try: leases = maybe_perform_dhcp_discovery(self.iface) except InvalidDHCPLeaseFileError: raise NoDHCPLeaseError() if not leases: raise NoDHCPLeaseError() - lease = leases[-1] + self.lease = leases[-1] LOG.debug("Received dhcp lease on %s for %s/%s", - lease['interface'], lease['fixed-address'], - lease['subnet-mask']) + self.lease['interface'], self.lease['fixed-address'], + self.lease['subnet-mask']) nmap = {'interface': 'interface', 'ip': 'fixed-address', 'prefix_or_mask': 'subnet-mask', 'broadcast': 'broadcast-address', 'router': 'routers'} - kwargs = dict([(k, lease.get(v)) for k, v in nmap.items()]) + kwargs = dict([(k, self.lease.get(v)) for k, v in nmap.items()]) if not kwargs['broadcast']: kwargs['broadcast'] = bcip(kwargs['prefix_or_mask'], kwargs['ip']) + if self.connectivity_url: + kwargs['connectivity_url'] = self.connectivity_url ephipv4 = EphemeralIPv4Network(**kwargs) ephipv4.__enter__() self._ephipv4 = ephipv4 - return lease - - def __exit__(self, excp_type, excp_value, excp_traceback): - if not self._ephipv4: - return - self._ephipv4.__exit__(excp_type, excp_value, excp_traceback) + return self.lease def maybe_perform_dhcp_discovery(nic=None): @@ -94,7 +128,9 @@ if not dhclient_path: LOG.debug('Skip dhclient configuration: No dhclient command found.') return [] - with temp_utils.tempdir(prefix='cloud-init-dhcp-', needs_exe=True) as tdir: + with temp_utils.tempdir(rmtree_ignore_errors=True, + prefix='cloud-init-dhcp-', + needs_exe=True) as tdir: # Use /var/tmp because /run/cloud-init/tmp is mounted noexec return dhcp_discovery(dhclient_path, nic, tdir) @@ -162,24 +198,39 @@ '-pf', pid_file, interface, '-sf', '/bin/true'] util.subp(cmd, capture=True) - # dhclient doesn't write a pid file until after it forks when it gets a - # proper lease response. Since cleandir is a temp directory that gets - # removed, we need to wait for that pidfile creation before the - # cleandir is removed, otherwise we get FileNotFound errors. + # Wait for pid file and lease file to appear, and for the process + # named by the pid file to daemonize (have pid 1 as its parent). If we + # try to read the lease file before daemonization happens, we might try + # to read it before the dhclient has actually written it. We also have + # to wait until the dhclient has become a daemon so we can be sure to + # kill the correct process, thus freeing cleandir to be deleted back + # up the callstack. 
missing = util.wait_for_files( [pid_file, lease_file], maxwait=5, naplen=0.01) if missing: LOG.warning("dhclient did not produce expected files: %s", ', '.join(os.path.basename(f) for f in missing)) return [] - pid_content = util.load_file(pid_file).strip() - try: - pid = int(pid_content) - except ValueError: - LOG.debug( - "pid file contains non-integer content '%s'", pid_content) - else: - os.kill(pid, signal.SIGKILL) + + ppid = 'unknown' + for _ in range(0, 1000): + pid_content = util.load_file(pid_file).strip() + try: + pid = int(pid_content) + except ValueError: + pass + else: + ppid = util.get_proc_ppid(pid) + if ppid == 1: + LOG.debug('killing dhclient with pid=%s', pid) + os.kill(pid, signal.SIGKILL) + return parse_dhcp_lease_file(lease_file) + time.sleep(0.01) + + LOG.error( + 'dhclient(pid=%s, parentpid=%s) failed to daemonize after %s seconds', + pid_content, ppid, 0.01 * 1000 + ) return parse_dhcp_lease_file(lease_file) diff -Nru cloud-init-18.4/cloudinit/net/eni.py cloud-init-18.5-21-g8ee294d5/cloudinit/net/eni.py --- cloud-init-18.4/cloudinit/net/eni.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/net/eni.py 2019-01-28 20:07:03.000000000 +0000 @@ -371,22 +371,23 @@ 'gateway': 'gw', 'metric': 'metric', } + + default_gw = '' if route['network'] == '0.0.0.0' and route['netmask'] == '0.0.0.0': - default_gw = " default gw %s" % route['gateway'] - content.append(up + default_gw + or_true) - content.append(down + default_gw + or_true) + default_gw = ' default' elif route['network'] == '::' and route['prefix'] == 0: - # ipv6! - default_gw = " -A inet6 default gw %s" % route['gateway'] - content.append(up + default_gw + or_true) - content.append(down + default_gw + or_true) - else: - route_line = "" - for k in ['network', 'netmask', 'gateway', 'metric']: - if k in route: - route_line += " %s %s" % (mapping[k], route[k]) - content.append(up + route_line + or_true) - content.append(down + route_line + or_true) + default_gw = ' -A inet6 default' + + route_line = '' + for k in ['network', 'netmask', 'gateway', 'metric']: + if default_gw and k in ['network', 'netmask']: + continue + if k == 'gateway': + route_line += '%s %s %s' % (default_gw, mapping[k], route[k]) + elif k in route: + route_line += ' %s %s' % (mapping[k], route[k]) + content.append(up + route_line + or_true) + content.append(down + route_line + or_true) return content def _render_iface(self, iface, render_hwaddress=False): diff -Nru cloud-init-18.4/cloudinit/net/__init__.py cloud-init-18.5-21-g8ee294d5/cloudinit/net/__init__.py --- cloud-init-18.4/cloudinit/net/__init__.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/net/__init__.py 2019-01-28 20:07:03.000000000 +0000 @@ -12,6 +12,7 @@ from cloudinit.net.network_state import mask_to_net_prefix from cloudinit import util +from cloudinit.url_helper import UrlError, readurl LOG = logging.getLogger(__name__) SYS_CLASS_NET = "/sys/class/net/" @@ -612,7 +613,8 @@ Bridges and any devices that have a 'stolen' mac are excluded.""" ret = [] devs = get_devicelist() - empty_mac = '00:00:00:00:00:00' + # 16 somewhat arbitrarily chosen. Normally a mac is 6 '00:' tokens. + zero_mac = ':'.join(('00',) * 16) for name in devs: if not interface_has_own_mac(name): continue @@ -624,7 +626,8 @@ # some devices may not have a mac (tun0) if not mac: continue - if mac == empty_mac and name != 'lo': + # skip nics that have no mac (00:00....) 
+ if name != 'lo' and mac == zero_mac[:len(mac)]: continue ret.append((name, mac, device_driver(name), device_devid(name))) return ret @@ -645,16 +648,36 @@ return ret +def has_url_connectivity(url): + """Return true when the instance has access to the provided URL + + Logs a warning if url is not the expected format. + """ + if not any([url.startswith('http://'), url.startswith('https://')]): + LOG.warning( + "Ignoring connectivity check. Expected URL beginning with http*://" + " received '%s'", url) + return False + try: + readurl(url, timeout=5) + except UrlError: + return False + return True + + class EphemeralIPv4Network(object): """Context manager which sets up temporary static network configuration. - No operations are performed if the provided interface is already connected. + No operations are performed if the provided interface already has the + specified configuration. + This can be verified with the connectivity_url. If unconnected, bring up the interface with valid ip, prefix and broadcast. If router is provided setup a default route for that interface. Upon context exit, clean up the interface leaving no configuration behind. """ - def __init__(self, interface, ip, prefix_or_mask, broadcast, router=None): + def __init__(self, interface, ip, prefix_or_mask, broadcast, router=None, + connectivity_url=None): """Setup context manager and validate call signature. @param interface: Name of the network interface to bring up. @@ -663,6 +686,8 @@ prefix. @param broadcast: Broadcast address for the IPv4 network. @param router: Optionally the default gateway IP. + @param connectivity_url: Optionally, a URL to verify if a usable + connection already exists. """ if not all([interface, ip, prefix_or_mask, broadcast]): raise ValueError( @@ -673,6 +698,8 @@ except ValueError as e: raise ValueError( 'Cannot setup network: {0}'.format(e)) + + self.connectivity_url = connectivity_url self.interface = interface self.ip = ip self.broadcast = broadcast @@ -681,6 +708,13 @@ def __enter__(self): """Perform ephemeral network setup if interface is not connected.""" + if self.connectivity_url: + if has_url_connectivity(self.connectivity_url): + LOG.debug( + 'Skip ephemeral network setup, instance has connectivity' + ' to %s', self.connectivity_url) + return + self._bringup_device() if self.router: self._bringup_router() diff -Nru cloud-init-18.4/cloudinit/net/netplan.py cloud-init-18.5-21-g8ee294d5/cloudinit/net/netplan.py --- cloud-init-18.4/cloudinit/net/netplan.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/net/netplan.py 2019-01-28 20:07:03.000000000 +0000 @@ -114,13 +114,13 @@ for route in subnet.get('routes', []): to_net = "%s/%s" % (route.get('network'), route.get('prefix')) - route = { + new_route = { 'via': route.get('gateway'), 'to': to_net, } if 'metric' in route: - route.update({'metric': route.get('metric', 100)}) - routes.append(route) + new_route.update({'metric': route.get('metric', 100)}) + routes.append(new_route) addresses.append(addr) diff -Nru cloud-init-18.4/cloudinit/net/sysconfig.py cloud-init-18.5-21-g8ee294d5/cloudinit/net/sysconfig.py --- cloud-init-18.4/cloudinit/net/sysconfig.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/net/sysconfig.py 2019-01-28 20:07:03.000000000 +0000 @@ -10,11 +10,14 @@ from cloudinit import log as logging from cloudinit import util +from configobj import ConfigObj + from . 
import renderer from .network_state import ( is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6) LOG = logging.getLogger(__name__) +NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf" def _make_header(sep='#'): @@ -46,6 +49,24 @@ return value +def enable_ifcfg_rh(path): + """Add ifcfg-rh to NetworkManager.cfg plugins if main section is present""" + config = ConfigObj(path) + if 'main' in config: + if 'plugins' in config['main']: + if 'ifcfg-rh' in config['main']['plugins']: + return + else: + config['main']['plugins'] = [] + + if isinstance(config['main']['plugins'], list): + config['main']['plugins'].append('ifcfg-rh') + else: + config['main']['plugins'] = [config['main']['plugins'], 'ifcfg-rh'] + config.write() + LOG.debug('Enabled ifcfg-rh NetworkManager plugins') + + class ConfigMap(object): """Sysconfig like dictionary object.""" @@ -156,13 +177,23 @@ _quote_value(gateway_value))) buf.write("%s=%s\n" % ('NETMASK' + str(reindex), _quote_value(netmask_value))) + metric_key = 'METRIC' + index + if metric_key in self._conf: + metric_value = str(self._conf['METRIC' + index]) + buf.write("%s=%s\n" % ('METRIC' + str(reindex), + _quote_value(metric_value))) elif proto == "ipv6" and self.is_ipv6_route(address_value): netmask_value = str(self._conf['NETMASK' + index]) gateway_value = str(self._conf['GATEWAY' + index]) - buf.write("%s/%s via %s dev %s\n" % (address_value, - netmask_value, - gateway_value, - self._route_name)) + metric_value = ( + 'metric ' + str(self._conf['METRIC' + index]) + if 'METRIC' + index in self._conf else '') + buf.write( + "%s/%s via %s %s dev %s\n" % (address_value, + netmask_value, + gateway_value, + metric_value, + self._route_name)) return buf.getvalue() @@ -242,6 +273,7 @@ ('USERCTL', False), ('NM_CONTROLLED', False), ('BOOTPROTO', 'none'), + ('STARTMODE', 'auto'), ]) # If these keys exist, then their values will be used to form @@ -336,6 +368,7 @@ iface_cfg.name)) if subnet.get('control') == 'manual': iface_cfg['ONBOOT'] = False + iface_cfg['STARTMODE'] = 'manual' # set IPv4 and IPv6 static addresses ipv4_index = -1 @@ -370,6 +403,9 @@ else: iface_cfg['GATEWAY'] = subnet['gateway'] + if 'metric' in subnet: + iface_cfg['METRIC'] = subnet['metric'] + if 'dns_search' in subnet: iface_cfg['DOMAIN'] = ' '.join(subnet['dns_search']) @@ -414,15 +450,19 @@ else: iface_cfg['GATEWAY'] = route['gateway'] route_cfg.has_set_default_ipv4 = True + if 'metric' in route: + iface_cfg['METRIC'] = route['metric'] else: gw_key = 'GATEWAY%s' % route_cfg.last_idx nm_key = 'NETMASK%s' % route_cfg.last_idx addr_key = 'ADDRESS%s' % route_cfg.last_idx + metric_key = 'METRIC%s' % route_cfg.last_idx route_cfg.last_idx += 1 # add default routes only to ifcfg files, not # to route-* or route6-* for (old_key, new_key) in [('gateway', gw_key), + ('metric', metric_key), ('netmask', nm_key), ('network', addr_key)]: if old_key in route: @@ -519,6 +559,8 @@ content.add_nameserver(nameserver) for searchdomain in network_state.dns_searchdomains: content.add_search_domain(searchdomain) + if not str(content): + return None header = _make_header(';') content_str = str(content) if not content_str.startswith(header): @@ -628,7 +670,8 @@ dns_path = util.target_path(target, self.dns_path) resolv_content = self._render_dns(network_state, existing_dns_path=dns_path) - util.write_file(dns_path, resolv_content, file_mode) + if resolv_content: + util.write_file(dns_path, resolv_content, file_mode) if self.networkmanager_conf_path: nm_conf_path = util.target_path(target, self.networkmanager_conf_path) @@ 
-640,6 +683,8 @@ netrules_content = self._render_persistent_net(network_state) netrules_path = util.target_path(target, self.netrules_path) util.write_file(netrules_path, netrules_content, file_mode) + if available_nm(target=target): + enable_ifcfg_rh(util.target_path(target, path=NM_CFG_FILE)) sysconfig_path = util.target_path(target, templates.get('control')) # Distros configuring /etc/sysconfig/network as a file e.g. Centos @@ -654,6 +699,13 @@ def available(target=None): + sysconfig = available_sysconfig(target=target) + nm = available_nm(target=target) + + return any([nm, sysconfig]) + + +def available_sysconfig(target=None): expected = ['ifup', 'ifdown'] search = ['/sbin', '/usr/sbin'] for p in expected: @@ -669,4 +721,10 @@ return True +def available_nm(target=None): + if not os.path.isfile(util.target_path(target, path=NM_CFG_FILE)): + return False + return True + + # vi: ts=4 expandtab diff -Nru cloud-init-18.4/cloudinit/net/tests/test_dhcp.py cloud-init-18.5-21-g8ee294d5/cloudinit/net/tests/test_dhcp.py --- cloud-init-18.4/cloudinit/net/tests/test_dhcp.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/net/tests/test_dhcp.py 2019-01-28 20:07:03.000000000 +0000 @@ -1,15 +1,17 @@ # This file is part of cloud-init. See LICENSE file for license information. +import httpretty import os import signal from textwrap import dedent +import cloudinit.net as net from cloudinit.net.dhcp import ( InvalidDHCPLeaseFileError, maybe_perform_dhcp_discovery, parse_dhcp_lease_file, dhcp_discovery, networkd_load_leases) from cloudinit.util import ensure_file, write_file from cloudinit.tests.helpers import ( - CiTestCase, mock, populate_dir, wrap_and_call) + CiTestCase, HttprettyTestCase, mock, populate_dir, wrap_and_call) class TestParseDHCPLeasesFile(CiTestCase): @@ -143,16 +145,20 @@ 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}], dhcp_discovery(dhclient_script, 'eth9', tmpdir)) self.assertIn( - "pid file contains non-integer content ''", self.logs.getvalue()) + "dhclient(pid=, parentpid=unknown) failed " + "to daemonize after 10.0 seconds", + self.logs.getvalue()) m_kill.assert_not_called() + @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') @mock.patch('cloudinit.net.dhcp.os.kill') @mock.patch('cloudinit.net.dhcp.util.wait_for_files') @mock.patch('cloudinit.net.dhcp.util.subp') def test_dhcp_discovery_run_in_sandbox_waits_on_lease_and_pid(self, m_subp, m_wait, - m_kill): + m_kill, + m_getppid): """dhcp_discovery waits for the presence of pidfile and dhcp.leases.""" tmpdir = self.tmp_dir() dhclient_script = os.path.join(tmpdir, 'dhclient.orig') @@ -162,6 +168,7 @@ pidfile = self.tmp_path('dhclient.pid', tmpdir) leasefile = self.tmp_path('dhcp.leases', tmpdir) m_wait.return_value = [pidfile] # Return the missing pidfile wait for + m_getppid.return_value = 1 # Indicate that dhclient has daemonized self.assertEqual([], dhcp_discovery(dhclient_script, 'eth9', tmpdir)) self.assertEqual( mock.call([pidfile, leasefile], maxwait=5, naplen=0.01), @@ -171,9 +178,10 @@ self.logs.getvalue()) m_kill.assert_not_called() + @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') @mock.patch('cloudinit.net.dhcp.os.kill') @mock.patch('cloudinit.net.dhcp.util.subp') - def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill): + def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill, m_getppid): """dhcp_discovery brings up the interface and runs dhclient. It also returns the parsed dhcp.leases file generated in the sandbox. 
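The get_proc_ppid mocking added to these tests stands in for the daemonize check introduced in dhcp_discovery above: dhclient forks, and once the sandboxed process has been reparented to init its parent pid reads as 1. A minimal standalone sketch of such a lookup, assuming a Linux /proc layout; the name proc_ppid is illustrative, and util.get_proc_ppid's actual implementation may differ:

    def proc_ppid(pid):
        """Return the parent pid of <pid> by parsing /proc/<pid>/stat."""
        with open('/proc/%d/stat' % pid) as f:
            stat = f.read()
        # stat reads like '1234 (dhclient) S 1 ...'. The comm field may
        # itself contain spaces, so split once after its closing paren;
        # the ppid is then the second remaining space-separated field.
        return int(stat.rsplit(')', 1)[1].split()[1])

    # proc_ppid(pid) == 1 means dhclient has daemonized, at which point
    # it is safe to SIGKILL it and parse the lease file it left behind.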
@@ -195,6 +203,7 @@ pid_file = os.path.join(tmpdir, 'dhclient.pid') my_pid = 1 write_file(pid_file, "%d\n" % my_pid) + m_getppid.return_value = 1 # Indicate that dhclient has daemonized self.assertItemsEqual( [{'interface': 'eth9', 'fixed-address': '192.168.2.74', @@ -321,3 +330,37 @@ '9': self.lxd_lease}) self.assertEqual({'1': self.azure_parsed, '9': self.lxd_parsed}, networkd_load_leases(self.lease_d)) + + +class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase): + + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + def test_ephemeral_dhcp_no_network_if_url_connectivity(self, m_dhcp): + """No EphemeralDhcp4 network setup when connectivity_url succeeds.""" + url = 'http://example.org/index.html' + + httpretty.register_uri(httpretty.GET, url) + with net.dhcp.EphemeralDHCPv4(connectivity_url=url) as lease: + self.assertIsNone(lease) + # Ensure that no teardown happens: + m_dhcp.assert_not_called() + + @mock.patch('cloudinit.net.dhcp.util.subp') + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + def test_ephemeral_dhcp_setup_network_if_url_connectivity( + self, m_dhcp, m_subp): + """No EphemeralDhcp4 network setup when connectivity_url succeeds.""" + url = 'http://example.org/index.html' + fake_lease = { + 'interface': 'eth9', 'fixed-address': '192.168.2.2', + 'subnet-mask': '255.255.0.0'} + m_dhcp.return_value = [fake_lease] + m_subp.return_value = ('', '') + + httpretty.register_uri(httpretty.GET, url, body={}, status=404) + with net.dhcp.EphemeralDHCPv4(connectivity_url=url) as lease: + self.assertEqual(fake_lease, lease) + # Ensure that dhcp discovery occurs + m_dhcp.called_once_with() + +# vi: ts=4 expandtab diff -Nru cloud-init-18.4/cloudinit/net/tests/test_init.py cloud-init-18.5-21-g8ee294d5/cloudinit/net/tests/test_init.py --- cloud-init-18.4/cloudinit/net/tests/test_init.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/net/tests/test_init.py 2019-01-28 20:07:03.000000000 +0000 @@ -2,14 +2,16 @@ import copy import errno +import httpretty import mock import os +import requests import textwrap import yaml import cloudinit.net as net from cloudinit.util import ensure_file, write_file, ProcessExecutionError -from cloudinit.tests.helpers import CiTestCase +from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase class TestSysDevPath(CiTestCase): @@ -458,6 +460,22 @@ self.assertEqual(expected_setup_calls, m_subp.call_args_list) m_subp.assert_has_calls(expected_teardown_calls) + @mock.patch('cloudinit.net.readurl') + def test_ephemeral_ipv4_no_network_if_url_connectivity( + self, m_readurl, m_subp): + """No network setup is performed if we can successfully connect to + connectivity_url.""" + params = { + 'interface': 'eth0', 'ip': '192.168.2.2', + 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255', + 'connectivity_url': 'http://example.org/index.html'} + + with net.EphemeralIPv4Network(**params): + self.assertEqual([mock.call('http://example.org/index.html', + timeout=5)], m_readurl.call_args_list) + # Ensure that no teardown happens: + m_subp.assert_has_calls([]) + def test_ephemeral_ipv4_network_noop_when_configured(self, m_subp): """EphemeralIPv4Network handles exception when address is setup. 
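For context on what these two tests exercise, a hedged usage sketch of the new connectivity_url parameter follows; the interface name, addresses and metadata URL are placeholder values, not anything this diff prescribes:

    from cloudinit.net import EphemeralIPv4Network

    with EphemeralIPv4Network(
            interface='eth0', ip='192.168.2.2',
            prefix_or_mask='255.255.255.0', broadcast='192.168.2.255',
            connectivity_url='http://169.254.169.254/metadata'):
        # If the URL already answered, __enter__ configured nothing and
        # __exit__ tears nothing down; otherwise the address is brought
        # up here and removed again when the block exits.
        pass  # ...fetch metadata over the (possibly ephemeral) network...

EphemeralDHCPv4 accepts the same connectivity_url keyword, as the tests above show.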
@@ -619,3 +637,35 @@ def test_apply_v2_renames_raises_runtime_error_on_unknown_version(self): with self.assertRaises(RuntimeError): net.apply_network_config_names(yaml.load("version: 3")) + + +class TestHasURLConnectivity(HttprettyTestCase): + + def setUp(self): + super(TestHasURLConnectivity, self).setUp() + self.url = 'http://fake/' + self.kwargs = {'allow_redirects': True, 'timeout': 5.0} + + @mock.patch('cloudinit.net.readurl') + def test_url_timeout_on_connectivity_check(self, m_readurl): + """A timeout of 5 seconds is provided when reading a url.""" + self.assertTrue( + net.has_url_connectivity(self.url), 'Expected True on url connect') + + def test_true_on_url_connectivity_success(self): + httpretty.register_uri(httpretty.GET, self.url) + self.assertTrue( + net.has_url_connectivity(self.url), 'Expected True on url connect') + + @mock.patch('requests.Session.request') + def test_true_on_url_connectivity_timeout(self, m_request): + """A timeout raised accessing the url will return False.""" + m_request.side_effect = requests.Timeout('Fake Connection Timeout') + self.assertFalse( + net.has_url_connectivity(self.url), + 'Expected False on url timeout') + + def test_true_on_url_connectivity_failure(self): + httpretty.register_uri(httpretty.GET, self.url, body={}, status=404) + self.assertFalse( + net.has_url_connectivity(self.url), 'Expected False on url fail') diff -Nru cloud-init-18.4/cloudinit/sources/DataSourceAliYun.py cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceAliYun.py --- cloud-init-18.4/cloudinit/sources/DataSourceAliYun.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceAliYun.py 2019-01-28 20:07:03.000000000 +0000 @@ -1,7 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. -import os - from cloudinit import sources from cloudinit.sources import DataSourceEc2 as EC2 from cloudinit import util @@ -18,25 +16,17 @@ min_metadata_version = '2016-01-01' extended_metadata_versions = [] - def __init__(self, sys_cfg, distro, paths): - super(DataSourceAliYun, self).__init__(sys_cfg, distro, paths) - self.seed_dir = os.path.join(paths.seed_dir, "AliYun") - def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): return self.metadata.get('hostname', 'localhost.localdomain') def get_public_ssh_keys(self): return parse_public_keys(self.metadata.get('public-keys', {})) - @property - def cloud_platform(self): - if self._cloud_platform is None: - if _is_aliyun(): - self._cloud_platform = EC2.Platforms.ALIYUN - else: - self._cloud_platform = EC2.Platforms.NO_EC2_METADATA - - return self._cloud_platform + def _get_cloud_name(self): + if _is_aliyun(): + return EC2.CloudNames.ALIYUN + else: + return EC2.CloudNames.NO_EC2_METADATA def _is_aliyun(): diff -Nru cloud-init-18.4/cloudinit/sources/DataSourceAltCloud.py cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceAltCloud.py --- cloud-init-18.4/cloudinit/sources/DataSourceAltCloud.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceAltCloud.py 2019-01-28 20:07:03.000000000 +0000 @@ -89,7 +89,9 @@ ''' Description: Get the type for the cloud back end this instance is running on - by examining the string returned by reading the dmi data. + by examining the string returned by reading either: + CLOUD_INFO_FILE or + the dmi data. 
Input: None @@ -99,7 +101,14 @@ 'RHEV', 'VSPHERE' or 'UNKNOWN' ''' - + if os.path.exists(CLOUD_INFO_FILE): + try: + cloud_type = util.load_file(CLOUD_INFO_FILE).strip().upper() + except IOError: + util.logexc(LOG, 'Unable to access cloud info file at %s.', + CLOUD_INFO_FILE) + return 'UNKNOWN' + return cloud_type system_name = util.read_dmi_data("system-product-name") if not system_name: return 'UNKNOWN' @@ -134,15 +143,7 @@ LOG.debug('Invoked get_data()') - if os.path.exists(CLOUD_INFO_FILE): - try: - cloud_type = util.load_file(CLOUD_INFO_FILE).strip().upper() - except IOError: - util.logexc(LOG, 'Unable to access cloud info file at %s.', - CLOUD_INFO_FILE) - return False - else: - cloud_type = self.get_cloud_type() + cloud_type = self.get_cloud_type() LOG.debug('cloud_type: %s', str(cloud_type)) @@ -161,6 +162,15 @@ util.logexc(LOG, 'Failed accessing user data.') return False + def _get_subplatform(self): + """Return the subplatform metadata details.""" + cloud_type = self.get_cloud_type() + if not hasattr(self, 'source'): + self.source = sources.METADATA_UNKNOWN + if cloud_type == 'RHEV': + self.source = '/dev/fd0' + return '%s (%s)' % (cloud_type.lower(), self.source) + def user_data_rhevm(self): ''' RHEVM specific userdata read @@ -232,6 +242,7 @@ try: return_str = util.mount_cb(cdrom_dev, read_user_data_callback) if return_str: + self.source = cdrom_dev break except OSError as err: if err.errno != errno.ENOENT: diff -Nru cloud-init-18.4/cloudinit/sources/DataSourceAzure.py cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceAzure.py --- cloud-init-18.4/cloudinit/sources/DataSourceAzure.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceAzure.py 2019-01-28 20:07:03.000000000 +0000 @@ -22,7 +22,8 @@ from cloudinit.net.dhcp import EphemeralDHCPv4 from cloudinit import sources from cloudinit.sources.helpers.azure import get_metadata_from_fabric -from cloudinit.url_helper import readurl, UrlError +from cloudinit.sources.helpers import netlink +from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc from cloudinit import util LOG = logging.getLogger(__name__) @@ -57,7 +58,7 @@ # List of static scripts and network config artifacts created by # stock ubuntu suported images. UBUNTU_EXTENDED_NETWORK_SCRIPTS = [ - '/etc/netplan/90-azure-hotplug.yaml', + '/etc/netplan/90-hotplug-azure.yaml', '/usr/local/sbin/ephemeral_eth.sh', '/etc/udev/rules.d/10-net-device-added.rules', '/run/network/interfaces.ephemeral.d', @@ -207,7 +208,9 @@ }, 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH}, 'dhclient_lease_file': LEASE_FILE, + 'apply_network_config': True, # Use IMDS published network configuration } +# RELEASE_BLOCKER: Xenial and earlier apply_network_config default is False BUILTIN_CLOUD_CONFIG = { 'disk_setup': { @@ -278,6 +281,7 @@ self._network_config = None # Regenerate network config new_instance boot and every boot self.update_events['network'].add(EventType.BOOT) + self._ephemeral_dhcp_ctx = None def __str__(self): root = sources.DataSource.__str__(self) @@ -351,6 +355,14 @@ metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) return metadata + def _get_subplatform(self): + """Return the subplatform metadata source details.""" + if self.seed.startswith('/dev'): + subplatform_type = 'config-disk' + else: + subplatform_type = 'seed-dir' + return '%s (%s)' % (subplatform_type, self.seed) + def crawl_metadata(self): """Walk all instance metadata sources returning a dict on success. 
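The reprovision polling in the hunks below leans on readurl's exception_cb contract: the callback is handed the failure message and the exception, and returns True to keep retrying or False to give up. A minimal sketch of the 404-only predicate used while waiting for reprovision data (the function name here is illustrative; the general-purpose helper this diff imports is retry_on_url_exc from cloudinit.url_helper):

    from cloudinit.url_helper import UrlError

    def retry_on_404_only(msg, exception):
        """exception_cb for readurl: poll while IMDS answers 404.

        A 404 means reprovision data is not published yet; any other
        failure, including a timeout, should end the retry loop.
        """
        return isinstance(exception, UrlError) and exception.code == 404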
@@ -396,10 +408,15 @@ LOG.warning("%s was not mountable", cdev) continue - if reprovision or self._should_reprovision(ret): + perform_reprovision = reprovision or self._should_reprovision(ret) + if perform_reprovision: + if util.is_FreeBSD(): + msg = "Free BSD is not supported for PPS VMs" + LOG.error(msg) + raise sources.InvalidMetaDataException(msg) ret = self._reprovision() imds_md = get_metadata_from_imds( - self.fallback_interface, retries=3) + self.fallback_interface, retries=10) (md, userdata_raw, cfg, files) = ret self.seed = cdev crawled_data.update({ @@ -424,6 +441,18 @@ crawled_data['metadata']['random_seed'] = seed crawled_data['metadata']['instance-id'] = util.read_dmi_data( 'system-uuid') + + if perform_reprovision: + LOG.info("Reporting ready to Azure after getting ReprovisionData") + use_cached_ephemeral = (net.is_up(self.fallback_interface) and + getattr(self, '_ephemeral_dhcp_ctx', None)) + if use_cached_ephemeral: + self._report_ready(lease=self._ephemeral_dhcp_ctx.lease) + self._ephemeral_dhcp_ctx.clean_network() # Teardown ephemeral + else: + with EphemeralDHCPv4() as lease: + self._report_ready(lease=lease) + return crawled_data def _is_platform_viable(self): @@ -450,7 +479,8 @@ except sources.InvalidMetaDataException as e: LOG.warning('Could not crawl Azure metadata: %s', e) return False - if self.distro and self.distro.name == 'ubuntu': + if (self.distro and self.distro.name == 'ubuntu' and + self.ds_cfg.get('apply_network_config')): maybe_remove_ubuntu_network_config_scripts() # Process crawled data and augment with various config defaults @@ -498,8 +528,8 @@ response. Then return the returned JSON object.""" url = IMDS_URL + "reprovisiondata?api-version=2017-04-02" headers = {"Metadata": "true"} + nl_sock = None report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE)) - LOG.debug("Start polling IMDS") def exc_cb(msg, exception): if isinstance(exception, UrlError) and exception.code == 404: @@ -508,25 +538,47 @@ # call DHCP and setup the ephemeral network to acquire the new IP. 
return False + LOG.debug("Wait for vnetswitch to happen") while True: try: - with EphemeralDHCPv4() as lease: - if report_ready: - path = REPORTED_READY_MARKER_FILE - LOG.info( - "Creating a marker file to report ready: %s", path) - util.write_file(path, "{pid}: {time}\n".format( - pid=os.getpid(), time=time())) - self._report_ready(lease=lease) - report_ready = False + # Save our EphemeralDHCPv4 context so we avoid repeated dhcp + self._ephemeral_dhcp_ctx = EphemeralDHCPv4() + lease = self._ephemeral_dhcp_ctx.obtain_lease() + if report_ready: + try: + nl_sock = netlink.create_bound_netlink_socket() + except netlink.NetlinkCreateSocketError as e: + LOG.warning(e) + self._ephemeral_dhcp_ctx.clean_network() + return + path = REPORTED_READY_MARKER_FILE + LOG.info( + "Creating a marker file to report ready: %s", path) + util.write_file(path, "{pid}: {time}\n".format( + pid=os.getpid(), time=time())) + self._report_ready(lease=lease) + report_ready = False + try: + netlink.wait_for_media_disconnect_connect( + nl_sock, lease['interface']) + except AssertionError as error: + LOG.error(error) + return + self._ephemeral_dhcp_ctx.clean_network() + else: return readurl(url, timeout=1, headers=headers, - exception_cb=exc_cb, infinite=True).contents + exception_cb=exc_cb, infinite=True, + log_req_resp=False).contents except UrlError: + # Teardown our EphemeralDHCPv4 context on failure as we retry + self._ephemeral_dhcp_ctx.clean_network() pass + finally: + if nl_sock: + nl_sock.close() def _report_ready(self, lease): - """Tells the fabric provisioning has completed - before we go into our polling loop.""" + """Tells the fabric provisioning has completed """ try: get_metadata_from_fabric(None, lease['unknown-245']) except Exception: @@ -611,7 +663,11 @@ the blacklisted devices. """ if not self._network_config: - self._network_config = parse_network_config(self._metadata_imds) + if self.ds_cfg.get('apply_network_config'): + nc_src = self._metadata_imds + else: + nc_src = None + self._network_config = parse_network_config(nc_src) return self._network_config @@ -692,7 +748,7 @@ file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", update_env_for_mount={'LANG': 'C'}) except util.MountFailedError as e: - if "mount: unknown filesystem type 'ntfs'" in str(e): + if "unknown filesystem type 'ntfs'" in str(e): return True, (bmsg + ' but this system cannot mount NTFS,' ' assuming there are no important files.' 
' Formatting allowed.') @@ -920,12 +976,12 @@ lambda n: n.localName == "LinuxProvisioningConfigurationSet") - if len(results) == 0: + if len(lpcs_nodes) == 0: raise NonAzureDataSource("No LinuxProvisioningConfigurationSet") - if len(results) > 1: + if len(lpcs_nodes) > 1: raise BrokenAzureDataSource("found '%d' %ss" % - ("LinuxProvisioningConfigurationSet", - len(results))) + (len(lpcs_nodes), + "LinuxProvisioningConfigurationSet")) lpcs = lpcs_nodes[0] if not lpcs.hasChildNodes(): @@ -1154,17 +1210,12 @@ def _get_metadata_from_imds(retries): - def retry_on_url_error(msg, exception): - if isinstance(exception, UrlError) and exception.code == 404: - return True # Continue retries - return False # Stop retries on all other exceptions - url = IMDS_URL + "instance?api-version=2017-12-01" headers = {"Metadata": "true"} try: response = readurl( url, timeout=1, headers=headers, retries=retries, - exception_cb=retry_on_url_error) + exception_cb=retry_on_url_exc) except Exception as e: LOG.debug('Ignoring IMDS instance metadata: %s', e) return {} @@ -1187,7 +1238,7 @@ additional interfaces which get attached by a customer at some point after initial boot. Since the Azure datasource can now regenerate network configuration as metadata reports these new devices, we no longer - want the udev rules or netplan's 90-azure-hotplug.yaml to configure + want the udev rules or netplan's 90-hotplug-azure.yaml to configure networking on eth1 or greater as it might collide with cloud-init's configuration. diff -Nru cloud-init-18.4/cloudinit/sources/DataSourceBigstep.py cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceBigstep.py --- cloud-init-18.4/cloudinit/sources/DataSourceBigstep.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceBigstep.py 2019-01-28 20:07:03.000000000 +0000 @@ -36,6 +36,10 @@ self.userdata_raw = decoded["userdata_raw"] return True + def _get_subplatform(self): + """Return the subplatform metadata source details.""" + return 'metadata (%s)' % get_url_from_file() + def get_url_from_file(): try: diff -Nru cloud-init-18.4/cloudinit/sources/DataSourceCloudSigma.py cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceCloudSigma.py --- cloud-init-18.4/cloudinit/sources/DataSourceCloudSigma.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceCloudSigma.py 2019-01-28 20:07:03.000000000 +0000 @@ -7,7 +7,7 @@ from base64 import b64decode import re -from cloudinit.cs_utils import Cepko +from cloudinit.cs_utils import Cepko, SERIAL_PORT from cloudinit import log as logging from cloudinit import sources @@ -84,6 +84,10 @@ return True + def _get_subplatform(self): + """Return the subplatform metadata source details.""" + return 'cepko (%s)' % SERIAL_PORT + def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): """ Cleans up and uses the server's name if the latter is set. 
Otherwise diff -Nru cloud-init-18.4/cloudinit/sources/DataSourceConfigDrive.py cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceConfigDrive.py --- cloud-init-18.4/cloudinit/sources/DataSourceConfigDrive.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceConfigDrive.py 2019-01-28 20:07:03.000000000 +0000 @@ -160,6 +160,18 @@ LOG.debug("no network configuration available") return self._network_config + @property + def platform(self): + return 'openstack' + + def _get_subplatform(self): + """Return the subplatform metadata source details.""" + if self.seed_dir in self.source: + subplatform_type = 'seed-dir' + elif self.source.startswith('/dev'): + subplatform_type = 'config-disk' + return '%s (%s)' % (subplatform_type, self.source) + def read_config_drive(source_dir): reader = openstack.ConfigDriveReader(source_dir) diff -Nru cloud-init-18.4/cloudinit/sources/DataSourceEc2.py cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceEc2.py --- cloud-init-18.4/cloudinit/sources/DataSourceEc2.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceEc2.py 2019-01-28 20:07:03.000000000 +0000 @@ -28,18 +28,16 @@ STRICT_ID_DEFAULT = "warn" -class Platforms(object): - # TODO Rename and move to cloudinit.cloud.CloudNames - ALIYUN = "AliYun" - AWS = "AWS" - BRIGHTBOX = "Brightbox" - SEEDED = "Seeded" +class CloudNames(object): + ALIYUN = "aliyun" + AWS = "aws" + BRIGHTBOX = "brightbox" # UNKNOWN indicates no positive id. If strict_id is 'warn' or 'false', # then an attempt at the Ec2 Metadata service will be made. - UNKNOWN = "Unknown" + UNKNOWN = "unknown" # NO_EC2_METADATA indicates this platform does not have a Ec2 metadata # service available. No attempt at the Ec2 Metadata service will be made. - NO_EC2_METADATA = "No-EC2-Metadata" + NO_EC2_METADATA = "no-ec2-metadata" class DataSourceEc2(sources.DataSource): @@ -61,8 +59,6 @@ url_max_wait = 120 url_timeout = 50 - _cloud_platform = None - _network_config = sources.UNSET # Used to cache calculated network cfg v1 # Whether we want to get network configuration from the metadata service. @@ -71,30 +67,21 @@ def __init__(self, sys_cfg, distro, paths): super(DataSourceEc2, self).__init__(sys_cfg, distro, paths) self.metadata_address = None - self.seed_dir = os.path.join(paths.seed_dir, "ec2") def _get_cloud_name(self): """Return the cloud name as identified during _get_data.""" - return self.cloud_platform + return identify_platform() def _get_data(self): - seed_ret = {} - if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")): - self.userdata_raw = seed_ret['user-data'] - self.metadata = seed_ret['meta-data'] - LOG.debug("Using seeded ec2 data from %s", self.seed_dir) - self._cloud_platform = Platforms.SEEDED - return True - strict_mode, _sleep = read_strict_mode( util.get_cfg_by_path(self.sys_cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT), ("warn", None)) - LOG.debug("strict_mode: %s, cloud_platform=%s", - strict_mode, self.cloud_platform) - if strict_mode == "true" and self.cloud_platform == Platforms.UNKNOWN: + LOG.debug("strict_mode: %s, cloud_name=%s cloud_platform=%s", + strict_mode, self.cloud_name, self.platform) + if strict_mode == "true" and self.cloud_name == CloudNames.UNKNOWN: return False - elif self.cloud_platform == Platforms.NO_EC2_METADATA: + elif self.cloud_name == CloudNames.NO_EC2_METADATA: return False if self.perform_dhcp_setup: # Setup networking in init-local stage. 
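A note on the hasattr() dance in the platform property added just below (DataSourceNoCloud.py later in this diff uses the same idiom): cloud-init caches the DataSource object as a pickle across boots, so an object written by 18.4 can be reloaded by 18.5 without any attribute introduced in between. A schematic sketch of the pattern, with an invented class name:

    class UpgradeSafeDataSource(object):
        dsname = 'Ec2'

        @property
        def platform(self):
            # An instance unpickled from an older release may predate
            # _platform_type entirely, so probe before reading it.
            if not hasattr(self, '_platform_type'):
                self._platform_type = None
            if not self._platform_type:
                self._platform_type = self.dsname.lower()
            return self._platform_type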
@@ -103,13 +90,22 @@ return False try: with EphemeralDHCPv4(self.fallback_interface): - return util.log_time( + self._crawled_metadata = util.log_time( logfunc=LOG.debug, msg='Crawl of metadata service', - func=self._crawl_metadata) + func=self.crawl_metadata) except NoDHCPLeaseError: return False else: - return self._crawl_metadata() + self._crawled_metadata = util.log_time( + logfunc=LOG.debug, msg='Crawl of metadata service', + func=self.crawl_metadata) + if not self._crawled_metadata: + return False + self.metadata = self._crawled_metadata.get('meta-data', None) + self.userdata_raw = self._crawled_metadata.get('user-data', None) + self.identity = self._crawled_metadata.get( + 'dynamic', {}).get('instance-identity', {}).get('document', {}) + return True @property def launch_index(self): @@ -117,6 +113,15 @@ return None return self.metadata.get('ami-launch-index') + @property + def platform(self): + # Handle upgrade path of pickled ds + if not hasattr(self, '_platform_type'): + self._platform_type = DataSourceEc2.dsname.lower() + if not self._platform_type: + self._platform_type = DataSourceEc2.dsname.lower() + return self._platform_type + def get_metadata_api_version(self): """Get the best supported api version from the metadata service. @@ -144,7 +149,7 @@ return self.min_metadata_version def get_instance_id(self): - if self.cloud_platform == Platforms.AWS: + if self.cloud_name == CloudNames.AWS: # Prefer the ID from the instance identity document, but fall back if not getattr(self, 'identity', None): # If re-using cached datasource, it's get_data run didn't @@ -254,7 +259,7 @@ @property def availability_zone(self): try: - if self.cloud_platform == Platforms.AWS: + if self.cloud_name == CloudNames.AWS: return self.identity.get( 'availabilityZone', self.metadata['placement']['availability-zone']) @@ -265,7 +270,7 @@ @property def region(self): - if self.cloud_platform == Platforms.AWS: + if self.cloud_name == CloudNames.AWS: region = self.identity.get('region') # Fallback to trimming the availability zone if region is missing if self.availability_zone and not region: @@ -277,16 +282,10 @@ return az[:-1] return None - @property - def cloud_platform(self): # TODO rename cloud_name - if self._cloud_platform is None: - self._cloud_platform = identify_platform() - return self._cloud_platform - def activate(self, cfg, is_new_instance): if not is_new_instance: return - if self.cloud_platform == Platforms.UNKNOWN: + if self.cloud_name == CloudNames.UNKNOWN: warn_if_necessary( util.get_cfg_by_path(cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT), cfg) @@ -306,13 +305,13 @@ result = None no_network_metadata_on_aws = bool( 'network' not in self.metadata and - self.cloud_platform == Platforms.AWS) + self.cloud_name == CloudNames.AWS) if no_network_metadata_on_aws: LOG.debug("Metadata 'network' not present:" " Refreshing stale metadata from prior to upgrade.") util.log_time( logfunc=LOG.debug, msg='Re-crawl of metadata service', - func=self._crawl_metadata) + func=self.get_data) # Limit network configuration to only the primary/fallback nic iface = self.fallback_interface @@ -340,28 +339,32 @@ return super(DataSourceEc2, self).fallback_interface return self._fallback_interface - def _crawl_metadata(self): + def crawl_metadata(self): """Crawl metadata service when available. - @returns: True on success, False otherwise. + @returns: Dictionary of crawled metadata content containing the keys: + meta-data, user-data and dynamic. 
""" if not self.wait_for_metadata_service(): - return False + return {} api_version = self.get_metadata_api_version() + crawled_metadata = {} try: - self.userdata_raw = ec2.get_instance_userdata( + crawled_metadata['user-data'] = ec2.get_instance_userdata( api_version, self.metadata_address) - self.metadata = ec2.get_instance_metadata( + crawled_metadata['meta-data'] = ec2.get_instance_metadata( api_version, self.metadata_address) - if self.cloud_platform == Platforms.AWS: - self.identity = ec2.get_instance_identity( - api_version, self.metadata_address).get('document', {}) + if self.cloud_name == CloudNames.AWS: + identity = ec2.get_instance_identity( + api_version, self.metadata_address) + crawled_metadata['dynamic'] = {'instance-identity': identity} except Exception: util.logexc( LOG, "Failed reading from metadata address %s", self.metadata_address) - return False - return True + return {} + crawled_metadata['_metadata_api_version'] = api_version + return crawled_metadata class DataSourceEc2Local(DataSourceEc2): @@ -375,10 +378,10 @@ perform_dhcp_setup = True # Use dhcp before querying metadata def get_data(self): - supported_platforms = (Platforms.AWS,) - if self.cloud_platform not in supported_platforms: + supported_platforms = (CloudNames.AWS,) + if self.cloud_name not in supported_platforms: LOG.debug("Local Ec2 mode only supported on %s, not %s", - supported_platforms, self.cloud_platform) + supported_platforms, self.cloud_name) return False return super(DataSourceEc2Local, self).get_data() @@ -439,20 +442,20 @@ if (data['uuid'].startswith('ec2') and (data['uuid_source'] == 'hypervisor' or data['uuid'] == data['serial'])): - return Platforms.AWS + return CloudNames.AWS return None def identify_brightbox(data): if data['serial'].endswith('brightbox.com'): - return Platforms.BRIGHTBOX + return CloudNames.BRIGHTBOX def identify_platform(): - # identify the platform and return an entry in Platforms. + # identify the platform and return an entry in CloudNames. 
data = _collect_platform_data() - checks = (identify_aws, identify_brightbox, lambda x: Platforms.UNKNOWN) + checks = (identify_aws, identify_brightbox, lambda x: CloudNames.UNKNOWN) for checker in checks: try: result = checker(data) diff -Nru cloud-init-18.4/cloudinit/sources/DataSourceIBMCloud.py cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceIBMCloud.py --- cloud-init-18.4/cloudinit/sources/DataSourceIBMCloud.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceIBMCloud.py 2019-01-28 20:07:03.000000000 +0000 @@ -157,6 +157,10 @@ return True + def _get_subplatform(self): + """Return the subplatform metadata source details.""" + return '%s (%s)' % (self.platform, self.source) + def check_instance_id(self, sys_cfg): """quickly (local check only) if self.instance_id is still valid diff -Nru cloud-init-18.4/cloudinit/sources/DataSourceMAAS.py cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceMAAS.py --- cloud-init-18.4/cloudinit/sources/DataSourceMAAS.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceMAAS.py 2019-01-28 20:07:03.000000000 +0000 @@ -109,6 +109,10 @@ LOG.warning("Invalid content in vendor-data: %s", e) self.vendordata_raw = None + def _get_subplatform(self): + """Return the subplatform metadata source details.""" + return 'seed-dir (%s)' % self.base_url + def wait_for_metadata_service(self, url): mcfg = self.ds_cfg max_wait = 120 diff -Nru cloud-init-18.4/cloudinit/sources/DataSourceNoCloud.py cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceNoCloud.py --- cloud-init-18.4/cloudinit/sources/DataSourceNoCloud.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceNoCloud.py 2019-01-28 20:07:03.000000000 +0000 @@ -186,6 +186,27 @@ self._network_eni = mydata['meta-data'].get('network-interfaces') return True + @property + def platform_type(self): + # Handle upgrade path of pickled ds + if not hasattr(self, '_platform_type'): + self._platform_type = None + if not self._platform_type: + self._platform_type = 'lxd' if util.is_lxd() else 'nocloud' + return self._platform_type + + def _get_cloud_name(self): + """Return unknown when 'cloud-name' key is absent from metadata.""" + return sources.METADATA_UNKNOWN + + def _get_subplatform(self): + """Return the subplatform metadata source details.""" + if self.seed.startswith('/dev'): + subplatform_type = 'config-disk' + else: + subplatform_type = 'seed-dir' + return '%s (%s)' % (subplatform_type, self.seed) + def check_instance_id(self, sys_cfg): # quickly (local check only) if self.instance_id is still valid # we check kernel command line or files. @@ -290,6 +311,35 @@ return True +def _maybe_remove_top_network(cfg): + """If network-config contains top level 'network' key, then remove it. + + Some providers of network configuration may provide a top level + 'network' key (LP: #1798117) even though it is not necessary. + + Be friendly and remove it if it really seems so. 
+ + Return the original value if no change or the updated value if changed.""" + nullval = object() + network_val = cfg.get('network', nullval) + if network_val is nullval: + return cfg + bmsg = 'Top level network key in network-config %s: %s' + if not isinstance(network_val, dict): + LOG.debug(bmsg, "was not a dict", cfg) + return cfg + if len(list(cfg.keys())) != 1: + LOG.debug(bmsg, "had multiple top level keys", cfg) + return cfg + if network_val.get('config') == "disabled": + LOG.debug(bmsg, "was config/disabled", cfg) + elif not all(('config' in network_val, 'version' in network_val)): + LOG.debug(bmsg, "but missing 'config' or 'version'", cfg) + return cfg + LOG.debug(bmsg, "fixed by removing shifting network.", cfg) + return network_val + + def _merge_new_seed(cur, seeded): ret = cur.copy() @@ -299,7 +349,8 @@ ret['meta-data'] = util.mergemanydict([cur['meta-data'], newmd]) if seeded.get('network-config'): - ret['network-config'] = util.load_yaml(seeded['network-config']) + ret['network-config'] = _maybe_remove_top_network( + util.load_yaml(seeded.get('network-config'))) if 'user-data' in seeded: ret['user-data'] = seeded['user-data'] diff -Nru cloud-init-18.4/cloudinit/sources/DataSourceNone.py cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceNone.py --- cloud-init-18.4/cloudinit/sources/DataSourceNone.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceNone.py 2019-01-28 20:07:03.000000000 +0000 @@ -28,6 +28,10 @@ self.metadata = self.ds_cfg['metadata'] return True + def _get_subplatform(self): + """Return the subplatform metadata source details.""" + return 'config' + def get_instance_id(self): return 'iid-datasource-none' diff -Nru cloud-init-18.4/cloudinit/sources/DataSourceOpenNebula.py cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceOpenNebula.py --- cloud-init-18.4/cloudinit/sources/DataSourceOpenNebula.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceOpenNebula.py 2019-01-28 20:07:03.000000000 +0000 @@ -95,6 +95,14 @@ self.userdata_raw = results.get('userdata') return True + def _get_subplatform(self): + """Return the subplatform metadata source details.""" + if self.seed_dir in self.seed: + subplatform_type = 'seed-dir' + else: + subplatform_type = 'config-disk' + return '%s (%s)' % (subplatform_type, self.seed) + @property def network_config(self): if self.network is not None: @@ -329,7 +337,9 @@ (output, _error) = util.subp(cmd, data=bcmd) # exclude vars in bash that change on their own or that we used - excluded = ("RANDOM", "LINENO", "SECONDS", "_", "__v") + excluded = ( + "EPOCHREALTIME", "EPOCHSECONDS", "RANDOM", "LINENO", "SECONDS", "_", + "__v") preset = {} ret = {} target = None diff -Nru cloud-init-18.4/cloudinit/sources/DataSourceOracle.py cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceOracle.py --- cloud-init-18.4/cloudinit/sources/DataSourceOracle.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceOracle.py 2019-01-28 20:07:03.000000000 +0000 @@ -91,6 +91,10 @@ def crawl_metadata(self): return read_metadata() + def _get_subplatform(self): + """Return the subplatform metadata source details.""" + return 'metadata (%s)' % METADATA_ENDPOINT + def check_instance_id(self, sys_cfg): """quickly check (local only) if self.instance_id is still valid diff -Nru cloud-init-18.4/cloudinit/sources/DataSourceOVF.py cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceOVF.py --- 
cloud-init-18.4/cloudinit/sources/DataSourceOVF.py 2018-10-02 20:52:24.000000000 +0000
+++ cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceOVF.py 2019-01-28 20:07:03.000000000 +0000
@@ -232,11 +232,11 @@
                     GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS)
         else:
-            np = {'iso': transport_iso9660,
-                  'vmware-guestd': transport_vmware_guestd, }
+            np = [('com.vmware.guestInfo', transport_vmware_guestinfo),
+                  ('iso', transport_iso9660)]
             name = None
-            for (name, transfunc) in np.items():
-                (contents, _dev, _fname) = transfunc()
+            for name, transfunc in np:
+                contents = transfunc()
                 if contents:
                     break
             if contents:
@@ -275,6 +275,12 @@
             self.cfg = cfg
         return True

+    def _get_subplatform(self):
+        system_type = util.read_dmi_data("system-product-name").lower()
+        if system_type == 'vmware':
+            return 'vmware (%s)' % self.seed
+        return 'ovf (%s)' % self.seed
+
     def get_public_ssh_keys(self):
         if 'public-keys' not in self.metadata:
             return []
@@ -458,8 +464,8 @@
     return cdmatch.match(devname) is not None

-# Transport functions take no input and return
-# a 3 tuple of content, path, filename
+# Transport functions are called with no arguments and return
+# either None (indicating not present) or string content of an ovf-env.xml

 def transport_iso9660(require_iso=True):
     # Go through mounts to see if it was already mounted
@@ -471,9 +477,9 @@
         if not maybe_cdrom_device(dev):
             continue
         mp = info['mountpoint']
-        (fname, contents) = get_ovf_env(mp)
+        (_fname, contents) = get_ovf_env(mp)
         if contents is not False:
-            return (contents, dev, fname)
+            return contents

     if require_iso:
         mtype = "iso9660"
@@ -486,29 +492,33 @@
             if maybe_cdrom_device(dev)]
     for dev in devs:
         try:
-            (fname, contents) = util.mount_cb(dev, get_ovf_env, mtype=mtype)
+            (_fname, contents) = util.mount_cb(dev, get_ovf_env, mtype=mtype)
         except util.MountFailedError:
             LOG.debug("%s not mountable as iso9660", dev)
             continue
         if contents is not False:
-            return (contents, dev, fname)
+            return contents

-    return (False, None, None)
+    return None


-def transport_vmware_guestd():
-    # http://blogs.vmware.com/vapp/2009/07/ \
-    #    selfconfiguration-and-the-ovf-environment.html
-    # try:
-    #     cmd = ['vmware-guestd', '--cmd', 'info-get guestinfo.ovfEnv']
-    #     (out, err) = subp(cmd)
-    #     return(out, 'guestinfo.ovfEnv', 'vmware-guestd')
-    # except:
-    #     # would need to error check here and see why this failed
-    #     # to know if log/error should be raised
-    #     return(False, None, None)
-    return (False, None, None)
+def transport_vmware_guestinfo():
+    rpctool = "vmware-rpctool"
+    not_found = None
+    if not util.which(rpctool):
+        return not_found
+    cmd = [rpctool, "info-get guestinfo.ovfEnv"]
+    try:
+        out, _err = util.subp(cmd)
+        if out:
+            return out
+        LOG.debug("cmd %s exited 0 with empty stdout: %s", cmd, out)
+    except util.ProcessExecutionError as e:
+        if e.exit_code != 1:
+            LOG.warning("%s exited with code %d", rpctool, e.exit_code)
+        LOG.debug(e)
+    return not_found


 def find_child(node, filter_func):
diff -Nru cloud-init-18.4/cloudinit/sources/DataSourceScaleway.py cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceScaleway.py
--- cloud-init-18.4/cloudinit/sources/DataSourceScaleway.py 2018-10-02 20:52:24.000000000 +0000
+++ cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceScaleway.py 2019-01-28 20:07:03.000000000 +0000
@@ -253,7 +253,16 @@
         return self.metadata['id']

     def get_public_ssh_keys(self):
-        return [key['key'] for key in self.metadata['ssh_public_keys']]
+        ssh_keys = [key['key'] for key in self.metadata['ssh_public_keys']]
+
+        akeypre = "AUTHORIZED_KEY="
+        plen = len(akeypre)
+        for tag in self.metadata.get('tags', []):
+            if not tag.startswith(akeypre):
+                continue
+            ssh_keys.append(tag[plen:].replace("_", " "))
+
+        return ssh_keys

     def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
         return self.metadata['hostname']
diff -Nru cloud-init-18.4/cloudinit/sources/DataSourceSmartOS.py cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceSmartOS.py
--- cloud-init-18.4/cloudinit/sources/DataSourceSmartOS.py 2018-10-02 20:52:24.000000000 +0000
+++ cloud-init-18.5-21-g8ee294d5/cloudinit/sources/DataSourceSmartOS.py 2019-01-28 20:07:03.000000000 +0000
@@ -303,6 +303,9 @@
         self._set_provisioned()
         return True

+    def _get_subplatform(self):
+        return 'serial (%s)' % SERIAL_DEVICE
+
     def device_name_to_device(self, name):
         return self.ds_cfg['disk_aliases'].get(name)

diff -Nru cloud-init-18.4/cloudinit/sources/helpers/netlink.py cloud-init-18.5-21-g8ee294d5/cloudinit/sources/helpers/netlink.py
--- cloud-init-18.4/cloudinit/sources/helpers/netlink.py 1970-01-01 00:00:00.000000000 +0000
+++ cloud-init-18.5-21-g8ee294d5/cloudinit/sources/helpers/netlink.py 2019-01-28 20:07:03.000000000 +0000
@@ -0,0 +1,250 @@
+# Author: Tamilmani Manoharan
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import log as logging
+from cloudinit import util
+from collections import namedtuple
+
+import os
+import select
+import socket
+import struct
+
+LOG = logging.getLogger(__name__)
+
+# http://man7.org/linux/man-pages/man7/netlink.7.html
+RTMGRP_LINK = 1
+NLMSG_NOOP = 1
+NLMSG_ERROR = 2
+NLMSG_DONE = 3
+RTM_NEWLINK = 16
+RTM_DELLINK = 17
+RTM_GETLINK = 18
+RTM_SETLINK = 19
+MAX_SIZE = 65535
+RTA_DATA_OFFSET = 32
+MSG_TYPE_OFFSET = 16
+SELECT_TIMEOUT = 60
+
+NLMSGHDR_FMT = "IHHII"
+IFINFOMSG_FMT = "BHiII"
+NLMSGHDR_SIZE = struct.calcsize(NLMSGHDR_FMT)
+IFINFOMSG_SIZE = struct.calcsize(IFINFOMSG_FMT)
+RTATTR_START_OFFSET = NLMSGHDR_SIZE + IFINFOMSG_SIZE
+RTA_DATA_START_OFFSET = 4
+PAD_ALIGNMENT = 4
+
+IFLA_IFNAME = 3
+IFLA_OPERSTATE = 16
+
+# https://www.kernel.org/doc/Documentation/networking/operstates.txt
+OPER_UNKNOWN = 0
+OPER_NOTPRESENT = 1
+OPER_DOWN = 2
+OPER_LOWERLAYERDOWN = 3
+OPER_TESTING = 4
+OPER_DORMANT = 5
+OPER_UP = 6
+
+RTAAttr = namedtuple('RTAAttr', ['length', 'rta_type', 'data'])
+InterfaceOperstate = namedtuple('InterfaceOperstate', ['ifname', 'operstate'])
+NetlinkHeader = namedtuple('NetlinkHeader', ['length', 'type', 'flags', 'seq',
+                                             'pid'])
+
+
+class NetlinkCreateSocketError(RuntimeError):
+    '''Raised if netlink socket fails during create or bind.'''
+    pass
+
+
+def create_bound_netlink_socket():
+    '''Creates netlink socket and bind on netlink group to catch interface
+    down/up events. The socket will bound only on RTMGRP_LINK (which only
+    includes RTM_NEWLINK/RTM_DELLINK/RTM_GETLINK events). The socket is set to
+    non-blocking mode since we're only receiving messages.
+ + :returns: netlink socket in non-blocking mode + :raises: NetlinkCreateSocketError + ''' + try: + netlink_socket = socket.socket(socket.AF_NETLINK, + socket.SOCK_RAW, + socket.NETLINK_ROUTE) + netlink_socket.bind((os.getpid(), RTMGRP_LINK)) + netlink_socket.setblocking(0) + except socket.error as e: + msg = "Exception during netlink socket create: %s" % e + raise NetlinkCreateSocketError(msg) + LOG.debug("Created netlink socket") + return netlink_socket + + +def get_netlink_msg_header(data): + '''Gets netlink message type and length + + :param: data read from netlink socket + :returns: netlink message type + :raises: AssertionError if data is None or data is not >= NLMSGHDR_SIZE + struct nlmsghdr { + __u32 nlmsg_len; /* Length of message including header */ + __u16 nlmsg_type; /* Type of message content */ + __u16 nlmsg_flags; /* Additional flags */ + __u32 nlmsg_seq; /* Sequence number */ + __u32 nlmsg_pid; /* Sender port ID */ + }; + ''' + assert (data is not None), ("data is none") + assert (len(data) >= NLMSGHDR_SIZE), ( + "data is smaller than netlink message header") + msg_len, msg_type, flags, seq, pid = struct.unpack(NLMSGHDR_FMT, + data[:MSG_TYPE_OFFSET]) + LOG.debug("Got netlink msg of type %d", msg_type) + return NetlinkHeader(msg_len, msg_type, flags, seq, pid) + + +def read_netlink_socket(netlink_socket, timeout=None): + '''Select and read from the netlink socket if ready. + + :param: netlink_socket: specify which socket object to read from + :param: timeout: specify a timeout value (integer) to wait while reading, + if none, it will block indefinitely until socket ready for read + :returns: string of data read (max length = ) from socket, + if no data read, returns None + :raises: AssertionError if netlink_socket is None + ''' + assert (netlink_socket is not None), ("netlink socket is none") + read_set, _, _ = select.select([netlink_socket], [], [], timeout) + # Incase of timeout,read_set doesn't contain netlink socket. + # just return from this function + if netlink_socket not in read_set: + return None + LOG.debug("netlink socket ready for read") + data = netlink_socket.recv(MAX_SIZE) + if data is None: + LOG.error("Reading from Netlink socket returned no data") + return data + + +def unpack_rta_attr(data, offset): + '''Unpack a single rta attribute. + + :param: data: string of data read from netlink socket + :param: offset: starting offset of RTA Attribute + :return: RTAAttr object with length, type and data. On error, return None. + :raises: AssertionError if data is None or offset is not integer. + ''' + assert (data is not None), ("data is none") + assert (type(offset) == int), ("offset is not integer") + assert (offset >= RTATTR_START_OFFSET), ( + "rta offset is less than expected length") + length = rta_type = 0 + attr_data = None + try: + length = struct.unpack_from("H", data, offset=offset)[0] + rta_type = struct.unpack_from("H", data, offset=offset+2)[0] + except struct.error: + return None # Should mean our offset is >= remaining data + + # Unpack just the attribute's data. Offset by 4 to skip length/type header + attr_data = data[offset+RTA_DATA_START_OFFSET:offset+length] + return RTAAttr(length, rta_type, attr_data) + + +def read_rta_oper_state(data): + '''Reads Interface name and operational state from RTA Data. + + :param: data: string of data read from netlink socket + :returns: InterfaceOperstate object containing if_name and oper_state. + None if data does not contain valid IFLA_OPERSTATE and + IFLA_IFNAME messages. 
+ :raises: AssertionError if data is None or length of data is + smaller than RTATTR_START_OFFSET. + ''' + assert (data is not None), ("data is none") + assert (len(data) > RTATTR_START_OFFSET), ( + "length of data is smaller than RTATTR_START_OFFSET") + ifname = operstate = None + offset = RTATTR_START_OFFSET + while offset <= len(data): + attr = unpack_rta_attr(data, offset) + if not attr or attr.length == 0: + break + # Each attribute is 4-byte aligned. Determine pad length. + padlen = (PAD_ALIGNMENT - + (attr.length % PAD_ALIGNMENT)) % PAD_ALIGNMENT + offset += attr.length + padlen + + if attr.rta_type == IFLA_OPERSTATE: + operstate = ord(attr.data) + elif attr.rta_type == IFLA_IFNAME: + interface_name = util.decode_binary(attr.data, 'utf-8') + ifname = interface_name.strip('\0') + if not ifname or operstate is None: + return None + LOG.debug("rta attrs: ifname %s operstate %d", ifname, operstate) + return InterfaceOperstate(ifname, operstate) + + +def wait_for_media_disconnect_connect(netlink_socket, ifname): + '''Block until media disconnect and connect has happened on an interface. + Listens on netlink socket to receive netlink events and when the carrier + changes from 0 to 1, it considers event has happened and + return from this function + + :param: netlink_socket: netlink_socket to receive events + :param: ifname: Interface name to lookout for netlink events + :raises: AssertionError if netlink_socket is None or ifname is None. + ''' + assert (netlink_socket is not None), ("netlink socket is none") + assert (ifname is not None), ("interface name is none") + assert (len(ifname) > 0), ("interface name cannot be empty") + carrier = OPER_UP + prevCarrier = OPER_UP + data = bytes() + LOG.debug("Wait for media disconnect and reconnect to happen") + while True: + recv_data = read_netlink_socket(netlink_socket, SELECT_TIMEOUT) + if recv_data is None: + continue + LOG.debug('read %d bytes from socket', len(recv_data)) + data += recv_data + LOG.debug('Length of data after concat %d', len(data)) + offset = 0 + datalen = len(data) + while offset < datalen: + nl_msg = data[offset:] + if len(nl_msg) < NLMSGHDR_SIZE: + LOG.debug("Data is smaller than netlink header") + break + nlheader = get_netlink_msg_header(nl_msg) + if len(nl_msg) < nlheader.length: + LOG.debug("Partial data. Smaller than netlink message") + break + padlen = (nlheader.length+PAD_ALIGNMENT-1) & ~(PAD_ALIGNMENT-1) + offset = offset + padlen + LOG.debug('offset to next netlink message: %d', offset) + # Ignore any messages not new link or del link + if nlheader.type not in [RTM_NEWLINK, RTM_DELLINK]: + continue + interface_state = read_rta_oper_state(nl_msg) + if interface_state is None: + LOG.debug('Failed to read rta attributes: %s', interface_state) + continue + if interface_state.ifname != ifname: + LOG.debug( + "Ignored netlink event on interface %s. 
Waiting for %s.", + interface_state.ifname, ifname) + continue + if interface_state.operstate not in [OPER_UP, OPER_DOWN]: + continue + prevCarrier = carrier + carrier = interface_state.operstate + # check for carrier down, up sequence + isVnetSwitch = (prevCarrier == OPER_DOWN) and (carrier == OPER_UP) + if isVnetSwitch: + LOG.debug("Media switch happened on %s.", ifname) + return + data = data[offset:] + +# vi: ts=4 expandtab diff -Nru cloud-init-18.4/cloudinit/sources/helpers/tests/test_netlink.py cloud-init-18.5-21-g8ee294d5/cloudinit/sources/helpers/tests/test_netlink.py --- cloud-init-18.4/cloudinit/sources/helpers/tests/test_netlink.py 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/sources/helpers/tests/test_netlink.py 2019-01-28 20:07:03.000000000 +0000 @@ -0,0 +1,373 @@ +# Author: Tamilmani Manoharan +# +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit.tests.helpers import CiTestCase, mock +import socket +import struct +import codecs +from cloudinit.sources.helpers.netlink import ( + NetlinkCreateSocketError, create_bound_netlink_socket, read_netlink_socket, + read_rta_oper_state, unpack_rta_attr, wait_for_media_disconnect_connect, + OPER_DOWN, OPER_UP, OPER_DORMANT, OPER_LOWERLAYERDOWN, OPER_NOTPRESENT, + OPER_TESTING, OPER_UNKNOWN, RTATTR_START_OFFSET, RTM_NEWLINK, RTM_SETLINK, + RTM_GETLINK, MAX_SIZE) + + +def int_to_bytes(i): + '''convert integer to binary: eg: 1 to \x01''' + hex_value = '{0:x}'.format(i) + hex_value = '0' * (len(hex_value) % 2) + hex_value + return codecs.decode(hex_value, 'hex_codec') + + +class TestCreateBoundNetlinkSocket(CiTestCase): + + @mock.patch('cloudinit.sources.helpers.netlink.socket.socket') + def test_socket_error_on_create(self, m_socket): + '''create_bound_netlink_socket catches socket creation exception''' + + """NetlinkCreateSocketError is raised when socket creation errors.""" + m_socket.side_effect = socket.error("Fake socket failure") + with self.assertRaises(NetlinkCreateSocketError) as ctx_mgr: + create_bound_netlink_socket() + self.assertEqual( + 'Exception during netlink socket create: Fake socket failure', + str(ctx_mgr.exception)) + + +class TestReadNetlinkSocket(CiTestCase): + + @mock.patch('cloudinit.sources.helpers.netlink.socket.socket') + @mock.patch('cloudinit.sources.helpers.netlink.select.select') + def test_read_netlink_socket(self, m_select, m_socket): + '''read_netlink_socket able to receive data''' + data = 'netlinktest' + m_select.return_value = [m_socket], None, None + m_socket.recv.return_value = data + recv_data = read_netlink_socket(m_socket, 2) + m_select.assert_called_with([m_socket], [], [], 2) + m_socket.recv.assert_called_with(MAX_SIZE) + self.assertIsNotNone(recv_data) + self.assertEqual(recv_data, data) + + @mock.patch('cloudinit.sources.helpers.netlink.socket.socket') + @mock.patch('cloudinit.sources.helpers.netlink.select.select') + def test_netlink_read_timeout(self, m_select, m_socket): + '''read_netlink_socket should timeout if nothing to read''' + m_select.return_value = [], None, None + data = read_netlink_socket(m_socket, 1) + m_select.assert_called_with([m_socket], [], [], 1) + self.assertEqual(m_socket.recv.call_count, 0) + self.assertIsNone(data) + + def test_read_invalid_socket(self): + '''read_netlink_socket raises assert error if socket is invalid''' + socket = None + with self.assertRaises(AssertionError) as context: + read_netlink_socket(socket, 1) + self.assertTrue('netlink socket is none' in str(context.exception)) 
+ + +class TestParseNetlinkMessage(CiTestCase): + + def test_read_rta_oper_state(self): + '''read_rta_oper_state could parse netlink message and extract data''' + ifname = "eth0" + bytes = ifname.encode("utf-8") + buf = bytearray(48) + struct.pack_into("HH4sHHc", buf, RTATTR_START_OFFSET, 8, 3, bytes, 5, + 16, int_to_bytes(OPER_DOWN)) + interface_state = read_rta_oper_state(buf) + self.assertEqual(interface_state.ifname, ifname) + self.assertEqual(interface_state.operstate, OPER_DOWN) + + def test_read_none_data(self): + '''read_rta_oper_state raises assert error if data is none''' + data = None + with self.assertRaises(AssertionError) as context: + read_rta_oper_state(data) + self.assertTrue('data is none', str(context.exception)) + + def test_read_invalid_rta_operstate_none(self): + '''read_rta_oper_state returns none if operstate is none''' + ifname = "eth0" + buf = bytearray(40) + bytes = ifname.encode("utf-8") + struct.pack_into("HH4s", buf, RTATTR_START_OFFSET, 8, 3, bytes) + interface_state = read_rta_oper_state(buf) + self.assertIsNone(interface_state) + + def test_read_invalid_rta_ifname_none(self): + '''read_rta_oper_state returns none if ifname is none''' + buf = bytearray(40) + struct.pack_into("HHc", buf, RTATTR_START_OFFSET, 5, 16, + int_to_bytes(OPER_DOWN)) + interface_state = read_rta_oper_state(buf) + self.assertIsNone(interface_state) + + def test_read_invalid_data_len(self): + '''raise assert error if data size is smaller than required size''' + buf = bytearray(32) + with self.assertRaises(AssertionError) as context: + read_rta_oper_state(buf) + self.assertTrue('length of data is smaller than RTATTR_START_OFFSET' in + str(context.exception)) + + def test_unpack_rta_attr_none_data(self): + '''unpack_rta_attr raises assert error if data is none''' + data = None + with self.assertRaises(AssertionError) as context: + unpack_rta_attr(data, RTATTR_START_OFFSET) + self.assertTrue('data is none' in str(context.exception)) + + def test_unpack_rta_attr_invalid_offset(self): + '''unpack_rta_attr raises assert error if offset is invalid''' + data = bytearray(48) + with self.assertRaises(AssertionError) as context: + unpack_rta_attr(data, "offset") + self.assertTrue('offset is not integer' in str(context.exception)) + with self.assertRaises(AssertionError) as context: + unpack_rta_attr(data, 31) + self.assertTrue('rta offset is less than expected length' in + str(context.exception)) + + +@mock.patch('cloudinit.sources.helpers.netlink.socket.socket') +@mock.patch('cloudinit.sources.helpers.netlink.read_netlink_socket') +class TestWaitForMediaDisconnectConnect(CiTestCase): + with_logs = True + + def _media_switch_data(self, ifname, msg_type, operstate): + '''construct netlink data with specified fields''' + if ifname and operstate is not None: + data = bytearray(48) + bytes = ifname.encode("utf-8") + struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3, + bytes, 5, 16, int_to_bytes(operstate)) + elif ifname: + data = bytearray(40) + bytes = ifname.encode("utf-8") + struct.pack_into("HH4s", data, RTATTR_START_OFFSET, 8, 3, bytes) + elif operstate: + data = bytearray(40) + struct.pack_into("HHc", data, RTATTR_START_OFFSET, 5, 16, + int_to_bytes(operstate)) + struct.pack_into("=LHHLL", data, 0, len(data), msg_type, 0, 0, 0) + return data + + def test_media_down_up_scenario(self, m_read_netlink_socket, + m_socket): + '''Test for media down up sequence for required interface name''' + ifname = "eth0" + # construct data for Oper State down + data_op_down = 
self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) + # construct data for Oper State up + data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) + m_read_netlink_socket.side_effect = [data_op_down, data_op_up] + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 2) + + def test_wait_for_media_switch_diff_interface(self, m_read_netlink_socket, + m_socket): + '''wait_for_media_disconnect_connect ignores unexpected interfaces. + + The first two messages are for other interfaces and last two are for + expected interface. So the function exit only after receiving last + 2 messages and therefore the call count for m_read_netlink_socket + has to be 4 + ''' + other_ifname = "eth1" + expected_ifname = "eth0" + data_op_down_eth1 = self._media_switch_data( + other_ifname, RTM_NEWLINK, OPER_DOWN) + data_op_up_eth1 = self._media_switch_data( + other_ifname, RTM_NEWLINK, OPER_UP) + data_op_down_eth0 = self._media_switch_data( + expected_ifname, RTM_NEWLINK, OPER_DOWN) + data_op_up_eth0 = self._media_switch_data( + expected_ifname, RTM_NEWLINK, OPER_UP) + m_read_netlink_socket.side_effect = [data_op_down_eth1, + data_op_up_eth1, + data_op_down_eth0, + data_op_up_eth0] + wait_for_media_disconnect_connect(m_socket, expected_ifname) + self.assertIn('Ignored netlink event on interface %s' % other_ifname, + self.logs.getvalue()) + self.assertEqual(m_read_netlink_socket.call_count, 4) + + def test_invalid_msgtype_getlink(self, m_read_netlink_socket, m_socket): + '''wait_for_media_disconnect_connect ignores GETLINK events. + + The first two messages are for oper down and up for RTM_GETLINK type + which netlink module will ignore. The last 2 messages are RTM_NEWLINK + with oper state down and up messages. Therefore the call count for + m_read_netlink_socket has to be 4 ignoring first 2 messages + of RTM_GETLINK + ''' + ifname = "eth0" + data_getlink_down = self._media_switch_data( + ifname, RTM_GETLINK, OPER_DOWN) + data_getlink_up = self._media_switch_data( + ifname, RTM_GETLINK, OPER_UP) + data_newlink_down = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_DOWN) + data_newlink_up = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_UP) + m_read_netlink_socket.side_effect = [data_getlink_down, + data_getlink_up, + data_newlink_down, + data_newlink_up] + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 4) + + def test_invalid_msgtype_setlink(self, m_read_netlink_socket, m_socket): + '''wait_for_media_disconnect_connect ignores SETLINK events. + + The first two messages are for oper down and up for RTM_GETLINK type + which it will ignore. 3rd and 4th messages are RTM_NEWLINK with down + and up messages. This function should exit after 4th messages since it + sees down->up scenario. 
So the call count for m_read_netlink_socket + has to be 4 ignoring first 2 messages of RTM_GETLINK and + last 2 messages of RTM_NEWLINK + ''' + ifname = "eth0" + data_setlink_down = self._media_switch_data( + ifname, RTM_SETLINK, OPER_DOWN) + data_setlink_up = self._media_switch_data( + ifname, RTM_SETLINK, OPER_UP) + data_newlink_down = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_DOWN) + data_newlink_up = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_UP) + m_read_netlink_socket.side_effect = [data_setlink_down, + data_setlink_up, + data_newlink_down, + data_newlink_up, + data_newlink_down, + data_newlink_up] + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 4) + + def test_netlink_invalid_switch_scenario(self, m_read_netlink_socket, + m_socket): + '''returns only if it receives UP event after a DOWN event''' + ifname = "eth0" + data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) + data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) + data_op_dormant = self._media_switch_data(ifname, RTM_NEWLINK, + OPER_DORMANT) + data_op_notpresent = self._media_switch_data(ifname, RTM_NEWLINK, + OPER_NOTPRESENT) + data_op_lowerdown = self._media_switch_data(ifname, RTM_NEWLINK, + OPER_LOWERLAYERDOWN) + data_op_testing = self._media_switch_data(ifname, RTM_NEWLINK, + OPER_TESTING) + data_op_unknown = self._media_switch_data(ifname, RTM_NEWLINK, + OPER_UNKNOWN) + m_read_netlink_socket.side_effect = [data_op_up, data_op_up, + data_op_dormant, data_op_up, + data_op_notpresent, data_op_up, + data_op_lowerdown, data_op_up, + data_op_testing, data_op_up, + data_op_unknown, data_op_up, + data_op_down, data_op_up] + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 14) + + def test_netlink_valid_inbetween_transitions(self, m_read_netlink_socket, + m_socket): + '''wait_for_media_disconnect_connect handles in between transitions''' + ifname = "eth0" + data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) + data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) + data_op_dormant = self._media_switch_data(ifname, RTM_NEWLINK, + OPER_DORMANT) + data_op_unknown = self._media_switch_data(ifname, RTM_NEWLINK, + OPER_UNKNOWN) + m_read_netlink_socket.side_effect = [data_op_down, data_op_dormant, + data_op_unknown, data_op_up] + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 4) + + def test_netlink_invalid_operstate(self, m_read_netlink_socket, m_socket): + '''wait_for_media_disconnect_connect should handle invalid operstates. + + The function should not fail and return even if it receives invalid + operstates. It always should wait for down up sequence. 
+ ''' + ifname = "eth0" + data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) + data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) + data_op_invalid = self._media_switch_data(ifname, RTM_NEWLINK, 7) + m_read_netlink_socket.side_effect = [data_op_invalid, data_op_up, + data_op_down, data_op_invalid, + data_op_up] + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 5) + + def test_wait_invalid_socket(self, m_read_netlink_socket, m_socket): + '''wait_for_media_disconnect_connect handle none netlink socket.''' + socket = None + ifname = "eth0" + with self.assertRaises(AssertionError) as context: + wait_for_media_disconnect_connect(socket, ifname) + self.assertTrue('netlink socket is none' in str(context.exception)) + + def test_wait_invalid_ifname(self, m_read_netlink_socket, m_socket): + '''wait_for_media_disconnect_connect handle none interface name''' + ifname = None + with self.assertRaises(AssertionError) as context: + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertTrue('interface name is none' in str(context.exception)) + ifname = "" + with self.assertRaises(AssertionError) as context: + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertTrue('interface name cannot be empty' in + str(context.exception)) + + def test_wait_invalid_rta_attr(self, m_read_netlink_socket, m_socket): + ''' wait_for_media_disconnect_connect handles invalid rta data''' + ifname = "eth0" + data_invalid1 = self._media_switch_data(None, RTM_NEWLINK, OPER_DOWN) + data_invalid2 = self._media_switch_data(ifname, RTM_NEWLINK, None) + data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) + data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) + m_read_netlink_socket.side_effect = [data_invalid1, data_invalid2, + data_op_down, data_op_up] + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 4) + + def test_read_multiple_netlink_msgs(self, m_read_netlink_socket, m_socket): + '''Read multiple messages in single receive call''' + ifname = "eth0" + bytes = ifname.encode("utf-8") + data = bytearray(96) + struct.pack_into("=LHHLL", data, 0, 48, RTM_NEWLINK, 0, 0, 0) + struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3, + bytes, 5, 16, int_to_bytes(OPER_DOWN)) + struct.pack_into("=LHHLL", data, 48, 48, RTM_NEWLINK, 0, 0, 0) + struct.pack_into("HH4sHHc", data, 48 + RTATTR_START_OFFSET, 8, + 3, bytes, 5, 16, int_to_bytes(OPER_UP)) + m_read_netlink_socket.return_value = data + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 1) + + def test_read_partial_netlink_msgs(self, m_read_netlink_socket, m_socket): + '''Read partial messages in receive call''' + ifname = "eth0" + bytes = ifname.encode("utf-8") + data1 = bytearray(112) + data2 = bytearray(32) + struct.pack_into("=LHHLL", data1, 0, 48, RTM_NEWLINK, 0, 0, 0) + struct.pack_into("HH4sHHc", data1, RTATTR_START_OFFSET, 8, 3, + bytes, 5, 16, int_to_bytes(OPER_DOWN)) + struct.pack_into("=LHHLL", data1, 48, 48, RTM_NEWLINK, 0, 0, 0) + struct.pack_into("HH4sHHc", data1, 80, 8, 3, bytes, 5, 16, + int_to_bytes(OPER_DOWN)) + struct.pack_into("=LHHLL", data1, 96, 48, RTM_NEWLINK, 0, 0, 0) + struct.pack_into("HH4sHHc", data2, 16, 8, 3, bytes, 5, 16, + int_to_bytes(OPER_UP)) + m_read_netlink_socket.side_effect = [data1, data2] + wait_for_media_disconnect_connect(m_socket, ifname) + 
self.assertEqual(m_read_netlink_socket.call_count, 2) diff -Nru cloud-init-18.4/cloudinit/sources/helpers/vmware/imc/config_nic.py cloud-init-18.5-21-g8ee294d5/cloudinit/sources/helpers/vmware/imc/config_nic.py --- cloud-init-18.4/cloudinit/sources/helpers/vmware/imc/config_nic.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/sources/helpers/vmware/imc/config_nic.py 2019-01-28 20:07:03.000000000 +0000 @@ -165,9 +165,8 @@ # Add routes if there is no primary nic if not self._primaryNic and v4.gateways: - route_list.extend(self.gen_ipv4_route(nic, - v4.gateways, - v4.netmask)) + subnet.update( + {'routes': self.gen_ipv4_route(nic, v4.gateways, v4.netmask)}) return ([subnet], route_list) diff -Nru cloud-init-18.4/cloudinit/sources/__init__.py cloud-init-18.5-21-g8ee294d5/cloudinit/sources/__init__.py --- cloud-init-18.4/cloudinit/sources/__init__.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/sources/__init__.py 2019-01-28 20:07:03.000000000 +0000 @@ -54,9 +54,18 @@ METADATA_CLOUD_NAME_KEY = 'cloud-name' UNSET = "_unset" +METADATA_UNKNOWN = 'unknown' LOG = logging.getLogger(__name__) +# CLOUD_ID_REGION_PREFIX_MAP format is: +# : (: ) +CLOUD_ID_REGION_PREFIX_MAP = { + 'cn-': ('aws-china', lambda c: c == 'aws'), # only change aws regions + 'us-gov-': ('aws-gov', lambda c: c == 'aws'), # only change aws regions + 'china': ('azure-china', lambda c: c == 'azure'), # only change azure +} + class DataSourceNotFoundException(Exception): pass @@ -133,6 +142,14 @@ # Cached cloud_name as determined by _get_cloud_name _cloud_name = None + # Cached cloud platform api type: e.g. ec2, openstack, kvm, lxd, azure etc. + _platform_type = None + + # More details about the cloud platform: + # - metadata (http://169.254.169.254/) + # - seed-dir () + _subplatform = None + # Track the discovered fallback nic for use in configuration generation. _fallback_interface = None @@ -192,21 +209,24 @@ local_hostname = self.get_hostname() instance_id = self.get_instance_id() availability_zone = self.availability_zone - cloud_name = self.cloud_name - # When adding new standard keys prefer underscore-delimited instead - # of hyphen-delimted to support simple variable references in jinja - # templates. + # In the event of upgrade from existing cloudinit, pickled datasource + # will not contain these new class attributes. So we need to recrawl + # metadata to discover that content. return { 'v1': { + '_beta_keys': ['subplatform'], 'availability-zone': availability_zone, 'availability_zone': availability_zone, - 'cloud-name': cloud_name, - 'cloud_name': cloud_name, + 'cloud-name': self.cloud_name, + 'cloud_name': self.cloud_name, + 'platform': self.platform_type, + 'public_ssh_keys': self.get_public_ssh_keys(), 'instance-id': instance_id, 'instance_id': instance_id, 'local-hostname': local_hostname, 'local_hostname': local_hostname, - 'region': self.region}} + 'region': self.region, + 'subplatform': self.subplatform}} def clear_cached_attrs(self, attr_defaults=()): """Reset any cached metadata attributes to datasource defaults. @@ -247,19 +267,27 @@ @return True on successful write, False otherwise. 
""" - instance_data = { - 'ds': {'_doc': EXPERIMENTAL_TEXT, - 'meta_data': self.metadata}} - if hasattr(self, 'network_json'): - network_json = getattr(self, 'network_json') - if network_json != UNSET: - instance_data['ds']['network_json'] = network_json - if hasattr(self, 'ec2_metadata'): - ec2_metadata = getattr(self, 'ec2_metadata') - if ec2_metadata != UNSET: - instance_data['ds']['ec2_metadata'] = ec2_metadata + if hasattr(self, '_crawled_metadata'): + # Any datasource with _crawled_metadata will best represent + # most recent, 'raw' metadata + crawled_metadata = copy.deepcopy( + getattr(self, '_crawled_metadata')) + crawled_metadata.pop('user-data', None) + crawled_metadata.pop('vendor-data', None) + instance_data = {'ds': crawled_metadata} + else: + instance_data = {'ds': {'meta_data': self.metadata}} + if hasattr(self, 'network_json'): + network_json = getattr(self, 'network_json') + if network_json != UNSET: + instance_data['ds']['network_json'] = network_json + if hasattr(self, 'ec2_metadata'): + ec2_metadata = getattr(self, 'ec2_metadata') + if ec2_metadata != UNSET: + instance_data['ds']['ec2_metadata'] = ec2_metadata instance_data.update( self._get_standardized_metadata()) + instance_data['ds']['_doc'] = EXPERIMENTAL_TEXT try: # Process content base64encoding unserializable values content = util.json_dumps(instance_data) @@ -347,6 +375,40 @@ return self._fallback_interface @property + def platform_type(self): + if not hasattr(self, '_platform_type'): + # Handle upgrade path where pickled datasource has no _platform. + self._platform_type = self.dsname.lower() + if not self._platform_type: + self._platform_type = self.dsname.lower() + return self._platform_type + + @property + def subplatform(self): + """Return a string representing subplatform details for the datasource. + + This should be guidance for where the metadata is sourced. + Examples of this on different clouds: + ec2: metadata (http://169.254.169.254) + openstack: configdrive (/dev/path) + openstack: metadata (http://169.254.169.254) + nocloud: seed-dir (/seed/dir/path) + lxd: nocloud (/seed/dir/path) + """ + if not hasattr(self, '_subplatform'): + # Handle upgrade path where pickled datasource has no _platform. + self._subplatform = self._get_subplatform() + if not self._subplatform: + self._subplatform = self._get_subplatform() + return self._subplatform + + def _get_subplatform(self): + """Subclasses should implement to return a "slug (detail)" string.""" + if hasattr(self, 'metadata_address'): + return 'metadata (%s)' % getattr(self, 'metadata_address') + return METADATA_UNKNOWN + + @property def cloud_name(self): """Return lowercase cloud name as determined by the datasource. 
@@ -359,9 +421,11 @@ cloud_name = self.metadata.get(METADATA_CLOUD_NAME_KEY) if isinstance(cloud_name, six.string_types): self._cloud_name = cloud_name.lower() - LOG.debug( - 'Ignoring metadata provided key %s: non-string type %s', - METADATA_CLOUD_NAME_KEY, type(cloud_name)) + else: + self._cloud_name = self._get_cloud_name().lower() + LOG.debug( + 'Ignoring metadata provided key %s: non-string type %s', + METADATA_CLOUD_NAME_KEY, type(cloud_name)) else: self._cloud_name = self._get_cloud_name().lower() return self._cloud_name @@ -714,6 +778,25 @@ return instance_id.lower() == dmi_value.lower() +def canonical_cloud_id(cloud_name, region, platform): + """Lookup the canonical cloud-id for a given cloud_name and region.""" + if not cloud_name: + cloud_name = METADATA_UNKNOWN + if not region: + region = METADATA_UNKNOWN + if region == METADATA_UNKNOWN: + if cloud_name != METADATA_UNKNOWN: + return cloud_name + return platform + for prefix, cloud_id_test in CLOUD_ID_REGION_PREFIX_MAP.items(): + (cloud_id, valid_cloud) = cloud_id_test + if region.startswith(prefix) and valid_cloud(cloud_name): + return cloud_id + if cloud_name != METADATA_UNKNOWN: + return cloud_name + return platform + + def convert_vendordata(data, recurse=True): """data: a loaded object (strings, arrays, dicts). return something suitable for cloudinit vendordata_raw. diff -Nru cloud-init-18.4/cloudinit/sources/tests/test_init.py cloud-init-18.5-21-g8ee294d5/cloudinit/sources/tests/test_init.py --- cloud-init-18.4/cloudinit/sources/tests/test_init.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/sources/tests/test_init.py 2019-01-28 20:07:03.000000000 +0000 @@ -11,7 +11,8 @@ from cloudinit import importer from cloudinit.sources import ( EXPERIMENTAL_TEXT, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE, - REDACT_SENSITIVE_VALUE, UNSET, DataSource, redact_sensitive_keys) + METADATA_UNKNOWN, REDACT_SENSITIVE_VALUE, UNSET, DataSource, + canonical_cloud_id, redact_sensitive_keys) from cloudinit.tests.helpers import CiTestCase, skipIf, mock from cloudinit.user_data import UserDataProcessor from cloudinit import util @@ -295,6 +296,7 @@ 'base64_encoded_keys': [], 'sensitive_keys': [], 'v1': { + '_beta_keys': ['subplatform'], 'availability-zone': 'myaz', 'availability_zone': 'myaz', 'cloud-name': 'subclasscloudname', @@ -303,7 +305,10 @@ 'instance_id': 'iid-datasource', 'local-hostname': 'test-subclass-hostname', 'local_hostname': 'test-subclass-hostname', - 'region': 'myregion'}, + 'platform': 'mytestsubclass', + 'public_ssh_keys': [], + 'region': 'myregion', + 'subplatform': 'unknown'}, 'ds': { '_doc': EXPERIMENTAL_TEXT, 'meta_data': {'availability_zone': 'myaz', @@ -339,6 +344,7 @@ 'base64_encoded_keys': [], 'sensitive_keys': ['ds/meta_data/some/security-credentials'], 'v1': { + '_beta_keys': ['subplatform'], 'availability-zone': 'myaz', 'availability_zone': 'myaz', 'cloud-name': 'subclasscloudname', @@ -347,7 +353,10 @@ 'instance_id': 'iid-datasource', 'local-hostname': 'test-subclass-hostname', 'local_hostname': 'test-subclass-hostname', - 'region': 'myregion'}, + 'platform': 'mytestsubclass', + 'public_ssh_keys': [], + 'region': 'myregion', + 'subplatform': 'unknown'}, 'ds': { '_doc': EXPERIMENTAL_TEXT, 'meta_data': { @@ -599,4 +608,75 @@ redact_sensitive_keys(md)) +class TestCanonicalCloudID(CiTestCase): + + def test_cloud_id_returns_platform_on_unknowns(self): + """When region and cloud_name are unknown, return platform.""" + self.assertEqual( + 'platform', + 
canonical_cloud_id(cloud_name=METADATA_UNKNOWN, + region=METADATA_UNKNOWN, + platform='platform')) + + def test_cloud_id_returns_platform_on_none(self): + """When region and cloud_name are unknown, return platform.""" + self.assertEqual( + 'platform', + canonical_cloud_id(cloud_name=None, + region=None, + platform='platform')) + + def test_cloud_id_returns_cloud_name_on_unknown_region(self): + """When region is unknown, return cloud_name.""" + for region in (None, METADATA_UNKNOWN): + self.assertEqual( + 'cloudname', + canonical_cloud_id(cloud_name='cloudname', + region=region, + platform='platform')) + + def test_cloud_id_returns_platform_on_unknown_cloud_name(self): + """When region is set but cloud_name is unknown return cloud_name.""" + self.assertEqual( + 'platform', + canonical_cloud_id(cloud_name=METADATA_UNKNOWN, + region='region', + platform='platform')) + + def test_cloud_id_aws_based_on_region_and_cloud_name(self): + """When cloud_name is aws, return proper cloud-id based on region.""" + self.assertEqual( + 'aws-china', + canonical_cloud_id(cloud_name='aws', + region='cn-north-1', + platform='platform')) + self.assertEqual( + 'aws', + canonical_cloud_id(cloud_name='aws', + region='us-east-1', + platform='platform')) + self.assertEqual( + 'aws-gov', + canonical_cloud_id(cloud_name='aws', + region='us-gov-1', + platform='platform')) + self.assertEqual( # Overrideen non-aws cloud_name is returned + '!aws', + canonical_cloud_id(cloud_name='!aws', + region='us-gov-1', + platform='platform')) + + def test_cloud_id_azure_based_on_region_and_cloud_name(self): + """Report cloud-id when cloud_name is azure and region is in china.""" + self.assertEqual( + 'azure-china', + canonical_cloud_id(cloud_name='azure', + region='chinaeast', + platform='platform')) + self.assertEqual( + 'azure', + canonical_cloud_id(cloud_name='azure', + region='!chinaeast', + platform='platform')) + # vi: ts=4 expandtab diff -Nru cloud-init-18.4/cloudinit/sources/tests/test_oracle.py cloud-init-18.5-21-g8ee294d5/cloudinit/sources/tests/test_oracle.py --- cloud-init-18.4/cloudinit/sources/tests/test_oracle.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/sources/tests/test_oracle.py 2019-01-28 20:07:03.000000000 +0000 @@ -71,6 +71,14 @@ self.assertFalse(ds._get_data()) mocks._is_platform_viable.assert_called_once_with() + def test_platform_info(self): + """Return platform-related information for Oracle Datasource.""" + ds, _mocks = self._get_ds() + self.assertEqual('oracle', ds.cloud_name) + self.assertEqual('oracle', ds.platform_type) + self.assertEqual( + 'metadata (http://169.254.169.254/openstack/)', ds.subplatform) + @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) def test_without_userdata(self, m_is_iscsi_root): """If no user-data is provided, it should not be in return dict.""" diff -Nru cloud-init-18.4/cloudinit/temp_utils.py cloud-init-18.5-21-g8ee294d5/cloudinit/temp_utils.py --- cloud-init-18.4/cloudinit/temp_utils.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/temp_utils.py 2019-01-28 20:07:03.000000000 +0000 @@ -81,7 +81,7 @@ @contextlib.contextmanager -def tempdir(**kwargs): +def tempdir(rmtree_ignore_errors=False, **kwargs): # This seems like it was only added in python 3.2 # Make it since its useful... 
# See: http://bugs.python.org/file12970/tempdir.patch @@ -89,7 +89,7 @@ try: yield tdir finally: - shutil.rmtree(tdir) + shutil.rmtree(tdir, ignore_errors=rmtree_ignore_errors) def mkdtemp(**kwargs): diff -Nru cloud-init-18.4/cloudinit/tests/test_dhclient_hook.py cloud-init-18.5-21-g8ee294d5/cloudinit/tests/test_dhclient_hook.py --- cloud-init-18.4/cloudinit/tests/test_dhclient_hook.py 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/tests/test_dhclient_hook.py 2019-01-28 20:07:03.000000000 +0000 @@ -0,0 +1,105 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Tests for cloudinit.dhclient_hook.""" + +from cloudinit import dhclient_hook as dhc +from cloudinit.tests.helpers import CiTestCase, dir2dict, populate_dir + +import argparse +import json +import mock +import os + + +class TestDhclientHook(CiTestCase): + + ex_env = { + 'interface': 'eth0', + 'new_dhcp_lease_time': '3600', + 'new_host_name': 'x1', + 'new_ip_address': '10.145.210.163', + 'new_subnet_mask': '255.255.255.0', + 'old_host_name': 'x1', + 'PATH': '/usr/sbin:/usr/bin:/sbin:/bin', + 'pid': '614', + 'reason': 'BOUND', + } + + # some older versions of dhclient put the same content, + # but in upper case with DHCP4_ instead of new_ + ex_env_dhcp4 = { + 'REASON': 'BOUND', + 'DHCP4_dhcp_lease_time': '3600', + 'DHCP4_host_name': 'x1', + 'DHCP4_ip_address': '10.145.210.163', + 'DHCP4_subnet_mask': '255.255.255.0', + 'INTERFACE': 'eth0', + 'PATH': '/usr/sbin:/usr/bin:/sbin:/bin', + 'pid': '614', + } + + expected = { + 'dhcp_lease_time': '3600', + 'host_name': 'x1', + 'ip_address': '10.145.210.163', + 'subnet_mask': '255.255.255.0'} + + def setUp(self): + super(TestDhclientHook, self).setUp() + self.tmp = self.tmp_dir() + + def test_handle_args(self): + """quick test of call to handle_args.""" + nic = 'eth0' + args = argparse.Namespace(event=dhc.UP, interface=nic) + with mock.patch.dict("os.environ", clear=True, values=self.ex_env): + dhc.handle_args(dhc.NAME, args, data_d=self.tmp) + found = dir2dict(self.tmp + os.path.sep) + self.assertEqual([nic + ".json"], list(found.keys())) + self.assertEqual(self.expected, json.loads(found[nic + ".json"])) + + def test_run_hook_up_creates_dir(self): + """If dir does not exist, run_hook should create it.""" + subd = self.tmp_path("subdir", self.tmp) + nic = 'eth1' + dhc.run_hook(nic, 'up', data_d=subd, env=self.ex_env) + self.assertEqual( + set([nic + ".json"]), set(dir2dict(subd + os.path.sep))) + + def test_run_hook_up(self): + """Test expected use of run_hook_up.""" + nic = 'eth0' + dhc.run_hook(nic, 'up', data_d=self.tmp, env=self.ex_env) + found = dir2dict(self.tmp + os.path.sep) + self.assertEqual([nic + ".json"], list(found.keys())) + self.assertEqual(self.expected, json.loads(found[nic + ".json"])) + + def test_run_hook_up_dhcp4_prefix(self): + """Test run_hook filters correctly with older DHCP4_ data.""" + nic = 'eth0' + dhc.run_hook(nic, 'up', data_d=self.tmp, env=self.ex_env_dhcp4) + found = dir2dict(self.tmp + os.path.sep) + self.assertEqual([nic + ".json"], list(found.keys())) + self.assertEqual(self.expected, json.loads(found[nic + ".json"])) + + def test_run_hook_down_deletes(self): + """down should delete the created json file.""" + nic = 'eth1' + populate_dir( + self.tmp, {nic + ".json": "{'abcd'}", 'myfile.txt': 'text'}) + dhc.run_hook(nic, 'down', data_d=self.tmp, env={'old_host_name': 'x1'}) + self.assertEqual( + set(['myfile.txt']), + set(dir2dict(self.tmp + os.path.sep))) + + def test_get_parser(self): + 
"""Smoke test creation of get_parser.""" + # cloud-init main uses 'action'. + event, interface = (dhc.UP, 'mynic0') + self.assertEqual( + argparse.Namespace(event=event, interface=interface, + action=(dhc.NAME, dhc.handle_args)), + dhc.get_parser().parse_args([event, interface])) + + +# vi: ts=4 expandtab diff -Nru cloud-init-18.4/cloudinit/tests/test_temp_utils.py cloud-init-18.5-21-g8ee294d5/cloudinit/tests/test_temp_utils.py --- cloud-init-18.4/cloudinit/tests/test_temp_utils.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/tests/test_temp_utils.py 2019-01-28 20:07:03.000000000 +0000 @@ -2,8 +2,9 @@ """Tests for cloudinit.temp_utils""" -from cloudinit.temp_utils import mkdtemp, mkstemp +from cloudinit.temp_utils import mkdtemp, mkstemp, tempdir from cloudinit.tests.helpers import CiTestCase, wrap_and_call +import os class TestTempUtils(CiTestCase): @@ -98,4 +99,19 @@ self.assertEqual('/fake/return/path', retval) self.assertEqual([{'dir': '/run/cloud-init/tmp'}], calls) + def test_tempdir_error_suppression(self): + """test tempdir suppresses errors during directory removal.""" + + with self.assertRaises(OSError): + with tempdir(prefix='cloud-init-dhcp-') as tdir: + os.rmdir(tdir) + # As a result, the directory is already gone, + # so shutil.rmtree should raise OSError + + with tempdir(rmtree_ignore_errors=True, + prefix='cloud-init-dhcp-') as tdir: + os.rmdir(tdir) + # Since the directory is already gone, shutil.rmtree would raise + # OSError, but we suppress that + # vi: ts=4 expandtab diff -Nru cloud-init-18.4/cloudinit/tests/test_url_helper.py cloud-init-18.5-21-g8ee294d5/cloudinit/tests/test_url_helper.py --- cloud-init-18.4/cloudinit/tests/test_url_helper.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/tests/test_url_helper.py 2019-01-28 20:07:03.000000000 +0000 @@ -1,10 +1,12 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-from cloudinit.url_helper import oauth_headers, read_file_or_url +from cloudinit.url_helper import ( + NOT_FOUND, UrlError, oauth_headers, read_file_or_url, retry_on_url_exc) from cloudinit.tests.helpers import CiTestCase, mock, skipIf from cloudinit import util import httpretty +import requests try: @@ -64,3 +66,24 @@ result = read_file_or_url(url) self.assertEqual(result.contents, data) self.assertEqual(str(result), data.decode('utf-8')) + + +class TestRetryOnUrlExc(CiTestCase): + + def test_do_not_retry_non_urlerror(self): + """When exception is not UrlError return False.""" + myerror = IOError('something unexcpected') + self.assertFalse(retry_on_url_exc(msg='', exc=myerror)) + + def test_perform_retries_on_not_found(self): + """When exception is UrlError with a 404 status code return True.""" + myerror = UrlError(cause=RuntimeError( + 'something was not found'), code=NOT_FOUND) + self.assertTrue(retry_on_url_exc(msg='', exc=myerror)) + + def test_perform_retries_on_timeout(self): + """When exception is a requests.Timout return True.""" + myerror = UrlError(cause=requests.Timeout('something timed out')) + self.assertTrue(retry_on_url_exc(msg='', exc=myerror)) + +# vi: ts=4 expandtab diff -Nru cloud-init-18.4/cloudinit/tests/test_util.py cloud-init-18.5-21-g8ee294d5/cloudinit/tests/test_util.py --- cloud-init-18.4/cloudinit/tests/test_util.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/tests/test_util.py 2019-01-28 20:07:03.000000000 +0000 @@ -18,25 +18,51 @@ ] OS_RELEASE_SLES = dedent("""\ - NAME="SLES"\n - VERSION="12-SP3"\n - VERSION_ID="12.3"\n - PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3"\n - ID="sles"\nANSI_COLOR="0;32"\n - CPE_NAME="cpe:/o:suse:sles:12:sp3"\n + NAME="SLES" + VERSION="12-SP3" + VERSION_ID="12.3" + PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3" + ID="sles" + ANSI_COLOR="0;32" + CPE_NAME="cpe:/o:suse:sles:12:sp3" """) OS_RELEASE_OPENSUSE = dedent("""\ -NAME="openSUSE Leap" -VERSION="42.3" -ID=opensuse -ID_LIKE="suse" -VERSION_ID="42.3" -PRETTY_NAME="openSUSE Leap 42.3" -ANSI_COLOR="0;32" -CPE_NAME="cpe:/o:opensuse:leap:42.3" -BUG_REPORT_URL="https://bugs.opensuse.org" -HOME_URL="https://www.opensuse.org/" + NAME="openSUSE Leap" + VERSION="42.3" + ID=opensuse + ID_LIKE="suse" + VERSION_ID="42.3" + PRETTY_NAME="openSUSE Leap 42.3" + ANSI_COLOR="0;32" + CPE_NAME="cpe:/o:opensuse:leap:42.3" + BUG_REPORT_URL="https://bugs.opensuse.org" + HOME_URL="https://www.opensuse.org/" +""") + +OS_RELEASE_OPENSUSE_L15 = dedent("""\ + NAME="openSUSE Leap" + VERSION="15.0" + ID="opensuse-leap" + ID_LIKE="suse opensuse" + VERSION_ID="15.0" + PRETTY_NAME="openSUSE Leap 15.0" + ANSI_COLOR="0;32" + CPE_NAME="cpe:/o:opensuse:leap:15.0" + BUG_REPORT_URL="https://bugs.opensuse.org" + HOME_URL="https://www.opensuse.org/" +""") + +OS_RELEASE_OPENSUSE_TW = dedent("""\ + NAME="openSUSE Tumbleweed" + ID="opensuse-tumbleweed" + ID_LIKE="opensuse suse" + VERSION_ID="20180920" + PRETTY_NAME="openSUSE Tumbleweed" + ANSI_COLOR="0;32" + CPE_NAME="cpe:/o:opensuse:tumbleweed:20180920" + BUG_REPORT_URL="https://bugs.opensuse.org" + HOME_URL="https://www.opensuse.org/" """) OS_RELEASE_CENTOS = dedent("""\ @@ -447,12 +473,35 @@ @mock.patch('cloudinit.util.load_file') def test_get_linux_opensuse(self, m_os_release, m_path_exists): - """Verify we get the correct name and machine arch on OpenSUSE.""" + """Verify we get the correct name and machine arch on openSUSE + prior to openSUSE Leap 15. 
+ """ m_os_release.return_value = OS_RELEASE_OPENSUSE m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() self.assertEqual(('opensuse', '42.3', platform.machine()), dist) + @mock.patch('cloudinit.util.load_file') + def test_get_linux_opensuse_l15(self, m_os_release, m_path_exists): + """Verify we get the correct name and machine arch on openSUSE + for openSUSE Leap 15.0 and later. + """ + m_os_release.return_value = OS_RELEASE_OPENSUSE_L15 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('opensuse-leap', '15.0', platform.machine()), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_opensuse_tw(self, m_os_release, m_path_exists): + """Verify we get the correct name and machine arch on openSUSE + for openSUSE Tumbleweed + """ + m_os_release.return_value = OS_RELEASE_OPENSUSE_TW + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual( + ('opensuse-tumbleweed', '20180920', platform.machine()), dist) + @mock.patch('platform.dist') def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists): """Verify we get no information if os-release does not exist""" @@ -478,4 +527,20 @@ dist = util.get_linux_distro() self.assertEqual(('foo', '1.1', 'aarch64'), dist) + +@mock.patch('os.path.exists') +class TestIsLXD(CiTestCase): + + def test_is_lxd_true_on_sock_device(self, m_exists): + """When lxd's /dev/lxd/sock exists, is_lxd returns true.""" + m_exists.return_value = True + self.assertTrue(util.is_lxd()) + m_exists.assert_called_once_with('/dev/lxd/sock') + + def test_is_lxd_false_when_sock_device_absent(self, m_exists): + """When lxd's /dev/lxd/sock is absent, is_lxd returns false.""" + m_exists.return_value = False + self.assertFalse(util.is_lxd()) + m_exists.assert_called_once_with('/dev/lxd/sock') + # vi: ts=4 expandtab diff -Nru cloud-init-18.4/cloudinit/url_helper.py cloud-init-18.5-21-g8ee294d5/cloudinit/url_helper.py --- cloud-init-18.4/cloudinit/url_helper.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/url_helper.py 2019-01-28 20:07:03.000000000 +0000 @@ -199,7 +199,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, headers=None, headers_cb=None, ssl_details=None, check_status=True, allow_redirects=True, exception_cb=None, - session=None, infinite=False): + session=None, infinite=False, log_req_resp=True): url = _cleanurl(url) req_args = { 'url': url, @@ -256,9 +256,11 @@ continue filtered_req_args[k] = v try: - LOG.debug("[%s/%s] open '%s' with %s configuration", i, - "infinite" if infinite else manual_tries, url, - filtered_req_args) + + if log_req_resp: + LOG.debug("[%s/%s] open '%s' with %s configuration", i, + "infinite" if infinite else manual_tries, url, + filtered_req_args) if session is None: session = requests.Session() @@ -294,8 +296,11 @@ break if (infinite and sec_between > 0) or \ (i + 1 < manual_tries and sec_between > 0): - LOG.debug("Please wait %s seconds while we wait to try again", - sec_between) + + if log_req_resp: + LOG.debug( + "Please wait %s seconds while we wait to try again", + sec_between) time.sleep(sec_between) if excps: raise excps[-1] @@ -549,4 +554,18 @@ _uri, signed_headers, _body = client.sign(url) return signed_headers + +def retry_on_url_exc(msg, exc): + """readurl exception_cb that will retry on NOT_FOUND and Timeout. + + Returns False to raise the exception from readurl, True to retry. 
+ """ + if not isinstance(exc, UrlError): + return False + if exc.code == NOT_FOUND: + return True + if exc.cause and isinstance(exc.cause, requests.Timeout): + return True + return False + # vi: ts=4 expandtab diff -Nru cloud-init-18.4/cloudinit/util.py cloud-init-18.5-21-g8ee294d5/cloudinit/util.py --- cloud-init-18.4/cloudinit/util.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/util.py 2019-01-28 20:07:03.000000000 +0000 @@ -615,8 +615,8 @@ distro_name = os_release.get('ID', '') distro_version = os_release.get('VERSION_ID', '') if 'sles' in distro_name or 'suse' in distro_name: - # RELEASE_BLOCKER: We will drop this sles ivergent behavior in - # before 18.4 so that get_linux_distro returns a named tuple + # RELEASE_BLOCKER: We will drop this sles divergent behavior in + # the future so that get_linux_distro returns a named tuple # which will include both version codename and architecture # on all distributions. flavor = platform.machine() @@ -668,7 +668,8 @@ var = 'ubuntu' elif linux_dist == 'redhat': var = 'rhel' - elif linux_dist in ('opensuse', 'sles'): + elif linux_dist in ( + 'opensuse', 'opensuse-tumbleweed', 'opensuse-leap', 'sles'): var = 'suse' else: var = 'linux' @@ -2171,6 +2172,11 @@ return False +def is_lxd(): + """Check to see if we are running in a lxd container.""" + return os.path.exists('/dev/lxd/sock') + + def get_proc_env(pid, encoding='utf-8', errors='replace'): """ Return the environment in a dict that a given process id was started with. @@ -2870,4 +2876,20 @@ return subp(settle_cmd) +def get_proc_ppid(pid): + """ + Return the parent pid of a process. + """ + ppid = 0 + try: + contents = load_file("/proc/%s/stat" % pid, quiet=True) + except IOError as e: + LOG.warning('Failed to load /proc/%s/stat. %s', pid, e) + if contents: + parts = contents.split(" ", 4) + # man proc says + # ppid %d (4) The PID of the parent. + ppid = int(parts[3]) + return ppid + # vi: ts=4 expandtab diff -Nru cloud-init-18.4/cloudinit/version.py cloud-init-18.5-21-g8ee294d5/cloudinit/version.py --- cloud-init-18.4/cloudinit/version.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/cloudinit/version.py 2019-01-28 20:07:03.000000000 +0000 @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
-__VERSION__ = "18.4" +__VERSION__ = "18.5" _PACKAGED_VERSION = '@@PACKAGED_VERSION@@' FEATURES = [ diff -Nru cloud-init-18.4/config/cloud.cfg.tmpl cloud-init-18.5-21-g8ee294d5/config/cloud.cfg.tmpl --- cloud-init-18.4/config/cloud.cfg.tmpl 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/config/cloud.cfg.tmpl 2019-01-28 20:07:03.000000000 +0000 @@ -167,7 +167,17 @@ - http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/ - http://%(region)s.clouds.archive.ubuntu.com/ubuntu/ security: [] - - arches: [armhf, armel, default] + - arches: [arm64, armel, armhf] + failsafe: + primary: http://ports.ubuntu.com/ubuntu-ports + security: http://ports.ubuntu.com/ubuntu-ports + search: + primary: + - http://%(ec2_region)s.ec2.ports.ubuntu.com/ubuntu-ports/ + - http://%(availability_zone)s.clouds.ports.ubuntu.com/ubuntu-ports/ + - http://%(region)s.clouds.ports.ubuntu.com/ubuntu-ports/ + security: [] + - arches: [default] failsafe: primary: http://ports.ubuntu.com/ubuntu-ports security: http://ports.ubuntu.com/ubuntu-ports diff -Nru cloud-init-18.4/debian/changelog cloud-init-18.5-21-g8ee294d5/debian/changelog --- cloud-init-18.4/debian/changelog 2018-10-17 18:51:09.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/debian/changelog 2019-01-29 03:27:06.000000000 +0000 @@ -1,3 +1,97 @@ +cloud-init (18.5-21-g8ee294d5-0ubuntu1~16.04.1) xenial; urgency=medium + + * New upstream snapshot. (LP: #1813346) + - opennebula: also exclude epochseconds from changed environment vars + - systemd: Render generator from template to account for system + differences. [Robert Schweikert] + - sysconfig: On SUSE, use STARTMODE instead of ONBOOT [Robert Schweikert] + - flake8: use ==/!= to compare str, bytes, and int literals + [Paride Legovini] + + -- Chad Smith Mon, 28 Jan 2019 20:27:06 -0700 + +cloud-init (18.5-17-gd1a2fe73-0ubuntu1~16.04.1) xenial; urgency=medium + + * drop the following cherry-picks now included: + + cpick-1d5e9aef-azure-Add-apply_network_config-option-to-disable + * refresh patches: + + debian/patches/azure-apply-network-config-false.patch + * refresh patches: + + debian/patches/azure-apply-network-config-false.patch + + debian/patches/azure-use-walinux-agent.patch + * New upstream snapshot. + - opennebula: exclude EPOCHREALTIME as known bash env variable with a delta + - tox: fix disco httpretty dependencies for py37 + - run-container: uncomment baseurl in yum.repos.d/*.repo when using a + proxy [Paride Legovini] + - lxd: install zfs-linux instead of zfs meta package [Johnson Shi] + - net/sysconfig: do not write a resolv.conf file with only the header. + [Robert Schweikert] + - net: Make sysconfig renderer compatible with Network Manager. + [Eduardo Otubo] + - cc_set_passwords: Fix regex when parsing hashed passwords + [Marlin Cremers] + - net: Wait for dhclient to daemonize before reading lease file + [Jason Zions] + - [Azure] Increase retries when talking to Wireserver during metadata walk + [Jason Zions] + - Add documentation on adding a datasource. + - doc: clean up some datasource documentation. + - ds-identify: fix wrong variable name in ovf_vmware_transport_guestinfo. + - Scaleway: Support ssh keys provided inside an instance tag. [PORTE Loïc] + - OVF: simplify expected return values of transport functions. + - Vmware: Add support for the com.vmware.guestInfo OVF transport. + - HACKING.rst: change contact info to Josh Powers + - Update to pylint 2.2.2. 
+ - Release 18.5 + - tests: add Disco release [Joshua Powers] + - net: render 'metric' values in per-subnet routes + - write_files: add support for appending to files. [James Baxter] + - config: On ubuntu select cloud archive mirrors for armel, armhf, arm64. + - dhclient-hook: cleanups, tests and fix a bug on 'down' event. + - NoCloud: Allow top level 'network' key in network-config. + - ovf: Fix ovf network config generation gateway/routes + - azure: detect vnet migration via netlink media change event + [Tamilmani Manoharan] + - Azure: fix copy/paste error in error handling when reading azure ovf. + [Adam DePue] + - tests: fix incorrect order of mocks in test_handle_zfs_root. + - doc: Change dns_nameserver property to dns_nameservers. [Tomer Cohen] + - OVF: identify label iso9660 filesystems with label 'OVF ENV'. + - logs: collect-logs ignore instance-data-sensitive.json on non-root user + - net: Ephemeral*Network: add connectivity check via URL + - azure: _poll_imds only retry on 404. Fail on Timeout + - resizefs: Prefix discovered devpath with '/dev/' when path does not + exist [Igor Galić] + - azure: retry imds polling on requests.Timeout + - azure: Accept variation in error msg from mount for ntfs volumes + [Jason Zions] + - azure: fix regression introduced when persisting ephemeral dhcp lease + [Aswin Rajamannar] + - azure: add udev rules to create cloud-init Gen2 disk name symlinks + - tests: ec2 mock missing httpretty user-data and instance-identity routes + - azure: remove /etc/netplan/90-hotplug-azure.yaml when net from IMDS + - azure: report ready to fabric after reprovision and reduce logging + [Aswin Rajamannar] + - query: better error when missing read permission on instance-data + - instance-data: fallback to instance-data.json if sensitive is absent. + - docs: remove colon from network v1 config example. [Tomer Cohen] + - Add cloud-id binary to packages for SUSE [Jason Zions] + - systemd: On SUSE ensure cloud-init.service runs before wicked + [Robert Schweikert] + - update detection of openSUSE variants [Robert Schweikert] + - azure: Add apply_network_config option to disable network from IMDS + - Correct spelling in an error message (udevadm). [Katie McLaughlin] + - tests: meta_data key changed to meta-data in ec2 instance-data.json + - tests: fix kvm integration test to assert flexible config-disk path + - tools: Add cloud-id command line utility + - instance-data: Add standard keys platform and subplatform. Refactor ec2. + - net: ignore nics that have "zero" mac address. + - tests: fix apt_configure_primary to be more flexible + - Ubuntu: update sources.list to comment out deb-src entries. 
+ + -- Chad Smith Sat, 26 Jan 2019 14:08:36 -0700 + cloud-init (18.4-0ubuntu1~16.04.2) xenial; urgency=medium * cherry-pick 1d5e9aef: azure: Add apply_network_config option to diff -Nru cloud-init-18.4/debian/patches/azure-apply-network-config-false.patch cloud-init-18.5-21-g8ee294d5/debian/patches/azure-apply-network-config-false.patch --- cloud-init-18.4/debian/patches/azure-apply-network-config-false.patch 2018-10-17 18:51:09.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/debian/patches/azure-apply-network-config-false.patch 2019-01-29 03:27:06.000000000 +0000 @@ -8,11 +8,9 @@ Bug: https://bugs.launchpad.net/cloud-init/+bug/1798424 Forwarded: not-needed Last-Update: 2018-10-17 -Index: cloud-init/cloudinit/sources/DataSourceAzure.py -=================================================================== ---- cloud-init.orig/cloudinit/sources/DataSourceAzure.py -+++ cloud-init/cloudinit/sources/DataSourceAzure.py -@@ -207,7 +207,7 @@ BUILTIN_DS_CONFIG = { +--- a/cloudinit/sources/DataSourceAzure.py ++++ b/cloudinit/sources/DataSourceAzure.py +@@ -208,7 +208,7 @@ BUILTIN_DS_CONFIG = { }, 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH}, 'dhclient_lease_file': LEASE_FILE, diff -Nru cloud-init-18.4/debian/patches/azure-use-walinux-agent.patch cloud-init-18.5-21-g8ee294d5/debian/patches/azure-use-walinux-agent.patch --- cloud-init-18.4/debian/patches/azure-use-walinux-agent.patch 2018-10-17 18:51:09.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/debian/patches/azure-use-walinux-agent.patch 2019-01-29 03:27:06.000000000 +0000 @@ -6,7 +6,7 @@ Author: Scott Moser --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py -@@ -196,7 +196,7 @@ if util.is_FreeBSD(): +@@ -197,7 +197,7 @@ if util.is_FreeBSD(): LOG.debug("resource disk is None") BUILTIN_DS_CONFIG = { diff -Nru cloud-init-18.4/debian/patches/cpick-1d5e9aef-azure-Add-apply_network_config-option-to-disable cloud-init-18.5-21-g8ee294d5/debian/patches/cpick-1d5e9aef-azure-Add-apply_network_config-option-to-disable --- cloud-init-18.4/debian/patches/cpick-1d5e9aef-azure-Add-apply_network_config-option-to-disable 2018-10-17 18:51:09.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/debian/patches/cpick-1d5e9aef-azure-Add-apply_network_config-option-to-disable 1970-01-01 00:00:00.000000000 +0000 @@ -1,228 +0,0 @@ -From 1d5e9aefdab06a2574d78e644deed6c6fa1da171 Mon Sep 17 00:00:00 2001 -From: Chad Smith -Date: Wed, 17 Oct 2018 18:47:35 +0000 -Subject: [PATCH] azure: Add apply_network_config option to disable network - from IMDS -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Azure generates network configuration from the IMDS service and removes -any preexisting hotplug network scripts which exist in Azure cloud images. -Add a datasource configuration option which allows for writing a default -network configuration which sets up dhcp on eth0 and leave the hotplug -handling to the cloud-image scripts. 
- -To disable network-config from Azure IMDS, add the following to -/etc/cloud/cloud.cfg.d/99-azure-no-imds-network.cfg: -datasource: -  Azure: -    apply_network_config: False - -LP: #1798424 ---- - cloudinit/sources/DataSourceAzure.py | 11 +++- - doc/rtd/topics/datasources/azure.rst | 46 +++++++++++++++ - tests/unittests/test_datasource/test_azure.py | 56 +++++++++++++++++-- - 3 files changed, 107 insertions(+), 6 deletions(-) - ---- a/cloudinit/sources/DataSourceAzure.py -+++ b/cloudinit/sources/DataSourceAzure.py -@@ -207,7 +207,9 @@ BUILTIN_DS_CONFIG = { - }, - 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH}, - 'dhclient_lease_file': LEASE_FILE, -+ 'apply_network_config': True, # Use IMDS published network configuration - } -+# RELEASE_BLOCKER: Xenial and earlier apply_network_config default is False - - BUILTIN_CLOUD_CONFIG = { - 'disk_setup': { -@@ -450,7 +452,8 @@ class DataSourceAzure(sources.DataSource - except sources.InvalidMetaDataException as e: - LOG.warning('Could not crawl Azure metadata: %s', e) - return False -- if self.distro and self.distro.name == 'ubuntu': -+ if (self.distro and self.distro.name == 'ubuntu' and -+ self.ds_cfg.get('apply_network_config')): - maybe_remove_ubuntu_network_config_scripts() - - # Process crawled data and augment with various config defaults -@@ -611,7 +614,11 @@ class DataSourceAzure(sources.DataSource - the blacklisted devices. - """ - if not self._network_config: -- self._network_config = parse_network_config(self._metadata_imds) -+ if self.ds_cfg.get('apply_network_config'): -+ nc_src = self._metadata_imds -+ else: -+ nc_src = None -+ self._network_config = parse_network_config(nc_src) - return self._network_config - - ---- a/doc/rtd/topics/datasources/azure.rst -+++ b/doc/rtd/topics/datasources/azure.rst -@@ -57,6 +57,52 @@ in order to use waagent.conf with cloud- - ResourceDisk.MountPoint=/mnt - - -+Configuration -+------------- -+The following configuration can be set for the datasource in system -+configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`). -+ -+The settings that may be configured are: -+ -+ * **agent_command**: Either __builtin__ (default) or a command to run to getcw -+ metadata. If __builtin__, get metadata from walinuxagent. Otherwise run the -+ provided command to obtain metadata. -+ * **apply_network_config**: Boolean set to True to use network configuration -+ described by Azure's IMDS endpoint instead of fallback network config of -+ dhcp on eth0. Default is True. For Ubuntu 16.04 or earlier, default is False. -+ * **data_dir**: Path used to read metadata files and write crawled data. -+ * **dhclient_lease_file**: The fallback lease file to source when looking for -+ custom DHCP option 245 from Azure fabric. -+ * **disk_aliases**: A dictionary defining which device paths should be -+ interpreted as ephemeral images. See cc_disk_setup module for more info. -+ * **hostname_bounce**: A dictionary Azure hostname bounce behavior to react to -+ metadata changes. -+ * **hostname_bounce**: A dictionary Azure hostname bounce behavior to react to -+ metadata changes. Azure will throttle ifup/down in some cases after metadata -+ has been updated to inform dhcp server about updated hostnames. -+ * **set_hostname**: Boolean set to True when we want Azure to set the hostname -+ based on metadata. -+ -+An example configuration with the default values is provided below: -+ -+.. 
sourcecode:: yaml -+ -+ datasource: -+ Azure: -+ agent_command: __builtin__ -+ apply_network_config: true -+ data_dir: /var/lib/waagent -+ dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases -+ disk_aliases: -+ ephemeral0: /dev/disk/cloud/azure_resource -+ hostname_bounce: -+ interface: eth0 -+ command: builtin -+ policy: true -+ hostname_command: hostname -+ set_hostname: true -+ -+ - Userdata - -------- - Userdata is provided to cloud-init inside the ovf-env.xml file. Cloud-init ---- a/tests/unittests/test_datasource/test_azure.py -+++ b/tests/unittests/test_datasource/test_azure.py -@@ -254,7 +254,8 @@ scbus-1 on xpt0 bus 0 - ]) - return dsaz - -- def _get_ds(self, data, agent_command=None, distro=None): -+ def _get_ds(self, data, agent_command=None, distro=None, -+ apply_network=None): - - def dsdevs(): - return data.get('dsdevs', []) -@@ -310,6 +311,8 @@ scbus-1 on xpt0 bus 0 - data.get('sys_cfg', {}), distro=distro, paths=self.paths) - if agent_command is not None: - dsrc.ds_cfg['agent_command'] = agent_command -+ if apply_network is not None: -+ dsrc.ds_cfg['apply_network_config'] = apply_network - - return dsrc - -@@ -414,14 +417,26 @@ fdescfs /dev/fd fdes - - def test_get_data_on_ubuntu_will_remove_network_scripts(self): - """get_data will remove ubuntu net scripts on Ubuntu distro.""" -+ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), -- 'sys_cfg': {}} -+ 'sys_cfg': sys_cfg} - - dsrc = self._get_ds(data, distro='ubuntu') - dsrc.get_data() - self.m_remove_ubuntu_network_scripts.assert_called_once_with() - -+ def test_get_data_on_ubuntu_will_not_remove_network_scripts_disabled(self): -+ """When apply_network_config false, do not remove scripts on Ubuntu.""" -+ sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}} -+ odata = {'HostName': "myhost", 'UserName': "myuser"} -+ data = {'ovfcontent': construct_valid_ovf_env(data=odata), -+ 'sys_cfg': sys_cfg} -+ -+ dsrc = self._get_ds(data, distro='ubuntu') -+ dsrc.get_data() -+ self.m_remove_ubuntu_network_scripts.assert_not_called() -+ - def test_crawl_metadata_returns_structured_data_and_caches_nothing(self): - """Return all structured metadata and cache no class attributes.""" - yaml_cfg = "{agent_command: my_command}\n" -@@ -503,8 +518,10 @@ fdescfs /dev/fd fdes - - def test_network_config_set_from_imds(self): - """Datasource.network_config returns IMDS network data.""" -+ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {} -- data = {'ovfcontent': construct_valid_ovf_env(data=odata)} -+ data = {'ovfcontent': construct_valid_ovf_env(data=odata), -+ 'sys_cfg': sys_cfg} - expected_network_config = { - 'ethernets': { - 'eth0': {'set-name': 'eth0', -@@ -783,9 +800,10 @@ fdescfs /dev/fd fdes - @mock.patch('cloudinit.net.generate_fallback_config') - def test_imds_network_config(self, mock_fallback): - """Network config is generated from IMDS network data when present.""" -+ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), -- 'sys_cfg': {}} -+ 'sys_cfg': sys_cfg} - - dsrc = self._get_ds(data) - ret = dsrc.get_data() -@@ -803,6 +821,36 @@ fdescfs /dev/fd fdes - - @mock.patch('cloudinit.net.get_interface_mac') - @mock.patch('cloudinit.net.get_devicelist') -+ @mock.patch('cloudinit.net.device_driver') -+ 
@mock.patch('cloudinit.net.generate_fallback_config') -+ def test_imds_network_ignored_when_apply_network_config_false( -+ self, mock_fallback, mock_dd, mock_devlist, mock_get_mac): -+ """When apply_network_config is False, use fallback instead of IMDS.""" -+ sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}} -+ odata = {'HostName': "myhost", 'UserName': "myuser"} -+ data = {'ovfcontent': construct_valid_ovf_env(data=odata), -+ 'sys_cfg': sys_cfg} -+ fallback_config = { -+ 'version': 1, -+ 'config': [{ -+ 'type': 'physical', 'name': 'eth0', -+ 'mac_address': '00:11:22:33:44:55', -+ 'params': {'driver': 'hv_netsvc'}, -+ 'subnets': [{'type': 'dhcp'}], -+ }] -+ } -+ mock_fallback.return_value = fallback_config -+ -+ mock_devlist.return_value = ['eth0'] -+ mock_dd.return_value = ['hv_netsvc'] -+ mock_get_mac.return_value = '00:11:22:33:44:55' -+ -+ dsrc = self._get_ds(data) -+ self.assertTrue(dsrc.get_data()) -+ self.assertEqual(dsrc.network_config, fallback_config) -+ -+ @mock.patch('cloudinit.net.get_interface_mac') -+ @mock.patch('cloudinit.net.get_devicelist') - @mock.patch('cloudinit.net.device_driver') - @mock.patch('cloudinit.net.generate_fallback_config') - def test_fallback_network_config(self, mock_fallback, mock_dd, diff -Nru cloud-init-18.4/debian/patches/series cloud-init-18.5-21-g8ee294d5/debian/patches/series --- cloud-init-18.4/debian/patches/series 2018-10-17 18:51:09.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/debian/patches/series 2019-01-29 03:27:06.000000000 +0000 @@ -2,5 +2,4 @@ ds-identify-behavior-xenial.patch stable-release-no-jsonschema-dep.patch openstack-no-network-config.patch -cpick-1d5e9aef-azure-Add-apply_network_config-option-to-disable azure-apply-network-config-false.patch diff -Nru cloud-init-18.4/doc/rtd/topics/datasources/azure.rst cloud-init-18.5-21-g8ee294d5/doc/rtd/topics/datasources/azure.rst --- cloud-init-18.4/doc/rtd/topics/datasources/azure.rst 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/doc/rtd/topics/datasources/azure.rst 2019-01-28 20:07:03.000000000 +0000 @@ -23,18 +23,18 @@ In order for cloud-init to leverage this method to find the endpoint, the cloud.cfg file must contain: -datasource: - Azure: - set_hostname: False - agent_command: __builtin__ +.. sourcecode:: yaml + + datasource: + Azure: + set_hostname: False + agent_command: __builtin__ If those files are not available, the fallback is to check the leases file for the endpoint server (again option 245). You can define the path to the lease file with the 'dhclient_lease_file' -configuration. The default value is /var/lib/dhcp/dhclient.eth0.leases. - - dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases +configuration. walinuxagent ------------ @@ -57,6 +57,64 @@ ResourceDisk.MountPoint=/mnt +Configuration +------------- +The following configuration can be set for the datasource in system +configuration (in ``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d/``). + +The settings that may be configured are: + + * **agent_command**: Either __builtin__ (default) or a command to run to getcw + metadata. If __builtin__, get metadata from walinuxagent. Otherwise run the + provided command to obtain metadata. + * **apply_network_config**: Boolean set to True to use network configuration + described by Azure's IMDS endpoint instead of fallback network config of + dhcp on eth0. Default is True. For Ubuntu 16.04 or earlier, default is False. + * **data_dir**: Path used to read metadata files and write crawled data. 
+ * **dhclient_lease_file**: The fallback lease file to source when looking for + custom DHCP option 245 from Azure fabric. + * **disk_aliases**: A dictionary defining which device paths should be + interpreted as ephemeral images. See cc_disk_setup module for more info. + * **hostname_bounce**: A dictionary Azure hostname bounce behavior to react to + metadata changes. The '``hostname_bounce: command``' entry can be either + the literal string 'builtin' or a command to execute. The command will be + invoked after the hostname is set, and will have the 'interface' in its + environment. If ``set_hostname`` is not true, then ``hostname_bounce`` + will be ignored. An example might be: + + ``command: ["sh", "-c", "killall dhclient; dhclient $interface"]`` + + * **hostname_bounce**: A dictionary Azure hostname bounce behavior to react to + metadata changes. Azure will throttle ifup/down in some cases after metadata + has been updated to inform dhcp server about updated hostnames. + * **set_hostname**: Boolean set to True when we want Azure to set the hostname + based on metadata. + +Configuration for the datasource can also be read from a +``dscfg`` entry in the ``LinuxProvisioningConfigurationSet``. Content in +dscfg node is expected to be base64 encoded yaml content, and it will be +merged into the 'datasource: Azure' entry. + +An example configuration with the default values is provided below: + +.. sourcecode:: yaml + + datasource: + Azure: + agent_command: __builtin__ + apply_network_config: true + data_dir: /var/lib/waagent + dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases + disk_aliases: + ephemeral0: /dev/disk/cloud/azure_resource + hostname_bounce: + interface: eth0 + command: builtin + policy: true + hostname_command: hostname + set_hostname: true + + Userdata -------- Userdata is provided to cloud-init inside the ovf-env.xml file. Cloud-init @@ -97,37 +155,6 @@ -Configuration -------------- -Configuration for the datasource can be read from the system config's or set -via the `dscfg` entry in the `LinuxProvisioningConfigurationSet`. Content in -dscfg node is expected to be base64 encoded yaml content, and it will be -merged into the 'datasource: Azure' entry. - -The '``hostname_bounce: command``' entry can be either the literal string -'builtin' or a command to execute. The command will be invoked after the -hostname is set, and will have the 'interface' in its environment. If -``set_hostname`` is not true, then ``hostname_bounce`` will be ignored. - -An example might be: - command: ["sh", "-c", "killall dhclient; dhclient $interface"] - -.. code:: yaml - - datasource: - agent_command - Azure: - agent_command: [service, walinuxagent, start] - set_hostname: True - hostname_bounce: - # the name of the interface to bounce - interface: eth0 - # policy can be 'on', 'off' or 'force' - policy: on - # the method 'bounce' command. - command: "builtin" - hostname_command: "hostname" - hostname -------- When the user launches an instance, they provide a hostname for that instance. diff -Nru cloud-init-18.4/doc/rtd/topics/datasources.rst cloud-init-18.5-21-g8ee294d5/doc/rtd/topics/datasources.rst --- cloud-init-18.4/doc/rtd/topics/datasources.rst 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/doc/rtd/topics/datasources.rst 2019-01-28 20:07:03.000000000 +0000 @@ -18,7 +18,7 @@ through the typical usage of subclasses. Any metadata processed by cloud-init's datasources is persisted as -``/run/cloud0-init/instance-data.json``. 
Cloud-init provides tooling +``/run/cloud-init/instance-data.json``. Cloud-init provides tooling to quickly introspect some of that data. See :ref:`instance_metadata` for more information. @@ -80,6 +80,65 @@ def get_package_mirror_info(self) +Adding a new Datasource +----------------------- +The datasource objects have a few touch points with cloud-init. If you +are interested in adding a new datasource for your cloud platform you'll +need to take care of the following items: + +* **Identify a mechanism for positive identification of the platform**: + It is good practice for a cloud platform to positively identify itself + to the guest. This allows the guest to make educated decisions based + on the platform on which it is running. On the x86 and arm64 architectures, + many clouds identify themselves through DMI data. For example, + Oracle's public cloud provides the string 'OracleCloud.com' in the + DMI chassis-asset field. + + cloud-init enabled images produce a log file with details about the + platform. Reading through this log in ``/run/cloud-init/ds-identify.log`` + may provide the information needed to uniquely identify the platform. + If the log is not present, you can generate it by running from source + ``./tools/ds-identify`` or the installed location + ``/usr/lib/cloud-init/ds-identify``. + + The mechanism used to identify the platform will be required for the + ds-identify and datasource module sections below. + +* **Add datasource module ``cloudinit/sources/DataSource<CloudPlatform>.py``**: + It is suggested that you start by copying one of the simpler datasources + such as DataSourceHetzner; a minimal sketch of such a module is shown + below, after the list of implemented datasources. + +* **Add tests for datasource module**: + Add a new file with some tests for the module to + ``cloudinit/sources/test_<yourplatform>.py``. For example see + ``cloudinit/sources/tests/test_oracle.py`` + +* **Update ds-identify**: On systemd systems, ds-identify is used to detect + which datasource should be enabled or if cloud-init should run at all. + You'll need to make changes to ``tools/ds-identify``. + +* **Add tests for ds-identify**: Add relevant tests in a new class to + ``tests/unittests/test_ds_identify.py``. You can use ``TestOracle`` as an + example. + +* **Add your datasource name to the builtin list of datasources:** Add + your datasource module name to the end of the ``datasource_list`` + entry in ``cloudinit/settings.py``. + +* **Add your cloud platform to apport collection prompts:** Update the + list of cloud platforms in ``cloudinit/apport.py``. This list will be + provided to the user who invokes ``ubuntu-bug cloud-init``. + +* **Enable datasource by default in ubuntu packaging branches:** + Ubuntu packaging branches contain a template file + ``debian/cloud-init.templates`` that ultimately sets the default + datasource_list when installed via package. This file needs updating when + the commit gets into a package. + +* **Add documentation for your datasource**: You should add a new + file in ``doc/datasources/<cloudplatform>.rst`` + + Datasource Documentation ======================== The following is a list of the implemented datasources.
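The checklist above describes where a new datasource plugs in; what follows is a minimal sketch of what such a module might look like. It is illustrative only: the ``MyCloud`` name, its DMI asset tag and its metadata URL are assumptions invented for this example (no such platform exists), and error handling is omitted. See ``cloudinit/sources/DataSourceHetzner.py`` for a complete, real module.

.. sourcecode:: python

    # cloudinit/sources/DataSourceMyCloud.py (hypothetical module name)
    from cloudinit import sources, url_helper, util

    # Assumed metadata endpoint for the imaginary 'MyCloud' platform.
    MD_URL = 'http://169.254.169.254/metadata/v1.json'


    class DataSourceMyCloud(sources.DataSource):

        dsname = 'MyCloud'

        def _get_data(self):
            # Positively identify the platform first, here via DMI data.
            if util.read_dmi_data('chassis-asset-tag') != 'MyCloud':
                return False
            resp = url_helper.readurl(MD_URL, timeout=2, retries=2)
            self.metadata = util.load_json(resp.contents)
            self.userdata_raw = self.metadata.get('user-data')
            return True

        def _get_subplatform(self):
            # Feeds the standardized v1.subplatform instance-data key.
            return 'metadata (%s)' % MD_URL


    # Consulted by cloudinit/sources/__init__.py when picking a datasource.
    datasources = [
        (DataSourceMyCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
    ]


    def get_datasource_list(depends):
        return sources.list_from_depends(depends, datasources)

Keeping the identification check cheap and failing fast matters, because every enabled datasource may be probed during boot.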
diff -Nru cloud-init-18.4/doc/rtd/topics/instancedata.rst cloud-init-18.5-21-g8ee294d5/doc/rtd/topics/instancedata.rst --- cloud-init-18.4/doc/rtd/topics/instancedata.rst 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/doc/rtd/topics/instancedata.rst 2019-01-28 20:07:03.000000000 +0000 @@ -90,24 +90,46 @@ The standardized keys present: -+----------------------+-----------------------------------------------+---------------------------+ -| Key path | Description | Examples | -+======================+===============================================+===========================+ -| v1.cloud_name | The name of the cloud provided by metadata | aws, openstack, azure, | -| | key 'cloud-name' or the cloud-init datasource | configdrive, nocloud, | -| | name which was discovered. | ovf, etc. | -+----------------------+-----------------------------------------------+---------------------------+ -| v1.instance_id | Unique instance_id allocated by the cloud | i-<hash> | -+----------------------+-----------------------------------------------+---------------------------+ -| v1.local_hostname | The internal or local hostname of the system | ip-10-41-41-70, | -| | | <user-provided-hostname> | -+----------------------+-----------------------------------------------+---------------------------+ -| v1.region | The physical region/datacenter in which the | us-east-2 | -| | instance is deployed | | -+----------------------+-----------------------------------------------+---------------------------+ -| v1.availability_zone | The physical availability zone in which the | us-east-2b, nova, null | -| | instance is deployed | | -+----------------------+-----------------------------------------------+---------------------------+ ++----------------------+-----------------------------------------------+-----------------------------------+ +| Key path | Description | Examples | ++======================+===============================================+===================================+ +| v1._beta_keys | List of standardized keys still in 'beta'. | [subplatform] | +| | The format, intent or presence of these keys | | +| | can change. Do not consider them | | +| | production-ready. | | ++----------------------+-----------------------------------------------+-----------------------------------+ +| v1.cloud_name | Where possible this will indicate the 'name' | aws, openstack, azure, | +| | of the cloud this system is running on. This | configdrive, nocloud, | +| | is specifically different than the 'platform' | ovf, etc. | +| | below. As an example, the name of Amazon Web | | +| | Services is 'aws' while the platform is 'ec2'.| | +| | | | +| | If no specific name is determinable or | | +| | provided in meta-data, then this field may | | +| | contain the same content as 'platform'. | | ++----------------------+-----------------------------------------------+-----------------------------------+ +| v1.instance_id | Unique instance_id allocated by the cloud | i-<hash> | ++----------------------+-----------------------------------------------+-----------------------------------+ +| v1.local_hostname | The internal or local hostname of the system | ip-10-41-41-70, | +| | | <user-provided-hostname> | ++----------------------+-----------------------------------------------+-----------------------------------+ +| v1.platform | An attempt to identify the cloud platform | ec2, openstack, lxd, gce | +| | instance that the system is running on.
| nocloud, ovf | ++----------------------+-----------------------------------------------+-----------------------------------+ +| v1.subplatform | Additional platform details describing the | metadata (http://169.254.169.254),| +| | specific source or type of metadata used. | seed-dir (/path/to/seed-dir/), | +| | The format of subplatform will be: | config-disk (/dev/cd0), | +| | <subplatform_type> (<url_file_or_dev_path>) | configdrive (/dev/sr0) | ++----------------------+-----------------------------------------------+-----------------------------------+ +| v1.public_ssh_keys | A list of ssh keys provided to the instance | ['ssh-rsa AA...', ...] | +| | by the datasource metadata. | | ++----------------------+-----------------------------------------------+-----------------------------------+ +| v1.region | The physical region/datacenter in which the | us-east-2 | +| | instance is deployed | | ++----------------------+-----------------------------------------------+-----------------------------------+ +| v1.availability_zone | The physical availability zone in which the | us-east-2b, nova, null | +| | instance is deployed | | ++----------------------+-----------------------------------------------+-----------------------------------+ Below is an example of ``/run/cloud-init/instance_data.json`` on an EC2 @@ -117,10 +139,75 @@ { "base64_encoded_keys": [], - "sensitive_keys": [], "ds": { - "meta_data": { - "ami-id": "ami-014e1416b628b0cbf", + "_doc": "EXPERIMENTAL: The structure and format of content scoped under the 'ds' key may change in subsequent releases of cloud-init.", + "_metadata_api_version": "2016-09-02", + "dynamic": { + "instance-identity": { + "document": { + "accountId": "437526006925", + "architecture": "x86_64", + "availabilityZone": "us-east-2b", + "billingProducts": null, + "devpayProductCodes": null, + "imageId": "ami-079638aae7046bdd2", + "instanceId": "i-075f088c72ad3271c", + "instanceType": "t2.micro", + "kernelId": null, + "marketplaceProductCodes": null, + "pendingTime": "2018-10-05T20:10:43Z", + "privateIp": "10.41.41.95", + "ramdiskId": null, + "region": "us-east-2", + "version": "2017-09-30" + }, + "pkcs7": [ + "MIAGCSqGSIb3DQEHAqCAMIACAQExCzAJBgUrDgMCGgUAMIAGCSqGSIb3DQEHAaCAJIAEggHbewog", + "ICJkZXZwYXlQcm9kdWN0Q29kZXMiIDogbnVsbCwKICAibWFya2V0cGxhY2VQcm9kdWN0Q29kZXMi", + "IDogbnVsbCwKICAicHJpdmF0ZUlwIiA6ICIxMC40MS40MS45NSIsCiAgInZlcnNpb24iIDogIjIw", + "MTctMDktMzAiLAogICJpbnN0YW5jZUlkIiA6ICJpLTA3NWYwODhjNzJhZDMyNzFjIiwKICAiYmls", + "bGluZ1Byb2R1Y3RzIiA6IG51bGwsCiAgImluc3RhbmNlVHlwZSIgOiAidDIubWljcm8iLAogICJh", + "Y2NvdW50SWQiIDogIjQzNzUyNjAwNjkyNSIsCiAgImF2YWlsYWJpbGl0eVpvbmUiIDogInVzLWVh", + "c3QtMmIiLAogICJrZXJuZWxJZCIgOiBudWxsLAogICJyYW1kaXNrSWQiIDogbnVsbCwKICAiYXJj", + "aGl0ZWN0dXJlIiA6ICJ4ODZfNjQiLAogICJpbWFnZUlkIiA6ICJhbWktMDc5NjM4YWFlNzA0NmJk", + "ZDIiLAogICJwZW5kaW5nVGltZSIgOiAiMjAxOC0xMC0wNVQyMDoxMDo0M1oiLAogICJyZWdpb24i", + "IDogInVzLWVhc3QtMiIKfQAAAAAAADGCARcwggETAgEBMGkwXDELMAkGA1UEBhMCVVMxGTAXBgNV", + "BAgTEFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0FtYXpvbiBX", + "ZWIgU2VydmljZXMgTExDAgkAlrpI2eVeGmcwCQYFKw4DAhoFAKBdMBgGCSqGSIb3DQEJAzELBgkq", + "hkiG9w0BBwEwHAYJKoZIhvcNAQkFMQ8XDTE4MTAwNTIwMTA0OFowIwYJKoZIhvcNAQkEMRYEFK0k", + "Tz6n1A8/zU1AzFj0riNQORw2MAkGByqGSM44BAMELjAsAhRNrr174y98grPBVXUforN/6wZp8AIU", + "JLZBkrB2GJA8A4WJ1okq++jSrBIAAAAAAAA=" + ], + "rsa2048": [ + "MIAGCSqGSIb3DQEHAqCAMIACAQExDzANBglghkgBZQMEAgEFADCABgkqhkiG9w0BBwGggCSABIIB", + "23sKICAiZGV2cGF5UHJvZHVjdENvZGVzIiA6IG51bGwsCiAgIm1hcmtldHBsYWNlUHJvZHVjdENv", +
"ZGVzIiA6IG51bGwsCiAgInByaXZhdGVJcCIgOiAiMTAuNDEuNDEuOTUiLAogICJ2ZXJzaW9uIiA6", + "ICIyMDE3LTA5LTMwIiwKICAiaW5zdGFuY2VJZCIgOiAiaS0wNzVmMDg4YzcyYWQzMjcxYyIsCiAg", + "ImJpbGxpbmdQcm9kdWN0cyIgOiBudWxsLAogICJpbnN0YW5jZVR5cGUiIDogInQyLm1pY3JvIiwK", + "ICAiYWNjb3VudElkIiA6ICI0Mzc1MjYwMDY5MjUiLAogICJhdmFpbGFiaWxpdHlab25lIiA6ICJ1", + "cy1lYXN0LTJiIiwKICAia2VybmVsSWQiIDogbnVsbCwKICAicmFtZGlza0lkIiA6IG51bGwsCiAg", + "ImFyY2hpdGVjdHVyZSIgOiAieDg2XzY0IiwKICAiaW1hZ2VJZCIgOiAiYW1pLTA3OTYzOGFhZTcw", + "NDZiZGQyIiwKICAicGVuZGluZ1RpbWUiIDogIjIwMTgtMTAtMDVUMjA6MTA6NDNaIiwKICAicmVn", + "aW9uIiA6ICJ1cy1lYXN0LTIiCn0AAAAAAAAxggH/MIIB+wIBATBpMFwxCzAJBgNVBAYTAlVTMRkw", + "FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6", + "b24gV2ViIFNlcnZpY2VzIExMQwIJAM07oeX4xevdMA0GCWCGSAFlAwQCAQUAoGkwGAYJKoZIhvcN", + "AQkDMQsGCSqGSIb3DQEHATAcBgkqhkiG9w0BCQUxDxcNMTgxMDA1MjAxMDQ4WjAvBgkqhkiG9w0B", + "CQQxIgQgkYz0pZk3zJKBi4KP4egeOKJl/UYwu5UdE7id74pmPwMwDQYJKoZIhvcNAQEBBQAEggEA", + "dC3uIGGNul1OC1mJKSH3XoBWsYH20J/xhIdftYBoXHGf2BSFsrs9ZscXd2rKAKea4pSPOZEYMXgz", + "lPuT7W0WU89N3ZKviy/ReMSRjmI/jJmsY1lea6mlgcsJXreBXFMYucZvyeWGHdnCjamoKWXkmZlM", + "mSB1gshWy8Y7DzoKviYPQZi5aI54XK2Upt4kGme1tH1NI2Cq+hM4K+adxTbNhS3uzvWaWzMklUuU", + "QHX2GMmjAVRVc8vnA8IAsBCJJp+gFgYzi09IK+cwNgCFFPADoG6jbMHHf4sLB3MUGpiA+G9JlCnM", + "fmkjI2pNRB8spc0k4UG4egqLrqCz67WuK38tjwAAAAAAAA==" + ], + "signature": [ + "Tsw6h+V3WnxrNVSXBYIOs1V4j95YR1mLPPH45XnhX0/Ei3waJqf7/7EEKGYP1Cr4PTYEULtZ7Mvf", + "+xJpM50Ivs2bdF7o0c4vnplRWe3f06NI9pv50dr110j/wNzP4MZ1pLhJCqubQOaaBTF3LFutgRrt", + "r4B0mN3p7EcqD8G+ll0=" + ] + } + }, + "meta-data": { + "ami-id": "ami-079638aae7046bdd2", "ami-launch-index": "0", "ami-manifest-path": "(unknown)", "block-device-mapping": { @@ -129,31 +216,31 @@ "ephemeral1": "sdc", "root": "/dev/sda1" }, - "hostname": "ip-10-41-41-70.us-east-2.compute.internal", + "hostname": "ip-10-41-41-95.us-east-2.compute.internal", "instance-action": "none", - "instance-id": "i-04fa31cfc55aa7976", + "instance-id": "i-075f088c72ad3271c", "instance-type": "t2.micro", - "local-hostname": "ip-10-41-41-70.us-east-2.compute.internal", - "local-ipv4": "10.41.41.70", - "mac": "06:b6:92:dd:9d:24", + "local-hostname": "ip-10-41-41-95.us-east-2.compute.internal", + "local-ipv4": "10.41.41.95", + "mac": "06:74:8f:39:cd:a6", "metrics": { "vhostmd": "" }, "network": { "interfaces": { "macs": { - "06:b6:92:dd:9d:24": { + "06:74:8f:39:cd:a6": { "device-number": "0", - "interface-id": "eni-08c0c9fdb99b6e6f4", + "interface-id": "eni-052058bbd7831eaae", "ipv4-associations": { - "18.224.22.43": "10.41.41.70" + "18.218.221.122": "10.41.41.95" }, - "local-hostname": "ip-10-41-41-70.us-east-2.compute.internal", - "local-ipv4s": "10.41.41.70", - "mac": "06:b6:92:dd:9d:24", + "local-hostname": "ip-10-41-41-95.us-east-2.compute.internal", + "local-ipv4s": "10.41.41.95", + "mac": "06:74:8f:39:cd:a6", "owner-id": "437526006925", - "public-hostname": "ec2-18-224-22-43.us-east-2.compute.amazonaws.com", - "public-ipv4s": "18.224.22.43", + "public-hostname": "ec2-18-218-221-122.us-east-2.compute.amazonaws.com", + "public-ipv4s": "18.218.221.122", "security-group-ids": "sg-828247e9", "security-groups": "Cloud-init integration test secgroup", "subnet-id": "subnet-282f3053", @@ -171,16 +258,14 @@ "availability-zone": "us-east-2b" }, "profile": "default-hvm", - "public-hostname": "ec2-18-224-22-43.us-east-2.compute.amazonaws.com", - "public-ipv4": "18.224.22.43", + "public-hostname": "ec2-18-218-221-122.us-east-2.compute.amazonaws.com", + "public-ipv4": "18.218.221.122", 
"public-keys": { "cloud-init-integration": [ - "ssh-rsa - AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL7uWGj8cgWyIOaspgKdVy0cKJ+UTjfv7jBOjG2H/GN8bJVXy72XAvnhM0dUM+CCs8FOf0YlPX+Frvz2hKInrmRhZVwRSL129PasD12MlI3l44u6IwS1o/W86Q+tkQYEljtqDOo0a+cOsaZkvUNzUyEXUwz/lmYa6G4hMKZH4NBj7nbAAF96wsMCoyNwbWryBnDYUr6wMbjRR1J9Pw7Xh7WRC73wy4Va2YuOgbD3V/5ZrFPLbWZW/7TFXVrql04QVbyei4aiFR5n//GvoqwQDNe58LmbzX/xvxyKJYdny2zXmdAhMxbrpFQsfpkJ9E/H5w0yOdSvnWbUoG5xNGoOB - cloud-init-integration" + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL7uWGj8cgWyIOaspgKdVy0cKJ+UTjfv7jBOjG2H/GN8bJVXy72XAvnhM0dUM+CCs8FOf0YlPX+Frvz2hKInrmRhZVwRSL129PasD12MlI3l44u6IwS1o/W86Q+tkQYEljtqDOo0a+cOsaZkvUNzUyEXUwz/lmYa6G4hMKZH4NBj7nbAAF96wsMCoyNwbWryBnDYUr6wMbjRR1J9Pw7Xh7WRC73wy4Va2YuOgbD3V/5ZrFPLbWZW/7TFXVrql04QVbyei4aiFR5n//GvoqwQDNe58LmbzX/xvxyKJYdny2zXmdAhMxbrpFQsfpkJ9E/H5w0yOdSvnWbUoG5xNGoOB cloud-init-integration" ] }, - "reservation-id": "r-06ab75e9346f54333", + "reservation-id": "r-0594a20e31f6cfe46", "security-groups": "Cloud-init integration test secgroup", "services": { "domain": "amazonaws.com", @@ -188,16 +273,22 @@ } } }, + "sensitive_keys": [], "v1": { + "_beta_keys": [ + "subplatform" + ], "availability-zone": "us-east-2b", "availability_zone": "us-east-2b", - "cloud-name": "aws", "cloud_name": "aws", - "instance-id": "i-04fa31cfc55aa7976", - "instance_id": "i-04fa31cfc55aa7976", - "local-hostname": "ip-10-41-41-70", - "local_hostname": "ip-10-41-41-70", - "region": "us-east-2" + "instance_id": "i-075f088c72ad3271c", + "local_hostname": "ip-10-41-41-95", + "platform": "ec2", + "public_ssh_keys": [ + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL7uWGj8cgWyIOaspgKdVy0cKJ+UTjfv7jBOjG2H/GN8bJVXy72XAvnhM0dUM+CCs8FOf0YlPX+Frvz2hKInrmRhZVwRSL129PasD12MlI3l44u6IwS1o/W86Q+tkQYEljtqDOo0a+cOsaZkvUNzUyEXUwz/lmYa6G4hMKZH4NBj7nbAAF96wsMCoyNwbWryBnDYUr6wMbjRR1J9Pw7Xh7WRC73wy4Va2YuOgbD3V/5ZrFPLbWZW/7TFXVrql04QVbyei4aiFR5n//GvoqwQDNe58LmbzX/xvxyKJYdny2zXmdAhMxbrpFQsfpkJ9E/H5w0yOdSvnWbUoG5xNGoOB cloud-init-integration" + ], + "region": "us-east-2", + "subplatform": "metadata (http://169.254.169.254)" } } diff -Nru cloud-init-18.4/doc/rtd/topics/network-config-format-v1.rst cloud-init-18.5-21-g8ee294d5/doc/rtd/topics/network-config-format-v1.rst --- cloud-init-18.4/doc/rtd/topics/network-config-format-v1.rst 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/doc/rtd/topics/network-config-format-v1.rst 2019-01-28 20:07:03.000000000 +0000 @@ -384,7 +384,7 @@ - ``address``: IPv4 or IPv6 address. It may include CIDR netmask notation. - ``netmask``: IPv4 subnet mask in dotted format or CIDR notation. - ``gateway``: IPv4 address of the default gateway for this subnet. -- ``dns_nameserver``: Specify a list of IPv4 dns server IPs to end up in +- ``dns_nameservers``: Specify a list of IPv4 dns server IPs to end up in resolv.conf. - ``dns_search``: Specify a list of search paths to be included in resolv.conf. diff -Nru cloud-init-18.4/HACKING.rst cloud-init-18.5-21-g8ee294d5/HACKING.rst --- cloud-init-18.4/HACKING.rst 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/HACKING.rst 2019-01-28 20:07:03.000000000 +0000 @@ -11,10 +11,10 @@ * To contribute, you must sign the Canonical `contributor license agreement`_ - If you have already signed it as an individual, your Launchpad user will be listed in the `contributor-agreement-canonical`_ group. Unfortunately there is no easy way to check if an organization or company you are doing work for has signed. 
If you are unsure or have questions, email `Scott Moser `_ or ping smoser in ``#cloud-init`` channel via freenode. + If you have already signed it as an individual, your Launchpad user will be listed in the `contributor-agreement-canonical`_ group. Unfortunately there is no easy way to check if an organization or company you are doing work for has signed. If you are unsure or have questions, email `Josh Powers `_ or ping powersj in ``#cloud-init`` channel via freenode. When prompted for 'Project contact' or 'Canonical Project Manager' enter - 'Scott Moser'. + 'Josh Powers'. * Configure git with your email and name for commit messages. diff -Nru cloud-init-18.4/packages/redhat/cloud-init.spec.in cloud-init-18.5-21-g8ee294d5/packages/redhat/cloud-init.spec.in --- cloud-init-18.4/packages/redhat/cloud-init.spec.in 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/packages/redhat/cloud-init.spec.in 2019-01-28 20:07:03.000000000 +0000 @@ -191,6 +191,7 @@ # Program binaries %{_bindir}/cloud-init* +%{_bindir}/cloud-id* # Docs %doc LICENSE ChangeLog TODO.rst requirements.txt diff -Nru cloud-init-18.4/packages/suse/cloud-init.spec.in cloud-init-18.5-21-g8ee294d5/packages/suse/cloud-init.spec.in --- cloud-init-18.4/packages/suse/cloud-init.spec.in 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/packages/suse/cloud-init.spec.in 2019-01-28 20:07:03.000000000 +0000 @@ -93,6 +93,7 @@ # Program binaries %{_bindir}/cloud-init* +%{_bindir}/cloud-id* # systemd files /usr/lib/systemd/system-generators/* diff -Nru cloud-init-18.4/setup.py cloud-init-18.5-21-g8ee294d5/setup.py --- cloud-init-18.4/setup.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/setup.py 2019-01-28 20:07:03.000000000 +0000 @@ -30,6 +30,8 @@ def is_f(p): return os.path.isfile(p) +def is_generator(p): + return '-generator' in p def tiny_p(cmd, capture=True): # Darn python 2.6 doesn't have check_output (argggg) @@ -90,7 +92,7 @@ return str(deps).splitlines() -def render_tmpl(template): +def render_tmpl(template, mode=None): """render template into a tmpdir under same dir as setup.py This is rendered to a temporary directory under the top level @@ -119,6 +121,8 @@ VARIANT, template, fpath]) else: tiny_p([sys.executable, './tools/render-cloudcfg', template, fpath]) + if mode: + os.chmod(fpath, mode) # return path relative to setup.py return os.path.join(os.path.basename(tmpd), bname) @@ -138,8 +142,11 @@ 'systemd': [render_tmpl(f) for f in (glob('systemd/*.tmpl') + glob('systemd/*.service') + - glob('systemd/*.target')) if is_f(f)], - 'systemd.generators': [f for f in glob('systemd/*-generator') if is_f(f)], + glob('systemd/*.target')) + if (is_f(f) and not is_generator(f))], + 'systemd.generators': [ + render_tmpl(f, mode=0o755) + for f in glob('systemd/*') if is_f(f) and is_generator(f)], 'upstart': [f for f in glob('upstart/*') if is_f(f)], } INITSYS_ROOTS = { @@ -282,7 +289,8 @@ cmdclass=cmdclass, entry_points={ 'console_scripts': [ - 'cloud-init = cloudinit.cmd.main:main' + 'cloud-init = cloudinit.cmd.main:main', + 'cloud-id = cloudinit.cmd.cloud_id:main' ], } ) diff -Nru cloud-init-18.4/systemd/cloud-init-generator cloud-init-18.5-21-g8ee294d5/systemd/cloud-init-generator --- cloud-init-18.4/systemd/cloud-init-generator 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/systemd/cloud-init-generator 1970-01-01 00:00:00.000000000 +0000 @@ -1,170 +0,0 @@ -#!/bin/sh -set -f - -LOG="" -DEBUG_LEVEL=1 -LOG_D="/run/cloud-init" -ENABLE="enabled" -DISABLE="disabled" 
-FOUND="found" -NOTFOUND="notfound" -RUN_ENABLED_FILE="$LOG_D/$ENABLE" -CLOUD_SYSTEM_TARGET="/lib/systemd/system/cloud-init.target" -CLOUD_TARGET_NAME="cloud-init.target" -# lxc sets 'container', but lets make that explicitly a global -CONTAINER="${container}" - -debug() { - local lvl="$1" - shift - [ "$lvl" -gt "$DEBUG_LEVEL" ] && return - if [ -z "$LOG" ]; then - local log="$LOG_D/${0##*/}.log" - { [ -d "$LOG_D" ] || mkdir -p "$LOG_D"; } && - { : > "$log"; } >/dev/null 2>&1 && LOG="$log" || - LOG="/dev/kmsg" - fi - echo "$@" >> "$LOG" -} - -etc_file() { - local pprefix="${1:-/etc/cloud/cloud-init.}" - _RET="unset" - [ -f "${pprefix}$ENABLE" ] && _RET="$ENABLE" && return 0 - [ -f "${pprefix}$DISABLE" ] && _RET="$DISABLE" && return 0 - return 0 -} - -read_proc_cmdline() { - # return /proc/cmdline for non-container, and /proc/1/cmdline for container - local ctname="systemd" - if [ -n "$CONTAINER" ] && ctname=$CONTAINER || - systemd-detect-virt --container --quiet; then - if { _RET=$(tr '\0' ' ' < /proc/1/cmdline); } 2>/dev/null; then - _RET_MSG="container[$ctname]: pid 1 cmdline" - return - fi - _RET="" - _RET_MSG="container[$ctname]: pid 1 cmdline not available" - return 0 - fi - - _RET_MSG="/proc/cmdline" - read _RET < /proc/cmdline -} - -kernel_cmdline() { - local cmdline="" tok="" - if [ -n "${KERNEL_CMDLINE+x}" ]; then - # use KERNEL_CMDLINE if present in environment even if empty - cmdline=${KERNEL_CMDLINE} - debug 1 "kernel command line from env KERNEL_CMDLINE: $cmdline" - elif read_proc_cmdline; then - read_proc_cmdline && cmdline="$_RET" - debug 1 "kernel command line ($_RET_MSG): $cmdline" - fi - _RET="unset" - cmdline=" $cmdline " - tok=${cmdline##* cloud-init=} - [ "$tok" = "$cmdline" ] && _RET="unset" - tok=${tok%% *} - [ "$tok" = "$ENABLE" -o "$tok" = "$DISABLE" ] && _RET="$tok" - return 0 -} - -default() { - _RET="$ENABLE" -} - -check_for_datasource() { - local ds_rc="" dsidentify="/usr/lib/cloud-init/ds-identify" - if [ ! -x "$dsidentify" ]; then - debug 1 "no ds-identify in $dsidentify. _RET=$FOUND" - return 0 - fi - $dsidentify - ds_rc=$? - debug 1 "ds-identify rc=$ds_rc" - if [ "$ds_rc" = "0" ]; then - _RET="$FOUND" - debug 1 "ds-identify _RET=$_RET" - return 0 - fi - _RET="$NOTFOUND" - debug 1 "ds-identify _RET=$_RET" - return 1 -} - -main() { - local normal_d="$1" early_d="$2" late_d="$3" - local target_name="multi-user.target" gen_d="$early_d" - local link_path="$gen_d/${target_name}.wants/${CLOUD_TARGET_NAME}" - local ds="$NOTFOUND" - - debug 1 "$0 normal=$normal_d early=$early_d late=$late_d" - debug 2 "$0 $*" - - local search result="error" ret="" - for search in kernel_cmdline etc_file default; do - if $search; then - debug 1 "$search found $_RET" - [ "$_RET" = "$ENABLE" -o "$_RET" = "$DISABLE" ] && - result=$_RET && break - else - ret=$? 
- debug 0 "search $search returned $ret" - fi - done - - # enable AND ds=found == enable - # enable AND ds=notfound == disable - # disable || == disabled - if [ "$result" = "$ENABLE" ]; then - debug 1 "checking for datasource" - check_for_datasource - ds=$_RET - if [ "$ds" = "$NOTFOUND" ]; then - debug 1 "cloud-init is enabled but no datasource found, disabling" - result="$DISABLE" - fi - fi - - if [ "$result" = "$ENABLE" ]; then - if [ -e "$link_path" ]; then - debug 1 "already enabled: no change needed" - else - [ -d "${link_path%/*}" ] || mkdir -p "${link_path%/*}" || - debug 0 "failed to make dir $link_path" - if ln -snf "$CLOUD_SYSTEM_TARGET" "$link_path"; then - debug 1 "enabled via $link_path -> $CLOUD_SYSTEM_TARGET" - else - ret=$? - debug 0 "[$ret] enable failed:" \ - "ln $CLOUD_SYSTEM_TARGET $link_path" - fi - fi - : > "$RUN_ENABLED_FILE" - elif [ "$result" = "$DISABLE" ]; then - if [ -f "$link_path" ]; then - if rm -f "$link_path"; then - debug 1 "disabled. removed existing $link_path" - else - ret=$? - debug 0 "[$ret] disable failed, remove $link_path" - fi - else - debug 1 "already disabled: no change needed [no $link_path]" - fi - if [ -e "$RUN_ENABLED_FILE" ]; then - rm -f "$RUN_ENABLED_FILE" - fi - else - debug 0 "unexpected result '$result' 'ds=$ds'" - ret=3 - fi - return $ret -} - -main "$@" - -# vi: ts=4 expandtab diff -Nru cloud-init-18.4/systemd/cloud-init-generator.tmpl cloud-init-18.5-21-g8ee294d5/systemd/cloud-init-generator.tmpl --- cloud-init-18.4/systemd/cloud-init-generator.tmpl 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/systemd/cloud-init-generator.tmpl 2019-01-28 20:07:03.000000000 +0000 @@ -0,0 +1,175 @@ +## template:jinja +#!/bin/sh +set -f + +LOG="" +DEBUG_LEVEL=1 +LOG_D="/run/cloud-init" +ENABLE="enabled" +DISABLE="disabled" +FOUND="found" +NOTFOUND="notfound" +RUN_ENABLED_FILE="$LOG_D/$ENABLE" +{% if variant in ["suse"] %} +CLOUD_SYSTEM_TARGET="/usr/lib/systemd/system/cloud-init.target" +{% else %} +CLOUD_SYSTEM_TARGET="/lib/systemd/system/cloud-init.target" +{% endif %} +CLOUD_TARGET_NAME="cloud-init.target" +# lxc sets 'container', but lets make that explicitly a global +CONTAINER="${container}" + +debug() { + local lvl="$1" + shift + [ "$lvl" -gt "$DEBUG_LEVEL" ] && return + if [ -z "$LOG" ]; then + local log="$LOG_D/${0##*/}.log" + { [ -d "$LOG_D" ] || mkdir -p "$LOG_D"; } && + { : > "$log"; } >/dev/null 2>&1 && LOG="$log" || + LOG="/dev/kmsg" + fi + echo "$@" >> "$LOG" +} + +etc_file() { + local pprefix="${1:-/etc/cloud/cloud-init.}" + _RET="unset" + [ -f "${pprefix}$ENABLE" ] && _RET="$ENABLE" && return 0 + [ -f "${pprefix}$DISABLE" ] && _RET="$DISABLE" && return 0 + return 0 +} + +read_proc_cmdline() { + # return /proc/cmdline for non-container, and /proc/1/cmdline for container + local ctname="systemd" + if [ -n "$CONTAINER" ] && ctname=$CONTAINER || + systemd-detect-virt --container --quiet; then + if { _RET=$(tr '\0' ' ' < /proc/1/cmdline); } 2>/dev/null; then + _RET_MSG="container[$ctname]: pid 1 cmdline" + return + fi + _RET="" + _RET_MSG="container[$ctname]: pid 1 cmdline not available" + return 0 + fi + + _RET_MSG="/proc/cmdline" + read _RET < /proc/cmdline +} + +kernel_cmdline() { + local cmdline="" tok="" + if [ -n "${KERNEL_CMDLINE+x}" ]; then + # use KERNEL_CMDLINE if present in environment even if empty + cmdline=${KERNEL_CMDLINE} + debug 1 "kernel command line from env KERNEL_CMDLINE: $cmdline" + elif read_proc_cmdline; then + read_proc_cmdline && cmdline="$_RET" + debug 1 "kernel command line ($_RET_MSG): 
$cmdline" + fi + _RET="unset" + cmdline=" $cmdline " + tok=${cmdline##* cloud-init=} + [ "$tok" = "$cmdline" ] && _RET="unset" + tok=${tok%% *} + [ "$tok" = "$ENABLE" -o "$tok" = "$DISABLE" ] && _RET="$tok" + return 0 +} + +default() { + _RET="$ENABLE" +} + +check_for_datasource() { + local ds_rc="" dsidentify="/usr/lib/cloud-init/ds-identify" + if [ ! -x "$dsidentify" ]; then + debug 1 "no ds-identify in $dsidentify. _RET=$FOUND" + return 0 + fi + $dsidentify + ds_rc=$? + debug 1 "ds-identify rc=$ds_rc" + if [ "$ds_rc" = "0" ]; then + _RET="$FOUND" + debug 1 "ds-identify _RET=$_RET" + return 0 + fi + _RET="$NOTFOUND" + debug 1 "ds-identify _RET=$_RET" + return 1 +} + +main() { + local normal_d="$1" early_d="$2" late_d="$3" + local target_name="multi-user.target" gen_d="$early_d" + local link_path="$gen_d/${target_name}.wants/${CLOUD_TARGET_NAME}" + local ds="$NOTFOUND" + + debug 1 "$0 normal=$normal_d early=$early_d late=$late_d" + debug 2 "$0 $*" + + local search result="error" ret="" + for search in kernel_cmdline etc_file default; do + if $search; then + debug 1 "$search found $_RET" + [ "$_RET" = "$ENABLE" -o "$_RET" = "$DISABLE" ] && + result=$_RET && break + else + ret=$? + debug 0 "search $search returned $ret" + fi + done + + # enable AND ds=found == enable + # enable AND ds=notfound == disable + # disable || == disabled + if [ "$result" = "$ENABLE" ]; then + debug 1 "checking for datasource" + check_for_datasource + ds=$_RET + if [ "$ds" = "$NOTFOUND" ]; then + debug 1 "cloud-init is enabled but no datasource found, disabling" + result="$DISABLE" + fi + fi + + if [ "$result" = "$ENABLE" ]; then + if [ -e "$link_path" ]; then + debug 1 "already enabled: no change needed" + else + [ -d "${link_path%/*}" ] || mkdir -p "${link_path%/*}" || + debug 0 "failed to make dir $link_path" + if ln -snf "$CLOUD_SYSTEM_TARGET" "$link_path"; then + debug 1 "enabled via $link_path -> $CLOUD_SYSTEM_TARGET" + else + ret=$? + debug 0 "[$ret] enable failed:" \ + "ln $CLOUD_SYSTEM_TARGET $link_path" + fi + fi + : > "$RUN_ENABLED_FILE" + elif [ "$result" = "$DISABLE" ]; then + if [ -f "$link_path" ]; then + if rm -f "$link_path"; then + debug 1 "disabled. removed existing $link_path" + else + ret=$? + debug 0 "[$ret] disable failed, remove $link_path" + fi + else + debug 1 "already disabled: no change needed [no $link_path]" + fi + if [ -e "$RUN_ENABLED_FILE" ]; then + rm -f "$RUN_ENABLED_FILE" + fi + else + debug 0 "unexpected result '$result' 'ds=$ds'" + ret=3 + fi + return $ret +} + +main "$@" + +# vi: ts=4 expandtab diff -Nru cloud-init-18.4/systemd/cloud-init.service.tmpl cloud-init-18.5-21-g8ee294d5/systemd/cloud-init.service.tmpl --- cloud-init-18.4/systemd/cloud-init.service.tmpl 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/systemd/cloud-init.service.tmpl 2019-01-28 20:07:03.000000000 +0000 @@ -14,8 +14,7 @@ After=network.service {% endif %} {% if variant in ["suse"] %} -Requires=wicked.service -After=wicked.service +Before=wicked.service # setting hostname via hostnamectl depends on dbus, which otherwise # would not be guaranteed at this point. 
After=dbus.service diff -Nru cloud-init-18.4/templates/sources.list.ubuntu.tmpl cloud-init-18.5-21-g8ee294d5/templates/sources.list.ubuntu.tmpl --- cloud-init-18.4/templates/sources.list.ubuntu.tmpl 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/templates/sources.list.ubuntu.tmpl 2019-01-28 20:07:03.000000000 +0000 @@ -10,30 +10,30 @@ # See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to # newer versions of the distribution. deb {{mirror}} {{codename}} main restricted -deb-src {{mirror}} {{codename}} main restricted +# deb-src {{mirror}} {{codename}} main restricted ## Major bug fix updates produced after the final release of the ## distribution. deb {{mirror}} {{codename}}-updates main restricted -deb-src {{mirror}} {{codename}}-updates main restricted +# deb-src {{mirror}} {{codename}}-updates main restricted ## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu ## team. Also, please note that software in universe WILL NOT receive any ## review or updates from the Ubuntu security team. deb {{mirror}} {{codename}} universe -deb-src {{mirror}} {{codename}} universe +# deb-src {{mirror}} {{codename}} universe deb {{mirror}} {{codename}}-updates universe -deb-src {{mirror}} {{codename}}-updates universe +# deb-src {{mirror}} {{codename}}-updates universe -## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu -## team, and may not be under a free licence. Please satisfy yourself as to -## your rights to use the software. Also, please note that software in +## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu +## team, and may not be under a free licence. Please satisfy yourself as to +## your rights to use the software. Also, please note that software in ## multiverse WILL NOT receive any review or updates from the Ubuntu ## security team. deb {{mirror}} {{codename}} multiverse -deb-src {{mirror}} {{codename}} multiverse +# deb-src {{mirror}} {{codename}} multiverse deb {{mirror}} {{codename}}-updates multiverse -deb-src {{mirror}} {{codename}}-updates multiverse +# deb-src {{mirror}} {{codename}}-updates multiverse ## N.B. software from this repository may not have been tested as ## extensively as that contained in the main release, although it includes @@ -41,14 +41,7 @@ ## Also, please note that software in backports WILL NOT receive any review ## or updates from the Ubuntu security team. deb {{mirror}} {{codename}}-backports main restricted universe multiverse -deb-src {{mirror}} {{codename}}-backports main restricted universe multiverse - -deb {{security}} {{codename}}-security main restricted -deb-src {{security}} {{codename}}-security main restricted -deb {{security}} {{codename}}-security universe -deb-src {{security}} {{codename}}-security universe -deb {{security}} {{codename}}-security multiverse -deb-src {{security}} {{codename}}-security multiverse +# deb-src {{mirror}} {{codename}}-backports main restricted universe multiverse ## Uncomment the following two lines to add software from Canonical's ## 'partner' repository. @@ -56,3 +49,10 @@ ## respective vendors as a service to Ubuntu users. 
# deb http://archive.canonical.com/ubuntu {{codename}} partner # deb-src http://archive.canonical.com/ubuntu {{codename}} partner + +deb {{security}} {{codename}}-security main restricted +# deb-src {{security}} {{codename}}-security main restricted +deb {{security}} {{codename}}-security universe +# deb-src {{security}} {{codename}}-security universe +deb {{security}} {{codename}}-security multiverse +# deb-src {{security}} {{codename}}-security multiverse diff -Nru cloud-init-18.4/tests/cloud_tests/releases.yaml cloud-init-18.5-21-g8ee294d5/tests/cloud_tests/releases.yaml --- cloud-init-18.4/tests/cloud_tests/releases.yaml 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/tests/cloud_tests/releases.yaml 2019-01-28 20:07:03.000000000 +0000 @@ -129,6 +129,22 @@ releases: # UBUNTU ================================================================= + disco: + # EOL: Jan 2020 + default: + enabled: true + release: disco + version: 19.04 + os: ubuntu + feature_groups: + - base + - debian_base + - ubuntu_specific + lxd: + sstreams_server: https://cloud-images.ubuntu.com/daily + alias: disco + setup_overrides: null + override_templates: false cosmic: # EOL: Jul 2019 default: diff -Nru cloud-init-18.4/tests/cloud_tests/testcases/base.py cloud-init-18.5-21-g8ee294d5/tests/cloud_tests/testcases/base.py --- cloud-init-18.4/tests/cloud_tests/testcases/base.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/tests/cloud_tests/testcases/base.py 2019-01-28 20:07:03.000000000 +0000 @@ -177,7 +177,7 @@ instance_data['base64_encoded_keys']) ds = instance_data.get('ds', {}) v1_data = instance_data.get('v1', {}) - metadata = ds.get('meta_data', {}) + metadata = ds.get('meta-data', {}) macs = metadata.get( 'network', {}).get('interfaces', {}).get('macs', {}) if not macs: @@ -195,6 +195,9 @@ self.assertIsNotNone( v1_data['availability_zone'], 'expected ec2 availability_zone') self.assertEqual('aws', v1_data['cloud_name']) + self.assertEqual('ec2', v1_data['platform']) + self.assertEqual( + 'metadata (http://169.254.169.254)', v1_data['subplatform']) self.assertIn('i-', v1_data['instance_id']) self.assertIn('ip-', v1_data['local_hostname']) self.assertIsNotNone(v1_data['region'], 'expected ec2 region') @@ -220,7 +223,11 @@ instance_data = json.loads(out) v1_data = instance_data.get('v1', {}) self.assertItemsEqual([], sorted(instance_data['base64_encoded_keys'])) - self.assertEqual('nocloud', v1_data['cloud_name']) + self.assertEqual('unknown', v1_data['cloud_name']) + self.assertEqual('lxd', v1_data['platform']) + self.assertEqual( + 'seed-dir (/var/lib/cloud/seed/nocloud-net)', + v1_data['subplatform']) self.assertIsNone( v1_data['availability_zone'], 'found unexpected lxd availability_zone %s' % @@ -253,7 +260,12 @@ instance_data = json.loads(out) v1_data = instance_data.get('v1', {}) self.assertItemsEqual([], instance_data['base64_encoded_keys']) - self.assertEqual('nocloud', v1_data['cloud_name']) + self.assertEqual('unknown', v1_data['cloud_name']) + self.assertEqual('nocloud', v1_data['platform']) + subplatform = v1_data['subplatform'] + self.assertIsNotNone( + re.match(r'config-disk \(\/dev\/[a-z]{3}\)', subplatform), + 'kvm subplatform "%s" != "config-disk (/dev/...)"' % subplatform) self.assertIsNone( v1_data['availability_zone'], 'found unexpected kvm availability_zone %s' % diff -Nru cloud-init-18.4/tests/cloud_tests/testcases/modules/apt_configure_primary.py cloud-init-18.5-21-g8ee294d5/tests/cloud_tests/testcases/modules/apt_configure_primary.py --- 
cloud-init-18.4/tests/cloud_tests/testcases/modules/apt_configure_primary.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/tests/cloud_tests/testcases/modules/apt_configure_primary.py 2019-01-28 20:07:03.000000000 +0000 @@ -9,12 +9,16 @@ def test_ubuntu_sources(self): """Test no default Ubuntu entries exist.""" - out = self.get_data_file('ubuntu.sources.list') - self.assertEqual(0, int(out)) + out = self.get_data_file('sources.list') + ubuntu_source_count = len( + [line for line in out.split('\n') if 'archive.ubuntu.com' in line]) + self.assertEqual(0, ubuntu_source_count) def test_gatech_sources(self): - """Test GaTech entires exist.""" - out = self.get_data_file('gatech.sources.list') - self.assertEqual(20, int(out)) + """Test GaTech entries exist.""" + out = self.get_data_file('sources.list') + gatech_source_count = len( + [line for line in out.split('\n') if 'gtlib.gatech.edu' in line]) + self.assertGreater(gatech_source_count, 0) # vi: ts=4 expandtab diff -Nru cloud-init-18.4/tests/cloud_tests/testcases/modules/apt_configure_primary.yaml cloud-init-18.5-21-g8ee294d5/tests/cloud_tests/testcases/modules/apt_configure_primary.yaml --- cloud-init-18.4/tests/cloud_tests/testcases/modules/apt_configure_primary.yaml 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/tests/cloud_tests/testcases/modules/apt_configure_primary.yaml 2019-01-28 20:07:03.000000000 +0000 @@ -12,13 +12,6 @@ - default uri: "http://www.gtlib.gatech.edu/pub/ubuntu-releases/" collect_scripts: - ubuntu.sources.list: | - #!/bin/bash - grep -v '^#' /etc/apt/sources.list | sed '/^\s*$/d' | grep -c archive.ubuntu.com - gatech.sources.list: | - #!/bin/bash - grep -v '^#' /etc/apt/sources.list | sed '/^\s*$/d' | grep -c gtlib.gatech.edu - sources.list: | #!/bin/bash cat /etc/apt/sources.list diff -Nru cloud-init-18.4/tests/unittests/test_builtin_handlers.py cloud-init-18.5-21-g8ee294d5/tests/unittests/test_builtin_handlers.py --- cloud-init-18.4/tests/unittests/test_builtin_handlers.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/tests/unittests/test_builtin_handlers.py 2019-01-28 20:07:03.000000000 +0000 @@ -3,6 +3,7 @@ """Tests of the built-in user data handlers.""" import copy +import errno import os import shutil import tempfile @@ -201,6 +202,30 @@ self.assertFalse( os.path.exists(script_file), 'Unexpected file created %s' % script_file) + + def test_jinja_template_handle_errors_on_unreadable_instance_data(self): + """If instance-data is unreadable, raise an error from handle_part.""" + script_handler = ShellScriptPartHandler(self.paths) + instance_json = os.path.join(self.run_dir, 'instance-data.json') + util.write_file(instance_json, util.json_dumps({})) + h = JinjaTemplatePartHandler( + self.paths, sub_handlers=[script_handler]) + with mock.patch(self.mpath + 'load_file') as m_load: + with self.assertRaises(RuntimeError) as context_manager: + m_load.side_effect = OSError(errno.EACCES, 'Not allowed') + h.handle_part( + data='data', ctype="!" + handlers.CONTENT_START, + filename='part01', + payload='## template: jinja \n#!/bin/bash\necho himom', + frequency='freq', headers='headers') + script_file = os.path.join(script_handler.script_dir, 'part01') + self.assertEqual( + 'Cannot render jinja template vars. No read permission on' + " '{rdir}/instance-data.json'. 
Try sudo".format(rdir=self.run_dir), + str(context_manager.exception)) + self.assertFalse( + os.path.exists(script_file), + 'Unexpected file created %s' % script_file) @skipUnlessJinja() def test_jinja_template_handle_renders_jinja_content(self): diff -Nru cloud-init-18.4/tests/unittests/test_cli.py cloud-init-18.5-21-g8ee294d5/tests/unittests/test_cli.py --- cloud-init-18.4/tests/unittests/test_cli.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/tests/unittests/test_cli.py 2019-01-28 20:07:03.000000000 +0000 @@ -246,18 +246,18 @@ self.assertEqual('cc_ntp', parseargs.name) self.assertFalse(parseargs.report) - @mock.patch('cloudinit.cmd.main.dhclient_hook') - def test_dhclient_hook_subcommand(self, m_dhclient_hook): + @mock.patch('cloudinit.cmd.main.dhclient_hook.handle_args') + def test_dhclient_hook_subcommand(self, m_handle_args): """The subcommand 'dhclient-hook' calls dhclient_hook with args.""" - self._call_main(['cloud-init', 'dhclient-hook', 'net_action', 'eth0']) - (name, parseargs) = m_dhclient_hook.call_args_list[0][0] - self.assertEqual('dhclient_hook', name) + self._call_main(['cloud-init', 'dhclient-hook', 'up', 'eth0']) + (name, parseargs) = m_handle_args.call_args_list[0][0] + self.assertEqual('dhclient-hook', name) self.assertEqual('dhclient-hook', parseargs.subcommand) - self.assertEqual('dhclient_hook', parseargs.action[0]) + self.assertEqual('dhclient-hook', parseargs.action[0]) self.assertFalse(parseargs.debug) self.assertFalse(parseargs.force) - self.assertEqual('net_action', parseargs.net_action) - self.assertEqual('eth0', parseargs.net_interface) + self.assertEqual('up', parseargs.event) + self.assertEqual('eth0', parseargs.interface) @mock.patch('cloudinit.cmd.main.main_features') def test_features_hook_subcommand(self, m_features): diff -Nru cloud-init-18.4/tests/unittests/test_datasource/test_aliyun.py cloud-init-18.5-21-g8ee294d5/tests/unittests/test_datasource/test_aliyun.py --- cloud-init-18.4/tests/unittests/test_datasource/test_aliyun.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/tests/unittests/test_datasource/test_aliyun.py 2019-01-28 20:07:03.000000000 +0000 @@ -140,6 +140,10 @@ self._test_get_sshkey() self._test_get_iid() self._test_host_name() + self.assertEqual('aliyun', self.ds.cloud_name) + self.assertEqual('ec2', self.ds.platform) + self.assertEqual( + 'metadata (http://100.100.100.200)', self.ds.subplatform) @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") def test_returns_false_when_not_on_aliyun(self, m_is_aliyun): diff -Nru cloud-init-18.4/tests/unittests/test_datasource/test_altcloud.py cloud-init-18.5-21-g8ee294d5/tests/unittests/test_datasource/test_altcloud.py --- cloud-init-18.4/tests/unittests/test_datasource/test_altcloud.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/tests/unittests/test_datasource/test_altcloud.py 2019-01-28 20:07:03.000000000 +0000 @@ -10,7 +10,6 @@ This test file exercises the code in sources DataSourceAltCloud.py ''' -import mock import os import shutil import tempfile @@ -18,32 +17,13 @@ from cloudinit import helpers from cloudinit import util -from cloudinit.tests.helpers import CiTestCase +from cloudinit.tests.helpers import CiTestCase, mock import cloudinit.sources.DataSourceAltCloud as dsac OS_UNAME_ORIG = getattr(os, 'uname') -def _write_cloud_info_file(value): - ''' - Populate the CLOUD_INFO_FILE which would be populated - with a cloud backend identifier ImageFactory when building - an image with ImageFactory. 
- ''' - cifile = open(dsac.CLOUD_INFO_FILE, 'w') - cifile.write(value) - cifile.close() - os.chmod(dsac.CLOUD_INFO_FILE, 0o664) - - -def _remove_cloud_info_file(): - ''' - Remove the test CLOUD_INFO_FILE - ''' - os.remove(dsac.CLOUD_INFO_FILE) - - def _write_user_data_files(mount_dir, value): ''' Populate the deltacloud_user_data_file the user_data_file @@ -98,13 +78,15 @@ class TestGetCloudType(CiTestCase): - ''' - Test to exercise method: DataSourceAltCloud.get_cloud_type() - ''' + '''Test to exercise method: DataSourceAltCloud.get_cloud_type()''' + + with_logs = True def setUp(self): '''Set up.''' - self.paths = helpers.Paths({'cloud_dir': '/tmp'}) + super(TestGetCloudType, self).setUp() + self.tmp = self.tmp_dir() + self.paths = helpers.Paths({'cloud_dir': self.tmp}) self.dmi_data = util.read_dmi_data # We have a different code path for arm to deal with LP1243287 # We have to switch arch to x86_64 to avoid test failure @@ -115,6 +97,26 @@ util.read_dmi_data = self.dmi_data force_arch() + def test_cloud_info_file_ioerror(self): + """Return UNKNOWN when /etc/sysconfig/cloud-info exists but errors.""" + self.assertEqual('/etc/sysconfig/cloud-info', dsac.CLOUD_INFO_FILE) + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + # Attempting to read the directory generates IOError + with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.tmp): + self.assertEqual('UNKNOWN', dsrc.get_cloud_type()) + self.assertIn( + "[Errno 21] Is a directory: '%s'" % self.tmp, + self.logs.getvalue()) + + def test_cloud_info_file(self): + """Return uppercase stripped content from /etc/sysconfig/cloud-info.""" + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + cloud_info = self.tmp_path('cloud-info', dir=self.tmp) + util.write_file(cloud_info, ' OverRiDdeN CloudType ') + # Point CLOUD_INFO_FILE at the populated test file + with mock.patch.object(dsac, 'CLOUD_INFO_FILE', cloud_info): + self.assertEqual('OVERRIDDEN CLOUDTYPE', dsrc.get_cloud_type()) + def test_rhev(self): ''' Test method get_cloud_type() for RHEVm systems.
@@ -153,60 +155,57 @@ self.tmp = self.tmp_dir() self.paths = helpers.Paths( {'cloud_dir': self.tmp, 'run_dir': self.tmp}) - self.cloud_info_file = tempfile.mkstemp()[1] - self.dmi_data = util.read_dmi_data - dsac.CLOUD_INFO_FILE = self.cloud_info_file - - def tearDown(self): - # Reset - - # Attempt to remove the temp file ignoring errors - try: - os.remove(self.cloud_info_file) - except OSError: - pass - - util.read_dmi_data = self.dmi_data - dsac.CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info' + self.cloud_info_file = self.tmp_path('cloud-info', dir=self.tmp) def test_rhev(self): '''Success Test module get_data() forcing RHEV.''' - _write_cloud_info_file('RHEV') + util.write_file(self.cloud_info_file, 'RHEV') dsrc = dsac.DataSourceAltCloud({}, None, self.paths) dsrc.user_data_rhevm = lambda: True - self.assertEqual(True, dsrc.get_data()) + with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): + self.assertEqual(True, dsrc.get_data()) + self.assertEqual('altcloud', dsrc.cloud_name) + self.assertEqual('altcloud', dsrc.platform_type) + self.assertEqual('rhev (/dev/fd0)', dsrc.subplatform) def test_vsphere(self): '''Success Test module get_data() forcing VSPHERE.''' - _write_cloud_info_file('VSPHERE') + util.write_file(self.cloud_info_file, 'VSPHERE') dsrc = dsac.DataSourceAltCloud({}, None, self.paths) dsrc.user_data_vsphere = lambda: True - self.assertEqual(True, dsrc.get_data()) + with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): + self.assertEqual(True, dsrc.get_data()) + self.assertEqual('altcloud', dsrc.cloud_name) + self.assertEqual('altcloud', dsrc.platform_type) + self.assertEqual('vsphere (unknown)', dsrc.subplatform) def test_fail_rhev(self): '''Failure Test module get_data() forcing RHEV.''' - _write_cloud_info_file('RHEV') + util.write_file(self.cloud_info_file, 'RHEV') dsrc = dsac.DataSourceAltCloud({}, None, self.paths) dsrc.user_data_rhevm = lambda: False - self.assertEqual(False, dsrc.get_data()) + with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): + self.assertEqual(False, dsrc.get_data()) def test_fail_vsphere(self): '''Failure Test module get_data() forcing VSPHERE.''' - _write_cloud_info_file('VSPHERE') + util.write_file(self.cloud_info_file, 'VSPHERE') dsrc = dsac.DataSourceAltCloud({}, None, self.paths) dsrc.user_data_vsphere = lambda: False - self.assertEqual(False, dsrc.get_data()) + with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): + self.assertEqual(False, dsrc.get_data()) def test_unrecognized(self): '''Failure Test module get_data() forcing unrecognized.''' - _write_cloud_info_file('unrecognized') + util.write_file(self.cloud_info_file, 'unrecognized') dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - self.assertEqual(False, dsrc.get_data()) + with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): + self.assertEqual(False, dsrc.get_data()) class TestGetDataNoCloudInfoFile(CiTestCase): @@ -322,7 +321,8 @@ ''' def setUp(self): '''Set up.''' - self.paths = helpers.Paths({'cloud_dir': '/tmp'}) + self.tmp = self.tmp_dir() + self.paths = helpers.Paths({'cloud_dir': self.tmp}) self.mount_dir = tempfile.mkdtemp() _write_user_data_files(self.mount_dir, 'test user data') @@ -363,6 +363,22 @@ self.assertEqual(1, m_find_devs_with.call_count) self.assertEqual(1, m_mount_cb.call_count) + @mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with") + @mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb") + def test_user_data_vsphere_success(self, m_mount_cb, 
m_find_devs_with): + """Test user_data_vsphere() where successful.""" + m_find_devs_with.return_value = ["/dev/mock/cdrom"] + m_mount_cb.return_value = 'raw userdata from cdrom' + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + cloud_info = self.tmp_path('cloud-info', dir=self.tmp) + util.write_file(cloud_info, 'VSPHERE') + self.assertEqual(True, dsrc.user_data_vsphere()) + m_find_devs_with.assert_called_once_with('LABEL=CDROM') + m_mount_cb.assert_called_once_with( + '/dev/mock/cdrom', dsac.read_user_data_callback) + with mock.patch.object(dsrc, 'get_cloud_type', return_value='VSPHERE'): + self.assertEqual('vsphere (/dev/mock/cdrom)', dsrc.subplatform) + class TestReadUserDataCallback(CiTestCase): ''' diff -Nru cloud-init-18.4/tests/unittests/test_datasource/test_azure.py cloud-init-18.5-21-g8ee294d5/tests/unittests/test_datasource/test_azure.py --- cloud-init-18.4/tests/unittests/test_datasource/test_azure.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/tests/unittests/test_datasource/test_azure.py 2019-01-28 20:07:03.000000000 +0000 @@ -17,6 +17,7 @@ import httpretty import json import os +import requests import stat import xml.etree.ElementTree as ET import yaml @@ -110,6 +111,8 @@ } } +MOCKPATH = 'cloudinit.sources.DataSourceAzure.' + class TestGetMetadataFromIMDS(HttprettyTestCase): @@ -119,9 +122,9 @@ super(TestGetMetadataFromIMDS, self).setUp() self.network_md_url = dsaz.IMDS_URL + "instance?api-version=2017-12-01" - @mock.patch('cloudinit.sources.DataSourceAzure.readurl') - @mock.patch('cloudinit.sources.DataSourceAzure.EphemeralDHCPv4') - @mock.patch('cloudinit.sources.DataSourceAzure.net.is_up') + @mock.patch(MOCKPATH + 'readurl') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + @mock.patch(MOCKPATH + 'net.is_up') def test_get_metadata_does_not_dhcp_if_network_is_up( self, m_net_is_up, m_dhcp, m_readurl): """Do not perform DHCP setup when nic is already up.""" @@ -138,9 +141,9 @@ "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time self.logs.getvalue()) - @mock.patch('cloudinit.sources.DataSourceAzure.readurl') - @mock.patch('cloudinit.sources.DataSourceAzure.EphemeralDHCPv4') - @mock.patch('cloudinit.sources.DataSourceAzure.net.is_up') + @mock.patch(MOCKPATH + 'readurl') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + @mock.patch(MOCKPATH + 'net.is_up') def test_get_metadata_performs_dhcp_when_network_is_down( self, m_net_is_up, m_dhcp, m_readurl): """Perform DHCP setup when nic is not up.""" @@ -163,7 +166,7 @@ headers={'Metadata': 'true'}, retries=2, timeout=1) @mock.patch('cloudinit.url_helper.time.sleep') - @mock.patch('cloudinit.sources.DataSourceAzure.net.is_up') + @mock.patch(MOCKPATH + 'net.is_up') def test_get_metadata_from_imds_empty_when_no_imds_present( self, m_net_is_up, m_sleep): """Return empty dict when IMDS network metadata is absent.""" @@ -182,6 +185,35 @@ "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time self.logs.getvalue()) + @mock.patch('requests.Session.request') + @mock.patch('cloudinit.url_helper.time.sleep') + @mock.patch(MOCKPATH + 'net.is_up') + def test_get_metadata_from_imds_retries_on_timeout( + self, m_net_is_up, m_sleep, m_request): + """Retry IMDS network metadata on timeout errors.""" + + self.attempt = 0 + m_request.side_effect = requests.Timeout('Fake Connection Timeout') + + def retry_callback(request, uri, headers): + self.attempt += 1 + raise requests.Timeout('Fake connection timeout') + + httpretty.register_uri( + httpretty.GET, + dsaz.IMDS_URL + 'instance?api-version=2017-12-01', 
+ body=retry_callback) + + m_net_is_up.return_value = True # skips dhcp + + self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=3)) + + m_net_is_up.assert_called_with('eth9') + self.assertEqual([mock.call(1)]*3, m_sleep.call_args_list) + self.assertIn( + "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time + self.logs.getvalue()) + class TestAzureDataSource(CiTestCase): @@ -254,7 +286,8 @@ ]) return dsaz - def _get_ds(self, data, agent_command=None, distro=None): + def _get_ds(self, data, agent_command=None, distro=None, + apply_network=None): def dsdevs(): return data.get('dsdevs', []) @@ -310,6 +343,8 @@ data.get('sys_cfg', {}), distro=distro, paths=self.paths) if agent_command is not None: dsrc.ds_cfg['agent_command'] = agent_command + if apply_network is not None: + dsrc.ds_cfg['apply_network_config'] = apply_network return dsrc @@ -380,7 +415,7 @@ res = get_path_dev_freebsd('/etc', mnt_list) self.assertIsNotNone(res) - @mock.patch('cloudinit.sources.DataSourceAzure._is_platform_viable') + @mock.patch(MOCKPATH + '_is_platform_viable') def test_call_is_platform_viable_seed(self, m_is_platform_viable): """Check seed_dir using _is_platform_viable and return False.""" # Return a non-matching asset tag value @@ -401,6 +436,24 @@ self.assertEqual(dsrc.metadata['local-hostname'], odata['HostName']) self.assertTrue(os.path.isfile( os.path.join(self.waagent_d, 'ovf-env.xml'))) + self.assertEqual('azure', dsrc.cloud_name) + self.assertEqual('azure', dsrc.platform_type) + self.assertEqual( + 'seed-dir (%s/seed/azure)' % self.tmp, dsrc.subplatform) + + def test_basic_dev_file(self): + """When a device path is used, present that in subplatform.""" + data = {'sys_cfg': {}, 'dsdevs': ['/dev/cd0']} + dsrc = self._get_ds(data) + with mock.patch(MOCKPATH + 'util.mount_cb') as m_mount_cb: + m_mount_cb.return_value = ( + {'local-hostname': 'me'}, 'ud', {'cfg': ''}, {}) + self.assertTrue(dsrc.get_data()) + self.assertEqual(dsrc.userdata_raw, 'ud') + self.assertEqual(dsrc.metadata['local-hostname'], 'me') + self.assertEqual('azure', dsrc.cloud_name) + self.assertEqual('azure', dsrc.platform_type) + self.assertEqual('config-disk (/dev/cd0)', dsrc.subplatform) def test_get_data_non_ubuntu_will_not_remove_network_scripts(self): """get_data on non-Ubuntu will not remove ubuntu net scripts.""" @@ -414,14 +467,26 @@ def test_get_data_on_ubuntu_will_remove_network_scripts(self): """get_data will remove ubuntu net scripts on Ubuntu distro.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} odata = {'HostName': "myhost", 'UserName': "myuser"} data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': {}} + 'sys_cfg': sys_cfg} dsrc = self._get_ds(data, distro='ubuntu') dsrc.get_data() self.m_remove_ubuntu_network_scripts.assert_called_once_with() + def test_get_data_on_ubuntu_will_not_remove_network_scripts_disabled(self): + """When apply_network_config false, do not remove scripts on Ubuntu.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + + dsrc = self._get_ds(data, distro='ubuntu') + dsrc.get_data() + self.m_remove_ubuntu_network_scripts.assert_not_called() + def test_crawl_metadata_returns_structured_data_and_caches_nothing(self): """Return all structured metadata and cache no class attributes.""" yaml_cfg = "{agent_command: my_command}\n" @@ -478,6 +543,61 @@ dsrc.crawl_metadata() 
self.assertEqual(str(cm.exception), error_msg) + @mock.patch('cloudinit.sources.DataSourceAzure.EphemeralDHCPv4') + @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') + @mock.patch( + 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') + @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds') + def test_crawl_metadata_on_reprovision_reports_ready( + self, poll_imds_func, + report_ready_func, + m_write, m_dhcp): + """If reprovisioning, report ready at the end""" + ovfenv = construct_valid_ovf_env( + platform_settings={"PreprovisionedVm": "True"}) + + data = {'ovfcontent': ovfenv, + 'sys_cfg': {}} + dsrc = self._get_ds(data) + poll_imds_func.return_value = ovfenv + dsrc.crawl_metadata() + self.assertEqual(1, report_ready_func.call_count) + + @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') + @mock.patch('cloudinit.sources.helpers.netlink.' + 'wait_for_media_disconnect_connect') + @mock.patch( + 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') + @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + @mock.patch('cloudinit.sources.DataSourceAzure.readurl') + def test_crawl_metadata_on_reprovision_reports_ready_using_lease( + self, m_readurl, m_dhcp, + m_net, report_ready_func, + m_media_switch, m_write): + """If reprovisioning, report ready using the obtained lease""" + ovfenv = construct_valid_ovf_env( + platform_settings={"PreprovisionedVm": "True"}) + + data = {'ovfcontent': ovfenv, + 'sys_cfg': {}} + dsrc = self._get_ds(data) + + lease = { + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'unknown-245': '624c3620'} + m_dhcp.return_value = [lease] + m_media_switch.return_value = None + + reprovision_ovfenv = construct_valid_ovf_env() + m_readurl.return_value = url_helper.StringResponse( + reprovision_ovfenv.encode('utf-8')) + + dsrc.crawl_metadata() + self.assertEqual(2, report_ready_func.call_count) + report_ready_func.assert_called_with(lease=lease) + def test_waagent_d_has_0700_perms(self): # we expect /var/lib/waagent to be created 0700 dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) @@ -503,8 +623,10 @@ def test_network_config_set_from_imds(self): """Datasource.network_config returns IMDS network data.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} odata = {} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} expected_network_config = { 'ethernets': { 'eth0': {'set-name': 'eth0', @@ -769,8 +891,8 @@ ds.get_data() self.assertEqual(self.instance_id, ds.metadata['instance-id']) - @mock.patch("cloudinit.sources.DataSourceAzure.util.is_FreeBSD") - @mock.patch("cloudinit.sources.DataSourceAzure._check_freebsd_cdrom") + @mock.patch(MOCKPATH + 'util.is_FreeBSD') + @mock.patch(MOCKPATH + '_check_freebsd_cdrom') def test_list_possible_azure_ds_devs(self, m_check_fbsd_cdrom, m_is_FreeBSD): """On FreeBSD, possible devs should show /dev/cd0.""" @@ -783,9 +905,10 @@ @mock.patch('cloudinit.net.generate_fallback_config') def test_imds_network_config(self, mock_fallback): """Network config is generated from IMDS network data when present.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} odata = {'HostName': "myhost", 'UserName': "myuser"} data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': {}} + 'sys_cfg': sys_cfg} 
dsrc = self._get_ds(data) ret = dsrc.get_data() @@ -805,6 +928,36 @@ @mock.patch('cloudinit.net.get_devicelist') @mock.patch('cloudinit.net.device_driver') @mock.patch('cloudinit.net.generate_fallback_config') + def test_imds_network_ignored_when_apply_network_config_false( + self, mock_fallback, mock_dd, mock_devlist, mock_get_mac): + """When apply_network_config is False, use fallback instead of IMDS.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + fallback_config = { + 'version': 1, + 'config': [{ + 'type': 'physical', 'name': 'eth0', + 'mac_address': '00:11:22:33:44:55', + 'params': {'driver': 'hv_netsvc'}, + 'subnets': [{'type': 'dhcp'}], + }] + } + mock_fallback.return_value = fallback_config + + mock_devlist.return_value = ['eth0'] + mock_dd.return_value = ['hv_netsvc'] + mock_get_mac.return_value = '00:11:22:33:44:55' + + dsrc = self._get_ds(data) + self.assertTrue(dsrc.get_data()) + self.assertEqual(dsrc.network_config, fallback_config) + + @mock.patch('cloudinit.net.get_interface_mac') + @mock.patch('cloudinit.net.get_devicelist') + @mock.patch('cloudinit.net.device_driver') + @mock.patch('cloudinit.net.generate_fallback_config') def test_fallback_network_config(self, mock_fallback, mock_dd, mock_devlist, mock_get_mac): """On absent IMDS network data, generate network fallback config.""" @@ -885,17 +1038,17 @@ expected_config['config'].append(blacklist_config) self.assertEqual(netconfig, expected_config) - @mock.patch("cloudinit.sources.DataSourceAzure.util.subp") + @mock.patch(MOCKPATH + 'util.subp') def test_get_hostname_with_no_args(self, subp): dsaz.get_hostname() subp.assert_called_once_with(("hostname",), capture=True) - @mock.patch("cloudinit.sources.DataSourceAzure.util.subp") + @mock.patch(MOCKPATH + 'util.subp') def test_get_hostname_with_string_arg(self, subp): dsaz.get_hostname(hostname_command="hostname") subp.assert_called_once_with(("hostname",), capture=True) - @mock.patch("cloudinit.sources.DataSourceAzure.util.subp") + @mock.patch(MOCKPATH + 'util.subp') def test_get_hostname_with_iterable_arg(self, subp): dsaz.get_hostname(hostname_command=("hostname",)) subp.assert_called_once_with(("hostname",), capture=True) @@ -949,7 +1102,7 @@ self.set_hostname = self.patches.enter_context( mock.patch.object(dsaz, 'set_hostname')) self.subp = self.patches.enter_context( - mock.patch('cloudinit.sources.DataSourceAzure.util.subp')) + mock.patch(MOCKPATH + 'util.subp')) self.find_fallback_nic = self.patches.enter_context( mock.patch('cloudinit.net.find_fallback_nic', return_value='eth9')) @@ -989,7 +1142,7 @@ ds.get_data() self.assertEqual(0, self.set_hostname.call_count) - @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') + @mock.patch(MOCKPATH + 'perform_hostname_bounce') def test_disabled_bounce_does_not_perform_bounce( self, perform_hostname_bounce): cfg = {'hostname_bounce': {'policy': 'off'}} @@ -1005,7 +1158,7 @@ ds.get_data() self.assertEqual(0, self.set_hostname.call_count) - @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') + @mock.patch(MOCKPATH + 'perform_hostname_bounce') def test_unchanged_hostname_does_not_perform_bounce( self, perform_hostname_bounce): host_name = 'unchanged-host-name' @@ -1015,7 +1168,7 @@ ds.get_data() self.assertEqual(0, perform_hostname_bounce.call_count) - @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') + 
@mock.patch(MOCKPATH + 'perform_hostname_bounce') def test_force_performs_bounce_regardless(self, perform_hostname_bounce): host_name = 'unchanged-host-name' self.get_hostname.return_value = host_name @@ -1032,7 +1185,7 @@ cfg = {'hostname_bounce': {'policy': 'force'}} dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg), agent_command=['not', '__builtin__']) - patch_path = 'cloudinit.sources.DataSourceAzure.util.which' + patch_path = MOCKPATH + 'util.which' with mock.patch(patch_path) as m_which: m_which.return_value = None ret = self._get_and_setup(dsrc) @@ -1053,7 +1206,7 @@ self.assertEqual(expected_hostname, self.set_hostname.call_args_list[0][0][0]) - @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') + @mock.patch(MOCKPATH + 'perform_hostname_bounce') def test_different_hostnames_performs_bounce( self, perform_hostname_bounce): expected_hostname = 'azure-expected-host-name' @@ -1076,7 +1229,7 @@ self.assertEqual(initial_host_name, self.set_hostname.call_args_list[-1][0][0]) - @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') + @mock.patch(MOCKPATH + 'perform_hostname_bounce') def test_failure_in_bounce_still_resets_host_name( self, perform_hostname_bounce): perform_hostname_bounce.side_effect = Exception @@ -1117,7 +1270,7 @@ self.assertEqual( dsaz.BOUNCE_COMMAND_IFUP, bounce_args) - @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') + @mock.patch(MOCKPATH + 'perform_hostname_bounce') def test_set_hostname_option_can_disable_bounce( self, perform_hostname_bounce): cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}} @@ -1218,12 +1371,12 @@ def has_ntfs_fs(device): return bypath.get(device, {}).get('fs') == 'ntfs' - p = 'cloudinit.sources.DataSourceAzure' - self._domock(p + "._partitions_on_device", 'm_partitions_on_device') - self._domock(p + "._has_ntfs_filesystem", 'm_has_ntfs_filesystem') - self._domock(p + ".util.mount_cb", 'm_mount_cb') - self._domock(p + ".os.path.realpath", 'm_realpath') - self._domock(p + ".os.path.exists", 'm_exists') + p = MOCKPATH + self._domock(p + "_partitions_on_device", 'm_partitions_on_device') + self._domock(p + "_has_ntfs_filesystem", 'm_has_ntfs_filesystem') + self._domock(p + "util.mount_cb", 'm_mount_cb') + self._domock(p + "os.path.realpath", 'm_realpath') + self._domock(p + "os.path.exists", 'm_exists') self.m_exists.side_effect = lambda p: p in bypath self.m_realpath.side_effect = realpath @@ -1391,21 +1544,20 @@ '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []} }}}) - err = ("Unexpected error while running command.\n", - "Command: ['mount', '-o', 'ro,sync', '-t', 'auto', ", - "'/dev/sda1', '/fake-tmp/dir']\n" - "Exit code: 32\n" - "Reason: -\n" - "Stdout: -\n" - "Stderr: mount: unknown filesystem type 'ntfs'") - self.m_mount_cb.side_effect = MountFailedError( - 'Failed mounting %s to %s due to: %s' % - ('/dev/sda', '/fake-tmp/dir', err)) + error_msgs = [ + "Stderr: mount: unknown filesystem type 'ntfs'", # RHEL + "Stderr: mount: /dev/sdb1: unknown filesystem type 'ntfs'" # SLES + ] - value, msg = dsaz.can_dev_be_reformatted('/dev/sda', - preserve_ntfs=False) - self.assertTrue(value) - self.assertIn('cannot mount NTFS, assuming', msg) + for err_msg in error_msgs: + self.m_mount_cb.side_effect = MountFailedError( + "Failed mounting %s to %s due to: \nUnexpected.\n%s" % + ('/dev/sda', '/fake-tmp/dir', err_msg)) + + value, msg = dsaz.can_dev_be_reformatted('/dev/sda', + preserve_ntfs=False) + self.assertTrue(value) + self.assertIn('cannot mount NTFS, 
assuming', msg) def test_never_destroy_ntfs_config_false(self): """Normally formattable situation with never_destroy_ntfs set.""" @@ -1488,7 +1640,7 @@ self.paths = helpers.Paths({'cloud_dir': tmp}) dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d - @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') + @mock.patch(MOCKPATH + 'util.write_file') def test__should_reprovision_with_true_cfg(self, isfile, write_f): """The _should_reprovision method should return true with config flag present.""" @@ -1512,7 +1664,7 @@ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) self.assertFalse(dsa._should_reprovision((None, None, {}, None))) - @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds') + @mock.patch(MOCKPATH + 'DataSourceAzure._poll_imds') def test_reprovision_calls__poll_imds(self, _poll_imds, isfile): """_reprovision will poll IMDS.""" isfile.return_value = False @@ -1527,9 +1679,10 @@ @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') +@mock.patch('cloudinit.sources.helpers.netlink.' + 'wait_for_media_disconnect_connect') @mock.patch('requests.Session.request') -@mock.patch( - 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') +@mock.patch(MOCKPATH + 'DataSourceAzure._report_ready') class TestPreprovisioningPollIMDS(CiTestCase): def setUp(self): @@ -1539,45 +1692,69 @@ self.paths = helpers.Paths({'cloud_dir': self.tmp}) dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d - @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') - def test_poll_imds_calls_report_ready(self, write_f, report_ready_func, - fake_resp, m_dhcp, m_net): - """The poll_imds will call report_ready after creating marker file.""" - report_marker = self.tmp_path('report_marker', self.tmp) + @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + def test_poll_imds_re_dhcp_on_timeout(self, m_dhcpv4, report_ready_func, + fake_resp, m_media_switch, m_dhcp, + m_net): + """The poll_imds will retry DHCP on IMDS timeout.""" + report_file = self.tmp_path('report_marker', self.tmp) lease = { 'interface': 'eth9', 'fixed-address': '192.168.2.9', 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', 'unknown-245': '624c3620'} m_dhcp.return_value = [lease] + m_media_switch.return_value = None + dhcp_ctx = mock.MagicMock(lease=lease) + dhcp_ctx.obtain_lease.return_value = lease + m_dhcpv4.return_value = dhcp_ctx + + self.tries = 0 + + def fake_timeout_once(**kwargs): + self.tries += 1 + if self.tries == 1: + raise requests.Timeout('Fake connection timeout') + elif self.tries == 2: + response = requests.Response() + response.status_code = 404 + raise requests.exceptions.HTTPError( + "fake 404", response=response) + # Third try should succeed and stop retries or redhcp + return mock.MagicMock(status_code=200, text="good", content="good") + + fake_resp.side_effect = fake_timeout_once + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) - mock_path = ( - 'cloudinit.sources.DataSourceAzure.REPORTED_READY_MARKER_FILE') - with mock.patch(mock_path, report_marker): + with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file): dsa._poll_imds() self.assertEqual(report_ready_func.call_count, 1) report_ready_func.assert_called_with(lease=lease) + self.assertEqual(3, m_dhcpv4.call_count, 'Expected 3 DHCP calls') + self.assertEqual(3, self.tries, 'Expected 3 total reads from IMDS') - def test_poll_imds_report_ready_false(self, report_ready_func, - fake_resp, m_dhcp, m_net): + def 
test_poll_imds_report_ready_false(self, + report_ready_func, fake_resp, + m_media_switch, m_dhcp, m_net): """The poll_imds should not call reporting ready when flag is false""" - report_marker = self.tmp_path('report_marker', self.tmp) - write_file(report_marker, content='dont run report_ready :)') + report_file = self.tmp_path('report_marker', self.tmp) + write_file(report_file, content='dont run report_ready :)') m_dhcp.return_value = [{ 'interface': 'eth9', 'fixed-address': '192.168.2.9', 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', 'unknown-245': '624c3620'}] + m_media_switch.return_value = None dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) - mock_path = ( - 'cloudinit.sources.DataSourceAzure.REPORTED_READY_MARKER_FILE') - with mock.patch(mock_path, report_marker): + with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file): dsa._poll_imds() self.assertEqual(report_ready_func.call_count, 0) -@mock.patch('cloudinit.sources.DataSourceAzure.util.subp') -@mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') -@mock.patch('cloudinit.sources.DataSourceAzure.util.is_FreeBSD') +@mock.patch(MOCKPATH + 'util.subp') +@mock.patch(MOCKPATH + 'util.write_file') +@mock.patch(MOCKPATH + 'util.is_FreeBSD') +@mock.patch('cloudinit.sources.helpers.netlink.' + 'wait_for_media_disconnect_connect') @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') @mock.patch('requests.Session.request') @@ -1590,10 +1767,13 @@ self.paths = helpers.Paths({'cloud_dir': tmp}) dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d - def test_poll_imds_returns_ovf_env(self, fake_resp, m_dhcp, m_net, + def test_poll_imds_returns_ovf_env(self, fake_resp, + m_dhcp, m_net, + m_media_switch, m_is_bsd, write_f, subp): """The _poll_imds method should return the ovf_env.xml.""" m_is_bsd.return_value = False + m_media_switch.return_value = None m_dhcp.return_value = [{ 'interface': 'eth9', 'fixed-address': '192.168.2.9', 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0'}] @@ -1611,16 +1791,19 @@ 'Cloud-Init/%s' % vs() }, method='GET', timeout=1, url=full_url)]) - self.assertEqual(m_dhcp.call_count, 1) + self.assertEqual(m_dhcp.call_count, 2) m_net.assert_any_call( broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', prefix_or_mask='255.255.255.0', router='192.168.2.1') - self.assertEqual(m_net.call_count, 1) + self.assertEqual(m_net.call_count, 2) - def test__reprovision_calls__poll_imds(self, fake_resp, m_dhcp, m_net, + def test__reprovision_calls__poll_imds(self, fake_resp, + m_dhcp, m_net, + m_media_switch, m_is_bsd, write_f, subp): """The _reprovision method should call poll IMDS.""" m_is_bsd.return_value = False + m_media_switch.return_value = None m_dhcp.return_value = [{ 'interface': 'eth9', 'fixed-address': '192.168.2.9', 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', @@ -1644,11 +1827,11 @@ 'User-Agent': 'Cloud-Init/%s' % vs()}, method='GET', timeout=1, url=full_url)]) - self.assertEqual(m_dhcp.call_count, 1) + self.assertEqual(m_dhcp.call_count, 2) m_net.assert_any_call( broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', prefix_or_mask='255.255.255.0', router='192.168.2.1') - self.assertEqual(m_net.call_count, 1) + self.assertEqual(m_net.call_count, 2) class TestRemoveUbuntuNetworkConfigScripts(CiTestCase): @@ -1688,7 +1871,7 @@ self.tmp_path('notfilehere', dir=self.tmp)]) self.assertNotIn('/not/a', self.logs.getvalue()) # No delete logs - 
@mock.patch('cloudinit.sources.DataSourceAzure.os.path.exists') + @mock.patch(MOCKPATH + 'os.path.exists') def test_remove_network_scripts_default_removes_stock_scripts(self, m_exists): """Azure's stock ubuntu image scripts and artifacts are removed.""" @@ -1704,14 +1887,14 @@ """White box tests for _is_platform_viable.""" with_logs = True - @mock.patch('cloudinit.sources.DataSourceAzure.util.read_dmi_data') + @mock.patch(MOCKPATH + 'util.read_dmi_data') def test_true_on_non_azure_chassis(self, m_read_dmi_data): """Return True if DMI chassis-asset-tag is AZURE_CHASSIS_ASSET_TAG.""" m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG self.assertTrue(dsaz._is_platform_viable('doesnotmatter')) - @mock.patch('cloudinit.sources.DataSourceAzure.os.path.exists') - @mock.patch('cloudinit.sources.DataSourceAzure.util.read_dmi_data') + @mock.patch(MOCKPATH + 'os.path.exists') + @mock.patch(MOCKPATH + 'util.read_dmi_data') def test_true_on_azure_ovf_env_in_seed_dir(self, m_read_dmi_data, m_exist): """Return True if ovf-env.xml exists in known seed dirs.""" # Non-matching Azure chassis-asset-tag @@ -1729,7 +1912,7 @@ and no devices have a label starting with prefix 'rd_rdfe_'. """ self.assertFalse(wrap_and_call( - 'cloudinit.sources.DataSourceAzure', + MOCKPATH, {'os.path.exists': False, # Non-matching Azure chassis-asset-tag 'util.read_dmi_data': dsaz.AZURE_CHASSIS_ASSET_TAG + 'X', diff -Nru cloud-init-18.4/tests/unittests/test_datasource/test_cloudsigma.py cloud-init-18.5-21-g8ee294d5/tests/unittests/test_datasource/test_cloudsigma.py --- cloud-init-18.4/tests/unittests/test_datasource/test_cloudsigma.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/tests/unittests/test_datasource/test_cloudsigma.py 2019-01-28 20:07:03.000000000 +0000 @@ -68,6 +68,12 @@ self.assertEqual(SERVER_CONTEXT['uuid'], self.datasource.get_instance_id()) + def test_platform(self): + """All platform-related attributes are set.""" + self.assertEqual(self.datasource.cloud_name, 'cloudsigma') + self.assertEqual(self.datasource.platform_type, 'cloudsigma') + self.assertEqual(self.datasource.subplatform, 'cepko (/dev/ttyS1)') + def test_metadata(self): self.assertEqual(self.datasource.metadata, SERVER_CONTEXT) diff -Nru cloud-init-18.4/tests/unittests/test_datasource/test_configdrive.py cloud-init-18.5-21-g8ee294d5/tests/unittests/test_datasource/test_configdrive.py --- cloud-init-18.4/tests/unittests/test_datasource/test_configdrive.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/tests/unittests/test_datasource/test_configdrive.py 2019-01-28 20:07:03.000000000 +0000 @@ -478,6 +478,9 @@ myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) self.assertEqual(myds.get_public_ssh_keys(), [OSTACK_META['public_keys']['mykey']]) + self.assertEqual('configdrive', myds.cloud_name) + self.assertEqual('openstack', myds.platform) + self.assertEqual('seed-dir (%s/seed)' % self.tmp, myds.subplatform) class TestNetJson(CiTestCase): diff -Nru cloud-init-18.4/tests/unittests/test_datasource/test_ec2.py cloud-init-18.5-21-g8ee294d5/tests/unittests/test_datasource/test_ec2.py --- cloud-init-18.4/tests/unittests/test_datasource/test_ec2.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/tests/unittests/test_datasource/test_ec2.py 2019-01-28 20:07:03.000000000 +0000 @@ -211,9 +211,9 @@ self.metadata_addr = self.datasource.metadata_urls[0] self.tmp = self.tmp_dir() - def data_url(self, version): + def data_url(self, version, data_item='meta-data'): """Return a metadata url 
based on the version provided.""" - return '/'.join([self.metadata_addr, version, 'meta-data', '']) + return '/'.join([self.metadata_addr, version, data_item]) def _patch_add_cleanup(self, mpath, *args, **kwargs): p = mock.patch(mpath, *args, **kwargs) @@ -238,10 +238,18 @@ all_versions = ( [ds.min_metadata_version] + ds.extended_metadata_versions) for version in all_versions: - metadata_url = self.data_url(version) + metadata_url = self.data_url(version) + '/' if version == md_version: # Register all metadata for desired version - register_mock_metaserver(metadata_url, md) + register_mock_metaserver( + metadata_url, md.get('md', DEFAULT_METADATA)) + userdata_url = self.data_url( + version, data_item='user-data') + register_mock_metaserver(userdata_url, md.get('ud', '')) + identity_url = self.data_url( + version, data_item='dynamic/instance-identity') + register_mock_metaserver( + identity_url, md.get('id', DYNAMIC_METADATA)) else: instance_id_url = metadata_url + 'instance-id' if version == ds.min_metadata_version: @@ -261,7 +269,7 @@ ds = self._setup_ds( platform_data=self.valid_platform_data, sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md=DEFAULT_METADATA) + md={'md': DEFAULT_METADATA}) find_fallback_path = ( 'cloudinit.sources.DataSourceEc2.net.find_fallback_nic') with mock.patch(find_fallback_path) as m_find_fallback: @@ -293,7 +301,7 @@ ds = self._setup_ds( platform_data=self.valid_platform_data, sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md=DEFAULT_METADATA) + md={'md': DEFAULT_METADATA}) find_fallback_path = ( 'cloudinit.sources.DataSourceEc2.net.find_fallback_nic') with mock.patch(find_fallback_path) as m_find_fallback: @@ -322,7 +330,7 @@ ds = self._setup_ds( platform_data=self.valid_platform_data, sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md=DEFAULT_METADATA) + md={'md': DEFAULT_METADATA}) ds._network_config = {'cached': 'data'} self.assertEqual({'cached': 'data'}, ds.network_config) @@ -338,7 +346,7 @@ ds = self._setup_ds( platform_data=self.valid_platform_data, sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md=old_metadata) + md={'md': old_metadata}) self.assertTrue(ds.get_data()) # Provide new revision of metadata that contains network data register_mock_metaserver( @@ -351,7 +359,9 @@ m_get_interface_mac.return_value = mac1 nc = ds.network_config # Will re-crawl network metadata self.assertIsNotNone(nc) - self.assertIn('Re-crawl of metadata service', self.logs.getvalue()) + self.assertIn( + 'Refreshing stale metadata from prior to upgrade', + self.logs.getvalue()) expected = {'version': 1, 'config': [ {'mac_address': '06:17:04:d7:26:09', 'name': 'eth9', @@ -370,7 +380,7 @@ ds = self._setup_ds( platform_data=self.valid_platform_data, sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md=DEFAULT_METADATA) + md={'md': DEFAULT_METADATA}) # Mock 404s on all versions except latest all_versions = ( [ds.min_metadata_version] + ds.extended_metadata_versions) @@ -386,7 +396,7 @@ register_mock_metaserver( '{0}/{1}/dynamic/'.format(ds.metadata_address, all_versions[-1]), DYNAMIC_METADATA) - ds._cloud_platform = ec2.Platforms.AWS + ds._cloud_name = ec2.CloudNames.AWS # Setup cached metadata on the Datasource ds.metadata = DEFAULT_METADATA self.assertEqual('my-identity-id', ds.get_instance_id()) @@ -397,17 +407,20 @@ ds = self._setup_ds( platform_data=self.valid_platform_data, sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md=DEFAULT_METADATA) + md={'md': DEFAULT_METADATA}) ret = ds.get_data() self.assertTrue(ret) 
self.assertEqual(0, m_dhcp.call_count) + self.assertEqual('aws', ds.cloud_name) + self.assertEqual('ec2', ds.platform_type) + self.assertEqual('metadata (%s)' % ds.metadata_address, ds.subplatform) def test_valid_platform_with_strict_false(self): """Valid platform data should return true with strict_id false.""" ds = self._setup_ds( platform_data=self.valid_platform_data, sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md=DEFAULT_METADATA) + md={'md': DEFAULT_METADATA}) ret = ds.get_data() self.assertTrue(ret) @@ -417,7 +430,7 @@ ds = self._setup_ds( platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''}, sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md=DEFAULT_METADATA) + md={'md': DEFAULT_METADATA}) ret = ds.get_data() self.assertFalse(ret) @@ -427,7 +440,7 @@ ds = self._setup_ds( platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''}, sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md=DEFAULT_METADATA) + md={'md': DEFAULT_METADATA}) ret = ds.get_data() self.assertTrue(ret) @@ -437,18 +450,19 @@ ds = self._setup_ds( platform_data=self.valid_platform_data, sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md=DEFAULT_METADATA) + md={'md': DEFAULT_METADATA}) platform_attrs = [ - attr for attr in ec2.Platforms.__dict__.keys() + attr for attr in ec2.CloudNames.__dict__.keys() if not attr.startswith('__')] for attr_name in platform_attrs: - platform_name = getattr(ec2.Platforms, attr_name) - if platform_name != 'AWS': - ds._cloud_platform = platform_name + platform_name = getattr(ec2.CloudNames, attr_name) + if platform_name != 'aws': + ds._cloud_name = platform_name ret = ds.get_data() + self.assertEqual('ec2', ds.platform_type) self.assertFalse(ret) message = ( - "Local Ec2 mode only supported on ('AWS',)," + "Local Ec2 mode only supported on ('aws',)," ' not {0}'.format(platform_name)) self.assertIn(message, self.logs.getvalue()) @@ -463,7 +477,7 @@ ds = self._setup_ds( platform_data=self.valid_platform_data, sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md=DEFAULT_METADATA) + md={'md': DEFAULT_METADATA}) ret = ds.get_data() self.assertFalse(ret) self.assertIn( @@ -493,7 +507,7 @@ ds = self._setup_ds( platform_data=self.valid_platform_data, sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md=DEFAULT_METADATA) + md={'md': DEFAULT_METADATA}) ret = ds.get_data() self.assertTrue(ret) diff -Nru cloud-init-18.4/tests/unittests/test_datasource/test_ibmcloud.py cloud-init-18.5-21-g8ee294d5/tests/unittests/test_datasource/test_ibmcloud.py --- cloud-init-18.4/tests/unittests/test_datasource/test_ibmcloud.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/tests/unittests/test_datasource/test_ibmcloud.py 2019-01-28 20:07:03.000000000 +0000 @@ -1,14 +1,17 @@ # This file is part of cloud-init. See LICENSE file for license information. +from cloudinit.helpers import Paths from cloudinit.sources import DataSourceIBMCloud as ibm from cloudinit.tests import helpers as test_helpers +from cloudinit import util import base64 import copy import json -import mock from textwrap import dedent +mock = test_helpers.mock + D_PATH = "cloudinit.sources.DataSourceIBMCloud." 
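Note on the cloud_name/platform_type/subplatform assertions threaded through the EC2 hunks above and the IBMCloud hunk below: 18.5 standardizes these identity attributes across datasources and also surfaces them in the 'v1' namespace of instance-data.json. A minimal sketch of reading them back, assuming a readable /run/cloud-init/instance-data.json with that 'v1' layout (the helper name is illustrative, not from the tree):

    import json

    def platform_summary(path='/run/cloud-init/instance-data.json'):
        """Read the standardized identity keys from instance-data.json."""
        with open(path) as stream:
            v1 = json.load(stream).get('v1', {})
        # On EC2 the tests above expect values shaped like
        # ('aws', 'ec2', 'metadata (http://169.254.169.254)').
        return (v1.get('cloud_name'), v1.get('platform'), v1.get('subplatform'))
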
@@ -309,4 +312,39 @@ self.assertIn("no reference file", self.logs.getvalue()) +class TestDataSourceIBMCloud(test_helpers.CiTestCase): + + def setUp(self): + super(TestDataSourceIBMCloud, self).setUp() + self.tmp = self.tmp_dir() + self.cloud_dir = self.tmp_path('cloud', dir=self.tmp) + util.ensure_dir(self.cloud_dir) + paths = Paths({'run_dir': self.tmp, 'cloud_dir': self.cloud_dir}) + self.ds = ibm.DataSourceIBMCloud( + sys_cfg={}, distro=None, paths=paths) + + def test_get_data_false(self): + """When read_md returns None, get_data returns False.""" + with mock.patch(D_PATH + 'read_md', return_value=None): + self.assertFalse(self.ds.get_data()) + + def test_get_data_processes_read_md(self): + """get_data processes and caches content returned by read_md.""" + md = { + 'metadata': {}, 'networkdata': 'net', 'platform': 'plat', + 'source': 'src', 'system-uuid': 'uuid', 'userdata': 'ud', + 'vendordata': 'vd'} + with mock.patch(D_PATH + 'read_md', return_value=md): + self.assertTrue(self.ds.get_data()) + self.assertEqual('src', self.ds.source) + self.assertEqual('plat', self.ds.platform) + self.assertEqual({}, self.ds.metadata) + self.assertEqual('ud', self.ds.userdata_raw) + self.assertEqual('net', self.ds.network_json) + self.assertEqual('vd', self.ds.vendordata_pure) + self.assertEqual('uuid', self.ds.system_uuid) + self.assertEqual('ibmcloud', self.ds.cloud_name) + self.assertEqual('ibmcloud', self.ds.platform_type) + self.assertEqual('plat (src)', self.ds.subplatform) + # vi: ts=4 expandtab diff -Nru cloud-init-18.4/tests/unittests/test_datasource/test_nocloud.py cloud-init-18.5-21-g8ee294d5/tests/unittests/test_datasource/test_nocloud.py --- cloud-init-18.4/tests/unittests/test_datasource/test_nocloud.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/tests/unittests/test_datasource/test_nocloud.py 2019-01-28 20:07:03.000000000 +0000 @@ -1,7 +1,10 @@ # This file is part of cloud-init. See LICENSE file for license information. 
from cloudinit import helpers -from cloudinit.sources import DataSourceNoCloud +from cloudinit.sources.DataSourceNoCloud import ( + DataSourceNoCloud as dsNoCloud, + _maybe_remove_top_network, + parse_cmdline_data) from cloudinit import util from cloudinit.tests.helpers import CiTestCase, populate_dir, mock, ExitStack @@ -10,6 +13,7 @@ import yaml +@mock.patch('cloudinit.sources.DataSourceNoCloud.util.is_lxd') class TestNoCloudDataSource(CiTestCase): def setUp(self): @@ -28,28 +32,46 @@ self.mocks.enter_context( mock.patch.object(util, 'read_dmi_data', return_value=None)) - def test_nocloud_seed_dir(self): + def test_nocloud_seed_dir_on_lxd(self, m_is_lxd): md = {'instance-id': 'IID', 'dsmode': 'local'} ud = b"USER_DATA_HERE" - populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), + seed_dir = os.path.join(self.paths.seed_dir, "nocloud") + populate_dir(seed_dir, {'user-data': ud, 'meta-data': yaml.safe_dump(md)}) sys_cfg = { 'datasource': {'NoCloud': {'fs_label': None}} } - ds = DataSourceNoCloud.DataSourceNoCloud - - dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() self.assertEqual(dsrc.userdata_raw, ud) self.assertEqual(dsrc.metadata, md) + self.assertEqual(dsrc.platform_type, 'lxd') + self.assertEqual( + dsrc.subplatform, 'seed-dir (%s)' % seed_dir) self.assertTrue(ret) - def test_fs_label(self): - # find_devs_with should not be called ff fs_label is None - ds = DataSourceNoCloud.DataSourceNoCloud + def test_nocloud_seed_dir_non_lxd_platform_is_nocloud(self, m_is_lxd): + """Non-lxd environments will list nocloud as the platform.""" + m_is_lxd.return_value = False + md = {'instance-id': 'IID', 'dsmode': 'local'} + seed_dir = os.path.join(self.paths.seed_dir, "nocloud") + populate_dir(seed_dir, + {'user-data': '', 'meta-data': yaml.safe_dump(md)}) + + sys_cfg = { + 'datasource': {'NoCloud': {'fs_label': None}} + } + + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + self.assertTrue(dsrc.get_data()) + self.assertEqual(dsrc.platform_type, 'nocloud') + self.assertEqual( + dsrc.subplatform, 'seed-dir (%s)' % seed_dir) + def test_fs_label(self, m_is_lxd): + # find_devs_with should not be called ff fs_label is None class PsuedoException(Exception): pass @@ -59,26 +81,23 @@ # by default, NoCloud should search for filesystems by label sys_cfg = {'datasource': {'NoCloud': {}}} - dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) self.assertRaises(PsuedoException, dsrc.get_data) # but disabling searching should just end up with None found sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() self.assertFalse(ret) - def test_no_datasource_expected(self): + def test_no_datasource_expected(self, m_is_lxd): # no source should be found if no cmdline, config, and fs_label=None sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - ds = DataSourceNoCloud.DataSourceNoCloud - dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) self.assertFalse(dsrc.get_data()) - def test_seed_in_config(self): - ds = DataSourceNoCloud.DataSourceNoCloud - + def test_seed_in_config(self, m_is_lxd): data = { 'fs_label': None, 'meta-data': yaml.safe_dump({'instance-id': 'IID'}), @@ -86,13 +105,13 @@ } sys_cfg = 
{'datasource': {'NoCloud': data}} - dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() self.assertEqual(dsrc.userdata_raw, b"USER_DATA_RAW") self.assertEqual(dsrc.metadata.get('instance-id'), 'IID') self.assertTrue(ret) - def test_nocloud_seed_with_vendordata(self): + def test_nocloud_seed_with_vendordata(self, m_is_lxd): md = {'instance-id': 'IID', 'dsmode': 'local'} ud = b"USER_DATA_HERE" vd = b"THIS IS MY VENDOR_DATA" @@ -105,30 +124,26 @@ 'datasource': {'NoCloud': {'fs_label': None}} } - ds = DataSourceNoCloud.DataSourceNoCloud - - dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() self.assertEqual(dsrc.userdata_raw, ud) self.assertEqual(dsrc.metadata, md) self.assertEqual(dsrc.vendordata_raw, vd) self.assertTrue(ret) - def test_nocloud_no_vendordata(self): + def test_nocloud_no_vendordata(self, m_is_lxd): populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), {'user-data': b"ud", 'meta-data': "instance-id: IID\n"}) sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - ds = DataSourceNoCloud.DataSourceNoCloud - - dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() self.assertEqual(dsrc.userdata_raw, b"ud") self.assertFalse(dsrc.vendordata) self.assertTrue(ret) - def test_metadata_network_interfaces(self): + def test_metadata_network_interfaces(self, m_is_lxd): gateway = "103.225.10.1" md = { 'instance-id': 'i-abcd', @@ -149,15 +164,13 @@ sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - ds = DataSourceNoCloud.DataSourceNoCloud - - dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() self.assertTrue(ret) # very simple check just for the strings above self.assertIn(gateway, str(dsrc.network_config)) - def test_metadata_network_config(self): + def test_metadata_network_config(self, m_is_lxd): # network-config needs to get into network_config netconf = {'version': 1, 'config': [{'type': 'physical', 'name': 'interface0', @@ -170,14 +183,28 @@ sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - ds = DataSourceNoCloud.DataSourceNoCloud + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(netconf, dsrc.network_config) - dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + def test_metadata_network_config_with_toplevel_network(self, m_is_lxd): + """network-config may have 'network' top level key.""" + netconf = {'config': 'disabled'} + populate_dir( + os.path.join(self.paths.seed_dir, "nocloud"), + {'user-data': b"ud", + 'meta-data': "instance-id: IID\n", + 'network-config': yaml.dump({'network': netconf}) + "\n"}) + + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() self.assertTrue(ret) self.assertEqual(netconf, dsrc.network_config) - def test_metadata_network_config_over_interfaces(self): + def test_metadata_network_config_over_interfaces(self, m_is_lxd): # network-config should override meta-data/network-interfaces gateway = "103.225.10.1" md = { @@ -203,9 +230,7 @@ sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - ds = DataSourceNoCloud.DataSourceNoCloud - - dsrc = ds(sys_cfg=sys_cfg, 
distro=None, paths=self.paths) + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() self.assertTrue(ret) self.assertEqual(netconf, dsrc.network_config) @@ -233,8 +258,7 @@ for (fmt, expected) in pairs: fill = {} cmdline = fmt % {'ds_id': ds_id} - ret = DataSourceNoCloud.parse_cmdline_data(ds_id=ds_id, fill=fill, - cmdline=cmdline) + ret = parse_cmdline_data(ds_id=ds_id, fill=fill, cmdline=cmdline) self.assertEqual(expected, fill) self.assertTrue(ret) @@ -251,10 +275,43 @@ for cmdline in cmdlines: fill = {} - ret = DataSourceNoCloud.parse_cmdline_data(ds_id=ds_id, fill=fill, - cmdline=cmdline) + ret = parse_cmdline_data(ds_id=ds_id, fill=fill, cmdline=cmdline) self.assertEqual(fill, {}) self.assertFalse(ret) +class TestMaybeRemoveToplevelNetwork(CiTestCase): + """test _maybe_remove_top_network function.""" + basecfg = [{'type': 'physical', 'name': 'interface0', + 'subnets': [{'type': 'dhcp'}]}] + + def test_should_remove_safely(self): + mcfg = {'config': self.basecfg, 'version': 1} + self.assertEqual(mcfg, _maybe_remove_top_network({'network': mcfg})) + + def test_no_remove_if_other_keys(self): + """should not shift if other keys at top level.""" + mcfg = {'network': {'config': self.basecfg, 'version': 1}, + 'unknown_keyname': 'keyval'} + self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) + + def test_no_remove_if_non_dict(self): + """should not shift if not a dict.""" + mcfg = {'network': '"content here'} + self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) + + def test_no_remove_if_missing_config_or_version(self): + """should not shift unless network entry has config and version.""" + mcfg = {'network': {'config': self.basecfg}} + self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) + + mcfg = {'network': {'version': 1}} + self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) + + def test_remove_with_config_disabled(self): + """network/config=disabled should be shifted.""" + mcfg = {'config': 'disabled'} + self.assertEqual(mcfg, _maybe_remove_top_network({'network': mcfg})) + + # vi: ts=4 expandtab diff -Nru cloud-init-18.4/tests/unittests/test_datasource/test_opennebula.py cloud-init-18.5-21-g8ee294d5/tests/unittests/test_datasource/test_opennebula.py --- cloud-init-18.4/tests/unittests/test_datasource/test_opennebula.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/tests/unittests/test_datasource/test_opennebula.py 2019-01-28 20:07:03.000000000 +0000 @@ -123,6 +123,10 @@ self.assertTrue(ret) finally: util.find_devs_with = orig_find_devs_with + self.assertEqual('opennebula', dsrc.cloud_name) + self.assertEqual('opennebula', dsrc.platform_type) + self.assertEqual( + 'seed-dir (%s/seed/opennebula)' % self.tmp, dsrc.subplatform) def test_seed_dir_non_contextdisk(self): self.assertRaises(ds.NonContextDiskDir, ds.read_context_disk_dir, diff -Nru cloud-init-18.4/tests/unittests/test_datasource/test_ovf.py cloud-init-18.5-21-g8ee294d5/tests/unittests/test_datasource/test_ovf.py --- cloud-init-18.4/tests/unittests/test_datasource/test_ovf.py 2018-10-02 20:52:24.000000000 +0000 +++ cloud-init-18.5-21-g8ee294d5/tests/unittests/test_datasource/test_ovf.py 2019-01-28 20:07:03.000000000 +0000 @@ -11,12 +11,16 @@ from textwrap import dedent from cloudinit import util -from cloudinit.tests.helpers import CiTestCase, wrap_and_call +from cloudinit.tests.helpers import CiTestCase, mock, wrap_and_call from cloudinit.helpers import Paths from cloudinit.sources import DataSourceOVF as dsovf from 
cloudinit.sources.helpers.vmware.imc.config_custom_script import (
    CustomScriptNotFound)

+MPATH = 'cloudinit.sources.DataSourceOVF.'
+
+NOT_FOUND = None
+
 OVF_ENV_CONTENT = """<?xml version="1.0" encoding="UTF-8"?>
diff -Nru cloud-init-18.4/tools/ds-identify cloud-init-18.5-21-g8ee294d5/tools/ds-identify
--- cloud-init-18.4/tools/ds-identify 2018-10-02 20:52:24.000000000 +0000
+++ cloud-init-18.5-21-g8ee294d5/tools/ds-identify 2019-01-28 20:07:03.000000000 +0000
+ovf_vmware_transport_guestinfo() {
+    command -v vmware-rpctool >/dev/null 2>&1 || return 1
+    local out="" ret=""
+    out=$(vmware-rpctool "info-get guestinfo.ovfEnv" 2>&1)
+    ret=$?
+    if [ $ret -ne 0 ]; then
+        debug 1 "Running on vmware but rpctool query returned $ret: $out"
+        return 1
+    fi
+    case "$out" in
+        "<?xml"*) return 0;;
+    esac
+}
-    # DI_ISO9660_DEVS is <device>=label, like /dev/sr0=OVF-TRANSPORT
+    ovf_vmware_transport_guestinfo && return "${DS_FOUND}"
+
+    # DI_ISO9660_DEVS is <device>=label,<device>=label2
+    # like /dev/sr0=OVF-TRANSPORT,/dev/other=with spaces
     if [ "${DI_ISO9660_DEVS#${UNAVAILABLE}:}" = "${DI_ISO9660_DEVS}" ]; then
-        for tok in ${DI_ISO9660_DEVS}; do
+        local oifs="$IFS"
+        # shellcheck disable=2086
+        { IFS=","; set -- ${DI_ISO9660_DEVS}; IFS="$oifs"; }
+        for tok in "$@"; do
             is_cdrom_ovf "${tok%%=*}" "${tok#*=}" && return $DS_FOUND
         done
     fi
diff -Nru cloud-init-18.4/tools/run-container cloud-init-18.5-21-g8ee294d5/tools/run-container
--- cloud-init-18.4/tools/run-container 2018-10-02 20:52:24.000000000 +0000
+++ cloud-init-18.5-21-g8ee294d5/tools/run-container 2019-01-28 20:07:03.000000000 +0000
@@ -373,6 +373,7 @@
         inside "$name" sh -c "echo proxy=$http_proxy >> /etc/yum.conf"
         inside "$name" sed -i s/enabled=1/enabled=0/ \
             /etc/yum/pluginconf.d/fastestmirror.conf
+        inside "$name" sh -c "sed -i '/^#baseurl=/s/#//' /etc/yum.repos.d/*.repo"
     else
         debug 1 "do not know how to configure proxy on $OS_NAME"
     fi
diff -Nru cloud-init-18.4/tox.ini cloud-init-18.5-21-g8ee294d5/tox.ini
--- cloud-init-18.4/tox.ini 2018-10-02 20:52:24.000000000 +0000
+++ cloud-init-18.5-21-g8ee294d5/tox.ini 2019-01-28 20:07:03.000000000 +0000
@@ -21,7 +21,7 @@
 basepython = python3
 deps =
     # requirements
-    pylint==1.8.1
+    pylint==2.2.2
     # test-requirements because unit tests are now present in cloudinit tree
     -r{toxinidir}/test-requirements.txt
 commands = {envpython} -m pylint {posargs:cloudinit tests tools}
@@ -75,7 +75,7 @@
     jsonpatch==1.16
     six==1.10.0
     # test-requirements
-    httpretty==0.8.6
+    httpretty==0.9.6
     mock==1.3.0
     nose==1.3.7
     unittest2==1.1.0
diff -Nru cloud-init-18.4/udev/66-azure-ephemeral.rules cloud-init-18.5-21-g8ee294d5/udev/66-azure-ephemeral.rules
--- cloud-init-18.4/udev/66-azure-ephemeral.rules 2018-10-02 20:52:24.000000000 +0000
+++ cloud-init-18.5-21-g8ee294d5/udev/66-azure-ephemeral.rules 2019-01-28 20:07:03.000000000 +0000
@@ -4,10 +4,26 @@
 ATTRS{ID_VENDOR}!="Msft", GOTO="cloud_init_end"
 ATTRS{ID_MODEL}!="Virtual_Disk", GOTO="cloud_init_end"
 
-# Root has a GUID of 0000 as the second value
+# Root has a GUID of 0000 as the second value on Gen1 instances
 # The resource/resource has GUID of 0001 as the second value
 ATTRS{device_id}=="?00000000-0000-*", ENV{fabric_name}="azure_root", GOTO="ci_azure_names"
 ATTRS{device_id}=="?00000000-0001-*", ENV{fabric_name}="azure_resource", GOTO="ci_azure_names"
+
+# Azure well known SCSI controllers on Gen2 instances
+ATTRS{device_id}=="{f8b3781a-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi0", GOTO="azure_datadisk"
+# Do not create symlinks for scsi[1-3] or unmatched device_ids
+ATTRS{device_id}=="{f8b3781b-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi1", GOTO="cloud_init_end"
+ATTRS{device_id}=="{f8b3781c-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi2", GOTO="cloud_init_end"
+ATTRS{device_id}=="{f8b3781d-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi3", GOTO="cloud_init_end"
+GOTO="cloud_init_end"
+
+# Map scsi#/lun# fabric_name to 
azure_root|resource on Gen2 instances +LABEL="azure_datadisk" +ENV{DEVTYPE}=="partition", PROGRAM="/bin/sh -c 'readlink /sys/class/block/%k/../device|cut -d: -f4'", ENV{fabric_name}="$env{fabric_scsi_controller}/lun$result" +ENV{DEVTYPE}=="disk", PROGRAM="/bin/sh -c 'readlink /sys/class/block/%k/device|cut -d: -f4'", ENV{fabric_name}="$env{fabric_scsi_controller}/lun$result" + +ENV{fabric_name}=="scsi0/lun0", ENV{fabric_name}="azure_root", GOTO="ci_azure_names" +ENV{fabric_name}=="scsi0/lun1", ENV{fabric_name}="azure_resource", GOTO="ci_azure_names" GOTO="cloud_init_end" # Create the symlinks
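The Gen2 block above derives fabric_name from the SCSI controller and LUN rather than from a device GUID: each PROGRAM line reads the target of the device symlink under /sys/class/block and keeps the fourth ':'-separated field, which is the LUN. A minimal Python sketch of the same lookup, assuming Linux sysfs; the function and the example kernel name 'sda' are illustrative only:

    import os

    def lun_for_block_device(kernel_name='sda'):
        """Mimic the udev PROGRAM line: take the LUN field of the scsi
        address found in the block device's sysfs symlink target."""
        # /sys/class/block/sda/device -> ../../../<host>:<channel>:<target>:<lun>
        target = os.readlink('/sys/class/block/%s/device' % kernel_name)
        return target.rsplit(':', 1)[-1]

A device resolving to scsi0/lun0 is then assigned fabric_name "azure_root" and scsi0/lun1 "azure_resource", so the symlink-creation rules under the ci_azure_names label handle Gen2 disks the same way as Gen1.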