diff -Nru cloud-init-23.4.4/.github/workflows/check_format.yml cloud-init-24.1.3/.github/workflows/check_format.yml --- cloud-init-23.4.4/.github/workflows/check_format.yml 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/.github/workflows/check_format.yml 2024-03-27 13:14:04.000000000 +0000 @@ -87,10 +87,12 @@ - name: "Install dependencies" run: | sudo DEBIAN_FRONTEND=noninteractive apt-get -qy update - sudo DEBIAN_FRONTEND=noninteractive apt-get -qy install tox lintian + sudo DEBIAN_FRONTEND=noninteractive apt-get -qy install tox - name: "Spellcheck" run: | - make check_spelling + tox + env: + TOXENV: doc-spelling - name: "Build docs" env: TOXENV: doc diff -Nru cloud-init-23.4.4/.github/workflows/integration.yml cloud-init-24.1.3/.github/workflows/integration.yml --- cloud-init-23.4.4/.github/workflows/integration.yml 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/.github/workflows/integration.yml 2024-03-27 13:14:04.000000000 +0000 @@ -17,7 +17,7 @@ shell: sh -ex {0} env: - RELEASE: bionic + RELEASE: focal jobs: package-build: diff -Nru cloud-init-23.4.4/.github/workflows/linkcheck.yml cloud-init-24.1.3/.github/workflows/linkcheck.yml --- cloud-init-23.4.4/.github/workflows/linkcheck.yml 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/.github/workflows/linkcheck.yml 2024-03-27 13:14:04.000000000 +0000 @@ -37,6 +37,8 @@ broken_count=$(grep -c "broken" output.txt) if [[ $broken_count -ge 5 ]]; then echo "Too many broken links detected: $broken_count" + broken_matches=$(grep "broken" output.txt) + echo "Broken links \n$broken_matches" exit 1 else echo "Number of broken links is below threshold: $broken_count" diff -Nru cloud-init-23.4.4/.github/workflows/unit.yml cloud-init-24.1.3/.github/workflows/unit.yml --- cloud-init-23.4.4/.github/workflows/unit.yml 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/.github/workflows/unit.yml 2024-03-27 13:14:04.000000000 +0000 @@ -14,19 +14,28 @@ unittests: strategy: matrix: - python-version: [ "3.7", "3.8", "3.9", "3.10", "3.11", "3.12" ] - toxenv: [ py3 ] + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] + toxenv: [py3] experimental: [false] + check-latest: [false] + continue-on-error: [false] include: - python-version: "3.6" toxenv: lowest-supported + continue-on-error: false + check-latest: false experimental: false + - python-version: "3.13-dev" + toxenv: py3 + check-latest: true + experimental: true + continue-on-error: true name: unittest / ${{ matrix.toxenv }} / python ${{matrix.python-version}} runs-on: ubuntu-20.04 continue-on-error: ${{ matrix.experimental }} steps: - name: "Checkout" - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: # Fetch all tags for tools/read-version fetch-depth: 0 @@ -34,9 +43,8 @@ uses: actions/setup-python@v4 with: python-version: ${{matrix.python-version}} + check-latest: ${{matrix.check-latest}} - name: Install tox run: pip install tox - name: Run unittest - env: - PYTEST_ADDOPTS: -v run: tox -e ${{ matrix.toxenv }} diff -Nru cloud-init-23.4.4/.pylintrc cloud-init-24.1.3/.pylintrc --- cloud-init-23.4.4/.pylintrc 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/.pylintrc 2024-03-27 13:14:04.000000000 +0000 @@ -6,7 +6,7 @@ [MESSAGES CONTROL] -# Errors and warings with some filtered: +# Errors and warnings with some filtered: # W0201(attribute-defined-outside-init) # W0212(protected-access) # W0221(arguments-differ) diff -Nru cloud-init-23.4.4/ChangeLog cloud-init-24.1.3/ChangeLog --- cloud-init-23.4.4/ChangeLog 2024-02-27 15:17:52.000000000 
+0000 +++ cloud-init-24.1.3/ChangeLog 2024-03-27 13:14:04.000000000 +0000 @@ -1,3 +1,284 @@ +24.1.3 + - fix: Always use single datasource if specified (#5098) + - fix: Allow caret at the end of apt package (#5099) + +24.1.2 + - test: Don't assume ordering of ThreadPoolExecutor submissions (#5052) + - refactor(ec2): simplify convert_ec2_metadata_network_config + - tests: drop CiTestCase and convert to pytest + - bug(tests): mock reads of host's /sys/class/net via get_sys_class_path + - fix: Fix breaking changes in package install (#5069) + - fix: Undeprecate 'network' in schema route definition (#5072) + - fix(ec2): fix ipv6 policy routing + - fix: document and add 'accept-ra' to network schema (#5060) + - bug(maas): register the correct DatasourceMAASLocal in init-local + (#5068) (LP: #2057763) + +24.1.1 + - fix: Include DataSourceCloudStack attribute in unpickle test (#5039) + - bug(vmware): initialize new DataSourceVMware attributes at unpickle (#5021) + - fix(apt): Don't warn on apt 822 source format (#5028) + - fix: Add "broadcast" to network v1 schema (#5034) + - pro: honor but warn on custom ubuntu_advantage in /etc/cloud/cloud.cfg (#5030) + - net/dhcp: handle timeouts for dhcpcd (#5022) + - fix: Make wait_for_url respect explicit arguments + - bug(wait_for_url): when exceptions occur url is unset, use url_exc + - test: Fix scaleway retry assumptions + - fix: Make DataSourceOracle more resilient to early network issues (#5025) + - tests: Fix wsl test (#5008) + +24.1 + - fix: Don't warn on vendor directory (#4986) + - apt: kill spawned keyboxd after gpg cmd interaction + - tests: upgrade tests should only validate current boot log + - net/dhcp: fix maybe_perform_dhcp_discovery check for interface=None + [Chris Patterson] + - doc(network-v2): fix section nesting levels + - fix(tests): don't check for clean log on minimal image (#4965) [Cat Red] + - fix(cc_resize): Don't warn if zpool command not found (#4969) + (LP: #2055219) + - feat(subp): Make invalid command warning more user-friendly (#4972) + - docs: Remove statement about device path matching (#4966) + - test: Fix xfail to check the dhcp client name (#4971) + - tests: avoid console prompts when removing gpg on Noble + - test: fix test_get_status_systemd_failure + - fix: Remove hardcoded /var/lib/cloud hotplug path (#4940) + - refactor: Refactor status.py (#4864) + - test: Use correct lxd network-config keys (#4950) + - test: limit temp dhcp6 changes to < NOBLE (#4942) + - test: allow downgrades when install debs (#4941) + - tests: on noble, expect default /etc/apt/sources.list + - tests: lxd_vm early boot status test ordered After=systemd-remount-fs + (#4936) + - tests: pro integration tests supply ubuntu_advantage until pro v32 + (#4935) + - feat(hotplug): add cmd to enable hotplug (#4821) + - test: fix test_combined_cloud_config_json (#4925) + - test: xfail udhcpc on azure (#4924) + - feat: Implement the WSL datasource (#4786) [Carlos Nihelton] + - refactor(openrc): Improve the OpenRC files (#4916) [dermotbradley] + - tests: use apt install instead of dpkg -i to install pkg deps + - tests: inactive module rename ubuntu_advantage to ubuntu_pro + - test: fix tmpdir in test_cc_apk_configure (#4914) + - test: fix jsonschema version checking in pro test (#4915) + - feat(dhcp): Make dhcpcd the default dhcp client (#4912) + - feat(Alpine) cc_growpart.py: fix handling of /dev/mapper devices (#4876) + [dermotbradley] + - test: Retry longer in test_status.py integration test (#4910) + - test: fix kernel override test (#4913) + - chore: 
Rename sysvinit/gentoo directory to sysvinit/openrc (#4906) + [dermotbradley] + - doc: update ubuntu_advantage references to pro + - chore: rename cc_ubuntu_advantage to cc_ubuntu_pro (SC-1555) + - feat(ubuntu pro): deprecate ubuntu_pro key in favor of ubuntu_advantage + - feat(schema): support ubuntu_pro key and deprecate ubuntu_advantage + - test: fix verify_clean_log (#4903) + - test: limit test_no_hotplug_triggered_by_docker to stable releases + - tests: generalize warning Open vSwitch warning from netplan apply (#4894) + - fix(hotplug): remove literal quotes in args + - feat(apt): skip known /etc/apt/sources.list content + - feat(apt): use APT deb822 source format by default + - test(ubuntu-pro): change livepatch to esm-infra + - doc(ec2): fix metadata urls (#4880) + - fix: unpin jsonschema and update tests (#4882) + - distro: add eject FreeBSD code path (#4838) [Mina Galić] + - feat(ec2): add hotplug as a default network update event (#4799) + - feat(ec2): support instances with repeated device-number (#4799) + - feat(cc_install_hotplug): trigger hook on known ec2 drivers (#4799) + - feat(ec2): support multi NIC/IP setups (#4799) + - feat(hotplug): hook-hotplug is now POSIX shell add OpenRC init script + [dermotbradley] + - test: harden test_dhcp.py::test_noble_and_newer_force_client + - test: fix test_combined_cloud_config_json (#4868) + - feat(apport): Disable hook when disabled (#4874) + - chore: Add pyright ignore comments (#4874) + - bug(apport): Fix invalid typing (#4874) + - refactor: Move general apport hook to main branch (#4874) + - feat(bootspeed)!: cloud-config.service drop After=snapd.seeded + - chore: update CI package build to oldest supported Ubuntu release focal + (#4871) + - test: fix test_cli.test_valid_userdata + - feat: handle error when log file is empty (#4859) [Hasan] + - test: fix test_ec2_ipv6 + - fix: Address TIOBE abstract interpretation issues (#4866) + - feat(dhcp): Make udhcpc use same client id (#4830) + - feat(dhcp): Support InfiniBand with dhcpcd (#4830) + - feat(azure): Add ProvisionGuestProxyAgent OVF setting (#4860) + [Ksenija Stanojevic] + - test: Bring back dhcp6 integration test changes (#4855) + - tests: add status --wait blocking test from early boot + - tests: fix retry decorator to return the func value + - docs: add create_hostname_file to all hostname user-data examples + (#4727) [Cat Red] + - fix: Fix typos (#4850) [Viktor Szépe] + - feat(dhcpcd): Read dhcp option 245 for azure wireserver (#4835) + - tests(dhcp): Add udhcpc client to test matrix (#4839) + - fix: Add types to network v1 schema (#4841) + - docs(vmware): fixed indentation on example userdata yaml (#4854) + [Alec Warren] + - tests: Remove invalid keyword from method call + - fix: Handle systemctl when dbus not ready (#4842) (LP: #2046483) + - fix(schema cli): avoid netplan validation on net-config version 1 + - tests: reduce expected reports due to dropped rightscale module + - tests(net-config): add awareness of netplan on stable Ubuntu + [Gilbert Gilb's] + - feat: fall back to cdrom_id eject if eject is not available (#4769) + [Cat Red] + - fix(packages/bddeb): restrict debhelper-compat to 12 in focal (#4831) + - tests: Add kernel commandline test (#4833) + - fix: Ensure NetworkManager renderer works without gateway (#4829) + - test: Correct log parsing in schema test (#4832) + - refactor: Remove cc_rightscale_userdata (#4813) + - refactor: Replace load_file with load_binary_file to simplify typing + (#4823) + - refactor: Add load_text_file function to simplify typing 
(#4823) + - refactor: Change variable name for consistent typing (#4823) + - feat(dhcp): Add support for dhcpcd (#4746) + - refactor: Remove unused networking code (#4810) + - test: Add more DNS net tests + - BREAKING CHANGE: Stop adding network v2 DNS to global DNS + - doc: update DataSource.default_update_events doc (#4815) + - chore: do not modify instance attribute (#4815) + - test: fix mocking leaks (#4815) + - Revert "ci: Pin pytest<8.0.0. (#4816)" (#4815) + - test: Update tests for passlib (#4818) + - fix(net-schema): no warn when skipping schema check on non-netplan + - feat(SUSE): reboot marker file is written as /run/reboot-needed (#4788) + [Robert Schweikert] + - test: Cleanup unwanted logger setup calls (#4817) + - refactor(cloudinit.util): Modernize error handling, add better warnings + (#4812) + - ci: Pin pytest<8.0.0. (#4816) + - fix(tests): fixing KeyError on integrations tests (#4811) [Cat Red] + - tests: integration for network schema on netplan systems (#4767) + - feat(schema): use netplan API to validate network-config (#4767) + - chore: define CLOUDINIT_NETPLAN_FILE static var (#4767) + - fix: cli schema config-file option report network-config type (#4767) + - refactor(azure): replace BrokenAzureDataSource with reportable errors + (#4807) [Chris Patterson] + - Fix Alpine and Mariner /etc/hosts templates (#4780) [dermotbradley] + - tests: revert #4792 as noble images no longer return 2 (#4809) [Cat Red] + - tests: use client fixture instead of class_client in cleantest (#4806) + - tests: enable ds-idenitfy xfail test LXD-kvm-not-MAAS-1 (#4808) + - fix(tests): failing integration tests due to missing ua token (#4802) + [Cat Red] + - Revert "Use grep for faster parsing of cloud config in ds-identify + (#4327)" + - tests: Demonstrate ds-identify yaml parsing broken + - tests: add exit 2 on noble from cloud-init status (#4792) + - fix: linkcheck for ci to ignore scaleway anchor URL (#4793) + - feat: Update cacerts to support VMware Photon (#4763) + [Christopher McCann] + - fix: netplan rendering integrations tests (#4795) [Cat Red] + - azure: remove cloud-init.log reporting via KVP (#4715) [Chris Patterson] + - feat(Alpine): Modify ds-identify for Alpine support and add OpenRC + init.d script (#4785) [dermotbradley] + - doc: Add DatasourceScaleway documentation (#4773) [Louis Bouchard] + - fix: packaged logrotate file lacks suffix on ubuntu (#4790) + - feat(logrotate): config flexibility more backups (#4790) + - fix(clean): stop warning when running clean command (#4761) [d1r3ct0r] + - feat: network schema v1 strict on nic name length 15 (#4774) + - logrotate config (#4721) [Fabian Lichtenegger-Lukas] + - test: Enable coverage in integration tests (#4682) + - test: Move unit test helpers to global test helpers (#4682) + - test: Remove snapshot option from install_new_cloud_init (#4682) + - docs: fix cloud-init single param docs (#4682) + - Alpine: fix location of dhclient leases file (#4782) [dermotbradley] + - test(jsonschema): Pin jsonschema version (#4781) + - refactor(IscDhclient): discover DHCP leases at distro-provided location + (#4683) [Phsm Qwerty] + - feat: datasource check for WSL (#4730) [Carlos Nihelton] + - test: Update hostname integration tests (#4744) + - test: Add mantic and noble releases to integration tests (#4744) + - refactor: Ensure internal DNS state same for v1 and v2 (#4756) + - feat: Add v2 route mtu rendering to NetworkManager (#4748) + - tests: stable ubuntu releases will not exit 2 on warnings (#4757) + - doc(ds-identify): Describe 
ds-identify irrespective of distro (#4742) + - fix: relax NetworkManager renderer rules (#4745) + - fix: fix growpart race (#4618) + - feat: apply global DNS to interfaces in network-manager (#4723) + [Florian Apolloner] + - feat(apt): remove /etc/apt/sources.list when deb22 preferred (#4740) + - chore: refactor schema data as enums and namedtuples (#4585) + - feat(schema): improve CLI message on unprocessed data files (#4585) + - fix(config): relocate /run to /var/run on BSD (canonical#4677) + [Mina Galić] + - fix(ds-identify): relocate /run on *BSD (#4677) [Mina Galić] + - fix(sysvinit): make code a bit more consistent (#4677) [Mina Galić] + - doc: Document how cloud-init is, not how it was (#4737) + - tests: add expected exit 2 on noble from cloud-init status (#4738) + - test(linkcheck): ignore github md and rst link headers (#4734) + - test: Update webhook test due to removed cc_migrator module (#4726) + - fix(ds-identify): Return code 2 is a valid result, use cached value + - fix(cloudstack): Use parsed lease file for virtual router in cloudstack + - fix(dhcp): Guard against FileNotFoundError and NameError exceptions + - fix(apt_configure): disable sources.list if rendering deb822 (#4699) + (LP: #2045086) + - docs: Add link to contributing to docs (#4725) [Cat Red] + - chore: remove commented code (#4722) + - chore: Add log message when create_hostname_file key is false (#4724) + [Cat Red] + - fix: Correct v2 NetworkManager route rendering (#4637) + - azure/imds: log http failures as warnings instead of info (#4714) + [Chris Patterson] + - fix(setup): Relocate libexec on OpenBSD (#4708) [Mina Galić] + - feat(jinja): better jinja feedback and error catching (#4629) + [Alec Warren] + - test: Fix silent swallowing of unexpected subp error (#4702) + - fix: Move cloud-final.service after time-sync.target (#4610) + [Dave Jones] (LP: #1951639) + - feat(log): Make logger name more useful for __init__.py + - chore: Remove cc_migrator module (#4690) + - fix(tests): make cmd/devel/tests work on non-GNU [Mina Galić] + - chore: Remove cmdline from spelling list (#4670) + - doc: Document boot status meaning (#4670) + - doc: Set expectations for new datasources (#4670) + - ci: Show linkcheck broken links in job output (#4670) + - dmi: Add support for OpenBSD (#4654) [Mina Galić] + - ds-identify: fake dmidecode support on OpenBSD (#4654) [Mina Galić] + - ds-identify: add OpenBSD support in uname (#4654) [Mina Galić] + - refactor: Ensure '_cfg' in Init class is dict (#4674) + - refactor: Make event scope required in stages.py (#4674) + - refactor: Remove unused argument (#4674) + - chore: Move from lintian to a sphinx spelling plugin (#3639) + - fix(doc): Fix spelling errors found by sphinxcontrib-spelling (#3639) + - ci: Add Python 3.13 (#4567) + - Add AlexSv04047 to CLA signers file (#4671) [AlexSv04047] + - fix(openbsd): services & build tool (#4660) [CodeBleu] + - tests/unittests: add a new unit test for network manager net activator + (#4672) [Ani Sinha] + - Implement DataSourceCloudStack.get_hostname() (#4433) [Phsm Qwerty] + - net/nm: check for presence of ifcfg files when nm connection files + are absent (#4645) [Ani Sinha] + - doc: Overhaul debugging documentation (#4578) + - doc: Move dangerous commands to dev docs (#4578) + - doc: Relocate file location docs (#4578) + - doc: Remove the debugging page (#4578) + - fix(util): Fix boottime to work on OpenBSD (#4667) [Mina Galić] + - net: allow dhcp6 configuration from generate_fallback_configuration() + [Ani Sinha] + - net/network_manager: do 
not set "may-fail" to False for both ipv4 and + ipv6 dhcp [Ani Sinha] + - feat(subp): Measure subprocess command time (#4606) + - fix(python3.13): Fix import error for passlib on Python 3.13 (#4669) + - style(brpm/bddeb): add black and ruff for packages build scripts (#4666) + - copr: remove TODO.rst from spec file + - fix(packages/brpm): correct syntax error and typo + - style(ruff): fix tip target + - config: Module documentation updates (#4599) + - refactor(subp): Remove redundant parameter 'env' (#4555) + - refactor(subp): Remove unused parameter 'target' (#4555) + - refactor: Remove 'target' boilerplate from cc_apt_configure (#4555) + - refactor(subp): Re-add return type to subp() (#4555) + - refactor(subp): Add type information to args (#4555) + - refactor(subp): Use subprocess.DEVNULL (#4555) + - refactor(subp): Remove parameter 'combine_capture' (#4555) + - refactor(subp): Remove unused parameter 'status_cb' (#4555) + - fix(cli): fix parsing of argparse subcommands (#4559) + [Calvin Mwadime] (LP: #2040325) + - chore!: drop support for dsa ssh hostkeys in docs and schema (#4456) + - chore!: do not generate ssh dsa host keys (#4456) [shixuantong] + 23.4.4 - fix(nocloud): smbios datasource definition - tests: Check that smbios seed works diff -Nru cloud-init-23.4.4/Makefile cloud-init-24.1.3/Makefile --- cloud-init-23.4.4/Makefile 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/Makefile 2024-03-27 13:14:04.000000000 +0000 @@ -124,47 +124,8 @@ fmt-tip: tox -e do_format_tip && tox -e check_format_tip -# Spell check && filter false positives -_CHECK_SPELLING := find doc -type f -exec spellintian {} + | \ - grep -v -e 'doc/rtd/topics/cli.rst: modules modules' \ - -e 'doc/examples/cloud-config-mcollective.txt: WARNING WARNING' \ - -e 'doc/examples/cloud-config-power-state.txt: Bye Bye' \ - -e 'doc/examples/cloud-config.txt: Bye Bye' \ - -e 'doc/rtd/topics/cli.rst: DOCS DOCS' \ - -e 'doc/summit/2023_summit_shared_notes.md: Moustafa Moustafa' \ - -e 'dependant' - - -# For CI we require a failing return code when spellintian finds spelling errors -check_spelling: - @! $(_CHECK_SPELLING) - -# Manipulate the output of spellintian into a valid "sed" command which is run -# to fix the error -# -# Example spellintian output: -# -# doc/examples/kernel-cmdline.txt: everthing -> everything -# -# The "fix_spelling" target manipulates the above output into the following command -# and runs that command. 
-# -# sed -i "s/everthing/everything/g" doc/examples/kernel-cmdline.txt -# -# awk notes: -# -# -F ': | -> ' means use the strings ": " or " -> " as field delimeters -# \046 is octal for double quote -# $$2 will contain the second field, ($ must be escaped because this is in a Makefile) -# -# Limitation: duplicate words with newline between them are not automatically fixed -fix_spelling: - @$(_CHECK_SPELLING) | \ - sed 's/ (duplicate word)//g' | \ - awk -F ': | -> ' '{printf "sed -i \047s/%s/%s/g\047 %s\n", $$2, $$3, $$1}' | \ - sh .PHONY: all check test lint clean rpm srpm deb deb-src yaml .PHONY: check_version clean_pyc -.PHONY: unittest style-check fix_spelling render-template benchmark-generator -.PHONY: clean_pytest clean_packaging check_spelling clean_release doc +.PHONY: unittest style-check render-template benchmark-generator +.PHONY: clean_pytest clean_packaging clean_release doc diff -Nru cloud-init-23.4.4/bash_completion/cloud-init cloud-init-24.1.3/bash_completion/cloud-init --- cloud-init-23.4.4/bash_completion/cloud-init 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/bash_completion/cloud-init 2024-03-27 13:14:04.000000000 +0000 @@ -11,7 +11,7 @@ prev_word="${COMP_WORDS[COMP_CWORD-1]}" subcmds="analyze clean collect-logs devel features init modules query schema single status" - base_params="--help --file --version --debug --force" + base_params="--help --version --debug --force" case ${COMP_CWORD} in 1) COMPREPLY=($(compgen -W "$base_params $subcmds" -- $cur_word)) @@ -34,10 +34,10 @@ COMPREPLY=($(compgen -W "--help" -- $cur_word)) ;; init) - COMPREPLY=($(compgen -W "--help --local" -- $cur_word)) + COMPREPLY=($(compgen -W "--help --local --file" -- $cur_word)) ;; modules) - COMPREPLY=($(compgen -W "--help --mode" -- $cur_word)) + COMPREPLY=($(compgen -W "--help --mode --file" -- $cur_word)) ;; query) @@ -46,7 +46,7 @@ COMPREPLY=($(compgen -W "--help --config-file --docs --annotate --system" -- $cur_word)) ;; single) - COMPREPLY=($(compgen -W "--help --name --frequency --report" -- $cur_word)) + COMPREPLY=($(compgen -W "--help --name --frequency --report --file" -- $cur_word)) ;; status) COMPREPLY=($(compgen -W "--help --long --wait" -- $cur_word)) diff -Nru cloud-init-23.4.4/cloudinit/analyze/show.py cloud-init-24.1.3/cloudinit/analyze/show.py --- cloud-init-23.4.4/cloudinit/analyze/show.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/analyze/show.py 2024-03-27 13:14:04.000000000 +0000 @@ -7,6 +7,7 @@ import datetime import json import os +import sys import time from cloudinit import subp, util @@ -297,7 +298,7 @@ boot_records = [] unprocessed = [] - for e in range(0, len(sorted_events)): + for e in range(len(sorted_events)): event = events[e] try: next_evt = events[e + 1] @@ -370,6 +371,9 @@ :return: json version of logfile, raw file """ data = infile.read() + if not data.strip(): + sys.stderr.write("Empty file %s\n" % infile.name) + sys.exit(1) try: return json.loads(data), data except ValueError: diff -Nru cloud-init-23.4.4/cloudinit/apport.py cloud-init-24.1.3/cloudinit/apport.py --- cloud-init-23.4.4/cloudinit/apport.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/apport.py 2024-03-27 13:14:04.000000000 +0000 @@ -5,13 +5,16 @@ """Cloud-init apport interface""" import json +import logging import os +from typing import Dict from cloudinit.cmd.devel import read_cfg_paths from cloudinit.cmd.devel.logs import ( INSTALLER_APPORT_FILES, INSTALLER_APPORT_SENSITIVE_FILES, ) +from cloudinit.cmd.status import 
is_cloud_init_enabled try: from apport.hookutils import ( @@ -61,6 +64,7 @@ "Vultr", "ZStack", "Outscale", + "WSL", "Other", ] @@ -76,7 +80,7 @@ def attach_cloud_init_logs(report, ui=None): """Attach cloud-init logs and tarfile from 'cloud-init collect-logs'.""" - attach_root_command_outputs( + attach_root_command_outputs( # pyright: ignore report, { "cloud-init-log-warnings": ( @@ -85,10 +89,12 @@ "cloud-init-output.log.txt": "cat /var/log/cloud-init-output.log", }, ) - root_command_output( + root_command_output( # pyright: ignore ["cloud-init", "collect-logs", "-t", "/tmp/cloud-init-logs.tgz"] ) - attach_file(report, "/tmp/cloud-init-logs.tgz", "logs.tgz") + attach_file( # pyright: ignore + report, "/tmp/cloud-init-logs.tgz", "logs.tgz" + ) def attach_hwinfo(report, ui=None): @@ -158,7 +164,7 @@ def attach_ubuntu_pro_info(report, ui=None): """Attach ubuntu pro logs and tag if keys present in user-data.""" realpath = os.path.realpath("/var/log/ubuntu-advantage.log") - attach_file_if_exists(report, realpath) + attach_file_if_exists(report, realpath) # pyright: ignore if os.path.exists(realpath): report.setdefault("Tags", "") if report["Tags"]: @@ -181,10 +187,12 @@ raise StopIteration # User cancelled if response: realpath = os.path.realpath(user_data_file) - attach_file(report, realpath, "user_data.txt") + attach_file(report, realpath, "user_data.txt") # pyright: ignore for apport_file in INSTALLER_APPORT_SENSITIVE_FILES: realpath = os.path.realpath(apport_file.path) - attach_file_if_exists(report, realpath, apport_file.label) + attach_file_if_exists( # pyright: ignore + report, realpath, apport_file.label + ) def add_bug_tags(report): @@ -208,7 +216,7 @@ def add_info(report, ui): - """This is an entry point to run cloud-init's apport functionality. + """This is an entry point to run cloud-init's package-specific hook Distros which want apport support will have a cloud-init package-hook at /usr/share/apport/package-hooks/cloud-init.py which defines an add_info @@ -226,3 +234,102 @@ attach_ubuntu_pro_info(report, ui) add_bug_tags(report) return True + + +def _get_azure_data(ds_data) -> Dict[str, str]: + compute = ds_data.get("meta_data", {}).get("imds", {}).get("compute") + if not compute: + return {} + name_to_report_map = { + "publisher": "ImagePublisher", + "offer": "ImageOffer", + "sku": "ImageSKU", + "version": "ImageVersion", + "vmSize": "VMSize", + } + azure_data = {} + for src_key, report_key_name in name_to_report_map.items(): + azure_data[report_key_name] = compute[src_key] + return azure_data + + +def _get_ec2_data(ds_data) -> Dict[str, str]: + document = ( + ds_data.get("dynamic", {}).get("instance-identity", {}).get("document") + ) + if not document: + return {} + wanted_keys = { + "architecture", + "billingProducts", + "imageId", + "instanceType", + "region", + } + return { + key: value for key, value in document.items() if key in wanted_keys + } + + +PLATFORM_SPECIFIC_INFO = {"azure": _get_azure_data, "ec2": _get_ec2_data} + + +def add_datasource_specific_info(report, platform: str, ds_data) -> None: + """Add datasoure specific information from the ds dictionary. + + ds_data contains the "ds" entry from data from + /run/cloud/instance-data.json. 
+ """ + platform_info = PLATFORM_SPECIFIC_INFO.get(platform) + if not platform_info: + return + retrieved_data = platform_info(ds_data) + for key, value in retrieved_data.items(): + if not value: + continue + report[platform.capitalize() + key.capitalize()] = value + + +def general_add_info(report, _) -> None: + """Entry point for Apport. + + This hook runs for every apport report + + Add a subset of non-sensitive cloud-init data from + /run/cloud/instance-data.json that will be helpful for debugging. + """ + try: + if not is_cloud_init_enabled(): + return + with open("/run/cloud-init/instance-data.json", "r") as fopen: + instance_data = json.load(fopen) + except FileNotFoundError: + logging.getLogger().warning( + "cloud-init run data not found on system. " + "Unable to add cloud-specific data." + ) + return + + v1 = instance_data.get("v1") + if not v1: + logging.getLogger().warning( + "instance-data.json lacks 'v1' metadata. Present keys: %s", + sorted(instance_data.keys()), + ) + return + + for key, report_key in { + "cloud_id": "CloudID", + "cloud_name": "CloudName", + "machine": "CloudArchitecture", + "platform": "CloudPlatform", + "region": "CloudRegion", + "subplatform": "CloudSubPlatform", + }.items(): + value = v1.get(key) + if value: + report[report_key] = value + + add_datasource_specific_info( + report, v1["platform"], instance_data.get("ds") + ) diff -Nru cloud-init-23.4.4/cloudinit/cloud.py cloud-init-24.1.3/cloudinit/cloud.py --- cloud-init-23.4.4/cloudinit/cloud.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/cloud.py 2024-03-27 13:14:04.000000000 +0000 @@ -57,6 +57,17 @@ return copy.deepcopy(self._cfg) def run(self, name, functor, args, freq=None, clear_on_fail=False): + """Run a function gated by a named semaphore for a desired frequency. + + The typical case for this method would be to limit running of the + provided func to a single well-defined frequency: + PER_INSTANCE, PER_BOOT or PER_ONCE + + The semaphore provides a gate that persists across cloud-init + boot stage boundaries so multiple modules can share this state + even if they happen to be run in different boot stages or across + reboots. 
+ """ return self._runners.run(name, functor, args, freq, clear_on_fail) def get_template_filename(self, name): diff -Nru cloud-init-23.4.4/cloudinit/cmd/clean.py cloud-init-24.1.3/cloudinit/cmd/clean.py --- cloud-init-23.4.4/cloudinit/cmd/clean.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/cmd/clean.py 2024-03-27 13:14:04.000000000 +0000 @@ -13,6 +13,7 @@ from cloudinit import settings from cloudinit.distros import uses_systemd +from cloudinit.net.netplan import CLOUDINIT_NETPLAN_FILE from cloudinit.stages import Init from cloudinit.subp import ProcessExecutionError, runparts, subp from cloudinit.util import ( @@ -26,7 +27,7 @@ ETC_MACHINE_ID = "/etc/machine-id" GEN_NET_CONFIG_FILES = [ - "/etc/netplan/50-cloud-init.yaml", + CLOUDINIT_NETPLAN_FILE, "/etc/NetworkManager/conf.d/99-cloud-init.conf", "/etc/NetworkManager/conf.d/30-cloud-init-ip6-addr-gen-mode.conf", "/etc/NetworkManager/system-connections/cloud-init-*.nmconnection", diff -Nru cloud-init-23.4.4/cloudinit/cmd/cloud_id.py cloud-init-24.1.3/cloudinit/cmd/cloud_id.py --- cloud-init-23.4.4/cloudinit/cmd/cloud_id.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/cmd/cloud_id.py 2024-03-27 13:14:04.000000000 +0000 @@ -9,7 +9,7 @@ import sys from cloudinit.cmd.devel import read_cfg_paths -from cloudinit.cmd.status import UXAppStatus, get_status_details +from cloudinit.cmd.status import RunningStatus, get_status_details from cloudinit.sources import METADATA_UNKNOWN, canonical_cloud_id from cloudinit.util import error @@ -66,11 +66,11 @@ @return: 0 on success, 1 on error, 2 on disabled, 3 on cloud-init not run. """ status_details = get_status_details() - if status_details.status == UXAppStatus.DISABLED: - sys.stdout.write("{0}\n".format(status_details.status.value)) + if status_details.running_status == RunningStatus.DISABLED: + sys.stdout.write("{0}\n".format(status_details.running_status.value)) return 2 - elif status_details.status == UXAppStatus.NOT_RUN: - sys.stdout.write("{0}\n".format(status_details.status.value)) + elif status_details.running_status == RunningStatus.NOT_STARTED: + sys.stdout.write("{0}\n".format(status_details.running_status.value)) return 3 try: diff -Nru cloud-init-23.4.4/cloudinit/cmd/devel/hotplug_hook.py cloud-init-24.1.3/cloudinit/cmd/devel/hotplug_hook.py --- cloud-init-23.4.4/cloudinit/cmd/devel/hotplug_hook.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/cmd/devel/hotplug_hook.py 2024-03-27 13:14:04.000000000 +0000 @@ -4,12 +4,14 @@ """Handle reconfiguration on hotplug events.""" import abc import argparse +import json import logging import os import sys import time -from cloudinit import log, reporting, stages +from cloudinit import log, reporting, stages, util +from cloudinit.config.cc_install_hotplug import install_hotplug from cloudinit.event import EventScope, EventType from cloudinit.net import read_sys_net_safe from cloudinit.net.network_state import parse_net_config_data @@ -68,6 +70,10 @@ choices=["add", "remove"], ) + subparsers.add_parser( + "enable", help="Enable hotplug for a given subsystem." 
+ ) + return parser @@ -160,14 +166,14 @@ return len(found) > 0 -SUBSYSTEM_PROPERTES_MAP = { +SUBSYSTEM_PROPERTIES_MAP = { "net": (NetHandler, EventScope.NETWORK), } def is_enabled(hotplug_init, subsystem): try: - scope = SUBSYSTEM_PROPERTES_MAP[subsystem][1] + scope = SUBSYSTEM_PROPERTIES_MAP[subsystem][1] except KeyError as e: raise RuntimeError( "hotplug-hook: cannot handle events for subsystem: {}".format( @@ -201,7 +207,7 @@ datasource = initialize_datasource(hotplug_init, subsystem) if not datasource: return - handler_cls = SUBSYSTEM_PROPERTES_MAP[subsystem][0] + handler_cls = SUBSYSTEM_PROPERTIES_MAP[subsystem][0] LOG.debug("Creating %s event handler", subsystem) event_handler: UeventHandler = handler_cls( datasource=datasource, @@ -237,6 +243,41 @@ raise last_exception +def enable_hotplug(hotplug_init: Init, subsystem) -> bool: + datasource = hotplug_init.fetch(existing="trust") + if not datasource: + return False + scope = SUBSYSTEM_PROPERTIES_MAP[subsystem][1] + hotplug_supported = EventType.HOTPLUG in ( + datasource.get_supported_events([EventType.HOTPLUG]).get(scope, set()) + ) + if not hotplug_supported: + print( + f"hotplug not supported for event of {subsystem}", file=sys.stderr + ) + return False + hotplug_enabled_file = util.read_hotplug_enabled_file(hotplug_init.paths) + if scope.value in hotplug_enabled_file["scopes"]: + print( + f"Not installing hotplug for event of type {subsystem}." + " Reason: Already done.", + file=sys.stderr, + ) + return True + + hotplug_enabled_file["scopes"].append(scope.value) + util.write_file( + hotplug_init.paths.get_cpath("hotplug.enabled"), + json.dumps(hotplug_enabled_file), + omode="w", + mode=0o640, + ) + install_hotplug( + datasource, network_hotplug_enabled=True, cfg=hotplug_init.cfg + ) + return True + + def handle_args(name, args): # Note that if an exception happens between now and when logging is # setup, we'll only see it in the journal @@ -275,13 +316,29 @@ ) sys.exit(1) print("enabled" if datasource else "disabled") - else: + elif args.hotplug_action == "handle": handle_hotplug( hotplug_init=hotplug_init, devpath=args.devpath, subsystem=args.subsystem, udevaction=args.udevaction, ) + else: + if os.getuid() != 0: + sys.stderr.write( + "Root is required. 
Try prepending your command with" + " sudo.\n" + ) + sys.exit(1) + if not enable_hotplug( + hotplug_init=hotplug_init, subsystem=args.subsystem + ): + sys.exit(1) + print( + f"Enabled cloud-init hotplug for " + f"subsystem={args.subsystem}" + ) + except Exception: LOG.exception("Received fatal exception handling hotplug!") raise diff -Nru cloud-init-23.4.4/cloudinit/cmd/devel/logs.py cloud-init-24.1.3/cloudinit/cmd/devel/logs.py --- cloud-init-23.4.4/cloudinit/cmd/devel/logs.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/cmd/devel/logs.py 2024-03-27 13:14:04.000000000 +0000 @@ -17,11 +17,17 @@ from cloudinit.cmd.devel import read_cfg_paths from cloudinit.helpers import Paths +from cloudinit.stages import Init from cloudinit.subp import ProcessExecutionError, subp from cloudinit.temp_utils import tempdir -from cloudinit.util import chdir, copy, ensure_dir, write_file +from cloudinit.util import ( + chdir, + copy, + ensure_dir, + get_config_logfiles, + write_file, +) -CLOUDINIT_LOGS = ["/var/log/cloud-init.log", "/var/log/cloud-init-output.log"] CLOUDINIT_RUN_DIR = "/run/cloud-init" @@ -209,6 +215,8 @@ " Try sudo cloud-init collect-logs\n" ) return 1 + + init = Init(ds_deps=[]) tarfile = os.path.abspath(tarfile) log_dir = datetime.utcnow().date().strftime("cloud-init-logs-%Y-%m-%d") with tempdir(dir="/tmp") as tmp_dir: @@ -242,7 +250,8 @@ verbosity=verbosity, ) - for log in CLOUDINIT_LOGS: + init.read_cfg() + for log in get_config_logfiles(init.cfg): _collect_file(log, log_dir, verbosity) if include_userdata: user_data_file = _get_user_data_file() diff -Nru cloud-init-23.4.4/cloudinit/cmd/devel/render.py cloud-init-24.1.3/cloudinit/cmd/devel/render.py --- cloud-init-23.4.4/cloudinit/cmd/devel/render.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/cmd/devel/render.py 2024-03-27 13:14:04.000000000 +0000 @@ -12,6 +12,7 @@ from cloudinit.cmd.devel import read_cfg_paths from cloudinit.handlers.jinja_template import ( JinjaLoadError, + JinjaSyntaxParsingException, NotJinjaError, render_jinja_payload_from_file, ) @@ -99,6 +100,13 @@ "Cannot render from instance data due to exception: %s", repr(e) ) return 1 + except JinjaSyntaxParsingException as e: + LOG.error( + "Failed to render templated user-data file '%s'. 
%s", + user_data_path, + str(e), + ) + return 1 if not rendered_payload: LOG.error("Unable to render user-data file: %s", user_data_path) return 1 diff -Nru cloud-init-23.4.4/cloudinit/cmd/main.py cloud-init-24.1.3/cloudinit/cmd/main.py --- cloud-init-23.4.4/cloudinit/cmd/main.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/cmd/main.py 2024-03-27 13:14:04.000000000 +0000 @@ -488,7 +488,7 @@ cloud_cfg_path = init.paths.get_ipath_cur("cloud_config") if os.path.exists(cloud_cfg_path) and os.stat(cloud_cfg_path).st_size != 0: validate_cloudconfig_schema( - config=load(util.load_file(cloud_cfg_path)), + config=load(util.load_text_file(cloud_cfg_path)), strict=False, log_details=False, log_deprecations=True, @@ -746,7 +746,7 @@ util.del_file(f) else: try: - status = json.loads(util.load_file(status_path)) + status = json.loads(util.load_text_file(status_path)) except Exception: pass @@ -870,14 +870,6 @@ help="Show program's version number and exit.", ) parser.add_argument( - "--file", - "-f", - action="append", - dest="files", - help="Use additional yaml configuration files.", - type=argparse.FileType("rb"), - ) - parser.add_argument( "--debug", "-d", action="store_true", @@ -910,6 +902,14 @@ help="Start in local mode (default: %(default)s).", default=False, ) + parser_init.add_argument( + "--file", + "-f", + action="append", + dest="files", + help="Use additional yaml configuration files.", + type=argparse.FileType("rb"), + ) # This is used so that we can know which action is selected + # the functor to use to run this subcommand parser_init.set_defaults(action=("init", main_init)) @@ -926,6 +926,14 @@ default="config", choices=("init", "config", "final"), ) + parser_mod.add_argument( + "--file", + "-f", + action="append", + dest="files", + help="Use additional yaml configuration files.", + type=argparse.FileType("rb"), + ) parser_mod.set_defaults(action=("modules", main_modules)) # This subcommand allows you to run a single module @@ -958,6 +966,14 @@ metavar="argument", help="Any additional arguments to pass to this module.", ) + parser_single.add_argument( + "--file", + "-f", + action="append", + dest="files", + help="Use additional yaml configuration files.", + type=argparse.FileType("rb"), + ) parser_single.set_defaults(action=("single", main_single)) parser_query = subparsers.add_parser( @@ -996,7 +1012,10 @@ if sysv_args: # Only load subparsers if subcommand is specified to avoid load cost - subcommand = sysv_args[0] + subcommand = next( + (posarg for posarg in sysv_args if not posarg.startswith("-")), + None, + ) if subcommand == "analyze": from cloudinit.analyze import get_parser as analyze_parser diff -Nru cloud-init-23.4.4/cloudinit/cmd/query.py cloud-init-24.1.3/cloudinit/cmd/query.py --- cloud-init-23.4.4/cloudinit/cmd/query.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/cmd/query.py 2024-03-27 13:14:04.000000000 +0000 @@ -29,6 +29,7 @@ render_jinja_payload, ) from cloudinit.sources import REDACT_SENSITIVE_VALUE +from cloudinit.templater import JinjaSyntaxParsingException NAME = "query" LOG = logging.getLogger(__name__) @@ -130,7 +131,7 @@ @returns: String of uncompressed userdata if possible, otherwise bytes. 
""" - bdata = util.load_file(ud_file_path, decode=False) + bdata = util.load_binary_file(ud_file_path) try: return bdata.decode("utf-8") except UnicodeDecodeError: @@ -179,7 +180,7 @@ combined_cloud_config_fn = paths.get_runpath("combined_cloud_config") try: - instance_json = util.load_file(instance_data_fn) + instance_json = util.load_text_file(instance_data_fn) except (IOError, OSError) as e: if e.errno == EACCES: LOG.error("No read permission on '%s'. Try sudo", instance_data_fn) @@ -190,7 +191,7 @@ instance_data = util.load_json(instance_json) try: combined_cloud_config = util.load_json( - util.load_file(combined_cloud_config_fn) + util.load_text_file(combined_cloud_config_fn) ) except (IOError, OSError): # File will not yet be present in init-local stage. @@ -277,12 +278,19 @@ return 1 if args.format: payload = "## template: jinja\n{fmt}".format(fmt=args.format) - rendered_payload = render_jinja_payload( - payload=payload, - payload_fn="query commandline", - instance_data=instance_data, - debug=True if args.debug else False, - ) + try: + rendered_payload = render_jinja_payload( + payload=payload, + payload_fn="query commandline", + instance_data=instance_data, + debug=True if args.debug else False, + ) + except JinjaSyntaxParsingException as e: + LOG.error( + "Failed to render templated data. %s", + str(e), + ) + return 1 if rendered_payload: print(rendered_payload) return 0 diff -Nru cloud-init-23.4.4/cloudinit/cmd/status.py cloud-init-24.1.3/cloudinit/cmd/status.py --- cloud-init-23.4.4/cloudinit/cmd/status.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/cmd/status.py 2024-03-27 13:14:04.000000000 +0000 @@ -13,46 +13,38 @@ import sys from copy import deepcopy from time import gmtime, sleep, strftime -from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union +from typing import Any, Dict, List, NamedTuple, Optional, Tuple from cloudinit import safeyaml, subp from cloudinit.cmd.devel import read_cfg_paths from cloudinit.distros import uses_systemd from cloudinit.helpers import Paths -from cloudinit.util import get_cmdline, load_file, load_json +from cloudinit.util import get_cmdline, load_json, load_text_file CLOUDINIT_DISABLED_FILE = "/etc/cloud/cloud-init.disabled" -# customer visible status messages @enum.unique -class UXAppStatus(enum.Enum): +class RunningStatus(enum.Enum): """Enum representing user-visible cloud-init application status.""" - NOT_RUN = "not run" + NOT_STARTED = "not started" RUNNING = "running" DONE = "done" - ERROR = "error" - DEGRADED_DONE = "degraded done" - DEGRADED_RUNNING = "degraded running" DISABLED = "disabled" -# Extend states when degraded -UXAppStatusDegradedMap = { - UXAppStatus.RUNNING: UXAppStatus.DEGRADED_RUNNING, - UXAppStatus.DONE: UXAppStatus.DEGRADED_DONE, -} - -# Map extended states back to simplified states -UXAppStatusDegradedMapCompat = { - UXAppStatus.DEGRADED_RUNNING: UXAppStatus.RUNNING, - UXAppStatus.DEGRADED_DONE: UXAppStatus.DONE, -} +@enum.unique +class ConditionStatus(enum.Enum): + """Enum representing user-visible cloud-init condition status.""" + + ERROR = "error" # cloud-init exited abnormally + DEGRADED = "degraded" # we have warnings + PEACHY = "healthy" # internal names can be fun, right? 
@enum.unique -class UXAppBootStatusCode(enum.Enum): +class EnabledStatus(enum.Enum): """Enum representing user-visible cloud-init boot status codes.""" DISABLED_BY_GENERATOR = "disabled-by-generator" @@ -67,17 +59,18 @@ DISABLED_BOOT_CODES = frozenset( [ - UXAppBootStatusCode.DISABLED_BY_GENERATOR, - UXAppBootStatusCode.DISABLED_BY_KERNEL_CMDLINE, - UXAppBootStatusCode.DISABLED_BY_MARKER_FILE, - UXAppBootStatusCode.DISABLED_BY_ENV_VARIABLE, + EnabledStatus.DISABLED_BY_GENERATOR, + EnabledStatus.DISABLED_BY_KERNEL_CMDLINE, + EnabledStatus.DISABLED_BY_MARKER_FILE, + EnabledStatus.DISABLED_BY_ENV_VARIABLE, ] ) class StatusDetails(NamedTuple): - status: UXAppStatus - boot_status_code: UXAppBootStatusCode + running_status: RunningStatus + condition_status: ConditionStatus + boot_status_code: EnabledStatus description: str errors: List[str] recoverable_errors: Dict[str, List[str]] @@ -89,38 +82,24 @@ TABULAR_LONG_TMPL = """\ extended_status: {extended_status} boot_status_code: {boot_code} -{last_update}detail: -{description}""" +{last_update}detail: {description} +errors:{errors} +recoverable_errors:{recoverable_errors}""" def query_systemctl( systemctl_args: List[str], *, wait: bool, - existing_status: Optional[UXAppStatus] = None, ) -> str: """Query systemd with retries and return output.""" while True: try: return subp.subp(["systemctl", *systemctl_args]).stdout.strip() - except subp.ProcessExecutionError as e: - if existing_status and existing_status in ( - UXAppStatus.DEGRADED_RUNNING, - UXAppStatus.RUNNING, - ): - return "" - last_exception = e - if wait: - sleep(0.25) - else: - break - print( - "Failed to get status from systemd. " - "Cloud-init status may be inaccurate. ", - f"Error from systemctl: {last_exception.stderr}", - file=sys.stderr, - ) - return "" + except subp.ProcessExecutionError: + if not wait: + raise + sleep(0.25) def get_parser(parser=None): @@ -163,82 +142,80 @@ return parser -def handle_status_args(name, args) -> int: - """Handle calls to 'cloud-init status' as a subcommand.""" - # Read configured paths - paths = read_cfg_paths() - details = get_status_details(paths, args.wait) - if args.wait: - while details.status in ( - UXAppStatus.NOT_RUN, - UXAppStatus.RUNNING, - UXAppStatus.DEGRADED_RUNNING, - ): - if args.format == "tabular": - sys.stdout.write(".") - sys.stdout.flush() - details = get_status_details(paths, args.wait) - sleep(0.25) - details_dict: Dict[str, Union[None, str, List[str], Dict[str, Any]]] = { +def translate_status( + running: RunningStatus, condition: ConditionStatus +) -> Tuple[str, str]: + """Translate running and condition status to human readable strings. + + Returns (status, extended_status). 
+ Much of this is for backwards compatibility + """ + # If we're done and have errors, we're in an error state + if condition == ConditionStatus.ERROR: + return "error", f"{condition.value} - {running.value}" + # Handle the "degraded done" and "degraded running" states + elif condition == ConditionStatus.DEGRADED and running in [ + RunningStatus.DONE, + RunningStatus.RUNNING, + ]: + return running.value, f"{condition.value} {running.value}" + return running.value, running.value + + +def print_status(args, details: StatusDetails): + """Print status out to the CLI.""" + status, extended_status = translate_status( + details.running_status, details.condition_status + ) + details_dict: Dict[str, Any] = { "datasource": details.datasource, "boot_status_code": details.boot_status_code.value, - "status": UXAppStatusDegradedMapCompat.get( - details.status, details.status - ).value, - "extended_status": details.status.value, + "status": status, + "extended_status": extended_status, "detail": details.description, "errors": details.errors, "recoverable_errors": details.recoverable_errors, "last_update": details.last_update, **details.v1, } - if args.format == "tabular": prefix = "\n" if args.wait else "" - # For backwards compatability, don't report degraded status here, + # For backwards compatibility, don't report degraded status here, # extended_status key reports the complete status (includes degraded) - state = UXAppStatusDegradedMapCompat.get( - details.status, details.status - ).value + state = details_dict["status"] print(f"{prefix}status: {state}") if args.long: - if details.last_update: - last_update = f"last_update: {details.last_update}\n" + if details_dict.get("last_update"): + last_update = f"last_update: {details_dict['last_update']}\n" else: last_update = "" + errors_output = ( + "\n\t- " + "\n\t- ".join(details_dict["errors"]) + if details_dict["errors"] + else " []" + ) + recoverable_errors_output = ( + "\n" + + "\n".join( + [ + f"{k}:\n\t- " + + "\n\t- ".join([i.replace("\n", " ") for i in v]) + for k, v in details_dict["recoverable_errors"].items() + ] + ) + if details_dict["recoverable_errors"] + else " {}" + ) print( TABULAR_LONG_TMPL.format( - extended_status=details.status.value, + extended_status=details_dict["extended_status"], prefix=prefix, - boot_code=details.boot_status_code.value, - description=details.description, + boot_code=details_dict["boot_status_code"], + description=details_dict["detail"], last_update=last_update, - ) - + ( - "\nerrors:" - + ( - "\n\t- " + "\n\t- ".join(details.errors) - if details.errors - else f" {details.errors}" - ) - ) - + ( - "\nrecoverable_errors:" - + ( - "\n" - + "\n".join( - [ - f"{k}:\n\t- " - + "\n\t- ".join( - [i.replace("\n", " ") for i in v] - ) - for k, v in details.recoverable_errors.items() - ] - ) - if details.recoverable_errors - else f" {details.recoverable_errors}" - ) + errors=errors_output, + recoverable_errors=recoverable_errors_output, ) ) elif args.format == "json": @@ -250,90 +227,118 @@ elif args.format == "yaml": print(safeyaml.dumps(details_dict)) + +def handle_status_args(name, args) -> int: + """Handle calls to 'cloud-init status' as a subcommand.""" + # Read configured paths + paths = read_cfg_paths() + details = get_status_details(paths, args.wait) + if args.wait: + while details.running_status in ( + RunningStatus.NOT_STARTED, + RunningStatus.RUNNING, + ): + if args.format == "tabular": + sys.stdout.write(".") + sys.stdout.flush() + details = get_status_details(paths, args.wait) + sleep(0.25) + + 
print_status(args, details) + # Hard error - if details.status == UXAppStatus.ERROR: + if details.condition_status == ConditionStatus.ERROR: return 1 # Recoverable error - elif details.status in UXAppStatusDegradedMap.values(): + elif details.condition_status == ConditionStatus.DEGRADED: return 2 return 0 -def get_bootstatus( - disable_file, paths, wait -) -> Tuple[UXAppBootStatusCode, str]: +def _disabled_via_environment(wait) -> bool: + """Return whether cloud-init is disabled via environment variable.""" + try: + env = query_systemctl(["show-environment"], wait=wait) + except subp.ProcessExecutionError: + env = "" + return "cloud-init=disabled" in env + + +def get_bootstatus(disable_file, paths, wait) -> Tuple[EnabledStatus, str]: """Report whether cloud-init current boot status @param disable_file: The path to the cloud-init disable file. @param paths: An initialized cloudinit.helpers.Paths object. + @param wait: If user has indicated to wait for cloud-init to complete. @returns: A tuple containing (code, reason) about cloud-init's status and why. """ cmdline_parts = get_cmdline().split() if not uses_systemd(): - bootstatus_code = UXAppBootStatusCode.ENABLED_BY_SYSVINIT + bootstatus_code = EnabledStatus.ENABLED_BY_SYSVINIT reason = "Cloud-init enabled on sysvinit" elif "cloud-init=enabled" in cmdline_parts: - bootstatus_code = UXAppBootStatusCode.ENABLED_BY_KERNEL_CMDLINE + bootstatus_code = EnabledStatus.ENABLED_BY_KERNEL_CMDLINE reason = "Cloud-init enabled by kernel command line cloud-init=enabled" elif os.path.exists(disable_file): - bootstatus_code = UXAppBootStatusCode.DISABLED_BY_MARKER_FILE + bootstatus_code = EnabledStatus.DISABLED_BY_MARKER_FILE reason = "Cloud-init disabled by {0}".format(disable_file) elif "cloud-init=disabled" in cmdline_parts: - bootstatus_code = UXAppBootStatusCode.DISABLED_BY_KERNEL_CMDLINE + bootstatus_code = EnabledStatus.DISABLED_BY_KERNEL_CMDLINE reason = "Cloud-init disabled by kernel parameter cloud-init=disabled" elif "cloud-init=disabled" in os.environ.get("KERNEL_CMDLINE", "") or ( - uses_systemd() - and "cloud-init=disabled" - in query_systemctl(["show-environment"], wait=wait) + uses_systemd() and _disabled_via_environment(wait=wait) ): - bootstatus_code = UXAppBootStatusCode.DISABLED_BY_ENV_VARIABLE + bootstatus_code = EnabledStatus.DISABLED_BY_ENV_VARIABLE reason = ( "Cloud-init disabled by environment variable " "KERNEL_CMDLINE=cloud-init=disabled" ) elif os.path.exists(os.path.join(paths.run_dir, "disabled")): - bootstatus_code = UXAppBootStatusCode.DISABLED_BY_GENERATOR + bootstatus_code = EnabledStatus.DISABLED_BY_GENERATOR reason = "Cloud-init disabled by cloud-init-generator" elif os.path.exists(os.path.join(paths.run_dir, "enabled")): - bootstatus_code = UXAppBootStatusCode.ENABLED_BY_GENERATOR + bootstatus_code = EnabledStatus.ENABLED_BY_GENERATOR reason = "Cloud-init enabled by systemd cloud-init-generator" else: - bootstatus_code = UXAppBootStatusCode.UNKNOWN + bootstatus_code = EnabledStatus.UNKNOWN reason = "Systemd generator may not have run yet." return (bootstatus_code, reason) -def _get_error_or_running_from_systemd( - existing_status: UXAppStatus, wait: bool -) -> Optional[UXAppStatus]: - """Get if systemd is in error or running state. - - Using systemd, we can get more fine-grained status of the - individual unit. Determine if we're still - running or if there's an error we haven't otherwise detected. 
+def is_cloud_init_enabled() -> bool: + return ( + get_status_details(read_cfg_paths()).boot_status_code + not in DISABLED_BOOT_CODES + ) - If we don't detect error or running, return None as we don't want to - report any other particular status based on systemd. - """ + +def systemd_failed(wait: bool) -> bool: + """Return if systemd units report a cloud-init error.""" for service in [ "cloud-final.service", "cloud-config.service", "cloud-init.service", "cloud-init-local.service", ]: - stdout = query_systemctl( - [ - "show", - "--property=ActiveState,UnitFileState,SubState,MainPID", - service, - ], - wait=wait, - existing_status=existing_status, - ) - if not stdout: - # Systemd isn't ready - return None + try: + stdout = query_systemctl( + [ + "show", + "--property=ActiveState,UnitFileState,SubState,MainPID", + service, + ], + wait=wait, + ) + except subp.ProcessExecutionError as e: + # Systemd isn't ready, assume the same state + print( + "Failed to get status from systemd. " + "Cloud-init status may be inaccurate. " + f"Error from systemctl: {e.stderr}", + file=sys.stderr, + ) + return False states = dict( [[x.strip() for x in r.split("=")] for r in stdout.splitlines()] ) @@ -342,80 +347,101 @@ or states["UnitFileState"] == "static" ): # Individual services should not get disabled - return UXAppStatus.ERROR - if states["ActiveState"] == "active": + return True + elif states["ActiveState"] == "active": if states["SubState"] == "exited": # Service exited normally, nothing interesting from systemd continue elif states["SubState"] == "running" and states["MainPID"] == "0": # Service is active, substate still reports running due to - # daemon or backgroud process spawned by CGroup/slice still + # daemon or background process spawned by CGroup/slice still # running. MainPID being set back to 0 means control of the # service/unit has exited in this case and # "the process is no longer around". - continue - if states["ActiveState"] == "failed" or states["SubState"] == "failed": - # We have an error - return UXAppStatus.ERROR + return False + elif ( + states["ActiveState"] == "failed" or states["SubState"] == "failed" + ): + return True # If we made it here, our unit is enabled and it hasn't exited # normally or exited with failure, so it is still running. - return UXAppStatus.RUNNING + return False # All services exited normally or aren't enabled, so don't report # any particular status based on systemd. - return None + return False -def get_status_details( - paths: Optional[Paths] = None, wait: bool = False -) -> StatusDetails: - """Return a dict with status, details and errors. +def is_running(status_file, result_file) -> bool: + """Return True if cloud-init is running.""" + return os.path.exists(status_file) and not os.path.exists(result_file) - @param paths: An initialized cloudinit.helpers.paths object. - Values are obtained from parsing paths.run_dir/status.json. +def get_running_status( + status_file, result_file, boot_status_code, latest_event +) -> RunningStatus: + """Return the running status of cloud-init.""" + if boot_status_code in DISABLED_BOOT_CODES: + return RunningStatus.DISABLED + elif is_running(status_file, result_file): + return RunningStatus.RUNNING + elif latest_event > 0: + return RunningStatus.DONE + else: + return RunningStatus.NOT_STARTED + + +def get_datasource(status_v1) -> str: + """Get the datasource from status.json. + + Return a lowercased non-prefixed version. 
So "DataSourceEc2" becomes "ec2" """ - paths = paths or read_cfg_paths() + datasource = status_v1.get("datasource", "") + if datasource: + ds, _, _ = datasource.partition(" ") + datasource = ds.lower().replace("datasource", "") + return datasource - status = UXAppStatus.NOT_RUN - errors = [] - datasource: Optional[str] = "" - status_v1 = {} - status_file = os.path.join(paths.run_dir, "status.json") - result_file = os.path.join(paths.run_dir, "result.json") +def get_description(status_v1, boot_description): + """Return a description of the current status. - boot_status_code, description = get_bootstatus( - CLOUDINIT_DISABLED_FILE, paths, wait - ) - if boot_status_code in DISABLED_BOOT_CODES: - status = UXAppStatus.DISABLED - if os.path.exists(status_file): - if not os.path.exists(result_file): - status = UXAppStatus.RUNNING - status_v1 = load_json(load_file(status_file)).get("v1", {}) + If we have a datasource, return that. If we're running in a particular + stage, return that. Otherwise, return the boot_description. + """ + datasource = status_v1.get("datasource") + if datasource: + return datasource + elif status_v1.get("stage"): + return f"Running in stage: {status_v1['stage']}" + else: + return boot_description + + +def get_latest_event(status_v1): + """Return the latest event time from status_v1.""" latest_event = 0 + for stage_info in status_v1.values(): + if isinstance(stage_info, dict): + latest_event = max( + latest_event, + stage_info.get("start") or 0, + stage_info.get("finished") or 0, + ) + return latest_event + + +def get_errors(status_v1) -> Tuple[List, Dict]: + """Return a list of errors and recoverable_errors from status_v1.""" + errors = [] recoverable_errors = {} - for key, value in sorted(status_v1.items()): - if key == "stage": - if value: - status = UXAppStatus.RUNNING - description = "Running in stage: {0}".format(value) - elif key == "datasource": - if value is None: - # If ds not yet written in status.json, then keep previous - # description - datasource = value - continue - description = value - ds, _, _ = value.partition(" ") - datasource = ds.lower().replace("datasource", "") - elif isinstance(value, dict): - errors.extend(value.get("errors", [])) - start = value.get("start") or 0 - finished = value.get("finished") or 0 + for _key, stage_info in sorted(status_v1.items()): + if isinstance(stage_info, dict): + errors.extend(stage_info.get("errors", [])) # Aggregate recoverable_errors from all stages - current_recoverable_errors = value.get("recoverable_errors", {}) + current_recoverable_errors = stage_info.get( + "recoverable_errors", {} + ) for err_type in current_recoverable_errors.keys(): if err_type not in recoverable_errors: recoverable_errors[err_type] = deepcopy( @@ -425,36 +451,70 @@ recoverable_errors[err_type].extend( current_recoverable_errors[err_type] ) - if finished == 0 and start != 0: - status = UXAppStatus.RUNNING - event_time = max(start, finished) - if event_time > latest_event: - latest_event = event_time - if errors: - status = UXAppStatus.ERROR - elif status == UXAppStatus.NOT_RUN and latest_event > 0: - status = UXAppStatus.DONE - if uses_systemd() and status not in ( - UXAppStatus.NOT_RUN, - UXAppStatus.DISABLED, - ): - systemd_status = _get_error_or_running_from_systemd(status, wait=wait) - if systemd_status: - status = systemd_status + return errors, recoverable_errors + +def get_status_details( + paths: Optional[Paths] = None, wait: bool = False +) -> StatusDetails: + """Return a dict with status, details and errors. 
+ + @param paths: An initialized cloudinit.helpers.paths object. + @param wait: If user has indicated to wait for cloud-init to complete. + + Values are obtained from parsing paths.run_dir/status.json. + """ + condition_status = ConditionStatus.PEACHY + paths = paths or read_cfg_paths() + status_file = os.path.join(paths.run_dir, "status.json") + result_file = os.path.join(paths.run_dir, "result.json") + boot_status_code, boot_description = get_bootstatus( + CLOUDINIT_DISABLED_FILE, paths, wait + ) + status_v1 = {} + if os.path.exists(status_file): + status_v1 = load_json(load_text_file(status_file)).get("v1", {}) + + datasource = get_datasource(status_v1) + description = get_description(status_v1, boot_description) + + latest_event = get_latest_event(status_v1) last_update = ( strftime("%a, %d %b %Y %H:%M:%S %z", gmtime(latest_event)) if latest_event else "" ) - if recoverable_errors: - status = UXAppStatusDegradedMap.get(status, status) + errors, recoverable_errors = get_errors(status_v1) + if errors: + condition_status = ConditionStatus.ERROR + elif recoverable_errors: + condition_status = ConditionStatus.DEGRADED + + running_status = get_running_status( + status_file, result_file, boot_status_code, latest_event + ) + + if ( + running_status == RunningStatus.RUNNING + and uses_systemd() + and systemd_failed(wait=wait) + ): + running_status = RunningStatus.DONE + condition_status = ConditionStatus.ERROR + description = "Failed due to systemd unit failure" + errors.append( + "Failed due to systemd unit failure. Ensure all cloud-init " + "services are enabled, and check 'systemctl' or 'journalctl' " + "for more information." + ) # this key is a duplicate status_v1.pop("datasource", None) + return StatusDetails( - status, + running_status, + condition_status, boot_status_code, description, errors, diff -Nru cloud-init-23.4.4/cloudinit/config/cc_ansible.py cloud-init-24.1.3/cloudinit/config/cc_ansible.py --- cloud-init-23.4.4/cloudinit/config/cc_ansible.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_ansible.py 2024-03-27 13:14:04.000000000 +0000 @@ -70,12 +70,12 @@ self.cmd_pull = ["ansible-pull"] self.cmd_version = ["ansible-pull", "--version"] self.distro = distro - self.env = os.environ + self.env = {} self.run_user: Optional[str] = None # some ansible modules directly reference os.environ["HOME"] # and cloud-init might not have that set, default: /root - self.env["HOME"] = self.env.get("HOME", "/root") + self.env["HOME"] = os.environ.get("HOME", "/root") def get_version(self) -> Optional[Version]: stdout, _ = self.do_as(self.cmd_version) @@ -100,7 +100,7 @@ return self.distro.do_as(command, self.run_user, **kwargs) def subp(self, command, **kwargs): - return subp(command, env=self.env, **kwargs) + return subp(command, update_env=self.env, **kwargs) @abc.abstractmethod def is_installed(self): diff -Nru cloud-init-23.4.4/cloudinit/config/cc_apt_configure.py cloud-init-24.1.3/cloudinit/config/cc_apt_configure.py --- cloud-init-23.4.4/cloudinit/config/cc_apt_configure.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_apt_configure.py 2024-03-27 13:14:04.000000000 +0000 @@ -184,13 +184,27 @@ PRIMARY_ARCHES = ["amd64", "i386"] PORTS_ARCHES = ["s390x", "arm64", "armhf", "powerpc", "ppc64el", "riscv64"] +UBUNTU_DEFAULT_APT_SOURCES_LIST = """\ +# Ubuntu sources have moved to the /etc/apt/sources.list.d/ubuntu.sources +# file, which uses the deb822 format. 
Use deb822-formatted .sources files +# to manage package sources in the /etc/apt/sources.list.d/ directory. +# See the sources.list(5) manual page for details. +""" + +# List of allowed content in /etc/apt/sources.list when features +# APT_DEB822_SOURCE_LIST_FILE is set. Otherwise issue warning about +# invalid non-deb822 configuration. +DEB822_ALLOWED_APT_SOURCES_LIST = {"ubuntu": UBUNTU_DEFAULT_APT_SOURCES_LIST} -def get_default_mirrors(arch=None, target=None): + +def get_default_mirrors( + arch=None, +): """returns the default mirrors for the target. These depend on the architecture, for more see: https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports""" if arch is None: - arch = util.get_dpkg_architecture(target) + arch = util.get_dpkg_architecture() if arch in PRIMARY_ARCHES: return PRIMARY_ARCH_MIRRORS.copy() if arch in PORTS_ARCHES: @@ -202,8 +216,6 @@ """process the config for apt_config. This can be called from curthooks if a global apt config was provided or via the "apt" standalone command.""" - # keeping code close to curtin codebase via entry handler - target = None # feed back converted config, but only work on the subset under 'apt' cfg = convert_to_v3_apt_format(cfg) apt_cfg = cfg.get("apt", {}) @@ -215,8 +227,8 @@ ) ) - apply_debconf_selections(apt_cfg, target) - apply_apt(apt_cfg, cloud, target) + apply_debconf_selections(apt_cfg) + apply_apt(apt_cfg, cloud) def _should_configure_on_empty_apt(): @@ -228,7 +240,7 @@ return True, "Apt is available." -def apply_apt(cfg, cloud, target): +def apply_apt(cfg, cloud): # cfg is the 'apt' top level dictionary already in 'v3' format. if not cfg: should_config, msg = _should_configure_on_empty_apt() @@ -238,8 +250,8 @@ LOG.debug("handling apt config: %s", cfg) - release = util.lsb_release(target=target)["codename"] - arch = util.get_dpkg_architecture(target) + release = util.lsb_release()["codename"] + arch = util.get_dpkg_architecture() mirrors = find_apt_mirror_info(cfg, cloud, arch=arch) LOG.debug("Apt Mirror info: %s", mirrors) @@ -250,9 +262,9 @@ _ensure_dependencies(cfg, matcher, cloud) if util.is_false(cfg.get("preserve_sources_list", False)): - add_mirror_keys(cfg, cloud, target) + add_mirror_keys(cfg, cloud) generate_sources_list(cfg, release, mirrors, cloud) - rename_apt_lists(mirrors, target, arch) + rename_apt_lists(mirrors, arch) try: apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN) @@ -268,15 +280,23 @@ add_apt_sources( cfg["sources"], cloud, - target=target, template_params=params, aa_repo_match=matcher, ) # GH: 4344 - stop gpg-agent/dirmgr daemons spawned by gpg key imports. # Daemons spawned by cloud-config.service on systemd v253 report (running) gpg_process_out, _err = subp.subp( - ["ps", "-o", "ppid,pid", "-C", "dirmngr", "-C", "gpg-agent"], - target=target, + [ + "ps", + "-o", + "ppid,pid", + "-C", + "keyboxd", + "-C", + "dirmngr", + "-C", + "gpg-agent", + ], capture=True, rcs=[0, 1], ) @@ -288,18 +308,17 @@ os.kill(gpg_pid, signal.SIGKILL) -def debconf_set_selections(selections, target=None): +def debconf_set_selections(selections): if not selections.endswith(b"\n"): selections += b"\n" subp.subp( ["debconf-set-selections"], data=selections, - target=target, capture=True, ) -def dpkg_reconfigure(packages, target=None): +def dpkg_reconfigure(packages): # For any packages that are already installed, but have preseed data # we populate the debconf database, but the filesystem configuration # would be preferred on a subsequent dpkg-reconfigure. 
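For context on the keyboxd/dirmngr/gpg-agent cleanup in the hunk above: the broadened ps(1) invocation only lists candidate daemons, and something still has to decide which PIDs to reap. The sketch below is an illustrative, self-contained restatement of that idea, not cloud-init's actual helper; the function name and the PPID heuristic are assumptions.

import os
import signal
import subprocess

def kill_gpg_daemons() -> None:
    # ps exits 1 when no process matches, hence rcs=[0, 1] in the hunk above;
    # check=False plus an explicit returncode test mirrors that here.
    proc = subprocess.run(
        ["ps", "-o", "ppid,pid", "-C", "keyboxd", "-C", "dirmngr",
         "-C", "gpg-agent"],
        capture_output=True, text=True, check=False,
    )
    if proc.returncode not in (0, 1):
        proc.check_returncode()
    for line in proc.stdout.splitlines()[1:]:  # skip the "PPID PID" header
        ppid, pid = line.split()
        # Daemons spawned by our gpg calls are children of this process;
        # any that already double-forked get re-parented to PID 1.
        if int(ppid) in (os.getpid(), 1):
            os.kill(int(pid), signal.SIGKILL)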
@@ -310,7 +329,7 @@ for pkg in packages: if pkg in CONFIG_CLEANERS: LOG.debug("unconfiguring %s", pkg) - CONFIG_CLEANERS[pkg](target) + CONFIG_CLEANERS[pkg]() to_config.append(pkg) else: unhandled.append(pkg) @@ -327,12 +346,11 @@ ["dpkg-reconfigure", "--frontend=noninteractive"] + list(to_config), data=None, - target=target, capture=True, ) -def apply_debconf_selections(cfg, target=None): +def apply_debconf_selections(cfg): """apply_debconf_selections - push content to debconf""" # debconf_selections: # set1: | @@ -344,7 +362,7 @@ return selections = "\n".join([selsets[key] for key in sorted(selsets.keys())]) - debconf_set_selections(selections.encode(), target=target) + debconf_set_selections(selections.encode()) # get a complete list of packages listed in input pkgs_cfgd = set() @@ -355,7 +373,7 @@ pkg = re.sub(r"[:\s].*", "", line) pkgs_cfgd.add(pkg) - pkgs_installed = util.get_installed_packages(target) + pkgs_installed = util.get_installed_packages() LOG.debug("pkgs_cfgd: %s", pkgs_cfgd) need_reconfig = pkgs_cfgd.intersection(pkgs_installed) @@ -364,14 +382,12 @@ LOG.debug("no need for reconfig") return - dpkg_reconfigure(need_reconfig, target=target) + dpkg_reconfigure(need_reconfig) -def clean_cloud_init(target): +def clean_cloud_init(): """clean out any local cloud-init config""" - flist = glob.glob( - subp.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*") - ) + flist = glob.glob(subp.target_path(path="/etc/cloud/cloud.cfg.d/*dpkg*")) LOG.debug("cleaning cloud-init config from: %s", flist) for dpkg_cfg in flist: @@ -396,11 +412,11 @@ return string -def rename_apt_lists(new_mirrors, target, arch): +def rename_apt_lists(new_mirrors, arch): """rename_apt_lists - rename apt lists to preserve old cache data""" default_mirrors = get_default_mirrors(arch) - pre = subp.target_path(target, APT_LISTS) + pre = subp.target_path(APT_LISTS) for name, omirror in default_mirrors.items(): nmirror = new_mirrors.get(name) if not nmirror: @@ -542,11 +558,11 @@ return retsrc -def add_mirror_keys(cfg, cloud, target): +def add_mirror_keys(cfg, cloud): """Adds any keys included in the primary/security mirror clauses""" for key in ("primary", "security"): for mirror in cfg.get(key, []): - add_apt_key(mirror, cloud, target, file_name=key) + add_apt_key(mirror, cloud, file_name=key) def is_deb822_sources_format(apt_src_content: str) -> bool: @@ -590,7 +606,7 @@ Prefer python apt_pkg if present. Fallback to apt-config dump command if present out output parsed - Fallback to DEFAULT_APT_CFG if apt-config commmand absent or + Fallback to DEFAULT_APT_CFG if apt-config command absent or output unparsable. 
""" try: @@ -610,7 +626,6 @@ "Dir::Etc::sourceparts", DEFAULT_APT_CFG["Dir::Etc::sourceparts"] ) except ImportError: - try: apt_dump, _ = subp.subp(["apt-config", "dump"]) except subp.ProcessExecutionError: @@ -666,7 +681,7 @@ if not template_fn: LOG.warning("No template found, not rendering %s", aptsrc_file) return - tmpl = util.load_file(template_fn) + tmpl = util.load_text_file(template_fn) rendered = templater.render_string(tmpl, params) if tmpl: @@ -687,9 +702,27 @@ aptsrc_file = apt_sources_list disabled = disable_suites(cfg.get("disable_suites"), rendered, release) util.write_file(aptsrc_file, disabled, mode=0o644) + if aptsrc_file == apt_sources_deb822 and os.path.exists(apt_sources_list): + expected_content = DEB822_ALLOWED_APT_SOURCES_LIST.get( + cloud.distro.name + ) + if expected_content: + if expected_content != util.load_text_file(apt_sources_list): + LOG.info( + "Replacing %s to favor deb822 source format", + apt_sources_list, + ) + util.write_file( + apt_sources_list, UBUNTU_DEFAULT_APT_SOURCES_LIST + ) + else: + LOG.info( + "Removing %s to favor deb822 source format", apt_sources_list + ) + util.del_file(apt_sources_list) -def add_apt_key_raw(key, file_name, hardened=False, target=None): +def add_apt_key_raw(key, file_name, hardened=False): """ actual adding of a key as defined in key argument to the system @@ -737,7 +770,7 @@ cloud.distro.install_packages(sorted(missing_packages)) -def add_apt_key(ent, cloud, target=None, hardened=False, file_name=None): +def add_apt_key(ent, cloud, hardened=False, file_name=None): """ Add key to the system as defined in ent (if any). Supports raw keys or keyid's @@ -760,9 +793,7 @@ cloud.distro.update_package_sources() -def add_apt_sources( - srcdict, cloud, target=None, template_params=None, aa_repo_match=None -): +def add_apt_sources(srcdict, cloud, template_params=None, aa_repo_match=None): """ install keys and repo source .list files defined in 'sources' @@ -800,17 +831,13 @@ ent = srcdict[filename] LOG.debug("adding source/key '%s'", ent) if "filename" not in ent: - if target and filename.startswith(target): - # Strip target path prefix from filename - ent["filename"] = filename[len(target) :] - else: - ent["filename"] = filename + ent["filename"] = filename if "source" in ent and "$KEY_FILE" in ent["source"]: - key_file = add_apt_key(ent, cloud, target, hardened=True) + key_file = add_apt_key(ent, cloud, hardened=True) template_params["KEY_FILE"] = key_file else: - add_apt_key(ent, cloud, target) + add_apt_key(ent, cloud) if "source" not in ent: continue @@ -828,14 +855,13 @@ try: subp.subp( ["add-apt-repository", "--no-update", source], - target=target, ) except subp.ProcessExecutionError: LOG.exception("add-apt-repository failed.") raise continue - sourcefn = subp.target_path(target, ent["filename"]) + sourcefn = subp.target_path(path=ent["filename"]) try: contents = "%s\n" % (source) omode = "a" diff -Nru cloud-init-23.4.4/cloudinit/config/cc_apt_pipelining.py cloud-init-24.1.3/cloudinit/config/cc_apt_pipelining.py --- cloud-init-23.4.4/cloudinit/config/cc_apt_pipelining.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_apt_pipelining.py 2024-03-27 13:14:04.000000000 +0000 @@ -67,7 +67,7 @@ write_apt_snippet("0", LOG, DEFAULT_FILE) elif apt_pipe_value_s in ("none", "unchanged", "os"): return - elif apt_pipe_value_s in [str(b) for b in range(0, 6)]: + elif apt_pipe_value_s in [str(b) for b in range(6)]: write_apt_snippet(apt_pipe_value_s, LOG, DEFAULT_FILE) else: LOG.warning("Invalid option for 
apt_pipelining: %s", apt_pipe_value) diff -Nru cloud-init-23.4.4/cloudinit/config/cc_bootcmd.py cloud-init-24.1.3/cloudinit/config/cc_bootcmd.py --- cloud-init-23.4.4/cloudinit/config/cc_bootcmd.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_bootcmd.py 2024-03-27 13:14:04.000000000 +0000 @@ -10,7 +10,6 @@ """Bootcmd: run arbitrary commands early in the boot process.""" import logging -import os from textwrap import dedent from cloudinit import subp, temp_utils, util @@ -83,12 +82,9 @@ raise try: - env = os.environ.copy() iid = cloud.get_instance_id() - if iid: - env["INSTANCE_ID"] = str(iid) - cmd = ["/bin/sh", tmpf.name] - subp.subp(cmd, env=env, capture=False) + env = {"INSTANCE_ID": str(iid)} if iid else {} + subp.subp(["/bin/sh", tmpf.name], update_env=env, capture=False) except Exception: util.logexc(LOG, "Failed to run bootcmd module %s", name) raise diff -Nru cloud-init-23.4.4/cloudinit/config/cc_ca_certs.py cloud-init-24.1.3/cloudinit/config/cc_ca_certs.py --- cloud-init-23.4.4/cloudinit/config/cc_ca_certs.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_ca_certs.py 2024-03-27 13:14:04.000000000 +0000 @@ -45,6 +45,13 @@ "ca_cert_config": None, "ca_cert_update_cmd": ["update-ca-certificates"], }, + "photon": { + "ca_cert_path": "/etc/ssl/certs/", + "ca_cert_local_path": "/etc/pki/tls/certs/", + "ca_cert_filename": "cloud-init-ca-cert-{cert_index}.crt", + "ca_cert_config": None, + "ca_cert_update_cmd": ["rehash_ca_certificates.sh"], + }, } for distro in ( @@ -84,6 +91,7 @@ "sle-micro", "sles", "ubuntu", + "photon", ] meta: MetaSchema = { @@ -165,7 +173,7 @@ @param distro_name: String providing the distro class name. @param distro_cfg: A hash providing _distro_ca_certs_configs function. """ - if distro_name == "rhel": + if distro_name in ["rhel", "photon"]: remove_default_ca_certs(distro_cfg) elif distro_name in ["alpine", "debian", "ubuntu"]: disable_system_ca_certs(distro_cfg) @@ -197,7 +205,7 @@ added_header = False if os.stat(ca_cert_cfg_fn).st_size: - orig = util.load_file(ca_cert_cfg_fn) + orig = util.load_text_file(ca_cert_cfg_fn) out_lines = [] for line in orig.splitlines(): if line == header_comment: diff -Nru cloud-init-23.4.4/cloudinit/config/cc_disk_setup.py cloud-init-24.1.3/cloudinit/config/cc_disk_setup.py --- cloud-init-23.4.4/cloudinit/config/cc_disk_setup.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_disk_setup.py 2024-03-27 13:14:04.000000000 +0000 @@ -670,7 +670,7 @@ def purge_disk(device): """ - Remove parition table entries + Remove partition table entries """ # wipe any file systems first @@ -791,7 +791,7 @@ The following are supported values in the dict: overwrite: Should the partition table be created regardless - of any pre-exisiting data? + of any pre-existing data? layout: the layout of the partition table table_type: Which partition table to use, defaults to MBR device: the device to work on. 
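The cc_bootcmd hunk above is one of several in this release (cc_ansible earlier; cc_growpart, cc_seed_random and cc_ssh below) that replace subp's env= keyword, which hands the child a complete replacement environment, with update_env=, which overlays a few keys onto the inherited one. A rough stdlib illustration of the distinction, under the assumption that update_env merges rather than replaces; run_with_update_env is a hypothetical name, not a cloudinit.subp API.

import os
import subprocess

def run_with_update_env(cmd, update_env):
    env = os.environ.copy()   # PATH, HOME, LANG, ... survive
    env.update(update_env)    # only the requested keys are overridden
    return subprocess.run(cmd, env=env, check=True)

def run_with_env(cmd, env):
    # the old pattern: the child sees *only* these variables
    return subprocess.run(cmd, env=env, check=True)

run_with_update_env(
    ["sh", "-c", "echo $INSTANCE_ID"],
    {"INSTANCE_ID": "i-1234"},
)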
@@ -940,12 +940,12 @@ LOG.warning("Destroying filesystem on %s", device) else: - LOG.debug("Device %s is cleared for formating", device) + LOG.debug("Device %s is cleared for formatting", device) elif partition and str(partition).lower() in ("auto", "any"): # For auto devices, we match if the filesystem does exist odevice = device - LOG.debug("Identifying device to create %s filesytem on", label) + LOG.debug("Identifying device to create %s filesystem on", label) # 'any' means pick the first match on the device with matching fs_type label_match = True @@ -962,7 +962,7 @@ LOG.debug("Automatic device for %s identified as %s", odevice, device) if reuse: - LOG.debug("Found filesystem match, skipping formating.") + LOG.debug("Found filesystem match, skipping formatting.") return if not reuse and fs_replace and device: diff -Nru cloud-init-23.4.4/cloudinit/config/cc_final_message.py cloud-init-24.1.3/cloudinit/config/cc_final_message.py --- cloud-init-23.4.4/cloudinit/config/cc_final_message.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_final_message.py 2024-03-27 13:14:04.000000000 +0000 @@ -59,7 +59,7 @@ LOG = logging.getLogger(__name__) __doc__ = get_meta_doc(meta) -# Jinja formated default message +# Jinja formatted default message FINAL_MESSAGE_DEF = ( "## template: jinja\n" "Cloud-init v. {{version}} finished at {{timestamp}}." @@ -96,6 +96,10 @@ stderr=True, log=LOG, ) + except templater.JinjaSyntaxParsingException as e: + util.logexc( + LOG, "Failed to render templated final message: %s", str(e) + ) except Exception: util.logexc(LOG, "Failed to render final message template") diff -Nru cloud-init-23.4.4/cloudinit/config/cc_growpart.py cloud-init-24.1.3/cloudinit/config/cc_growpart.py --- cloud-init-23.4.4/cloudinit/config/cc_growpart.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_growpart.py 2024-03-27 13:14:04.000000000 +0000 @@ -19,7 +19,7 @@ from contextlib import suppress from pathlib import Path from textwrap import dedent -from typing import Tuple +from typing import Optional, Tuple from cloudinit import subp, temp_utils, util from cloudinit.cloud import Cloud @@ -167,11 +167,10 @@ class ResizeGrowPart(Resizer): def available(self, devices: list): - myenv = os.environ.copy() - myenv["LANG"] = "C" - try: - (out, _err) = subp.subp(["growpart", "--help"], env=myenv) + out = subp.subp( + ["growpart", "--help"], update_env={"LANG": "C"} + ).stdout if re.search(r"--update\s+", out): return True @@ -180,8 +179,6 @@ return False def resize(self, diskdev, partnum, partdev): - myenv = os.environ.copy() - myenv["LANG"] = "C" before = get_size(partdev) # growpart uses tmp dir to store intermediate states @@ -189,12 +186,13 @@ tmp_dir = self._distro.get_tmp_exec_path() with temp_utils.tempdir(dir=tmp_dir, needs_exe=True) as tmpd: growpart_tmp = os.path.join(tmpd, "growpart") + my_env = {"LANG": "C", "TMPDIR": growpart_tmp} if not os.path.exists(growpart_tmp): os.mkdir(growpart_tmp, 0o700) - myenv["TMPDIR"] = growpart_tmp try: subp.subp( - ["growpart", "--dry-run", diskdev, partnum], env=myenv + ["growpart", "--dry-run", diskdev, partnum], + update_env=my_env, ) except subp.ProcessExecutionError as e: if e.exit_code != 1: @@ -208,7 +206,7 @@ return (before, before) try: - subp.subp(["growpart", diskdev, partnum], env=myenv) + subp.subp(["growpart", diskdev, partnum], update_env=my_env) except subp.ProcessExecutionError as e: util.logexc(LOG, "Failed: growpart %s %s", diskdev, partnum) raise ResizeFailedException(e) from e @@ 
-246,11 +244,10 @@ class ResizeGpart(Resizer): def available(self, devices: list): - myenv = os.environ.copy() - myenv["LANG"] = "C" - try: - (_out, err) = subp.subp(["gpart", "help"], env=myenv, rcs=[0, 1]) + err = subp.subp( + ["gpart", "help"], update_env={"LANG": "C"}, rcs=[0, 1] + ).stderr if re.search(r"gpart recover ", err): return True @@ -283,12 +280,16 @@ return (before, get_size(partdev)) -def get_size(filename): - fd = os.open(filename, os.O_RDONLY) +def get_size(filename) -> Optional[int]: + fd = None try: + fd = os.open(filename, os.O_RDONLY) return os.lseek(fd, 0, os.SEEK_END) + except FileNotFoundError: + return None finally: - os.close(fd) + if fd: + os.close(fd) def device_part_info(devpath): @@ -305,7 +306,7 @@ # FreeBSD doesn't know of sysfs so just get everything we need from # the device, like /dev/vtbd0p2. fpart = "/dev/" + util.find_freebsd_part(devpath) - # Handle both GPT partions and MBR slices with partitions + # Handle both GPT partitions and MBR slices with partitions m = re.search( r"^(?P/dev/.+)[sp](?P\d+[a-z]*)$", fpart ) @@ -319,14 +320,14 @@ if not os.path.exists(ptpath): raise TypeError("%s not a partition" % devpath) - ptnum = util.load_file(ptpath).rstrip() + ptnum = util.load_text_file(ptpath).rstrip() # for a partition, real syspath is something like: # /sys/devices/pci0000:00/0000:00:04.0/virtio1/block/vda/vda1 rsyspath = os.path.realpath(syspath) disksyspath = os.path.dirname(rsyspath) - diskmajmin = util.load_file(os.path.join(disksyspath, "dev")).rstrip() + diskmajmin = util.load_text_file(os.path.join(disksyspath, "dev")).rstrip() diskdevpath = os.path.realpath("/dev/block/%s" % diskmajmin) # diskdevpath has something like 253:0 @@ -357,7 +358,7 @@ return dev -def get_mapped_device(blockdev): +def get_mapped_device(blockdev, distro_name): """Returns underlying block device for a mapped device. If it is mapped, blockdev will usually take the form of @@ -367,6 +368,32 @@ the device pointed to. Otherwise, return None. """ realpath = os.path.realpath(blockdev) + + if distro_name == "alpine": + if blockdev.startswith("/dev/mapper"): + # For Alpine systems a /dev/mapper/ entry is *not* a + # symlink to the related /dev/dm-X block device, + # rather it is a block device itself. 
+ + # Get the major/minor of the /dev/mapper block device + major = os.major(os.stat(blockdev).st_rdev) + minor = os.minor(os.stat(blockdev).st_rdev) + + # Find the /dev/dm-X device with the same major/minor + with os.scandir("/dev/") as it: + for deventry in it: + if deventry.name.startswith("dm-"): + res = os.lstat(deventry.path) + if stat.S_ISBLK(res.st_mode): + if ( + os.major(os.stat(deventry.path).st_rdev) + == major + and os.minor(os.stat(deventry.path).st_rdev) + == minor + ): + realpath = os.path.realpath(deventry.path) + break + if realpath.startswith("/dev/dm-"): LOG.debug("%s is a mapped device pointing to %s", blockdev, realpath) return realpath @@ -472,7 +499,7 @@ ) -def resize_devices(resizer, devices): +def resize_devices(resizer, devices, distro_name): # returns a tuple of tuples containing (entry-in-devices, action, message) devices = copy.copy(devices) info = [] @@ -515,7 +542,7 @@ ) continue - underlying_blockdev = get_mapped_device(blockdev) + underlying_blockdev = get_mapped_device(blockdev, distro_name) if underlying_blockdev: try: # We need to resize the underlying partition first @@ -571,7 +598,7 @@ continue try: - (old, new) = resizer.resize(disk, ptnum, blockdev) + old, new = resizer.resize(disk, ptnum, blockdev) if old == new: info.append( ( @@ -580,6 +607,15 @@ "no change necessary (%s, %s)" % (disk, ptnum), ) ) + elif new is None or old is None: + info.append( + ( + devent, + RESIZE.CHANGED, + "changed (%s, %s) size, new size is unknown" + % (disk, ptnum), + ) + ) else: info.append( ( @@ -649,7 +685,7 @@ logfunc=LOG.debug, msg="resize_devices", func=resize_devices, - args=(resizer, devices), + args=(resizer, devices, cloud.distro.name), ) for entry, action, msg in resized: if action == RESIZE.CHANGED: diff -Nru cloud-init-23.4.4/cloudinit/config/cc_install_hotplug.py cloud-init-24.1.3/cloudinit/config/cc_install_hotplug.py --- cloud-init-23.4.4/cloudinit/config/cc_install_hotplug.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_install_hotplug.py 2024-03-27 13:14:04.000000000 +0000 @@ -11,6 +11,7 @@ from cloudinit.distros import ALL_DISTROS from cloudinit.event import EventScope, EventType from cloudinit.settings import PER_INSTANCE +from cloudinit.sources import DataSource meta: MetaSchema = { "id": "cc_install_hotplug", @@ -21,11 +22,11 @@ This module will install the udev rules to enable hotplug if supported by the datasource and enabled in the userdata. The udev rules will be installed as - ``/etc/udev/rules.d/10-cloud-init-hook-hotplug.rules``. + ``/etc/udev/rules.d/90-cloud-init-hook-hotplug.rules``. When hotplug is enabled, newly added network devices will be added to the system by cloud-init. After udev detects the event, - cloud-init will referesh the instance metadata from the datasource, + cloud-init will refresh the instance metadata from the datasource, detect the device in the updated metadata, then apply the updated network configuration. 
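For orientation before the cc_install_hotplug hunks that follow: with the default libexecdir and a datasource that supplies no extra_hotplug_udev_rules, the new template renders a rules file roughly like the output of this snippet (an illustrative rendering only, not the module's install logic).

# The template literal is copied from the hunk below; extra_rules="" is the
# default case where the datasource defines no additional match rules.
TEMPLATE = """\
# Installed by cloud-init due to network hotplug userdata
ACTION!="add|remove", GOTO="cloudinit_end"{extra_rules}
LABEL="cloudinit_hook"
SUBSYSTEM=="net", RUN+="{libexecdir}/hook-hotplug"
LABEL="cloudinit_end"
"""

print(TEMPLATE.format(extra_rules="", libexecdir="/usr/libexec/cloud-init"))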
@@ -59,30 +60,30 @@ LOG = logging.getLogger(__name__) -HOTPLUG_UDEV_PATH = "/etc/udev/rules.d/10-cloud-init-hook-hotplug.rules" +# 90 to be sorted after 80-net-setup-link.rules which sets ID_NET_DRIVER and +# some datasources match on drivers +HOTPLUG_UDEV_PATH = "/etc/udev/rules.d/90-cloud-init-hook-hotplug.rules" HOTPLUG_UDEV_RULES_TEMPLATE = """\ # Installed by cloud-init due to network hotplug userdata -ACTION!="add|remove", GOTO="cloudinit_end" +ACTION!="add|remove", GOTO="cloudinit_end"{extra_rules} LABEL="cloudinit_hook" SUBSYSTEM=="net", RUN+="{libexecdir}/hook-hotplug" LABEL="cloudinit_end" """ -def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: - network_hotplug_enabled = ( - "updates" in cfg - and "network" in cfg["updates"] - and "when" in cfg["updates"]["network"] - and "hotplug" in cfg["updates"]["network"]["when"] - ) +def install_hotplug( + datasource: DataSource, + cfg: Config, + network_hotplug_enabled: bool, +): hotplug_supported = EventType.HOTPLUG in ( - cloud.datasource.get_supported_events([EventType.HOTPLUG]).get( + datasource.get_supported_events([EventType.HOTPLUG]).get( EventScope.NETWORK, set() ) ) hotplug_enabled = stages.update_event_enabled( - datasource=cloud.datasource, + datasource=datasource, cfg=cfg, event_source_type=EventType.HOTPLUG, scope=EventScope.NETWORK, @@ -104,12 +105,32 @@ LOG.debug("Skipping hotplug install, udevadm not found") return + extra_rules = ( + datasource.extra_hotplug_udev_rules + if datasource.extra_hotplug_udev_rules is not None + else "" + ) + if extra_rules: + extra_rules = "\n" + extra_rules # This may need to turn into a distro property at some point libexecdir = "/usr/libexec/cloud-init" if not os.path.exists(libexecdir): libexecdir = "/usr/lib/cloud-init" + LOG.info("Installing hotplug.") util.write_file( filename=HOTPLUG_UDEV_PATH, - content=HOTPLUG_UDEV_RULES_TEMPLATE.format(libexecdir=libexecdir), + content=HOTPLUG_UDEV_RULES_TEMPLATE.format( + extra_rules=extra_rules, libexecdir=libexecdir + ), ) subp.subp(["udevadm", "control", "--reload-rules"]) + + +def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: + network_hotplug_enabled = ( + "updates" in cfg + and "network" in cfg["updates"] + and "when" in cfg["updates"]["network"] + and "hotplug" in cfg["updates"]["network"]["when"] + ) + install_hotplug(cloud.datasource, cfg, network_hotplug_enabled) diff -Nru cloud-init-23.4.4/cloudinit/config/cc_keys_to_console.py cloud-init-24.1.3/cloudinit/config/cc_keys_to_console.py --- cloud-init-23.4.4/cloudinit/config/cc_keys_to_console.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_keys_to_console.py 2024-03-27 13:14:04.000000000 +0000 @@ -36,8 +36,8 @@ " ``ssh_fp_console_blacklist`` config key can be used. By default," " all types of keys will have their fingerprints written to console." " To avoid host keys of a key type being written to console the" - "``ssh_key_console_blacklist`` config key can be used. By default," - " ``ssh-dss`` host keys are not written to console." + "``ssh_key_console_blacklist`` config key can be used. By default" + " all supported host keys are written to console." 
), "distros": distros, "examples": [ @@ -51,7 +51,7 @@ dedent( """\ # Do not print certain ssh key types to console - ssh_key_console_blacklist: [dsa, ssh-dss] + ssh_key_console_blacklist: [rsa] """ ), dedent( @@ -99,7 +99,7 @@ cfg, "ssh_fp_console_blacklist", [] ) key_blacklist = util.get_cfg_option_list( - cfg, "ssh_key_console_blacklist", ["ssh-dss"] + cfg, "ssh_key_console_blacklist", [] ) try: diff -Nru cloud-init-23.4.4/cloudinit/config/cc_lxd.py cloud-init-24.1.3/cloudinit/config/cc_lxd.py --- cloud-init-23.4.4/cloudinit/config/cc_lxd.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_lxd.py 2024-03-27 13:14:04.000000000 +0000 @@ -210,6 +210,7 @@ f" '{type(lxd_cfg).__name__}'" ) + util.wait_for_snap_seeded(cloud) # Grab the configuration init_cfg = lxd_cfg.get("init", {}) preseed_str = lxd_cfg.get("preseed", "") @@ -432,7 +433,7 @@ % (bridge_cfg.get("ipv6_address"), bridge_cfg.get("ipv6_netmask")) ) - if bridge_cfg.get("ipv6_nat", "false") == "true": + if bridge_cfg.get("ipv6_nat") == "true": cmd_create.append("ipv6.nat=true") else: diff -Nru cloud-init-23.4.4/cloudinit/config/cc_mcollective.py cloud-init-24.1.3/cloudinit/config/cc_mcollective.py --- cloud-init-23.4.4/cloudinit/config/cc_mcollective.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_mcollective.py 2024-03-27 13:14:04.000000000 +0000 @@ -98,7 +98,7 @@ # Read server.cfg (if it exists) values from the # original file in order to be able to mix the rest up. try: - old_contents = util.load_file(server_cfg, quiet=False, decode=False) + old_contents = util.load_binary_file(server_cfg, quiet=False) mcollective_config = ConfigObj(io.BytesIO(old_contents)) except IOError as e: if e.errno != errno.ENOENT: diff -Nru cloud-init-23.4.4/cloudinit/config/cc_migrator.py cloud-init-24.1.3/cloudinit/config/cc_migrator.py --- cloud-init-23.4.4/cloudinit/config/cc_migrator.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_migrator.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,102 +0,0 @@ -# Copyright (C) 2012 Yahoo! Inc. -# -# Author: Joshua Harlow -# -# This file is part of cloud-init. See LICENSE file for license information. - -"""Migrator: Migrate old versions of cloud-init data to new""" - -import logging -import os -import shutil - -from cloudinit import helpers, util -from cloudinit.cloud import Cloud -from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc -from cloudinit.settings import PER_ALWAYS - -MODULE_DESCRIPTION = """\ -This module handles moving old versions of cloud-init data to newer ones. -Currently, it only handles renaming cloud-init's per-frequency semaphore files -to canonicalized name and renaming legacy semaphore names to newer ones. This -module is enabled by default, but can be disabled by specifying ``migrate: -false`` in config. 
-""" - -distros = ["all"] -frequency = PER_ALWAYS - -meta: MetaSchema = { - "id": "cc_migrator", - "name": "Migrator", - "title": "Migrate old versions of cloud-init data to new", - "description": MODULE_DESCRIPTION, - "distros": distros, - "examples": ["# Do not migrate cloud-init semaphores\nmigrate: false\n"], - "frequency": frequency, - "activate_by_schema_keys": [], -} - -__doc__ = get_meta_doc(meta) -LOG = logging.getLogger(__name__) - - -def _migrate_canon_sems(cloud): - paths = (cloud.paths.get_ipath("sem"), cloud.paths.get_cpath("sem")) - am_adjusted = 0 - for sem_path in paths: - if not sem_path or not os.path.exists(sem_path): - continue - for p in os.listdir(sem_path): - full_path = os.path.join(sem_path, p) - if os.path.isfile(full_path): - (name, ext) = os.path.splitext(p) - canon_name = helpers.canon_sem_name(name) - if canon_name != name: - new_path = os.path.join(sem_path, canon_name + ext) - shutil.move(full_path, new_path) - am_adjusted += 1 - return am_adjusted - - -def _migrate_legacy_sems(cloud): - legacy_adjust = { - "apt-update-upgrade": [ - "apt_configure", - "package_update_upgrade_install", - ], - } - paths = (cloud.paths.get_ipath("sem"), cloud.paths.get_cpath("sem")) - for sem_path in paths: - if not sem_path or not os.path.exists(sem_path): - continue - sem_helper = helpers.FileSemaphores(sem_path) - for (mod_name, migrate_to) in legacy_adjust.items(): - possibles = [mod_name, helpers.canon_sem_name(mod_name)] - old_exists = [] - for p in os.listdir(sem_path): - (name, _ext) = os.path.splitext(p) - if name in possibles and os.path.isfile(p): - old_exists.append(p) - for p in old_exists: - util.del_file(os.path.join(sem_path, p)) - (_name, freq) = os.path.splitext(p) - for m in migrate_to: - LOG.debug( - "Migrating %s => %s with the same frequency", p, m - ) - with sem_helper.lock(m, freq): - pass - - -def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: - do_migrate = util.get_cfg_option_str(cfg, "migrate", True) - if not util.translate_bool(do_migrate): - LOG.debug("Skipping module named %s, migration disabled", name) - return - sems_moved = _migrate_canon_sems(cloud) - LOG.debug( - "Migrated %s semaphore files to there canonicalized names", sems_moved - ) - _migrate_legacy_sems(cloud) diff -Nru cloud-init-23.4.4/cloudinit/config/cc_mounts.py cloud-init-24.1.3/cloudinit/config/cc_mounts.py --- cloud-init-23.4.4/cloudinit/config/cc_mounts.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_mounts.py 2024-03-27 13:14:04.000000000 +0000 @@ -397,7 +397,7 @@ ) return fname try: - for line in util.load_file("/proc/swaps").splitlines(): + for line in util.load_text_file("/proc/swaps").splitlines(): if line.startswith(fname + " "): LOG.debug("swap file %s already in use", fname) return fname @@ -450,7 +450,7 @@ fstab_removed = [] if os.path.exists(FSTAB_PATH): - for line in util.load_file(FSTAB_PATH).splitlines(): + for line in util.load_text_file(FSTAB_PATH).splitlines(): if MNT_COMMENT in line: fstab_removed.append(line) continue @@ -573,14 +573,14 @@ needswap = False need_mount_all = False dirs = [] - for line in actlist: + for entry in actlist: # write 'comment' in the fs_mntops, entry, claiming this - line[3] = "%s,%s" % (line[3], MNT_COMMENT) - if line[2] == "swap": + entry[3] = "%s,%s" % (entry[3], MNT_COMMENT) + if entry[2] == "swap": needswap = True - if line[1].startswith("/"): - dirs.append(line[1]) - cc_lines.append("\t".join(line)) + if entry[1].startswith("/"): + dirs.append(entry[1]) + 
cc_lines.append("\t".join(entry)) mount_points = [ v["mountpoint"] for k, v in util.mounts().items() if "mountpoint" in v diff -Nru cloud-init-23.4.4/cloudinit/config/cc_ntp.py cloud-init-24.1.3/cloudinit/config/cc_ntp.py --- cloud-init-23.4.4/cloudinit/config/cc_ntp.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_ntp.py 2024-03-27 13:14:04.000000000 +0000 @@ -423,7 +423,7 @@ # so use general x.pool.ntp.org instead. The same applies to EuroLinux pool_distro = "" - for x in range(0, NR_POOL_SERVERS): + for x in range(NR_POOL_SERVERS): names.append( ".".join( [n for n in [str(x)] + [pool_distro] + ["pool.ntp.org"] if n] diff -Nru cloud-init-23.4.4/cloudinit/config/cc_package_update_upgrade_install.py cloud-init-24.1.3/cloudinit/config/cc_package_update_upgrade_install.py --- cloud-init-23.4.4/cloudinit/config/cc_package_update_upgrade_install.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_package_update_upgrade_install.py 2024-03-27 13:14:04.000000000 +0000 @@ -19,7 +19,7 @@ from cloudinit.log import flush_loggers from cloudinit.settings import PER_INSTANCE -REBOOT_FILE = "/var/run/reboot-required" +REBOOT_FILES = ("/var/run/reboot-required", "/run/reboot-needed") REBOOT_CMD = ["/sbin/reboot"] MODULE_DESCRIPTION = """\ @@ -120,18 +120,23 @@ try: cloud.distro.install_packages(pkglist) except Exception as e: - util.logexc(LOG, "Failed to install packages: %s", pkglist) + util.logexc( + LOG, "Failure when attempting to install packages: %s", pkglist + ) errors.append(e) # TODO(smoser): handle this less violently # kernel and openssl (possibly some other packages) # write a file /var/run/reboot-required after upgrading. # if that file exists and configured, then just stop right now and reboot - reboot_fn_exists = os.path.isfile(REBOOT_FILE) + for reboot_marker in REBOOT_FILES: + reboot_fn_exists = os.path.isfile(reboot_marker) + if reboot_fn_exists: + break if (upgrade or pkglist) and reboot_if_required and reboot_fn_exists: try: LOG.warning( - "Rebooting after upgrade or install per %s", REBOOT_FILE + "Rebooting after upgrade or install per %s", reboot_marker ) # Flush the above warning + anything else out... flush_loggers(LOG) diff -Nru cloud-init-23.4.4/cloudinit/config/cc_phone_home.py cloud-init-24.1.3/cloudinit/config/cc_phone_home.py --- cloud-init-23.4.4/cloudinit/config/cc_phone_home.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_phone_home.py 2024-03-27 13:14:04.000000000 +0000 @@ -21,7 +21,6 @@ frequency = PER_INSTANCE POST_LIST_ALL = [ - "pub_key_dsa", "pub_key_rsa", "pub_key_ecdsa", "pub_key_ed25519", @@ -36,7 +35,6 @@ the id of the current instance. Either all data can be posted or a list of keys to post. 
Available keys are: - - ``pub_key_dsa`` - ``pub_key_rsa`` - ``pub_key_ecdsa`` - ``pub_key_ed25519`` @@ -57,7 +55,7 @@ Accept: */* Content-Type: application/x-www-form-urlencoded - pub_key_dsa=dsa_contents&pub_key_rsa=rsa_contents&pub_key_ecdsa=ecdsa_contents&pub_key_ed25519=ed25519_contents&instance_id=i-87018aed&hostname=myhost&fqdn=myhost.internal + pub_key_rsa=rsa_contents&pub_key_ecdsa=ecdsa_contents&pub_key_ed25519=ed25519_contents&instance_id=i-87018aed&hostname=myhost&fqdn=myhost.internal """ meta: MetaSchema = { @@ -80,7 +78,6 @@ phone_home: url: http://example.com/$INSTANCE_ID/ post: - - pub_key_dsa - pub_key_rsa - pub_key_ecdsa - pub_key_ed25519 @@ -103,7 +100,7 @@ # # phone_home: # url: http://my.foo.bar/$INSTANCE_ID/ -# post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id, hostname, +# post: [ pub_key_rsa, pub_key_ecdsa, instance_id, hostname, # fqdn ] # @@ -152,7 +149,6 @@ } pubkeys = { - "pub_key_dsa": "/etc/ssh/ssh_host_dsa_key.pub", "pub_key_rsa": "/etc/ssh/ssh_host_rsa_key.pub", "pub_key_ecdsa": "/etc/ssh/ssh_host_ecdsa_key.pub", "pub_key_ed25519": "/etc/ssh/ssh_host_ed25519_key.pub", @@ -160,7 +156,7 @@ for (n, path) in pubkeys.items(): try: - all_keys[n] = util.load_file(path) + all_keys[n] = util.load_text_file(path) except Exception: util.logexc( LOG, "%s: failed to open, can not phone home that data!", path diff -Nru cloud-init-23.4.4/cloudinit/config/cc_power_state_change.py cloud-init-24.1.3/cloudinit/config/cc_power_state_change.py --- cloud-init-23.4.4/cloudinit/config/cc_power_state_change.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_power_state_change.py 2024-03-27 13:14:04.000000000 +0000 @@ -95,7 +95,7 @@ m = re.search(r"\d+ (\w|\.|-)+\s+(/\w.+)", line) return m.group(2) else: - return util.load_file("/proc/%s/cmdline" % pid) + return util.load_text_file("/proc/%s/cmdline" % pid) except IOError: return None diff -Nru cloud-init-23.4.4/cloudinit/config/cc_puppet.py cloud-init-24.1.3/cloudinit/config/cc_puppet.py --- cloud-init-23.4.4/cloudinit/config/cc_puppet.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_puppet.py 2024-03-27 13:14:04.000000000 +0000 @@ -240,7 +240,7 @@ if install_type == "packages": to_install: List[Union[str, List[str]]] - if package_name is None: # conf has no package_nam + if package_name is None: # conf has no package_name for puppet_name in PUPPET_PACKAGE_NAMES: with suppress(PackageInstallerError): to_install = ( @@ -287,7 +287,7 @@ # ... 
and then update the puppet configuration if "conf" in puppet_cfg: # Add all sections from the conf object to puppet.conf - contents = util.load_file(p_constants.conf_path) + contents = util.load_text_file(p_constants.conf_path) # Create object for reading puppet.conf values puppet_config = helpers.DefaultingConfigParser() # Read puppet.conf values from original file in order to be able to diff -Nru cloud-init-23.4.4/cloudinit/config/cc_reset_rmc.py cloud-init-24.1.3/cloudinit/config/cc_reset_rmc.py --- cloud-init-23.4.4/cloudinit/config/cc_reset_rmc.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_reset_rmc.py 2024-03-27 13:14:04.000000000 +0000 @@ -98,7 +98,7 @@ def get_node_id(): try: - fp = util.load_file(NODE_ID_FILE) + fp = util.load_text_file(NODE_ID_FILE) node_id = fp.split("\n")[0] return node_id except Exception: diff -Nru cloud-init-23.4.4/cloudinit/config/cc_resizefs.py cloud-init-24.1.3/cloudinit/config/cc_resizefs.py --- cloud-init-23.4.4/cloudinit/config/cc_resizefs.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_resizefs.py 2024-03-27 13:14:04.000000000 +0000 @@ -11,8 +11,10 @@ import errno import logging import os +import re import stat from textwrap import dedent +from typing import Optional from cloudinit import subp, util from cloudinit.cloud import Cloud @@ -29,7 +31,7 @@ "title": "Resize filesystem", "description": dedent( """\ - Resize a filesystem to use all avaliable space on partition. This + Resize a filesystem to use all available space on partition. This module is useful along with ``cc_growpart`` and will ensure that if the root partition has been resized the root filesystem will be resized along with it. By default, ``cc_resizefs`` will resize the root @@ -146,6 +148,36 @@ RESIZE_FS_PRECHECK_CMDS = {"ufs": _can_skip_resize_ufs} +def get_device_info_from_zpool(zpool) -> Optional[str]: + # zpool has 10 second timeout waiting for /dev/zfs LP: #1760173 + log_warn = LOG.debug if util.is_container() else LOG.warning + if not os.path.exists("/dev/zfs"): + LOG.debug("Cannot get zpool info, no /dev/zfs") + return None + try: + zpoolstatus, err = subp.subp(["zpool", "status", zpool]) + if err: + LOG.info( + "zpool status returned error: [%s] for zpool [%s]", + err, + zpool, + ) + return None + except subp.ProcessExecutionError as err: + log_warn("Unable to get zpool status of %s: %s", zpool, err) + return None + r = r".*(ONLINE).*" + for line in zpoolstatus.split("\n"): + if re.search(r, line) and zpool not in line and "state" not in line: + disk = line.split()[0] + LOG.debug('found zpool "%s" on disk %s', zpool, disk) + return disk + log_warn( + "No zpool found: [%s]: out: [%s] err: %s", zpool, zpoolstatus, err + ) + return None + + def can_skip_resize(fs_type, resize_what, devpth): fstype_lc = fs_type.lower() for i, func in RESIZE_FS_PRECHECK_CMDS.items(): @@ -259,7 +291,7 @@ # so the _resize_zfs function gets the right attribute. 
if fs_type == "zfs": zpool = devpth.split("/")[0] - devpth = util.get_device_info_from_zpool(zpool) + devpth = get_device_info_from_zpool(zpool) if not devpth: return # could not find device from zpool resize_what = zpool diff -Nru cloud-init-23.4.4/cloudinit/config/cc_rh_subscription.py cloud-init-24.1.3/cloudinit/config/cc_rh_subscription.py --- cloud-init-23.4.4/cloudinit/config/cc_rh_subscription.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_rh_subscription.py 2024-03-27 13:14:04.000000000 +0000 @@ -505,7 +505,7 @@ def _sub_man_cli(cmd, logstring_val=False): """ - Uses the prefered cloud-init subprocess def of subp.subp + Uses the preferred cloud-init subprocess def of subp.subp and runs subscription-manager. Breaking this to a separate function for later use in mocking and unittests """ diff -Nru cloud-init-23.4.4/cloudinit/config/cc_rightscale_userdata.py cloud-init-24.1.3/cloudinit/config/cc_rightscale_userdata.py --- cloud-init-23.4.4/cloudinit/config/cc_rightscale_userdata.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_rightscale_userdata.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,130 +0,0 @@ -# Copyright (C) 2011 Canonical Ltd. -# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. -# -# Author: Scott Moser -# Author: Juerg Haefliger -# -# This file is part of cloud-init. See LICENSE file for license information. - -import logging -import os -from urllib.parse import parse_qs - -from cloudinit import url_helper as uhelp -from cloudinit import util -from cloudinit.cloud import Cloud -from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc -from cloudinit.distros import ALL_DISTROS -from cloudinit.settings import PER_INSTANCE - -MY_NAME = "cc_rightscale_userdata" -MY_HOOKNAME = "CLOUD_INIT_REMOTE_HOOK" - -"""Rightscale Userdata: Support rightscale configuration hooks""" - -MODULE_DESCRIPTION = """\ -This module adds support for RightScale configuration hooks to cloud-init. -RightScale adds an entry in the format ``CLOUD_INIT_REMOTE_HOOK=http://...`` to -ec2 user-data. This module checks for this line in the raw userdata and -retrieves any scripts linked by the RightScale user data and places them in the -user scripts configuration directory, to be run later by ``cc_scripts_user``. - -.. note:: - the ``CLOUD_INIT_REMOTE_HOOK`` config variable is present in the raw ec2 - user data only, not in any cloud-config parts - -**Raw user data schema**:: - - CLOUD_INIT_REMOTE_HOOK= -""" - -meta: MetaSchema = { - "id": "cc_rightscale_userdata", - "name": "RightScale Userdata", - "title": "Support rightscale configuration hooks", - "description": MODULE_DESCRIPTION, - "distros": [ALL_DISTROS], - "frequency": PER_INSTANCE, - "examples": [], - "activate_by_schema_keys": [], -} - -__doc__ = get_meta_doc(meta) -LOG = logging.getLogger(__name__) - -# -# The purpose of this script is to allow cloud-init to consume -# rightscale style userdata. rightscale user data is key-value pairs -# in a url-query-string like format. -# -# for cloud-init support, there will be a key named -# 'CLOUD_INIT_REMOTE_HOOK'. -# -# This cloud-config module will -# - read the blob of data from raw user data, and parse it as key/value -# - for each key that is found, download the content to -# the local instance/scripts directory and set them executable. -# - the files in that directory will be run by the scripts_user module -# Therefore, this must run before that. 
-# -# - - -def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: - get_userdata_raw = getattr(cloud, "get_userdata_raw", None) - if not get_userdata_raw or not callable(get_userdata_raw): - LOG.debug("Failed to get raw userdata in module %s", name) - return - - ud = get_userdata_raw() - try: - mdict = parse_qs(ud) - if not mdict or MY_HOOKNAME not in mdict: - LOG.debug( - "Skipping module %s, did not find %s in parsed raw userdata", - name, - MY_HOOKNAME, - ) - return - except Exception: - util.logexc( - LOG, "Failed to parse query string %s into a dictionary", ud - ) - raise - - wrote_fns = [] - captured_excps = [] - - # These will eventually be then ran by the cc_scripts_user - # TODO(harlowja): maybe this should just be a new user data handler?? - # Instead of a late module that acts like a user data handler? - scripts_d = cloud.get_ipath_cur("scripts") - urls = mdict[MY_HOOKNAME] - for (i, url) in enumerate(urls): - fname = os.path.join(scripts_d, "rightscale-%02i" % (i)) - try: - resp = uhelp.readurl(url) - # Ensure its a valid http response (and something gotten) - if resp.ok() and resp.contents: - util.write_file(fname, resp, mode=0o700) - wrote_fns.append(fname) - except Exception as e: - captured_excps.append(e) - util.logexc( - LOG, "%s failed to read %s and write %s", MY_NAME, url, fname - ) - - if wrote_fns: - LOG.debug("Wrote out rightscale userdata to %s files", len(wrote_fns)) - - if len(wrote_fns) != len(urls): - skipped = len(urls) - len(wrote_fns) - LOG.debug("%s urls were skipped or failed", skipped) - - if captured_excps: - LOG.warning( - "%s failed with exceptions, re-raising the last one", - len(captured_excps), - ) - raise captured_excps[-1] diff -Nru cloud-init-23.4.4/cloudinit/config/cc_seed_random.py cloud-init-24.1.3/cloudinit/config/cc_seed_random.py --- cloud-init-23.4.4/cloudinit/config/cc_seed_random.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_seed_random.py 2024-03-27 13:14:04.000000000 +0000 @@ -10,7 +10,6 @@ import base64 import logging -import os from io import BytesIO from textwrap import dedent @@ -90,7 +89,7 @@ raise IOError("Unknown random_seed encoding: %s" % (encoding)) -def handle_random_seed_command(command, required, env=None): +def handle_random_seed_command(command, required, update_env): if not command and required: raise ValueError("no command found but required=true") elif not command: @@ -106,7 +105,7 @@ else: LOG.debug("command '%s' not found for seed_command", cmd) return - subp.subp(command, env=env, capture=False) + subp.subp(command, update_env=update_env, capture=False) def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: @@ -137,9 +136,11 @@ command = mycfg.get("command", None) req = mycfg.get("command_required", False) try: - env = os.environ.copy() - env["RANDOM_SEED_FILE"] = seed_path - handle_random_seed_command(command=command, required=req, env=env) + handle_random_seed_command( + command=command, + required=req, + update_env={"RANDOM_SEED_FILE": seed_path}, + ) except ValueError as e: LOG.warning("handling random command [%s] failed: %s", command, e) raise e diff -Nru cloud-init-23.4.4/cloudinit/config/cc_set_hostname.py cloud-init-24.1.3/cloudinit/config/cc_set_hostname.py --- cloud-init-23.4.4/cloudinit/config/cc_set_hostname.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_set_hostname.py 2024-03-27 13:14:04.000000000 +0000 @@ -60,10 +60,19 @@ dedent( """\ hostname: myhost + create_hostname_file: true fqdn: 
myhost.example.com prefer_fqdn_over_hostname: true """ ), + dedent( + """\ + # On a machine without an ``/etc/hostname`` file, don't create it + # In most clouds, this will result in a DHCP-configured hostname + # provided by the cloud + create_hostname_file: false + """ + ), ], "activate_by_schema_keys": [], } @@ -114,7 +123,7 @@ prev_fn = os.path.join(cloud.get_cpath("data"), "set-hostname") prev_hostname = {} if os.path.exists(prev_fn) and os.stat(prev_fn).st_size > 0: - prev_hostname = util.load_json(util.load_file(prev_fn)) + prev_hostname = util.load_json(util.load_text_file(prev_fn)) hostname_changed = hostname != prev_hostname.get( "hostname" ) or fqdn != prev_hostname.get("fqdn") diff -Nru cloud-init-23.4.4/cloudinit/config/cc_snap.py cloud-init-24.1.3/cloudinit/config/cc_snap.py --- cloud-init-23.4.4/cloudinit/config/cc_snap.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_snap.py 2024-03-27 13:14:04.000000000 +0000 @@ -6,7 +6,6 @@ import logging import os -import sys from textwrap import dedent from cloudinit import subp, util @@ -174,7 +173,7 @@ for command in fixed_snap_commands: shell = isinstance(command, str) try: - subp.subp(command, shell=shell, status_cb=sys.stderr.write) + subp.subp(command, shell=shell) except subp.ProcessExecutionError as e: cmd_failures.append(str(e)) if cmd_failures: @@ -192,7 +191,7 @@ "Skipping module named %s, no 'snap' key in configuration", name ) return - + util.wait_for_snap_seeded(cloud) add_assertions( cfgin.get("assertions", []), os.path.join(cloud.paths.get_ipath_cur(), "snapd.assertions"), diff -Nru cloud-init-23.4.4/cloudinit/config/cc_ssh.py cloud-init-24.1.3/cloudinit/config/cc_ssh.py --- cloud-init-23.4.4/cloudinit/config/cc_ssh.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_ssh.py 2024-03-27 13:14:04.000000000 +0000 @@ -43,7 +43,6 @@ Supported public key types for the ``ssh_authorized_keys`` are: - - dsa - rsa - ecdsa - ed25519 @@ -57,8 +56,6 @@ - sk-ecdsa-sha2-nistp256@openssh.com - sk-ssh-ed25519-cert-v01@openssh.com - sk-ssh-ed25519@openssh.com - - ssh-dss-cert-v01@openssh.com - - ssh-dss - ssh-ed25519-cert-v01@openssh.com - ssh-ed25519 - ssh-rsa-cert-v01@openssh.com @@ -71,7 +68,7 @@ `OpenSSH`_ source, where the sigonly keys are removed. Please see ``ssh_util`` for more information. - ``dsa``, ``rsa``, ``ecdsa`` and ``ed25519`` are added for legacy, + ``rsa``, ``ecdsa`` and ``ed25519`` are added for legacy, as they are valid public keys in some old distros. They can possibly be removed in the future when support for the older distros are dropped @@ -104,7 +101,6 @@ Supported host key types for the ``ssh_keys`` and the ``ssh_genkeytypes`` config flags are: - - dsa - ecdsa - ed25519 - rsa @@ -141,26 +137,18 @@ rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7Xd ... rsa_certificate: | ssh-rsa-cert-v01@openssh.com AAAAIHNzaC1lZDI1NTE5LWNlcnQt ... - dsa_private: | - -----BEGIN DSA PRIVATE KEY----- - MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qco - ... - -----END DSA PRIVATE KEY----- - dsa_public: ssh-dsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7Xd ... - dsa_certificate: | - ssh-dsa-cert-v01@openssh.com AAAAIHNzaC1lZDI1NTE5LWNlcnQt ... ssh_authorized_keys: - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUU ... - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZ ... 
ssh_deletekeys: true - ssh_genkeytypes: [rsa, dsa, ecdsa, ed25519] + ssh_genkeytypes: [rsa, ecdsa, ed25519] disable_root: true disable_root_opts: no-port-forwarding,no-agent-forwarding,no-X11-forwarding allow_public_ssh_keys: true ssh_quiet_keygen: true ssh_publish_hostkeys: enabled: true - blacklist: [dsa] + blacklist: [rsa] """ # noqa: E501 ) ], @@ -170,17 +158,16 @@ __doc__ = get_meta_doc(meta) LOG = logging.getLogger(__name__) -GENERATE_KEY_NAMES = ["rsa", "dsa", "ecdsa", "ed25519"] -FIPS_UNSUPPORTED_KEY_NAMES = ["dsa", "ed25519"] +GENERATE_KEY_NAMES = ["rsa", "ecdsa", "ed25519"] +FIPS_UNSUPPORTED_KEY_NAMES = ["ed25519"] pattern_unsupported_config_keys = re.compile( "^(ecdsa-sk|ed25519-sk)_(private|public|certificate)$" ) KEY_FILE_TPL = "/etc/ssh/ssh_host_%s_key" PUBLISH_HOST_KEYS = True -# Don't publish the dsa hostkey by default since OpenSSH recommends not using -# it. -HOST_KEY_PUBLISH_BLACKLIST = ["dsa"] +# By default publish all supported hostkey types. +HOST_KEY_PUBLISH_BLACKLIST: List[str] = [] CONFIG_KEY_TO_FILE = {} PRIV_TO_PUB = {} @@ -275,8 +262,6 @@ ",".join(skipped_keys), ) - lang_c = os.environ.copy() - lang_c["LANG"] = "C" for keytype in key_names: keyfile = KEY_FILE_TPL % (keytype) if os.path.exists(keyfile): @@ -287,7 +272,9 @@ # TODO(harlowja): Is this guard needed? with util.SeLinuxGuard("/etc/ssh", recursive=True): try: - out, err = subp.subp(cmd, capture=True, env=lang_c) + out, err = subp.subp( + cmd, capture=True, update_env={"LANG": "C"} + ) if not util.get_cfg_option_bool( cfg, "ssh_quiet_keygen", False ): @@ -383,7 +370,7 @@ def get_public_host_keys(blacklist: Optional[Sequence[str]] = None): """Read host keys from /etc/ssh/*.pub files and return them as a list. - @param blacklist: List of key types to ignore. e.g. ['dsa', 'rsa'] + @param blacklist: List of key types to ignore. e.g. ['rsa'] @returns: List of keys, each formatted as a two-element tuple. e.g. [('ssh-rsa', 'AAAAB3Nz...'), ('ssh-ed25519', 'AAAAC3Nx...')] """ @@ -392,7 +379,7 @@ blacklist_files = [] if blacklist: # Convert blacklist to filenames: - # 'dsa' -> '/etc/ssh/ssh_host_dsa_key.pub' + # 'rsa' -> '/etc/ssh/ssh_host_rsa_key.pub' blacklist_files = [ public_key_file_tmpl % (key_type,) for key_type in blacklist ] @@ -406,7 +393,7 @@ # Read host key files, retrieve first two fields as a tuple and # append that tuple to key_list. for file_name in file_list: - file_contents = util.load_file(file_name) + file_contents = util.load_text_file(file_name) key_data = file_contents.split() if key_data and len(key_data) > 1: key_list.append(tuple(key_data[:2])) diff -Nru cloud-init-23.4.4/cloudinit/config/cc_ubuntu_advantage.py cloud-init-24.1.3/cloudinit/config/cc_ubuntu_advantage.py --- cloud-init-23.4.4/cloudinit/config/cc_ubuntu_advantage.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_ubuntu_advantage.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,514 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
- -"""ubuntu_advantage: Configure Ubuntu Advantage support services""" - -import json -import logging -import re -from textwrap import dedent -from typing import Any, List -from urllib.parse import urlparse - -from cloudinit import subp, util -from cloudinit.cloud import Cloud -from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc -from cloudinit.settings import PER_INSTANCE - -UA_URL = "https://ubuntu.com/advantage" - -distros = ["ubuntu"] - -meta: MetaSchema = { - "id": "cc_ubuntu_advantage", - "name": "Ubuntu Advantage", - "title": "Configure Ubuntu Advantage support services", - "description": dedent( - """\ - Attach machine to an existing Ubuntu Advantage support contract and - enable or disable support services such as Livepatch, ESM, - FIPS and FIPS Updates. When attaching a machine to Ubuntu Advantage, - one can also specify services to enable. When the 'enable' - list is present, only named services will be activated. Whereas - if the 'enable' list is not present, the contract's default - services will be enabled. - - On Pro instances, when ``ubuntu_advantage`` config is provided to - cloud-init, Pro's auto-attach feature will be disabled and cloud-init - will perform the Pro auto-attach ignoring the ``token`` key. - The ``enable`` and ``enable_beta`` values will strictly determine what - services will be enabled, ignoring contract defaults. - - Note that when enabling FIPS or FIPS updates you will need to schedule - a reboot to ensure the machine is running the FIPS-compliant kernel. - See `Power State Change`_ for information on how to configure - cloud-init to perform this reboot. - """ - ), - "distros": distros, - "examples": [ - dedent( - """\ - # Attach the machine to an Ubuntu Advantage support contract with a - # UA contract token obtained from %s. - ubuntu_advantage: - token: - """ - % UA_URL - ), - dedent( - """\ - # Attach the machine to an Ubuntu Advantage support contract enabling - # only fips and esm services. Services will only be enabled if - # the environment supports said service. Otherwise warnings will - # be logged for incompatible services specified. - ubuntu_advantage: - token: - enable: - - fips - - esm - """ - ), - dedent( - """\ - # Attach the machine to an Ubuntu Advantage support contract and enable - # the FIPS service. Perform a reboot once cloud-init has - # completed. - power_state: - mode: reboot - ubuntu_advantage: - token: - enable: - - fips - """ - ), - dedent( - """\ - # Set a http(s) proxy before attaching the machine to an - # Ubuntu Advantage support contract and enabling the FIPS service. - ubuntu_advantage: - token: - config: - http_proxy: 'http://some-proxy:8088' - https_proxy: 'https://some-proxy:8088' - global_apt_https_proxy: 'https://some-global-apt-proxy:8088/' - global_apt_http_proxy: 'http://some-global-apt-proxy:8088/' - ua_apt_http_proxy: 'http://10.0.10.10:3128' - ua_apt_https_proxy: 'https://10.0.10.10:3128' - enable: - - fips - """ - ), - dedent( - """\ - # On Ubuntu PRO instances, auto-attach but enable no PRO services. - ubuntu_advantage: - enable: [] - enable_beta: [] - """ - ), - dedent( - """\ - # Enable esm and beta realtime-kernel services in Ubuntu Pro instances. - ubuntu_advantage: - enable: - - esm - enable_beta: - - realtime-kernel - """ - ), - dedent( - """\ - # Disable auto-attach in Ubuntu Pro instances. 
- ubuntu_advantage: - features: - disable_auto_attach: True - """ - ), - ], - "frequency": PER_INSTANCE, - "activate_by_schema_keys": ["ubuntu_advantage", "ubuntu-advantage"], -} - -__doc__ = get_meta_doc(meta) - -LOG = logging.getLogger(__name__) -REDACTED = "REDACTED" -ERROR_MSG_SHOULD_AUTO_ATTACH = ( - "Unable to determine if this is an Ubuntu Pro instance." - " Fallback to normal UA attach." -) -KNOWN_UA_CONFIG_PROPS = ( - "http_proxy", - "https_proxy", - "global_apt_http_proxy", - "global_apt_https_proxy", - "ua_apt_http_proxy", - "ua_apt_https_proxy", -) - - -def validate_schema_features(ua_section: dict): - if "features" not in ua_section: - return - - # Validate ubuntu_advantage.features type - features = ua_section["features"] - if not isinstance(features, dict): - msg = ( - f"'ubuntu_advantage.features' should be a dict, not a" - f" {type(features).__name__}" - ) - LOG.error(msg) - raise RuntimeError(msg) - - # Validate ubuntu_advantage.features.disable_auto_attach - if "disable_auto_attach" not in features: - return - disable_auto_attach = features["disable_auto_attach"] - if not isinstance(disable_auto_attach, bool): - msg = ( - f"'ubuntu_advantage.features.disable_auto_attach' should be a bool" - f", not a {type(disable_auto_attach).__name__}" - ) - LOG.error(msg) - raise RuntimeError(msg) - - -def supplemental_schema_validation(ua_config: dict): - """Validate user-provided ua:config option values. - - This function supplements flexible jsonschema validation with specific - value checks to aid in triage of invalid user-provided configuration. - - Note: It does not log/raise config values as they could be urls containing - sensitive auth info. - - @param ua_config: Dictionary of config value under 'ubuntu_advantage'. - - @raises: ValueError describing invalid values provided. - """ - errors = [] - for key, value in sorted(ua_config.items()): - if key not in KNOWN_UA_CONFIG_PROPS: - LOG.warning( - "Not validating unknown ubuntu_advantage.config.%s property", - key, - ) - continue - elif value is None: - # key will be unset. No extra validation needed. 
- continue - try: - parsed_url = urlparse(value) - if parsed_url.scheme not in ("http", "https"): - errors.append( - f"Expected URL scheme http/https for ua:config:{key}" - ) - except (AttributeError, ValueError): - errors.append(f"Expected a URL for ua:config:{key}") - - if errors: - raise ValueError( - "Invalid ubuntu_advantage configuration:\n{}".format( - "\n".join(errors) - ) - ) - - -def set_ua_config(ua_config: Any = None): - if ua_config is None: - return - if not isinstance(ua_config, dict): - raise RuntimeError( - f"ubuntu_advantage: config should be a dict, not" - f" a {type(ua_config).__name__};" - " skipping enabling config parameters" - ) - supplemental_schema_validation(ua_config) - - enable_errors = [] - for key, value in sorted(ua_config.items()): - redacted_key_value = None - subp_kwargs: dict = {} - if value is None: - LOG.debug("Disabling UA config for %s", key) - config_cmd = ["pro", "config", "unset", key] - else: - redacted_key_value = f"{key}=REDACTED" - LOG.debug("Enabling UA config %s", redacted_key_value) - if re.search(r"\s", value): - key_value = f"{key}={re.escape(value)}" - else: - key_value = f"{key}={value}" - config_cmd = ["pro", "config", "set", key_value] - subp_kwargs = {"logstring": config_cmd[:-1] + [redacted_key_value]} - try: - subp.subp(config_cmd, **subp_kwargs) - except subp.ProcessExecutionError as e: - err_msg = str(e) - if redacted_key_value is not None: - err_msg = err_msg.replace(value, REDACTED) - enable_errors.append((key, err_msg)) - if enable_errors: - for param, error in enable_errors: - LOG.warning('Failure enabling/disabling "%s":\n%s', param, error) - raise RuntimeError( - "Failure enabling/disabling Ubuntu Advantage config(s): {}".format( - ", ".join('"{}"'.format(param) for param, _ in enable_errors) - ) - ) - - -def configure_ua(token, enable=None): - """Call ua commandline client to attach and/or enable services.""" - if enable is None: - enable = [] - elif isinstance(enable, str): - LOG.warning( - "ubuntu_advantage: enable should be a list, not" - " a string; treating as a single enable" - ) - enable = [enable] - elif not isinstance(enable, list): - LOG.warning( - "ubuntu_advantage: enable should be a list, not" - " a %s; skipping enabling services", - type(enable).__name__, - ) - enable = [] - - # Perform attach - if enable: - attach_cmd = ["pro", "attach", "--no-auto-enable", token] - else: - attach_cmd = ["pro", "attach", token] - redacted_cmd = attach_cmd[:-1] + [REDACTED] - LOG.debug("Attaching to Ubuntu Advantage. %s", " ".join(redacted_cmd)) - try: - # Allow `ua attach` to fail in already attached machines - subp.subp(attach_cmd, rcs={0, 2}, logstring=redacted_cmd) - except subp.ProcessExecutionError as e: - err = str(e).replace(token, REDACTED) - msg = f"Failure attaching Ubuntu Advantage:\n{err}" - util.logexc(LOG, msg) - raise RuntimeError(msg) from e - - # Enable services - if not enable: - return - cmd = ["pro", "enable", "--assume-yes", "--format", "json"] + enable - try: - enable_stdout, _ = subp.subp(cmd, capture=True, rcs={0, 1}) - except subp.ProcessExecutionError as e: - raise RuntimeError( - "Error while enabling service(s): " + ", ".join(enable) - ) from e - - try: - enable_resp = json.loads(enable_stdout) - except json.JSONDecodeError as e: - raise RuntimeError(f"UA response was not json: {enable_stdout}") from e - - # At this point we were able to load the json response from UA. This - # response contains a list of errors under the key 'errors'. E.g. 
- # - # { - # "errors": [ - # { - # "message": "UA Apps: ESM is already enabled ...", - # "message_code": "service-already-enabled", - # "service": "esm-apps", - # "type": "service" - # }, - # { - # "message": "Cannot enable unknown service 'asdf' ...", - # "message_code": "invalid-service-or-failure", - # "service": null, - # "type": "system" - # } - # ] - # } - # - # From our pov there are two type of errors, service and non-service - # related. We can distinguish them by checking if `service` is non-null - # or null respectively. - - enable_errors: List[dict] = [] - for err in enable_resp.get("errors", []): - if err["message_code"] == "service-already-enabled": - LOG.debug("Service `%s` already enabled.", err["service"]) - continue - enable_errors.append(err) - - if enable_errors: - error_services: List[str] = [] - for err in enable_errors: - service = err.get("service") - if service is not None: - error_services.append(service) - msg = f'Failure enabling `{service}`: {err["message"]}' - else: - msg = f'Failure of type `{err["type"]}`: {err["message"]}' - util.logexc(LOG, msg) - - raise RuntimeError( - "Failure enabling Ubuntu Advantage service(s): " - + ", ".join(error_services) - ) - - -def maybe_install_ua_tools(cloud: Cloud): - """Install ubuntu-advantage-tools if not present.""" - if subp.which("pro"): - return - try: - cloud.distro.update_package_sources() - except Exception: - util.logexc(LOG, "Package update failed") - raise - try: - cloud.distro.install_packages(["ubuntu-advantage-tools"]) - except Exception: - util.logexc(LOG, "Failed to install ubuntu-advantage-tools") - raise - - -def _should_auto_attach(ua_section: dict) -> bool: - disable_auto_attach = bool( - ua_section.get("features", {}).get("disable_auto_attach", False) - ) - if disable_auto_attach: - return False - - # pylint: disable=import-error - from uaclient.api.exceptions import UserFacingError - from uaclient.api.u.pro.attach.auto.should_auto_attach.v1 import ( - should_auto_attach, - ) - - # pylint: enable=import-error - - try: - result = util.log_time( - logfunc=LOG.debug, - msg="Checking if the instance can be attached to Ubuntu Pro", - func=should_auto_attach, - ) - except UserFacingError as ex: - LOG.debug("Error during `should_auto_attach`: %s", ex) - LOG.warning(ERROR_MSG_SHOULD_AUTO_ATTACH) - return False - return result.should_auto_attach - - -def _attach(ua_section: dict): - token = ua_section.get("token") - if not token: - msg = "`ubuntu_advantage.token` required in non-Pro Ubuntu instances." 
- LOG.error(msg) - raise RuntimeError(msg) - enable_beta = ua_section.get("enable_beta") - if enable_beta: - LOG.debug( - "Ignoring `ubuntu_advantage.enable_beta` services in UA attach:" - " %s", - ", ".join(enable_beta), - ) - configure_ua(token=token, enable=ua_section.get("enable")) - - -def _auto_attach(ua_section: dict): - - # pylint: disable=import-error - from uaclient.api.exceptions import AlreadyAttachedError, UserFacingError - from uaclient.api.u.pro.attach.auto.full_auto_attach.v1 import ( - FullAutoAttachOptions, - full_auto_attach, - ) - - # pylint: enable=import-error - - enable = ua_section.get("enable") - enable_beta = ua_section.get("enable_beta") - options = FullAutoAttachOptions( - enable=enable, - enable_beta=enable_beta, - ) - try: - util.log_time( - logfunc=LOG.debug, - msg="Attaching to Ubuntu Pro", - func=full_auto_attach, - kwargs={"options": options}, - ) - except AlreadyAttachedError: - if enable_beta is not None or enable is not None: - # Only warn if the user defined some service to enable/disable. - LOG.warning( - "The instance is already attached to Pro. Leaving enabled" - " services untouched. Ignoring config directives" - " ubuntu_advantage: enable and enable_beta" - ) - except UserFacingError as ex: - msg = f"Error during `full_auto_attach`: {ex.msg}" - LOG.error(msg) - raise RuntimeError(msg) from ex - - -def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: - ua_section = None - if "ubuntu-advantage" in cfg: - LOG.warning( - 'Deprecated configuration key "ubuntu-advantage" provided.' - ' Expected underscore delimited "ubuntu_advantage"; will' - " attempt to continue." - ) - ua_section = cfg["ubuntu-advantage"] - if "ubuntu_advantage" in cfg: - ua_section = cfg["ubuntu_advantage"] - if ua_section is None: - LOG.debug( - "Skipping module named %s," - " no 'ubuntu_advantage' configuration found", - name, - ) - return - elif not isinstance(ua_section, dict): - msg = ( - f"'ubuntu_advantage' should be a dict, not a" - f" {type(ua_section).__name__}" - ) - LOG.error(msg) - raise RuntimeError(msg) - if "commands" in ua_section: - msg = ( - 'Deprecated configuration "ubuntu-advantage: commands" provided.' - ' Expected "token"' - ) - LOG.error(msg) - raise RuntimeError(msg) - - maybe_install_ua_tools(cloud) - set_ua_config(ua_section.get("config")) - - # ua-auto-attach.service had noop-ed as ua_section is not empty - validate_schema_features(ua_section) - LOG.debug( - "To discover more log info, please check /var/log/ubuntu-advantage.log" - ) - if _should_auto_attach(ua_section): - _auto_attach(ua_section) - - # If ua-auto-attach.service did noop, we did not auto-attach and more keys - # than `features` are given under `ubuntu_advantage`, then try to attach. - # This supports the cases: - # - # 1) Previous attach behavior on non-pro instances. - # 2) Previous attach behavior on instances where ubuntu-advantage-tools - # is < v28.0 (UA apis for should_auto-attach and auto-attach are not - # available. 
- # 3) The user wants to disable auto-attach and attach by giving: - # `{"ubuntu_advantage": "features": {"disable_auto_attach": True}}` - elif not ua_section.keys() <= {"features"}: - _attach(ua_section) diff -Nru cloud-init-23.4.4/cloudinit/config/cc_ubuntu_autoinstall.py cloud-init-24.1.3/cloudinit/config/cc_ubuntu_autoinstall.py --- cloud-init-23.4.4/cloudinit/config/cc_ubuntu_autoinstall.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_ubuntu_autoinstall.py 2024-03-27 13:14:04.000000000 +0000 @@ -6,6 +6,7 @@ import re from textwrap import dedent +from cloudinit import util from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import ( @@ -32,7 +33,7 @@ next generation desktop installer, via `ubuntu-desktop-install` snap. When "autoinstall" directives are provided in either ``#cloud-config`` user-data or ``/etc/cloud/cloud.cfg.d`` validate - minimal autoinstall schema adherance and emit a warning if the + minimal autoinstall schema adherence and emit a warning if the live-installer is not present. The live-installer will use autoinstall directives to seed answers to @@ -83,6 +84,7 @@ ) return + util.wait_for_snap_seeded(cloud) snap_list, _ = subp(["snap", "list"]) installer_present = None for snap_name in LIVE_INSTALLER_SNAPS: diff -Nru cloud-init-23.4.4/cloudinit/config/cc_ubuntu_pro.py cloud-init-24.1.3/cloudinit/config/cc_ubuntu_pro.py --- cloud-init-23.4.4/cloudinit/config/cc_ubuntu_pro.py 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_ubuntu_pro.py 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,526 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""ubuntu_pro: Configure Ubuntu Pro support services""" + +import json +import logging +import re +from textwrap import dedent +from typing import Any, List +from urllib.parse import urlparse + +from cloudinit import subp, util +from cloudinit.cloud import Cloud +from cloudinit.config import Config +from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.settings import PER_INSTANCE + +PRO_URL = "https://ubuntu.com/pro" + +distros = ["ubuntu"] + +DEPRECATED_KEYS = set(["ubuntu-advantage", "ubuntu_advantage"]) + +meta: MetaSchema = { + "id": "cc_ubuntu_pro", + "name": "Ubuntu Pro", + "title": "Configure Ubuntu Pro support services", + "description": dedent( + """\ + Attach machine to an existing Ubuntu Pro support contract and + enable or disable support services such as Livepatch, ESM, + FIPS and FIPS Updates. When attaching a machine to Ubuntu Pro, + one can also specify services to enable. When the 'enable' + list is present, only named services will be activated. Whereas + if the 'enable' list is not present, the contract's default + services will be enabled. + + On Pro instances, when ``ubuntu_pro`` config is provided to + cloud-init, Pro's auto-attach feature will be disabled and cloud-init + will perform the Pro auto-attach ignoring the ``token`` key. + The ``enable`` and ``enable_beta`` values will strictly determine what + services will be enabled, ignoring contract defaults. + + Note that when enabling FIPS or FIPS updates you will need to schedule + a reboot to ensure the machine is running the FIPS-compliant kernel. + See `Power State Change`_ for information on how to configure + cloud-init to perform this reboot. 
+ """ + ), + "distros": distros, + "examples": [ + dedent( + """\ + # Attach the machine to an Ubuntu Pro support contract with a + # Pro contract token obtained from %s. + ubuntu_pro: + token: + """ + % PRO_URL + ), + dedent( + """\ + # Attach the machine to an Ubuntu Pro support contract enabling + # only fips and esm services. Services will only be enabled if + # the environment supports said service. Otherwise warnings will + # be logged for incompatible services specified. + ubuntu_pro: + token: + enable: + - fips + - esm + """ + ), + dedent( + """\ + # Attach the machine to an Ubuntu Pro support contract and enable + # the FIPS service. Perform a reboot once cloud-init has + # completed. + power_state: + mode: reboot + ubuntu_pro: + token: + enable: + - fips + """ + ), + dedent( + """\ + # Set a http(s) proxy before attaching the machine to an + # Ubuntu Pro support contract and enabling the FIPS service. + ubuntu_pro: + token: + config: + http_proxy: 'http://some-proxy:8088' + https_proxy: 'https://some-proxy:8088' + global_apt_https_proxy: 'https://some-global-apt-proxy:8088/' + global_apt_http_proxy: 'http://some-global-apt-proxy:8088/' + ua_apt_http_proxy: 'http://10.0.10.10:3128' + ua_apt_https_proxy: 'https://10.0.10.10:3128' + enable: + - fips + """ + ), + dedent( + """\ + # On Ubuntu PRO instances, auto-attach but enable no PRO services. + ubuntu_pro: + enable: [] + enable_beta: [] + """ + ), + dedent( + """\ + # Enable esm and beta realtime-kernel services in Ubuntu Pro instances. + ubuntu_pro: + enable: + - esm + enable_beta: + - realtime-kernel + """ + ), + dedent( + """\ + # Disable auto-attach in Ubuntu Pro instances. + ubuntu_pro: + features: + disable_auto_attach: True + """ + ), + ], + "frequency": PER_INSTANCE, + "activate_by_schema_keys": ["ubuntu_pro"] + list(DEPRECATED_KEYS), +} + +__doc__ = get_meta_doc(meta) + +LOG = logging.getLogger(__name__) +REDACTED = "REDACTED" +ERROR_MSG_SHOULD_AUTO_ATTACH = ( + "Unable to determine if this is an Ubuntu Pro instance." + " Fallback to normal Pro attach." +) +KNOWN_PRO_CONFIG_PROPS = ( + "http_proxy", + "https_proxy", + "global_apt_http_proxy", + "global_apt_https_proxy", + "ua_apt_http_proxy", + "ua_apt_https_proxy", +) + + +def validate_schema_features(pro_section: dict): + if "features" not in pro_section: + return + + # Validate ubuntu_pro.features type + features = pro_section["features"] + if not isinstance(features, dict): + msg = ( + f"'ubuntu_pro.features' should be a dict, not a" + f" {type(features).__name__}" + ) + LOG.error(msg) + raise RuntimeError(msg) + + # Validate ubuntu_pro.features.disable_auto_attach + if "disable_auto_attach" not in features: + return + disable_auto_attach = features["disable_auto_attach"] + if not isinstance(disable_auto_attach, bool): + msg = ( + f"'ubuntu_pro.features.disable_auto_attach' should be a bool" + f", not a {type(disable_auto_attach).__name__}" + ) + LOG.error(msg) + raise RuntimeError(msg) + + +def supplemental_schema_validation(pro_config: dict): + """Validate user-provided ua:config option values. + + This function supplements flexible jsonschema validation with specific + value checks to aid in triage of invalid user-provided configuration. + + Note: It does not log/raise config values as they could be urls containing + sensitive auth info. + + @param pro_config: Dictionary of config value under 'ubuntu_pro'. + + @raises: ValueError describing invalid values provided. 
+ """ + errors = [] + for key, value in sorted(pro_config.items()): + if key not in KNOWN_PRO_CONFIG_PROPS: + LOG.warning( + "Not validating unknown ubuntu_pro.config.%s property", + key, + ) + continue + elif value is None: + # key will be unset. No extra validation needed. + continue + try: + parsed_url = urlparse(value) + if parsed_url.scheme not in ("http", "https"): + errors.append( + f"Expected URL scheme http/https for ua:config:{key}" + ) + except (AttributeError, ValueError): + errors.append(f"Expected a URL for ua:config:{key}") + + if errors: + raise ValueError( + "Invalid ubuntu_pro configuration:\n{}".format("\n".join(errors)) + ) + + +def set_pro_config(pro_config: Any = None): + if pro_config is None: + return + if not isinstance(pro_config, dict): + raise RuntimeError( + f"ubuntu_pro: config should be a dict, not" + f" a {type(pro_config).__name__};" + " skipping enabling config parameters" + ) + supplemental_schema_validation(pro_config) + + enable_errors = [] + for key, value in sorted(pro_config.items()): + redacted_key_value = None + subp_kwargs: dict = {} + if value is None: + LOG.debug("Disabling Pro config for %s", key) + config_cmd = ["pro", "config", "unset", key] + else: + redacted_key_value = f"{key}=REDACTED" + LOG.debug("Enabling Pro config %s", redacted_key_value) + if re.search(r"\s", value): + key_value = f"{key}={re.escape(value)}" + else: + key_value = f"{key}={value}" + config_cmd = ["pro", "config", "set", key_value] + subp_kwargs = {"logstring": config_cmd[:-1] + [redacted_key_value]} + try: + subp.subp(config_cmd, **subp_kwargs) + except subp.ProcessExecutionError as e: + err_msg = str(e) + if redacted_key_value is not None: + err_msg = err_msg.replace(value, REDACTED) + enable_errors.append((key, err_msg)) + if enable_errors: + for param, error in enable_errors: + LOG.warning('Failure enabling/disabling "%s":\n%s', param, error) + raise RuntimeError( + "Failure enabling/disabling Ubuntu Pro config(s): {}".format( + ", ".join('"{}"'.format(param) for param, _ in enable_errors) + ) + ) + + +def configure_pro(token, enable=None): + """Call ua commandline client to attach and/or enable services.""" + if enable is None: + enable = [] + elif isinstance(enable, str): + LOG.warning( + "ubuntu_pro: enable should be a list, not" + " a string; treating as a single enable" + ) + enable = [enable] + elif not isinstance(enable, list): + LOG.warning( + "ubuntu_pro: enable should be a list, not" + " a %s; skipping enabling services", + type(enable).__name__, + ) + enable = [] + + # Perform attach + if enable: + attach_cmd = ["pro", "attach", "--no-auto-enable", token] + else: + attach_cmd = ["pro", "attach", token] + redacted_cmd = attach_cmd[:-1] + [REDACTED] + LOG.debug("Attaching to Ubuntu Pro. 
%s", " ".join(redacted_cmd)) + try: + # Allow `ua attach` to fail in already attached machines + subp.subp(attach_cmd, rcs={0, 2}, logstring=redacted_cmd) + except subp.ProcessExecutionError as e: + err = str(e).replace(token, REDACTED) + msg = f"Failure attaching Ubuntu Pro:\n{err}" + util.logexc(LOG, msg) + raise RuntimeError(msg) from e + + # Enable services + if not enable: + return + cmd = ["pro", "enable", "--assume-yes", "--format", "json"] + enable + try: + enable_stdout, _ = subp.subp(cmd, capture=True, rcs={0, 1}) + except subp.ProcessExecutionError as e: + raise RuntimeError( + "Error while enabling service(s): " + ", ".join(enable) + ) from e + + try: + enable_resp = json.loads(enable_stdout) + except json.JSONDecodeError as e: + raise RuntimeError( + f"Pro response was not json: {enable_stdout}" + ) from e + + # At this point we were able to load the json response from Pro. This + # response contains a list of errors under the key 'errors'. E.g. + # + # { + # "errors": [ + # { + # "message": "UA Apps: ESM is already enabled ...", + # "message_code": "service-already-enabled", + # "service": "esm-apps", + # "type": "service" + # }, + # { + # "message": "Cannot enable unknown service 'asdf' ...", + # "message_code": "invalid-service-or-failure", + # "service": null, + # "type": "system" + # } + # ] + # } + # + # From our pov there are two type of errors, service and non-service + # related. We can distinguish them by checking if `service` is non-null + # or null respectively. + + enable_errors: List[dict] = [] + for err in enable_resp.get("errors", []): + if err["message_code"] == "service-already-enabled": + LOG.debug("Service `%s` already enabled.", err["service"]) + continue + enable_errors.append(err) + + if enable_errors: + error_services: List[str] = [] + for err in enable_errors: + service = err.get("service") + if service is not None: + error_services.append(service) + msg = f'Failure enabling `{service}`: {err["message"]}' + else: + msg = f'Failure of type `{err["type"]}`: {err["message"]}' + util.logexc(LOG, msg) + + raise RuntimeError( + "Failure enabling Ubuntu Pro service(s): " + + ", ".join(error_services) + ) + + +def maybe_install_ua_tools(cloud: Cloud): + """Install ubuntu-advantage-tools if not present.""" + if subp.which("pro"): + return + try: + cloud.distro.update_package_sources() + except Exception: + util.logexc(LOG, "Package update failed") + raise + try: + cloud.distro.install_packages(["ubuntu-advantage-tools"]) + except Exception: + util.logexc(LOG, "Failed to install ubuntu-advantage-tools") + raise + + +def _should_auto_attach(pro_section: dict) -> bool: + disable_auto_attach = bool( + pro_section.get("features", {}).get("disable_auto_attach", False) + ) + if disable_auto_attach: + return False + + # pylint: disable=import-error + from uaclient.api.exceptions import UserFacingError + from uaclient.api.u.pro.attach.auto.should_auto_attach.v1 import ( + should_auto_attach, + ) + + # pylint: enable=import-error + + try: + result = util.log_time( + logfunc=LOG.debug, + msg="Checking if the instance can be attached to Ubuntu Pro", + func=should_auto_attach, + ) + except UserFacingError as ex: + LOG.debug("Error during `should_auto_attach`: %s", ex) + LOG.warning(ERROR_MSG_SHOULD_AUTO_ATTACH) + return False + return result.should_auto_attach + + +def _attach(pro_section: dict): + token = pro_section.get("token") + if not token: + msg = "`ubuntu_pro.token` required in non-Pro Ubuntu instances." 
+ LOG.error(msg) + raise RuntimeError(msg) + enable_beta = pro_section.get("enable_beta") + if enable_beta: + LOG.debug( + "Ignoring `ubuntu_pro.enable_beta` services in Pro attach: %s", + ", ".join(enable_beta), + ) + configure_pro(token=token, enable=pro_section.get("enable")) + + +def _auto_attach(pro_section: dict): + + # pylint: disable=import-error + from uaclient.api.exceptions import AlreadyAttachedError, UserFacingError + from uaclient.api.u.pro.attach.auto.full_auto_attach.v1 import ( + FullAutoAttachOptions, + full_auto_attach, + ) + + # pylint: enable=import-error + + enable = pro_section.get("enable") + enable_beta = pro_section.get("enable_beta") + options = FullAutoAttachOptions( + enable=enable, + enable_beta=enable_beta, + ) + try: + util.log_time( + logfunc=LOG.debug, + msg="Attaching to Ubuntu Pro", + func=full_auto_attach, + kwargs={"options": options}, + ) + except AlreadyAttachedError: + if enable_beta is not None or enable is not None: + # Only warn if the user defined some service to enable/disable. + LOG.warning( + "The instance is already attached to Pro. Leaving enabled" + " services untouched. Ignoring config directives" + " ubuntu_pro: enable and enable_beta" + ) + except UserFacingError as ex: + msg = f"Error during `full_auto_attach`: {ex.msg}" + LOG.error(msg) + raise RuntimeError(msg) from ex + + +def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: + pro_section = None + deprecated = list(DEPRECATED_KEYS.intersection(cfg)) + if deprecated: + if len(deprecated) > 1: + raise RuntimeError( + "Unable to configure Ubuntu Pro. Multiple deprecated config" + " keys provided: %s" % ", ".join(deprecated) + ) + LOG.warning( + "Deprecated configuration key(s) provided: %s." + ' Expected "ubuntu_pro"; will attempt to continue.', + ", ".join(deprecated), + ) + pro_section = cfg[deprecated[0]] + if "ubuntu_pro" in cfg: + # Prefer ubuntu_pro over any deprecated keys when both exist + if deprecated: + LOG.warning( + "Ignoring deprecated key %s and preferring ubuntu_pro config", + deprecated[0], + ) + pro_section = cfg["ubuntu_pro"] + if pro_section is None: + LOG.debug( + "Skipping module named %s, no 'ubuntu_pro' configuration found", + name, + ) + return + elif not isinstance(pro_section, dict): + msg = ( + f"'ubuntu_pro' should be a dict, not a" + f" {type(pro_section).__name__}" + ) + LOG.error(msg) + raise RuntimeError(msg) + if "commands" in pro_section: + msg = ( + 'Deprecated configuration "ubuntu-advantage: commands" provided.' + ' Expected "token"' + ) + LOG.error(msg) + raise RuntimeError(msg) + + maybe_install_ua_tools(cloud) + set_pro_config(pro_section.get("config")) + + # ua-auto-attach.service had noop-ed as pro_section is not empty + validate_schema_features(pro_section) + LOG.debug( + "To discover more log info, please check /var/log/ubuntu-advantage.log" + ) + if _should_auto_attach(pro_section): + _auto_attach(pro_section) + + # If ua-auto-attach.service did noop, we did not auto-attach and more keys + # than `features` are given under `ubuntu_pro`, then try to attach. + # This supports the cases: + # + # 1) Previous attach behavior on non-pro instances. + # 2) Previous attach behavior on instances where ubuntu-advantage-tools + # is < v28.0 (Pro apis for should_auto-attach and auto-attach are not + # available. 
+ # 3) The user wants to disable auto-attach and attach by giving: + # `{"ubuntu_pro": "features": {"disable_auto_attach": True}}` + elif not pro_section.keys() <= {"features"}: + _attach(pro_section) diff -Nru cloud-init-23.4.4/cloudinit/config/cc_update_hostname.py cloud-init-24.1.3/cloudinit/config/cc_update_hostname.py --- cloud-init-23.4.4/cloudinit/config/cc_update_hostname.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_update_hostname.py 2024-03-27 13:14:04.000000000 +0000 @@ -65,6 +65,7 @@ fqdn: external.fqdn.me hostname: myhost prefer_fqdn_over_hostname: true + create_hostname_file: true """ ), dedent( @@ -74,6 +75,14 @@ prefer_fqdn_over_hostname: false """ ), + dedent( + """\ + # On a machine without an ``/etc/hostname`` file, don't create it. + # In most clouds, this will result in a DHCP-configured hostname + # provided by the cloud. + create_hostname_file: false + """ + ), ], "frequency": PER_ALWAYS, "activate_by_schema_keys": [], diff -Nru cloud-init-23.4.4/cloudinit/config/cc_wireguard.py cloud-init-24.1.3/cloudinit/config/cc_wireguard.py --- cloud-init-23.4.4/cloudinit/config/cc_wireguard.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_wireguard.py 2024-03-27 13:14:04.000000000 +0000 @@ -226,7 +226,7 @@ if subp.which("wg"): return - # Install DKMS when Kernel Verison lower 5.6 + # Install DKMS when kernel version is lower than 5.6 if util.kernel_version() < MIN_KERNEL_VERSION: packages.append("wireguard") diff -Nru cloud-init-23.4.4/cloudinit/config/cc_write_files.py cloud-init-24.1.3/cloudinit/config/cc_write_files.py --- cloud-init-23.4.4/cloudinit/config/cc_write_files.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_write_files.py 2024-03-27 13:14:04.000000000 +0000 @@ -74,7 +74,7 @@ ), dedent( """\ - # Provide gziped binary content + # Provide gzipped binary content write_files: - encoding: gzip content: !!binary | @@ -146,7 +146,7 @@ # Yaml already encodes binary data as base64 if it is given to the # yaml file as binary, so those will be automatically decoded for you. # But the above b64 is just for people that are more 'comfortable' - # specifing it manually (which might be a possibility) + # specifying it manually (which might be a possibility) if encoding_type in ["b64", "base64"]: return ["application/base64"] if encoding_type == TEXT_PLAIN_ENC: diff -Nru cloud-init-23.4.4/cloudinit/config/cc_zypper_add_repo.py cloud-init-24.1.3/cloudinit/config/cc_zypper_add_repo.py --- cloud-init-23.4.4/cloudinit/config/cc_zypper_add_repo.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/cc_zypper_add_repo.py 2024-03-27 13:14:04.000000000 +0000 @@ -172,7 +172,7 @@ if not zypper_config: return zypp_config = "/etc/zypp/zypp.conf" - zypp_conf_content = util.load_file(zypp_config) + zypp_conf_content = util.load_text_file(zypp_config) new_settings = ["# Added via cloud.cfg"] for setting, value in zypper_config.items(): if setting == "configdir": diff -Nru cloud-init-23.4.4/cloudinit/config/modules.py cloud-init-24.1.3/cloudinit/config/modules.py --- cloud-init-23.4.4/cloudinit/config/modules.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/modules.py 2024-03-27 13:14:04.000000000 +0000 @@ -27,6 +27,18 @@ # name in the lookup path... MOD_PREFIX = "cc_" +# List of modules that have been removed upstream. This prevents every +# downstream from having to create upgrade scripts to avoid warnings about +# missing modules.
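The rename table defined just below reduces to a dictionary lookup performed before module resolution: when a legacy name matches, a deprecation warning is emitted and the new name is used for the module search. A standalone sketch of that lookup, with the mapping inlined rather than imported from cloud-init (`resolve_module_name` is an illustrative helper, not cloud-init API):

    RENAMED_MODULES = {
        "cc_ubuntu_advantage": "cc_ubuntu_pro",  # Renamed 24.1
    }

    def resolve_module_name(mod_name: str) -> str:
        """Follow any rename; the real code also emits a deprecation."""
        return RENAMED_MODULES.get(mod_name, mod_name)

    assert resolve_module_name("cc_ubuntu_advantage") == "cc_ubuntu_pro"
    assert resolve_module_name("cc_runcmd") == "cc_runcmd"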
+REMOVED_MODULES = [ + "cc_migrator", # Removed in 24.1 + "cc_rightscale_userdata", # Removed in 24.1 +] + +RENAMED_MODULES = { + "cc_ubuntu_advantage": "cc_ubuntu_pro", # Renamed 24.1 +} + class ModuleDetails(NamedTuple): module: ModuleType @@ -182,23 +194,42 @@ if not mod_name: continue if freq and freq not in FREQUENCIES: - LOG.warning( - "Config specified module %s has an unknown frequency %s", - raw_name, - freq, + util.deprecate( + deprecated=( + f"Config specified module {raw_name} has an unknown" + f" frequency {freq}" + ), + deprecated_version="22.1", ) # Misconfigured in /etc/cloud/cloud.cfg. Reset so cc_* module # default meta attribute "frequency" value is used. freq = None + if mod_name in RENAMED_MODULES: + util.deprecate( + deprecated=( + f"Module has been renamed from {mod_name} to " + f"{RENAMED_MODULES[mod_name]}. Update any" + " references in /etc/cloud/cloud.cfg" + ), + deprecated_version="24.1", + ) + mod_name = RENAMED_MODULES[mod_name] mod_locs, looked_locs = importer.find_module( mod_name, ["", type_utils.obj_name(config)], ["handle"] ) if not mod_locs: - LOG.warning( - "Could not find module named %s (searched %s)", - mod_name, - looked_locs, - ) + if mod_name in REMOVED_MODULES: + LOG.info( + "Module `%s` has been removed from cloud-init. " + "It may be removed from `/etc/cloud/cloud.cfg`.", + mod_name[3:], # [3:] to remove 'cc_' + ) + else: + LOG.warning( + "Could not find module named %s (searched %s)", + mod_name, + looked_locs, + ) continue mod = importer.import_module(mod_locs[0]) validate_module(mod, raw_name) diff -Nru cloud-init-23.4.4/cloudinit/config/schema.py cloud-init-24.1.3/cloudinit/config/schema.py --- cloud-init-23.4.4/cloudinit/config/schema.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/schema.py 2024-03-27 13:14:04.000000000 +0000 @@ -5,11 +5,14 @@ import logging import os import re +import shutil import sys import textwrap from collections import defaultdict from collections.abc import Iterable +from contextlib import suppress from copy import deepcopy +from enum import Enum from errno import EACCES from functools import partial from itertools import chain @@ -19,6 +22,7 @@ List, NamedTuple, Optional, + Tuple, Type, Union, cast, @@ -29,8 +33,15 @@ from cloudinit import importer, safeyaml from cloudinit.cmd.devel import read_cfg_paths from cloudinit.handlers import INCLUSION_TYPES_MAP, type_from_starts_with +from cloudinit.helpers import Paths from cloudinit.sources import DataSourceNotFoundException -from cloudinit.util import error, get_modules_from_dir, load_file +from cloudinit.temp_utils import mkdtemp +from cloudinit.util import ( + error, + get_modules_from_dir, + load_text_file, + write_file, +) try: from jsonschema import ValidationError as _ValidationError @@ -56,15 +67,6 @@ USERDATA_SCHEMA_FILE = "schema-cloud-config-v1.json" NETWORK_CONFIG_V1_SCHEMA_FILE = "schema-network-config-v1.json" -SCHEMA_FILES_BY_TYPE = { - "cloud-config": { - "latest": USERDATA_SCHEMA_FILE, - }, - "network-config": { - "latest": NETWORK_CONFIG_V1_SCHEMA_FILE, - }, -} - _YAML_MAP = {True: "true", False: "false", None: "null"} SCHEMA_DOC_TMPL = """ {name} @@ -148,6 +150,49 @@ SchemaProblems = List[SchemaProblem] +class SchemaType(Enum): + """Supported schema types are either cloud-config or network-config. + + Vendordata and Vendordata2 formats adhere to the cloud-config schema type. + Cloud metadata is a schema unique to each cloud platform and likely will + not be represented in this enum.
+ """ + + CLOUD_CONFIG = "cloud-config" + NETWORK_CONFIG = "network-config" + + +# Placeholders for versioned schema and schema file locations. +# The "latest" key is used in absence of a requested specific version. +SCHEMA_FILES_BY_TYPE = { + SchemaType.CLOUD_CONFIG: { + "latest": USERDATA_SCHEMA_FILE, + }, + SchemaType.NETWORK_CONFIG: { + "latest": NETWORK_CONFIG_V1_SCHEMA_FILE, + }, +} + + +class InstanceDataType(Enum): + """Types of instance data provided to cloud-init""" + + USERDATA = "user-data" + NETWORK_CONFIG = "network-config" + VENDORDATA = "vendor-data" + VENDOR2DATA = "vendor2-data" + # METADATA = "metadata" + + def __str__(self): # pylint: disable=invalid-str-returned + return self.value + + +class InstanceDataPart(NamedTuple): + config_type: InstanceDataType + schema_type: SchemaType + config_path: str + + class UserDataTypeAndDecodedContent(NamedTuple): userdata_type: str content: str @@ -206,6 +251,10 @@ return bool(self.schema_errors) +class SchemaValidationInvalidHeaderError(SchemaValidationError): + """Raised when no valid header is declared in the user-data file.""" + + def is_schema_byte_string(checker, instance): """TYPE_CHECKER override allowing bytes for string type @@ -532,15 +581,116 @@ ) +def network_schema_version(network_config: dict) -> Optional[int]: + """Return the version of the network schema when present.""" + if "network" in network_config: + return network_config["network"].get("version") + return network_config.get("version") + + +def netplan_validate_network_schema( + network_config: dict, + strict: bool = False, + annotate: bool = False, + log_details: bool = True, +) -> bool: + """On systems with netplan, validate network_config schema for file + + Leverage NetplanParser for error annotation line, column and detailed + errors. + + @param network_config: Dict of network configuration settings validated + against + @param strict: Boolean, when True raise SchemaValidationErrors instead of + logging warnings. + @param annotate: Boolean, when True, print original network_config_file + content with error annotations + @param log_details: Boolean, when True logs details of validation errors. + If there are concerns about logging sensitive userdata, this should + be set to False. + + @return: True when schema validation was performed. False when not on a + system with netplan and netplan python support. + @raises: SchemaValidationError when netplan's parser raises + NetplanParserExceptions. + """ + try: + from netplan import NetplanParserException, Parser # type: ignore + except ImportError: + LOG.debug("Skipping netplan schema validation. No netplan available") + return False + + # netplan Parser looks at all *.yaml files in the target directory underA + # /etc/netplan. cloud-init should only validate schema of the + # network-config it generates, so create a /etc/netplan + # to validate only our network-config. + parse_dir = mkdtemp() + netplan_file = os.path.join(parse_dir, "etc/netplan/network-config.yaml") + + # Datasource network config can optionally exclude top-level network key + net_cfg = deepcopy(network_config) + if "network" not in net_cfg: + net_cfg = {"network": net_cfg} + + src_content = safeyaml.dumps(net_cfg) + write_file(netplan_file, src_content, mode=0o600) + + parser = Parser() + errors = [] + try: + # Parse all netplan *.yaml files.load_yaml_hierarchy looks for nested + # etc/netplan subdir under "/". 
+ parser.load_yaml_hierarchy(parse_dir) + except NetplanParserException as e: + errors.append( + SchemaProblem( + "format-l{line}.c{col}".format(line=e.line, col=e.column), + f"Invalid netplan schema. {e.message}", + ) + ) + if os.path.exists(parse_dir): + shutil.rmtree(parse_dir) + if errors: + if strict: + if annotate: + # Load YAML marks for annotation + _, marks = safeyaml.load_with_marks(src_content) + print( + annotated_cloudconfig_file( + net_cfg, + src_content, + marks, + schema_errors=errors, + ) + ) + raise SchemaValidationError(errors) + if log_details: + message = _format_schema_problems( + errors, + prefix=( + f"Invalid {SchemaType.NETWORK_CONFIG.value} provided:\n" + ), + separator="\n", + ) + else: + message = ( + f"Invalid {SchemaType.NETWORK_CONFIG.value} provided: " + "Please run 'sudo cloud-init schema --system' to " + "see the schema errors." + ) + LOG.warning(message) + return True + + def validate_cloudconfig_schema( config: dict, schema: Optional[dict] = None, - schema_type: str = "cloud-config", + schema_type: SchemaType = SchemaType.CLOUD_CONFIG, strict: bool = False, strict_metaschema: bool = False, log_details: bool = True, log_deprecations: bool = False, -): +) -> bool: """Validate provided config meets the schema definition. @param config: Dict of cloud configuration settings validated against @@ -548,8 +698,9 @@ @param schema: jsonschema dict describing the supported schema definition for the cloud config module (config.cc_*). If None, validate against global schema. - @param schema_type: Optional string. One of: cloud-config, network-config - Default: cloud-config. + @param schema_type: Optional SchemaType. + One of: SchemaType.CLOUD_CONFIG or SchemaType.NETWORK_CONFIG. + Default: SchemaType.CLOUD_CONFIG @param strict: Boolean, when True raise SchemaValidationErrors instead of logging warnings. @param strict_metaschema: Boolean, when True validates schema using strict @@ -562,9 +713,20 @@ @raises: SchemaValidationError when provided config does not validate against the provided schema. @raises: RuntimeError when provided config sourced from YAML is not a dict. - @raises: ValueError on invalid schema_type not in cloud-config or - network_config + @raises: ValueError on invalid schema_type not in CLOUD_CONFIG or + NETWORK_CONFIG """ + if schema_type == SchemaType.NETWORK_CONFIG: + if network_schema_version(config) == 2: + if netplan_validate_network_schema( + network_config=config, strict=strict, log_details=log_details + ): + # Schema was validated by netplan + return True + # network-config schema version 2 but no netplan. + # TODO(add JSON schema definition for network version 2) + return False + if schema is None: schema = get_schema(schema_type) try: @@ -575,7 +737,7 @@ ) except ImportError: LOG.debug("Ignoring schema validation. jsonschema is not present") - return + return False validator = cloudinitValidator(schema, format_checker=FormatChecker()) @@ -619,12 +781,12 @@ if log_details: details = _format_schema_problems( errors, - prefix=f"Invalid {schema_type} provided:\n", + prefix=f"Invalid {schema_type.value} provided:\n", separator="\n", ) else: details = ( - f"Invalid {schema_type} provided: " + f"Invalid {schema_type.value} provided: " "Please run 'sudo cloud-init schema --system' to " "see the schema errors." ) @@ -760,7 +922,7 @@ """Return contents of the cloud-config file annotated with schema errors. @param cloudconfig: YAML-loaded dict from the original_content or empty - dict if unparseable. + dict if unparsable. 
@param original_content: The contents of a cloud-config file @param schemamarks: Dict with schema marks. @param schema_errors: Instance of `SchemaProblems`. @@ -780,7 +942,7 @@ When merging multiple cloud-config parts cloud-init logs an error and ignores any user-data parts which are declared as #cloud-config but - cannot be processed. the hanlder.cloud_config module also leaves comments + cannot be processed. the handler.cloud_config module also leaves comments in the final merged config for every invalid part file which begin with MERGED_CONFIG_SCHEMA_ERROR_PREFIX to aid in triage. """ @@ -816,9 +978,12 @@ :return: UserDataTypeAndDecodedContent :raises: SchemaValidationError when non-jinja content found but header declared ## template: jinja. + :raises JinjaSyntaxParsingException when jinja syntax error found. + :raises JinjaLoadError when jinja template fails to load. """ from cloudinit.handlers.jinja_template import ( JinjaLoadError, + JinjaSyntaxParsingException, NotJinjaError, render_jinja_payload_from_file, ) @@ -840,13 +1005,18 @@ ) ] ) from e + except JinjaSyntaxParsingException as e: + error( + "Failed to render templated user-data. " + str(e), + sys_exit=True, + ) except JinjaLoadError as e: error(str(e), sys_exit=True) schema_position = "format-l2.c1" user_data_type = type_from_starts_with(content) if not user_data_type: # Neither jinja2 nor #cloud-config header_line, _, _ = content.partition("\n") - raise SchemaValidationError( + raise SchemaValidationInvalidHeaderError( [ SchemaProblem( schema_position, @@ -867,7 +1037,7 @@ def validate_cloudconfig_file( config_path: str, schema: dict, - schema_type: str = "cloud-config", + schema_type: SchemaType = SchemaType.CLOUD_CONFIG, annotate: bool = False, instance_data_path: str = None, ) -> bool: @@ -876,7 +1046,7 @@ @param config_path: Path to the yaml cloud-config file to parse, or None to default to system userdata from Paths object. @param schema: Dict describing a valid jsonschema to validate against. - @param schema_type: One of network-config or cloud-config. + @param schema_type: One of SchemaType.NETWORK_CONFIG or CLOUD_CONFIG @param annotate: Boolean set True to print original config file with error annotations on the offending lines. @param instance_data_path: Path to instance_data JSON, used for text/jinja @@ -886,17 +1056,17 @@ :raises SchemaValidationError containing any of schema_errors encountered. :raises RuntimeError when config_path does not exist. """ - decoded_content = load_file(config_path, decode=True) + decoded_content = load_text_file(config_path) if not decoded_content: print( "Empty '%s' found at %s. Nothing to validate." - % (schema_type, config_path) + % (schema_type.value, config_path) ) return False - if schema_type in ("network-config",): + if schema_type in (SchemaType.NETWORK_CONFIG,): decoded_config = UserDataTypeAndDecodedContent( - schema_type, decoded_content + schema_type.value, decoded_content ) else: decoded_config = _get_config_type_and_rendered_userdata( @@ -943,27 +1113,33 @@ # Return a meaningful message on empty cloud-config if not annotate: raise RuntimeError( - f"{schema_type} {config_path} is not a YAML dict." + f"{schema_type.value} {config_path} is not a YAML dict." 
) - if schema_type == "network-config": - # Pop optional top-level "network" key when present - netcfg = cloudconfig.get("network", cloudconfig) - if not netcfg: + if schema_type == SchemaType.NETWORK_CONFIG: + if not cloudconfig.get("network", cloudconfig): print("Skipping network-config schema validation on empty config.") return False - elif netcfg.get("version") != 1: + network_version = network_schema_version(cloudconfig) + if network_version == 2: + if netplan_validate_network_schema( + network_config=cloudconfig, strict=True, annotate=annotate + ): + return True # schema validation performed by netplan + if network_version != 1: + # Validation requires JSON schema definition in + # cloudinit/config/schemas/schema-network-config-v1.json print( "Skipping network-config schema validation." " No network schema for version:" - f" {netcfg.get('version')}" + f" {network_schema_version(cloudconfig)}" ) return False try: if not validate_cloudconfig_schema( - cloudconfig, schema, strict=True, log_deprecations=False + cloudconfig, schema=schema, strict=True, log_deprecations=False ): print( - f"Skipping {schema_type} schema validation." + f"Skipping {schema_type.value} schema validation." " Jsonschema dependency missing." ) return False @@ -1280,7 +1456,7 @@ """ if schema is None: - schema = get_schema(schema_type="cloud-config") + schema = get_schema() if not meta or not schema: raise ValueError("Expected non-empty meta and schema") keys = set(meta.keys()) @@ -1381,7 +1557,7 @@ return os.path.join(os.path.dirname(os.path.abspath(__file__)), "schemas") -def get_schema(schema_type: str = "cloud-config") -> dict: +def get_schema(schema_type: SchemaType = SchemaType.CLOUD_CONFIG) -> dict: """Return jsonschema for a specific type. Return empty schema when no specific schema file exists. @@ -1391,11 +1567,11 @@ ) full_schema = None try: - full_schema = json.loads(load_file(schema_file)) + full_schema = json.loads(load_text_file(schema_file)) except (IOError, OSError): LOG.warning( - "Skipping %s schema valiation. No JSON schema file found %s.", - schema_type, + "Skipping %s schema validation. No JSON schema file found %s.", + schema_type.value, schema_file, ) return {} @@ -1424,10 +1600,13 @@ "-t", "--schema-type", type=str, - choices=["cloud-config", "network-config"], + choices=[ + SchemaType.CLOUD_CONFIG.value, + SchemaType.NETWORK_CONFIG.value, + ], help=( "When providing --config-file, the schema type to validate config" - " against. Default: cloud-config" + f" against. Default: {SchemaType.CLOUD_CONFIG}" ), ) parser.add_argument( @@ -1467,8 +1646,8 @@ return parser -def handle_schema_args(name, args): - """Handle provided schema args and perform the appropriate actions.""" +def _assert_exclusive_args(args): + """Error or warn on invalid exclusive parameter combinations.""" exclusive_args = [args.config_file, args.docs, args.system] if len([arg for arg in exclusive_args if arg]) != 1: error( @@ -1485,10 +1664,48 @@ "Invalid flag combination. 
Cannot use --annotate with --docs", sys_exit=True, ) + + +def get_config_paths_from_args( + args, +) -> Tuple[str, List[InstanceDataPart]]: + """Return appropriate instance-data.json and instance data parts + + Based on command-line args and user permissions, determine the + appropriate instance-data.json to source for jinja templates and + a list of applicable InstanceDataParts such as user-data, vendor-data + and network-config for which to validate schema. Avoid returning any + InstanceDataParts when the expected config_path does not exist. + + :return: A tuple of the instance-data.json path and a list of + viable InstanceDataParts present on the system. + """ + + def get_processed_or_fallback_path( + paths: Paths, + primary_path_key: str, + raw_fallback_path_key: str, + ) -> str: + """Get processed data path when non-empty, or fall back to the raw + data path. + + - When the primary path and raw path both exist and are empty, + prefer the primary path. + - When the primary path is empty but the raw fallback path is + non-empty, this indicates that invalid raw user-data was provided + and ignored: cloud-init emitted a warning and did not process the + unknown raw user-data. + In the case of an invalid raw user-data header, prefer + raw_fallback_path_key so sensible, actionable warnings can be + reported to the user about the raw unparsable user-data. + """ + primary_datapath = paths.get_ipath(primary_path_key) or "" + with suppress(FileNotFoundError): + if not os.stat(primary_datapath).st_size: + raw_path = paths.get_ipath(raw_fallback_path_key) or "" + if os.stat(raw_path).st_size: + return raw_path + return primary_datapath + try: paths = read_cfg_paths(fetch_existing_datasource="trust") except (IOError, OSError) as e: @@ -1511,8 +1728,19 @@ instance_data_path = paths.get_runpath("instance_data") else: instance_data_path = paths.get_runpath("instance_data_sensitive") + config_files: List[InstanceDataPart] = [] if args.config_file: - config_files = ((args.schema_type, args.config_file),) + if args.schema_type: + schema_type = SchemaType(args.schema_type) + else: + schema_type = SchemaType.CLOUD_CONFIG + if schema_type == SchemaType.NETWORK_CONFIG: + instancedata_type = InstanceDataType.NETWORK_CONFIG + else: + instancedata_type = InstanceDataType.USERDATA + config_files.append( + InstanceDataPart(instancedata_type, schema_type, args.config_file) + ) else: if os.getuid() != 0: error( @@ -1520,92 +1748,111 @@ " user. Try using sudo.", sys_exit=True, ) - userdata_file = paths.get_ipath("cloud_config") - if not userdata_file: - error( - "Unable to obtain user data file. No instance data available", - sys_exit=True, - ) - return # Helps typing - - # Prefer raw user-data.txt when processed cloud-config is empty and
- try: - if os.stat(userdata_file).st_size == 0: - raw_userdata_file = paths.get_ipath("userdata_raw") - if os.stat(raw_userdata_file).st_size: - userdata_file = raw_userdata_file - except FileNotFoundError: - # Error handling on absent userdata_file below - pass - - config_files = (("user-data", userdata_file),) - supplemental_config_files = ( - ("vendor-data", paths.get_ipath("vendor_cloud_config")), - ("vendor2-data", paths.get_ipath("vendor2_cloud_config")), - ("network-config", paths.get_ipath("network_config")), - ) - for cfg_type, cfg_file in supplemental_config_files: - if cfg_file and os.path.exists(cfg_file): - config_files += ((cfg_type, cfg_file),) - if not os.path.exists(config_files[0][1]): + userdata_file = get_processed_or_fallback_path( + paths, "cloud_config", "userdata_raw" + ) + config_files.append( + InstanceDataPart( + InstanceDataType.USERDATA, + SchemaType.CLOUD_CONFIG, + userdata_file, + ) + ) + supplemental_config_files: List[InstanceDataPart] = [ + InstanceDataPart( + InstanceDataType.VENDORDATA, + SchemaType.CLOUD_CONFIG, + get_processed_or_fallback_path( + paths, "vendor_cloud_config", "vendordata_raw" + ), + ), + InstanceDataPart( + InstanceDataType.VENDOR2DATA, + SchemaType.CLOUD_CONFIG, + get_processed_or_fallback_path( + paths, "vendor2_cloud_config", "vendordata2_raw" + ), + ), + InstanceDataPart( + InstanceDataType.NETWORK_CONFIG, + SchemaType.NETWORK_CONFIG, + paths.get_ipath("network_config") or "", + ), + ] + for data_part in supplemental_config_files: + if data_part.config_path and os.path.exists(data_part.config_path): + config_files.append(data_part) + if not os.path.exists(config_files[0].config_path): error( - f"Config file {config_files[0][1]} does not exist", + f"Config file {config_files[0].config_path} does not exist", fmt="Error: {}", sys_exit=True, ) + return instance_data_path, config_files + + +def handle_schema_args(name, args): + """Handle provided schema args and perform the appropriate actions.""" + _assert_exclusive_args(args) + full_schema = get_schema() + if args.docs: + print(load_doc(args.docs)) + return + instance_data_path, config_files = get_config_paths_from_args(args) nested_output_prefix = "" multi_config_output = bool(len(config_files) > 1) if multi_config_output: print( "Found cloud-config data types: %s" - % ", ".join(cfg_type for cfg_type, _ in config_files) + % ", ".join(str(cfg_part.config_type) for cfg_part in config_files) ) nested_output_prefix = " " error_types = [] - for idx, (cfg_type, cfg_file) in enumerate(config_files, 1): + for idx, cfg_part in enumerate(config_files, 1): performed_schema_validation = False if multi_config_output: - print(f"\n{idx}. {cfg_type} at {cfg_file}:") - if cfg_type == "network-config": - cfg_schema = get_schema(cfg_type) - schema_type = cfg_type + print( + f"\n{idx}. 
{cfg_part.config_type} at {cfg_part.config_path}:" + ) + if cfg_part.schema_type == SchemaType.NETWORK_CONFIG: + cfg_schema = get_schema(cfg_part.schema_type) else: cfg_schema = full_schema - cfg_type = "user-data" if cfg_type == "cloud-config" else cfg_type - schema_type = "cloud-config" try: performed_schema_validation = validate_cloudconfig_file( - cfg_file, + cfg_part.config_path, cfg_schema, - schema_type, + cfg_part.schema_type, args.annotate, instance_data_path, ) except SchemaValidationError as e: - if not cfg_type: - cfg_type = "UNKNOWN_CONFIG_HEADER" if not args.annotate: - print(f"{nested_output_prefix}Invalid {cfg_type} {cfg_file}") + print( + f"{nested_output_prefix}Invalid" + f" {cfg_part.config_type} {cfg_part.config_path}" + ) error( str(e), fmt=nested_output_prefix + "Error: {}\n", ) - error_types.append(cfg_type) + error_types.append(cfg_part.config_type) except RuntimeError as e: - print(f"{nested_output_prefix}Invalid {cfg_type}") + print(f"{nested_output_prefix}Invalid {cfg_part.config_type!s}") error(str(e), fmt=nested_output_prefix + "Error: {}\n") - error_types.append(cfg_type) + error_types.append(cfg_part.config_type) else: if performed_schema_validation: - cfg = cfg_file if args.config_file else cfg_type - print(f"{nested_output_prefix}Valid schema {cfg}") + if args.config_file: + cfg = cfg_part.config_path + else: + cfg = cfg_part.config_type + print(f"{nested_output_prefix}Valid schema {cfg!s}") if error_types: error( - ", ".join(error_type for error_type in error_types), + ", ".join(str(error_type) for error_type in error_types), fmt="Error: Invalid schema: {}\n", sys_exit=True, ) diff -Nru cloud-init-23.4.4/cloudinit/config/schemas/schema-cloud-config-v1.json cloud-init-24.1.3/cloudinit/config/schemas/schema-cloud-config-v1.json --- cloud-init-23.4.4/cloudinit/config/schemas/schema-cloud-config-v1.json 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/schemas/schema-cloud-config-v1.json 2024-03-27 13:14:04.000000000 +0000 @@ -34,7 +34,6 @@ "locale", "lxd", "mcollective", - "migrator", "mounts", "ntp", "package-update-upgrade-install", @@ -51,8 +50,6 @@ "resolv_conf", "rh-subscription", "rh_subscription", - "rightscale-userdata", - "rightscale_userdata", "rsyslog", "runcmd", "salt-minion", @@ -87,6 +84,7 @@ "ubuntu_autoinstall", "ubuntu-drivers", "ubuntu_drivers", + "ubuntu_pro", "update-etc-hosts", "update_etc_hosts", "update-hostname", @@ -104,6 +102,97 @@ "zypper_add_repo" ] }, + "ubuntu_pro.properties": { + "type": "object", + "additionalProperties": false, + "properties": { + "enable": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Optional list of Ubuntu Pro services to enable. Any of: cc-eal, cis, esm-infra, fips, fips-updates, livepatch. By default, a given contract token will automatically enable a number of services, use this list to supplement which services should additionally be enabled. Any service unavailable on a given Ubuntu release or unentitled in a given contract will remain disabled. In Ubuntu Pro instances, if this list is given, then only those services will be enabled, ignoring contract defaults. Passing beta services here will cause an error." + }, + "enable_beta": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Optional list of Ubuntu Pro beta services to enable. By default, a given contract token will automatically enable a number of services, use this list to supplement which services should additionally be enabled. 
diff -Nru cloud-init-23.4.4/cloudinit/config/schemas/schema-cloud-config-v1.json cloud-init-24.1.3/cloudinit/config/schemas/schema-cloud-config-v1.json --- cloud-init-23.4.4/cloudinit/config/schemas/schema-cloud-config-v1.json 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/schemas/schema-cloud-config-v1.json 2024-03-27 13:14:04.000000000 +0000 @@ -34,7 +34,6 @@ "locale", "lxd", "mcollective", - "migrator", "mounts", "ntp", "package-update-upgrade-install", @@ -51,8 +50,6 @@ "resolv_conf", "rh-subscription", "rh_subscription", - "rightscale-userdata", - "rightscale_userdata", "rsyslog", "runcmd", "salt-minion", @@ -87,6 +84,7 @@ "ubuntu_autoinstall", "ubuntu-drivers", "ubuntu_drivers", + "ubuntu_pro", "update-etc-hosts", "update_etc_hosts", "update-hostname", @@ -104,6 +102,97 @@ "zypper_add_repo" ] }, + "ubuntu_pro.properties": { + "type": "object", + "additionalProperties": false, + "properties": { + "enable": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Optional list of Ubuntu Pro services to enable. Any of: cc-eal, cis, esm-infra, fips, fips-updates, livepatch. By default, a given contract token will automatically enable a number of services; use this list to supplement which services should additionally be enabled. Any service unavailable on a given Ubuntu release or unentitled in a given contract will remain disabled. In Ubuntu Pro instances, if this list is given, then only those services will be enabled, ignoring contract defaults. Passing beta services here will cause an error." + }, + "enable_beta": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Optional list of Ubuntu Pro beta services to enable. By default, a given contract token will automatically enable a number of services; use this list to supplement which services should additionally be enabled. Any service unavailable on a given Ubuntu release or unentitled in a given contract will remain disabled. In Ubuntu Pro instances, if this list is given, then only those services will be enabled, ignoring contract defaults." + }, + "token": { + "type": "string", + "description": "Contract token obtained from https://ubuntu.com/pro to attach. Required for non-Pro instances." + }, + "features": { + "type": "object", + "description": "Ubuntu Pro features.", + "additionalProperties": false, + "properties": { + "disable_auto_attach": { + "type": "boolean", + "description": "Optional boolean for controlling whether ua-auto-attach.service (in Ubuntu Pro instances) will be attempted each boot. Default: ``false``", + "default": false + } + } + }, + "config": { + "type": "object", + "description": "Configuration settings or override Ubuntu Pro config.", + "additionalProperties": true, + "properties": { + "http_proxy": { + "type": [ + "string", + "null" + ], + "format": "uri", + "description": "Ubuntu Pro HTTP Proxy URL or null to unset." + }, + "https_proxy": { + "type": [ + "string", + "null" + ], + "format": "uri", + "description": "Ubuntu Pro HTTPS Proxy URL or null to unset." + }, + "global_apt_http_proxy": { + "type": [ + "string", + "null" + ], + "format": "uri", + "description": "HTTP Proxy URL used for all APT repositories on a system or null to unset. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``" + }, + "global_apt_https_proxy": { + "type": [ + "string", + "null" + ], + "format": "uri", + "description": "HTTPS Proxy URL used for all APT repositories on a system or null to unset. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``" + }, + "ua_apt_http_proxy": { + "type": [ + "string", + "null" + ], + "format": "uri", + "description": "HTTP Proxy URL used only for Ubuntu Pro APT repositories or null to unset. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``" + }, + "ua_apt_https_proxy": { + "type": [ + "string", + "null" + ], + "format": "uri", + "description": "HTTPS Proxy URL used only for Ubuntu Pro APT repositories or null to unset. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``" + } + } + } + } + },
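Since ``ubuntu_pro.properties`` is plain JSON Schema, it can be exercised outside cloud-init with the third-party ``jsonschema`` package; a minimal sketch, assuming a hand-trimmed copy of the definition above (the token value is a placeholder):

import jsonschema

ubuntu_pro_schema = {
    "type": "object",
    "additionalProperties": False,
    "properties": {
        "enable": {"type": "array", "items": {"type": "string"}},
        "enable_beta": {"type": "array", "items": {"type": "string"}},
        "token": {"type": "string"},
    },
}

sample = {"token": "C1...", "enable": ["esm-infra", "livepatch"]}
# Raises jsonschema.ValidationError on a bad document, e.g. a typo'd key.
jsonschema.validate(instance=sample, schema=ubuntu_pro_schema)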
"users_groups.groups_by_groupname": { "additionalProperties": false, "patternProperties": { @@ -231,15 +320,15 @@ "type": "boolean" }, "passwd": { - "description": "Hash of user password applied when user does not exist. This will NOT be applied if the user already exists. To generate this hash, run: mkpasswd --method=SHA-512 --rounds=4096. **Note:** While hashed password is better than plain text, using ``passwd`` in user-data represents a security risk as user-data could be accessible by third-parties depending on your cloud platform.", + "description": "Hash of user password applied when user does not exist. This will NOT be applied if the user already exists. To generate this hash, run: ``mkpasswd --method=SHA-512 --rounds=500000``. **Note:** Your password may be visible to unprivileged users on your system, depending on your cloud's security model. Check if your cloud's IMDS server is visible from an unprivileged user to evaluate risk.", "type": "string" }, "hashed_passwd": { - "description": "Hash of user password to be applied. This will be applied even if the user is pre-existing. To generate this hash, run: mkpasswd --method=SHA-512 --rounds=4096. **Note:** While ``hashed_password`` is better than ``plain_text_passwd``, using ``passwd`` in user-data represents a security risk as user-data could be accessible by third-parties depending on your cloud platform.", + "description": "Hash of user password to be applied. This will be applied even if the user is preexisting. To generate this hash, run: ``mkpasswd --method=SHA-512 --rounds=500000``. **Note:** Your password may be visible to unprivileged users on your system, depending on your cloud's security model. Check if your cloud's IMDS server is visible from an unprivileged user to evaluate risk.", "type": "string" }, "plain_text_passwd": { - "description": "Clear text of user password to be applied. This will be applied even if the user is pre-existing. There are many more secure options than using plain text passwords, such as ``ssh_import_id`` or ``hashed_passwd``. Do not use this in production as user-data and your password can be exposed.", + "description": "Clear text of user password to be applied. This will be applied even if the user is preexisting. **Note:** SSH keys or certificates are a safer choice for logging in to your system. For local escalation, supplying a hashed password is a safer choice than plain text. Your password may be visible to unprivileged users on your system, depending on your cloud's security model. An exposed plain text password is an immediate security concern. Check if your cloud's IMDS server is visible from an unprivileged user to evaluate risk.", "type": "string" },
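For the ``mkpasswd --method=SHA-512 --rounds=500000`` command cited in these descriptions, a roughly equivalent pure-Python sketch is below (the stdlib ``crypt`` module is Unix-only, deprecated since Python 3.11 and removed in 3.13; passlib's ``sha512_crypt`` is a common replacement):

import crypt

# SHA-512 crypt salt with the same round count the schema text recommends.
salt = crypt.mksalt(crypt.METHOD_SHA512, rounds=500_000)
hashed = crypt.crypt("correct horse battery staple", salt)
print(hashed)  # e.g. $6$rounds=500000$<salt>$<digest>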
"create_groups": { @@ -253,11 +342,11 @@ "type": "string" }, "selinux_user": { - "description": "SELinux user for user's login. Default to default SELinux user.", + "description": "SELinux user for user's login. Default: the default SELinux user.", "type": "string" }, "shell": { - "description": "Path to the user's login shell. The default is to set no shell, which results in a system-specific default being used.", + "description": "Path to the user's login shell. Default: the host system's default shell.", "type": "string" }, "snapuser": { @@ -273,7 +362,7 @@ "minItems": 1 }, "ssh_import_id": { - "description": "List of SSH IDs to import for user. Can not be combined with ``ssh_redirect_user``.", + "description": "List of ssh ids to import for user. Cannot be combined with ``ssh_redirect_user``. See the man page[1] for more details. [1] https://manpages.ubuntu.com/manpages/noble/en/man1/ssh-import-id.1.html", "type": "array", "items": { "type": "string" @@ -308,7 +397,7 @@ ] }, "uid": { - "description": "The user's ID. Default is next available value.", + "description": "The user's ID. Default: the system default value.", "oneOf": [ { "type": "integer" @@ -379,7 +468,7 @@ "deprecated_description": "Use ``remove_defaults`` instead." }, "remove_defaults": { - "description": "Remove default CA certificates if true. Default: false", + "description": "Remove default CA certificates if true. Default: ``false``", "type": "boolean", "default": false }, @@ -421,7 +510,7 @@ ] } }, - "merge_defintion": { + "merge_definition": { "oneOf": [ { "type": "string" @@ -484,10 +573,10 @@ "description": "The launch index for the specified cloud-config." }, "merge_how": { - "$ref": "#/$defs/merge_defintion" + "$ref": "#/$defs/merge_definition" }, "merge_type": { - "$ref": "#/$defs/merge_defintion" + "$ref": "#/$defs/merge_definition" } } }, @@ -871,7 +960,7 @@ }, "conf": { "type": "string", - "description": "Specify configuration for apt, such as proxy configuration. This configuration is specified as a string. For multiline APT configuration, make sure to follow yaml syntax." + "description": "Specify configuration for apt, such as proxy configuration. This configuration is specified as a string. For multi-line APT configuration, make sure to follow YAML syntax." }, "https_proxy": { "type": "string", @@ -1060,7 +1149,7 @@ "client_key": { "type": "string", "default": "/etc/chef/client.pem", - "description": "Optional path for client_cert. Default to ``/etc/chef/client.pem``." + "description": "Optional path for client_cert. Default: ``/etc/chef/client.pem``." }, "encrypted_data_bag_secret": { "type": "string", @@ -1095,7 +1184,7 @@ "log_location": { "type": "string", "default": "/var/log/chef/client.log", - "description": "Specifies the location of the chef lof file. By default, the location is specified at ``/var/log/chef/client.log``." + "description": "Specifies the location of the chef log file. By default, the location is specified at ``/var/log/chef/client.log``." }, "node_name": { "type": "string", @@ -1109,7 +1198,7 @@ "omnibus_url_retries": { "type": "integer", "default": 5, - "description": "The number of retries that will be attempted to reach the Omnibus URL. Default is 5." + "description": "The number of retries that will be attempted to reach the Omnibus URL. Default: ``5``." }, "omnibus_version": { "type": "string", @@ -1180,7 +1269,7 @@ "properties": { "disable_ec2_metadata": { "default": false, - "description": "Set true to disable IPv4 routes to EC2 metadata. Default: false.", + "description": "Set true to disable IPv4 routes to EC2 metadata. Default: ``false``", "type": "boolean" } } @@ -1375,7 +1464,7 @@ "properties": { "mode": { "default": "auto", - "description": "The utility to use for resizing. Default: ``auto``\n\nPossible options:\n\n* ``auto`` - Use any available utility\n\n* ``growpart`` - Use growpart utility\n\n* ``gpart`` - Use BSD gpart utility\n\n* ``off`` - Take no action.", + "description": "The utility to use for resizing. Default: ``auto``\n\nPossible options:\n\n* ``auto`` - Use any available utility\n\n* ``growpart`` - Use growpart utility\n\n* ``gpart`` - Use BSD gpart utility\n\n* ``off`` - Take no action", "oneOf": [ { "enum": [ @@ -1541,9 +1630,7 @@ }, "ssh_key_console_blacklist": { "type": "array", - "default": [ - "ssh-dss" - ], + "default": [], "description": "Avoid printing matching SSH key types to the system console.", "items": { "type": "string" } @@ -1744,7 +1831,7 @@ }, "ipv4_dhcp_leases": { "type": "integer", - "description": "Number of DHCP leases to allocate within the range. Automatically calculated based on `ipv4_dhcp_first` and `ipv4_dchp_last` when unset." + "description": "Number of DHCP leases to allocate within the range. Automatically calculated based on `ipv4_dhcp_first` and `ipv4_dhcp_last` when unset." }, "ipv4_nat": { "type": "boolean", @@ -1819,16 +1906,6 @@ } } }, - "cc_migrator": { - "type": "object", - "properties": { - "migrate": { - "type": "boolean", - "default": true, - "description": "Whether to migrate legacy cloud-init semaphores to new format. Default: ``true``" - } - } - }, "cc_mounts": { "type": "object", "properties": { @@ -2103,7 +2180,6 @@ "items": { "type": "string", "enum": [ - "pub_key_dsa", "pub_key_rsa", "pub_key_ecdsa", "pub_key_ed25519", @@ -2266,7 +2342,7 @@ }, "conf": { "type": "object", - "description": "Every key present in the conf object will be added to puppet.conf. 
As such, section names should be one of: ``main``, ``server``, ``agent`` or ``user`` and keys should be valid puppet configuration options. The configuration is specified as a dictionary containing high-level ``<section_name>`` keys and lists of ``<key>=<value>`` pairs within each section. The ``certname`` key supports string substitutions for ``%i`` and ``%f``, corresponding to the instance id and fqdn of the machine respectively.\n\n``ca_cert`` is a special case. It won't be added to puppet.conf. It holds the puppetserver certificate in pem format. It should be a multi-line string (using the | yaml notation for multi-line strings).", + "description": "Every key present in the conf object will be added to puppet.conf. As such, section names should be one of: ``main``, ``server``, ``agent`` or ``user`` and keys should be valid puppet configuration options. The configuration is specified as a dictionary containing high-level ``<section_name>`` keys and lists of ``<key>=<value>`` pairs within each section. The ``certname`` key supports string substitutions for ``%i`` and ``%f``, corresponding to the instance id and fqdn of the machine respectively.\n\n``ca_cert`` is a special case. It won't be added to puppet.conf. It holds the puppetserver certificate in pem format. It should be a multi-line string (using the | YAML notation for multi-line strings).", "additionalProperties": false, "properties": { "main": { @@ -2619,7 +2695,7 @@ }, "data": { "type": "string", - "description": "This data will be written to ``file`` before data from the datasource. When using a multiline value or specifying binary data, be sure to follow yaml syntax and use the ``|`` and ``!binary`` yaml format specifiers when appropriate" + "description": "This data will be written to ``file`` before data from the datasource. When using a multi-line value or specifying binary data, be sure to follow YAML syntax and use the ``|`` and ``!binary`` YAML format specifiers when appropriate" }, "encoding": { "type": "string", @@ -2691,7 +2767,7 @@ "changed_description": "Use of non-boolean values for this field is deprecated." } ], - "description": "Sets whether or not to accept password authentication. ``true`` will enable password auth. ``false`` will disable. Default is to leave the value unchanged. In order for this config to be applied, SSH may need to be restarted. On systemd systems, this restart will only happen if the SSH service has already been started. On non-systemd systems, a restart will be attempted regardless of the service state." + "description": "Sets whether or not to accept password authentication. ``true`` will enable password auth. ``false`` will disable. Default: leave the value unchanged. In order for this config to be applied, SSH may need to be restarted. On systemd systems, this restart will only happen if the SSH service has already been started. On non-systemd systems, a restart will be attempted regardless of the service state." }, "chpasswd": { "type": "object", @@ -2703,7 +2779,7 @@ "description": "Whether to expire all user passwords such that a password will need to be reset on the user's next login. Default: ``true``" }, "users": { - "description": "This key represents a list of existing users to set passwords for. Each item under users contains the following required keys: ``name`` and ``password`` or in the case of a randomly generated password, ``name`` and ``type``. The ``type`` key has a default value of ``hash``, and may alternatively be set to ``text`` or ``RANDOM``.", + "description": "This key represents a list of existing users to set passwords for. Each item under users contains the following required keys: ``name`` and ``password`` or in the case of a randomly generated password, ``name`` and ``type``. The ``type`` key has a default value of ``hash``, and may alternatively be set to ``text`` or ``RANDOM``. Randomly generated passwords may be insecure; use at your own risk.", "type": "array", "items": { "minItems": 1,
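The three entry shapes that the ``users`` description above permits, written out as Python literals for illustration (user names invented):

chpasswd_users = [
    {"name": "alice", "type": "RANDOM"},                        # random password
    {"name": "bob", "type": "text", "password": "dontdothis"},  # plain text
    {"name": "carol", "password": "$6$rounds=500000$..."},      # "type" defaults to "hash"
]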
@@ -2900,10 +2976,10 @@ "properties": { "ssh_keys": { "type": "object", - "description": "A dictionary entries for the public and private host keys of each desired key type. Entries in the ``ssh_keys`` config dict should have keys in the format ``<key_type>_private``, ``<key_type>_public``, and, optionally, ``<key_type>_certificate``, e.g. ``rsa_private: <key>``, ``rsa_public: <key>``, and ``rsa_certificate: <cert>``. Not all key types have to be specified, ones left unspecified will not be used. If this config option is used, then separate keys will not be automatically generated. In order to specify multiline private host keys and certificates, use yaml multiline syntax.", + "description": "A dictionary of entries for the public and private host keys of each desired key type. Entries in the ``ssh_keys`` config dict should have keys in the format ``<key_type>_private``, ``<key_type>_public``, and, optionally, ``<key_type>_certificate``, e.g. ``rsa_private: <key>``, ``rsa_public: <key>``, and ``rsa_certificate: <cert>``. Not all key types have to be specified, ones left unspecified will not be used. If this config option is used, then separate keys will not be automatically generated. In order to specify multi-line private host keys and certificates, use YAML multi-line syntax. **Note:** Your ssh keys may be visible to unprivileged users on your system, depending on your cloud's security model.", "additionalProperties": false, "patternProperties": { - "^(dsa|ecdsa|ed25519|rsa)_(public|private|certificate)$": { + "^(ecdsa|ed25519|rsa)_(public|private|certificate)$": { "label": "", "type": "string" } @@ -2924,9 +3000,8 @@ }, "ssh_genkeytypes": { "type": "array", - "description": "The SSH key types to generate. Default: ``[rsa, dsa, ecdsa, ed25519]``", + "description": "The SSH key types to generate. Default: ``[rsa, ecdsa, ed25519]``", "default": [ - "dsa", "ecdsa", "ed25519", "rsa" @@ -2935,7 +3010,6 @@ "items": { "type": "string", "enum": [ - "dsa", "ecdsa", "ed25519", "rsa" @@ -2973,7 +3047,7 @@ }, "blacklist": { "type": "array", - "description": "The SSH key types to ignore when publishing. Default: ``[dsa]``", + "description": "The SSH key types to ignore when publishing. Default: ``[]``, which publishes all SSH key types", "items": { "type": "string" } @@ -2991,102 +3065,6 @@ } } },
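With DSA removed from both the host-key pattern and ``ssh_genkeytypes``, only RSA, ECDSA and Ed25519 remain. An illustrative sketch of regenerating exactly those host key types with stock OpenSSH tooling (standard ``/etc/ssh`` paths assumed; ``ssh-keygen`` prompts rather than overwrites if a key file already exists):

import subprocess

for key_type in ("rsa", "ecdsa", "ed25519"):
    # -q: quiet, -N "": empty passphrase, -f: output key path
    subprocess.run(
        ["ssh-keygen", "-q", "-t", key_type, "-N", "",
         "-f", f"/etc/ssh/ssh_host_{key_type}_key"],
        check=True,
    )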
- }, - "features": { - "type": "object", - "description": "Ubuntu Advantage features.", - "additionalProperties": false, - "properties": { - "disable_auto_attach": { - "type": "boolean", - "description": "Optional boolean for controlling if ua-auto-attach.service (in Ubuntu Pro instances) will be attempted each boot. Default: ``false``", - "default": false - } - } - }, - "config": { - "type": "object", - "description": "Configuration settings or override Ubuntu Advantage config.", - "additionalProperties": true, - "properties": { - "http_proxy": { - "type": [ - "string", - "null" - ], - "format": "uri", - "description": "Ubuntu Advantage HTTP Proxy URL or null to unset." - }, - "https_proxy": { - "type": [ - "string", - "null" - ], - "format": "uri", - "description": "Ubuntu Advantage HTTPS Proxy URL or null to unset." - }, - "global_apt_http_proxy": { - "type": [ - "string", - "null" - ], - "format": "uri", - "description": "HTTP Proxy URL used for all APT repositories on a system or null to unset. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``" - }, - "global_apt_https_proxy": { - "type": [ - "string", - "null" - ], - "format": "uri", - "description": "HTTPS Proxy URL used for all APT repositories on a system or null to unset. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``" - }, - "ua_apt_http_proxy": { - "type": [ - "string", - "null" - ], - "format": "uri", - "description": "HTTP Proxy URL used only for Ubuntu Advantage APT repositories or null to unset. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``" - }, - "ua_apt_https_proxy": { - "type": [ - "string", - "null" - ], - "format": "uri", - "description": "HTTPS Proxy URL used only for Ubuntu Advantage APT repositories or null to unset. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``" - } - } - } - } - } - } - }, "cc_ubuntu_drivers": { "type": "object", "properties": { @@ -3107,7 +3085,7 @@ }, "version": { "type": "string", - "description": "The version of the driver to install (e.g. \"390\", \"410\"). Defaults to the latest version." + "description": "The version of the driver to install (e.g. \"390\", \"410\"). Default: latest version." } } } @@ -3115,12 +3093,26 @@ } } }, + "cc_ubuntu_pro": { + "type": "object", + "properties": { + "ubuntu_pro": { + "$ref": "#/$defs/ubuntu_pro.properties" + }, + "ubuntu_advantage": { + "$ref": "#/$defs/ubuntu_pro.properties", + "deprecated": true, + "deprecated_version": "24.1", + "deprecated_description": "Use ``ubuntu_pro`` instead." + } + } + }, "cc_update_etc_hosts": { "type": "object", "properties": { "manage_etc_hosts": { "default": false, - "description": "Whether to manage ``/etc/hosts`` on the system. If ``true``, render the hosts file using ``/etc/cloud/templates/hosts.tmpl`` replacing ``$hostname`` and ``$fdqn``. If ``localhost``, append a ``127.0.1.1`` entry that resolves from FQDN and hostname every boot. Default: ``false``.", + "description": "Whether to manage ``/etc/hosts`` on the system. If ``true``, render the hosts file using ``/etc/cloud/templates/hosts.tmpl`` replacing ``$hostname`` and ``$fdqn``. If ``localhost``, append a ``127.0.1.1`` entry that resolves from FQDN and hostname every boot. Default: ``false``", "oneOf": [ { "enum": [ @@ -3322,7 +3314,7 @@ "base64", "text/plain" ], - "description": "Optional encoding type of the content. Default is ``text/plain`` and no content decoding is performed. 
Supported encoding types are: gz, gzip, gz+base64, gzip+base64, gz+b64, gzip+b64, b64, base64" + "description": "Optional encoding type of the content. Default: ``text/plain``. No decoding is performed by default. Supported encoding types are: gz, gzip, gz+base64, gzip+base64, gz+b64, gzip+b64, b64, base64" }, "append": { "type": "boolean", @@ -3693,9 +3685,6 @@ "$ref": "#/$defs/cc_mcollective" }, { - "$ref": "#/$defs/cc_migrator" - }, - { "$ref": "#/$defs/cc_mounts" }, { @@ -3762,10 +3751,10 @@ "$ref": "#/$defs/cc_timezone" }, { - "$ref": "#/$defs/cc_ubuntu_advantage" + "$ref": "#/$defs/cc_ubuntu_drivers" }, { - "$ref": "#/$defs/cc_ubuntu_drivers" + "$ref": "#/$defs/cc_ubuntu_pro" }, { "$ref": "#/$defs/cc_update_etc_hosts" @@ -3882,6 +3871,7 @@ "swap": {}, "timezone": {}, "ubuntu_advantage": {}, + "ubuntu_pro": {}, "updates": {}, "user": {}, "users": {}, diff -Nru cloud-init-23.4.4/cloudinit/config/schemas/schema-network-config-v1.json cloud-init-24.1.3/cloudinit/config/schemas/schema-network-config-v1.json --- cloud-init-23.4.4/cloudinit/config/schemas/schema-network-config-v1.json 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/config/schemas/schema-network-config-v1.json 2024-03-27 13:14:04.000000000 +0000 @@ -17,6 +17,7 @@ }, "name": { "type": "string", + "maxLength": 15, "description": "Desired device name should be less than 15 characters. Any characters exceeding 15 will be truncated. This is a limitation of the Linux kernel network-device structure." }, "mac_address": { @@ -24,7 +25,10 @@ "description": "The lowercase MAC address of the physical device." }, "mtu": { - "type": "integer", + "type": [ + "integer", + "null" + ], "description": "The MTU size in bytes. The ``mtu`` key represents a device's Maximum Transmission Unit, which is the largest size packet or frame, specified in octets (eight-bit bytes), that can be sent in a packet- or frame-based network. Specifying ``mtu`` is optional. Values too small or too large for a device may be ignored by that device." }, "subnets": { @@ -32,6 +36,10 @@ "items": { "$ref": "#/$defs/config_type_subnet" } + }, + "accept-ra": { + "type": "boolean", + "description": "Whether to accept IPv6 Router Advertisements (RA) on this interface. If unset, it will not be rendered" } } }, @@ -69,7 +77,7 @@ "description": "The MTU size in bytes. This ``mtu`` key represents a device's Maximum Transmission Unit, which is the largest size packet or frame, specified in octets (eight-bit bytes), that can be sent in a packet- or frame-based network. Specifying ``mtu`` is optional. Values too small or too large for a device may be ignored by that device." }, "params": { - "desciption": "The ``params`` key in a bond holds a dictionary of bonding parameters. This dictionary may be empty. For more details on what the various bonding parameters mean please read the [Linux Kernel Bonding.txt](https://www.kernel.org/doc/Documentation/networking/bonding.txt).", + "description": "The ``params`` key in a bond holds a dictionary of bonding parameters. This dictionary may be empty. 
For more details on what the various bonding parameters mean please read the [Linux Kernel Bonding.txt](https://www.kernel.org/doc/Documentation/networking/bonding.txt).", "additionalProperties": false, "properties": { "bond-active_slave": { @@ -384,8 +392,7 @@ "additionalProperties": false, "required": [ "type", - "address", - "search" + "address" ], "properties": { "type": { @@ -396,7 +403,10 @@ }, "address": { "description": "List of IPv4 or IPv6 address of nameservers.", - "type": "array", + "type": [ + "array", + "string" + ], "items": { "type": "string" } @@ -440,10 +450,7 @@ }, "network": { "type": "string", - "description": "IPv4 network address with CIDR netmask notation or IPv6 with prefix length. Alias for ``destination`` and only read when ``destination`` key is absent.", - "deprecated": true, - "deprecated_version": "23.3", - "deprecated_description": "Use ``destination`` instead." + "description": "IPv4 network address with CIDR netmask notation or IPv6 with prefix length. Alias for ``destination`` and only read when ``destination`` key is absent. This exists for OpenStack support. OpenStack route definitions are passed through to v1 config and OpenStack's ``network_data.json`` uses ``network`` instead of ``destination``." }, "destination": { "type": "string", @@ -497,6 +504,10 @@ "type": "string", "description": "IPv4 subnet mask in dotted format or CIDR notation" }, + "broadcast": { + "type": "string", + "description": "IPv4 broadcast address in dotted format." + }, "gateway": { "type": "string", "description": "IPv4 address of the default gateway for this subnet." diff -Nru cloud-init-23.4.4/cloudinit/distros/__init__.py cloud-init-24.1.3/cloudinit/distros/__init__.py --- cloud-init-23.4.4/cloudinit/distros/__init__.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/distros/__init__.py 2024-03-27 13:14:04.000000000 +0000 @@ -17,6 +17,7 @@ import string import urllib.parse from collections import defaultdict +from contextlib import suppress from io import StringIO from typing import ( Any, @@ -48,11 +49,11 @@ from cloudinit.distros.package_management.utils import known_package_managers from cloudinit.distros.parsers import hosts from cloudinit.features import ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES -from cloudinit.net import activators, dhcp, eni, network_state, renderers +from cloudinit.net import activators, dhcp, renderers from cloudinit.net.network_state import parse_net_config_data from cloudinit.net.renderer import Renderer -# Used when a cloud-config module can be run on all cloud-init distibutions. +# Used when a cloud-config module can be run on all cloud-init distributions. # The value 'all' is surfaced in module documentation for distro support. ALL_DISTROS = "all" @@ -147,21 +148,26 @@ resolve_conf_fn = "/etc/resolv.conf" osfamily: str - dhcp_client_priority = [dhcp.IscDhclient, dhcp.Dhcpcd, dhcp.Udhcpc] + # Directory where the distro stores their DHCP leases. 
+ # Child classes should override this with their dhcp lease + # directory + dhclient_lease_directory: Optional[str] = None + # A regex to match DHCP lease file(s) + # Child classes should override this with a regex matching + # their lease file name format + dhclient_lease_file_regex: Optional[str] = None def __init__(self, name, cfg, paths): self._paths = paths self._cfg = cfg self.name = name self.networking: Networking = self.networking_cls() - self.dhcp_client_priority = [ - dhcp.IscDhclient, - dhcp.Dhcpcd, - dhcp.Udhcpc, - ] + self.dhcp_client_priority = dhcp.ALL_DHCP_CLIENTS self.net_ops = iproute2.Iproute2 self._runner = helpers.Runners(paths) self.package_managers: List[PackageManager] = [] + self._dhcp_client = None + self._fallback_interface = None def _unpickle(self, ci_pkl_version: int) -> None: """Perform deserialization fixes for Distro.""" @@ -174,6 +180,10 @@ # either because it isn't present at all, or because it will be # missing expected instance state otherwise. self.networking = self.networking_cls() + if not hasattr(self, "_dhcp_client"): + self._dhcp_client = None + if not hasattr(self, "_fallback_interface"): + self._fallback_interface = None def _validate_entry(self, entry): if isinstance(entry, str): @@ -228,21 +238,29 @@ # First install packages using package manager(s) # supported by the distro - uninstalled = [] + total_failed: Set[str] = set() for manager in self.package_managers: - to_try = ( - packages_by_manager.get(manager.__class__, set()) - | generic_packages + + manager_packages = packages_by_manager.get( + manager.__class__, set() ) + + to_try = manager_packages | generic_packages + # Remove any previously failed packages that we will retry with this package manager + total_failed.difference_update(to_try) + if not manager.available(): + LOG.debug("Package manager '%s' not available", manager.name) + total_failed.update(to_try) + continue if not to_try: continue - uninstalled = manager.install_packages(to_try) - failed = { - pkg for pkg in uninstalled if pkg not in generic_packages - } + failed = manager.install_packages(to_try) + total_failed.update(failed) if failed: LOG.info(error_message, failed) - generic_packages = set(uninstalled) + # Ensure we don't attempt to install packages specific to + # one particular package manager using another package manager + generic_packages = set(failed) - manager_packages # Now attempt any specified package managers not explicitly supported # by distro @@ -250,22 +268,75 @@ if manager_type.name in [p.name for p in self.package_managers]: # We already installed/attempted these; don't try again continue - uninstalled.extend( + total_failed.update( manager_type.from_config( self._runner, self._cfg ).install_packages(pkglist=packages) ) - if uninstalled: - raise PackageInstallerError(error_message % uninstalled) + if total_failed: + raise PackageInstallerError(error_message % total_failed) + + @property + def dhcp_client(self) -> dhcp.DhcpClient: + """access the distro's preferred dhcp client + + if no client has been selected yet select one - uses + self.dhcp_client_priority, which may be overridden in each distro's + object to eliminate checking for clients which will not be provided + by the distro + """ + if self._dhcp_client: + return self._dhcp_client - def _write_network(self, settings): - """Deprecated. 
Remove if/when arch and gentoo support renderers.""" - raise NotImplementedError( - "Legacy function '_write_network' was called in distro '%s'.\n" - "_write_network_config needs implementation.\n" % self.name + # no client has been selected yet, so pick one + # + # set the default priority list to the distro-defined priority list + dhcp_client_priority = self.dhcp_client_priority + + # if the configuration includes a network.dhcp_client_priority list + # then attempt to use it + config_priority = util.get_cfg_by_path( + self._cfg, ("network", "dhcp_client_priority"), [] ) + if config_priority: + # user or image builder configured a custom dhcp client priority + # list + found_clients = [] + LOG.debug( + "Using configured dhcp client priority list: %s", + config_priority, + ) + for client_configured in config_priority: + for client_class in dhcp.ALL_DHCP_CLIENTS: + if client_configured == client_class.client_name: + found_clients.append(client_class) + break + else: + LOG.warning( + "Configured dhcp client %s is not supported, skipping", + client_configured, + ) + # If dhcp_client_priority is defined in the configuration, but none + # of the defined clients are supported by cloud-init, then we don't + # override the distro default. If at least one client in the + # configured list exists, then we use that for our list of clients + # to check. + if found_clients: + dhcp_client_priority = found_clients + + # iterate through our priority list and use the first client that is + # installed on the system + for client in dhcp_client_priority: + try: + self._dhcp_client = client() + LOG.debug("DHCP client selected: %s", client.client_name) + return self._dhcp_client + except (dhcp.NoDHCPLeaseMissingDhclientError,): + LOG.debug("DHCP client not found: %s", client.client_name) + raise dhcp.NoDHCPLeaseMissingDhclientError() + @property def network_activator(self) -> Optional[Type[activators.NetworkActivator]]: """Return the configured network activator for this environment.""" @@ -352,41 +423,6 @@ data_source=data_source, mirror_info=arch_info ) - def apply_network(self, settings, bring_up=True): - """Deprecated. Remove if/when arch and gentoo support renderers.""" - # this applies network where 'settings' is interfaces(5) style - # it is obsolete compared to apply_network_config - # Write it out - - # pylint: disable=assignment-from-no-return - # We have implementations in arch and gentoo still - dev_names = self._write_network(settings) - # pylint: enable=assignment-from-no-return - # Now try to bring them up - if bring_up: - return self._bring_up_interfaces(dev_names) - return False - - def _apply_network_from_network_config(self, netconfig, bring_up=True): - """Deprecated. Remove if/when arch and gentoo support renderers.""" - distro = self.__class__ - LOG.warning( - "apply_network_config is not currently implemented " - "for distribution '%s'. Attempting to use apply_network", - distro, - ) - header = "\n".join( - [ - "# Converted from network_config for distro %s" % distro, - "# Implementation of _write_network_config is needed.", - ] - ) - ns = network_state.parse_net_config_data(netconfig) - contents = eni.network_state_to_eni( - ns, header=header, render_hwaddress=True - ) - return self.apply_network(contents, bring_up=bring_up) - def generate_fallback_config(self): return net.generate_fallback_config() @@ -399,16 +435,7 @@ Returns True if any devices failed to come up, otherwise False. 
""" - # This method is preferred to apply_network which only takes - # a much less complete network config format (interfaces(5)). - try: - renderer = self._get_renderer() - except NotImplementedError: - # backwards compat until all distros have apply_network_config - return self._apply_network_from_network_config( - netconfig, bring_up=bring_up - ) - + renderer = self._get_renderer() network_state = parse_net_config_data(netconfig, renderer=renderer) self._write_network_state(network_state, renderer) @@ -563,7 +590,7 @@ def update_etc_hosts(self, hostname, fqdn): header = "" if os.path.exists(self.hosts_fn): - eh = hosts.HostsConf(util.load_file(self.hosts_fn)) + eh = hosts.HostsConf(util.load_text_file(self.hosts_fn)) else: eh = hosts.HostsConf("") header = util.make_header(base="added") @@ -611,20 +638,6 @@ return self._preferred_ntp_clients - def _bring_up_interface(self, device_name): - """Deprecated. Remove if/when arch and gentoo support renderers.""" - raise NotImplementedError - - def _bring_up_interfaces(self, device_names): - """Deprecated. Remove if/when arch and gentoo support renderers.""" - am_failed = 0 - for d in device_names: - if not self._bring_up_interface(d): - am_failed += 1 - if am_failed == 0: - return True - return False - def get_default_user(self): return self.get_option("default_user") @@ -632,7 +645,7 @@ """ Add a user to the system using standard GNU tools - This should be overriden on distros where useradd is not desirable or + This should be overridden on distros where useradd is not desirable or not available. """ # XXX need to make add_user idempotent somehow as we @@ -696,7 +709,7 @@ # that came in as a string like: groups: group1, group2 groups = [g.strip() for g in groups] - # kwargs.items loop below wants a comma delimeted string + # kwargs.items loop below wants a comma delimited string # that can go right through to the command. 
kwargs["groups"] = ",".join(groups) @@ -983,7 +996,7 @@ util.logexc(LOG, "Failed to write doas file %s", doas_file) raise e else: - if content not in util.load_file(doas_file): + if content not in util.load_text_file(doas_file): try: util.append_file(doas_file, content) except IOError as e: @@ -998,7 +1011,7 @@ sudoers_contents = "" base_exists = False if os.path.exists(sudo_base): - sudoers_contents = util.load_file(sudo_base) + sudoers_contents = util.load_text_file(sudo_base) base_exists = True found_include = False for line in sudoers_contents.splitlines(): @@ -1073,7 +1086,7 @@ util.logexc(LOG, "Failed to write sudoers file %s", sudo_file) raise e else: - if content not in util.load_file(sudo_file): + if content not in util.load_text_file(sudo_file): try: util.append_file(sudo_file, content) except IOError as e: @@ -1240,6 +1253,90 @@ "/bin/true", ] + (["-cf", config_file, interface] if config_file else [interface]) + @property + def fallback_interface(self): + """Determine the network interface used during local network config.""" + if self._fallback_interface is None: + self._fallback_interface = net.find_fallback_nic() + if not self._fallback_interface: + LOG.warning( + "Did not find a fallback interface on distro: %s.", + self.name, + ) + return self._fallback_interface + + @fallback_interface.setter + def fallback_interface(self, value): + self._fallback_interface = value + + @staticmethod + def get_proc_ppid(pid: int) -> Optional[int]: + """Return the parent pid of a process by parsing /proc/$pid/stat""" + match = Distro._get_proc_stat_by_index(pid, 4) + if match is not None: + with suppress(ValueError): + return int(match) + LOG.warning("/proc/%s/stat has an invalid ppid [%s]", pid, match) + return None + + @staticmethod + def get_proc_pgid(pid: int) -> Optional[int]: + """Return the parent pid of a process by parsing /proc/$pid/stat""" + match = Distro._get_proc_stat_by_index(pid, 5) + if match is not None: + with suppress(ValueError): + return int(match) + LOG.warning("/proc/%s/stat has an invalid pgid [%s]", pid, match) + return None + + @staticmethod + def _get_proc_stat_by_index(pid: int, field: int) -> Optional[int]: + """ + parse /proc/$pid/stat for a specific field as numbered in man:proc(5) + + param pid: integer to query /proc/$pid/stat for + param field: field number within /proc/$pid/stat to return + """ + try: + content: str = util.load_text_file( + "/proc/%s/stat" % pid, quiet=True + ).strip() # pyright: ignore + match = re.search( + r"^(\d+) (\(.+\)) ([RSDZTtWXxKPI]) (\d+) (\d+)", content + ) + if not match: + LOG.warning( + "/proc/%s/stat has an invalid contents [%s]", pid, content + ) + return None + return int(match.group(field)) + except IOError as e: + LOG.warning("Failed to load /proc/%s/stat. 
%s", pid, e) + except IndexError: + LOG.warning( + "Unable to match field %s of process pid=%s (%s) (%s)", + field, + pid, + content, # pyright: ignore + match, # pyright: ignore + ) + return None + + @staticmethod + def eject_media(device: str) -> None: + cmd = None + if subp.which("eject"): + cmd = ["eject", device] + elif subp.which("/lib/udev/cdrom_id"): + cmd = ["/lib/udev/cdrom_id", "--eject-media", device] + else: + raise subp.ProcessExecutionError( + cmd="eject_media_cmd", + description="eject command not found", + reason="neither eject nor /lib/udev/cdrom_id are found", + ) + subp.subp(cmd) + def _apply_hostname_transformations_to_url(url: str, transformations: list): """ @@ -1258,7 +1355,7 @@ :return: A string whose value is ``url`` with the hostname ``transformations`` - applied, or ``None`` if ``url`` is unparseable. + applied, or ``None`` if ``url`` is unparsable. """ try: parts = urllib.parse.urlsplit(url) diff -Nru cloud-init-23.4.4/cloudinit/distros/alpine.py cloud-init-24.1.3/cloudinit/distros/alpine.py --- cloud-init-23.4.4/cloudinit/distros/alpine.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/distros/alpine.py 2024-03-27 13:14:04.000000000 +0000 @@ -33,11 +33,15 @@ renderer_configs = { "eni": {"eni_path": network_conf_fn, "eni_header": NETWORK_FILE_HEADER} } + # Alpine stores dhclient leases at following location: + # /var/lib/dhcp/dhclient.leases + dhclient_lease_directory = "/var/lib/dhcp" + dhclient_lease_file_regex = r"dhclient\.leases" def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) # This will be used to restrict certain - # calls from repeatly happening (when they + # calls from repeatedly happening (when they # should only happen say once per instance...) self._runner = helpers.Runners(paths) self.default_locale = "C.UTF-8" @@ -86,6 +90,9 @@ if create_hostname_file: pass else: + LOG.info( + "create_hostname_file is False; hostname file not created" + ) return if not conf: conf = HostnameConf("") @@ -97,7 +104,7 @@ return (self.hostname_conf_fn, sys_hostname) def _read_hostname_conf(self, filename): - conf = HostnameConf(util.load_file(filename)) + conf = HostnameConf(util.load_text_file(filename)) conf.parse() return conf diff -Nru cloud-init-23.4.4/cloudinit/distros/amazon.py cloud-init-24.1.3/cloudinit/distros/amazon.py --- cloud-init-23.4.4/cloudinit/distros/amazon.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/distros/amazon.py 2024-03-27 13:14:04.000000000 +0000 @@ -14,5 +14,11 @@ class Distro(rhel.Distro): + # Amazon Linux 2 stores dhclient leases at following location: + # /var/lib/dhclient/dhclient--.leases + # Perhaps there could be a UUID in between two "-" in the file name + dhclient_lease_directory = "/var/lib/dhcp" + dhclient_lease_file_regex = r"dhclient-[\w-]+\.lease" + def update_package_sources(self): return None diff -Nru cloud-init-23.4.4/cloudinit/distros/arch.py cloud-init-24.1.3/cloudinit/distros/arch.py --- cloud-init-23.4.4/cloudinit/distros/arch.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/distros/arch.py 2024-03-27 13:14:04.000000000 +0000 @@ -5,13 +5,11 @@ # This file is part of cloud-init. See LICENSE file for license information. 
import logging -import os from cloudinit import distros, helpers, subp, util -from cloudinit.distros import PackageList, net_util +from cloudinit.distros import PackageList from cloudinit.distros.parsers.hostname import HostnameConf -from cloudinit.net.renderer import Renderer -from cloudinit.net.renderers import RendererNotFoundError +from cloudinit.net.netplan import CLOUDINIT_NETPLAN_FILE from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) @@ -19,11 +17,10 @@ class Distro(distros.Distro): locale_gen_fn = "/etc/locale.gen" - network_conf_dir = "/etc/netctl" init_cmd = ["systemctl"] # init scripts renderer_configs = { "netplan": { - "netplan_path": "/etc/netplan/50-cloud-init.yaml", + "netplan_path": CLOUDINIT_NETPLAN_FILE, "netplan_header": "# generated by cloud-init\n", "postcmds": True, } @@ -32,7 +29,7 @@ def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) # This will be used to restrict certain - # calls from repeatly happening (when they + # calls from repeatedly happening (when they # should only happen say once per instance...) self._runner = helpers.Runners(paths) self.osfamily = "arch" @@ -61,56 +58,6 @@ self.update_package_sources() self.package_command("", pkgs=pkglist) - def _get_renderer(self) -> Renderer: - try: - return super()._get_renderer() - except RendererNotFoundError as e: - # Fall back to old _write_network - raise NotImplementedError from e - - def _write_network(self, settings): - entries = net_util.translate_network(settings) - LOG.debug( - "Translated ubuntu style network settings %s into %s", - settings, - entries, - ) - return _render_network( - entries, - resolv_conf=self.resolve_conf_fn, - conf_dir=self.network_conf_dir, - enable_func=self._enable_interface, - ) - - def _enable_interface(self, device_name): - cmd = ["netctl", "reenable", device_name] - try: - (_out, err) = subp.subp(cmd) - if len(err): - LOG.warning( - "Running %s resulted in stderr output: %s", cmd, err - ) - except subp.ProcessExecutionError: - util.logexc(LOG, "Running interface command %s failed", cmd) - - def _bring_up_interface(self, device_name): - cmd = ["netctl", "restart", device_name] - LOG.debug( - "Attempting to run bring up interface %s using command %s", - device_name, - cmd, - ) - try: - (_out, err) = subp.subp(cmd) - if len(err): - LOG.warning( - "Running %s resulted in stderr output: %s", cmd, err - ) - return True - except subp.ProcessExecutionError: - util.logexc(LOG, "Running interface command %s failed", cmd) - return False - def _write_hostname(self, hostname, filename): conf = None try: @@ -124,6 +71,9 @@ if create_hostname_file: pass else: + LOG.info( + "create_hostname_file is False; hostname file not created" + ) return if not conf: conf = HostnameConf("") @@ -135,7 +85,7 @@ return (self.hostname_conf_fn, sys_hostname) def _read_hostname_conf(self, filename): - conf = HostnameConf(util.load_file(filename)) + conf = HostnameConf(util.load_text_file(filename)) conf.parse() return conf @@ -195,71 +145,3 @@ self._runner.run( "update-sources", self.package_command, ["-y"], freq=PER_INSTANCE ) - - -def _render_network( - entries, - target="/", - conf_dir="etc/netctl", - resolv_conf="etc/resolv.conf", - enable_func=None, -): - """Render the translate_network format into netctl files in target. - Paths will be rendered under target. 
- """ - - devs = [] - nameservers = [] - resolv_conf = subp.target_path(target, resolv_conf) - conf_dir = subp.target_path(target, conf_dir) - - for (dev, info) in entries.items(): - if dev == "lo": - # no configuration should be rendered for 'lo' - continue - devs.append(dev) - net_fn = os.path.join(conf_dir, dev) - net_cfg = { - "Connection": "ethernet", - "Interface": dev, - "IP": info.get("bootproto"), - "Address": "%s/%s" % (info.get("address"), info.get("netmask")), - "Gateway": info.get("gateway"), - "DNS": info.get("dns-nameservers", []), - } - util.write_file(net_fn, convert_netctl(net_cfg)) - if enable_func and info.get("auto"): - enable_func(dev) - if "dns-nameservers" in info: - nameservers.extend(info["dns-nameservers"]) - - if nameservers: - util.write_file(resolv_conf, convert_resolv_conf(nameservers)) - return devs - - -def convert_netctl(settings): - """Given a dictionary, returns a string in netctl profile format. - - netctl profile is described at: - https://git.archlinux.org/netctl.git/tree/docs/netctl.profile.5.txt - - Note that the 'Special Quoting Rules' are not handled here.""" - result = [] - for key in sorted(settings): - val = settings[key] - if val is None: - val = "" - elif isinstance(val, (tuple, list)): - val = "(" + " ".join("'%s'" % v for v in val) + ")" - result.append("%s=%s\n" % (key, val)) - return "".join(result) - - -def convert_resolv_conf(settings): - """Returns a settings string formatted for resolv.conf.""" - result = "" - if isinstance(settings, list): - for ns in settings: - result = result + "nameserver %s\n" % ns - return result diff -Nru cloud-init-23.4.4/cloudinit/distros/bsd.py cloud-init-24.1.3/cloudinit/distros/bsd.py --- cloud-init-23.4.4/cloudinit/distros/bsd.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/distros/bsd.py 2024-03-27 13:14:04.000000000 +0000 @@ -32,7 +32,7 @@ def __init__(self, name, cfg, paths): super().__init__(name, cfg, paths) # This will be used to restrict certain - # calls from repeatly happening (when they + # calls from repeatedly happening (when they # should only happen say once per instance...) 
self._runner = helpers.Runners(paths) cfg["ssh_svcname"] = "sshd" @@ -130,7 +130,7 @@ cmd.extend(pkglist) # Allow the output of this to flow outwards (ie not be captured) - subp.subp(cmd, env=self._get_pkg_cmd_environ(), capture=False) + subp.subp(cmd, update_env=self._get_pkg_cmd_environ(), capture=False) def set_timezone(self, tz): distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz)) @@ -141,3 +141,11 @@ def chpasswd(self, plist_in: list, hashed: bool): for name, password in plist_in: self.set_passwd(name, password, hashed=hashed) + + @staticmethod + def get_proc_ppid(pid): + """ + Return the parent pid of a process by checking ps + """ + ppid, _ = subp.subp(["ps", "-oppid=", "-p", str(pid)]) + return int(ppid.strip()) diff -Nru cloud-init-23.4.4/cloudinit/distros/bsd_utils.py cloud-init-24.1.3/cloudinit/distros/bsd_utils.py --- cloud-init-23.4.4/cloudinit/distros/bsd_utils.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/distros/bsd_utils.py 2024-03-27 13:14:04.000000000 +0000 @@ -20,7 +20,7 @@ def get_rc_config_value(key, fn="/etc/rc.conf"): key_prefix = "{}=".format(key) - for line in util.load_file(fn).splitlines(): + for line in util.load_text_file(fn).splitlines(): if line.startswith(key_prefix): value = line.replace(key_prefix, "") return _unquote(value) @@ -30,7 +30,7 @@ lines = [] done = False value = shlex.quote(value) - original_content = util.load_file(fn) + original_content = util.load_text_file(fn) for line in original_content.splitlines(): if "=" in line: k, v = line.split("=", 1) diff -Nru cloud-init-23.4.4/cloudinit/distros/debian.py cloud-init-24.1.3/cloudinit/distros/debian.py --- cloud-init-23.4.4/cloudinit/distros/debian.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/distros/debian.py 2024-03-27 13:14:04.000000000 +0000 @@ -15,6 +15,7 @@ from cloudinit.distros.package_management.apt import Apt from cloudinit.distros.package_management.package_manager import PackageManager from cloudinit.distros.parsers.hostname import HostnameConf +from cloudinit.net.netplan import CLOUDINIT_NETPLAN_FILE LOG = logging.getLogger(__name__) @@ -34,7 +35,7 @@ hostname_conf_fn = "/etc/hostname" network_conf_fn = { "eni": "/etc/network/interfaces.d/50-cloud-init", - "netplan": "/etc/netplan/50-cloud-init.yaml", + "netplan": CLOUDINIT_NETPLAN_FILE, } renderer_configs = { "eni": { @@ -47,11 +48,15 @@ "postcmds": True, }, } + # Debian stores dhclient leases at following location: + # /var/lib/dhcp/dhclient..leases + dhclient_lease_directory = "/var/lib/dhcp" + dhclient_lease_file_regex = r"dhclient\.\w+\.leases" def __init__(self, name, cfg, paths): super().__init__(name, cfg, paths) # This will be used to restrict certain - # calls from repeatly happening (when they + # calls from repeatedly happening (when they # should only happen say once per instance...) self.osfamily = "debian" self.default_locale = "C.UTF-8" @@ -136,6 +141,9 @@ if create_hostname_file: pass else: + LOG.info( + "create_hostname_file is False; hostname file not created" + ) return if not conf: conf = HostnameConf("") @@ -147,7 +155,7 @@ return (self.hostname_conf_fn, sys_hostname) def _read_hostname_conf(self, filename): - conf = HostnameConf(util.load_file(filename)) + conf = HostnameConf(util.load_text_file(filename)) conf.parse() return conf @@ -230,7 +238,7 @@ bmsg = "Dynamic networking config may not apply." 
try: - contents = util.load_file(path) + contents = util.load_text_file(path) known_contents = ["auto eth0", "iface eth0 inet dhcp"] lines = [ f.strip() for f in contents.splitlines() if not f.startswith("#") @@ -253,7 +261,7 @@ raise ValueError("Invalid path: %s" % sys_path) if os.path.exists(sys_path): - locale_content = util.load_file(sys_path) + locale_content = util.load_text_file(sys_path) sys_defaults = util.load_shell_content(locale_content) sys_val = sys_defaults.get(keyname, "") diff -Nru cloud-init-23.4.4/cloudinit/distros/freebsd.py cloud-init-24.1.3/cloudinit/distros/freebsd.py --- cloud-init-23.4.4/cloudinit/distros/freebsd.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/distros/freebsd.py 2024-03-27 13:14:04.000000000 +0000 @@ -36,6 +36,10 @@ pkg_cmd_upgrade_prefix = ["pkg", "upgrade"] prefer_fqdn = True # See rc.conf(5) in FreeBSD home_dir = "/usr/home" + # FreeBSD has the following dhclient lease path: + # /var/db/dhclient.leases. + dhclient_lease_directory = "/var/db" + dhclient_lease_file_regex = r"dhclient.leases.\w+" @classmethod def reload_init(cls, rcs=None): @@ -173,7 +177,7 @@ def apply_locale(self, locale, out_fn=None): # Adjust the locales value to the new value newconf = StringIO() - for line in util.load_file(self.login_conf_fn).splitlines(): + for line in util.load_text_file(self.login_conf_fn).splitlines(): newconf.write( re.sub(r"^default:", r"default:lang=%s:" % locale, line) ) @@ -201,9 +205,7 @@ def _get_pkg_cmd_environ(self): """Return environment vars used in FreeBSD package_command operations""" - e = os.environ.copy() - e["ASSUME_ALWAYS_YES"] = "YES" - return e + return {"ASSUME_ALWAYS_YES": "YES"} def update_package_sources(self): self._runner.run( @@ -224,3 +226,7 @@ return [path, "-l", lease_file, "-p", pid_file] + ( ["-c", config_file, interface] if config_file else [interface] ) + + @staticmethod + def eject_media(device: str) -> None: + subp.subp(["camcontrol", "eject", device]) diff -Nru cloud-init-23.4.4/cloudinit/distros/gentoo.py cloud-init-24.1.3/cloudinit/distros/gentoo.py --- cloud-init-23.4.4/cloudinit/distros/gentoo.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/distros/gentoo.py 2024-03-27 13:14:04.000000000 +0000 @@ -9,7 +9,7 @@ import logging from cloudinit import distros, helpers, subp, util -from cloudinit.distros import PackageList, net_util +from cloudinit.distros import PackageList from cloudinit.distros.parsers.hostname import HostnameConf from cloudinit.settings import PER_INSTANCE @@ -17,11 +17,8 @@ class Distro(distros.Distro): - locale_conf_fn = "/etc/env.d/02locale" locale_gen_fn = "/etc/locale.gen" - network_conf_fn = "/etc/conf.d/net" hostname_conf_fn = "/etc/conf.d/hostname" - init_cmd = ["rc-service"] # init scripts default_locale = "en_US.UTF-8" # C.UTF8 makes sense to generate, but is not selected @@ -31,7 +28,7 @@ def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) # This will be used to restrict certain - # calls from repeatly happening (when they + # calls from repeatedly happening (when they # should only happen say once per instance...) 
self._runner = helpers.Runners(paths) self.osfamily = "gentoo" @@ -60,122 +57,6 @@ self.update_package_sources() self.package_command("", pkgs=pkglist) - def _write_network(self, settings): - entries = net_util.translate_network(settings) - LOG.debug( - "Translated ubuntu style network settings %s into %s", - settings, - entries, - ) - dev_names = entries.keys() - nameservers = [] - - for (dev, info) in entries.items(): - if "dns-nameservers" in info: - nameservers.extend(info["dns-nameservers"]) - if dev == "lo": - continue - net_fn = self.network_conf_fn + "." + dev - dns_nameservers = info.get("dns-nameservers") - if isinstance(dns_nameservers, (list, tuple)): - dns_nameservers = str(tuple(dns_nameservers)).replace(",", "") - # eth0, {'auto': True, 'ipv6': {}, 'bootproto': 'dhcp'} - # lo, {'dns-nameservers': ['10.0.1.3'], 'ipv6': {}, 'auto': True} - results = "" - if info.get("bootproto") == "dhcp": - results += 'config_{name}="dhcp"'.format(name=dev) - else: - results += ( - 'config_{name}="{ip_address} netmask {netmask}"\n' - 'mac_{name}="{hwaddr}"\n' - ).format( - name=dev, - ip_address=info.get("address"), - netmask=info.get("netmask"), - hwaddr=info.get("hwaddress"), - ) - results += 'routes_{name}="default via {gateway}"\n'.format( - name=dev, gateway=info.get("gateway") - ) - if info.get("dns-nameservers"): - results += 'dns_servers_{name}="{dnsservers}"\n'.format( - name=dev, dnsservers=dns_nameservers - ) - util.write_file(net_fn, results) - self._create_network_symlink(dev) - if info.get("auto"): - cmd = [ - "rc-update", - "add", - "net.{name}".format(name=dev), - "default", - ] - try: - (_out, err) = subp.subp(cmd) - if len(err): - LOG.warning( - "Running %s resulted in stderr output: %s", - cmd, - err, - ) - except subp.ProcessExecutionError: - util.logexc( - LOG, "Running interface command %s failed", cmd - ) - - if nameservers: - util.write_file( - self.resolve_conf_fn, convert_resolv_conf(nameservers) - ) - - return dev_names - - @staticmethod - def _create_network_symlink(interface_name): - file_path = "/etc/init.d/net.{name}".format(name=interface_name) - if not util.is_link(file_path): - util.sym_link("/etc/init.d/net.lo", file_path) - - def _bring_up_interface(self, device_name): - cmd = ["/etc/init.d/net.%s" % device_name, "restart"] - LOG.debug( - "Attempting to run bring up interface %s using command %s", - device_name, - cmd, - ) - try: - (_out, err) = subp.subp(cmd) - if len(err): - LOG.warning( - "Running %s resulted in stderr output: %s", cmd, err - ) - return True - except subp.ProcessExecutionError: - util.logexc(LOG, "Running interface command %s failed", cmd) - return False - - def _bring_up_interfaces(self, device_names): - use_all = False - for d in device_names: - if d == "all": - use_all = True - if use_all: - # Grab device names from init scripts - cmd = ["ls", "/etc/init.d/net.*"] - try: - (_out, err) = subp.subp(cmd) - if len(err): - LOG.warning( - "Running %s resulted in stderr output: %s", cmd, err - ) - except subp.ProcessExecutionError: - util.logexc(LOG, "Running interface command %s failed", cmd) - return False - devices = [x.split(".")[2] for x in _out.split(" ")] - return distros.Distro._bring_up_interfaces(self, devices) - else: - return distros.Distro._bring_up_interfaces(self, device_names) - def _write_hostname(self, hostname, filename): conf = None try: @@ -189,6 +70,9 @@ if create_hostname_file: pass else: + LOG.info( + "create_hostname_file is False; hostname file not created" + ) return if not conf: conf = HostnameConf("") @@ -205,7 
+89,7 @@ @staticmethod def _read_hostname_conf(filename): - conf = HostnameConf(util.load_file(filename)) + conf = HostnameConf(util.load_text_file(filename)) conf.parse() return conf @@ -255,12 +139,3 @@ ["--sync"], freq=PER_INSTANCE, ) - - -def convert_resolv_conf(settings): - """Returns a settings string formatted for resolv.conf.""" - result = "" - if isinstance(settings, list): - for ns in settings: - result += "nameserver %s\n" % ns - return result diff -Nru cloud-init-23.4.4/cloudinit/distros/mariner.py cloud-init-24.1.3/cloudinit/distros/mariner.py --- cloud-init-23.4.4/cloudinit/distros/mariner.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/distros/mariner.py 2024-03-27 13:14:04.000000000 +0000 @@ -6,6 +6,7 @@ from cloudinit import helpers from cloudinit.distros import photon +from cloudinit.net.netplan import CLOUDINIT_NETPLAN_FILE LOG = logging.getLogger(__name__) @@ -24,7 +25,7 @@ systemd_locale_conf_fn = "/etc/locale.conf" resolve_conf_fn = "/etc/systemd/resolved.conf" - network_conf_fn = {"netplan": "/etc/netplan/50-cloud-init.yaml"} + network_conf_fn = {"netplan": CLOUDINIT_NETPLAN_FILE} renderer_configs = { "networkd": { "resolv_conf_fn": resolve_conf_fn, @@ -43,7 +44,7 @@ def __init__(self, name, cfg, paths): photon.Distro.__init__(self, name, cfg, paths) # This will be used to restrict certain - # calls from repeatly happening (when they + # calls from repeatedly happening (when they # should only happen say once per instance...) self._runner = helpers.Runners(paths) self.osfamily = "mariner" diff -Nru cloud-init-23.4.4/cloudinit/distros/net_util.py cloud-init-24.1.3/cloudinit/distros/net_util.py --- cloud-init-23.4.4/cloudinit/distros/net_util.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/distros/net_util.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,187 +0,0 @@ -# Copyright (C) 2012 Canonical Ltd. -# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. -# Copyright (C) 2012 Yahoo! Inc. -# -# Author: Scott Moser -# Author: Juerg Haefliger -# Author: Joshua Harlow -# -# This file is part of cloud-init. See LICENSE file for license information. - - -# This is a util function to translate debian based distro interface blobs as -# given in /etc/network/interfaces to an *somewhat* agnostic format for -# distributions that use other formats. -# -# TODO(harlowja) remove when we have python-netcf active... -# -# The format is the following: -# { -# : { -# # All optional (if not existent in original format) -# "netmask": , -# "broadcast": , -# "gateway": , -# "address": , -# "bootproto": "static"|"dhcp", -# "dns-search": , -# "hwaddress": , -# "auto": True (or non-existent), -# "dns-nameservers": [, ...], -# } -# } -# -# Things to note, comments are removed, if a ubuntu/debian interface is -# marked as auto then only then first segment (?) is retained, ie -# 'auto eth0 eth0:1' just marks eth0 as auto (not eth0:1). 
-# -# Example input: -# -# auto lo -# iface lo inet loopback -# -# auto eth0 -# iface eth0 inet static -# address 10.0.0.1 -# netmask 255.255.252.0 -# broadcast 10.0.0.255 -# gateway 10.0.0.2 -# dns-nameservers 98.0.0.1 98.0.0.2 -# -# Example output: -# { -# "lo": { -# "auto": true -# }, -# "eth0": { -# "auto": true, -# "dns-nameservers": [ -# "98.0.0.1", -# "98.0.0.2" -# ], -# "broadcast": "10.0.0.255", -# "netmask": "255.255.252.0", -# "bootproto": "static", -# "address": "10.0.0.1", -# "gateway": "10.0.0.2" -# } -# } - -from cloudinit.net import mask_and_ipv4_to_bcast_addr, net_prefix_to_ipv4_mask - - -def translate_network(settings): - # Get the standard cmd, args from the ubuntu format - entries = [] - for line in settings.splitlines(): - line = line.strip() - if not line or line.startswith("#"): - continue - split_up = line.split(None, 1) - if len(split_up) <= 1: - continue - entries.append(split_up) - # Figure out where each iface section is - ifaces = [] - consume = {} - for (cmd, args) in entries: - if cmd == "iface": - if consume: - ifaces.append(consume) - consume = {} - consume[cmd] = args - else: - consume[cmd] = args - # Check if anything left over to consume - absorb = False - for (cmd, args) in consume.items(): - if cmd == "iface": - absorb = True - if absorb: - ifaces.append(consume) - # Now translate - real_ifaces = {} - for info in ifaces: - if "iface" not in info: - continue - iface_details = info["iface"].split(None) - # Check if current device *may* have an ipv6 IP - use_ipv6 = False - if "inet6" in iface_details: - use_ipv6 = True - dev_name = None - if len(iface_details) >= 1: - dev = iface_details[0].strip().lower() - if dev: - dev_name = dev - if not dev_name: - continue - iface_info = {} - iface_info["ipv6"] = {} - if len(iface_details) >= 3: - proto_type = iface_details[2].strip().lower() - # Seems like this can be 'loopback' which we don't - # really care about - if proto_type in ["dhcp", "static"]: - iface_info["bootproto"] = proto_type - # These can just be copied over - if use_ipv6: - for k in ["address", "gateway"]: - if k in info: - val = info[k].strip().lower() - if val: - iface_info["ipv6"][k] = val - else: - for k in ["netmask", "address", "gateway", "broadcast"]: - if k in info: - val = info[k].strip().lower() - if val: - iface_info[k] = val - # handle static ip configurations using - # ipaddress/prefix-length format - if "address" in iface_info: - if "netmask" not in iface_info: - # check if the address has a network prefix - addr, _, prefix = iface_info["address"].partition("/") - if prefix: - iface_info["netmask"] = net_prefix_to_ipv4_mask(prefix) - iface_info["address"] = addr - # if we set the netmask, we also can set the broadcast - iface_info["broadcast"] = mask_and_ipv4_to_bcast_addr( - iface_info["netmask"], addr - ) - - # Name server info provided?? - if "dns-nameservers" in info: - iface_info["dns-nameservers"] = info["dns-nameservers"].split() - # Name server search info provided?? - if "dns-search" in info: - iface_info["dns-search"] = info["dns-search"].split() - # Is any mac address spoofing going on?? - if "hwaddress" in info: - hw_info = info["hwaddress"].lower().strip() - hw_split = hw_info.split(None, 1) - if len(hw_split) == 2 and hw_split[0].startswith("ether"): - hw_addr = hw_split[1] - if hw_addr: - iface_info["hwaddress"] = hw_addr - # If ipv6 is enabled, device will have multiple IPs, so we need to - # update the dictionary instead of overwriting it... 
- if dev_name in real_ifaces: - real_ifaces[dev_name].update(iface_info) - else: - real_ifaces[dev_name] = iface_info - # Check for those that should be started on boot via 'auto' - for (cmd, args) in entries: - args = args.split(None) - if not args: - continue - dev_name = args[0].strip().lower() - if cmd == "auto": - # Seems like auto can be like 'auto eth0 eth0:1' so just get the - # first part out as the device name - if dev_name in real_ifaces: - real_ifaces[dev_name]["auto"] = True - if cmd == "iface" and "inet6" in args: - real_ifaces[dev_name]["inet6"] = True - return real_ifaces diff -Nru cloud-init-23.4.4/cloudinit/distros/netbsd.py cloud-init-24.1.3/cloudinit/distros/netbsd.py --- cloud-init-23.4.4/cloudinit/distros/netbsd.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/distros/netbsd.py 2024-03-27 13:14:04.000000000 +0000 @@ -146,14 +146,12 @@ """Return env vars used in NetBSD package_command operations""" os_release = platform.release() os_arch = platform.machine() - e = os.environ.copy() - e[ - "PKG_PATH" - ] = "http://cdn.netbsd.org/pub/pkgsrc/packages/NetBSD/%s/%s/All" % ( - os_arch, - os_release, - ) - return e + return { + "PKG_PATH": ( + f"http://cdn.netbsd.org/pub/pkgsrc/packages/NetBSD" + f"/{os_arch}/{os_release}/All" + ) + } def update_package_sources(self): pass diff -Nru cloud-init-23.4.4/cloudinit/distros/networking.py cloud-init-24.1.3/cloudinit/distros/networking.py --- cloud-init-23.4.4/cloudinit/distros/networking.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/distros/networking.py 2024-03-27 13:14:04.000000000 +0000 @@ -142,7 +142,7 @@ # the current macs present; we only check MAC as cloud-init # has not yet renamed interfaces and the netcfg may include # such renames. - for _ in range(0, 5): + for _ in range(5): if expected_macs.issubset(present_macs): LOG.debug("net: all expected physical devices present") return diff -Nru cloud-init-23.4.4/cloudinit/distros/openbsd.py cloud-init-24.1.3/cloudinit/distros/openbsd.py --- cloud-init-23.4.4/cloudinit/distros/openbsd.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/distros/openbsd.py 2024-03-27 13:14:04.000000000 +0000 @@ -3,7 +3,6 @@ # This file is part of cloud-init. See LICENSE file for license information. 
import logging -import os import cloudinit.distros.netbsd from cloudinit import subp, util @@ -16,7 +15,7 @@ init_cmd = ["rcctl"] def _read_hostname(self, filename, default=None): - return util.load_file(self.hostname_conf_fn) + return util.load_text_file(self.hostname_conf_fn) def _write_hostname(self, hostname, filename): content = hostname + "\n" @@ -58,5 +57,4 @@ def _get_pkg_cmd_environ(self): """Return env vars used in OpenBSD package_command operations""" - e = os.environ.copy() - return e + return {} diff -Nru cloud-init-23.4.4/cloudinit/distros/opensuse.py cloud-init-24.1.3/cloudinit/distros/opensuse.py --- cloud-init-23.4.4/cloudinit/distros/opensuse.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/distros/opensuse.py 2024-03-27 13:14:04.000000000 +0000 @@ -160,7 +160,7 @@ def _read_hostname(self, filename, default=None): if self.uses_systemd() and filename.endswith("/previous-hostname"): - return util.load_file(filename).strip() + return util.load_text_file(filename).strip() elif self.uses_systemd(): (out, _err) = subp.subp(["hostname"]) if len(out): @@ -181,7 +181,7 @@ return "127.0.1.1" def _read_hostname_conf(self, filename): - conf = HostnameConf(util.load_file(filename)) + conf = HostnameConf(util.load_text_file(filename)) conf.parse() return conf @@ -200,7 +200,7 @@ if result: (devpth, fs_type, mount_point) = result # Check if the file system is read only - mounts = util.load_file("/proc/mounts").split("\n") + mounts = util.load_text_file("/proc/mounts").split("\n") for mount in mounts: if mount.startswith(devpth): mount_info = mount.split() @@ -248,6 +248,10 @@ if create_hostname_file: pass else: + LOG.info( + "create_hostname_file is False; hostname file not" + "created" + ) return if not conf: conf = HostnameConf("") diff -Nru cloud-init-23.4.4/cloudinit/distros/package_management/apt.py cloud-init-24.1.3/cloudinit/distros/package_management/apt.py --- cloud-init-23.4.4/cloudinit/distros/package_management/apt.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/distros/package_management/apt.py 2024-03-27 13:14:04.000000000 +0000 @@ -3,6 +3,7 @@ import functools import logging import os +import re import time from typing import Any, Iterable, List, Mapping, Optional, Sequence, cast @@ -83,16 +84,15 @@ ): super().__init__(runner) if apt_get_command is None: - apt_get_command = APT_GET_COMMAND + self.apt_get_command = APT_GET_COMMAND if apt_get_upgrade_subcommand is None: apt_get_upgrade_subcommand = "dist-upgrade" self.apt_command = tuple(apt_get_wrapper_command) + tuple( - apt_get_command + self.apt_get_command ) self.apt_get_upgrade_subcommand = apt_get_upgrade_subcommand - self.environment = os.environ.copy() - self.environment["DEBIAN_FRONTEND"] = "noninteractive" + self.environment = {"DEBIAN_FRONTEND": "noninteractive"} @classmethod def from_config(cls, runner: helpers.Runners, cfg: Mapping) -> "Apt": @@ -105,6 +105,9 @@ apt_get_upgrade_subcommand=cfg.get("apt_get_upgrade_subcommand"), ) + def available(self) -> bool: + return bool(subp.which(self.apt_get_command[0])) + def update_package_sources(self): self.runner.run( "update-sources", @@ -124,7 +127,18 @@ return set(resp.splitlines()) def get_unavailable_packages(self, pkglist: Iterable[str]): - return [pkg for pkg in pkglist if pkg not in self.get_all_packages()] + # Packages ending with `-` signify to apt to not install a transitive + # dependency. + # Packages ending with '^' signify to apt to install a Task. 
+ # Anything after "/" refers to a target release + # "=" allows specifying a specific version + # Strip all off when checking for availability + return [ + pkg + for pkg in pkglist + if re.split("/|=", pkg)[0].rstrip("-^") + not in self.get_all_packages() + ] def install_packages(self, pkglist: Iterable) -> UninstalledPackages: self.update_package_sources() @@ -132,11 +146,12 @@ unavailable = self.get_unavailable_packages( [x.split("=")[0] for x in pkglist] ) - LOG.debug( - "The following packages were not found by APT so APT will " - "not attempt to install them: %s", - unavailable, - ) + if unavailable: + LOG.debug( + "The following packages were not found by APT so APT will " + "not attempt to install them: %s", + unavailable, + ) to_install = [p for p in pkglist if p not in unavailable] if to_install: self.run_package_command("install", pkgs=to_install) @@ -162,7 +177,7 @@ short_cmd=command, subp_kwargs={ "args": full_command, - "env": self.environment, + "update_env": self.environment, "capture": False, }, ) diff -Nru cloud-init-23.4.4/cloudinit/distros/package_management/package_manager.py cloud-init-24.1.3/cloudinit/distros/package_management/package_manager.py --- cloud-init-23.4.4/cloudinit/distros/package_management/package_manager.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/distros/package_management/package_manager.py 2024-03-27 13:14:04.000000000 +0000 @@ -18,6 +18,10 @@ return cls(runner) @abstractmethod + def available(self) -> bool: + """Return if package manager is installed on system.""" + + @abstractmethod def update_package_sources(self): ... diff -Nru cloud-init-23.4.4/cloudinit/distros/package_management/snap.py cloud-init-24.1.3/cloudinit/distros/package_management/snap.py --- cloud-init-23.4.4/cloudinit/distros/package_management/snap.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/distros/package_management/snap.py 2024-03-27 13:14:04.000000000 +0000 @@ -14,6 +14,9 @@ class Snap(PackageManager): name = "snap" + def available(self) -> bool: + return bool(subp.which("snap")) + def update_package_sources(self): pass diff -Nru cloud-init-23.4.4/cloudinit/distros/parsers/ifconfig.py cloud-init-24.1.3/cloudinit/distros/parsers/ifconfig.py --- cloud-init-23.4.4/cloudinit/distros/parsers/ifconfig.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/distros/parsers/ifconfig.py 2024-03-27 13:14:04.000000000 +0000 @@ -102,6 +102,7 @@ """ ifindex = 0 ifs_by_mac = defaultdict(list) + dev = None for line in text.splitlines(): if len(line) == 0: continue @@ -119,6 +120,11 @@ dev.index = ifindex self._ifs_by_name[curif] = dev + if not dev: + # This shouldn't happen with normal ifconfig output, but + # if it does, ensure we don't Traceback + continue + toks = line.lower().strip().split() if len(toks) > 1 and toks[1].startswith("flags="): diff -Nru cloud-init-23.4.4/cloudinit/distros/photon.py cloud-init-24.1.3/cloudinit/distros/photon.py --- cloud-init-23.4.4/cloudinit/distros/photon.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/distros/photon.py 2024-03-27 13:14:04.000000000 +0000 @@ -31,7 +31,7 @@ def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) # This will be used to restrict certain - # calls from repeatly happening (when they + # calls from repeatedly happening (when they # should only happen say once per instance...) 
self._runner = helpers.Runners(paths) self.osfamily = "photon" @@ -66,7 +66,7 @@ return None def apply_locale(self, locale, out_fn=None): - # This has a dependancy on glibc-i18n, user need to manually install it + # This has a dependency on glibc-i18n, user need to manually install it # and enable the option in cloud.cfg if not out_fn: out_fn = self.systemd_locale_conf_fn @@ -108,6 +108,9 @@ str(hostname), ] ) + LOG.info( + "create_hostname_file is False; hostname set transiently" + ) if ret: LOG.warning( ( @@ -123,7 +126,7 @@ def _read_hostname(self, filename, default=None): if filename and filename.endswith("/previous-hostname"): - return util.load_file(filename).strip() + return util.load_text_file(filename).strip() _ret, out, _err = self.exec_cmd(["hostname", "-f"]) return out.strip() if out else default diff -Nru cloud-init-23.4.4/cloudinit/distros/rhel.py cloud-init-24.1.3/cloudinit/distros/rhel.py --- cloud-init-23.4.4/cloudinit/distros/rhel.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/distros/rhel.py 2024-03-27 13:14:04.000000000 +0000 @@ -29,6 +29,12 @@ network_script_tpl = "/etc/sysconfig/network-scripts/ifcfg-%s" tz_local_fn = "/etc/localtime" usr_lib_exec = "/usr/libexec" + # RHEL and derivatives use NetworkManager DHCP client by default. + # But if NM is configured with using dhclient ("dhcp=dhclient" statement) + # then the following location is used: + # /var/lib/NetworkManager/dhclient--.lease + dhclient_lease_directory = "/var/lib/NetworkManager" + dhclient_lease_file_regex = r"dhclient-[\w-]+\.lease" renderer_configs = { "sysconfig": { "control": "etc/sysconfig/network", @@ -47,7 +53,7 @@ def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) # This will be used to restrict certain - # calls from repeatly happening (when they + # calls from repeatedly happening (when they # should only happen say once per instance...) 
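The Photon branch above now logs when the hostname is applied only transiently. For context, a sketch of setting a transient hostname through hostnamectl ("myhost" is a placeholder); the name is applied to the running system only and is not written to /etc/hostname:

    import subprocess

    subprocess.run(
        ["hostnamectl", "set-hostname", "--transient", "myhost"], check=True
    )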
self._runner = helpers.Runners(paths) self.osfamily = "redhat" @@ -121,6 +127,9 @@ str(hostname), ] ) + LOG.info( + "create_hostname_file is False; hostname set transiently" + ) else: host_cfg = { "HOSTNAME": hostname, @@ -136,7 +145,7 @@ def _read_hostname(self, filename, default=None): if self.uses_systemd() and filename.endswith("/previous-hostname"): - return util.load_file(filename).strip() + return util.load_text_file(filename).strip() elif self.uses_systemd(): (out, _err) = subp.subp(["hostname"]) out = out.strip() diff -Nru cloud-init-23.4.4/cloudinit/distros/rhel_util.py cloud-init-24.1.3/cloudinit/distros/rhel_util.py --- cloud-init-23.4.4/cloudinit/distros/rhel_util.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/distros/rhel_util.py 2024-03-27 13:14:04.000000000 +0000 @@ -43,7 +43,7 @@ def read_sysconfig_file(fn): exists = False try: - contents = util.load_file(fn).splitlines() + contents = util.load_text_file(fn).splitlines() exists = True except IOError: contents = [] diff -Nru cloud-init-23.4.4/cloudinit/distros/ubuntu.py cloud-init-24.1.3/cloudinit/distros/ubuntu.py --- cloud-init-23.4.4/cloudinit/distros/ubuntu.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/distros/ubuntu.py 2024-03-27 13:14:04.000000000 +0000 @@ -13,6 +13,7 @@ from cloudinit.distros import PREFERRED_NTP_CLIENTS, debian from cloudinit.distros.package_management.snap import Snap +from cloudinit.net.netplan import CLOUDINIT_NETPLAN_FILE class Distro(debian.Distro): @@ -21,7 +22,7 @@ # Ubuntu specific network cfg locations self.network_conf_fn = { "eni": "/etc/network/interfaces.d/50-cloud-init.cfg", - "netplan": "/etc/netplan/50-cloud-init.yaml", + "netplan": CLOUDINIT_NETPLAN_FILE, } self.renderer_configs = { "eni": { diff -Nru cloud-init-23.4.4/cloudinit/distros/ug_util.py cloud-init-24.1.3/cloudinit/distros/ug_util.py --- cloud-init-23.4.4/cloudinit/distros/ug_util.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/distros/ug_util.py 2024-03-27 13:14:04.000000000 +0000 @@ -17,7 +17,7 @@ # Normalizes an input group configuration which can be: -# Comma seperated string or a list or a dictionary +# Comma separated string or a list or a dictionary # # Returns dictionary of group names => members of that group which is the # standard form used in the rest of cloud-init diff -Nru cloud-init-23.4.4/cloudinit/dmi.py cloud-init-24.1.3/cloudinit/dmi.py --- cloud-init-23.4.4/cloudinit/dmi.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/dmi.py 2024-03-27 13:14:04.000000000 +0000 @@ -6,57 +6,74 @@ from typing import Optional from cloudinit import subp -from cloudinit.util import is_container, is_FreeBSD +from cloudinit.util import ( + is_container, + is_DragonFlyBSD, + is_FreeBSD, + is_OpenBSD, +) LOG = logging.getLogger(__name__) # Path for DMI Data DMI_SYS_PATH = "/sys/class/dmi/id" -KernelNames = namedtuple("KernelNames", ["linux", "freebsd"]) -KernelNames.__new__.__defaults__ = (None, None) +KernelNames = namedtuple("KernelNames", ["linux", "freebsd", "openbsd"]) +KernelNames.__new__.__defaults__ = (None, None, None) # FreeBSD's kenv(1) and Linux /sys/class/dmi/id/* both use different names from # dmidecode. The values are the same, and ultimately what we're interested in. +# Likewise, OpenBSD has the most commonly used things we need in sysctl(8)'s +# hw hierarchy. Moreover, this means it can be run without kern.allowkmem=1. # These tools offer a "cheaper" way to access those values over dmidecode. 
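dmi.py's translation table (below) grows a third, OpenBSD column. The namedtuple-with-defaults idiom it relies on keeps every column optional; a self-contained sketch:

    from collections import namedtuple

    KernelNames = namedtuple("KernelNames", ["linux", "freebsd", "openbsd"])
    KernelNames.__new__.__defaults__ = (None, None, None)

    # Trailing fields may be omitted and fall back to None:
    kn = KernelNames("board_asset_tag", "smbios.planar.tag")
    assert kn.openbsd is None

On Python 3.7+ the same thing can be written with namedtuple's defaults= keyword; the __new__.__defaults__ assignment shown here also works on older interpreters.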
# This is our canonical translation table. If we add more tools on other # platforms to find dmidecode's values, their keys need to be put in here. DMIDECODE_TO_KERNEL = { - "baseboard-asset-tag": KernelNames("board_asset_tag", "smbios.planar.tag"), + "baseboard-asset-tag": KernelNames( + "board_asset_tag", "smbios.planar.tag", None + ), "baseboard-manufacturer": KernelNames( - "board_vendor", "smbios.planar.maker" + "board_vendor", "smbios.planar.maker", None ), "baseboard-product-name": KernelNames( - "board_name", "smbios.planar.product" + "board_name", "smbios.planar.product", None ), "baseboard-serial-number": KernelNames( - "board_serial", "smbios.planar.serial" + "board_serial", "smbios.planar.serial", None + ), + "baseboard-version": KernelNames( + "board_version", "smbios.planar.version", None ), - "baseboard-version": KernelNames("board_version", "smbios.planar.version"), - "bios-release-date": KernelNames("bios_date", "smbios.bios.reldate"), - "bios-vendor": KernelNames("bios_vendor", "smbios.bios.vendor"), - "bios-version": KernelNames("bios_version", "smbios.bios.version"), + "bios-release-date": KernelNames("bios_date", "smbios.bios.reldate", None), + "bios-vendor": KernelNames("bios_vendor", "smbios.bios.vendor", None), + "bios-version": KernelNames("bios_version", "smbios.bios.version", None), "chassis-asset-tag": KernelNames( - "chassis_asset_tag", "smbios.chassis.tag" + "chassis_asset_tag", "smbios.chassis.tag", None ), "chassis-manufacturer": KernelNames( - "chassis_vendor", "smbios.chassis.maker" + "chassis_vendor", "smbios.chassis.maker", "hw.vendor" ), "chassis-serial-number": KernelNames( - "chassis_serial", "smbios.chassis.serial" + "chassis_serial", "smbios.chassis.serial", "hw.uuid" ), "chassis-version": KernelNames( - "chassis_version", "smbios.chassis.version" + "chassis_version", "smbios.chassis.version", None + ), + "system-manufacturer": KernelNames( + "sys_vendor", "smbios.system.maker", "hw.vendor" ), - "system-manufacturer": KernelNames("sys_vendor", "smbios.system.maker"), "system-product-name": KernelNames( - "product_name", "smbios.system.product" + "product_name", "smbios.system.product", "hw.product" ), "system-serial-number": KernelNames( - "product_serial", "smbios.system.serial" + "product_serial", "smbios.system.serial", "hw.uuid" + ), + "system-uuid": KernelNames( + "product_uuid", "smbios.system.uuid", "hw.uuid" + ), + "system-version": KernelNames( + "product_version", "smbios.system.version", None ), - "system-uuid": KernelNames("product_uuid", "smbios.system.uuid"), - "system-version": KernelNames("product_version", "smbios.system.version"), } @@ -110,8 +127,7 @@ try: cmd = ["kenv", "-q", kmap.freebsd] - (result, _err) = subp.subp(cmd) - result = result.strip() + result = subp.subp(cmd).stdout.strip() LOG.debug("kenv returned '%s' for '%s'", result, kmap.freebsd) return result except subp.ProcessExecutionError as e: @@ -120,6 +136,27 @@ return None +def _read_sysctl(key: str) -> Optional[str]: + """ + Reads dmi data from OpenBSD's sysctl(8) + """ + kmap = DMIDECODE_TO_KERNEL.get(key) + if kmap is None or kmap.openbsd is None: + return None + + LOG.debug("querying dmi data %s", kmap.openbsd) + + try: + cmd = ["sysctl", "-qn", kmap.openbsd] + result = subp.subp(cmd).stdout.strip() + LOG.debug("sysctl returned '%s' for '%s'", result, kmap.openbsd) + return result + except subp.ProcessExecutionError as e: + LOG.debug("failed sysctl cmd: %s\n%s", cmd, e) + + return None + + def _call_dmidecode(key: str, dmidecode_path: str) -> Optional[str]: """ 
Calls out to dmidecode to get the data out. This is mostly for supporting @@ -127,8 +164,7 @@ """ try: cmd = [dmidecode_path, "--string", key] - (result, _err) = subp.subp(cmd) - result = result.strip() + result = subp.subp(cmd).stdout.strip() LOG.debug("dmidecode returned '%s' for '%s'", result, key) if result.replace(".", "") == "": return "" @@ -159,9 +195,12 @@ if is_container(): return None - if is_FreeBSD(): + if is_FreeBSD() or is_DragonFlyBSD(): return _read_kenv(key) + if is_OpenBSD(): + return _read_sysctl(key) + syspath_value = _read_dmi_syspath(key) if syspath_value is not None: return syspath_value diff -Nru cloud-init-23.4.4/cloudinit/features.py cloud-init-24.1.3/cloudinit/features.py --- cloud-init-23.4.4/cloudinit/features.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/features.py 2024-03-27 13:14:04.000000000 +0000 @@ -62,7 +62,7 @@ NETPLAN_CONFIG_ROOT_READ_ONLY = True """ If ``NETPLAN_CONFIG_ROOT_READ_ONLY`` is True, then netplan configuration will -be written as a single root readon-only file /etc/netplan/50-cloud-init.yaml. +be written as a single root read-only file /etc/netplan/50-cloud-init.yaml. This prevents wifi passwords in network v2 configuration from being world-readable. Prior to 23.1, netplan configuration is world-readable. diff -Nru cloud-init-23.4.4/cloudinit/gpg.py cloud-init-24.1.3/cloudinit/gpg.py --- cloud-init-23.4.4/cloudinit/gpg.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/gpg.py 2024-03-27 13:14:04.000000000 +0000 @@ -68,7 +68,7 @@ Retries are done by default because keyservers can be unreliable. Additionally, there is no way to determine the difference between - a non-existant key and a failure. In both cases gpg (at least 2.2.4) + a non-existent key and a failure. In both cases gpg (at least 2.2.4) exits with status 2 and stderr: "keyserver receive failed: No data" It is assumed that a key provided to cloud-init exists on the keyserver so re-trying makes better sense than failing. diff -Nru cloud-init-23.4.4/cloudinit/handlers/__init__.py cloud-init-24.1.3/cloudinit/handlers/__init__.py --- cloud-init-23.4.4/cloudinit/handlers/__init__.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/handlers/__init__.py 2024-03-27 13:14:04.000000000 +0000 @@ -50,7 +50,7 @@ "## template: jinja": "text/jinja2", # Note: for the next 3 entries, the prefix doesn't matter because these # are for types that can only be used as part of a MIME message. However, - # including these entries supresses warnings during `cloudinit devel + # including these entries suppresses warnings during `cloudinit devel # make-mime`, which otherwise would require `--force`. 
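read_dmi_data now dispatches to sysctl(8) on OpenBSD. A minimal sketch of that branch, using a real mapping from the table above ("system-manufacturer" maps to "hw.vendor"):

    import subprocess

    # sysctl -n prints only the value; -q suppresses errors for unknown
    # names, mirroring the _read_sysctl helper in the hunk above.
    vendor = subprocess.run(
        ["sysctl", "-qn", "hw.vendor"], capture_output=True, text=True
    ).stdout.strip()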
"text/x-shellscript-per-boot": "text/x-shellscript-per-boot", "text/x-shellscript-per-instance": "text/x-shellscript-per-instance", @@ -93,7 +93,7 @@ or (frequency == PER_INSTANCE and mod_freq == PER_INSTANCE) ): return - # Sanity checks on version (should be an int convertable) + # Sanity checks on version (should be an int convertible) try: mod_ver = mod.handler_version mod_ver = int(mod_ver) diff -Nru cloud-init-23.4.4/cloudinit/handlers/boot_hook.py cloud-init-24.1.3/cloudinit/handlers/boot_hook.py --- cloud-init-23.4.4/cloudinit/handlers/boot_hook.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/handlers/boot_hook.py 2024-03-27 13:14:04.000000000 +0000 @@ -44,11 +44,13 @@ filepath = self._write_part(payload, filename) try: - env = os.environ.copy() - if self.instance_id is not None: - env["INSTANCE_ID"] = str(self.instance_id) + env = ( + {"INSTANCE_ID": str(self.instance_id)} + if self.instance_id + else {} + ) LOG.debug("Executing boothook") - subp.subp([filepath], env=env, capture=False) + subp.subp([filepath], update_env=env, capture=False) except subp.ProcessExecutionError: util.logexc(LOG, "Boothooks script %s execution error", filepath) except Exception: diff -Nru cloud-init-23.4.4/cloudinit/handlers/jinja_template.py cloud-init-24.1.3/cloudinit/handlers/jinja_template.py --- cloud-init-23.4.4/cloudinit/handlers/jinja_template.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/handlers/jinja_template.py 2024-03-27 13:14:04.000000000 +0000 @@ -13,10 +13,11 @@ from cloudinit.settings import PER_ALWAYS from cloudinit.templater import ( MISSING_JINJA_PREFIX, + JinjaSyntaxParsingException, detect_template, render_string, ) -from cloudinit.util import load_file, load_json +from cloudinit.util import load_json, load_text_file JUndefinedError: Type[Exception] try: @@ -54,9 +55,19 @@ if ctype in handlers.CONTENT_SIGNALS: return jinja_json_file = self.paths.get_runpath("instance_data_sensitive") - rendered_payload = render_jinja_payload_from_file( - payload, filename, jinja_json_file - ) + try: + rendered_payload = render_jinja_payload_from_file( + payload, filename, jinja_json_file + ) + except JinjaSyntaxParsingException as e: + LOG.warning( + "Ignoring jinja template for %s. " + "Failed to render template. %s", + filename, + str(e), + ) + return + if not rendered_payload: return subtype = handlers.type_from_starts_with(rendered_payload) @@ -105,7 +116,7 @@ " present at %s" % instance_data_file ) try: - instance_data = load_json(load_file(instance_data_file)) + instance_data = load_json(load_text_file(instance_data_file)) except Exception as e: msg = "Loading Jinja instance data failed" if isinstance(e, (IOError, OSError)): diff -Nru cloud-init-23.4.4/cloudinit/helpers.py cloud-init-24.1.3/cloudinit/helpers.py --- cloud-init-23.4.4/cloudinit/helpers.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/helpers.py 2024-03-27 13:14:04.000000000 +0000 @@ -104,23 +104,7 @@ sem_file = self._get_path(cname, freq) # This isn't really a good atomic check # but it suffices for where and when cloudinit runs - if os.path.exists(sem_file): - return True - - # this case could happen if the migrator module hadn't run yet - # but the item had run before we did canon_sem_name. - if cname != name and os.path.exists(self._get_path(name, freq)): - LOG.warning( - "%s has run without canonicalized name [%s].\n" - "likely the migrator has not yet run. 
" - "It will run next boot.\n" - "run manually with: cloud-init single --name=migrator", - name, - cname, - ) - return True - - return False + return os.path.exists(sem_file) def _get_path(self, name, freq): sem_path = self.sem_path @@ -363,6 +347,7 @@ "vendor_cloud_config": "vendor-cloud-config.txt", "vendor_scripts": "scripts/vendor", "warnings": "warnings", + "hotplug.enabled": "hotplug.enabled", } # Set when a datasource becomes active self.datasource = ds @@ -388,6 +373,8 @@ self.lookups[ "combined_cloud_config" ] = "combined-cloud-config.json" + if "hotplug.enabled" not in self.lookups: + self.lookups["hotplug.enabled"] = "hotplug.enabled" # get_ipath_cur: get the current instance path for an item def get_ipath_cur(self, name=None): diff -Nru cloud-init-23.4.4/cloudinit/log.py cloud-init-24.1.3/cloudinit/log.py --- cloud-init-23.4.4/cloudinit/log.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/log.py 2024-03-27 13:14:04.000000000 +0000 @@ -146,7 +146,7 @@ def setup_backup_logging(): """In the event that internal logging exception occurs and logging is not - possible for some reason, make a desparate final attempt to log to stderr + possible for some reason, make a desperate final attempt to log to stderr which may ease debugging. """ fallback_handler = logging.StreamHandler(sys.stderr) @@ -166,6 +166,18 @@ logging.Handler.handleError = handleError +class CloudInitLogRecord(logging.LogRecord): + """reporting the filename as __init__.py isn't very useful in logs + + if the filename is __init__.py, use the parent directory as the filename + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if "__init__.py" == self.filename: + self.filename = os.path.basename(os.path.dirname(self.pathname)) + + def configure_root_logger(): """Customize the root logger for cloud-init""" @@ -179,3 +191,6 @@ handler = LogExporter() handler.setLevel(logging.WARN) logging.getLogger().addHandler(handler) + + # LogRecord allows us to report more useful information than __init__.py + logging.setLogRecordFactory(CloudInitLogRecord) diff -Nru cloud-init-23.4.4/cloudinit/mergers/__init__.py cloud-init-24.1.3/cloudinit/mergers/__init__.py --- cloud-init-23.4.4/cloudinit/mergers/__init__.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/mergers/__init__.py 2024-03-27 13:14:04.000000000 +0000 @@ -110,7 +110,7 @@ continue match = NAME_MTCH.match(m_name) if not match: - msg = "Matcher identifer '%s' is not in the right format" % ( + msg = "Matcher identifier '%s' is not in the right format" % ( m_name ) raise ValueError(msg) diff -Nru cloud-init-23.4.4/cloudinit/mergers/m_list.py cloud-init-24.1.3/cloudinit/mergers/m_list.py --- cloud-init-23.4.4/cloudinit/mergers/m_list.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/mergers/m_list.py 2024-03-27 13:14:04.000000000 +0000 @@ -76,6 +76,6 @@ # Ok now we are replacing same indexes merged_list.extend(value) common_len = min(len(merged_list), len(merge_with)) - for i in range(0, common_len): + for i in range(common_len): merged_list[i] = merge_same_index(merged_list[i], merge_with[i]) return merged_list diff -Nru cloud-init-23.4.4/cloudinit/net/__init__.py cloud-init-24.1.3/cloudinit/net/__init__.py --- cloud-init-23.4.4/cloudinit/net/__init__.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/net/__init__.py 2024-03-27 13:14:04.000000000 +0000 @@ -73,7 +73,7 @@ ): dev_path = sys_dev_path(devname, path) try: - contents = util.load_file(dev_path) + contents = 
util.load_text_file(dev_path) except (OSError, IOError) as e: e_errno = getattr(e, "errno", None) if e_errno in (errno.ENOENT, errno.ENOTDIR): @@ -571,7 +571,12 @@ match = { "macaddress": read_sys_net_safe(target_name, "address").lower() } - cfg = {"dhcp4": True, "set-name": target_name, "match": match} + cfg = { + "dhcp4": True, + "dhcp6": True, + "set-name": target_name, + "match": match, + } if config_driver: driver = device_driver(target_name) if driver: @@ -1282,6 +1287,8 @@ """Common helper for checking network_state subnets for ipv6.""" # 'static6', 'dhcp6', 'ipv6_dhcpv6-stateful', 'ipv6_dhcpv6-stateless' or # 'ipv6_slaac' + # This function is inappropriate for v2-based routes as routes defined + # under v2 subnets can contain ipv4 and ipv6 simultaneously if subnet["type"].endswith("6") or subnet["type"] in IPV6_DYNAMIC_TYPES: # This is a request either static6 type or DHCPv6. return True diff -Nru cloud-init-23.4.4/cloudinit/net/activators.py cloud-init-24.1.3/cloudinit/net/activators.py --- cloud-init-23.4.4/cloudinit/net/activators.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/net/activators.py 2024-03-27 13:14:04.000000000 +0000 @@ -102,6 +102,33 @@ return _alter_interface(cmd, device_name) +class IfConfigActivator(NetworkActivator): + @staticmethod + def available(target=None) -> bool: + """Return true if ifconfig can be used on this system.""" + expected = "ifconfig" + search = ["/sbin"] + return subp.which(expected, search=search, target=target) + + @staticmethod + def bring_up_interface(device_name: str) -> bool: + """Bring up interface using ifconfig up. + + Return True is successful, otherwise return False + """ + cmd = ["ifconfig", device_name, "up"] + return _alter_interface(cmd, device_name) + + @staticmethod + def bring_down_interface(device_name: str) -> bool: + """Bring up interface using ifconfig down. + + Return True is successful, otherwise return False + """ + cmd = ["ifconfig", device_name, "down"] + return _alter_interface(cmd, device_name) + + class NetworkManagerActivator(NetworkActivator): @staticmethod def available(target=None) -> bool: @@ -117,6 +144,13 @@ from cloudinit.net.network_manager import conn_filename filename = conn_filename(device_name) + if filename is None: + LOG.warning( + "Unable to find an interface config file. " + "Unable to bring up interface." + ) + return False + cmd = ["nmcli", "connection", "load", filename] if _alter_interface(cmd, device_name): cmd = ["nmcli", "connection", "up", "filename", filename] @@ -220,6 +254,7 @@ "netplan", "network-manager", "networkd", + "ifconfig", ] NAME_TO_ACTIVATOR: Dict[str, Type[NetworkActivator]] = { @@ -227,6 +262,7 @@ "netplan": NetplanActivator, "network-manager": NetworkManagerActivator, "networkd": NetworkdActivator, + "ifconfig": IfConfigActivator, } diff -Nru cloud-init-23.4.4/cloudinit/net/bsd.py cloud-init-24.1.3/cloudinit/net/bsd.py --- cloud-init-23.4.4/cloudinit/net/bsd.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/net/bsd.py 2024-03-27 13:14:04.000000000 +0000 @@ -166,7 +166,7 @@ # fails. 
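The new IfConfigActivator above rounds out the activator list for systems where interfaces are toggled with plain ifconfig. A minimal sketch of the underlying operation, reporting success as a bool:

    import subprocess

    def set_link_state(device: str, state: str) -> bool:
        # state is "up" or "down"; mirrors what the activator's
        # bring_up_interface/bring_down_interface delegate to.
        try:
            subprocess.run(["ifconfig", device, state], check=True)
            return True
        except (subprocess.CalledProcessError, FileNotFoundError):
            return False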
try: resolvconf = ResolvConf( - util.load_file( + util.load_text_file( subp.target_path(self.target, self.resolv_conf_fn) ) ) diff -Nru cloud-init-23.4.4/cloudinit/net/cmdline.py cloud-init-24.1.3/cloudinit/net/cmdline.py --- cloud-init-23.4.4/cloudinit/net/cmdline.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/net/cmdline.py 2024-03-27 13:14:04.000000000 +0000 @@ -196,7 +196,7 @@ names = {} for cfg_file in files: name, entry = _klibc_to_config_entry( - util.load_file(cfg_file), mac_addrs=mac_addrs + util.load_text_file(cfg_file), mac_addrs=mac_addrs ) if name in names: prev = names[name]["entry"] diff -Nru cloud-init-23.4.4/cloudinit/net/dhcp.py cloud-init-24.1.3/cloudinit/net/dhcp.py --- cloud-init-23.4.4/cloudinit/net/dhcp.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/net/dhcp.py 2024-03-27 13:14:04.000000000 +0000 @@ -5,30 +5,30 @@ # This file is part of cloud-init. See LICENSE file for license information. import abc -import contextlib import glob import logging import os import re import signal +import socket +import struct import time +from contextlib import suppress from io import StringIO -from typing import Any, Dict, List +from subprocess import TimeoutExpired +from typing import Any, Callable, Dict, List, Optional, Tuple import configobj from cloudinit import subp, temp_utils, util -from cloudinit.net import ( - find_fallback_nic, - get_devicelist, - get_ib_interface_hwaddr, - get_interface_mac, - is_ib_interface, -) +from cloudinit.net import get_interface_mac, is_ib_interface LOG = logging.getLogger(__name__) NETWORKD_LEASES_DIR = "/run/systemd/netif/leases" +DHCLIENT_FALLBACK_LEASE_DIR = "/var/lib/dhclient" +# Match something.lease or something.leases +DHCLIENT_FALLBACK_LEASE_REGEX = r".+\.leases?$" UDHCPC_SCRIPT = """#!/bin/sh log() { echo "udhcpc[$PPID]" "$interface: $2" @@ -82,28 +82,6 @@ """Raised when unable to find dhclient.""" -class NoDHCPLeaseMissingUdhcpcError(NoDHCPLeaseError): - """Raised when unable to find udhcpc client.""" - - -def select_dhcp_client(distro): - """distros set priority list, select based on this order which to use - - If the priority dhcp client isn't found, fall back to lower in list. - """ - for client in distro.dhcp_client_priority: - try: - dhcp_client = client() - LOG.debug("DHCP client selected: %s", client.client_name) - return dhcp_client - except ( - NoDHCPLeaseMissingDhclientError, - NoDHCPLeaseMissingUdhcpcError, - ): - LOG.warning("DHCP client not found: %s", client.client_name) - raise NoDHCPLeaseMissingDhclientError() - - def maybe_perform_dhcp_discovery(distro, nic=None, dhcp_log_func=None): """Perform dhcp discovery if nic valid and dhclient command exists. @@ -117,18 +95,12 @@ from the dhclient discovery if run, otherwise an empty list is returned. 
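dhcp.py gains a distro-agnostic fallback lease location with the permissive filename pattern r".+\.leases?$". A few worked matches; the interface names are illustrative, following the distro naming conventions documented elsewhere in this diff:

    import re

    pat = re.compile(r".+\.leases?$")
    assert pat.search("dhclient.eth0.leases")        # Ubuntu-style name
    assert pat.search("dhclient--eth0.lease")        # CentOS 7-style name
    assert not pat.search("dhclient.eth0.leases.bak")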
""" - if nic is None: - nic = find_fallback_nic() - if nic is None: - LOG.debug("Skip dhcp_discovery: Unable to find fallback nic.") - raise NoDHCPLeaseInterfaceError() - elif nic not in get_devicelist(): - LOG.debug( - "Skip dhcp_discovery: nic %s not found in get_devicelist.", nic - ) + interface = nic or distro.fallback_interface + if interface is None: + LOG.debug("Skip dhcp_discovery: Unable to find fallback nic.") raise NoDHCPLeaseInterfaceError() - client = select_dhcp_client(distro) - return client.dhcp_discovery(nic, dhcp_log_func, distro) + + return distro.dhcp_client.dhcp_discovery(interface, dhcp_log_func, distro) def networkd_parse_lease(content): @@ -156,7 +128,7 @@ return ret for lfile in os.listdir(leases_d): ret[lfile] = networkd_parse_lease( - util.load_file(os.path.join(leases_d, lfile)) + util.load_text_file(os.path.join(leases_d, lfile)) ) return ret @@ -173,6 +145,12 @@ class DhcpClient(abc.ABC): client_name = "" + timeout = 10 + + def __init__(self): + self.dhcp_client_path = subp.which(self.client_name) + if not self.dhcp_client_path: + raise NoDHCPLeaseMissingDhclientError() @classmethod def kill_dhcp_client(cls): @@ -195,67 +173,137 @@ def stop_service(cls, dhcp_interface: str, distro): distro.manage_service("stop", cls.client_name, rcs=[0, 1]) + @abc.abstractmethod + def get_newest_lease(self, interface: str) -> Dict[str, Any]: + """Get the most recent lease from the ephemeral phase as a dict. + + Return a dict of dhcp options. The dict contains key value + pairs from the most recent lease. + """ + return {} + + @staticmethod + @abc.abstractmethod + def parse_static_routes(routes: str) -> List[Tuple[str, str]]: + """ + parse classless static routes from string + + The tuple is composed of the network_address (including net length) and + gateway for a parsed static route. + + @param routes: string containing classless static routes + @returns: list of tuple(str, str) for all valid parsed routes until the + first parsing error. + """ + return [] + + @abc.abstractmethod + def dhcp_discovery( + self, + interface: str, + dhcp_log_func: Optional[Callable] = None, + distro=None, + ) -> Dict[str, Any]: + """Run dhcp client on the interface without scripts or filesystem + artifacts. + + @param interface: Name of the network interface on which to send a + dhcp request + @param dhcp_log_func: A callable accepting the client output and + error streams. + @param distro: a distro object for network interface manipulation + @return: dict of lease options representing the most recent dhcp lease + parsed from the dhclient.lease file + """ + return {} + class IscDhclient(DhcpClient): client_name = "dhclient" def __init__(self): - self.dhclient_path = subp.which("dhclient") - if not self.dhclient_path: - LOG.debug( - "Skip dhclient configuration: No dhclient command found." - ) - raise NoDHCPLeaseMissingDhclientError() + super().__init__() + self.lease_file = "/run/dhclient.lease" @staticmethod - def parse_dhcp_lease_file(lease_file: str) -> List[Dict[str, Any]]: - """Parse the given dhcp lease file returning all leases as dicts. - - Return a list of dicts of dhcp options. Each dict contains key value - pairs a specific lease in order from oldest to newest. + def parse_leases(lease_content: str) -> List[Dict[str, Any]]: + """parse the content of a lease file - @raises: InvalidDHCPLeaseFileError on empty of unparseable leasefile - content. 
+ @param lease_content: a string containing the contents of an + isc-dhclient lease + @return: a list of leases, most recent last """ lease_regex = re.compile(r"lease {(?P.*?)}\n", re.DOTALL) - dhcp_leases = [] - lease_content = util.load_file(lease_file) + dhcp_leases: List[Dict] = [] if len(lease_content) == 0: - raise InvalidDHCPLeaseFileError( - "Cannot parse empty dhcp lease file {0}".format(lease_file) - ) + return [] for lease in lease_regex.findall(lease_content): lease_options = [] for line in lease.split(";"): # Strip newlines, double-quotes and option prefix line = line.strip().replace('"', "").replace("option ", "") - if not line: - continue - lease_options.append(line.split(" ", 1)) - dhcp_leases.append(dict(lease_options)) - if not dhcp_leases: - raise InvalidDHCPLeaseFileError( - "Cannot parse dhcp lease file {0}. No leases found".format( - lease_file + if line: + lease_options.append(line.split(" ", 1)) + options = dict(lease_options) + opt_245 = options.get("unknown-245") + if opt_245: + options["unknown-245"] = IscDhclient.get_ip_from_lease_value( + opt_245 ) - ) + dhcp_leases.append(options) return dhcp_leases + @staticmethod + def get_ip_from_lease_value(fallback_lease_value): + unescaped_value = fallback_lease_value.replace("\\", "") + if len(unescaped_value) > 4: + hex_string = "" + for hex_pair in unescaped_value.split(":"): + if len(hex_pair) == 1: + hex_pair = "0" + hex_pair + hex_string += hex_pair + packed_bytes = struct.pack( + ">L", int(hex_string.replace(":", ""), 16) + ) + else: + packed_bytes = unescaped_value.encode("utf-8") + return socket.inet_ntoa(packed_bytes) + + def get_newest_lease(self, interface: str) -> Dict[str, Any]: + """Get the most recent lease from the ephemeral phase as a dict. + + Return a dict of dhcp options. The dict contains key value + pairs from the most recent lease. + + @param interface: an interface string - not used in this class, but + required for function signature compatibility with other classes + that require a distro object + @raises: InvalidDHCPLeaseFileError on empty or unparsable leasefile + content. + """ + with suppress(FileNotFoundError): + content = util.load_text_file(self.lease_file) + if content: + dhcp_leases = self.parse_leases(content) + if dhcp_leases: + return dhcp_leases[-1] + return {} + def dhcp_discovery( self, - interface, - dhcp_log_func=None, + interface: str, + dhcp_log_func: Optional[Callable] = None, distro=None, - ): + ) -> Dict[str, Any]: """Run dhclient on the interface without scripts/filesystem artifacts. - @param dhclient_cmd_path: Full path to the dhclient used. - @param interface: Name of the network interface on which to dhclient. + @param interface: Name of the network interface on which to send a + dhcp request @param dhcp_log_func: A callable accepting the dhclient output and error streams. - - @return: A list of dicts of representing the dhcp leases parsed from - the dhclient.lease file or empty list. + @param distro: a distro object for network interface manipulation + @return: dict of lease options representing the most recent dhcp lease + parsed from the dhclient.lease file """ LOG.debug("Performing a dhcp discovery on %s", interface) @@ -263,14 +311,16 @@ # side-effects in # /etc/resolv.conf any any other vendor specific # scripts in /etc/dhcp/dhclient*hooks.d. 
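get_ip_from_lease_value above normalizes dhclient's two on-disk encodings of option 245 (used by Azure's wireserver): colon-separated hex octets, or a short backslash-escaped ASCII string. A standalone sketch with a worked hex case:

    import socket
    import struct

    def ip_from_lease_value(value: str) -> str:
        unescaped = value.replace("\\", "")
        if len(unescaped) > 4:
            # hex form, e.g. "a8:3f:81:10"; pad single-digit pairs
            hex_string = "".join(pair.zfill(2) for pair in unescaped.split(":"))
            packed = struct.pack(">L", int(hex_string, 16))
        else:
            # short raw-ASCII form: the bytes are the address
            packed = unescaped.encode("utf-8")
        return socket.inet_ntoa(packed)

    assert ip_from_lease_value("a8:3f:81:10") == "168.63.129.16"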
pid_file = "/run/dhclient.pid" - lease_file = "/run/dhclient.lease" config_file = None + sleep_time = 0.01 + sleep_cycles = int(self.timeout / sleep_time) + maxwait = int(self.timeout / 2) # this function waits for these files to exist, clean previous runs # to avoid false positive in wait_for_files - with contextlib.suppress(FileNotFoundError): + with suppress(FileNotFoundError): os.remove(pid_file) - os.remove(lease_file) + os.remove(self.lease_file) # ISC dhclient needs the interface up to send initial discovery packets # Generally dhclient relies on dhclient-script PREINIT action to bring @@ -297,8 +347,8 @@ try: out, err = subp.subp( distro.build_dhclient_cmd( - self.dhclient_path, - lease_file, + self.dhcp_client_path, + self.lease_file, pid_file, interface, config_file, @@ -321,31 +371,43 @@ # kill the correct process, thus freeing cleandir to be deleted back # up the callstack. missing = util.wait_for_files( - [pid_file, lease_file], maxwait=5, naplen=0.01 + [pid_file, self.lease_file], maxwait=maxwait, naplen=0.01 ) if missing: LOG.warning( "dhclient did not produce expected files: %s", ", ".join(os.path.basename(f) for f in missing), ) - return [] + return {} ppid = "unknown" daemonized = False - for _ in range(0, 1000): - pid_content = util.load_file(pid_file).strip() + pid_content = None + debug_msg = "" + for _ in range(sleep_cycles): try: + pid_content = util.load_text_file(pid_file).strip() pid = int(pid_content) + except FileNotFoundError: + debug_msg = ( + f"No PID file found at {pid_file}, " + "dhclient is still running" + ) except ValueError: - pass + debug_msg = ( + f"PID file contained [{pid_content}], " + "dhclient is still running" + ) else: - ppid = util.get_proc_ppid(pid) + ppid = distro.get_proc_ppid(pid) if ppid == 1: LOG.debug("killing dhclient with pid=%s", pid) os.kill(pid, signal.SIGKILL) daemonized = True break - time.sleep(0.01) + time.sleep(sleep_time) + else: + LOG.debug(debug_msg) if not daemonized: LOG.error( @@ -357,10 +419,13 @@ ) if dhcp_log_func is not None: dhcp_log_func(out, err) - return self.parse_dhcp_lease_file(lease_file) + lease = self.get_newest_lease(interface) + if lease: + return lease + raise InvalidDHCPLeaseFileError() @staticmethod - def parse_static_routes(rfc3442): + def parse_static_routes(routes: str) -> List[Tuple[str, str]]: """ parse rfc3442 format and return a list containing tuple of strings. @@ -370,7 +435,7 @@ @param rfc3442: string in rfc3442 format (isc or dhcpd) @returns: list of tuple(str, str) for all valid parsed routes until the - first parsing error. + first parsing error. 
e.g.: @@ -392,9 +457,9 @@ /etc/dhcp/dhclient-exit-hooks.d/rfc3442-classless-routes """ # raw strings from dhcp lease may end in semi-colon - rfc3442 = rfc3442.rstrip(";") + rfc3442 = routes.rstrip(";") tokens = [tok for tok in re.split(r"[, .]", rfc3442) if tok] - static_routes = [] + static_routes: List[Tuple[str, str]] = [] def _trunc_error(cidr, required, remain): msg = ( @@ -467,102 +532,379 @@ return static_routes @staticmethod - def get_dhclient_d(): - # find lease files directory - supported_dirs = [ - "/var/lib/dhclient", - "/var/lib/dhcp", - "/var/lib/NetworkManager", - ] - for d in supported_dirs: - if os.path.exists(d) and len(os.listdir(d)) > 0: - LOG.debug("Using %s lease directory", d) - return d - return None + def get_newest_lease_file_from_distro(distro) -> Optional[str]: + """Get the latest lease file from a distro-managed dhclient - @staticmethod - def get_latest_lease(lease_d=None): - # find latest lease file - if lease_d is None: - lease_d = IscDhclient.get_dhclient_d() - if not lease_d: - return None - lease_files = os.listdir(lease_d) - latest_mtime = -1 + Doesn't consider the ephemeral timeframe lease. + + @param distro: used for distro-specific lease location and filename + @return: The most recent lease file, or None + """ latest_file = None - # lease files are named inconsistently across distros. - # We assume that 'dhclient6' indicates ipv6 and ignore it. - # ubuntu: - # dhclient..leases, dhclient.leases, dhclient6.leases - # centos6: - # dhclient-.leases, dhclient6.leases - # centos7: ('--' is not a typo) - # dhclient--.lease, dhclient6.leases - for fname in lease_files: - if fname.startswith("dhclient6"): - # avoid files that start with dhclient6 assuming dhcpv6. + # Try primary dir/regex, then the fallback ones + for directory, regex in ( + ( + distro.dhclient_lease_directory, + distro.dhclient_lease_file_regex, + ), + (DHCLIENT_FALLBACK_LEASE_DIR, DHCLIENT_FALLBACK_LEASE_REGEX), + ): + if not directory: continue - if not (fname.endswith((".lease", ".leases"))): + + lease_files = [] + try: + lease_files = os.listdir(directory) + except FileNotFoundError: continue - abs_path = os.path.join(lease_d, fname) - mtime = os.path.getmtime(abs_path) - if mtime > latest_mtime: - latest_mtime = mtime - latest_file = abs_path - return latest_file + latest_mtime = -1.0 + for fname in lease_files: + if not re.search(regex, fname): + continue - @staticmethod - def parse_dhcp_server_from_lease_file(lease_file): - with open(lease_file, "r") as fd: - for line in fd: - if "dhcp-server-identifier" in line: - words = line.strip(" ;\r\n").split(" ") - if len(words) > 2: - dhcptok = words[2] - LOG.debug("Found DHCP identifier %s", dhcptok) - latest_address = dhcptok - return latest_address + abs_path = os.path.join(directory, fname) + mtime = os.path.getmtime(abs_path) + if mtime > latest_mtime: + latest_mtime = mtime + latest_file = abs_path + + # Lease file found, skipping falling back + if latest_file: + return latest_file + return None + + def get_key_from_latest_lease(self, distro, key: str): + """Get a key from the latest lease from distro-managed dhclient + + Doesn't consider the ephemeral timeframe lease. 
+ + @param lease_dir: distro-specific lease to check + @param lease_file_regex: distro-specific regex to match lease name + @return: The most recent lease file, or None + """ + lease_file = self.get_newest_lease_file_from_distro(distro) + if lease_file: + content = util.load_text_file(lease_file) + if content: + for lease in reversed(self.parse_leases(content)): + server = lease.get(key) + if server: + return server -class Dhcpcd: +class Dhcpcd(DhcpClient): client_name = "dhcpcd" - def __init__(self): - raise NoDHCPLeaseMissingDhclientError("Dhcpcd not yet implemented") + def dhcp_discovery( + self, + interface: str, + dhcp_log_func: Optional[Callable] = None, + distro=None, + ) -> Dict[str, Any]: + """Run dhcpcd on the interface without scripts/filesystem artifacts. + + @param interface: Name of the network interface on which to send a + dhcp request + @param dhcp_log_func: A callable accepting the client output and + error streams. + @param distro: a distro object for network interface manipulation + @return: dict of lease options representing the most recent dhcp lease + parsed from the dhclient.lease file + """ + LOG.debug("Performing a dhcp discovery on %s", interface) + sleep_time = 0.01 + sleep_cycles = int(self.timeout / sleep_time) + infiniband_argument = [] + + # dhcpcd needs the interface up to send initial discovery packets + # Generally dhclient relies on dhclient-script PREINIT action to bring + # the link up before attempting discovery. Since we are using + # -sf /bin/true, we need to do that "link up" ourselves first. + distro.net_ops.link_up(interface) + try: + # Currently dhcpcd doesn't have a workable --oneshot lease parsing + # story. All non-daemon lease parsing options on dhcpcd appear + # broken: + # + # https://github.com/NetworkConfiguration/dhcpcd/issues/285 + # https://github.com/NetworkConfiguration/dhcpcd/issues/286 + # https://github.com/NetworkConfiguration/dhcpcd/issues/287 + # + # Until fixed, we allow dhcpcd to spawn background processes so + # that we can use --dumplease, but when any option above is fixed, + # it would be safer to avoid spawning processes using --oneshot + if is_ib_interface(interface): + infiniband_argument = ["--clientid"] + command = [ + self.dhcp_client_path, # pyright: ignore + "--ipv4only", # only attempt configuring ipv4 + "--waitip", # wait for ipv4 to be configured + "--persistent", # don't deconfigure when dhcpcd exits + "--noarp", # don't be slow + "--script=/bin/true", # disable hooks + *infiniband_argument, + interface, + ] + out, err = subp.subp( + command, + timeout=self.timeout, + ) + if dhcp_log_func is not None: + dhcp_log_func(out, err) + lease = self.get_newest_lease(interface) + # Attempt cleanup and leave breadcrumbs if it fails, but return + # the lease regardless of failure to clean up dhcpcd. 
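The dhcpcd flow above is: bring the link up, run dhcpcd in the foreground until an IPv4 address is configured, then read the lease back with --dumplease. A condensed sketch of the two invocations with the flags used above; "eth0" is a placeholder interface:

    import subprocess

    iface = "eth0"  # hypothetical interface
    subprocess.run(
        ["dhcpcd", "--ipv4only", "--waitip", "--persistent", "--noarp",
         "--script=/bin/true", iface],
        timeout=10, check=True,
    )
    dump = subprocess.run(
        ["dhcpcd", "--dumplease", "--ipv4only", iface],
        capture_output=True, text=True, check=True,
    ).stdout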
+ if lease: + # Note: the pid file location depends on the arguments passed + # it can be discovered with the -P flag + pid_file = subp.subp([*command, "-P"]).stdout.strip() + pid_content = None + gid = False + debug_msg = "" + for _ in range(sleep_cycles): + try: + pid_content = util.load_text_file(pid_file).strip() + pid = int(pid_content) + gid = distro.get_proc_pgid(pid) + if gid: + LOG.debug( + "killing dhcpcd with pid=%s gid=%s", pid, gid + ) + os.killpg(gid, signal.SIGKILL) + break + except ProcessLookupError: + LOG.debug( + "Process group id [%s] has already exited, " + "nothing to kill", + gid, + ) + break + except FileNotFoundError: + debug_msg = ( + f"No PID file found at {pid_file}, " + "dhcpcd is still running" + ) + except ValueError: + debug_msg = ( + f"PID file contained [{pid_content}], " + "dhcpcd is still running" + ) + else: + return lease + time.sleep(sleep_time) + else: + LOG.debug(debug_msg) + return lease + raise NoDHCPLeaseError("No lease found") + + except TimeoutExpired as error: + LOG.debug( + "dhcpcd timed out after %s seconds: stderr: %r stdout: %r", + error.timeout, + error.stderr, + error.stdout, + ) + raise NoDHCPLeaseError from error + except subp.ProcessExecutionError as error: + LOG.debug( + "dhcpcd exited with code: %s stderr: %r stdout: %r", + error.exit_code, + error.stderr, + error.stdout, + ) + raise NoDHCPLeaseError from error + + @staticmethod + def parse_unknown_options_from_packet( + data: bytes, dhcp_option_number: int + ) -> Optional[bytes]: + """get a specific option from a binary lease file + + This is required until upstream dhcpcd supports unknown option 245 + upstream bug: https://github.com/NetworkConfiguration/dhcpcd/issues/282 + + @param data: Binary lease data + @param number: Option number to return + @return: the option (bytes) or None + """ + # DHCP is basically an extension to bootp. The relevent standards that + # describe the packet format include: + # + # RFC 951 (Section 3) + # RFC 2132 (Section 2) + # + # Per RFC 951, the "vendor-specific area" of the dhcp packet starts at + # byte 236. An arbitrary constant, known as the magic cookie, takes 4 + # bytes. Vendor-specific options come next, so we start the search at + # byte 240. 
+        INDEX = 240
+
+        def iter_options(data: bytes, index: int):
+            """options are variable length, and consist of the following format
+
+            option number: 1 byte
+            option length: 1 byte
+            option data: variable length (see length field)
+            """
+            while len(data) >= index + 2:
+                code = data[index]
+                length = data[1 + index]
+                option = data[2 + index : 2 + index + length]
+                yield code, option
+                index = 2 + length + index
+
+        for code, option in iter_options(data, INDEX):
+            if code == dhcp_option_number:
+                return option
+        return None
+
+    @staticmethod
+    def parse_dhcpcd_lease(lease_dump: str, interface: str) -> Dict:
+        """parse the output of dhcpcd --dump
+
+        map names to the data structure we create from dhclient
+
+        example dhcpcd output:
+
+        broadcast_address='192.168.15.255'
+        dhcp_lease_time='3600'
+        dhcp_message_type='5'
+        dhcp_server_identifier='192.168.0.1'
+        domain_name='us-east-2.compute.internal'
+        domain_name_servers='192.168.0.2'
+        host_name='ip-192-168-0-212'
+        interface_mtu='9001'
+        ip_address='192.168.0.212'
+        network_number='192.168.0.0'
+        routers='192.168.0.1'
+        subnet_cidr='20'
+        subnet_mask='255.255.240.0'
+        """
+
+        # create a dict from dhcpcd dump output - remove single quotes
+        lease = dict(
+            [
+                a.split("=")
+                for a in lease_dump.strip().replace("'", "").split("\n")
+            ]
+        )
+
+        # this is expected by cloud-init's code
+        lease["interface"] = interface
+
+        # transform underscores to hyphens
+        lease = {key.replace("_", "-"): value for key, value in lease.items()}
+
+        # - isc-dhclient uses the key name "fixed-address" in place of
+        #   "ip-address", and in the codebase some code assumes that we can
+        #   use isc-dhclient's option names. Map accordingly
+        # - in ephemeral.py we use an internal key name "static_routes" to map
+        #   what appears to be a RHEL customization to the isc-dhclient
+        #   code, so we need to match this key for use there.
+        name_map = {
+            "ip-address": "fixed-address",
+            "classless-static-routes": "static_routes",
+        }
+        for source, destination in name_map.items():
+            if source in lease:
+                lease[destination] = lease.pop(source)
+        dhcp_message = util.load_binary_file(
+            f"/var/lib/dhcpcd/{interface}.lease"
+        )
+        opt_245 = Dhcpcd.parse_unknown_options_from_packet(dhcp_message, 245)
+        if opt_245:
+            lease["unknown-245"] = socket.inet_ntoa(opt_245)
+        return lease
+
+    def get_newest_lease(self, interface: str) -> Dict[str, Any]:
+        """Return a dict of dhcp options.
+
+        @param interface: which interface to dump the lease from
+        @raises: NoDHCPLeaseError when dhcpcd fails to dump a lease for the
+            interface.
+        """
+        try:
+            return self.parse_dhcpcd_lease(
+                subp.subp(
+                    [
+                        self.dhcp_client_path,
+                        "--dumplease",
+                        "--ipv4only",
+                        interface,
+                    ],
+                ).stdout,
+                interface,
+            )
+
+        except subp.ProcessExecutionError as error:
+            LOG.debug(
+                "dhcpcd exited with code: %s stderr: %r stdout: %r",
+                error.exit_code,
+                error.stderr,
+                error.stdout,
+            )
+            raise NoDHCPLeaseError from error
+
+    @staticmethod
+    def parse_static_routes(routes: str) -> List[Tuple[str, str]]:
+        """
+        Parse classless static routes as returned from dhcpcd --dumplease and
+        return a list containing tuples of strings.
+
+        The tuple is composed of the network_address (including net length)
+        and gateway for a parsed static route.
+
+        @param routes: string containing classless static routes
+        @returns: list of tuple(str, str) for all valid parsed routes until
+            the first parsing error.
+
+        e.g.:
+
+        sr=parse_static_routes(
+            "0.0.0.0/0 10.0.0.1 168.63.129.16/32 10.0.0.1"
+        )
+        sr=[
+            ("0.0.0.0/0", "10.0.0.1"),
+            ("168.63.129.16/32", "10.0.0.1"),
+        ]
+        """
+        static_routes = routes.split()
+        if static_routes:
+            # format: dest1/mask gw1 ... destn/mask gwn
+            return [i for i in zip(static_routes[::2], static_routes[1::2])]
+        LOG.warning("Malformed classless static routes: [%s]", routes)
+        return []
 
 
 class Udhcpc(DhcpClient):
     client_name = "udhcpc"
 
     def __init__(self):
-        self.udhcpc_path = subp.which("udhcpc")
-        if not self.udhcpc_path:
-            LOG.debug("Skip udhcpc configuration: No udhcpc command found.")
-            raise NoDHCPLeaseMissingUdhcpcError()
+        super().__init__()
+        self.lease_file = None
 
     def dhcp_discovery(
         self,
-        interface,
-        dhcp_log_func=None,
+        interface: str,
+        dhcp_log_func: Optional[Callable] = None,
         distro=None,
-    ):
+    ) -> Dict[str, Any]:
         """Run udhcpc on the interface without scripts or filesystem
         artifacts.
 
         @param interface: Name of the network interface on which to run
             udhcpc.
         @param dhcp_log_func: A callable accepting the udhcpc output and
             error streams.
-        @return: A list of dicts of representing the dhcp leases parsed from
-            the udhcpc lease file.
         """
         LOG.debug("Performing a dhcp discovery on %s", interface)
 
         tmp_dir = temp_utils.get_tmp_ancestor(needs_exe=True)
-        lease_file = os.path.join(tmp_dir, interface + ".lease.json")
-        with contextlib.suppress(FileNotFoundError):
-            os.remove(lease_file)
+        self.lease_file = os.path.join(tmp_dir, interface + ".lease.json")
+        with suppress(FileNotFoundError):
+            os.remove(self.lease_file)
 
         # udhcpc needs the interface up to send initial discovery packets
         distro.net_ops.link_up(interface)
@@ -571,7 +913,7 @@
         util.write_file(udhcpc_script, UDHCPC_SCRIPT, 0o755)
 
         cmd = [
-            self.udhcpc_path,
+            self.dhcp_client_path,
             "-O",
             "staticroutes",
            "-i",
@@ -589,15 +931,17 @@
         # INFINIBAND or not. If yes, we are generating the client-id to be
         # used with the udhcpc
         if is_ib_interface(interface):
-            dhcp_client_identifier = get_ib_interface_hwaddr(
-                interface, ethernet_format=True
-            )
             cmd.extend(
-                ["-x", "0x3d:%s" % dhcp_client_identifier.replace(":", "")]
+                [
+                    "-x",
+                    "0x3d:20{}".format(
+                        get_interface_mac(interface)[36:].replace(":", "")
+                    ),
+                ]
             )
 
         try:
             out, err = subp.subp(
-                cmd, update_env={"LEASE_FILE": lease_file}, capture=True
+                cmd, update_env={"LEASE_FILE": self.lease_file}, capture=True
            )
         except subp.ProcessExecutionError as error:
             LOG.debug(
@@ -611,11 +955,29 @@
 
         if dhcp_log_func is not None:
             dhcp_log_func(out, err)
 
-        lease_json = util.load_json(util.load_file(lease_file))
-        static_routes = lease_json["static_routes"].split()
+        return self.get_newest_lease(interface)
+
+    def get_newest_lease(self, interface: str) -> Dict[str, Any]:
+        """Get the most recent lease from the ephemeral phase as a dict.
+
+        Return a dict of dhcp options. The dict contains key value
+        pairs from the most recent lease.
+
+        @param interface: an interface name - not used in this class, but
+            required for function signature compatibility with the other
+            dhcp client classes
+        @raises: InvalidDHCPLeaseFileError on empty or unparsable leasefile
+            content.
+        """
+        return util.load_json(util.load_text_file(self.lease_file))
+
+    @staticmethod
+    def parse_static_routes(routes: str) -> List[Tuple[str, str]]:
+        static_routes = routes.split()
+        if static_routes:
             # format: dest1/mask gw1 ...
destn/mask gwn - lease_json["static_routes"] = [ - i for i in zip(static_routes[::2], static_routes[1::2]) - ] - return [lease_json] + return [i for i in zip(static_routes[::2], static_routes[1::2])] + return [] + + +ALL_DHCP_CLIENTS = [Dhcpcd, IscDhclient, Udhcpc] diff -Nru cloud-init-23.4.4/cloudinit/net/eni.py cloud-init-24.1.3/cloudinit/net/eni.py --- cloud-init-23.4.4/cloudinit/net/eni.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/net/eni.py 2024-03-27 13:14:04.000000000 +0000 @@ -179,7 +179,7 @@ """Parses the file contents, placing result into ifaces. '_source_path' is added to every dictionary entry to define which file - the configration information came from. + the configuration information came from. :param ifaces: interface dictionary :param contents: contents of interfaces file diff -Nru cloud-init-23.4.4/cloudinit/net/ephemeral.py cloud-init-24.1.3/cloudinit/net/ephemeral.py --- cloud-init-23.4.4/cloudinit/net/ephemeral.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/net/ephemeral.py 2024-03-27 13:14:04.000000000 +0000 @@ -9,7 +9,7 @@ import cloudinit.net as net from cloudinit.net.dhcp import ( - IscDhclient, + Dhcpcd, NoDHCPLeaseError, maybe_perform_dhcp_discovery, ) @@ -116,7 +116,7 @@ cmd() def _bringup_device(self): - """Perform the ip comands to fully setup the device.""" + """Perform the ip commands to fully setup the device.""" cidr = "{0}/{1}".format(self.ip, self.prefix) LOG.debug( "Attempting setup of ephemeral network on %s with %s brd %s", @@ -285,12 +285,11 @@ """ if self.lease: return self.lease - leases = maybe_perform_dhcp_discovery( + self.lease = maybe_perform_dhcp_discovery( self.distro, self.iface, self.dhcp_log_func ) - if not leases: + if not self.lease: raise NoDHCPLeaseError() - self.lease = leases[-1] LOG.debug( "Received dhcp lease on %s for %s/%s", self.lease["interface"], @@ -305,6 +304,7 @@ "static_routes": [ "rfc3442-classless-static-routes", "classless-static-routes", + "static_routes", ], "router": "routers", } @@ -314,12 +314,17 @@ kwargs["prefix_or_mask"], kwargs["ip"] ) if kwargs["static_routes"]: - kwargs["static_routes"] = IscDhclient.parse_static_routes( + kwargs[ + "static_routes" + ] = self.distro.dhcp_client.parse_static_routes( kwargs["static_routes"] ) if self.connectivity_url_data: kwargs["connectivity_url_data"] = self.connectivity_url_data - ephipv4 = EphemeralIPv4Network(self.distro, **kwargs) + if isinstance(self.distro.dhcp_client, Dhcpcd): + ephipv4 = DhcpcdEphemeralIPv4Network(self.distro, **kwargs) + else: + ephipv4 = EphemeralIPv4Network(self.distro, **kwargs) ephipv4.__enter__() self._ephipv4 = ephipv4 return self.lease @@ -343,6 +348,16 @@ result[internal_mapping] = self.lease.get(different_names) +class DhcpcdEphemeralIPv4Network(EphemeralIPv4Network): + """dhcpcd sets up its own ephemeral network and routes""" + + def __enter__(self): + return + + def __exit__(self, excp_type, excp_value, excp_traceback): + return + + class EphemeralIPNetwork: """Combined ephemeral context manager for IPv4 and IPv6 diff -Nru cloud-init-23.4.4/cloudinit/net/netplan.py cloud-init-24.1.3/cloudinit/net/netplan.py --- cloud-init-23.4.4/cloudinit/net/netplan.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/net/netplan.py 2024-03-27 13:14:04.000000000 +0000 @@ -18,6 +18,8 @@ ) from cloudinit.net.network_state import NET_CONFIG_TO_V2, NetworkState +CLOUDINIT_NETPLAN_FILE = "/etc/netplan/50-cloud-init.yaml" + KNOWN_SNAPD_CONFIG = b"""\ # This is the initial network config. 
 # It can be overwritten by cloud-init or console-conf.
@@ -210,7 +212,7 @@
     tpath = subp.target_path(target, "etc/netplan/00-snapd-config.yaml")
     if not os.path.isfile(tpath):
         return
-    content = util.load_file(tpath, decode=False)
+    content = util.load_binary_file(tpath)
     if content != KNOWN_SNAPD_CONFIG:
         return
@@ -242,9 +244,7 @@
     def __init__(self, config=None):
         if not config:
             config = {}
-        self.netplan_path = config.get(
-            "netplan_path", "etc/netplan/50-cloud-init.yaml"
-        )
+        self.netplan_path = config.get("netplan_path", CLOUDINIT_NETPLAN_FILE)
         self.netplan_header = config.get("netplan_header", None)
         self._postcmds = config.get("postcmds", False)
         self.clean_default = config.get("clean_default", True)
diff -Nru cloud-init-23.4.4/cloudinit/net/network_manager.py cloud-init-24.1.3/cloudinit/net/network_manager.py
--- cloud-init-23.4.4/cloudinit/net/network_manager.py	2024-02-27 15:17:52.000000000 +0000
+++ cloud-init-24.1.3/cloudinit/net/network_manager.py	2024-03-27 13:14:04.000000000 +0000
@@ -12,15 +12,21 @@
 import logging
 import os
 import uuid
-from typing import Optional
+from typing import List, Optional
 
 from cloudinit import subp, util
-from cloudinit.net import is_ipv6_address, renderer, subnet_is_ipv6
+from cloudinit.net import (
+    is_ipv6_address,
+    is_ipv6_network,
+    renderer,
+    subnet_is_ipv6,
+)
 from cloudinit.net.network_state import NetworkState
+from cloudinit.net.sysconfig import available_nm_ifcfg_rh
 
 NM_RUN_DIR = "/etc/NetworkManager"
 NM_LIB_DIR = "/usr/lib/NetworkManager"
-NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf"
+IFCFG_CFG_FILE = "/etc/sysconfig/network-scripts"
 NM_IPV6_ADDR_GEN_CONF = """# This is generated by cloud-init. Do not edit.
 #
 [.config]
@@ -71,6 +77,57 @@
         if not self.config.has_option(section, option):
             self.config[section][option] = value
 
+    def _config_option_is_set(self, section, option):
+        """
+        Checks if a config option is set. Returns True if it is,
+        else returns False.
+        """
+        return self.config.has_section(section) and self.config.has_option(
+            section, option
+        )
+
+    def _get_config_option(self, section, option):
+        """
+        Returns the value of a config option if it is set,
+        else returns None.
+        """
+        if self._config_option_is_set(section, option):
+            return self.config[section][option]
+        else:
+            return None
+
+    def _change_set_config_option(self, section, option, value):
+        """
+        Overrides the value of a config option if it is already set.
+        Else, if the config option is not set, it does nothing.
+        """
+        if self._config_option_is_set(section, option):
+            self.config[section][option] = value
+
+    def _set_mayfail_true_if_both_false_dhcp(self):
+        """
+        If 'may-fail' is set to False for both ipv4 and ipv6,
+        set it to True for both of them.
+        """
+        for family in ["ipv4", "ipv6"]:
+            if self._get_config_option(family, "may-fail") != "false":
+                # if either the ipv4 or ipv6 section is not configured,
+                # or if 'may-fail' is not 'false' for either of them,
+                # do not do anything.
+                return
+            if self._get_config_option(family, "method") not in [
+                "dhcp",
+                "auto",
+            ]:
+                # if either v4 or v6 is not dhcp/auto, do not do anything.
+                return
+
+        # If we landed here, it means both ipv4 and ipv6 are configured
+        # with dhcp/auto and both have 'may-fail' set to 'false'. So set
+        # both to 'true'.
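A minimal sketch of what the normalization below does, using configparser to stand in for the renderer's internal config object (the connection contents are hypothetical):

    import configparser

    conn = configparser.ConfigParser()
    conn.read_string(
        "[ipv4]\nmethod=auto\nmay-fail=false\n"
        "[ipv6]\nmethod=auto\nmay-fail=false\n"
    )
    # Both families are dhcp/auto with may-fail=false, so both flip to
    # true: either address family obtaining a lease is then enough to
    # bring the connection up.
    for family in ("ipv4", "ipv6"):
        conn[family]["may-fail"] = "true"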
+        for family in ["ipv4", "ipv6"]:
+            self._change_set_config_option(family, "may-fail", "true")
+
     def _set_ip_method(self, family, subnet_type):
         """
         Ensures there's appropriate [ipv4]/[ipv6] for given family
@@ -105,28 +162,56 @@
         if self.config[family]["method"] == "auto" and method == "manual":
             return
 
-        if (
-            subnet_type == "ipv6_dhcpv6-stateful"
-            or subnet_type == "ipv6_dhcpv6-stateless"
-            or subnet_type == "ipv6_slaac"
-        ):
+        if subnet_type in [
+            "ipv6_dhcpv6-stateful",
+            "ipv6_dhcpv6-stateless",
+            "ipv6_slaac",
+        ]:
             # set ipv4 method to 'disabled' to align with sysconfig renderer.
             self._set_default("ipv4", "method", "disabled")
 
         self.config[family]["method"] = method
         self._set_default(family, "may-fail", "false")
 
+    def _get_next_numbered_section(self, section, key_prefix) -> str:
+        if not self.config.has_section(section):
+            self.config[section] = {}
+        for index in itertools.count(1):
+            key = f"{key_prefix}{index}"
+            if not self.config.has_option(section, key):
+                return key
+        return "not_possible"  # for typing
+
     def _add_numbered(self, section, key_prefix, value):
         """
         Adds a numbered property, such as address<n> or route<n>, ensuring
         the appropriate value gets used for <n>.
         """
+        key = self._get_next_numbered_section(section, key_prefix)
+        self.config[section][key] = value
 
-        for index in itertools.count(1):
-            key = f"{key_prefix}{index}"
-            if not self.config.has_option(section, key):
-                self.config[section][key] = value
-                break
+    def _add_route_options(self, section, route, key, value):
+        """Add route options to a given route
+
+        Example:
+        Given:
+            section: ipv4
+            route: route0
+            key: mtu
+            value: 500
+
+        Create line under [ipv4] section:
+            route0_options=mtu=500
+
+        If the line already exists, then append the new key/value pair
+        """
+        numbered_key = f"{route}_options"
+        route_options = self.config[section].get(numbered_key)
+        self.config[section][numbered_key] = (
+            f"{route_options},{key}={value}"
+            if route_options
+            else f"{key}={value}"
+        )
 
     def _add_address(self, family, subnet):
         """
@@ -136,40 +221,40 @@
         value = subnet["address"] + "/" + str(subnet["prefix"])
         self._add_numbered(family, "address", value)
 
-    def _add_route(self, family, route):
-        """
-        Adds a ipv[46].route property.
-        """
-
-        value = route["network"] + "/" + str(route["prefix"])
+    def _add_route(self, route):
+        """Adds an ipv[46].route property."""
+        # Because network v2 route definitions can have mixed v4 and v6
+        # routes, determine the family per route based on the network
+        family = "ipv6" if is_ipv6_network(route["network"]) else "ipv4"
+        value = f'{route["network"]}/{route["prefix"]}'
         if "gateway" in route:
-            value = value + "," + route["gateway"]
-        self._add_numbered(family, "route", value)
+            value += f',{route["gateway"]}'
+        route_key = self._get_next_numbered_section(family, "route")
+        self.config[family][route_key] = value
+        if "mtu" in route:
+            self._add_route_options(family, route_key, "mtu", route["mtu"])
 
-    def _add_nameserver(self, dns):
+    def _add_nameserver(self, dns: str) -> None:
         """
         Extends the ipv[46].dns property with a name server.
         """
-
-        # FIXME: the subnet contains IPv4 and IPv6 name server mixed
-        # together. We might be getting an IPv6 name server while
-        # we're dealing with an IPv4 subnet. Sort this out by figuring
-        # out the correct family and making sure a valid section exist.
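    # Illustration (hypothetical values): adding "8.8.8.8" and then
    # "2001:4860:4860::8888" via the rewritten method below yields
    # dns=8.8.8.8; under [ipv4] and dns=2001:4860:4860::8888; under
    # [ipv6]. A family's dns key is only written when that section
    # already exists.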
family = "ipv6" if is_ipv6_address(dns) else "ipv4" - self._set_default(family, "method", "disabled") - - self._set_default(family, "dns", "") - self.config[family]["dns"] = self.config[family]["dns"] + dns + ";" + if self.config.has_section(family): + self._set_default(family, "dns", "") + self.config[family]["dns"] = self.config[family]["dns"] + dns + ";" - def _add_dns_search(self, family, dns_search): + def _add_dns_search(self, dns_search: List[str]) -> None: """ Extends the ipv[46].dns-search property with a name server. """ - - self._set_default(family, "dns-search", "") - self.config[family]["dns-search"] = ( - self.config[family]["dns-search"] + ";".join(dns_search) + ";" - ) + for family in ["ipv4", "ipv6"]: + if self.config.has_section(family): + self._set_default(family, "dns-search", "") + self.config[family]["dns-search"] = ( + self.config[family]["dns-search"] + + ";".join(dns_search) + + ";" + ) def con_uuid(self): """ @@ -190,7 +275,7 @@ """ return addr.replace("-", ":").upper() - def render_interface(self, iface, renderer): + def render_interface(self, iface, network_state, renderer): """ Integrate information from network state interface information into the connection. Most of the work is done here. @@ -251,6 +336,8 @@ device_mtu = iface["mtu"] ipv4_mtu = None + found_nameservers = [] + found_dns_search = [] # Deal with Layer 3 configuration for subnet in iface["subnets"]: @@ -262,15 +349,49 @@ if "gateway" in subnet: self.config[family]["gateway"] = subnet["gateway"] for route in subnet["routes"]: - self._add_route(family, route) + self._add_route(route) + # Add subnet-level DNS if "dns_nameservers" in subnet: - for nameserver in subnet["dns_nameservers"]: - self._add_nameserver(nameserver) + found_nameservers.extend(subnet["dns_nameservers"]) if "dns_search" in subnet: - self._add_dns_search(family, subnet["dns_search"]) + found_dns_search.extend(subnet["dns_search"]) if family == "ipv4" and "mtu" in subnet: ipv4_mtu = subnet["mtu"] + # Add interface-level DNS + if "dns" in iface: + found_nameservers += [ + dns + for dns in iface["dns"]["nameservers"] + if dns not in found_nameservers + ] + found_dns_search += [ + search + for search in iface["dns"]["search"] + if search not in found_dns_search + ] + + # We prefer any interface-specific DNS entries, but if we do not + # have any, add the global DNS to the connection + if not found_nameservers and network_state.dns_nameservers: + found_nameservers = network_state.dns_nameservers + if not found_dns_search and network_state.dns_searchdomains: + found_dns_search = network_state.dns_searchdomains + + # Write out all DNS entries to the connection + for nameserver in found_nameservers: + self._add_nameserver(nameserver) + if found_dns_search: + self._add_dns_search(found_dns_search) + + # we do not want to set may-fail to false for both ipv4 and ipv6 dhcp + # at the at the same time. This will make the network configuration + # work only when both ipv4 and ipv6 dhcp succeeds. This may not be + # what we want. If we have configured both ipv4 and ipv6 dhcp, any one + # succeeding should be enough. Therefore, if "may-fail" is set to + # False for both ipv4 and ipv6 dhcp, set them both to True. + self._set_mayfail_true_if_both_false_dhcp() + if ipv4_mtu is None: ipv4_mtu = device_mtu if not ipv4_mtu == device_mtu: @@ -346,7 +467,10 @@ class Renderer(renderer.Renderer): - """Renders network information in a NetworkManager keyfile format.""" + """Renders network information in a NetworkManager keyfile format. 
+
+    See https://networkmanager.dev/docs/api/latest/nm-settings-keyfile.html
+    """
 
     def __init__(self, config=None):
         self.connections = {}
@@ -377,13 +501,13 @@
         # Now render the actual interface configuration
         for iface in network_state.iter_interfaces():
             conn = self.connections[iface["name"]]
-            conn.render_interface(iface, self)
+            conn.render_interface(iface, network_state, self)
 
         # And finally write the files
         for con_id, conn in self.connections.items():
             if not conn.valid():
                 continue
-            name = conn_filename(con_id, target)
+            name = nm_conn_filename(con_id, target)
             util.write_file(name, conn.dump(), 0o600)
 
         # Select EUI64 to be used by default by NM for creating the address
@@ -393,12 +517,39 @@
         )
 
 
-def conn_filename(con_id, target=None):
+def nm_conn_filename(con_id, target=None):
     target_con_dir = subp.target_path(target, NM_RUN_DIR)
     con_file = f"cloud-init-{con_id}.nmconnection"
     return f"{target_con_dir}/system-connections/{con_file}"
 
 
+def sysconfig_conn_filename(devname, target=None):
+    target_con_dir = subp.target_path(target, IFCFG_CFG_FILE)
+    con_file = f"ifcfg-{devname}"
+    return f"{target_con_dir}/{con_file}"
+
+
+def conn_filename(devname):
+    """
+    This function returns the name of the interface config file.
+    It first checks for the presence of a NetworkManager connection file.
+    If that is absent and the ifcfg-rh plugin for NetworkManager is
+    available, it returns the name of the ifcfg file if that file is
+    present. If the plugin is not present, or the plugin is present but
+    the ifcfg file is not, it returns None.
+    This function is called from the NetworkManagerActivator class in
+    activators.py.
+    """
+    conn_file = nm_conn_filename(devname)
+    # If the NetworkManager connection file is absent, also check for
+    # ifcfg files for the same interface (if the nm-ifcfg-rh plugin is
+    # present, NetworkManager can handle ifcfg files). If both the
+    # connection file and the ifcfg file are absent, return None.
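    # Spelled out for a hypothetical device "eth0", the lookup below
    # resolves in this order:
    # 1. /etc/NetworkManager/system-connections/cloud-init-eth0.nmconnection
    # 2. /etc/sysconfig/network-scripts/ifcfg-eth0
    #    (only consulted when the nm-ifcfg-rh plugin is available)
    # 3. None, when neither file exists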
+ if not os.path.isfile(conn_file) and available_nm_ifcfg_rh(): + conn_file = sysconfig_conn_filename(devname) + return conn_file if os.path.isfile(conn_file) else None + + def cloud_init_nm_conf_filename(target=None): target_con_dir = subp.target_path(target, NM_RUN_DIR) conf_file = "30-cloud-init-ip6-addr-gen-mode.conf" @@ -410,7 +561,6 @@ # It is imported here to avoid circular import from cloudinit.distros import uses_systemd - config_present = os.path.isfile(subp.target_path(target, path=NM_CFG_FILE)) nmcli_present = subp.which("nmcli", target=target) service_active = True if uses_systemd(): @@ -419,4 +569,4 @@ except subp.ProcessExecutionError: service_active = False - return config_present and bool(nmcli_present) and service_active + return bool(nmcli_present) and service_active diff -Nru cloud-init-23.4.4/cloudinit/net/network_state.py cloud-init-24.1.3/cloudinit/net/network_state.py --- cloud-init-23.4.4/cloudinit/net/network_state.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/net/network_state.py 2024-03-27 13:14:04.000000000 +0000 @@ -336,7 +336,7 @@ if iface: nameservers, search = dns iface["dns"] = { - "addresses": nameservers, + "nameservers": nameservers, "search": search, } @@ -812,7 +812,6 @@ name_cmd.update({"search": search}) if len(dns) > 0: name_cmd.update({"address": dns}) - self.handle_nameserver(name_cmd) mac_address: Optional[str] = dev_cfg.get("match", {}).get( "macaddress" @@ -926,6 +925,7 @@ "destination": route.get("to"), "gateway": route.get("via"), "metric": route.get("metric"), + "mtu": route.get("mtu"), } ) ) diff -Nru cloud-init-23.4.4/cloudinit/net/networkd.py cloud-init-24.1.3/cloudinit/net/networkd.py --- cloud-init-23.4.4/cloudinit/net/networkd.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/net/networkd.py 2024-03-27 13:14:04.000000000 +0000 @@ -221,12 +221,6 @@ def parse_dns(self, iface, cfg: CfgParser, ns: NetworkState): sec = "Network" - dns_cfg_map = { - "search": "Domains", - "nameservers": "DNS", - "addresses": "DNS", - } - dns = iface.get("dns") if not dns and ns.version == 1: dns = { @@ -236,9 +230,10 @@ elif not dns and ns.version == 2: return - for k, v in dns_cfg_map.items(): - if k in dns and dns[k]: - cfg.update_section(sec, v, " ".join(dns[k])) + if dns.get("search"): + cfg.update_section(sec, "Domains", " ".join(dns["search"])) + if dns.get("nameservers"): + cfg.update_section(sec, "DNS", " ".join(dns["nameservers"])) def parse_dhcp_overrides(self, cfg: CfgParser, device, dhcp, version): dhcp_config_maps = { diff -Nru cloud-init-23.4.4/cloudinit/net/sysconfig.py cloud-init-24.1.3/cloudinit/net/sysconfig.py --- cloud-init-23.4.4/cloudinit/net/sysconfig.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/net/sysconfig.py 2024-03-27 13:14:04.000000000 +0000 @@ -43,7 +43,7 @@ "Created by cloud-init automatically, do not edit.", "", ] - for i in range(0, len(lines)): + for i in range(len(lines)): if lines[i]: lines[i] = sep + " " + lines[i] else: @@ -835,7 +835,9 @@ return None content = resolv_conf.ResolvConf("") if existing_dns_path and os.path.isfile(existing_dns_path): - content = resolv_conf.ResolvConf(util.load_file(existing_dns_path)) + content = resolv_conf.ResolvConf( + util.load_text_file(existing_dns_path) + ) for nameserver in network_state.dns_nameservers: content.add_nameserver(nameserver) for searchdomain in network_state.dns_searchdomains: diff -Nru cloud-init-23.4.4/cloudinit/persistence.py cloud-init-24.1.3/cloudinit/persistence.py --- 
cloud-init-23.4.4/cloudinit/persistence.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/persistence.py 2024-03-27 13:14:04.000000000 +0000 @@ -13,7 +13,7 @@ use it. Versioning is done at the class level. The current version of a class's pickle should be set in the class variable - ``_ci_pkl_version``, as an int. If not overriden, it will default to 0. + ``_ci_pkl_version``, as an int. If not overridden, it will default to 0. On unpickle, the object's state will be restored and then ``self._unpickle`` is called with the version of the stored pickle as the diff -Nru cloud-init-23.4.4/cloudinit/settings.py cloud-init-24.1.3/cloudinit/settings.py --- cloud-init-23.4.4/cloudinit/settings.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/settings.py 2024-03-27 13:14:04.000000000 +0000 @@ -49,6 +49,7 @@ "VMware", "NWCS", "Akamai", + "WSL", # At the end to act as a 'catch' when none of the above work... "None", ], @@ -74,3 +75,5 @@ # Used to sanity check incoming handlers/modules frequencies FREQUENCIES = [PER_INSTANCE, PER_ALWAYS, PER_ONCE] + +HOTPLUG_ENABLED_FILE = "/var/lib/cloud/hotplug.enabled" diff -Nru cloud-init-23.4.4/cloudinit/sources/DataSourceAliYun.py cloud-init-24.1.3/cloudinit/sources/DataSourceAliYun.py --- cloud-init-23.4.4/cloudinit/sources/DataSourceAliYun.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/DataSourceAliYun.py 2024-03-27 13:14:04.000000000 +0000 @@ -1,5 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. +import copy import logging from typing import List @@ -29,6 +30,7 @@ def __init__(self, sys_cfg, distro, paths): super(DataSourceAliYun, self).__init__(sys_cfg, distro, paths) + self.default_update_events = copy.deepcopy(self.default_update_events) self.default_update_events[EventScope.NETWORK].add(EventType.BOOT) def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): diff -Nru cloud-init-23.4.4/cloudinit/sources/DataSourceAltCloud.py cloud-init-24.1.3/cloudinit/sources/DataSourceAltCloud.py --- cloud-init-23.4.4/cloudinit/sources/DataSourceAltCloud.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/DataSourceAltCloud.py 2024-03-27 13:14:04.000000000 +0000 @@ -58,10 +58,10 @@ # First try deltacloud_user_data_file. On failure try user_data_file. 
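The hunks throughout this area swap util.load_file() for the explicit helpers introduced in this release; a minimal sketch of the new calling convention (paths are illustrative):

    from cloudinit import util

    text = util.load_text_file("/etc/hostname")           # returns str
    blob = util.load_binary_file("/sys/hypervisor/uuid")  # returns bytes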
try: - user_data = util.load_file(deltacloud_user_data_file).strip() + user_data = util.load_text_file(deltacloud_user_data_file).strip() except IOError: try: - user_data = util.load_file(user_data_file).strip() + user_data = util.load_text_file(user_data_file).strip() except IOError: util.logexc(LOG, "Failed accessing user data file.") return None @@ -100,7 +100,9 @@ """ if os.path.exists(CLOUD_INFO_FILE): try: - cloud_type = util.load_file(CLOUD_INFO_FILE).strip().upper() + cloud_type = ( + util.load_text_file(CLOUD_INFO_FILE).strip().upper() + ) except IOError: util.logexc( LOG, diff -Nru cloud-init-23.4.4/cloudinit/sources/DataSourceAzure.py cloud-init-24.1.3/cloudinit/sources/DataSourceAzure.py --- cloud-init-23.4.4/cloudinit/sources/DataSourceAzure.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/DataSourceAzure.py 2024-03-27 13:14:04.000000000 +0000 @@ -33,7 +33,6 @@ from cloudinit.sources.helpers import netlink from cloudinit.sources.helpers.azure import ( DEFAULT_WIRESERVER_ENDPOINT, - BrokenAzureDataSource, NonAzureDataSource, OvfEnvXml, azure_ds_reporter, @@ -41,11 +40,10 @@ build_minimal_ovf, dhcp_log_cb, get_boot_telemetry, - get_ip_from_lease_value, get_metadata_from_fabric, get_system_info, - push_log_to_kvp, report_diagnostic_event, + report_dmesg_to_kvp, report_failure_to_fabric, ) from cloudinit.url_helper import UrlError @@ -58,7 +56,7 @@ ) except (ImportError, AttributeError): try: - import passlib + import passlib.hash blowfish_hash = passlib.hash.sha512_crypt.hash except ImportError: @@ -313,7 +311,6 @@ class DataSourceAzure(sources.DataSource): - dsname = "Azure" default_update_events = { EventScope.NETWORK: { @@ -501,9 +498,7 @@ # Update wireserver IP from DHCP options. if "unknown-245" in lease: - self._wireserver_endpoint = get_ip_from_lease_value( - lease["unknown-245"] - ) + self._wireserver_endpoint = lease["unknown-245"] driver = device_driver(iface) ephipv4 = self._ephemeral_dhcp_ctx._ephipv4 @@ -616,10 +611,6 @@ "%s was not mountable" % src, logger_func=LOG.debug ) continue - except BrokenAzureDataSource as exc: - msg = "BrokenAzureDataSource: %s" % exc - report_diagnostic_event(msg, logger_func=LOG.error) - raise sources.InvalidMetaDataException(msg) else: msg = ( "Unable to find provisioning media, falling back to IMDS " @@ -992,7 +983,7 @@ ) system_uuid = identity.query_system_uuid() if os.path.exists(prev_iid_path): - previous = util.load_file(prev_iid_path).strip() + previous = util.load_text_file(prev_iid_path).strip() swapped_id = identity.byte_swap_system_uuid(system_uuid) # Older kernels than 4.15 will have UPPERCASE product_uuid. @@ -1185,7 +1176,7 @@ logger_func=LOG.info, ) sleep(31536000) - raise BrokenAzureDataSource("Shutdown failure for PPS disk.") + raise errors.ReportableErrorOsDiskPpsFailure() @azure_ds_telemetry_reporter def _wait_for_pps_running_reuse(self) -> None: @@ -1306,6 +1297,7 @@ f"Azure datasource failure occurred: {error.as_encoded_report()}", logger_func=LOG.error, ) + report_dmesg_to_kvp() reported = kvp.report_failure_to_host(error) if host_only: return reported @@ -1365,11 +1357,13 @@ :returns: List of SSH keys, if requested. 
""" + report_dmesg_to_kvp() kvp.report_success_to_host() try: data = get_metadata_from_fabric( endpoint=self._wireserver_endpoint, + distro=self.distro, iso_dev=self._iso_dev, pubkey_info=pubkey_info, ) @@ -1473,7 +1467,7 @@ preserve_ntfs=self.ds_cfg.get(DS_CFG_KEY_PRESERVE_NTFS, False), ) finally: - push_log_to_kvp(self.sys_cfg["def_log_file"]) + report_dmesg_to_kvp() return @property @@ -1822,7 +1816,7 @@ if not files: files = {} util.ensure_dir(datadir, dirmode) - for (name, content) in files.items(): + for name, content in files.items(): fname = os.path.join(datadir, name) if "ovf-env.xml" in name: content = _redact_password(content, fname) @@ -1836,7 +1830,7 @@ :return: Tuple of metadata, configuration, userdata dicts. :raises NonAzureDataSource: if XML is not in Azure's format. - :raises BrokenAzureDataSource: if XML is unparseable or invalid. + :raises errors.ReportableError: if XML is unparsable or invalid. """ ovf_env = OvfEnvXml.parse_text(contents) md: Dict[str, Any] = {} @@ -1876,6 +1870,12 @@ "PreprovisionedVMType: %s" % ovf_env.preprovisioned_vm_type, logger_func=LOG.info, ) + + cfg["ProvisionGuestProxyAgent"] = ovf_env.provision_guest_proxy_agent + report_diagnostic_event( + "ProvisionGuestProxyAgent: %s" % ovf_env.provision_guest_proxy_agent, + logger_func=LOG.info, + ) return (md, ud, cfg) @@ -1903,12 +1903,12 @@ # now update ds_cfg to reflect contents pass in config if source is None: return None - seed = util.load_file(source, quiet=True, decode=False) + seed = util.load_binary_file(source, quiet=True) - # The seed generally contains non-Unicode characters. load_file puts + # The seed generally contains non-Unicode characters. load_binary_file puts # them into bytes (in python 3). - # bytes is a non-serializable type, and the handler load_file - # uses applies b64 encoding *again* to handle it. The simplest solution + # bytes is a non-serializable type, and the handler + # used applies b64 encoding *again* to handle it. The simplest solution # is to just b64encode the data and then decode it to a serializable # string. Same number of bits of entropy, just with 25% more zeroes. # There's no need to undo this base64-encoding when the random seed is @@ -1953,7 +1953,7 @@ ) -> dict: """Convert imds network metadata dictionary to network v2 configuration. - :param: network_metadata: Dict of "network" key from instance metdata. + :param: network_metadata: Dict of "network" key from instance metadata. :return: Dictionary containing network version 2 standard configuration. 
""" diff -Nru cloud-init-23.4.4/cloudinit/sources/DataSourceBigstep.py cloud-init-24.1.3/cloudinit/sources/DataSourceBigstep.py --- cloud-init-23.4.4/cloudinit/sources/DataSourceBigstep.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/DataSourceBigstep.py 2024-03-27 13:14:04.000000000 +0000 @@ -41,7 +41,7 @@ self.paths.cloud_dir, "data", "seed", "bigstep", "url" ) try: - content = util.load_file(url_file) + content = util.load_text_file(url_file) except IOError as e: # If the file doesn't exist, then the server probably isn't a # Bigstep instance; otherwise, another problem exists which needs diff -Nru cloud-init-23.4.4/cloudinit/sources/DataSourceCloudStack.py cloud-init-24.1.3/cloudinit/sources/DataSourceCloudStack.py --- cloud-init-23.4.4/cloudinit/sources/DataSourceCloudStack.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/DataSourceCloudStack.py 2024-03-27 13:14:04.000000000 +0000 @@ -15,6 +15,7 @@ import logging import os import time +from contextlib import suppress from socket import gaierror, getaddrinfo, inet_ntoa from struct import pack @@ -87,12 +88,84 @@ # Cloudstack has its metadata/userdata URLs located at # http:///latest/ self.api_ver = "latest" - self.vr_addr = get_vr_address() + + self.distro = distro + self.vr_addr = get_vr_address(self.distro) if not self.vr_addr: raise RuntimeError("No virtual router found!") - self.metadata_address = "http://%s/" % (self.vr_addr,) + self.metadata_address = f"http://{self.vr_addr}/" self.cfg = {} + def _get_domainname(self): + """ + Try obtaining a "domain-name" DHCP lease parameter: + - From systemd-networkd lease + - From dhclient lease + """ + LOG.debug("Try obtaining domain name from networkd leases") + domainname = dhcp.networkd_get_option_from_leases("DOMAINNAME") + if domainname: + return domainname + LOG.debug( + "Could not obtain FQDN from networkd leases. " + "Falling back to ISC dhclient" + ) + + # some distros might use isc-dhclient for network setup via their + # network manager. If this happens, the lease is more recent than the + # ephemeral lease, so use it first. + with suppress(dhcp.NoDHCPLeaseMissingDhclientError): + domain_name = dhcp.IscDhclient().get_key_from_latest_lease( + self.distro, "domain-name" + ) + if domain_name: + return domain_name + + LOG.debug( + "Could not obtain FQDN from ISC dhclient leases. " + "Falling back to %s", + self.distro.dhcp_client.client_name, + ) + + # If no distro leases were found, check the ephemeral lease that + # cloud-init set up. + with suppress(FileNotFoundError): + latest_lease = self.distro.dhcp_client.get_newest_lease( + self.distro.fallback_interface + ) + domain_name = latest_lease.get("domain-name") or None + return domain_name + LOG.debug("No dhcp leases found") + return None + + def get_hostname( + self, + fqdn=False, + resolve_ip=False, + metadata_only=False, + ): + """ + Returns instance's hostname / fqdn + First probes the parent class method. + + If fqdn is requested, and the parent method didn't return it, + then attach the domain-name from DHCP response. + """ + hostname = super().get_hostname(fqdn, resolve_ip, metadata_only) + if fqdn and "." not in hostname.hostname: + LOG.debug("FQDN requested") + domainname = self._get_domainname() + if domainname: + fqdn = f"{hostname.hostname}.{domainname}" + LOG.debug("Obtained the following FQDN: %s", fqdn) + return sources.DataSourceHostname(fqdn, hostname.is_default) + LOG.debug( + "Could not determine domain name for FQDN. 
" + "Fall back to hostname as an FQDN: %s", + fqdn, + ) + return hostname + def wait_for_metadata_service(self): url_params = self.get_url_params() @@ -196,7 +269,7 @@ def get_default_gateway(): # Returns the default gateway ip address in the dotted format. - lines = util.load_file("/proc/net/route").splitlines() + lines = util.load_text_file("/proc/net/route").splitlines() for line in lines: items = line.split("\t") if items[1] == "00000000": @@ -207,7 +280,7 @@ return None -def get_vr_address(): +def get_vr_address(distro): # Get the address of the virtual router via dhcp leases # If no virtual router is detected, fallback on default gateway. # See http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/4.8/virtual_machines/user-data.html # noqa @@ -229,18 +302,30 @@ ) return latest_address - # Try dhcp lease files next... - lease_file = dhcp.IscDhclient.get_latest_lease() - if not lease_file: - LOG.debug("No lease file found, using default gateway") - return get_default_gateway() - - lease_file = dhcp.IscDhclient.parse_dhcp_server_from_lease_file(lease_file) - if not latest_address: - # No virtual router found, fallback on default gateway - LOG.debug("No DHCP found, using default gateway") - return get_default_gateway() - return latest_address + # Try dhcp lease files next + # get_key_from_latest_lease() needs a Distro object to know which directory + # stores lease files + with suppress(dhcp.NoDHCPLeaseMissingDhclientError): + latest_address = dhcp.IscDhclient().get_key_from_latest_lease( + distro, "dhcp-server-identifier" + ) + if latest_address: + LOG.debug("Found SERVER_ADDRESS '%s' via dhclient", latest_address) + return latest_address + + with suppress(FileNotFoundError): + latest_lease = distro.dhcp_client.get_newest_lease(distro) + if latest_lease: + LOG.debug( + "Found SERVER_ADDRESS '%s' via ephemeral %s lease ", + latest_lease, + distro.dhcp_client.client_name, + ) + return latest_lease + + # No virtual router found, fallback to default gateway + LOG.debug("No DHCP found, using default gateway") + return get_default_gateway() # Used to match classes to dependencies diff -Nru cloud-init-23.4.4/cloudinit/sources/DataSourceConfigDrive.py cloud-init-24.1.3/cloudinit/sources/DataSourceConfigDrive.py --- cloud-init-23.4.4/cloudinit/sources/DataSourceConfigDrive.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/DataSourceConfigDrive.py 2024-03-27 13:14:04.000000000 +0000 @@ -27,7 +27,7 @@ LABEL_TYPES = ("config-2", "CONFIG-2") POSSIBLE_MOUNTS = ("sr", "cd") OPTICAL_DEVICES = tuple( - ("/dev/%s%s" % (z, i) for z in POSSIBLE_MOUNTS for i in range(0, 2)) + ("/dev/%s%s" % (z, i) for z in POSSIBLE_MOUNTS for i in range(2)) ) @@ -157,7 +157,7 @@ LOG.warning("Invalid content in vendor-data2: %s", e) self.vendordata2_raw = None - # network_config is an /etc/network/interfaces formated file and is + # network_config is an /etc/network/interfaces formatted file and is # obsolete compared to networkdata (from network_data.json) but both # might be present. self.network_eni = results.get("network_config") @@ -217,7 +217,7 @@ # hasn't declared itself found. 
fname = os.path.join(paths.get_cpath("data"), "instance-id") try: - return util.load_file(fname).rstrip("\n") + return util.load_text_file(fname).rstrip("\n") except IOError: return None diff -Nru cloud-init-23.4.4/cloudinit/sources/DataSourceEc2.py cloud-init-24.1.3/cloudinit/sources/DataSourceEc2.py --- cloud-init-23.4.4/cloudinit/sources/DataSourceEc2.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/DataSourceEc2.py 2024-03-27 13:14:04.000000000 +0000 @@ -12,12 +12,14 @@ import logging import os import time -from typing import List +from typing import Dict, List from cloudinit import dmi, net, sources from cloudinit import url_helper as uhelp from cloudinit import util, warnings +from cloudinit.distros import Distro from cloudinit.event import EventScope, EventType +from cloudinit.net import activators from cloudinit.net.dhcp import NoDHCPLeaseError from cloudinit.net.ephemeral import EphemeralIPNetwork from cloudinit.sources.helpers import ec2 @@ -53,9 +55,15 @@ # Cloud platforms that support IMDSv2 style metadata server IDMSV2_SUPPORTED_CLOUD_PLATFORMS = [CloudNames.AWS, CloudNames.ALIYUN] +# Only trigger hook-hotplug on NICs with Ec2 drivers. Avoid triggering +# it on docker virtual NICs and the like. LP: #1946003 +_EXTRA_HOTPLUG_UDEV_RULES = """ +ENV{ID_NET_DRIVER}=="vif|ena|ixgbevf", GOTO="cloudinit_hook" +GOTO="cloudinit_end" +""" -class DataSourceEc2(sources.DataSource): +class DataSourceEc2(sources.DataSource): dsname = "Ec2" # Default metadata urls that will be used if none are provided # They will be checked for 'resolveability' and some of the @@ -97,10 +105,23 @@ } } + default_update_events = { + EventScope.NETWORK: { + EventType.BOOT_NEW_INSTANCE, + EventType.HOTPLUG, + } + } + + extra_hotplug_udev_rules = _EXTRA_HOTPLUG_UDEV_RULES + def __init__(self, sys_cfg, distro, paths): super(DataSourceEc2, self).__init__(sys_cfg, distro, paths) self.metadata_address = None + def _unpickle(self, ci_pkl_version: int) -> None: + super()._unpickle(ci_pkl_version) + self.extra_hotplug_udev_rules = _EXTRA_HOTPLUG_UDEV_RULES + def _get_cloud_name(self): """Return the cloud name as identified during _get_data.""" return identify_platform() @@ -131,7 +152,7 @@ try: with EphemeralIPNetwork( self.distro, - self.fallback_interface, + self.distro.fallback_interface, ipv4=True, ipv6=True, ) as netw: @@ -299,7 +320,7 @@ connect_synchronously=False, ) except uhelp.UrlError: - # We use the raised exception to interupt the retry loop. + # We use the raised exception to interrupt the retry loop. # Nothing else to do here. pass @@ -402,7 +423,7 @@ LOG.debug("block-device-mapping not a dictionary: '%s'", bdm) return None - for (entname, device) in bdm.items(): + for entname, device in bdm.items(): if entname == name: found = device break @@ -500,7 +521,7 @@ func=self.get_data, ) - iface = self.fallback_interface + iface = self.distro.fallback_interface net_md = self.metadata.get("network") if isinstance(net_md, dict): # SRU_BLOCKER: xenial, bionic and eoan should default @@ -508,6 +529,7 @@ # behavior on those releases. result = convert_ec2_metadata_network_config( net_md, + self.distro, fallback_nic=iface, full_network_config=util.get_cfg_option_bool( self.ds_cfg, "apply_full_imds_network_config", True @@ -532,19 +554,6 @@ return self._network_config - @property - def fallback_interface(self): - if self._fallback_interface is None: - # fallback_nic was used at one point, so restored objects may - # have an attribute there. respect that if found. 
-            _legacy_fbnic = getattr(self, "fallback_nic", None)
-            if _legacy_fbnic:
-                self._fallback_interface = _legacy_fbnic
-                self.fallback_nic = None
-            else:
-                return super(DataSourceEc2, self).fallback_interface
-        return self._fallback_interface
-
     def crawl_metadata(self):
         """Crawl metadata service when available.
@@ -853,7 +862,7 @@
     """
     data = {}
     try:
-        uuid = util.load_file("/sys/hypervisor/uuid").strip()
+        uuid = util.load_text_file("/sys/hypervisor/uuid").strip()
         data["uuid_source"] = "hypervisor"
     except Exception:
         uuid = dmi.read_dmi_data("system-uuid")
@@ -884,8 +893,145 @@
     return data
 
 
+def _build_nic_order(
+    macs_metadata: Dict[str, Dict], macs: List[str]
+) -> Dict[str, int]:
+    """
+    Builds a dictionary containing macs as keys and nic orders as values,
+    taking into account `network-card` and `device-number` if present.
+
+    Note that the first NIC will be the primary NIC as it will be the one
+    with network-card == 0 and device-number == 0, if present.
+
+    @param macs_metadata: dictionary with mac address as key and contents
+        like: {"device-number": "0", "interface-id": "...", "local-ipv4s": ...}
+    @param macs: list of macs to consider
+
+    @return: Dictionary with macs as keys and nic orders as values.
+    """
+    nic_order: Dict[str, int] = {}
+    if len(macs) == 0 or len(macs_metadata) == 0:
+        return nic_order
+
+    valid_macs_metadata = filter(
+        # filter out nics without metadata (not a physical nic)
+        lambda mmd: mmd[1] is not None,
+        # filter by macs
+        map(lambda mac: (mac, macs_metadata.get(mac)), macs),
+    )
+
+    def _get_key_as_int_or(dikt, key, alt_value):
+        value = dikt.get(key, None)
+        if value is not None:
+            return int(value)
+        return alt_value
+
+    # Sort by (network-card, device-number) as some instances could have
+    # multiple network cards with repeated device indexes.
+    #
+    # On platforms where network-card and device-number are not present,
+    # such as AliYun, the order will be by mac, as before the introduction
+    # of this function.
+    return {
+        mac: i
+        for i, (mac, _mac_metadata) in enumerate(
+            sorted(
+                valid_macs_metadata,
+                key=lambda mmd: (
+                    _get_key_as_int_or(
+                        mmd[1], "network-card", float("infinity")
+                    ),
+                    _get_key_as_int_or(
+                        mmd[1], "device-number", float("infinity")
+                    ),
+                ),
+            )
+        )
+    }
+
+
+def _configure_policy_routing(
+    dev_config: dict,
+    *,
+    nic_name: str,
+    nic_metadata: dict,
+    distro: Distro,
+    is_ipv4: bool,
+    table: int,
+) -> None:
+    """
+    Configure policy-based routing on secondary NICs / secondary IPs to
+    ensure outgoing packets are routed via the correct interface.
+
+    @param: dev_config: network cfg v2 to be updated inplace.
+    @param: nic_name: nic name. Only used if ipv4.
+    @param: nic_metadata: nic metadata from IMDS.
+    @param: distro: Instance of Distro. Only used if ipv4.
+    @param: is_ipv4: Boolean indicating if we are acting over ipv4 or not.
+    @param: table: Routing table id.
+    """
+    if not dev_config.get("routes"):
+        dev_config["routes"] = []
+    if is_ipv4:
+        subnet_prefix_routes = nic_metadata["subnet-ipv4-cidr-block"]
+        ips = nic_metadata["local-ipv4s"]
+        try:
+            lease = distro.dhcp_client.dhcp_discovery(nic_name, distro=distro)
+            gateway = lease["routers"]
+        except NoDHCPLeaseError as e:
+            LOG.warning(
+                "Could not perform dhcp discovery on %s to find its "
+                "gateway. Not adding default route via the gateway. "
" + "Error: %s", + nic_name, + e, + ) + else: + # Add default route via the NIC's gateway + dev_config["routes"].append( + { + "to": "0.0.0.0/0", + "via": gateway, + "table": table, + }, + ) + else: + subnet_prefix_routes = nic_metadata["subnet-ipv6-cidr-blocks"] + ips = nic_metadata["ipv6s"] + + subnet_prefix_routes = ( + [subnet_prefix_routes] + if isinstance(subnet_prefix_routes, str) + else subnet_prefix_routes + ) + for prefix_route in subnet_prefix_routes: + dev_config["routes"].append( + { + "to": prefix_route, + "table": table, + }, + ) + + if not dev_config.get("routing-policy"): + dev_config["routing-policy"] = [] + # Packets coming from any IP associated with the current NIC + # will be routed using `table` routing table + ips = [ips] if isinstance(ips, str) else ips + for ip in ips: + dev_config["routing-policy"].append( + { + "from": ip, + "table": table, + }, + ) + + def convert_ec2_metadata_network_config( - network_md, macs_to_nics=None, fallback_nic=None, full_network_config=True + network_md, + distro, + macs_to_nics=None, + fallback_nic=None, + full_network_config=True, ): """Convert ec2 metadata to network config version 2 data dict. @@ -893,6 +1039,7 @@ generally formed as {"interfaces": {"macs": {}} where 'macs' is a dictionary with mac address as key and contents like: {"device-number": "0", "interface-id": "...", "local-ipv4s": ...} + @param: distro: instance of Distro. @param: macs_to_nics: Optional dict of mac addresses and nic names. If not provided, get_interfaces_by_mac is called to get it from the OS. @param: fallback_nic: Optionally provide the primary nic interface name. @@ -926,15 +1073,18 @@ netcfg["ethernets"][nic_name] = dev_config return netcfg # Apply network config for all nics and any secondary IPv4/v6 addresses - nic_idx = 0 - for mac, nic_name in sorted(macs_to_nics.items()): + is_netplan = distro.network_activator == activators.NetplanActivator + macs = sorted(macs_to_nics.keys()) + nic_order = _build_nic_order(macs_metadata, macs) + for mac in macs: + nic_name = macs_to_nics[mac] nic_metadata = macs_metadata.get(mac) if not nic_metadata: continue # Not a physical nic represented in metadata - # device-number is zero-indexed, we want it 1-indexed for the - # multiplication on the following line - nic_idx = int(nic_metadata.get("device-number", nic_idx)) + 1 - dhcp_override = {"route-metric": nic_idx * 100} + nic_idx = nic_order[mac] + is_primary_nic = nic_idx == 0 + # nic_idx + 1 to start route_metric at 100 (nic_idx is 0-indexed) + dhcp_override = {"route-metric": (nic_idx + 1) * 100} dev_config = { "dhcp4": True, "dhcp4-overrides": dhcp_override, @@ -942,18 +1092,58 @@ "match": {"macaddress": mac.lower()}, "set-name": nic_name, } + # This config only works on systems using Netplan because Networking + # config V2 does not support `routing-policy`, but this config is + # passed through on systems using Netplan. + # See: https://github.com/canonical/cloud-init/issues/4862 + # + # If device-number is not present (AliYun or other ec2-like platforms), + # do not configure source-routing as we cannot determine which is the + # primary NIC. 
+ table = 100 + nic_idx + if ( + is_netplan + and nic_metadata.get("device-number") + and not is_primary_nic + ): + dhcp_override["use-routes"] = True + _configure_policy_routing( + dev_config, + distro=distro, + nic_name=nic_name, + nic_metadata=nic_metadata, + is_ipv4=True, + table=table, + ) if nic_metadata.get("ipv6s"): # Any IPv6 addresses configured dev_config["dhcp6"] = True dev_config["dhcp6-overrides"] = dhcp_override + if ( + is_netplan + and nic_metadata.get("device-number") + and not is_primary_nic + ): + _configure_policy_routing( + dev_config, + distro=distro, + nic_name=nic_name, + nic_metadata=nic_metadata, + is_ipv4=False, + table=table, + ) dev_config["addresses"] = get_secondary_addresses(nic_metadata, mac) if not dev_config["addresses"]: dev_config.pop("addresses") # Since we found none configured + netcfg["ethernets"][nic_name] = dev_config - # Remove route-metric dhcp overrides if only one nic configured + # Remove route-metric dhcp overrides and routes / routing-policy if only + # one nic configured if len(netcfg["ethernets"]) == 1: for nic_name in netcfg["ethernets"].keys(): netcfg["ethernets"][nic_name].pop("dhcp4-overrides") netcfg["ethernets"][nic_name].pop("dhcp6-overrides", None) + netcfg["ethernets"][nic_name].pop("routes", None) + netcfg["ethernets"][nic_name].pop("routing-policy", None) return netcfg diff -Nru cloud-init-23.4.4/cloudinit/sources/DataSourceGCE.py cloud-init-24.1.3/cloudinit/sources/DataSourceGCE.py --- cloud-init-23.4.4/cloudinit/sources/DataSourceGCE.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/DataSourceGCE.py 2024-03-27 13:14:04.000000000 +0000 @@ -124,10 +124,10 @@ except NoDHCPLeaseError: continue if ret["success"]: - self._fallback_interface = candidate_nic + self.distro.fallback_interface = candidate_nic LOG.debug("Primary NIC found: %s.", candidate_nic) break - if self._fallback_interface is None: + if self.distro.fallback_interface is None: LOG.warning( "Did not find a fallback interface on %s.", self.cloud_name ) @@ -154,7 +154,7 @@ @property def launch_index(self): - # GCE does not provide lauch_index property. + # GCE does not provide launch_index property. return None def get_instance_id(self): @@ -222,7 +222,7 @@ except ValueError: return False - # Do not expire keys if there is no expriation timestamp. + # Do not expire keys if there is no expiration timestamp. 
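    # Illustration (hypothetical GCE key metadata entry handled here):
    #   ssh-rsa AAAA... google-ssh {"userName": "u@example.com",
    #                               "expireOn": "2024-03-27T00:00:00+0000"}
    # A malformed JSON payload or timestamp returns False (the key is
    # kept), and a key with no expireOn field never expires.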
if "expireOn" not in json_obj: return False diff -Nru cloud-init-23.4.4/cloudinit/sources/DataSourceIBMCloud.py cloud-init-24.1.3/cloudinit/sources/DataSourceIBMCloud.py --- cloud-init-23.4.4/cloudinit/sources/DataSourceIBMCloud.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/DataSourceIBMCloud.py 2024-03-27 13:14:04.000000000 +0000 @@ -192,7 +192,7 @@ uuid_path = "/sys/hypervisor/uuid" if not os.path.isfile(uuid_path): return None - return util.load_file(uuid_path).strip().lower() + return util.load_text_file(uuid_path).strip().lower() def _is_xen(): @@ -361,7 +361,7 @@ fpath = os.path.join(source_dir, path) raw = None try: - raw = util.load_file(fpath, decode=False) + raw = util.load_binary_file(fpath) except IOError as e: LOG.debug("Failed reading path '%s': %s", fpath, e) diff -Nru cloud-init-23.4.4/cloudinit/sources/DataSourceMAAS.py cloud-init-24.1.3/cloudinit/sources/DataSourceMAAS.py --- cloud-init-23.4.4/cloudinit/sources/DataSourceMAAS.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/DataSourceMAAS.py 2024-03-27 13:14:04.000000000 +0000 @@ -320,7 +320,7 @@ # Used to match classes to dependencies datasources = [ - (DataSourceMAAS, (sources.DEP_FILESYSTEM,)), + (DataSourceMAASLocal, (sources.DEP_FILESYSTEM,)), (DataSourceMAAS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] diff -Nru cloud-init-23.4.4/cloudinit/sources/DataSourceOVF.py cloud-init-24.1.3/cloudinit/sources/DataSourceOVF.py --- cloud-init-23.4.4/cloudinit/sources/DataSourceOVF.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/DataSourceOVF.py 2024-03-27 13:14:04.000000000 +0000 @@ -175,7 +175,7 @@ full_fn = os.path.join(dirname, fname) if os.path.isfile(full_fn): try: - contents = util.load_file(full_fn) + contents = util.load_text_file(full_fn) return (fname, contents) except Exception: util.logexc(LOG, "Failed loading ovf file %s", full_fn) diff -Nru cloud-init-23.4.4/cloudinit/sources/DataSourceOpenNebula.py cloud-init-24.1.3/cloudinit/sources/DataSourceOpenNebula.py --- cloud-init-23.4.4/cloudinit/sources/DataSourceOpenNebula.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/DataSourceOpenNebula.py 2024-03-27 13:14:04.000000000 +0000 @@ -435,7 +435,7 @@ ) from e try: path = os.path.join(source_dir, "context.sh") - content = util.load_file(path) + content = util.load_text_file(path) context = parse_shell_config(content, asuser=asuser) except subp.ProcessExecutionError as e: raise BrokenContextDiskDir( diff -Nru cloud-init-23.4.4/cloudinit/sources/DataSourceOpenStack.py cloud-init-24.1.3/cloudinit/sources/DataSourceOpenStack.py --- cloud-init-23.4.4/cloudinit/sources/DataSourceOpenStack.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/DataSourceOpenStack.py 2024-03-27 13:14:04.000000000 +0000 @@ -153,7 +153,9 @@ if self.perform_dhcp_setup: # Setup networking in init-local stage. 
try: - with EphemeralDHCPv4(self.distro, self.fallback_interface): + with EphemeralDHCPv4( + self.distro, self.distro.fallback_interface + ): results = util.log_time( logfunc=LOG.debug, msg="Crawl of metadata service", diff -Nru cloud-init-23.4.4/cloudinit/sources/DataSourceOracle.py cloud-init-24.1.3/cloudinit/sources/DataSourceOracle.py --- cloud-init-23.4.4/cloudinit/sources/DataSourceOracle.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/DataSourceOracle.py 2024-03-27 13:14:04.000000000 +0000 @@ -15,9 +15,11 @@ import base64 import ipaddress +import json import logging +import time from collections import namedtuple -from typing import Optional, Tuple +from typing import Dict, Optional, Tuple from cloudinit import atomic_helper, dmi, net, sources, util from cloudinit.distros.networking import NetworkConfig @@ -27,7 +29,7 @@ get_interfaces_by_mac, is_netfail_master, ) -from cloudinit.url_helper import UrlError, readurl +from cloudinit.url_helper import wait_for_url LOG = logging.getLogger(__name__) @@ -123,6 +125,11 @@ ) _network_config: dict = {"config": [], "version": 1} + perform_dhcp_setup = True + + # Careful...these can be overridden in __init__ + url_max_wait = 30 + url_timeout = 5 def __init__(self, sys_cfg, *args, **kwargs): super(DataSourceOracle, self).__init__(sys_cfg, *args, **kwargs) @@ -136,6 +143,21 @@ ) self._network_config_source = KlibcOracleNetworkConfigSource() + url_params = self.get_url_params() + self.url_max_wait = url_params.max_wait_seconds + self.url_timeout = url_params.timeout_seconds + + def _unpickle(self, ci_pkl_version: int) -> None: + super()._unpickle(ci_pkl_version) + if not hasattr(self, "_vnics_data"): + setattr(self, "_vnics_data", None) + if not hasattr(self, "_network_config_source"): + setattr( + self, + "_network_config_source", + KlibcOracleNetworkConfigSource(), + ) + def _has_network_config(self) -> bool: return bool(self._network_config.get("config", [])) @@ -148,23 +170,31 @@ self.system_uuid = _read_system_uuid() - network_context = ephemeral.EphemeralDHCPv4( - self.distro, - iface=net.find_fallback_nic(), - connectivity_url_data={ - "url": METADATA_PATTERN.format(version=2, path="instance"), - "headers": V2_HEADERS, - }, - ) + if self.perform_dhcp_setup: + network_context = ephemeral.EphemeralDHCPv4( + self.distro, + iface=net.find_fallback_nic(), + connectivity_url_data={ + "url": METADATA_PATTERN.format(version=2, path="instance"), + "headers": V2_HEADERS, + }, + ) + else: + network_context = util.nullcontext() fetch_primary_nic = not self._is_iscsi_root() fetch_secondary_nics = self.ds_cfg.get( "configure_secondary_nics", BUILTIN_DS_CONFIG["configure_secondary_nics"], ) + with network_context: fetched_metadata = read_opc_metadata( - fetch_vnics_data=fetch_primary_nic or fetch_secondary_nics + fetch_vnics_data=fetch_primary_nic or fetch_secondary_nics, + max_wait=self.url_max_wait, + timeout=self.url_timeout, ) + if not fetched_metadata: + return False data = self._crawled_metadata = fetched_metadata.instance_data self.metadata_address = METADATA_ROOT.format( @@ -332,6 +362,10 @@ self._network_config["ethernets"][name] = interface_config +class DataSourceOracleNet(DataSourceOracle): + perform_dhcp_setup = False + + def _read_system_uuid() -> Optional[str]: sys_uuid = dmi.read_dmi_data("system-uuid") return None if sys_uuid is None else sys_uuid.lower() @@ -342,15 +376,20 @@ return asset_tag == CHASSIS_ASSET_TAG -def _fetch(metadata_version: int, path: str, retries: int = 2) -> dict: - return readurl( - 
url=METADATA_PATTERN.format(version=metadata_version, path=path), - headers=V2_HEADERS if metadata_version > 1 else None, - retries=retries, - )._response.json() +def _url_version(url: str) -> int: + return 2 if url.startswith("http://169.254.169.254/opc/v2") else 1 + + +def _headers_cb(url: str) -> Optional[Dict[str, str]]: + return V2_HEADERS if _url_version(url) == 2 else None -def read_opc_metadata(*, fetch_vnics_data: bool = False) -> OpcMetadata: +def read_opc_metadata( + *, + fetch_vnics_data: bool = False, + max_wait=DataSourceOracle.url_max_wait, + timeout=DataSourceOracle.url_timeout, +) -> Optional[OpcMetadata]: """Fetch metadata from the /opc/ routes. :return: @@ -359,30 +398,60 @@ The JSON-decoded value of the instance data endpoint on the IMDS The JSON-decoded value of the vnics data endpoint if `fetch_vnics_data` is True, else None + or None if fetching metadata failed """ # Per Oracle, there are short windows (measured in milliseconds) throughout # an instance's lifetime where the IMDS is being updated and may 404 as a - # result. To work around these windows, we retry a couple of times. - metadata_version = 2 - try: - instance_data = _fetch(metadata_version, path="instance") - except UrlError: - metadata_version = 1 - instance_data = _fetch(metadata_version, path="instance") + # result. + urls = [ + METADATA_PATTERN.format(version=2, path="instance"), + METADATA_PATTERN.format(version=1, path="instance"), + ] + start_time = time.time() + instance_url, instance_response = wait_for_url( + urls, + max_wait=max_wait, + timeout=timeout, + headers_cb=_headers_cb, + sleep_time=0, + ) + if not instance_url: + LOG.warning("Failed to fetch IMDS metadata!") + return None + instance_data = json.loads(instance_response.decode("utf-8")) + + metadata_version = _url_version(instance_url) vnics_data = None if fetch_vnics_data: - try: - vnics_data = _fetch(metadata_version, path="vnics") - except UrlError: - util.logexc(LOG, "Failed to fetch IMDS network configuration!") + # This allows us to go over the max_wait time by the timeout length, + # but if we were able to retrieve instance metadata, that seems + # like a worthwhile tradeoff rather than having incomplete metadata. + vnics_url, vnics_response = wait_for_url( + [METADATA_PATTERN.format(version=metadata_version, path="vnics")], + max_wait=max_wait - (time.time() - start_time), + timeout=timeout, + headers_cb=_headers_cb, + sleep_time=0, + ) + if vnics_url: + vnics_data = json.loads(vnics_response.decode("utf-8")) + else: + LOG.warning("Failed to fetch IMDS network configuration!") return OpcMetadata(metadata_version, instance_data, vnics_data) # Used to match classes to dependencies datasources = [ (DataSourceOracle, (sources.DEP_FILESYSTEM,)), + ( + DataSourceOracleNet, + ( + sources.DEP_FILESYSTEM, + sources.DEP_NETWORK, + ), + ), ] diff -Nru cloud-init-23.4.4/cloudinit/sources/DataSourceRbxCloud.py cloud-init-24.1.3/cloudinit/sources/DataSourceRbxCloud.py --- cloud-init-23.4.4/cloudinit/sources/DataSourceRbxCloud.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/DataSourceRbxCloud.py 2024-03-27 13:14:04.000000000 +0000 @@ -24,7 +24,7 @@ def get_manage_etc_hosts(): - hosts = util.load_file(ETC_HOSTS, quiet=True) + hosts = util.load_text_file(ETC_HOSTS, quiet=True) if hosts: LOG.debug("/etc/hosts exists - setting manage_etc_hosts to False") return False @@ -153,11 +153,9 @@ @returns: A dict containing userdata, metadata and cfg based on metadata. 
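# How the v2-to-v1 fallback above works: both versioned URLs are handed to
# wait_for_url() at once, and _headers_cb() keys the Oracle auth header off
# whichever URL is currently being tried. A standalone check of that
# dispatch (the V2_HEADERS value is assumed from elsewhere in this module):
V2_HEADERS = {"Authorization": "Bearer Oracle"}

def _url_version(url: str) -> int:
    return 2 if url.startswith("http://169.254.169.254/opc/v2") else 1

def _headers_cb(url: str):
    return V2_HEADERS if _url_version(url) == 2 else None

assert _headers_cb("http://169.254.169.254/opc/v2/instance/") == V2_HEADERS
assert _headers_cb("http://169.254.169.254/opc/v1/instance/") is None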
""" meta_data = util.load_json( - text=util.load_file( - fname=os.path.join(mount_dir, "cloud.json"), decode=False - ) + text=util.load_binary_file(fname=os.path.join(mount_dir, "cloud.json")) ) - user_data = util.load_file( + user_data = util.load_text_file( fname=os.path.join(mount_dir, "user.data"), quiet=True ) if "vm" not in meta_data or "netadp" not in meta_data: diff -Nru cloud-init-23.4.4/cloudinit/sources/DataSourceScaleway.py cloud-init-24.1.3/cloudinit/sources/DataSourceScaleway.py --- cloud-init-23.4.4/cloudinit/sources/DataSourceScaleway.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/DataSourceScaleway.py 2024-03-27 13:14:04.000000000 +0000 @@ -19,7 +19,7 @@ from urllib3.connection import HTTPConnection from urllib3.poolmanager import PoolManager -from cloudinit import dmi, net, sources, url_helper, util +from cloudinit import dmi, sources, url_helper, util from cloudinit.event import EventScope, EventType from cloudinit.net.dhcp import NoDHCPLeaseError from cloudinit.net.ephemeral import EphemeralDHCPv4, EphemeralIPv6Network @@ -78,7 +78,7 @@ api_address, data=None, timeout=timeout, - # It's the caller's responsability to recall this function in case + # It's the caller's responsibility to recall this function in case # of exception. Don't let url_helper.readurl() retry by itself. retries=0, session=requests_session, @@ -171,7 +171,6 @@ self.retries = int(self.ds_cfg.get("retries", DEF_MD_RETRIES)) self.timeout = int(self.ds_cfg.get("timeout", DEF_MD_TIMEOUT)) self.max_wait = int(self.ds_cfg.get("max_wait", DEF_MD_MAX_WAIT)) - self._fallback_interface = None self._network_config = sources.UNSET self.metadata_urls = DS_BASE_URLS self.userdata_url = None @@ -181,6 +180,20 @@ if "metadata_urls" in self.ds_cfg.keys(): self.metadata_urls += self.ds_cfg["metadata_urls"] + def _unpickle(self, ci_pkl_version: int) -> None: + super()._unpickle(ci_pkl_version) + attr_defaults = { + "ephemeral_fixed_address": None, + "has_ipv4": True, + "max_wait": DEF_MD_MAX_WAIT, + "metadata_urls": DS_BASE_URLS, + "userdata_url": None, + "vendordata_url": None, + } + for attr in attr_defaults: + if not hasattr(self, attr): + setattr(self, attr, attr_defaults[attr]) + def _set_metadata_url(self, urls): """ Define metadata_url based upon api-metadata URL availability. @@ -267,9 +280,6 @@ def _get_data(self): - if self._fallback_interface is None: - self._fallback_interface = net.find_fallback_nic() - # The DataSource uses EventType.BOOT so we are called more than once. # Try to crawl metadata on IPv4 first and set has_ipv4 to False if we # timeout so we do not try to crawl on IPv4 more than once. @@ -280,7 +290,7 @@ # it will only reach timeout on VMs with only IPv6 addresses. 
with EphemeralDHCPv4( self.distro, - self._fallback_interface, + self.distro.fallback_interface, ) as ipv4: util.log_time( logfunc=LOG.debug, @@ -311,7 +321,7 @@ try: with EphemeralIPv6Network( self.distro, - self._fallback_interface, + self.distro.fallback_interface, ): util.log_time( logfunc=LOG.debug, @@ -346,9 +356,6 @@ if self._network_config != sources.UNSET: return self._network_config - if self._fallback_interface is None: - self._fallback_interface = net.find_fallback_nic() - if self.metadata["private_ip"] is None: # New method of network configuration @@ -377,13 +384,13 @@ ip_cfg["routes"] += [route] else: ip_cfg["routes"] = [route] - netcfg[self._fallback_interface] = ip_cfg + netcfg[self.distro.fallback_interface] = ip_cfg self._network_config = {"version": 2, "ethernets": netcfg} else: # Kept for backward compatibility netcfg = { "type": "physical", - "name": "%s" % self._fallback_interface, + "name": "%s" % self.distro.fallback_interface, } subnets = [{"type": "dhcp4"}] if self.metadata["ipv6"]: diff -Nru cloud-init-23.4.4/cloudinit/sources/DataSourceSmartOS.py cloud-init-24.1.3/cloudinit/sources/DataSourceSmartOS.py --- cloud-init-23.4.4/cloudinit/sources/DataSourceSmartOS.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/DataSourceSmartOS.py 2024-03-27 13:14:04.000000000 +0000 @@ -775,7 +775,7 @@ @param shebang: if no file magic, set shebang @param mode: file mode - Becuase of the way that Cloud-init executes scripts (no shell), + Because of the way that Cloud-init executes scripts (no shell), a script will fail to execute if it does not have a magic bit (shebang) set for the file. If shebang=True, then the script will be checked for a magic bit and, if it is missing, the SmartOS default of bash will be assumed. diff -Nru cloud-init-23.4.4/cloudinit/sources/DataSourceVMware.py cloud-init-24.1.3/cloudinit/sources/DataSourceVMware.py --- cloud-init-23.4.4/cloudinit/sources/DataSourceVMware.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/DataSourceVMware.py 2024-03-27 13:14:04.000000000 +0000 @@ -129,7 +129,7 @@ For CloudinitPrep customization, Network config Version 2 data is parsed from the customization specification. - envvar and guestinfo tranports: + envvar and guestinfo transports: Network Config Version 2 data is supported as long as the Linux distro's cloud-init package is new enough to parse the data. 
The metadata key "network.encoding" may be used to indicate the @@ -160,6 +160,32 @@ (DATA_ACCESS_METHOD_IMC, self.get_imc_data_fn, True), ] + def _unpickle(self, ci_pkl_version: int) -> None: + super()._unpickle(ci_pkl_version) + for attr in ("rpctool", "rpctool_fn"): + if not hasattr(self, attr): + setattr(self, attr, None) + if not hasattr(self, "cfg"): + setattr(self, "cfg", {}) + if not hasattr(self, "possible_data_access_method_list"): + setattr( + self, + "possible_data_access_method_list", + [ + ( + DATA_ACCESS_METHOD_ENVVAR, + self.get_envvar_data_fn, + False, + ), + ( + DATA_ACCESS_METHOD_GUESTINFO, + self.get_guestinfo_data_fn, + True, + ), + (DATA_ACCESS_METHOD_IMC, self.get_imc_data_fn, True), + ], + ) + def __str__(self): root = sources.DataSource.__str__(self) return "%s [seed=%s]" % (root, self.data_access_method) diff -Nru cloud-init-23.4.4/cloudinit/sources/DataSourceWSL.py cloud-init-24.1.3/cloudinit/sources/DataSourceWSL.py --- cloud-init-23.4.4/cloudinit/sources/DataSourceWSL.py 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/DataSourceWSL.py 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,261 @@ +# Copyright (C) 2024 Canonical Ltd. +# +# Author: Carlos Nihelton +# +# This file is part of cloud-init. See LICENSE file for license information. +""" Datasource to support the Windows Subsystem for Linux platform. """ + +import logging +import os +from pathlib import PurePath +from typing import List, cast + +from cloudinit import sources, subp, util + +LOG = logging.getLogger(__name__) + +WSLPATH_CMD = "/usr/bin/wslpath" + + +def wsl_path_2_win(path: str) -> PurePath: + """ + Translates a path inside the current WSL instance's filesystem to a + Windows accessible path. + + Example: + # Running under an instance named "CoolInstance" + root = wsl_path_2_win("/") # root == "//wsl.localhost/CoolInstance/" + + :param path: string representing a Linux path, whether existing or not. + """ + out, _ = subp.subp([WSLPATH_CMD, "-am", path]) + return PurePath(out.rstrip()) + + +def instance_name() -> str: + """ + Returns the name of the current WSL instance as seen from outside. + """ + root_net_path = wsl_path_2_win("/") + return root_net_path.name + + +def mounted_win_drives() -> List[str]: + """ + Return a list of mount points of the Windows drives inside the current + WSL instance, if drives are mounted, or an empty list otherwise. + """ + FS_TYPE = "9p" + OPTIONS_CONTAIN = "aname=drvfs" + + mounted = [] + for mnt in util.mounts().values(): + if mnt["fstype"] == FS_TYPE and OPTIONS_CONTAIN in mnt["opts"]: + mounted.append(mnt["mountpoint"]) + + return mounted + + +def win_path_2_wsl(path: str) -> PurePath: + """ + Returns a translation of a Windows path to a Linux path that can be + accessed inside the current instance filesystem. + + It requires the Windows drive mounting feature to be enabled and the + disk drive must be mounted for this to succeed. + + Example: + # Assuming Windows drives are mounted under /mnt/ and "S:" doesn't exist: + p = win_path_2_wsl("C:\\ProgramData") # p == "/mnt/c/ProgramData/" + n = win_path_2_wsl("S:\\CoolFolder") # Exception! S: is not mounted. + + :param path: string representing a Windows path. The root drive must exist, + although the path is not required to. + """ + out, _ = subp.subp([WSLPATH_CMD, "-au", path]) + return PurePath(out.rstrip()) + + +def cmd_executable() -> PurePath: + """ + Returns the Linux path to the Windows host's cmd.exe. 
+ """ + + mounts = mounted_win_drives() + if not mounts: + raise IOError("Windows drives are not mounted.") + + # cmd.exe path is being stable for decades. + candidate = "%s/Windows/System32/cmd.exe" + for mnt in mounts: + cmd = candidate % mnt + if not os.access(cmd, os.X_OK): + continue + + LOG.debug("Found cmd.exe at <%s>", cmd) + return PurePath(cmd) + + raise IOError( + "Couldn't find cmd.exe in any mount point: %s" % ", ".join(mounts) + ) + + +def cloud_init_data_dir() -> PurePath: + """ + Returns the Windows user profile directory translated as a Linux path + accessible inside the current WSL instance. + """ + cmd = cmd_executable() + + # cloud-init runs too early to rely on binfmt to execute Windows binaries. + # But we know that `/init` is the interpreter, so we can run it directly. + # See /proc/sys/fs/binfmt_misc/WSLInterop[-late] + # inside any WSL instance for more details. + home, _ = subp.subp(["/init", cmd.as_posix(), "/C", "echo %USERPROFILE%"]) + home = home.rstrip() + if not home: + raise subp.ProcessExecutionError( + "No output from cmd.exe to show the user profile dir." + ) + + win_profile_dir = win_path_2_wsl(home) + seed_dir = os.path.join(win_profile_dir, ".cloud-init") + if not os.path.isdir(seed_dir): + raise FileNotFoundError("%s directory doesn't exist." % seed_dir) + + return PurePath(seed_dir) + + +def candidate_user_data_file_names(instance_name) -> List[str]: + """ + Return a list of candidate file names that may contain user-data + in some supported format, ordered by precedence. + """ + distribution_id, version_id, version_codename = util.get_linux_distro() + version = version_id if version_id else version_codename + + return [ + # WSL instance specific: + "%s.user-data" % instance_name, + # release codename specific + "%s-%s.user-data" % (distribution_id, version), + # distribution specific (Alpine, Arch, Fedora, openSUSE, Ubuntu...) + "%s-all.user-data" % distribution_id, + # generic, valid for all WSL distros and instances. + "default.user-data", + ] + + +DEFAULT_INSTANCE_ID = "iid-datasource-wsl" + + +def load_instance_metadata(cloudinitdir: PurePath, instance_name: str) -> dict: + """ + Returns the relevant metadata loaded from cloudinit dir based on the + instance name + """ + metadata = {"instance-id": DEFAULT_INSTANCE_ID} + metadata_path = os.path.join( + cloudinitdir.as_posix(), "%s.meta-data" % instance_name + ) + try: + metadata = util.load_yaml(util.load_binary_file(metadata_path)) + except FileNotFoundError: + LOG.debug( + "No instance metadata found at %s. Using default instance-id.", + metadata_path, + ) + if not metadata or "instance-id" not in metadata: + # Parsed metadata file invalid + msg = ( + f" Metadata at {metadata_path} does not contain instance-id key." + f" Instead received: {metadata}" + ) + LOG.error(msg) + raise ValueError(msg) + + return metadata + + +class DataSourceWSL(sources.DataSource): + dsname = "WSL" + + def find_user_data_file(self, seed_dir: PurePath) -> PurePath: + """ + Finds the most precendent of the candidate files that may contain + user-data, if any, or None otherwise. + """ + + # Notice that by default file name casing is irrelevant here. Windows + # filenames are case insensitive. Even though accessed through Linux, + # path translation just works with whichever casing we try. + # But users can change that behavior with configuration + # (ref https://learn.microsoft.com/en-us/windows/wsl/case-sensitivity), + # thus better prevent it by always relying on case insensitive match. 
+ existing_files = { + ef.name.casefold(): ef.path for ef in os.scandir(seed_dir) + } + if not existing_files: + raise IOError("%s directory is empty" % seed_dir) + + folded_names = [ + f.casefold() + for f in candidate_user_data_file_names(self.instance_name) + ] + for filename in folded_names: + if filename in existing_files.keys(): + return PurePath(existing_files[filename]) + + raise IOError( + "%s doesn't contain any of the expected user-data files" % seed_dir + ) + + def check_instance_id(self, sys_cfg) -> bool: + # Quickly check (local check only) whether self.metadata['instance-id'] + # is still valid. + current = self.get_instance_id() + if not current: + return False + + try: + metadata = load_instance_metadata( + cloud_init_data_dir(), self.instance_name + ) + return current == metadata.get("instance-id") + + except (IOError, ValueError) as err: + LOG.warning( + "Unable to check_instance_id from metadata file: %s", + str(err), + ) + return False + + def _get_data(self) -> bool: + self.vendordata_raw = None + seed_dir = cloud_init_data_dir() + self.instance_name = instance_name() + + try: + self.metadata = load_instance_metadata( + seed_dir, self.instance_name + ) + file = self.find_user_data_file(seed_dir) + self.userdata_raw = cast( + str, util.load_binary_file(file.as_posix()) + ) + return True + + except (ValueError, IOError) as err: + LOG.error("Unable to setup WSL datasource: %s", str(err)) + return False + + +# Used to match classes to dependencies +datasources = [ + (DataSourceWSL, (sources.DEP_FILESYSTEM,)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) diff -Nru cloud-init-23.4.4/cloudinit/sources/__init__.py cloud-init-24.1.3/cloudinit/sources/__init__.py --- cloud-init-23.4.4/cloudinit/sources/__init__.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/__init__.py 2024-03-27 13:14:04.000000000 +0000 @@ -195,9 +195,6 @@ # - seed-dir () _subplatform = None - # Track the discovered fallback nic for use in configuration generation. - _fallback_interface = None - # The network configuration sources that should be considered for this data # source. (The first source in this list that provides network # configuration will be used without considering any that follow.) This @@ -223,10 +220,28 @@ # The datasource also defines a set of default EventTypes that the # datasource can react to. These are the event types that will be used # if not overridden by the user. + # # A datasource requiring to write network config on each system boot - # would call default_update_events['network'].add(EventType.BOOT). + # would either: + # + # 1) Overwrite the class attribute `default_update_events` like: + # + # >>> default_update_events = { + # ... EventScope.NETWORK: { + # ... EventType.BOOT_NEW_INSTANCE, + # ... EventType.BOOT, + # ... } + # ... } + # + # 2) Or, if writing network config on every boot has to be determined at + # runtime, then deepcopy to not overwrite the class attribute on other + # elements of this class hierarchy, like: + # + # >>> self.default_update_events = copy.deepcopy( + # ... self.default_update_events + # ... ) + # >>> self.default_update_events[EventScope.NETWORK].add(EventType.BOOT) - # Default: generate network config on new instance id (first boot). 
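# Why option (2) above insists on deepcopy before mutating
# default_update_events: the dict of sets is a class attribute, so an
# in-place .add() would leak the extra event into every datasource that
# shares the default. A toy demonstration (names are illustrative):
import copy

class Base:
    events = {"network": {"boot-new-instance"}}

class DsA(Base):
    pass

class DsB(Base):
    pass

a = DsA()
a.events = copy.deepcopy(a.events)   # copy first...
a.events["network"].add("boot")      # ...then mutate the instance's copy
assert "boot" not in DsB().events["network"]  # other datasources unaffected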
supported_update_events = { EventScope.NETWORK: { EventType.BOOT_NEW_INSTANCE, @@ -235,6 +250,8 @@ EventType.HOTPLUG, } } + + # Default: generate network config on new instance id (first boot). default_update_events = { EventScope.NETWORK: { EventType.BOOT_NEW_INSTANCE, @@ -279,6 +296,9 @@ # in the updated metadata skip_hotplug_detect = False + # Extra udev rules for cc_install_hotplug + extra_hotplug_udev_rules: Optional[str] = None + _ci_pkl_version = 1 def __init__(self, sys_cfg, distro: Distro, paths: Paths, ud_proc=None): @@ -327,6 +347,8 @@ e, ) raise DatasourceUnpickleUserDataError() from e + if not hasattr(self, "extra_hotplug_udev_rules"): + self.extra_hotplug_udev_rules = None def __str__(self): return type_utils.obj_name(self) @@ -507,7 +529,7 @@ cloud_id = instance_data["v1"].get("cloud_id", "none") cloud_id_file = os.path.join(self.paths.run_dir, "cloud-id") util.write_file(f"{cloud_id_file}-{cloud_id}", f"{cloud_id}\n") - # cloud-id not found, then no previous cloud-id fle + # cloud-id not found, then no previous cloud-id file prev_cloud_id_file = None new_cloud_id_file = f"{cloud_id_file}-{cloud_id}" # cloud-id found, then the prev cloud-id file is source of symlink @@ -608,17 +630,6 @@ return self.vendordata2 @property - def fallback_interface(self): - """Determine the network interface used during local network config.""" - if self._fallback_interface is None: - self._fallback_interface = net.find_fallback_nic() - if self._fallback_interface is None: - LOG.warning( - "Did not find a fallback interface on %s.", self.cloud_name - ) - return self._fallback_interface - - @property def platform_type(self): if not hasattr(self, "_platform_type"): # Handle upgrade path where pickled datasource has no _platform. @@ -714,7 +725,7 @@ def get_vendordata2_raw(self): return self.vendordata2_raw - # the data sources' config_obj is a cloud-config formated + # the data sources' config_obj is a cloud-config formatted # object that came to it from ways other than cloud-config # because cloud-config content would be handled elsewhere def get_config_obj(self): @@ -1163,7 +1174,7 @@ """Use pickle to deserialize an instance Datasource from a cache file.""" pickle_contents = None try: - pickle_contents = util.load_file(fname, decode=False) + pickle_contents = util.load_binary_file(fname) except Exception as e: if os.path.isfile(fname): LOG.warning("failed loading pickle in %s: %s", fname, e) diff -Nru cloud-init-23.4.4/cloudinit/sources/azure/errors.py cloud-init-24.1.3/cloudinit/sources/azure/errors.py --- cloud-init-23.4.4/cloudinit/sources/azure/errors.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/azure/errors.py 2024-03-27 13:14:04.000000000 +0000 @@ -9,6 +9,7 @@ from datetime import datetime from io import StringIO from typing import Any, Dict, List, Optional, Tuple +from xml.etree import ElementTree import requests @@ -150,6 +151,14 @@ self.supporting_data["url"] = exception.url +class ReportableErrorImdsInvalidMetadata(ReportableError): + def __init__(self, *, key: str, value: Any) -> None: + super().__init__(f"invalid IMDS metadata for key={key}") + + self.supporting_data["key"] = key + self.supporting_data["value"] = repr(value) + + class ReportableErrorImdsMetadataParsingException(ReportableError): def __init__(self, *, exception: ValueError) -> None: super().__init__("error parsing IMDS metadata") @@ -157,6 +166,22 @@ self.supporting_data["exception"] = repr(exception) +class ReportableErrorOsDiskPpsFailure(ReportableError): + def __init__(self) -> 
None: + super().__init__("error waiting for host shutdown") + + +class ReportableErrorOvfInvalidMetadata(ReportableError): + def __init__(self, message: str) -> None: + super().__init__(f"unexpected metadata parsing ovf-env.xml: {message}") + + +class ReportableErrorOvfParsingException(ReportableError): + def __init__(self, *, exception: ElementTree.ParseError) -> None: + message = exception.msg + super().__init__(f"error parsing ovf-env.xml: {message}") + + class ReportableErrorUnhandledException(ReportableError): def __init__(self, exception: Exception) -> None: super().__init__("unhandled exception") @@ -170,11 +195,3 @@ self.supporting_data["exception"] = repr(exception) self.supporting_data["traceback_base64"] = trace_base64 - - -class ReportableErrorImdsInvalidMetadata(ReportableError): - def __init__(self, *, key: str, value: Any) -> None: - super().__init__(f"invalid IMDS metadata for key={key}") - - self.supporting_data["key"] = key - self.supporting_data["value"] = repr(value) diff -Nru cloud-init-23.4.4/cloudinit/sources/azure/imds.py cloud-init-24.1.3/cloudinit/sources/azure/imds.py --- cloud-init-23.4.4/cloudinit/sources/azure/imds.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/azure/imds.py 2024-03-27 13:14:04.000000000 +0000 @@ -104,7 +104,7 @@ report_diagnostic_event( "Polling IMDS failed attempt %d with exception: %r" % (self._request_count, exception), - logger_func=LOG.info, + logger_func=LOG.warning, ) return retry @@ -160,7 +160,7 @@ metadata = _fetch_url(url, retry_handler=retry_handler) try: - return util.load_json(metadata) + return util.load_json(metadata.decode("utf-8")) except ValueError as error: report_diagnostic_event( "Failed to parse metadata from IMDS: %s" % error, diff -Nru cloud-init-23.4.4/cloudinit/sources/helpers/azure.py cloud-init-24.1.3/cloudinit/sources/helpers/azure.py --- cloud-init-23.4.4/cloudinit/sources/helpers/azure.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/helpers/azure.py 2024-03-27 13:14:04.000000000 +0000 @@ -4,24 +4,18 @@ import logging import os import re -import socket -import struct import textwrap import zlib from contextlib import contextmanager from datetime import datetime -from errno import ENOENT from time import sleep, time -from typing import TYPE_CHECKING, Callable, List, Optional, TypeVar, Union +from typing import Callable, List, Optional, TypeVar, Union from xml.etree import ElementTree from xml.sax.saxutils import escape from cloudinit import distros, subp, temp_utils, url_helper, util, version from cloudinit.reporting import events -from cloudinit.settings import CFG_BUILTIN - -if TYPE_CHECKING: - from cloudinit.sources.azure import errors +from cloudinit.sources.azure import errors LOG = logging.getLogger(__name__) @@ -32,14 +26,6 @@ SYSTEMINFO_EVENT_TYPE = "system-info" DIAGNOSTIC_EVENT_TYPE = "diagnostic" COMPRESSED_EVENT_TYPE = "compressed" -# Maximum number of bytes of the cloud-init.log file that can be dumped to KVP -# at once. This number is based on the analysis done on a large sample of -# cloud-init.log files where the P95 of the file sizes was 537KB and the time -# consumed to dump 500KB file was (P95:76, P99:233, P99.9:1170) in ms -MAX_LOG_TO_KVP_LENGTH = 512000 -# File to store the last byte of cloud-init.log that was pushed to KVP. This -# file will be deleted with every VM reboot. 
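# Shape of the error classes added and reshuffled in the errors.py hunks
# above: each carries a short reason plus structured supporting_data that
# Azure telemetry can report. A condensed sketch of the pattern, assuming a
# minimal base class (the real ReportableError in
# cloudinit/sources/azure/errors.py adds timestamps, agent strings, etc.):
from typing import Any, Dict

class ReportableError(Exception):
    def __init__(self, reason: str) -> None:
        super().__init__(reason)
        self.reason = reason
        self.supporting_data: Dict[str, Any] = {}

class ReportableErrorImdsInvalidMetadata(ReportableError):
    def __init__(self, *, key: str, value: Any) -> None:
        super().__init__(f"invalid IMDS metadata for key={key}")
        self.supporting_data["key"] = key
        self.supporting_data["value"] = repr(value)

err = ReportableErrorImdsInvalidMetadata(key="network", value=None)
assert err.supporting_data == {"key": "network", "value": "None"}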
-LOG_PUSHED_TO_KVP_INDEX_FILE = "/run/cloud-init/log_pushed_to_kvp_index" azure_ds_reporter = events.ReportEventStack( name="azure-ds", description="initialize reporter for azure ds", @@ -212,35 +198,8 @@ @azure_ds_telemetry_reporter -def push_log_to_kvp(file_name=CFG_BUILTIN["def_log_file"]): - """Push a portion of cloud-init.log file or the whole file to KVP - based on the file size. - The first time this function is called after VM boot, It will push the last - n bytes of the log file such that n < MAX_LOG_TO_KVP_LENGTH - If called again on the same boot, it continues from where it left off. - In addition to cloud-init.log, dmesg log will also be collected.""" - - start_index = get_last_log_byte_pushed_to_kvp_index() - - LOG.debug("Dumping cloud-init.log file to KVP") - try: - with open(file_name, "rb") as f: - f.seek(0, os.SEEK_END) - seek_index = max(f.tell() - MAX_LOG_TO_KVP_LENGTH, start_index) - report_diagnostic_event( - "Dumping last {0} bytes of cloud-init.log file to KVP starting" - " from index: {1}".format(f.tell() - seek_index, seek_index), - logger_func=LOG.debug, - ) - f.seek(seek_index, os.SEEK_SET) - report_compressed_event("cloud-init.log", f.read()) - util.write_file(LOG_PUSHED_TO_KVP_INDEX_FILE, str(f.tell())) - except Exception as ex: - report_diagnostic_event( - "Exception when dumping log file: %s" % repr(ex), - logger_func=LOG.warning, - ) - +def report_dmesg_to_kvp(): + """Report dmesg to KVP.""" LOG.debug("Dumping dmesg log to KVP") try: out, _ = subp.subp(["dmesg"], decode=False, capture=True) @@ -252,30 +211,6 @@ ) -@azure_ds_telemetry_reporter -def get_last_log_byte_pushed_to_kvp_index(): - try: - with open(LOG_PUSHED_TO_KVP_INDEX_FILE, "r") as f: - return int(f.read()) - except IOError as e: - if e.errno != ENOENT: - report_diagnostic_event( - "Reading LOG_PUSHED_TO_KVP_INDEX_FILE failed: %s." % repr(e), - logger_func=LOG.warning, - ) - except ValueError as e: - report_diagnostic_event( - "Invalid value in LOG_PUSHED_TO_KVP_INDEX_FILE: %s." % repr(e), - logger_func=LOG.warning, - ) - except Exception as e: - report_diagnostic_event( - "Failed to get the last log byte pushed to KVP: %s." % repr(e), - logger_func=LOG.warning, - ) - return 0 - - @contextmanager def cd(newdir): prevdir = os.getcwd() @@ -286,20 +221,6 @@ os.chdir(prevdir) -def get_ip_from_lease_value(fallback_lease_value): - unescaped_value = fallback_lease_value.replace("\\", "") - if len(unescaped_value) > 4: - hex_string = "" - for hex_pair in unescaped_value.split(":"): - if len(hex_pair) == 1: - hex_pair = "0" + hex_pair - hex_string += hex_pair - packed_bytes = struct.pack(">L", int(hex_string.replace(":", ""), 16)) - else: - packed_bytes = unescaped_value.encode("utf-8") - return socket.inet_ntoa(packed_bytes) - - @azure_ds_telemetry_reporter def http_with_retries( url: str, @@ -390,7 +311,6 @@ class AzureEndpointHttpClient: - headers = { "x-ms-agent-name": "WALinuxAgent", "x-ms-version": "2012-11-30", @@ -487,7 +407,6 @@ class OpenSSLManager: - certificate_names = { "private_key": "TransportPrivate.pem", "certificate": "TransportCert.pem", @@ -616,7 +535,6 @@ class GoalStateHealthReporter: - HEALTH_REPORT_XML_TEMPLATE = textwrap.dedent( """\ @@ -744,8 +662,6 @@ @azure_ds_telemetry_reporter def _post_health_report(self, document: bytes) -> None: - push_log_to_kvp() - # Whenever report_diagnostic_event(diagnostic_msg) is invoked in code, # the diagnostic messages are written to special files # (/var/opt/hyperv/.kvp_pool_*) as Hyper-V KVP messages. 
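# Usage of the cd() helper whose body is elided by the diff context above:
# change directory for the duration of a with-block and always restore the
# previous one. A standard implementation under that assumption:
import os
from contextlib import contextmanager

@contextmanager
def cd(newdir):
    prevdir = os.getcwd()
    os.chdir(newdir)
    try:
        yield
    finally:
        os.chdir(prevdir)  # restored even if the block raises

before = os.getcwd()
with cd("/"):
    assert os.getcwd() == "/"
assert os.getcwd() == before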
@@ -787,10 +703,10 @@ self.openssl_manager.clean_up() @azure_ds_telemetry_reporter - def eject_iso(self, iso_dev) -> None: + def eject_iso(self, iso_dev, distro: distros.Distro) -> None: + LOG.debug("Ejecting the provisioning iso") try: - LOG.debug("Ejecting the provisioning iso") - subp.subp(["eject", iso_dev]) + distro.eject_media(iso_dev) except Exception as e: report_diagnostic_event( "Failed ejecting the provisioning iso: %s" % e, @@ -799,7 +715,7 @@ @azure_ds_telemetry_reporter def register_with_azure_and_fetch_data( - self, pubkey_info=None, iso_dev=None + self, distro: distros.Distro, pubkey_info=None, iso_dev=None ) -> Optional[List[str]]: """Gets the VM's GoalState from Azure, uses the GoalState information to report ready/send the ready signal/provisioning complete signal to @@ -830,7 +746,7 @@ ) if iso_dev is not None: - self.eject_iso(iso_dev) + self.eject_iso(iso_dev, distro=distro) health_reporter.send_ready_signal() return ssh_keys @@ -1008,13 +924,14 @@ @azure_ds_telemetry_reporter def get_metadata_from_fabric( endpoint: str, + distro: distros.Distro, pubkey_info: Optional[List[str]] = None, iso_dev: Optional[str] = None, ): shim = WALinuxAgentShim(endpoint=endpoint) try: return shim.register_with_azure_and_fetch_data( - pubkey_info=pubkey_info, iso_dev=iso_dev + distro=distro, pubkey_info=pubkey_info, iso_dev=iso_dev ) finally: shim.clean_up() @@ -1039,10 +956,6 @@ ) -class BrokenAzureDataSource(Exception): - pass - - class NonAzureDataSource(Exception): pass @@ -1064,6 +977,7 @@ public_keys: Optional[List[dict]] = None, preprovisioned_vm: bool = False, preprovisioned_vm_type: Optional[str] = None, + provision_guest_proxy_agent: bool = False, ) -> None: self.username = username self.password = password @@ -1073,6 +987,7 @@ self.public_keys: List[dict] = public_keys or [] self.preprovisioned_vm = preprovisioned_vm self.preprovisioned_vm_type = preprovisioned_vm_type + self.provision_guest_proxy_agent = provision_guest_proxy_agent def __eq__(self, other) -> bool: return self.__dict__ == other.__dict__ @@ -1082,13 +997,13 @@ """Parser for ovf-env.xml data. :raises NonAzureDataSource: if XML is not in Azure's format. - :raises BrokenAzureDataSource: if XML is unparseable or invalid. + :raises errors.ReportableErrorOvfParsingException: if XML is + unparsable or invalid. """ try: root = ElementTree.fromstring(ovf_env_xml) except ElementTree.ParseError as e: - error_str = "Invalid ovf-env.xml: %s" % e - raise BrokenAzureDataSource(error_str) from e + raise errors.ReportableErrorOvfParsingException(exception=e) from e # If there's no provisioning section, it's not Azure ovf-env.xml. 
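# The eject_iso() change above swaps a hard-coded `eject` subprocess for a
# distro-level hook, letting each distro pick its own ejection tool. A
# sketch of the shape this implies (the method name eject_media comes from
# the hunk; the bodies here are illustrative assumptions):
import subprocess

class Distro:
    def eject_media(self, device: str) -> None:
        # common Linux default: shell out to eject(8)
        subprocess.run(["eject", device], check=True)

class FreeBSDLike(Distro):
    def eject_media(self, device: str) -> None:
        # platforms without eject(8) can override with their own tool
        subprocess.run(["camcontrol", "eject", device], check=True)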
if not root.find("./wa:ProvisioningSection", cls.NAMESPACES): @@ -1113,14 +1028,14 @@ "./%s:%s" % (namespace, name), OvfEnvXml.NAMESPACES ) if len(matches) == 0: - msg = "No ovf-env.xml configuration for %r" % name + msg = "missing configuration for %r" % name LOG.debug(msg) if required: - raise BrokenAzureDataSource(msg) + raise errors.ReportableErrorOvfInvalidMetadata(msg) return None elif len(matches) > 1: - raise BrokenAzureDataSource( - "Multiple configuration matches in ovf-exml.xml for %r (%d)" + raise errors.ReportableErrorOvfInvalidMetadata( + "multiple configuration matches for %r (%d)" % (name, len(matches)) ) @@ -1137,14 +1052,14 @@ ): matches = node.findall("./wa:" + name, OvfEnvXml.NAMESPACES) if len(matches) == 0: - msg = "No ovf-env.xml configuration for %r" % name + msg = "missing configuration for %r" % name LOG.debug(msg) if required: - raise BrokenAzureDataSource(msg) + raise errors.ReportableErrorOvfInvalidMetadata(msg) return default elif len(matches) > 1: - raise BrokenAzureDataSource( - "Multiple configuration matches in ovf-exml.xml for %r (%d)" + raise errors.ReportableErrorOvfInvalidMetadata( + "multiple configuration matches for %r (%d)" % (name, len(matches)) ) @@ -1216,6 +1131,12 @@ "PreprovisionedVMType", required=False, ) + self.provision_guest_proxy_agent = self._parse_property( + platform_settings, + "ProvisionGuestProxyAgent", + default=False, + required=False, + ) def _parse_ssh_section(self, config_set): self.public_keys = [] diff -Nru cloud-init-23.4.4/cloudinit/sources/helpers/openstack.py cloud-init-24.1.3/cloudinit/sources/helpers/openstack.py --- cloud-init-23.4.4/cloudinit/sources/helpers/openstack.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/helpers/openstack.py 2024-03-27 13:14:04.000000000 +0000 @@ -364,7 +364,11 @@ return os.path.join(*components) def _path_read(self, path, decode=False): - return util.load_file(path, decode=decode) + return ( + util.load_text_file(path) + if decode + else util.load_binary_file(path) + ) def _fetch_available_versions(self): if self._versions is None: @@ -548,7 +552,7 @@ There are additional fields that are populated in the network_data.json from OpenStack that are not relevant to network_config yaml, so we enumerate a dictionary of valid keys for network_yaml and apply filtering - to drop these superflous keys from the network_config yaml. + to drop these superfluous keys from the network_config yaml. 
""" if network_json is None: return None diff -Nru cloud-init-23.4.4/cloudinit/sources/helpers/vmware/imc/config.py cloud-init-24.1.3/cloudinit/sources/helpers/vmware/imc/config.py --- cloud-init-23.4.4/cloudinit/sources/helpers/vmware/imc/config.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/helpers/vmware/imc/config.py 2024-03-27 13:14:04.000000000 +0000 @@ -88,7 +88,7 @@ @property def reset_password(self): - """Retreives if the root password needs to be reset.""" + """Retrieves if the root password needs to be reset.""" resetPass = self._configFile.get(Config.RESETPASS, "no") resetPass = resetPass.lower() if resetPass not in ("yes", "no"): diff -Nru cloud-init-23.4.4/cloudinit/sources/helpers/vmware/imc/config_custom_script.py cloud-init-24.1.3/cloudinit/sources/helpers/vmware/imc/config_custom_script.py --- cloud-init-23.4.4/cloudinit/sources/helpers/vmware/imc/config_custom_script.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/helpers/vmware/imc/config_custom_script.py 2024-03-27 13:14:04.000000000 +0000 @@ -50,9 +50,9 @@ util.copy(self.scriptpath, CustomScriptConstant.CUSTOM_SCRIPT) # Strip any CR characters from the decoded script - content = util.load_file(CustomScriptConstant.CUSTOM_SCRIPT).replace( - "\r", "" - ) + content = util.load_text_file( + CustomScriptConstant.CUSTOM_SCRIPT + ).replace("\r", "") util.write_file( CustomScriptConstant.CUSTOM_SCRIPT, content, mode=0o544 ) diff -Nru cloud-init-23.4.4/cloudinit/sources/helpers/vmware/imc/guestcust_util.py cloud-init-24.1.3/cloudinit/sources/helpers/vmware/imc/guestcust_util.py --- cloud-init-23.4.4/cloudinit/sources/helpers/vmware/imc/guestcust_util.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/helpers/vmware/imc/guestcust_util.py 2024-03-27 13:14:04.000000000 +0000 @@ -102,7 +102,7 @@ enableNicsWaitCount = 5 enableNicsWaitSeconds = 1 - for attempt in range(0, enableNicsWaitRetries): + for attempt in range(enableNicsWaitRetries): logger.debug("Trying to connect interfaces, attempt %d", attempt) (out, _err) = set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_RUNNING, @@ -117,7 +117,7 @@ logger.warning("NICS connection status query is not supported") return - for count in range(0, enableNicsWaitCount): + for count in range(enableNicsWaitCount): (out, _err) = set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_RUNNING, GuestCustEventEnum.GUESTCUST_EVENT_QUERY_NICS, @@ -251,7 +251,7 @@ ) return (None, None, None) try: - md = util.load_file(md_path) + md = util.load_text_file(md_path) except Exception as e: set_cust_error_status( "Error loading cloud-init meta data file", @@ -284,7 +284,7 @@ ) return (None, None, None) try: - ud = util.load_file(ud_path).replace("\r", "") + ud = util.load_text_file(ud_path).replace("\r", "") except Exception as e: set_cust_error_status( "Error loading cloud-init userdata file", diff -Nru cloud-init-23.4.4/cloudinit/sources/helpers/vultr.py cloud-init-24.1.3/cloudinit/sources/helpers/vultr.py --- cloud-init-23.4.4/cloudinit/sources/helpers/vultr.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/sources/helpers/vultr.py 2024-03-27 13:14:04.000000000 +0000 @@ -20,7 +20,7 @@ def get_metadata( distro, url, timeout, retries, sec_between, agent, tmp_dir=None ): - # Bring up interface (and try untill one works) + # Bring up interface (and try until one works) exception = RuntimeError("Failed to DHCP") # Seek iface with DHCP diff -Nru 
cloud-init-23.4.4/cloudinit/ssh_util.py cloud-init-24.1.3/cloudinit/ssh_util.py --- cloud-init-23.4.4/cloudinit/ssh_util.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/ssh_util.py 2024-03-27 13:14:04.000000000 +0000 @@ -27,14 +27,13 @@ # refer to the keytype struct of OpenSSH in the same file, to see # if the position of the sigonly flag has been moved. # -# dsa, rsa, ecdsa and ed25519 are added for legacy, as they are valid +# rsa, ecdsa and ed25519 are added for legacy, as they are valid # public keys in some old distros. They can possibly be removed # in the future when support for the older distros is dropped # # When updating the list, also update the _is_printable_key list in # cloudinit/config/cc_ssh_authkey_fingerprints.py VALID_KEY_TYPES = ( - "dsa", "rsa", "ecdsa", "ed25519", @@ -48,8 +47,6 @@ "sk-ecdsa-sha2-nistp256@openssh.com", "sk-ssh-ed25519-cert-v01@openssh.com", "sk-ssh-ed25519@openssh.com", - "ssh-dss-cert-v01@openssh.com", - "ssh-dss", "ssh-ed25519-cert-v01@openssh.com", "ssh-ed25519", "ssh-rsa-cert-v01@openssh.com", @@ -106,7 +103,7 @@ (because of the size of the public key encoding) up to a limit of 8 kilo- bytes, which permits DSA keys up to 8 kilobits and RSA keys up to 16 kilobits. You don't want to type them in; instead, copy the - identity.pub, id_dsa.pub, or the id_rsa.pub file and edit it. + identity.pub or the id_rsa.pub file and edit it. sshd enforces a minimum RSA key modulus size for protocol 1 and protocol 2 keys of 768 bits. @@ -193,7 +190,7 @@ for fname in fnames: try: if os.path.isfile(fname): - lines = util.load_file(fname).splitlines() + lines = util.load_text_file(fname).splitlines() for line in lines: contents.append(parser.parse(line)) except (IOError, OSError): @@ -204,7 +201,7 @@ def update_authorized_keys(old_entries, keys): to_add = list([k for k in keys if k.valid()]) - for i in range(0, len(old_entries)): + for i in range(len(old_entries)): ent = old_entries[i] if not ent.valid(): continue @@ -504,7 +501,7 @@ def parse_ssh_config(fname) -> List[SshdConfigLine]: if not os.path.isfile(fname): return [] - return parse_ssh_config_lines(util.load_file(fname).splitlines()) + return parse_ssh_config_lines(util.load_text_file(fname).splitlines()) def parse_ssh_config_lines(lines) -> List[SshdConfigLine]: @@ -664,7 +661,7 @@ def get_opensshd_upstream_version(): - """Get the upstream version of the OpenSSH sshd dameon on the system. + """Get the upstream version of the OpenSSH sshd daemon on the system. This will NOT include the portable number, so if the Ubuntu version looks like `1.2p1 Ubuntu-1ubuntu0.1`, then this function would return diff -Nru cloud-init-23.4.4/cloudinit/stages.py cloud-init-24.1.3/cloudinit/stages.py --- cloud-init-23.4.4/cloudinit/stages.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/stages.py 2024-03-27 13:14:04.000000000 +0000 @@ -64,7 +64,7 @@ datasource: sources.DataSource, cfg: dict, event_source_type: EventType, - scope: Optional[EventScope] = None, + scope: EventScope, ) -> bool: """Determine if a particular EventType is enabled. 
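# Net effect of the ssh_util.py hunks above: DSA keys ("ssh-dss" and its
# cert variant) no longer pass validation. A quick membership check against
# a reduced key-type tuple (subset shown; the full tuple lives in
# ssh_util.py):
VALID_KEY_TYPES = ("rsa", "ecdsa", "ed25519", "ssh-ed25519", "ssh-rsa")

def is_valid_keytype(keytype: str) -> bool:
    return keytype in VALID_KEY_TYPES

assert not is_valid_keytype("ssh-dss")  # dropped in 24.1
assert is_valid_keytype("ssh-ed25519")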
@@ -91,13 +91,27 @@ copy.deepcopy(default_events), ] ) + + # Add supplemental hotplug event if supported and present in + # hotplug.enabled file + if EventType.HOTPLUG in datasource.supported_update_events.get( + scope, set() + ): + hotplug_enabled_file = util.read_hotplug_enabled_file(datasource.paths) + if scope.value in hotplug_enabled_file["scopes"]: + LOG.debug( + "Adding event: scope=%s EventType=%s found in %s", + scope, + EventType.HOTPLUG, + datasource.paths.get_cpath("hotplug.enabled"), + ) + if not allowed.get(scope): + allowed[scope] = set() + allowed[scope].add(EventType.HOTPLUG) + LOG.debug("Allowed events: %s", allowed) - scopes: Iterable[EventScope] - if not scope: - scopes = allowed.keys() - else: - scopes = [scope] + scopes: Iterable[EventScope] = [scope] scope_values = [s.value for s in scopes] for evt_scope in scopes: @@ -122,7 +136,7 @@ else: self.ds_deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK] # Created on first use - self._cfg: Optional[dict] = None + self._cfg: Dict = {} self._paths: Optional[helpers.Paths] = None self._distro: Optional[distros.Distro] = None # Changed only when a fetch occurs @@ -138,14 +152,11 @@ ) self.reporter = reporter - def _reset(self, reset_ds=False): + def _reset(self): # Recreated on access - self._cfg = None + self._cfg = {} self._paths = None self._distro = None - if reset_ds: - self.datasource = None - self.ds_restored = False @property def distro(self): @@ -179,8 +190,6 @@ ocfg = util.get_cfg_by_path(ocfg, ("system_info",), {}) elif restriction == "paths": ocfg = util.get_cfg_by_path(ocfg, ("system_info", "paths"), {}) - if not isinstance(ocfg, (dict)): - ocfg = {} return ocfg @property @@ -262,10 +271,8 @@ ) def read_cfg(self, extra_fns=None): - # None check so that we don't keep on re-loading if empty - if self._cfg is None: + if not self._cfg: self._cfg = self._read_cfg(extra_fns) - # LOG.debug("Loaded 'init' config %s", self._cfg) def _read_cfg(self, extra_fns): no_cfg_paths = helpers.Paths({}, self.datasource) @@ -321,7 +328,7 @@ run_iid_fn = self.paths.get_runpath("instance_id") if os.path.exists(run_iid_fn): - run_iid = util.load_file(run_iid_fn).strip() + run_iid = util.load_text_file(run_iid_fn).strip() else: run_iid = None @@ -346,7 +353,6 @@ description="attempting to read from cache [%s]" % existing, parent=self.reporter, ) as myrep: - ds, desc = self._restore_from_checked_cache(existing) myrep.description = desc self.ds_restored = bool(ds) @@ -403,7 +409,9 @@ network_link = self.paths.get_runpath("network_config") if os.path.exists(ncfg_instance_path): # Compare and only write on delta of current network-config - if netcfg != util.load_json(util.load_file(ncfg_instance_path)): + if netcfg != util.load_json( + util.load_text_file(ncfg_instance_path) + ): atomic_helper.write_json( ncfg_instance_path, netcfg, mode=0o600 ) @@ -434,7 +442,7 @@ previous_ds = None ds_fn = os.path.join(idir, "datasource") try: - previous_ds = util.load_file(ds_fn).strip() + previous_ds = util.load_text_file(ds_fn).strip() except Exception: pass if not previous_ds: @@ -469,7 +477,7 @@ dp = self.paths.get_cpath("data") iid_fn = os.path.join(dp, "instance-id") try: - self._previous_iid = util.load_file(iid_fn).strip() + self._previous_iid = util.load_text_file(iid_fn).strip() except Exception: self._previous_iid = NO_PREVIOUS_INSTANCE_ID @@ -636,7 +644,7 @@ if not path or not os.path.isdir(path): return potential_handlers = util.get_modules_from_dir(path) - for (fname, mod_name) in potential_handlers.items(): + for fname, mod_name in 
potential_handlers.items(): try: mod_locs, looked_locs = importer.find_module( mod_name, [""], ["list_types", "handle_part"] @@ -684,7 +692,7 @@ def init_handlers(): # Init the handlers first - for (_ctype, mod) in c_handlers.items(): + for _ctype, mod in c_handlers.items(): if mod in c_handlers.initialized: # Avoid initiating the same module twice (if said module # is registered to more than one content-type). @@ -696,7 +704,7 @@ # Walk the user data part_data = { "handlers": c_handlers, - # Any new handlers that are encountered get writen here + # Any new handlers that are encountered get written here "handlerdir": idir, "data": data, # The default frequency if handlers don't have one @@ -711,7 +719,7 @@ def finalize_handlers(): # Give callbacks opportunity to finalize - for (_ctype, mod) in c_handlers.items(): + for _ctype, mod in c_handlers.items(): if mod not in c_handlers.initialized: # Said module was never inited in the first place, so lets # not attempt to finalize those that never got called. @@ -781,7 +789,9 @@ ) json_sensitive_file = self.paths.get_runpath("instance_data_sensitive") try: - instance_json = util.load_json(util.load_file(json_sensitive_file)) + instance_json = util.load_json( + util.load_text_file(json_sensitive_file) + ) except (OSError, IOError) as e: LOG.warning( "Skipping write of system_info/features to %s." @@ -980,7 +990,10 @@ Find the config, determine whether to apply it, apply it via the distro, and optionally bring it up """ - from cloudinit.config.schema import validate_cloudconfig_schema + from cloudinit.config.schema import ( + SchemaType, + validate_cloudconfig_schema, + ) netcfg, src = self._find_networking_config() if netcfg is None: @@ -1019,15 +1032,14 @@ netcfg, src = self._find_networking_config() self._write_network_config_json(netcfg) - if netcfg and netcfg.get("version") == 1: + if netcfg: validate_cloudconfig_schema( config=netcfg, - schema_type="network-config", - strict=False, - log_details=True, + schema_type=SchemaType.NETWORK_CONFIG, + strict=False, # Warnings not raising exceptions + log_details=False, # May have wifi passwords in net cfg log_deprecations=True, ) - # ensure all physical devices in config are present self.distro.networking.wait_for_physdevs(netcfg) diff -Nru cloud-init-23.4.4/cloudinit/subp.py cloud-init-24.1.3/cloudinit/subp.py --- cloud-init-23.4.4/cloudinit/subp.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/subp.py 2024-03-27 13:14:04.000000000 +0000 @@ -5,6 +5,7 @@ import logging import os import subprocess +import time from errno import ENOEXEC from io import TextIOWrapper from typing import List, Union @@ -143,22 +144,36 @@ return text.rstrip(b"\n").replace(b"\n", b"\n" + b" " * indent_level) +def raise_on_invalid_command(args: Union[List[str], List[bytes]]): + """check argument types to ensure that subp() can run the argument + + Throw a user-friendly exception which explains the issue. 
+ + args: list of arguments passed to subp() + raises: ProcessExecutionError with information explaining the issue + """ + for component in args: + # if already bytes, or implements encode(), then it should be safe + if not (isinstance(component, bytes) or hasattr(component, "encode")): + LOG.warning("Running invalid command: %s", args) + raise ProcessExecutionError( + cmd=args, reason=f"Running invalid command: {args}" + ) + + def subp( - args, + args: Union[str, bytes, List[str], List[bytes]], *, data=None, rcs=None, - env=None, capture=True, - combine_capture=False, shell=False, logstring=False, decode="replace", - target=None, update_env=None, - status_cb=None, cwd=None, -): + timeout=None, +) -> SubpResult: """Run a subprocess. :param args: command to run in a list. [cmd, arg1, arg2...] @@ -167,16 +182,9 @@ a list of allowed return codes. If subprocess exits with a value not in this list, a ProcessExecutionError will be raised. By default, data is returned as a string. See 'decode' parameter. - :param env: a dictionary for the command's environment. :param capture: boolean indicating if output should be captured. If True, then stderr and stdout will be returned. If False, they will not be redirected. - :param combine_capture: - boolean indicating if stderr should be redirected to stdout. When True, - interleaved stderr and stdout will be returned as the first element of - a tuple, the second will be empty string or bytes (per decode). - if combine_capture is True, then output is captured independent of - the value of capture. :param shell: boolean indicating if this should be run with a shell. :param logstring: the command will be logged to DEBUG. If it contains info that should @@ -186,17 +194,13 @@ be bytes. Other allowed values are 'strict', 'ignore', and 'replace'. These values are passed through to bytes().decode() as the 'errors' parameter. There is no support for decoding to other than utf-8. - :param target: - not supported, kwarg present only to make function signature similar - to curtin's subp. :param update_env: - update the enviornment for this command with this dictionary. + update the environment for this command with this dictionary. this will not affect the current processes os.environ. - :param status_cb: - call this fuction with a single string argument before starting - and after finishing. :param cwd: change the working directory to cwd before executing the command. 
+ :param timeout: maximum time for the subprocess to run, passed directly to + the timeout parameter of Popen.communicate() :return if not capturing, return is (None, None) @@ -207,28 +211,13 @@ entries in tuple will be bytes """ - # not supported in cloud-init (yet), for now kept in the call signature - # to ease maintaining code shared between cloud-init and curtin - if target is not None: - raise ValueError("target arg not supported by cloud-init") - if rcs is None: rcs = [0] - devnull_fp = None - + env = os.environ.copy() if update_env: - if env is None: - env = os.environ - env = env.copy() env.update(update_env) - if target_path(target) != "/": - args = ["chroot", target] + list(args) - - if status_cb: - command = " ".join(args) if isinstance(args, list) else args - status_cb("Begin run command: {command}\n".format(command=command)) if not logstring: LOG.debug( "Running command %s with allowed return codes %s" @@ -236,7 +225,7 @@ args, rcs, shell, - "combine" if combine_capture else capture, + capture, ) else: LOG.debug( @@ -251,14 +240,10 @@ if capture: stdout = subprocess.PIPE stderr = subprocess.PIPE - if combine_capture: - stdout = subprocess.PIPE - stderr = subprocess.STDOUT if data is None: # using devnull assures any reads get null, rather # than possibly waiting on input. - devnull_fp = open(os.devnull) - stdin = devnull_fp + stdin = subprocess.DEVNULL else: stdin = subprocess.PIPE if not isinstance(data, bytes): @@ -273,10 +258,12 @@ elif isinstance(args, str): bytes_args = args.encode("utf-8") else: + raise_on_invalid_command(args) bytes_args = [ x if isinstance(x, bytes) else x.encode("utf-8") for x in args ] try: + before = time.time() sp = subprocess.Popen( bytes_args, stdout=stdout, @@ -286,10 +273,11 @@ shell=shell, cwd=cwd, ) - (out, err) = sp.communicate(data) + out, err = sp.communicate(data, timeout=timeout) + total = time.time() - before + if total > 0.1: + LOG.debug("command %s took %.3ss to run", args, total) except OSError as e: - if status_cb: - status_cb("ERROR: End run command: invalid command provided\n") raise ProcessExecutionError( cmd=args, reason=e, @@ -297,16 +285,6 @@ stdout="-" if decode else b"-", stderr="-" if decode else b"-", ) from e - finally: - if devnull_fp: - devnull_fp.close() - - # Just ensure blank instead of none. 
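# The new timeout parameter above is passed straight through to
# Popen.communicate(); when it expires, communicate() raises
# subprocess.TimeoutExpired and the child keeps running until the caller
# kills it. A standalone illustration of that stdlib contract:
import subprocess

sp = subprocess.Popen(["sleep", "5"])
try:
    sp.communicate(timeout=0.1)
except subprocess.TimeoutExpired:
    sp.kill()
    sp.communicate()  # reap the killed child
assert sp.returncode is not None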
- if capture or combine_capture: - if not out: - out = b"" - if not err: - err = b"" if decode: def ldecode(data, m="utf-8"): @@ -317,17 +295,13 @@ rc = sp.returncode if rc not in rcs: - if status_cb: - status_cb("ERROR: End run command: exit({code})\n".format(code=rc)) raise ProcessExecutionError( stdout=out, stderr=err, exit_code=rc, cmd=args ) - if status_cb: - status_cb("End run command: exit({code})\n".format(code=rc)) return SubpResult(out, err) -def target_path(target, path=None): +def target_path(target=None, path=None): # return 'path' inside target, accepting target as None if target in (None, ""): target = "/" @@ -405,13 +379,15 @@ except ProcessExecutionError as e: LOG.debug(e) failed.append(exe_name) - else: + elif os.path.isfile(exe_path): LOG.warning( "skipping %s as its not executable " "or the underlying file system is mounted without " "executable permissions.", exe_path, ) + else: + LOG.debug("Not executing special file [%s]", exe_path) if failed and attempted: raise RuntimeError( diff -Nru cloud-init-23.4.4/cloudinit/templater.py cloud-init-24.1.3/cloudinit/templater.py --- cloud-init-23.4.4/cloudinit/templater.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/templater.py 2024-03-27 13:14:04.000000000 +0000 @@ -18,6 +18,8 @@ import sys from typing import Any +from jinja2 import TemplateSyntaxError + from cloudinit import type_utils as tu from cloudinit import util from cloudinit.atomic_helper import write_file @@ -42,6 +44,47 @@ MISSING_JINJA_PREFIX = "CI_MISSING_JINJA_VAR/" +class JinjaSyntaxParsingException(TemplateSyntaxError): + def __init__( + self, + error: TemplateSyntaxError, + ) -> None: + super().__init__( + error.message or "unknown syntax error", + error.lineno, + error.name, + error.filename, + ) + self.source = error.source + + def __str__(self): + """Avoid jinja2.TemplateSyntaxError multi-line __str__ format.""" + return self.format_error_message( + syntax_error=self.message, + line_number=self.lineno, + line_content=self.source.splitlines()[self.lineno - 2].strip(), + ) + + @staticmethod + def format_error_message( + syntax_error: str, + line_number: str, + line_content: str = "", + ) -> str: + """Avoid jinja2.TemplateSyntaxError multi-line __str__ format.""" + line_content = f": {line_content}" if line_content else "" + return JinjaSyntaxParsingException.message_template.format( + syntax_error=syntax_error, + line_number=line_number, + line_content=line_content, + ) + + message_template = ( + "Unable to parse Jinja template due to syntax error: " + "{syntax_error} on line {line_number}{line_content}" + ) + + # Mypy, and the PEP 484 ecosystem in general, does not support creating # classes with dynamic base types: https://stackoverflow.com/a/59636248 class UndefinedJinjaVariable(JUndefined): @@ -102,18 +145,26 @@ def jinja_render(content, params): # keep_trailing_newline is in jinja2 2.7+, not 2.6 add = "\n" if content.endswith("\n") else "" - return ( - JTemplate( - content, - undefined=UndefinedJinjaVariable, - trim_blocks=True, - extensions=["jinja2.ext.do"], - ).render(**params) - + add - ) + try: + return ( + JTemplate( + content, + undefined=UndefinedJinjaVariable, + trim_blocks=True, + extensions=["jinja2.ext.do"], + ).render(**params) + + add + ) + except TemplateSyntaxError as template_syntax_error: + template_syntax_error.lineno += 1 + raise JinjaSyntaxParsingException( + error=template_syntax_error, + ) from template_syntax_error + except Exception as unknown_error: + raise unknown_error from unknown_error if 
text.find("\n") != -1: - ident, rest = text.split("\n", 1) + ident, rest = text.split("\n", 1) # remove the first line else: ident = text rest = "" @@ -142,7 +193,7 @@ def render_from_file(fn, params): if not params: params = {} - template_type, renderer, content = detect_template(util.load_file(fn)) + template_type, renderer, content = detect_template(util.load_text_file(fn)) LOG.debug("Rendering content of '%s' using renderer %s", fn, template_type) return renderer(content, params) diff -Nru cloud-init-23.4.4/cloudinit/url_helper.py cloud-init-24.1.3/cloudinit/url_helper.py --- cloud-init-23.4.4/cloudinit/url_helper.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/url_helper.py 2024-03-27 13:14:04.000000000 +0000 @@ -285,7 +285,7 @@ # Handle retrying ourselves since the built-in support # doesn't handle sleeping between tries... # Infinitely retry if infinite is True - for i in count() if infinite else range(0, manual_tries): + for i in count() if infinite else range(manual_tries): req_args["headers"] = headers_cb(url) filtered_req_args = {} for (k, v) in req_args.items(): @@ -473,14 +473,14 @@ def wait_for_url( urls, - max_wait=None, - timeout=None, + max_wait: float = float("inf"), + timeout: Optional[float] = None, status_cb: Callable = LOG.debug, # some sources use different log levels headers_cb: Optional[Callable] = None, headers_redact=None, - sleep_time: int = 1, + sleep_time: Optional[float] = None, exception_cb: Optional[Callable] = None, - sleep_time_cb: Optional[Callable[[Any, int], int]] = None, + sleep_time_cb: Optional[Callable[[Any, float], float]] = None, request_method: str = "", connect_synchronously: bool = True, async_delay: float = 0.150, @@ -496,10 +496,15 @@ headers_cb: call method with single argument of url to get headers for request. headers_redact: a list of header names to redact from the log + sleep_time: Amount of time to sleep between retries. If this and + sleep_time_cb are None, the default sleep time + defaults to 1 second and increases by 1 seconds every 5 + tries. Cannot be specified along with `sleep_time_cb`. exception_cb: call method with 2 arguments 'msg' (per status_cb) and 'exception', the exception that occurred. sleep_time_cb: call method with 2 arguments (response, loop_n) that - generates the next sleep time. + generates the next sleep time. Cannot be specified + along with 'sleep_time`. request_method: indicate the type of HTTP request, GET, PUT, or POST connect_synchronously: if false, enables executing requests in parallel async_delay: delay before parallel metadata requests, see RFC 6555 @@ -520,17 +525,19 @@ data host (169.254.169.254) may be firewalled off Entirely for a system, meaning that the connection will block forever unless a timeout is set. - A value of None for max_wait will retry indefinitely. + The default value for max_wait will retry indefinitely. 
""" - def default_sleep_time(_, loop_number: int) -> int: - return int(loop_number / 5) + 1 + def default_sleep_time(_, loop_number: int) -> float: + return sleep_time if sleep_time is not None else loop_number // 5 + 1 - def timeup(max_wait, start_time): + def timeup(max_wait: float, start_time: float, sleep_time: float = 0): """Check if time is up based on start time and max wait""" - if max_wait is None: + if max_wait in (float("inf"), None): return False - return (max_wait <= 0) or (time.time() - start_time > max_wait) + return (max_wait <= 0) or ( + time.time() - start_time + sleep_time > max_wait + ) def handle_url_response(response, url): """Map requests response code/contents to internal "UrlError" type""" @@ -575,7 +582,7 @@ time_taken = int(time.time() - start_time) max_wait_str = "%ss" % max_wait if max_wait else "unlimited" status_msg = "Calling '%s' failed [%s/%s]: %s" % ( - url, + url or getattr(url_exc, "url", "url ? None"), time_taken, max_wait_str, reason, @@ -641,6 +648,8 @@ return out start_time = time.time() + if sleep_time and sleep_time_cb: + raise ValueError("sleep_time and sleep_time_cb are mutually exclusive") # Dual-stack support factored out serial and parallel execution paths to # allow the retry loop logic to exist separately from the http calls. @@ -656,25 +665,30 @@ loop_n: int = 0 response = None while True: - sleep_time = calculate_sleep_time(response, loop_n) + current_sleep_time = calculate_sleep_time(response, loop_n) url = do_read_url(start_time, timeout, exception_cb, status_cb) if url: address, response = url return (address, response.contents) - if timeup(max_wait, start_time): + if timeup(max_wait, start_time, current_sleep_time): break loop_n = loop_n + 1 LOG.debug( - "Please wait %s seconds while we wait to try again", sleep_time + "Please wait %s seconds while we wait to try again", + current_sleep_time, ) - time.sleep(sleep_time) + time.sleep(current_sleep_time) # shorten timeout to not run way over max_time - # timeout=0.0 causes exceptions in urllib, set to None if zero - timeout = int((start_time + max_wait) - time.time()) or None + current_time = time.time() + if timeout and current_time + timeout > start_time + max_wait: + timeout = max_wait - (current_time - start_time) + if timeout <= 0: + # We've already exceeded our max_wait. Time to bail. 
+ break LOG.error("Timed out, no response from urls: %s", urls) return False, None diff -Nru cloud-init-23.4.4/cloudinit/user_data.py cloud-init-24.1.3/cloudinit/user_data.py --- cloud-init-23.4.4/cloudinit/user_data.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/user_data.py 2024-03-27 13:14:04.000000000 +0000 @@ -51,8 +51,8 @@ # Msg header used to track attachments ATTACHMENT_FIELD = "Number-Attachments" -# Only the following content types can have there launch index examined -# in there payload, evey other content type can still provide a header +# Only the following content types can have their launch index examined +# in their payload, every other content type can still provide a header EXAMINE_FOR_LAUNCH_INDEX = ["text/cloud-config"] @@ -231,7 +231,7 @@ if include_once_on: include_once_fn = self._get_include_once_filename(include_url) if include_once_on and os.path.isfile(include_once_fn): - content = util.load_file(include_once_fn) + content = util.load_text_file(include_once_fn) else: try: resp = read_file_or_url( diff -Nru cloud-init-23.4.4/cloudinit/util.py cloud-init-24.1.3/cloudinit/util.py --- cloud-init-23.4.4/cloudinit/util.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/util.py 2024-03-27 13:14:04.000000000 +0000 @@ -36,19 +36,23 @@ import time from base64 import b64decode from collections import deque, namedtuple -from contextlib import suppress -from errno import EACCES, ENOENT +from contextlib import contextmanager, suppress +from errno import ENOENT from functools import lru_cache, total_ordering from pathlib import Path from typing import ( + TYPE_CHECKING, + Any, Callable, Deque, Dict, + Generator, List, Mapping, Optional, Sequence, TypeVar, + Union, ) from urllib import parse @@ -58,13 +62,18 @@ mergers, net, safeyaml, + settings, subp, temp_utils, type_utils, url_helper, version, ) -from cloudinit.settings import CFG_BUILTIN +from cloudinit.settings import CFG_BUILTIN, PER_ONCE + +if TYPE_CHECKING: + # Avoid circular import + from cloudinit.helpers import Paths _DNS_REDIRECT_IP = None LOG = logging.getLogger(__name__) @@ -84,20 +93,18 @@ @lru_cache() -def get_dpkg_architecture(target=None): +def get_dpkg_architecture(): """Return the sanitized string output by `dpkg --print-architecture`. N.B. This function is wrapped in functools.lru_cache, so repeated calls won't shell out every time. """ - out = subp.subp( - ["dpkg", "--print-architecture"], capture=True, target=target - ) + out = subp.subp(["dpkg", "--print-architecture"], capture=True) return out.stdout.strip() @lru_cache() -def lsb_release(target=None): +def lsb_release(): fmap = { "Codename": "codename", "Description": "description", @@ -107,7 +114,7 @@ data = {} try: - out = subp.subp(["lsb_release", "--all"], capture=True, target=target) + out = subp.subp(["lsb_release", "--all"], capture=True) for line in out.stdout.splitlines(): fname, _, val = line.partition(":") if fname in fmap: @@ -126,18 +133,14 @@ return data -def decode_binary(blob, encoding="utf-8"): +def decode_binary(blob: Union[str, bytes], encoding="utf-8") -> str: # Converts a binary type into a text type using given encoding. - if isinstance(blob, str): - return blob - return blob.decode(encoding) + return blob if isinstance(blob, str) else blob.decode(encoding=encoding) -def encode_text(text, encoding="utf-8"): +def encode_text(text: Union[str, bytes], encoding="utf-8") -> bytes: # Converts a text string into a binary type using given encoding. 
- if isinstance(text, bytes): - return text - return text.encode(encoding) + return text if isinstance(text, bytes) else text.encode(encoding=encoding) def maybe_b64decode(data: bytes) -> bytes: @@ -287,7 +290,7 @@ r = random.SystemRandom() if not select_from: select_from = string.ascii_letters + string.digits - return "".join([r.choice(select_from) for _x in range(0, strlen)]) + return "".join([r.choice(select_from) for _x in range(strlen)]) def rand_dict_key(dictionary, postfix=None): @@ -305,17 +308,15 @@ # Avoid circular import from cloudinit.handlers.jinja_template import ( JinjaLoadError, + JinjaSyntaxParsingException, NotJinjaError, render_jinja_payload_from_file, ) try: - config_file = load_file(fname) - except IOError as e: - if e.errno == ENOENT: - return {} - else: - raise + config_file = load_text_file(fname) + except FileNotFoundError: + return {} if instance_data_file and os.path.exists(instance_data_file): try: @@ -330,6 +331,12 @@ instance_data_file, fname, ) + except JinjaSyntaxParsingException as e: + LOG.warning( + "Failed to render templated yaml config file '%s'. %s", + fname, + e, + ) except NotJinjaError: # A log isn't appropriate here as we generally expect most # cloud.cfgs to not be templated. The other path is logged @@ -544,7 +551,7 @@ release_file = "/etc/redhat-release" if not os.path.exists(release_file): return {} - redhat_release = load_file(release_file) + redhat_release = load_text_file(release_file) redhat_regex = ( r"(?P.+) release (?P[\d\.]+) " r"\((?P[^)]+)\)" @@ -581,7 +588,7 @@ os_release = {} os_release_rhel = False if os.path.exists("/etc/os-release"): - os_release = load_shell_content(load_file("/etc/os-release")) + os_release = load_shell_content(load_text_file("/etc/os-release")) if not os_release: os_release_rhel = True os_release = _parse_redhat_release() @@ -942,7 +949,7 @@ # read_optional_seed -# returns boolean indicating success or failure (presense of files) +# returns boolean indicating success or failure (presence of files) # if files are present, populates 'fill' dictionary with 'user-data' and # 'meta-data' entries def read_optional_seed(fill, base="", ext="", timeout=5): @@ -1088,18 +1095,20 @@ # Load them all so that they can be merged cfgs = [] for fn in confs: + path = os.path.join(confd, fn) try: cfgs.append( read_conf( - os.path.join(confd, fn), + path, instance_data_file=instance_data_file, ) ) + except PermissionError: + LOG.warning( + "REDACTED config part %s, insufficient permissions", path + ) except OSError as e: - if e.errno == EACCES: - LOG.warning( - "REDACTED config part %s/%s for non-root user", confd, fn - ) + LOG.warning("Error accessing file %s: [%s]", path, e) return mergemanydict(cfgs) @@ -1120,9 +1129,12 @@ cfg: dict = {} try: cfg = read_conf(cfgfile, instance_data_file=instance_data_file) + except PermissionError: + LOG.warning( + "REDACTED config part %s, insufficient permissions", cfgfile + ) except OSError as e: - if e.errno == EACCES: - LOG.warning("REDACTED config part %s for non-root user", cfgfile) + LOG.warning("Error accessing file %s: [%s]", cfgfile, e) else: cfgs.append(cfg) @@ -1262,7 +1274,7 @@ """ fqdn = None try: - for line in load_file(filename).splitlines(): + for line in load_text_file(filename).splitlines(): hashpos = line.find("#") if hashpos >= 0: line = line[0:hashpos] @@ -1417,6 +1429,7 @@ devlist = [] label = None _type = None + mscdlabel_out = "" if criteria: if criteria.startswith("LABEL="): label = criteria.lstrip("LABEL=") @@ -1577,30 +1590,39 @@ return out_list -def 
load_file(fname, read_cb=None, quiet=False, decode=True): +def load_binary_file( + fname: Union[str, os.PathLike], + *, + read_cb: Optional[Callable[[int], None]] = None, + quiet: bool = False, +) -> bytes: LOG.debug("Reading from %s (quiet=%s)", fname, quiet) ofh = io.BytesIO() try: with open(fname, "rb") as ifh: pipe_in_out(ifh, ofh, chunk_cb=read_cb) - except IOError as e: + except FileNotFoundError: if not quiet: raise - if e.errno != ENOENT: - raise contents = ofh.getvalue() LOG.debug("Read %s bytes from %s", len(contents), fname) - if decode: - return decode_binary(contents) - else: - return contents + return contents + + +def load_text_file( + fname: Union[str, os.PathLike], + *, + read_cb: Optional[Callable[[int], None]] = None, + quiet: bool = False, +) -> str: + return decode_binary(load_binary_file(fname, read_cb=read_cb, quiet=quiet)) @lru_cache() def _get_cmdline(): if is_container(): try: - contents = load_file("/proc/1/cmdline") + contents = load_text_file("/proc/1/cmdline") # replace nulls with space and drop trailing null cmdline = contents.replace("\x00", " ")[:-1] except Exception as e: @@ -1608,7 +1630,7 @@ cmdline = "" else: try: - cmdline = load_file("/proc/cmdline").strip() + cmdline = load_text_file("/proc/cmdline").strip() except Exception: cmdline = "" @@ -1625,7 +1647,7 @@ def fips_enabled() -> bool: fips_proc = "/proc/sys/crypto/fips_enabled" try: - contents = load_file(fips_proc).strip() + contents = load_text_file(fips_proc).strip() return contents == "1" except (IOError, OSError): # for BSD systems and Linux systems where the proc entry is not @@ -1670,7 +1692,7 @@ chownbyid(fname, uid, gid) -# Always returns well formated values +# Always returns well formatted values # cfg is expected to have an entry 'output' in it, which is a dictionary # that includes entries for 'init', 'config', 'final' or 'all' # init: /var/log/cloud.out @@ -1743,6 +1765,7 @@ @param cfg: The cloud-init merged configuration dictionary. """ logs = [] + rotated_logs = [] if not cfg or not isinstance(cfg, dict): return logs default_log = cfg.get("def_log_file") @@ -1760,7 +1783,16 @@ logs.append(target) elif ["tee", "-a"] == parts[:2]: logs.append(parts[2]) - return list(set(logs)) + + # add rotated log files + for logfile in logs: + for rotated_logfile in glob.glob(f"{logfile}*"): + # Check that log file exists and is rotated. + # Do not add current one + if os.path.isfile(rotated_logfile) and rotated_logfile != logfile: + rotated_logs.append(rotated_logfile) + + return list(set(logs + rotated_logs)) def logexc(log, msg, *args): @@ -1897,7 +1929,7 @@ try: # Go through mounts to see what is already mounted if os.path.exists("/proc/mounts"): - mount_locs = load_file("/proc/mounts").splitlines() + mount_locs = load_text_file("/proc/mounts").splitlines() method = "proc" else: out = subp.subp("mount") @@ -2052,9 +2084,8 @@ LOG.debug("Attempting to remove %s", path) try: os.unlink(path) - except OSError as e: - if e.errno != ENOENT: - raise e + except FileNotFoundError: + pass def copy(src, dest): @@ -2072,28 +2103,36 @@ @lru_cache() def boottime(): - """Use sysctlbyname(3) via ctypes to find kern.boottime + """Use sysctl(3) via ctypes to find kern.boottime kern.boottime is of type struct timeval. Here we create a private class to easier unpack it. + Use sysctl(3) (or sysctl(2) on OpenBSD) because sysctlbyname(3) does not + exist on OpenBSD. That complicates retrieval on NetBSD, which #defines + KERN_BOOTTIME as 83 instead of 21. 
+ 21 on NetBSD is KERN_OBOOTTIME, the kern.boottime up until NetBSD 5.0 @return boottime: float to be compatible with linux """ import ctypes import ctypes.util - NULL_BYTES = b"\x00" - class timeval(ctypes.Structure): _fields_ = [("tv_sec", ctypes.c_int64), ("tv_usec", ctypes.c_int64)] libc = ctypes.CDLL(ctypes.util.find_library("c")) size = ctypes.c_size_t() size.value = ctypes.sizeof(timeval) + mib_values = [ # This corresponds to + 1, # CTL_KERN, and + 21 if not is_NetBSD() else 83, # KERN_BOOTTIME + ] + mib = (ctypes.c_int * 2)(*mib_values) buf = timeval() if ( - libc.sysctlbyname( - b"kern.boottime" + NULL_BYTES, + libc.sysctl( + mib, + ctypes.c_int(len(mib_values)), ctypes.byref(buf), ctypes.byref(size), None, @@ -2111,7 +2150,7 @@ try: if os.path.exists("/proc/uptime"): method = "/proc/uptime" - contents = load_file("/proc/uptime") + contents = load_text_file("/proc/uptime") if contents: uptime_str = contents.split()[0] else: @@ -2405,7 +2444,7 @@ try: # Detect Vserver containers - lines = load_file("/proc/self/status").splitlines() + lines = load_text_file("/proc/self/status").splitlines() for line in lines: if line.startswith("VxID:"): (_key, val) = line.strip().split(":", 1) @@ -2433,7 +2472,7 @@ fn = os.path.join("/proc", str(pid), "environ") try: - contents = load_file(fn, decode=False) + contents = load_binary_file(fn) except (IOError, OSError): return {} @@ -2578,7 +2617,7 @@ def parse_mtab(path): """On older kernels there's no /proc/$$/mountinfo, so use mtab.""" - for line in load_file("/etc/mtab").splitlines(): + for line in load_text_file("/etc/mtab").splitlines(): devpth, mount_point, fs_type = line.split()[:3] if mount_point == path: return devpth, fs_type, mount_point @@ -2628,26 +2667,6 @@ return "/dev/" + label_part -def get_device_info_from_zpool(zpool): - # zpool has 10 second timeout waiting for /dev/zfs LP: #1760173 - if not os.path.exists("/dev/zfs"): - LOG.debug("Cannot get zpool info, no /dev/zfs") - return None - try: - (zpoolstatus, err) = subp.subp(["zpool", "status", zpool]) - except subp.ProcessExecutionError as err: - LOG.warning("Unable to get zpool status of %s: %s", zpool, err) - return None - if len(err): - return None - r = r".*(ONLINE).*" - for line in zpoolstatus.split("\n"): - if re.search(r, line) and zpool not in line and "state" not in line: - disk = line.split()[0] - LOG.debug('found zpool "%s" on disk %s', zpool, disk) - return disk - - def parse_mount(path, get_mnt_opts=False): """Return the mount information for PATH given the lines ``mount(1)`` This function is compatible with ``util.parse_mount_info()``""" @@ -2766,7 +2785,7 @@ # input path. 
mountinfo_path = "/proc/%s/mountinfo" % os.getpid() if os.path.exists(mountinfo_path): - lines = load_file(mountinfo_path).splitlines() + lines = load_text_file(mountinfo_path).splitlines() return parse_mount_info(path, lines, log, get_mnt_opts) elif os.path.exists("/etc/mtab"): return parse_mtab(path) @@ -2848,13 +2867,10 @@ ret = {} for f in required + optional: try: - ret[f] = load_file(base + delim + f, quiet=False, decode=False) - except IOError as e: - if e.errno != ENOENT: - raise + ret[f] = load_binary_file(base + delim + f, quiet=False) + except FileNotFoundError: if f in required: missing.append(f) - if len(missing): raise ValueError( "Missing required files: {files}".format(files=",".join(missing)) @@ -2873,7 +2889,7 @@ "MemAvailable:": "available", } ret = {} - for line in load_file(meminfo).splitlines(): + for line in load_text_file(meminfo).splitlines(): try: key, value, unit = line.split() except ValueError: @@ -2946,8 +2962,8 @@ return email.message_from_string(string) -def get_installed_packages(target=None): - out = subp.subp(["dpkg-query", "--list"], target=target, capture=True) +def get_installed_packages(): + out = subp.subp(["dpkg-query", "--list"], capture=True) pkgs_inst = set() for line in out.stdout.splitlines(): @@ -2967,7 +2983,7 @@ # this is certainly not a perfect test, but good enough for now. orpath = "/etc/os-release" try: - orinfo = load_shell_content(load_file(orpath, quiet=True)) + orinfo = load_shell_content(load_text_file(orpath, quiet=True)) if orinfo.get("ID", "").lower() == "ubuntu-core": return True except ValueError as e: @@ -2977,7 +2993,7 @@ if "snap_core=" in cmdline: return True - content = load_file("/etc/system-image/channel.ini", quiet=True) + content = load_text_file("/etc/system-image/channel.ini", quiet=True) if "ubuntu-core" in content.lower(): return True if os.path.isdir("/etc/system-image/config.d/"): @@ -3067,6 +3083,18 @@ return need +def wait_for_snap_seeded(cloud): + """Helper to wait on completion of snap seeding.""" + + def callback(): + if not subp.which("snap"): + LOG.debug("Skipping snap wait, no snap command present") + return + subp.subp(["snap", "wait", "system", "seed.loaded"]) + + cloud.run("snap-seeded", callback, [], freq=PER_ONCE) + + def mount_is_read_write(mount_point): """Check whether the given mount point is mounted rw""" result = get_mount_info(mount_point, get_mnt_opts=True) @@ -3092,47 +3120,6 @@ return subp.subp(settle_cmd) -def get_proc_ppid_linux(pid): - """ - Return the parent pid of a process by parsing /proc/$pid/stat. - """ - ppid = 0 - try: - contents = load_file("/proc/%s/stat" % pid, quiet=True) - if contents: - # see proc.5 for format - m = re.search(r"^\d+ \(.+\) [RSDZTtWXxKPI] (\d+)", str(contents)) - if m: - ppid = int(m.group(1)) - else: - LOG.warning( - "Unable to match parent pid of process pid=%s input: %s", - pid, - contents, - ) - except IOError as e: - LOG.warning("Failed to load /proc/%s/stat. %s", pid, e) - return ppid - - -def get_proc_ppid_ps(pid): - """ - Return the parent pid of a process by checking ps - """ - ppid, _ = subp.subp(["ps", "-oppid=", "-p", str(pid)]) - return int(ppid.strip()) - - -def get_proc_ppid(pid): - """ - Return the parent pid of a process. 
- """ - if is_Linux(): - return get_proc_ppid_linux(pid) - else: - return get_proc_ppid_ps(pid) - - def error(msg, rc=1, fmt="Error:\n{}", sys_exit=False): """ Print error to stderr and return or exit @@ -3288,3 +3275,32 @@ return decorator return wrapper + + +def read_hotplug_enabled_file(paths: "Paths") -> dict: + content: dict = {"scopes": []} + try: + content = json.loads( + load_text_file(paths.get_cpath("hotplug.enabled"), quiet=False) + ) + except FileNotFoundError: + LOG.debug("File not found: %s", paths.get_cpath("hotplug.enabled")) + except json.JSONDecodeError as e: + LOG.warning( + "Ignoring contents of %s because it is not decodable. Error: %s", + settings.HOTPLUG_ENABLED_FILE, + e, + ) + else: + if "scopes" not in content: + content["scopes"] = [] + return content + + +@contextmanager +def nullcontext() -> Generator[None, Any, None]: + """Context manager that does nothing. + + Note: In python-3.7+, this can be substituted by contextlib.nullcontext + """ + yield diff -Nru cloud-init-23.4.4/cloudinit/version.py cloud-init-24.1.3/cloudinit/version.py --- cloud-init-23.4.4/cloudinit/version.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/cloudinit/version.py 2024-03-27 13:14:04.000000000 +0000 @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. -__VERSION__ = "23.4.4" +__VERSION__ = "24.1.3" _PACKAGED_VERSION = "@@PACKAGED_VERSION@@" FEATURES = [ diff -Nru cloud-init-23.4.4/config/clean.d/README cloud-init-24.1.3/config/clean.d/README --- cloud-init-23.4.4/config/clean.d/README 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/config/clean.d/README 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ --- cloud-init's clean.d run-parts directory -- - -This directory is provided for third party applications which need -additional configuration artifact cleanup from the filesystem when -the command `cloud-init clean` is invoked. - -The `cloud-init clean` operation is typically performed by image creators -when preparing a golden image for clone and redeployment. The clean command -removes any cloud-init semaphores, allowing cloud-init to treat the next -boot of this image as the "first boot". When the image is next booted -cloud-init will performing all initial configuration based on any valid -datasource meta-data and user-data. - -Any executable scripts in this subdirectory will be invoked in lexicographical -order with run-parts by the command: sudo cloud-init clean. - -Typical format of such scripts would be a ##- like the following: - /etc/cloud/clean.d/99-live-installer diff -Nru cloud-init-23.4.4/config/cloud.cfg.d/05_logging.cfg cloud-init-24.1.3/config/cloud.cfg.d/05_logging.cfg --- cloud-init-23.4.4/config/cloud.cfg.d/05_logging.cfg 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/config/cloud.cfg.d/05_logging.cfg 2024-03-27 13:14:04.000000000 +0000 @@ -1,7 +1,7 @@ -## This yaml formated config file handles setting +## This yaml formatted config file handles setting ## logger information. The values that are necessary to be set ## are seen at the bottom. The top '_log' are only used to remove -## redundency in a syslog and fallback-to-file case. +## redundancy in a syslog and fallback-to-file case. 
## ## The 'log_cfgs' entry defines a list of logger configs ## Each entry in the list is tried, and the first one that diff -Nru cloud-init-23.4.4/config/cloud.cfg.tmpl cloud-init-24.1.3/config/cloud.cfg.tmpl --- cloud-init-23.4.4/config/cloud.cfg.tmpl 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/config/cloud.cfg.tmpl 2024-03-27 13:14:04.000000000 +0000 @@ -111,7 +111,6 @@ # The modules that run in the 'init' stage cloud_init_modules: - - migrator {% if variant not in ["netbsd"] %} - seed_random {% endif %} @@ -164,7 +163,7 @@ - apt_pipelining - apt_configure {% if variant == "ubuntu" %} - - ubuntu_advantage + - ubuntu_pro {% endif %} {% elif variant in ["fedora", "mariner", "openeuler", "openmandriva", "photon"] or is_rhel %} @@ -204,7 +203,6 @@ - mcollective - salt_minion - reset_rmc - - rightscale_userdata - scripts_vendor - scripts_per_once - scripts_per_boot @@ -303,6 +301,7 @@ {% elif variant in ["ubuntu", "unknown"] %} {# SRU_BLOCKER: do not ship network renderers on Xenial, Bionic or Eoan #} network: + dhcp_client_priority: [dhcpcd, dhclient, udhcpc] renderers: ['netplan', 'eni', 'sysconfig'] activators: ['netplan', 'eni', 'network-manager', 'networkd'] {% elif is_rhel %} @@ -327,6 +326,9 @@ paths: cloud_dir: /var/lib/cloud/ templates_dir: /etc/cloud/templates/ +{% elif is_bsd %} + paths: + run_dir: /var/run/ {% endif %} {% if variant == "debian" %} package_mirrors: diff -Nru cloud-init-23.4.4/conftest.py cloud-init-24.1.3/conftest.py --- cloud-init-23.4.4/conftest.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/conftest.py 2024-03-27 13:14:04.000000000 +0000 @@ -79,6 +79,14 @@ return result[0] +class UnexpectedSubpError(BaseException): + """Error thrown when subp.subp is unexpectedly used. + + We inherit from BaseException so it doesn't get silently swallowed + by other error handlers. + """ + + @pytest.fixture(autouse=True) def disable_subp_usage(request, fixture_utils): """ @@ -142,12 +150,12 @@ if allow_all_subp is None and allow_subp_for is None: # No marks, default behaviour; disallow all subp.subp usage def side_effect(args, *other_args, **kwargs): - raise AssertionError("Unexpectedly used subp.subp") + raise UnexpectedSubpError("Unexpectedly used subp.subp") elif allow_all_subp is not None and allow_subp_for is not None: # Both marks, ambiguous request; raise an exception on all subp usage def side_effect(args, *other_args, **kwargs): - raise AssertionError( + raise UnexpectedSubpError( "Test marked both allow_all_subp and allow_subp_for: resolve" " this either by modifying your test code, or by modifying" " disable_subp_usage to handle precedence." 
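The conftest.py hunks above and below replace AssertionError with UnexpectedSubpError, which deliberately inherits from BaseException: as the added docstring notes, code under test frequently wraps work in broad `except Exception` handlers that would silently swallow an AssertionError. A minimal standalone sketch of that distinction (hypothetical names, not part of the patch):

    class Boom(BaseException):
        """Stand-in for UnexpectedSubpError: escapes `except Exception`."""

    def swallow_everything():
        try:
            raise Boom("Unexpectedly used subp.subp")
        except Exception:
            # Catches AssertionError (an Exception subclass) and friends,
            # but not a BaseException subclass, so Boom still propagates
            # to the test runner instead of being hidden here.
            pass

    # Calling swallow_everything() raises Boom; had Boom derived from
    # Exception, the handler above would have masked the test failure.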
@@ -161,7 +169,7 @@ def side_effect(args, *other_args, **kwargs): cmd = args[0] if cmd not in allow_subp_for: - raise AssertionError( + raise UnexpectedSubpError( "Unexpectedly used subp.subp to call {} (allowed:" " {})".format(cmd, ",".join(allow_subp_for)) ) diff -Nru cloud-init-23.4.4/debian/apport-general-hook.py cloud-init-24.1.3/debian/apport-general-hook.py --- cloud-init-23.4.4/debian/apport-general-hook.py 2024-02-27 15:14:44.000000000 +0000 +++ cloud-init-24.1.3/debian/apport-general-hook.py 2024-03-27 13:36:38.000000000 +0000 @@ -1,61 +1,6 @@ """General Apport hook for all reports that are using cloud-init.""" -import json -import logging - - -def _get_azure_data(ds_data) -> dict[str, str]: - compute = ds_data.get("meta_data", {}).get("imds", {}).get("compute") - if not compute: - return {} - name_to_report_map = { - "publisher": "ImagePublisher", - "offer": "ImageOffer", - "sku": "ImageSKU", - "version": "ImageVersion", - "vmSize": "VMSize", - } - azure_data = {} - for src_key, report_key_name in name_to_report_map.items(): - azure_data[report_key_name] = compute[src_key] - return azure_data - - -def _get_ec2_data(ds_data) -> dict[str, str]: - document = ( - ds_data.get("dynamic", {}).get("instance-identity", {}).get("document") - ) - if not document: - return {} - wanted_keys = { - "architecture", - "billingProducts", - "imageId", - "instanceType", - "region", - } - return { - key: value for key, value in document.items() if key in wanted_keys - } - - -PLATFORM_SPECIFIC_INFO = {"azure": _get_azure_data, "ec2": _get_ec2_data} - - -def add_datasource_specific_info(report, platform: str, ds_data) -> None: - """Add datasoure specific information from the ds dictionary. - - ds_data contains the "ds" entry from data from - /run/cloud/instance-data.json. - """ - platform_info = PLATFORM_SPECIFIC_INFO.get(platform) - if not platform_info: - return - retrieved_data = platform_info(ds_data) - for key, value in retrieved_data.items(): - if not value: - continue - report[platform.capitalize() + key.capitalize()] = value +from cloudinit.apport import general_add_info def add_info(report, ui) -> None: @@ -64,36 +9,4 @@ Add a subset of non-sensitive cloud-init data from /run/cloud/instance-data.json that will be helpful for debugging. """ - try: - with open("/run/cloud-init/instance-data.json", "r") as fopen: - instance_data = json.load(fopen) - except FileNotFoundError: - logging.getLogger().warning( - "cloud-init run data not found on system. " - "Unable to add cloud-specific data." - ) - return - - v1 = instance_data.get("v1") - if not v1: - logging.getLogger().warning( - "instance-data.json lacks 'v1' metadata. Present keys: %s", - sorted(instance_data.keys()), - ) - return - - for key, report_key in { - "cloud_id": "CloudID", - "cloud_name": "CloudName", - "machine": "CloudArchitecture", - "platform": "CloudPlatform", - "region": "CloudRegion", - "subplatform": "CloudSubPlatform", - }.items(): - value = v1.get(key) - if value: - report[report_key] = value - - add_datasource_specific_info( - report, v1["platform"], instance_data.get("ds") - ) + general_add_info(report, ui) diff -Nru cloud-init-23.4.4/debian/changelog cloud-init-24.1.3/debian/changelog --- cloud-init-23.4.4/debian/changelog 2024-02-27 15:14:44.000000000 +0000 +++ cloud-init-24.1.3/debian/changelog 2024-03-27 13:36:38.000000000 +0000 @@ -1,3 +1,51 @@ +cloud-init (24.1.3-0ubuntu1~22.04.1) jammy; urgency=medium + + * Upstream snapshot based on 24.1.3. (LP: #2056100). 
+ List of changes from upstream can be found at + https://raw.githubusercontent.com/canonical/cloud-init/24.1.3/ChangeLog + + -- James Falcon Wed, 27 Mar 2024 08:36:38 -0500 + +cloud-init (24.1.2-0ubuntu1~22.04.1) jammy; urgency=medium + + * refresh patches: + - d/p/retain-ec2-default-net-update-events.patch + * Upstream snapshot based on 24.1.2. (LP: #2056100). + List of changes from upstream can be found at + https://raw.githubusercontent.com/canonical/cloud-init/24.1.2/ChangeLog + + -- Chad Smith Thu, 21 Mar 2024 09:22:13 -0600 + +cloud-init (24.1.1-0ubuntu1~22.04.1) jammy; urgency=medium + + * d/apport-general-hook.py: Move apport hook to main branch + * d/cloud-init.maintscript: remove /etc/cloud/clean.d/README + * d/cloud-init.logrotate: add logrotate config for cloud-init + * d/cloud-init.templates: enable WSL datasource by default + * Drop d/p/retain-netplan-world-readable.patch: + - Limit perms to 600 of /etc/netplan/50-cloud-init.yaml instead of 644 + (LP: #2053157) + * d/p/keep-dhclient-as-priority-client.patch: + - keep dhclient as default client + * d/p/revert-551f560d-cloud-config-after-snap-seeding.patch + - Retain systemd ordering cloud-config.service After=snapd.seeded.service + * d/p/retain-ec2-default-net-update-events.patch: + Reverts 4dbb08f5f0cc4f41cf9dd1474f0600a11510a3c9 to not change behavior + on stable releases. + * d/po/templates.pot: update for wsl + * d/cloud-init.postinst: change priority of hotplug rules. + Avoids LP #1946003 on upgraded systems. References: + [0] https://github.com/canonical/cloud-init/pull/4799 + [1] commit/b519d861aff8b44a0610c176cb34adcbe28df144 + * refresh patches: + - d/p/status-do-not-remove-duplicated-data.patch + - d/p/status-retain-recoverable-error-exit-code.patch + * Upstream snapshot based on 24.1.1. (LP: #2056100). + List of changes from upstream can be found at + https://raw.githubusercontent.com/canonical/cloud-init/24.1.1/ChangeLog + + -- Brett Holman Wed, 13 Mar 2024 16:38:18 -0600 + cloud-init (23.4.4-0ubuntu0~22.04.1) jammy; urgency=medium * Upstream snapshot based on 23.4.4. (LP: #2055081). 
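Two entries in the 24.1.1 changelog above pair up with code elsewhere in this diff: the new debian/cloud-init.logrotate policy (shown next) rotates /var/log/cloud-init*.log, and the cloudinit/util.py hunk earlier teaches get_config_logfiles() to pick up those rotated siblings via glob. A minimal sketch of that collection step, using a hypothetical helper name:

    import glob
    import os

    def rotated_siblings(logfile):
        # Mirrors the loop added to util.get_config_logfiles(): keep every
        # existing file matching "<logfile>*" except the live log itself.
        return [
            path
            for path in glob.glob(f"{logfile}*")
            if os.path.isfile(path) and path != logfile
        ]

    # Once logrotate has run, rotated_siblings("/var/log/cloud-init.log")
    # would return names like /var/log/cloud-init.log.1 or
    # /var/log/cloud-init.log.2.gz, so callers that enumerate cloud-init's
    # logs (e.g. `cloud-init clean --logs`) also see the rotated history.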
diff -Nru cloud-init-23.4.4/debian/cloud-init.logrotate cloud-init-24.1.3/debian/cloud-init.logrotate --- cloud-init-23.4.4/debian/cloud-init.logrotate 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/debian/cloud-init.logrotate 2024-03-27 13:36:38.000000000 +0000 @@ -0,0 +1,11 @@ +/var/log/cloud-init*.log +{ + su root root + missingok + nocreate + notifempty + rotate 6 + compress + delaycompress + size 1M +} diff -Nru cloud-init-23.4.4/debian/cloud-init.maintscript cloud-init-24.1.3/debian/cloud-init.maintscript --- cloud-init-23.4.4/debian/cloud-init.maintscript 2024-02-27 15:14:44.000000000 +0000 +++ cloud-init-24.1.3/debian/cloud-init.maintscript 2024-03-27 13:36:38.000000000 +0000 @@ -7,3 +7,4 @@ rm_conffile /etc/init/cloud-init.conf 0.7.9-243-ge74d775-0ubuntu2~ rm_conffile /etc/init/cloud-log-shutdown.conf 0.7.9-243-ge74d775-0ubuntu2~ rm_conffile /etc/NetworkManager/dispatcher.d/hook-network-manager 23.3.1-0ubuntu1~ +rm_conffile /etc/cloud/clean.d/README 24.1-0ubuntu0~22.04.1~ diff -Nru cloud-init-23.4.4/debian/cloud-init.postinst cloud-init-24.1.3/debian/cloud-init.postinst --- cloud-init-23.4.4/debian/cloud-init.postinst 2024-02-27 15:14:44.000000000 +0000 +++ cloud-init-24.1.3/debian/cloud-init.postinst 2024-03-27 13:36:38.000000000 +0000 @@ -371,6 +371,13 @@ fi } +rename_hook_hotplug_udev_rule() { + # Avoids LP: #1946003 see commit: b519d861aff8b44a0610c176cb34adcbe28df144 + if [ -f /etc/udev/rules.d/10-cloud-init-hook-hotplug.rules ]; then + mv -f /etc/udev/rules.d/10-cloud-init-hook-hotplug.rules \ + /etc/udev/rules.d/90-cloud-init-hook-hotplug.rules + fi +} if [ "$1" = "configure" ]; then if db_get cloud-init/datasources; then @@ -406,6 +413,8 @@ # Redact schema sensitive warning logs on invalid user-data fix_1978422_redact_sensitive_logs_on_invalid_userdata_schema "$2" + + rename_hook_hotplug_udev_rule fi #DEBHELPER# diff -Nru cloud-init-23.4.4/debian/cloud-init.templates cloud-init-24.1.3/debian/cloud-init.templates --- cloud-init-23.4.4/debian/cloud-init.templates 2024-02-27 15:14:44.000000000 +0000 +++ cloud-init-24.1.3/debian/cloud-init.templates 2024-03-27 13:36:38.000000000 +0000 @@ -1,8 +1,8 @@ Template: cloud-init/datasources Type: multiselect -Default: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, Exoscale, RbxCloud, UpCloud, VMware, Vultr, LXD, NWCS, Akamai, None -Choices-C: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, Exoscale, RbxCloud, UpCloud, VMware, Vultr, LXD, NWCS, Akamai, None -__Choices: NoCloud: Reads info from /var/lib/cloud/seed only, ConfigDrive: Reads data from Openstack Config Drive, OpenNebula: read from OpenNebula context disk, DigitalOcean: reads data from Droplet datasource, Azure: read from MS Azure cdrom. Requires walinux-agent, AltCloud: config disks for RHEVm and vSphere, OVF: Reads data from OVF Transports, MAAS: Reads data from Ubuntu MAAS, GCE: google compute metadata service, OpenStack: native openstack metadata service, CloudSigma: metadata over serial for cloudsigma.com, SmartOS: Read from SmartOS metadata service, Bigstep: Bigstep metadata service, Scaleway: Scaleway metadata service, AliYun: Alibaba metadata service, Ec2: reads data from EC2 Metadata service, CloudStack: Read from CloudStack metadata service, Hetzner: Hetzner Cloud, IBMCloud: IBM Cloud. 
Previously softlayer or bluemix., Oracle: Oracle Compute Infrastructure, Exoscale: Exoscale, RbxCloud: HyperOne and Rootbox platforms, UpCloud: UpCloud, VMware: reads data from guestinfo table or env vars, Vultr: Vultr Cloud, LXD: Reads /dev/lxd/sock representation of instance data, NWCS: NWCS, Akamai: Akamai and Linode platforms, None: Failsafe datasource +Default: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, Exoscale, RbxCloud, UpCloud, VMware, Vultr, LXD, NWCS, Akamai, WSL, None +Choices-C: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, Exoscale, RbxCloud, UpCloud, VMware, Vultr, LXD, NWCS, Akamai, WSL, None +__Choices: NoCloud: Reads info from /var/lib/cloud/seed only, ConfigDrive: Reads data from Openstack Config Drive, OpenNebula: read from OpenNebula context disk, DigitalOcean: reads data from Droplet datasource, Azure: read from MS Azure cdrom. Requires walinux-agent, AltCloud: config disks for RHEVm and vSphere, OVF: Reads data from OVF Transports, MAAS: Reads data from Ubuntu MAAS, GCE: google compute metadata service, OpenStack: native openstack metadata service, CloudSigma: metadata over serial for cloudsigma.com, SmartOS: Read from SmartOS metadata service, Bigstep: Bigstep metadata service, Scaleway: Scaleway metadata service, AliYun: Alibaba metadata service, Ec2: reads data from EC2 Metadata service, CloudStack: Read from CloudStack metadata service, Hetzner: Hetzner Cloud, IBMCloud: IBM Cloud. Previously softlayer or bluemix., Oracle: Oracle Compute Infrastructure, Exoscale: Exoscale, RbxCloud: HyperOne and Rootbox platforms, UpCloud: UpCloud, VMware: reads data from guestinfo table or env vars, Vultr: Vultr Cloud, LXD: Reads /dev/lxd/sock representation of instance data, NWCS: NWCS, Akamai: Akamai and Linode platforms, WSL: Windows Subsystem for Linux, None: Failsafe datasource _Description: Which data sources should be searched? Cloud-init supports searching different "Data Sources" for information that it uses to configure a cloud instance. diff -Nru cloud-init-23.4.4/debian/patches/expire-on-hashed-users.patch cloud-init-24.1.3/debian/patches/expire-on-hashed-users.patch --- cloud-init-23.4.4/debian/patches/expire-on-hashed-users.patch 2024-02-27 15:14:44.000000000 +0000 +++ cloud-init-24.1.3/debian/patches/expire-on-hashed-users.patch 2024-03-27 13:36:38.000000000 +0000 @@ -18,7 +18,7 @@ in cc_set_passwords, hashed passwords will be expired. 
Previous to 22.3, --- a/tests/unittests/config/test_cc_set_passwords.py +++ b/tests/unittests/config/test_cc_set_passwords.py -@@ -190,61 +190,6 @@ class TestSetPasswordsHandle: +@@ -194,61 +194,6 @@ class TestSetPasswordsHandle: @pytest.mark.parametrize( "user_cfg", [ @@ -80,7 +80,7 @@ {"expire": "false", "list": ["root:R", "ubuntu:RANDOM"]}, { "expire": "false", -@@ -504,6 +449,7 @@ expire_cases = [ +@@ -508,6 +453,7 @@ expire_cases = [ class TestExpire: @pytest.mark.parametrize("cfg", expire_cases) def test_expire(self, cfg, mocker, caplog): diff -Nru cloud-init-23.4.4/debian/patches/keep-dhclient-as-priority-client.patch cloud-init-24.1.3/debian/patches/keep-dhclient-as-priority-client.patch --- cloud-init-23.4.4/debian/patches/keep-dhclient-as-priority-client.patch 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/debian/patches/keep-dhclient-as-priority-client.patch 2024-03-27 13:36:38.000000000 +0000 @@ -0,0 +1,15 @@ +Description: Keep dhclient as default client +Author: Brett Holman +Last-Update: 2024-02-20 + +--- a/config/cloud.cfg.tmpl ++++ b/config/cloud.cfg.tmpl +@@ -301,7 +301,7 @@ system_info: + {% elif variant in ["ubuntu", "unknown"] %} + {# SRU_BLOCKER: do not ship network renderers on Xenial, Bionic or Eoan #} + network: +- dhcp_client_priority: [dhcpcd, dhclient, udhcpc] ++ dhcp_client_priority: [dhclient, dhcpcd, udhcpc] + renderers: ['netplan', 'eni', 'sysconfig'] + activators: ['netplan', 'eni', 'network-manager', 'networkd'] + {% elif is_rhel %} diff -Nru cloud-init-23.4.4/debian/patches/retain-ec2-default-net-update-events.patch cloud-init-24.1.3/debian/patches/retain-ec2-default-net-update-events.patch --- cloud-init-23.4.4/debian/patches/retain-ec2-default-net-update-events.patch 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/debian/patches/retain-ec2-default-net-update-events.patch 2024-03-27 13:36:38.000000000 +0000 @@ -0,0 +1,23 @@ +Description: Retain ec2 default net update events + Reverts 4dbb08f5f0cc4f41cf9dd1474f0600a11510a3c9 to not change behavior + on stable releases. +Author: Alberto Contreras +Last-Update: 2024-02-15 +--- +This patch header follows DEP-3: http://dep.debian.net/deps/dep3/ +--- a/cloudinit/sources/DataSourceEc2.py ++++ b/cloudinit/sources/DataSourceEc2.py +@@ -105,13 +105,6 @@ class DataSourceEc2(sources.DataSource): + } + } + +- default_update_events = { +- EventScope.NETWORK: { +- EventType.BOOT_NEW_INSTANCE, +- EventType.HOTPLUG, +- } +- } +- + extra_hotplug_udev_rules = _EXTRA_HOTPLUG_UDEV_RULES + + def __init__(self, sys_cfg, distro, paths): diff -Nru cloud-init-23.4.4/debian/patches/retain-netplan-world-readable.patch cloud-init-24.1.3/debian/patches/retain-netplan-world-readable.patch --- cloud-init-23.4.4/debian/patches/retain-netplan-world-readable.patch 2024-02-27 15:14:44.000000000 +0000 +++ cloud-init-24.1.3/debian/patches/retain-netplan-world-readable.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,122 +0,0 @@ -Description: Retain world-readable /etc/netplan/50-cloud-init.yaml - To avoid change in behavior stable releases wil not adopt root read-only - /etc/netplan/50-cloud-init.yaml. which is present in Lunar and newer. -Author: chad.smith@canonical.com -Origin: backport -Forwarded: not-needed -Last-Update: 2023-01-09 ---- -This patch header follows DEP-3: http://dep.debian.net/deps/dep3/ ---- a/cloudinit/features.py -+++ b/cloudinit/features.py -@@ -59,7 +59,7 @@ only non-hashed passwords were expired. - (This flag can be removed after Jammy is no longer supported.) 
- """ - --NETPLAN_CONFIG_ROOT_READ_ONLY = True -+NETPLAN_CONFIG_ROOT_READ_ONLY = False - """ - If ``NETPLAN_CONFIG_ROOT_READ_ONLY`` is True, then netplan configuration will - be written as a single root readon-only file /etc/netplan/50-cloud-init.yaml. ---- a/tests/unittests/distros/test_netconfig.py -+++ b/tests/unittests/distros/test_netconfig.py -@@ -608,32 +608,41 @@ class TestNetCfgDistroUbuntuNetplan(Test - (self.netplan_path(), V1_TO_V2_NET_CFG_OUTPUT, 0o600), - ) - -- self._apply_and_verify_netplan( -- self.distro.apply_network_config, -- V1_NET_CFG, -- expected_cfgs=expected_cfgs, -- ) -+ with mock.patch.object( -+ features, "NETPLAN_CONFIG_ROOT_READ_ONLY", True -+ ): -+ self._apply_and_verify_netplan( -+ self.distro.apply_network_config, -+ V1_NET_CFG, -+ expected_cfgs=expected_cfgs, -+ ) - - def test_apply_network_config_v1_ipv6_to_netplan_ub(self): - expected_cfgs = ( - (self.netplan_path(), V1_TO_V2_NET_CFG_IPV6_OUTPUT, 0o600), - ) - -- self._apply_and_verify_netplan( -- self.distro.apply_network_config, -- V1_NET_CFG_IPV6, -- expected_cfgs=expected_cfgs, -- ) -+ with mock.patch.object( -+ features, "NETPLAN_CONFIG_ROOT_READ_ONLY", True -+ ): -+ self._apply_and_verify_netplan( -+ self.distro.apply_network_config, -+ V1_NET_CFG_IPV6, -+ expected_cfgs=expected_cfgs, -+ ) - - def test_apply_network_config_v2_passthrough_ub(self): - expected_cfgs = ( - (self.netplan_path(), V2_TO_V2_NET_CFG_OUTPUT, 0o600), - ) -- self._apply_and_verify_netplan( -- self.distro.apply_network_config, -- V2_NET_CFG, -- expected_cfgs=expected_cfgs, -- ) -+ with mock.patch.object( -+ features, "NETPLAN_CONFIG_ROOT_READ_ONLY", True -+ ): -+ self._apply_and_verify_netplan( -+ self.distro.apply_network_config, -+ V2_NET_CFG, -+ expected_cfgs=expected_cfgs, -+ ) - - def test_apply_network_config_v2_passthrough_retain_orig_perms(self): - """Custom permissions on existing netplan is kept when more strict.""" -@@ -673,11 +682,14 @@ class TestNetCfgDistroUbuntuNetplan(Test - expected_cfgs = ( - (self.netplan_path(), V2_PASSTHROUGH_NET_CFG_OUTPUT, 0o600), - ) -- self._apply_and_verify_netplan( -- self.distro.apply_network_config, -- V2_PASSTHROUGH_NET_CFG, -- expected_cfgs=expected_cfgs, -- ) -+ with mock.patch.object( -+ features, "NETPLAN_CONFIG_ROOT_READ_ONLY", True -+ ): -+ self._apply_and_verify_netplan( -+ self.distro.apply_network_config, -+ V2_PASSTHROUGH_NET_CFG, -+ expected_cfgs=expected_cfgs, -+ ) - self.assertIn("Passthrough netplan v2 config", self.logs.getvalue()) - self.assertIn( - "Selected renderer 'netplan' from priority list: ['netplan']", -@@ -1072,12 +1084,16 @@ class TestNetCfgDistroArch(TestNetCfgDis - with mock.patch( - "cloudinit.net.netplan.get_devicelist", return_value=[] - ): -- self._apply_and_verify( -- self.distro.apply_network_config, -- V1_NET_CFG, -- expected_cfgs=expected_cfgs.copy(), -- with_netplan=True, -- ) -+ with mock.patch.object( -+ features, "NETPLAN_CONFIG_ROOT_READ_ONLY" -+ ) as netplan_readonly: -+ netplan_readonly = True -+ self._apply_and_verify( -+ self.distro.apply_network_config, -+ V1_NET_CFG, -+ expected_cfgs=expected_cfgs.copy(), -+ with_netplan=True, -+ ) - - - class TestNetCfgDistroPhoton(TestNetCfgDistroBase): diff -Nru cloud-init-23.4.4/debian/patches/revert-551f560d-cloud-config-after-snap-seeding.patch cloud-init-24.1.3/debian/patches/revert-551f560d-cloud-config-after-snap-seeding.patch --- cloud-init-23.4.4/debian/patches/revert-551f560d-cloud-config-after-snap-seeding.patch 1970-01-01 00:00:00.000000000 +0000 +++ 
cloud-init-24.1.3/debian/patches/revert-551f560d-cloud-config-after-snap-seeding.patch 2024-03-27 13:36:38.000000000 +0000 @@ -0,0 +1,338 @@ +Description: Retain systemd ordering cloud-config.service After=snapd.seeded + Revert of upstream commit 551f560d which drops After=snapd.seeded.service + from cloud-config.service configuration. Stable releases want to retain + this behavior. +Author: chad.smith@canonical.com +Origin: backport +Last-Update: 2024-02-14 +--- a/cloudinit/cloud.py ++++ b/cloudinit/cloud.py +@@ -57,17 +57,6 @@ class Cloud: + return copy.deepcopy(self._cfg) + + def run(self, name, functor, args, freq=None, clear_on_fail=False): +- """Run a function gated by a named semaphore for a desired frequency. +- +- The typical case for this method would be to limit running of the +- provided func to a single well-defined frequency: +- PER_INSTANCE, PER_BOOT or PER_ONCE +- +- The semaphore provides a gate that persists across cloud-init +- boot stage boundaries so multiple modules can share this state +- even if they happen to be run in different boot stages or across +- reboots. +- """ + return self._runners.run(name, functor, args, freq, clear_on_fail) + + def get_template_filename(self, name): +--- a/cloudinit/config/cc_lxd.py ++++ b/cloudinit/config/cc_lxd.py +@@ -210,7 +210,6 @@ def handle(name: str, cfg: Config, cloud + f" '{type(lxd_cfg).__name__}'" + ) + +- util.wait_for_snap_seeded(cloud) + # Grab the configuration + init_cfg = lxd_cfg.get("init", {}) + preseed_str = lxd_cfg.get("preseed", "") +--- a/cloudinit/config/cc_snap.py ++++ b/cloudinit/config/cc_snap.py +@@ -191,7 +191,7 @@ def handle(name: str, cfg: Config, cloud + "Skipping module named %s, no 'snap' key in configuration", name + ) + return +- util.wait_for_snap_seeded(cloud) ++ + add_assertions( + cfgin.get("assertions", []), + os.path.join(cloud.paths.get_ipath_cur(), "snapd.assertions"), +--- a/cloudinit/config/cc_ubuntu_autoinstall.py ++++ b/cloudinit/config/cc_ubuntu_autoinstall.py +@@ -6,7 +6,6 @@ import logging + import re + from textwrap import dedent + +-from cloudinit import util + from cloudinit.cloud import Cloud + from cloudinit.config import Config + from cloudinit.config.schema import ( +@@ -84,7 +83,6 @@ def handle(name: str, cfg: Config, cloud + ) + return + +- util.wait_for_snap_seeded(cloud) + snap_list, _ = subp(["snap", "list"]) + installer_present = None + for snap_name in LIVE_INSTALLER_SNAPS: +--- a/cloudinit/util.py ++++ b/cloudinit/util.py +@@ -69,7 +69,7 @@ from cloudinit import ( + url_helper, + version, + ) +-from cloudinit.settings import CFG_BUILTIN, PER_ONCE ++from cloudinit.settings import CFG_BUILTIN + + if TYPE_CHECKING: + # Avoid circular import +@@ -3083,18 +3083,6 @@ def wait_for_files(flist, maxwait, naple + return need + + +-def wait_for_snap_seeded(cloud): +- """Helper to wait on completion of snap seeding.""" +- +- def callback(): +- if not subp.which("snap"): +- LOG.debug("Skipping snap wait, no snap command present") +- return +- subp.subp(["snap", "wait", "system", "seed.loaded"]) +- +- cloud.run("snap-seeded", callback, [], freq=PER_ONCE) +- +- + def mount_is_read_write(mount_point): + """Check whether the given mount point is mounted rw""" + result = get_mount_info(mount_point, get_mnt_opts=True) +--- a/systemd/cloud-config.service.tmpl ++++ b/systemd/cloud-config.service.tmpl +@@ -2,6 +2,7 @@ + [Unit] + Description=Apply the settings specified in cloud-config + After=network-online.target cloud-config.target ++After=snapd.seeded.service + 
Before=systemd-user-sessions.service + Wants=network-online.target cloud-config.target + ConditionPathExists=!/etc/cloud/cloud-init.disabled +--- a/tests/integration_tests/modules/test_frequency_override.py ++++ b/tests/integration_tests/modules/test_frequency_override.py +@@ -1,7 +1,6 @@ + import pytest + + from tests.integration_tests.instances import IntegrationInstance +-from tests.integration_tests.releases import CURRENT_RELEASE + + USER_DATA = """\ + #cloud-config +@@ -18,17 +17,6 @@ def test_frequency_override(client: Inte + in client.read_from_file("/var/log/cloud-init.log") + ) + assert client.read_from_file("/var/tmp/hi").strip().count("hi") == 1 +- if CURRENT_RELEASE.os == "ubuntu": +- if CURRENT_RELEASE.series in ("focal", "jammy", "lunar", "mantic"): +- # Stable series will block on snapd.seeded.service and create a +- # semaphore file +- assert client.execute("test -f /var/lib/cloud/snap-seeded.once").ok +- else: +- # Newer series will not block on snapd.seeded.service nor create a +- # semaphore file +- assert not client.execute( +- "test -f /var/lib/cloud/snap-seeded.once" +- ).ok + + # Change frequency of scripts_user to always + config = client.read_from_file("/etc/cloud/cloud.cfg") +--- a/tests/unittests/config/test_cc_lxd.py ++++ b/tests/unittests/config/test_cc_lxd.py +@@ -11,8 +11,6 @@ from cloudinit.config.schema import ( + get_schema, + validate_cloudconfig_schema, + ) +-from cloudinit.helpers import Paths +-from cloudinit.util import del_file + from tests.unittests import helpers as t_help + from tests.unittests.util import get_cloud + +@@ -46,9 +44,7 @@ class TestLxd(t_help.CiTestCase): + ) + def test_lxd_init(self, maybe_clean, which, subp, exists, system_info): + system_info.return_value = {"uname": [0, 1, "mykernel"]} +- tmpdir = self.tmp_dir() +- sem_file = f"{tmpdir}/sem/snap_seeded.once" +- cc = get_cloud(mocked_distro=True, paths=Paths({"cloud_dir": tmpdir})) ++ cc = get_cloud(mocked_distro=True) + install = cc.distro.install_packages + + for backend, cmd, package in BACKEND_DEF: +@@ -88,23 +84,21 @@ class TestLxd(t_help.CiTestCase): + if backend == "lvm": + self.assertEqual( + [ +- mock.call(sem_file), + mock.call( + "/lib/modules/mykernel/" + "kernel/drivers/md/dm-thin-pool.ko" +- ), ++ ) + ], + exists.call_args_list, + ) + else: +- self.assertEqual([mock.call(sem_file)], exists.call_args_list) +- del_file(sem_file) ++ self.assertEqual([], exists.call_args_list) + + @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") + @mock.patch("cloudinit.config.cc_lxd.subp") + @mock.patch("cloudinit.config.cc_lxd.subp.which", return_value=False) + def test_lxd_install(self, m_which, mock_subp, m_maybe_clean): +- cc = get_cloud(paths=Paths({"cloud_dir": self.tmp_dir()})) ++ cc = get_cloud() + cc.distro = mock.MagicMock() + mock_subp.which.return_value = None + cc_lxd.handle("cc_lxd", LXD_INIT_CFG, cc, []) +@@ -118,7 +112,7 @@ class TestLxd(t_help.CiTestCase): + @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") + @mock.patch("cloudinit.config.cc_lxd.subp") + def test_no_init_does_nothing(self, mock_subp, m_maybe_clean): +- cc = get_cloud(paths=Paths({"cloud_dir": self.tmp_dir()})) ++ cc = get_cloud() + cc.distro = mock.MagicMock() + cc_lxd.handle("cc_lxd", {"lxd": {}}, cc, []) + self.assertFalse(cc.distro.install_packages.called) +@@ -128,17 +122,16 @@ class TestLxd(t_help.CiTestCase): + @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") + @mock.patch("cloudinit.config.cc_lxd.subp") + def test_no_lxd_does_nothing(self, mock_subp, 
m_maybe_clean): +- cc = get_cloud(paths=Paths({"cloud_dir": self.tmp_dir()})) ++ cc = get_cloud() + cc.distro = mock.MagicMock() + cc_lxd.handle("cc_lxd", {"package_update": True}, cc, []) + self.assertFalse(cc.distro.install_packages.called) + self.assertFalse(mock_subp.subp.called) + self.assertFalse(m_maybe_clean.called) + +- @mock.patch("cloudinit.config.cc_lxd.util.wait_for_snap_seeded") + @mock.patch("cloudinit.config.cc_lxd.subp") +- def test_lxd_preseed(self, mock_subp, wait_for_snap_seeded): +- cc = get_cloud(paths=Paths({"cloud_dir": self.tmp_dir()})) ++ def test_lxd_preseed(self, mock_subp): ++ cc = get_cloud() + cc.distro = mock.MagicMock() + cc_lxd.handle( + "cc_lxd", +@@ -153,7 +146,6 @@ class TestLxd(t_help.CiTestCase): + ], + mock_subp.subp.call_args_list, + ) +- wait_for_snap_seeded.assert_called_once_with(cc) + + def test_lxd_debconf_new_full(self): + data = { +--- a/tests/unittests/config/test_cc_snap.py ++++ b/tests/unittests/config/test_cc_snap.py +@@ -301,11 +301,8 @@ class TestSnapSchema: + + + class TestHandle: +- @mock.patch("cloudinit.util.wait_for_snap_seeded") + @mock.patch("cloudinit.config.cc_snap.subp.subp") +- def test_handle_adds_assertions( +- self, m_subp, wait_for_snap_seeded, fake_cloud, tmpdir +- ): ++ def test_handle_adds_assertions(self, m_subp, fake_cloud, tmpdir): + """Any configured snap assertions are provided to add_assertions.""" + assert_file = os.path.join( + fake_cloud.paths.get_ipath_cur(), "snapd.assertions" +@@ -320,4 +317,3 @@ class TestHandle: + assert util.load_text_file(compare_file) == util.load_text_file( + assert_file + ) +- wait_for_snap_seeded.assert_called_once_with(fake_cloud) +--- a/tests/unittests/config/test_cc_ubuntu_autoinstall.py ++++ b/tests/unittests/config/test_cc_ubuntu_autoinstall.py +@@ -11,7 +11,6 @@ from cloudinit.config.schema import ( + get_schema, + validate_cloudconfig_schema, + ) +-from cloudinit.helpers import Paths + from tests.unittests.helpers import skipUnlessJsonSchema + from tests.unittests.util import get_cloud + +@@ -65,20 +64,18 @@ class TestvalidateConfigSchema: + cc_ubuntu_autoinstall.validate_config_schema(src_cfg) + + +-@mock.patch(MODPATH + "util.wait_for_snap_seeded") + @mock.patch(MODPATH + "subp") + class TestHandleAutoinstall: + """Test cc_ubuntu_autoinstall handling of config.""" + + @pytest.mark.parametrize( +- "cfg,snap_list,subp_calls,logs,snap_wait_called", ++ "cfg,snap_list,subp_calls,logs", + [ + pytest.param( + {}, + SAMPLE_SNAP_LIST_OUTPUT, + [], + ["Skipping module named name, no 'autoinstall' key"], +- False, + id="skip_no_cfg", + ), + pytest.param( +@@ -90,7 +87,6 @@ class TestHandleAutoinstall: + " installer snap packages to be present: subiquity," + " ubuntu-desktop-installer" + ], +- True, + id="valid_autoinstall_schema_checks_snaps", + ), + pytest.param( +@@ -101,7 +97,6 @@ class TestHandleAutoinstall: + "Valid autoinstall schema. Config will be processed by" + " subiquity" + ], +- True, + id="valid_autoinstall_schema_sees_subiquity", + ), + pytest.param( +@@ -112,33 +107,19 @@ class TestHandleAutoinstall: + "Valid autoinstall schema. 
Config will be processed by" + " ubuntu-desktop-installer" + ], +- True, + id="valid_autoinstall_schema_sees_desktop_installer", + ), + ], + ) + def test_handle_autoinstall_cfg( +- self, +- subp, +- wait_for_snap_seeded, +- cfg, +- snap_list, +- subp_calls, +- logs, +- snap_wait_called, +- caplog, +- tmpdir, ++ self, subp, cfg, snap_list, subp_calls, logs, caplog + ): + subp.return_value = snap_list, "" +- cloud = get_cloud(distro="ubuntu", paths=Paths({"cloud_dir": tmpdir})) ++ cloud = get_cloud(distro="ubuntu") + cc_ubuntu_autoinstall.handle("name", cfg, cloud, None) + assert subp_calls == subp.call_args_list + for log in logs: + assert log in caplog.text +- if snap_wait_called: +- wait_for_snap_seeded.assert_called_once_with(cloud) +- else: +- wait_for_snap_seeded.assert_not_called() + + + class TestAutoInstallSchema: +--- a/tests/unittests/util.py ++++ b/tests/unittests/util.py +@@ -33,9 +33,7 @@ def get_cloud( + myds.metadata.update(metadata) + if paths: + paths.datasource = myds +- return cloud.Cloud( +- myds, paths, sys_cfg, mydist, runners=helpers.Runners(paths) +- ) ++ return cloud.Cloud(myds, paths, sys_cfg, mydist, None) + + + def abstract_to_concrete(abclass): diff -Nru cloud-init-23.4.4/debian/patches/series cloud-init-24.1.3/debian/patches/series --- cloud-init-23.4.4/debian/patches/series 2024-02-27 15:14:44.000000000 +0000 +++ cloud-init-24.1.3/debian/patches/series 2024-03-27 13:36:38.000000000 +0000 @@ -1,7 +1,9 @@ expire-on-hashed-users.patch -retain-netplan-world-readable.patch retain-old-groups.patch +keep-dhclient-as-priority-client.patch +revert-551f560d-cloud-config-after-snap-seeding.patch do-not-block-user-login.patch status-do-not-remove-duplicated-data.patch retain-apt-pre-deb822.patch status-retain-recoverable-error-exit-code.patch +retain-ec2-default-net-update-events.patch diff -Nru cloud-init-23.4.4/debian/patches/status-do-not-remove-duplicated-data.patch cloud-init-24.1.3/debian/patches/status-do-not-remove-duplicated-data.patch --- cloud-init-23.4.4/debian/patches/status-do-not-remove-duplicated-data.patch 2024-02-27 15:14:44.000000000 +0000 +++ cloud-init-24.1.3/debian/patches/status-do-not-remove-duplicated-data.patch 2024-03-27 13:36:38.000000000 +0000 @@ -7,18 +7,19 @@ This patch header follows DEP-3: http://dep.debian.net/deps/dep3/ --- a/cloudinit/cmd/status.py +++ b/cloudinit/cmd/status.py -@@ -192,6 +192,8 @@ def handle_status_args(name, args) -> in +@@ -178,6 +178,9 @@ def print_status(args, details: StatusDe "last_update": details.last_update, **details.v1, } + details_dict["schemas"] = {"1": deepcopy(details_dict)} + details_dict["_schema_version"] = "1" - ++ if args.format == "tabular": prefix = "\n" if args.wait else "" + --- a/tests/unittests/cmd/test_status.py +++ b/tests/unittests/cmd/test_status.py -@@ -482,6 +482,7 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr +@@ -507,6 +507,7 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr dedent( """\ --- @@ -26,7 +27,7 @@ boot_status_code: enabled-by-kernel-cmdline datasource: '' detail: 'Running in stage: init' -@@ -495,6 +496,23 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr +@@ -520,6 +521,23 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr start: 123.45 last_update: Thu, 01 Jan 1970 00:02:04 +0000 recoverable_errors: {} @@ -50,7 +51,7 @@ stage: init status: running ... 
-@@ -527,6 +545,25 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr +@@ -552,6 +570,25 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr "init-local": {"finished": 123.46, "start": 123.45}, "last_update": "Thu, 01 Jan 1970 00:02:04 +0000", "recoverable_errors": {}, @@ -76,7 +77,7 @@ "stage": "init", }, id="running_json_format", -@@ -558,6 +595,7 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr +@@ -583,6 +620,7 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr MyArgs(long=False, wait=False, format="json"), 1, { @@ -84,7 +85,7 @@ "boot_status_code": "enabled-by-kernel-cmdline", "datasource": "nocloud", "detail": ( -@@ -579,6 +617,32 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr +@@ -604,6 +642,30 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr }, "last_update": "Thu, 01 Jan 1970 00:02:05 +0000", "recoverable_errors": {}, @@ -95,7 +96,7 @@ + "detail": "DataSourceNoCloud " + "[seed=/var/.../seed/nocloud-net][dsmode=net]", + "errors": ["error1", "error2", "error3"], -+ "extended_status": "error", ++ "extended_status": "error - running", + "init": { + "errors": ["error1"], + "finished": 125.678, @@ -112,12 +113,10 @@ + "status": "error", + } + }, -+ "last_update": "Thu, 01 Jan 1970 00:02:05 +0000", -+ "recoverable_errors": {}, "stage": None, }, id="running_json_format_with_errors", -@@ -641,6 +705,7 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr +@@ -666,6 +728,7 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr MyArgs(long=False, wait=False, format="json"), 2, { @@ -125,7 +124,7 @@ "boot_status_code": "enabled-by-kernel-cmdline", "datasource": "nocloud", "detail": ( -@@ -700,6 +765,89 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr +@@ -725,6 +788,89 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr "don't try to open the hatch or we'll all be soup" ], }, diff -Nru cloud-init-23.4.4/debian/patches/status-retain-recoverable-error-exit-code.patch cloud-init-24.1.3/debian/patches/status-retain-recoverable-error-exit-code.patch --- cloud-init-23.4.4/debian/patches/status-retain-recoverable-error-exit-code.patch 2024-02-27 15:14:44.000000000 +0000 +++ cloud-init-24.1.3/debian/patches/status-retain-recoverable-error-exit-code.patch 2024-03-27 13:36:38.000000000 +0000 @@ -6,10 +6,10 @@ This patch header follows DEP-3: http://dep.debian.net/deps/dep3/ --- a/cloudinit/cmd/status.py +++ b/cloudinit/cmd/status.py -@@ -257,7 +257,7 @@ def handle_status_args(name, args) -> in +@@ -254,7 +254,7 @@ def handle_status_args(name, args) -> in return 1 # Recoverable error - elif details.status in UXAppStatusDegradedMap.values(): + elif details.condition_status == ConditionStatus.DEGRADED: - return 2 + return 0 return 0 @@ -17,7 +17,7 @@ --- a/tests/unittests/cmd/test_status.py +++ b/tests/unittests/cmd/test_status.py -@@ -703,7 +703,7 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr +@@ -726,7 +726,7 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr }, None, MyArgs(long=False, wait=False, format="json"), diff -Nru cloud-init-23.4.4/debian/po/templates.pot cloud-init-24.1.3/debian/po/templates.pot --- cloud-init-23.4.4/debian/po/templates.pot 2024-02-27 15:14:44.000000000 +0000 +++ cloud-init-24.1.3/debian/po/templates.pot 2024-03-27 13:36:38.000000000 +0000 @@ -8,7 +8,7 @@ msgstr "" "Project-Id-Version: cloud-init\n" "Report-Msgid-Bugs-To: cloud-init@packages.debian.org\n" -"POT-Creation-Date: 2023-08-28 15:11-0500\n" +"POT-Creation-Date: 2024-03-14 13:35-0600\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -186,6 +186,12 @@ msgstr "" #. Type: multiselect +#. 
Choices +#: ../cloud-init.templates:1001 +msgid "WSL: Windows Subsystem for Linux" +msgstr "" + +#. Type: multiselect #. Choices #: ../cloud-init.templates:1001 msgid "None: Failsafe datasource" diff -Nru cloud-init-23.4.4/doc/examples/cloud-config-lxd.txt cloud-init-24.1.3/doc/examples/cloud-config-lxd.txt --- cloud-init-23.4.4/doc/examples/cloud-config-lxd.txt 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/doc/examples/cloud-config-lxd.txt 2024-03-27 13:14:04.000000000 +0000 @@ -49,7 +49,7 @@ domain: lxd -# The simplist working configuration is +# The simplest working configuration is # lxd: # init: # storage_backend: dir diff -Nru cloud-init-23.4.4/doc/examples/cloud-config-ssh-keys.txt cloud-init-24.1.3/doc/examples/cloud-config-ssh-keys.txt --- cloud-init-23.4.4/doc/examples/cloud-config-ssh-keys.txt 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/doc/examples/cloud-config-ssh-keys.txt 2024-03-27 13:14:04.000000000 +0000 @@ -9,7 +9,7 @@ # Send pre-generated SSH private keys to the server # If these are present, they will be written to /etc/ssh and # new random keys will not be generated -# in addition to 'rsa' and 'dsa' as shown below, 'ecdsa' is also supported +# in addition to 'rsa' as shown below, 'ecdsa' is also supported ssh_keys: rsa_private: | -----BEGIN RSA PRIVATE KEY----- @@ -27,22 +27,6 @@ rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7XdewmZ3h8eIXJD7TRHtVW7aJX1ByifYtlL/HVzJ09nilCl+MSFrpbFnqjxyL8Rr/DSf7QcY/BrGUQbZn2Kc22PemAWthxHO18QJvWPocKJtlsDNi3 smoser@localhost - dsa_private: | - -----BEGIN DSA PRIVATE KEY----- - MIIBuwIBAAKBgQDP2HLu7pTExL89USyM0264RCyWX/CMLmukxX0Jdbm29ax8FBJT - pLrO8TIXVY5rPAJm1dTHnpuyJhOvU9G7M8tPUABtzSJh4GVSHlwaCfycwcpLv9TX - DgWIpSj+6EiHCyaRlB1/CBp9RiaB+10QcFbm+lapuET+/Au6vSDp9IRtlQIVAIMR - 8KucvUYbOEI+yv+5LW9u3z/BAoGBAI0q6JP+JvJmwZFaeCMMVxXUbqiSko/P1lsa - LNNBHZ5/8MOUIm8rB2FC6ziidfueJpqTMqeQmSAlEBCwnwreUnGfRrKoJpyPNENY - d15MG6N5J+z81sEcHFeprryZ+D3Ge9VjPq3Tf3NhKKwCDQ0240aPezbnjPeFm4mH - bYxxcZ9GAoGAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI3 - 8UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC - /QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQCFEIsKKWv - 99iziAH0KBMVbxy03Trz - -----END DSA PRIVATE KEY----- - - dsa_public: ssh-dss AAAAB3NzaC1kc3MAAACBAM/Ycu7ulMTEvz1RLIzTbrhELJZf8Iwua6TFfQl1ubb1rHwUElOkus7xMhdVjms8AmbV1Meem7ImE69T0bszy09QAG3NImHgZVIeXBoJ/JzByku/1NcOBYilKP7oSIcLJpGUHX8IGn1GJoH7XRBwVub6Vqm4RP78C7q9IOn0hG2VAAAAFQCDEfCrnL1GGzhCPsr/uS1vbt8/wQAAAIEAjSrok/4m8mbBkVp4IwxXFdRuqJKSj8/WWxos00Ednn/ww5QibysHYULrOKJ1+54mmpMyp5CZICUQELCfCt5ScZ9GsqgmnI80Q1h3Xkwbo3kn7PzWwRwcV6muvJn4PcZ71WM+rdN/c2EorAINDTbjRo97NueM94WbiYdtjHFxn0YAAACAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI38UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC/QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQ= smoser@localhost - # By default, the fingerprints of the authorized keys for the users # cloud-init adds are printed to the console. Setting # no_ssh_fingerprints to true suppresses this output. 
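Since the example above now retains only ``rsa`` (with ``ecdsa`` noted as also
supported), pre-generated key material for the ``ssh_keys`` block can be
produced with :command:`ssh-keygen`. A minimal sketch, not part of the patch;
the output file names are illustrative:

.. code-block:: shell-session

   $ # Unencrypted RSA keypair; the .pub half maps to rsa_public
   $ ssh-keygen -t rsa -N '' -f rsa_host_key
   $ # Likewise for ECDSA, mapping to ecdsa_private/ecdsa_public
   $ ssh-keygen -t ecdsa -N '' -f ecdsa_host_key

The private file's contents would go under ``rsa_private`` (or
``ecdsa_private``) and the ``.pub`` contents under the matching ``*_public``
key.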
diff -Nru cloud-init-23.4.4/doc/examples/cloud-config.txt cloud-init-24.1.3/doc/examples/cloud-config.txt --- cloud-init-23.4.4/doc/examples/cloud-config.txt 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/doc/examples/cloud-config.txt 2024-03-27 13:14:04.000000000 +0000 @@ -65,7 +65,7 @@ # Send pre-generated ssh private keys to the server # If these are present, they will be written to /etc/ssh and # new random keys will not be generated -# in addition to 'rsa' and 'dsa' as shown below, 'ecdsa' is also supported +# in addition to 'rsa' as shown below, 'ecdsa' is also supported ssh_keys: rsa_private: | -----BEGIN RSA PRIVATE KEY----- @@ -83,23 +83,6 @@ rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7XdewmZ3h8eIXJD7TRHtVW7aJX1ByifYtlL/HVzJ09nilCl+MSFrpbFnqjxyL8Rr/DSf7QcY/BrGUQbZn2Kc22PemAWthxHO18QJvWPocKJtlsDNi3 smoser@localhost - dsa_private: | - -----BEGIN DSA PRIVATE KEY----- - MIIBuwIBAAKBgQDP2HLu7pTExL89USyM0264RCyWX/CMLmukxX0Jdbm29ax8FBJT - pLrO8TIXVY5rPAJm1dTHnpuyJhOvU9G7M8tPUABtzSJh4GVSHlwaCfycwcpLv9TX - DgWIpSj+6EiHCyaRlB1/CBp9RiaB+10QcFbm+lapuET+/Au6vSDp9IRtlQIVAIMR - 8KucvUYbOEI+yv+5LW9u3z/BAoGBAI0q6JP+JvJmwZFaeCMMVxXUbqiSko/P1lsa - LNNBHZ5/8MOUIm8rB2FC6ziidfueJpqTMqeQmSAlEBCwnwreUnGfRrKoJpyPNENY - d15MG6N5J+z81sEcHFeprryZ+D3Ge9VjPq3Tf3NhKKwCDQ0240aPezbnjPeFm4mH - bYxxcZ9GAoGAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI3 - 8UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC - /QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQCFEIsKKWv - 99iziAH0KBMVbxy03Trz - -----END DSA PRIVATE KEY----- - - dsa_public: ssh-dss AAAAB3NzaC1kc3MAAACBAM/Ycu7ulMTEvz1RLIzTbrhELJZf8Iwua6TFfQl1ubb1rHwUElOkus7xMhdVjms8AmbV1Meem7ImE69T0bszy09QAG3NImHgZVIeXBoJ/JzByku/1NcOBYilKP7oSIcLJpGUHX8IGn1GJoH7XRBwVub6Vqm4RP78C7q9IOn0hG2VAAAAFQCDEfCrnL1GGzhCPsr/uS1vbt8/wQAAAIEAjSrok/4m8mbBkVp4IwxXFdRuqJKSj8/WWxos00Ednn/ww5QibysHYULrOKJ1+54mmpMyp5CZICUQELCfCt5ScZ9GsqgmnI80Q1h3Xkwbo3kn7PzWwRwcV6muvJn4PcZ71WM+rdN/c2EorAINDTbjRo97NueM94WbiYdtjHFxn0YAAACAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI38UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC/QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQ= smoser@localhost - - # remove access to the ec2 metadata service early in boot via null route # the null route can be removed (by root) with: # route del -host 169.254.169.254 reject @@ -281,7 +264,7 @@ # # * fqdn: # this option will be used wherever 'fqdn' is needed. -# simply substitue it in the description above. +# simply substitute it in the description above. # default: fqdn as returned by the metadata service. on EC2 'hostname' # is used, so this is like: ip-10-244-170-199.ec2.internal # @@ -341,7 +324,7 @@ # phone_home: url: http://my.example.com/$INSTANCE_ID/ - post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ] + post: [ pub_key_rsa, pub_key_ecdsa, instance_id ] # timezone: set the timezone for this instance # the value of 'timezone' must exist in /usr/share/zoneinfo @@ -405,7 +388,7 @@ # boolean indicating if existing ssh keys should be deleted on a # per-instance basis. 
On a public image, this should absolutely be set # to 'True' -# ssh_genkeytypes: ['rsa', 'dsa', 'ecdsa'] +# ssh_genkeytypes: ['rsa', 'ecdsa'] # a list of the ssh key types that should be generated # These are passed to 'ssh-keygen -t' diff -Nru cloud-init-23.4.4/doc/man/cloud-init.1 cloud-init-24.1.3/doc/man/cloud-init.1 --- cloud-init-23.4.4/doc/man/cloud-init.1 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/doc/man/cloud-init.1 2024-03-27 13:14:04.000000000 +0000 @@ -26,10 +26,6 @@ Show additional pre-action logging (default: False). .TP -.B "-f , --files " -Use additional YAML configuration files. - -.TP .B "--force" Force running even if no datasource is found (use at your own risk). diff -Nru cloud-init-23.4.4/doc/rtd/conf.py cloud-init-24.1.3/doc/rtd/conf.py --- cloud-init-23.4.4/doc/rtd/conf.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/conf.py 2024-03-27 13:14:04.000000000 +0000 @@ -36,8 +36,17 @@ "sphinx.ext.autodoc", "sphinx.ext.autosectionlabel", "sphinx.ext.viewcode", + "sphinxcontrib.spelling", ] + +# Spelling settings for sphinxcontrib.spelling +# https://docs.ubuntu.com/styleguide/en/ +spelling_warning = True + +# Uses case-independent spelling matches from doc/rtd/spelling_word_list.txt +spelling_filters = ["spelling.WordListFilter"] + # The suffix of source filenames. source_suffix = ".rst" @@ -122,12 +131,17 @@ linkcheck_ignore = [ r"http://\[fd00:ec2::254.*", r"http://instance-data.*", + r"https://www.scaleway.com/en/developers/api/instance.*", r"https://powersj.io.*", r"http://169.254.169.254.*", r"http://10.10.0.1.*", ] linkcheck_anchors_ignore_for_url = ( + # Ignore github anchors in rst or md files + r"https://github.com/.*\.rst", + r"https://github.com/.*\.md", + # Ignore github line number anchors in cloud-init and ubuntu-pro-client r"https://github.com/canonical/cloud-init.*", r"https://github.com/canonical/ubuntu-pro-client.*", ) diff -Nru cloud-init-23.4.4/doc/rtd/development/contribute_docs.rst cloud-init-24.1.3/doc/rtd/development/contribute_docs.rst --- cloud-init-23.4.4/doc/rtd/development/contribute_docs.rst 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/development/contribute_docs.rst 2024-03-27 13:14:04.000000000 +0000 @@ -10,8 +10,9 @@ Style guide Directory layout -The documentation for cloud-init is hosted in GitHub and rendered on -`Read the Docs`_. It is mostly written in reStructuredText. +The documentation for cloud-init is hosted in the +`cloud-init GitHub repository`_ and rendered on `Read the Docs`_. It is mostly +written in reStructuredText. The process for contributing to the docs is largely the same as for code, except that for cosmetic changes to the documentation (spelling, grammar, etc) @@ -65,6 +66,7 @@ .. LINKS .. include:: ../links.txt +.. _cloud-init GitHub repository: https://github.com/canonical/cloud-init/tree/main/doc/rtd .. _Read the Docs: https://readthedocs.com/ .. _tools/.github-cla-signers: https://github.com/canonical/cloud-init/blob/main/tools/.github-cla-signers .. _tagging s-makin: https://github.com/s-makin diff -Nru cloud-init-23.4.4/doc/rtd/development/datasource_creation.rst cloud-init-24.1.3/doc/rtd/development/datasource_creation.rst --- cloud-init-23.4.4/doc/rtd/development/datasource_creation.rst 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/development/datasource_creation.rst 2024-03-27 13:14:04.000000000 +0000 @@ -1,7 +1,86 @@ .. 
_datasource_creation:

-Datasource creation
-*******************
+Supporting your cloud or platform
+*********************************
+
+The upstream cloud-init project regularly accepts code contributions for new
+platforms that wish to support cloud-init.
+
+Ways to add platform support
+============================
+
+To add cloud-init support for a new platform, there are two possible
+approaches:
+
+1. Provide platform compatibility with one of the existing datasource
+   definitions, such as `nocloud`_ via `DatasourceNoCloud.py`_. Several
+   platforms, including `Libvirt`_ and `Proxmox`_, use this approach.
+2. Add a new datasource definition to upstream cloud-init. This provides
+   tighter integration, a better debugging experience, and more control
+   and flexibility over cloud-init's interaction with the datasource. This
+   option is more sensible for clouds that have a unique architecture.
+
+Platform requirements
+=====================
+
+There are some technical and logistical prerequisites that must be met for
+cloud-init support.
+
+Technical requirements
+----------------------
+
+A cloud needs to be able to identify itself to cloud-init at runtime, and to
+provide configuration to the instance.
+
+A mechanism for self-identification
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Each cloud platform must positively identify itself to the guest. This allows
+the guest to make educated decisions based on the platform on which it is
+running. On the x86 and arm64 architectures, many clouds identify themselves
+through `DMI`_ data. For example, Oracle's public cloud provides the string
+``'OracleCloud.com'`` in the DMI chassis-asset field.
+
+``Cloud-init``-enabled images produce a log file with details about the
+platform. Reading through this log in :file:`/run/cloud-init/ds-identify.log`
+may provide the information needed to uniquely identify the platform.
+If the log is not present, one can generate the log by running ``ds-identify``
+manually.
+
+The mechanism used to identify the platform will be required for
+``ds-identify`` and the datasource module sections below.
+
+A mechanism for cloud-init to retrieve configuration
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+There are different methods that cloud-init can use to retrieve
+cloud-configuration for configuring the instance. The most common method is a
+webserver providing configuration over a link-local network.
+
+Logistical requirements
+-----------------------
+
+As with any open source project, multiple logistical requirements exist.
+
+Testing access
+^^^^^^^^^^^^^^
+
+A platform that isn't available for testing cannot be independently validated.
+You will need to provide some means for community members and upstream
+developers to test and verify this platform. Code that cannot be used cannot
+be supported.
+
+Maintainer support
+^^^^^^^^^^^^^^^^^^
+
+A point of contact is required who can answer questions and occasionally
+provide testing or maintenance support. Maintainership is relatively informal,
+but there is an expectation that from time to time upstream may need to
+contact the maintainer with inquiries. Datasources that appear to be
+unmaintained and/or unused may be considered for eventual removal.
+
+Adding cloud-init support
+=========================

There are multiple ways to provide `user data`, `metadata`, and `vendor data`, and each cloud solution prefers its own way.
A datasource
@@ -13,80 +92,88 @@
If you are interested in adding a new datasource for your cloud platform you
will need to do all of the following:

-Identify a mechanism for positive identification of the platform
-================================================================
+Add datasource module cloudinit/sources/DataSource<CloudPlatform>.py
+--------------------------------------------------------------------

-It is good practice for a cloud platform to positively identify itself to
-the guest. This allows the guest to make educated decisions based on the
-platform on which it is running. On the x86 and arm64 architectures, many
-clouds identify themselves through `DMI`_ data. For example, Oracle's public
-cloud provides the string ``'OracleCloud.com'`` in the DMI chassis-asset
-field.
+We suggest you start by copying one of the simpler datasources
+such as ``DataSourceHetzner``.

-``Cloud-init``-enabled images produce a log file with details about the
-platform. Reading through this log in :file:`/run/cloud-init/ds-identify.log`
-may provide the information needed to uniquely identify the platform.
-If the log is not present, you can generate it by running from source
-:file:`./tools/ds-identify` or the installed location
-:file:`/usr/lib/cloud-init/ds-identify`.
+Re-run datasource detection
+^^^^^^^^^^^^^^^^^^^^^^^^^^^

-The mechanism used to identify the platform will be required for the
-``ds-identify`` and datasource module sections below.
+While developing a new datasource it may be helpful to manually run datasource
+detection without rebooting the system.

-Add datasource module cloudinit/sources/DataSource<CloudPlatform>.py
-====================================================================
+To re-run datasource detection, you must first force :file:`ds-identify` to
+re-run, then clean up any logs, and finally, re-run ``cloud-init``:

-We suggest you start by copying one of the simpler datasources
-such as ``DataSourceHetzner``.
+.. code-block:: bash
+
+   sudo DI_LOG=stderr /usr/lib/cloud-init/ds-identify --force
+   sudo cloud-init clean --logs
+   sudo cloud-init init --local
+   sudo cloud-init init

Add tests for datasource module
-===============================
+-------------------------------

Add a new file with some tests for the module to
:file:`cloudinit/sources/test_<yourplatform>.py`. For example, see
:file:`cloudinit/sources/tests/test_oracle.py`

Update ``ds-identify``
-======================
+----------------------

In ``systemd`` systems, ``ds-identify`` is used to detect which datasource
should be enabled, or if ``cloud-init`` should run at all. You'll need to
make changes to :file:`tools/ds-identify`.

Add tests for ``ds-identify``
-=============================
+-----------------------------

Add relevant tests in a new class to
:file:`tests/unittests/test_ds_identify.py`. You can use ``TestOracle`` as
an example.

Add your datasource name to the built-in list of datasources
-============================================================
+------------------------------------------------------------

Add your datasource module name to the end of the ``datasource_list``
entry in :file:`cloudinit/settings.py`.

Add your cloud platform to apport collection prompts
-====================================================
+----------------------------------------------------

Update the list of cloud platforms in :file:`cloudinit/apport.py`. This list
will be provided to the user who invokes :command:`ubuntu-bug cloud-init`.
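+While developing the identification logic, it can help to confirm what the
+platform actually exposes before wiring it into ``ds-identify``. The
+following is a minimal sketch, not part of the upstream steps: it assumes a
+Linux guest that exports DMI data via sysfs, and the ``OracleCloud.com``
+value is just the example quoted earlier.
+
+.. code-block:: shell-session
+
+   $ # DMI chassis-asset field, used by e.g. Oracle for self-identification
+   $ cat /sys/class/dmi/id/chassis_asset_tag
+   OracleCloud.com
+   $ # What ds-identify concluded on the most recent boot
+   $ grep -i datasource /run/cloud-init/ds-identify.log
+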
Enable datasource by default in Ubuntu packaging branches -========================================================= +--------------------------------------------------------- Ubuntu packaging branches contain a template file, -:file:`debian/cloud-init.templates`, which ultimately sets the default -``datasource_list`` when installed via package. This file needs updating when -the commit gets into a package. +:file:`config/cloud.cfg.tmpl`, which ultimately sets the default +``datasource_list`` that is installed by distros that use the upstream +packaging configuration. Add documentation for your datasource -===================================== +------------------------------------- You should add a new file in :file:`doc/rtd/reference/datasources/.rst` and reference it in :file:`doc/rtd/reference/datasources.rst` +Benefits of including your datasource in upstream cloud-init +============================================================ + +Datasources included in upstream cloud-init benefit from ongoing maintenance, +compatibility with the rest of the codebase, and security fixes by the upstream +development team. + + .. _make-mime: https://cloudinit.readthedocs.io/en/latest/explanation/instancedata.html#storage-locations .. _DMI: https://www.dmtf.org/sites/default/files/standards/documents/DSP0005.pdf +.. _Libvirt: https://github.com/virt-manager/virt-manager/blob/main/man/virt-install.rst#--cloud-init +.. _Proxmox: https://pve.proxmox.com/wiki/Cloud-Init_Support +.. _DatasourceNoCloud.py: https://github.com/canonical/cloud-init/blob/main/cloudinit/sources/DataSourceNoCloud.py +.. _nocloud: https://cloudinit.readthedocs.io/en/latest/reference/datasources/nocloud.html diff -Nru cloud-init-23.4.4/doc/rtd/development/debugging.rst cloud-init-24.1.3/doc/rtd/development/debugging.rst --- cloud-init-23.4.4/doc/rtd/development/debugging.rst 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/development/debugging.rst 1970-01-01 00:00:00.000000000 +0000 @@ -1,327 +0,0 @@ -.. _debugging: - -Debugging cloud-init -******************** - -Overview -======== - -This topic will discuss general approaches for testing and debugging cloud-init -on deployed instances. - -.. _boot_time_analysis: - -Boot time analysis -================== - -:command:`cloud-init analyze` ------------------------------ - -Occasionally, instances don't appear as performant as we would like and -cloud-init packages a simple facility to inspect which operations took the -longest during boot and setup. - -The script :file:`/usr/bin/cloud-init` has an analysis sub-command, -:command:`analyze`, which parses any :file:`cloud-init.log` file into formatted -and sorted events. It allows for detailed analysis of the most costly -cloud-init operations, and to determine the long-pole in cloud-init -configuration and setup. These subcommands default to reading -:file:`/var/log/cloud-init.log`. - -:command:`analyze show` -^^^^^^^^^^^^^^^^^^^^^^^ - -Parse and organise :file:`cloud-init.log` events by stage and include each -sub-stage granularity with time delta reports. - -.. code-block:: shell-session - - $ cloud-init analyze show -i my-cloud-init.log - -Example output: - -.. code-block:: shell-session - - -- Boot Record 01 -- - The total time elapsed since completing an event is printed after the "@" - character. - The time the event takes is printed after the "+" character. 
- - Starting stage: modules-config - |`->config-snap_config ran successfully @05.47700s +00.00100s - |`->config-ssh-import-id ran successfully @05.47800s +00.00200s - |`->config-locale ran successfully @05.48000s +00.00100s - ... - - -:command:`analyze dump` -^^^^^^^^^^^^^^^^^^^^^^^ - -Parse :file:`cloud-init.log` into event records and return a list of -dictionaries that can be consumed for other reporting needs. - -.. code-block:: shell-session - - $ cloud-init analyze dump -i my-cloud-init.log - -Example output: - -.. code-block:: - - [ - { - "description": "running config modules", - "event_type": "start", - "name": "modules-config", - "origin": "cloudinit", - "timestamp": 1510807493.0 - },... - -:command:`analyze blame` -^^^^^^^^^^^^^^^^^^^^^^^^ - -Parse :file:`cloud-init.log` into event records and sort them based on the -highest time cost for a quick assessment of areas of cloud-init that may -need improvement. - -.. code-block:: shell-session - - $ cloud-init analyze blame -i my-cloud-init.log - -Example output: - -.. code-block:: - - -- Boot Record 11 -- - 00.01300s (modules-final/config-scripts-per-boot) - 00.00400s (modules-final/config-final-message) - 00.00100s (modules-final/config-rightscale_userdata) - ... - -:command:`analyze boot` -^^^^^^^^^^^^^^^^^^^^^^^ - -Make subprocess calls to the kernel in order to get relevant pre-cloud-init -timestamps, such as the kernel start, kernel finish boot, and cloud-init -start. - -.. code-block:: shell-session - - $ cloud-init analyze boot - -Example output: - -.. code-block:: - - -- Most Recent Boot Record -- - Kernel Started at: 2019-06-13 15:59:55.809385 - Kernel ended boot at: 2019-06-13 16:00:00.944740 - Kernel time to boot (seconds): 5.135355 - Cloud-init start: 2019-06-13 16:00:05.738396 - Time between Kernel boot and Cloud-init start (seconds): 4.793656 - -Analyze quickstart - LXC ------------------------- - -To quickly obtain a cloud-init log, try using :command:`lxc` on any -Ubuntu system: - -.. code-block:: shell-session - - $ lxc init ubuntu-daily:focal x1 - $ lxc start x1 - $ # Take lxc's cloud-init.log and pipe it to the analyzer - $ lxc file pull x1/var/log/cloud-init.log - | cloud-init analyze dump -i - - $ lxc file pull x1/var/log/cloud-init.log - | \ - python3 -m cloudinit.analyze dump -i - - - -Analyze quickstart - KVM ------------------------- -To quickly analyze a KVM cloud-init log: - -1. Download the current cloud image - -.. code-block:: shell-session - - $ wget https://cloud-images.ubuntu.com/daily/server/focal/current/focal-server-cloudimg-amd64.img - -2. Create a snapshot image to preserve the original cloud image - -.. code-block:: shell-session - - $ qemu-img create -b focal-server-cloudimg-amd64.img -f qcow2 \ - test-cloudinit.qcow2 - -3. Create a seed image with metadata using :command:`cloud-localds` - -.. code-block:: shell-session - - $ cat > user-data <.` which -marks when the module last successfully ran. Presence of this semaphore file -prevents a module from running again if it has already been run. To ensure that -a module is run again, the desired frequency can be overridden via the -command line: - -.. code-block:: shell-session - - $ sudo cloud-init single --name cc_ssh --frequency always - -Example output: - -.. code-block:: - - ... - Generating public/private ed25519 key pair - ... - -Inspect :file:`cloud-init.log` for output of what operations were performed as -a result. - -.. 
_proposed_sru_testing: - -Stable Release Updates (SRU) testing for cloud-init -=================================================== - -Once an Ubuntu release is stable (i.e. after it is released), updates for it -must follow a special procedure called a "Stable Release Update" (`SRU`_). - -The cloud-init project has a specific process it follows when validating -a cloud-init SRU, documented in the `CloudinitUpdates`_ wiki page. - -Generally an SRU test of cloud-init performs the following: - -* Install a pre-release version of cloud-init from the **-proposed** APT - pocket (e.g., **bionic-proposed**). -* Upgrade cloud-init and attempt a clean run of cloud-init to assert - that the new version works properly on the specific platform and Ubuntu - series. -* Check for tracebacks or errors in behaviour. - -Manual SRU verification procedure ---------------------------------- - -Below are steps to manually test a pre-release version of cloud-init -from **-proposed** - -.. note:: - For each Ubuntu SRU, the Ubuntu Server team manually validates the new - version of cloud-init on these platforms: **Amazon EC2, Azure, GCE, - OpenStack, Oracle, Softlayer (IBM), LXD, KVM** - -1. Launch a VM on your favorite platform, providing this cloud-config - user data and replacing ```` with your username: - -.. code-block:: yaml - - ## template: jinja - #cloud-config - ssh_import_id: [] - hostname: SRU-worked-{{v1.cloud_name}} - -2. Wait for current cloud-init to complete, replace ```` with - the IP address of the VM that you launched in step 1. Be sure to make a - note of the datasource cloud-init detected in ``--long`` output. You - will need this during step 5, where you will use it to confirm the same - datasource is detected after the upgrade: - -.. code-block:: bash - - CI_VM_IP= - $ ssh ubuntu@$CI_VM_IP -- cloud-init status --wait --long - -3. Set up the **-proposed** pocket on your VM and upgrade to the **-proposed** - cloud-init. To do this, create the following bash script, which will - add the **-proposed** pocket to APT's sources and install cloud-init - from that pocket: - -.. code-block:: bash - - cat > setup_proposed.sh <`: - -.. code-block:: shell-session - - $ ssh ubuntu@$CI_VM_IP -- hostname - -Then, check for any errors or warnings in cloud-init logs. If successful, -this will produce no output: - -.. code-block:: shell-session - - $ ssh ubuntu@$CI_VM_IP -- grep Trace "/var/log/cloud-init*" - -6. If you encounter an error during SRU testing: - - * Create a `new cloud-init bug`_ reporting the version of cloud-init - affected - * Ping upstream cloud-init on Libera's `#cloud-init IRC channel `_ - -.. LINKS -.. include:: ../links.txt -.. _SRU: https://wiki.ubuntu.com/StableReleaseUpdates -.. _CloudinitUpdates: https://wiki.ubuntu.com/CloudinitUpdates -.. _new cloud-init bug: https://github.com/canonical/cloud-init/issues -.. _#cloud-init IRC channel: https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init diff -Nru cloud-init-23.4.4/doc/rtd/development/index.rst cloud-init-24.1.3/doc/rtd/development/index.rst --- cloud-init-23.4.4/doc/rtd/development/index.rst 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/development/index.rst 2024-03-27 13:14:04.000000000 +0000 @@ -73,7 +73,8 @@ ../howto/bugs.rst logging.rst - debugging.rst + internal_files.rst + ../howto/debugging.rst .. LINKS: .. 
include:: ../links.txt diff -Nru cloud-init-23.4.4/doc/rtd/development/internal_files.rst cloud-init-24.1.3/doc/rtd/development/internal_files.rst --- cloud-init-23.4.4/doc/rtd/development/internal_files.rst 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/development/internal_files.rst 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,46 @@ +.. _internal_files: + +Internal Files: data +******************** + +Cloud-init uses the filesystem to store its own internal state. These files +are not intended for user consumption, but may prove helpful to debug +unexpected cloud-init failures. + +.. _data_files: + +Data files +========== + +Inside the :file:`/var/lib/cloud/` directory there are two important +subdirectories: + +:file:`instance` +---------------- + +The :file:`/var/lib/cloud/instance` directory is a symbolic link that points +to the most recently used :file:`instance-id` directory. This folder contains +the information ``cloud-init`` received from datasources, including vendor and +user data. This can help to determine that the correct data was passed. + +It also contains the :file:`datasource` file that contains the full information +about which datasource was identified and used to set up the system. + +Finally, the :file:`boot-finished` file is the last thing that +``cloud-init`` creates. + +:file:`data` +------------ + +The :file:`/var/lib/cloud/data` directory contains information related to the +previous boot: + +* :file:`instance-id`: + ID of the instance as discovered by ``cloud-init``. Changing this file has + no effect. +* :file:`result.json`: + JSON file showing both the datasource used to set up the instance, and + whether any errors occurred. +* :file:`status.json`: + JSON file showing the datasource used, a breakdown of all four stages, + whether any errors occurred, and the start and stop times of the stages. diff -Nru cloud-init-23.4.4/doc/rtd/development/summit/2023_summit.rst cloud-init-24.1.3/doc/rtd/development/summit/2023_summit.rst --- cloud-init-23.4.4/doc/rtd/development/summit/2023_summit.rst 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/development/summit/2023_summit.rst 2024-03-27 13:14:04.000000000 +0000 @@ -58,8 +58,8 @@ Catherine Redfield, Alberto Contreras, Sally Makin, John Chittum, Daniel Bungert and Chad Smith. You really helped to make this event a success. 
-Presentation take-aways ------------------------ +Presentation takeaways +---------------------- * **Integration-testing tour/demo**: James showed how Canonical uses our integration tests and pycloudlib during SRU verification, and demonstrated diff -Nru cloud-init-23.4.4/doc/rtd/explanation/analyze.rst cloud-init-24.1.3/doc/rtd/explanation/analyze.rst --- cloud-init-23.4.4/doc/rtd/explanation/analyze.rst 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/explanation/analyze.rst 2024-03-27 13:14:04.000000000 +0000 @@ -78,11 +78,10 @@ 00.00100s (modules-final/config-scripts_vendor) 00.00100s (modules-final/config-scripts_per_once) 00.00100s (modules-final/config-salt_minion) - 00.00100s (modules-final/config-rightscale_userdata) 00.00100s (modules-final/config-phone_home) 00.00100s (modules-final/config-package_update_upgrade_install) 00.00100s (modules-final/config-fan) - 00.00100s (modules-config/config-ubuntu_advantage) + 00.00100s (modules-config/config-ubuntu_pro) 00.00100s (modules-config/config-ssh_import_id) 00.00100s (modules-config/config-snap) 00.00100s (modules-config/config-set_passwords) @@ -92,7 +91,6 @@ 00.00100s (modules-config/config-apt_pipelining) 00.00100s (init-network/config-write_files) 00.00100s (init-network/config-seed_random) - 00.00100s (init-network/config-migrator) 00.00000s (modules-final/config-ubuntu_drivers) 00.00000s (modules-final/config-scripts_user) 00.00000s (modules-final/config-scripts_per_instance) diff -Nru cloud-init-23.4.4/doc/rtd/explanation/boot.rst cloud-init-24.1.3/doc/rtd/explanation/boot.rst --- cloud-init-23.4.4/doc/rtd/explanation/boot.rst 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/explanation/boot.rst 2024-03-27 13:14:04.000000000 +0000 @@ -3,23 +3,24 @@ Boot stages *********** -To be able to provide the functionality that it does, ``cloud-init`` must be -integrated into the boot in a fairly controlled way. There are five -stages to boot: +There are five stages to boot: -1. Generator +1. Detect 2. Local 3. Network 4. Config 5. Final -.. _boot-Generator: +.. _boot-Detect: -Generator -========= -When booting under ``systemd``, a generator will run that determines if -cloud-init.target should be included in the boot goals. ``ds-identify`` -runs at this stage. +Detect +====== + +A platform identification tool called ``ds-identify`` runs in the first stage. +This tool detects which platform the instance is running on. This tool is +integrated into the init system to disable cloud-init when no platform is +found, and enable cloud-init when a valid platform is detected. This stage +might not be present for every installation of cloud-init. .. _boot-Local: diff -Nru cloud-init-23.4.4/doc/rtd/explanation/events.rst cloud-init-24.1.3/doc/rtd/explanation/events.rst --- cloud-init-23.4.4/doc/rtd/explanation/events.rst 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/explanation/events.rst 2024-03-27 13:14:04.000000000 +0000 @@ -72,9 +72,10 @@ .. warning:: Due to its use of ``systemd`` sockets, ``hotplug`` functionality is - currently incompatible with SELinux. This issue is being `tracked - in GitHub #3890`_. Additionally, ``hotplug`` support is considered - experimental for non-Debian-based systems. + currently incompatible with SELinux on Linux distributions using systemd. + This issue is being `tracked in GitHub #3890`_. Additionally, ``hotplug`` + support is considered experimental for non-Alpine and non-Debian-based + systems. Example ======= @@ -92,4 +93,4 @@ when: ['boot'] .. 
_Cloud-init: https://launchpad.net/cloud-init -.. _tracked in Github #3890: https://github.com/canonical/cloud-init/issues/3890 +.. _tracked in GitHub #3890: https://github.com/canonical/cloud-init/issues/3890 diff -Nru cloud-init-23.4.4/doc/rtd/explanation/exported_errors.rst cloud-init-24.1.3/doc/rtd/explanation/exported_errors.rst --- cloud-init-23.4.4/doc/rtd/explanation/exported_errors.rst 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/explanation/exported_errors.rst 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,131 @@ +.. _exported_errors: + +Exported errors +=============== + +Cloud-init makes internal errors available to users for debugging. These +errors map to logged errors and may be useful for understanding what +happens when cloud-init doesn't do what you expect. + +Aggregated errors +----------------- + +When a :ref:`recoverable error` occurs, the internal +cloud-init state information is made visible under a top level aggregate key +``recoverable_errors`` with errors sorted by error level: + +.. code-block:: shell-session + :emphasize-lines: 11-19 + + $ cloud-init status --format json + { + "boot_status_code": "enabled-by-generator", + "config": {...}, + "datasource": "", + "detail": "Cloud-init enabled by systemd cloud-init-generator", + "errors": [], + "extended_status": "degraded done", + "init": {...}, + "last_update": "", + "recoverable_errors": + { + "WARNING": [ + "Failed at merging in cloud config part from p-01: empty cloud config", + "No template found in /etc/cloud/templates for template source.deb822", + "No template found in /etc/cloud/templates for template sources.list", + "No template found, not rendering /etc/apt/soures.list.d/ubuntu.source" + ] + }, + "status": "done" + } + + +Reported recoverable error messages are grouped by the level at which +they are logged. Complete list of levels in order of increasing +criticality: + +.. code-block:: shell-session + + WARNING + DEPRECATED + ERROR + CRITICAL + +Each message has a single level. In cloud-init's :ref:`log files`, +the level at which logs are reported is configurable. These messages are +exported via the ``'recoverable_errors'`` key regardless of which level of +logging is configured. + +Per-stage errors +---------------- + +The keys ``errors`` and ``recoverable_errors`` are also exported for each +stage to allow identifying when recoverable and non-recoverable errors +occurred. + +.. code-block:: shell-session + :emphasize-lines: 4-11,16-21 + + $ cloud-init status --format json + { + "boot_status_code": "enabled-by-generator", + "config": + { + "WARNING": [ + "No template found in /etc/cloud/templates for template source.deb822", + "No template found in /etc/cloud/templates for template sources.list", + "No template found, not rendering /etc/apt/soures.list.d/ubuntu.source" + ] + }, + "datasource": "", + "detail": "Cloud-init enabled by systemd cloud-init-generator", + "errors": [], + "extended_status": "degraded done", + "init": + { + "WARNING": [ + "Failed at merging in cloud config part from p-01: empty cloud config", + ] + }, + "last_update": "", + "recoverable_errors": + { + "WARNING": [ + "Failed at merging in cloud config part from p-01: empty cloud config", + "No template found in /etc/cloud/templates for template source.deb822", + "No template found in /etc/cloud/templates for template sources.list", + "No template found, not rendering /etc/apt/soures.list.d/ubuntu.source" + ] + }, + "status": "done" + } + +.. 
note::
+
+   Only completed cloud-init stages are listed in the output of
+   ``cloud-init status --format json``.
+
+The JSON representation of cloud-init :ref:`boot stages`
+(in run order) is:
+
+.. code-block:: shell-session
+
+   "init-local"
+   "init"
+   "modules-config"
+   "modules-final"
+
+Limitations of exported errors
+------------------------------
+
+- Exported recoverable errors represent logged messages, which are not
+  guaranteed to be stable between releases. The contents of the
+  ``'errors'`` and ``'recoverable_errors'`` keys are not guaranteed to have
+  stable output.
+- Exported errors and recoverable errors may occur at different stages
+  since users may reorder configuration modules to run at different
+  stages via :file:`cloud.cfg`.
+
+Where to next?
+--------------
+See :ref:`here` for a detailed guide to debugging cloud-init.
diff -Nru cloud-init-23.4.4/doc/rtd/explanation/failure_states.rst cloud-init-24.1.3/doc/rtd/explanation/failure_states.rst
--- cloud-init-23.4.4/doc/rtd/explanation/failure_states.rst 1970-01-01 00:00:00.000000000 +0000
+++ cloud-init-24.1.3/doc/rtd/explanation/failure_states.rst 2024-03-27 13:14:04.000000000 +0000
@@ -0,0 +1,78 @@
+.. _failure_states:
+
+Failure states
+==============
+
+Cloud-init has multiple modes of failure. This page describes these
+modes and how to gather information about failures.
+
+.. _critical_failure:
+
+Critical failure
+----------------
+
+Critical failures happen when cloud-init experiences a condition that it
+cannot safely handle. When this happens, cloud-init may be unable to complete,
+and the instance is likely to be in an unknown broken state.
+
+Cloud-init experiences critical failure when:
+
+* there is a major problem with the cloud image that is running cloud-init
+* there is a severe bug in cloud-init
+
+When this happens, error messages will be visible in the output of
+``cloud-init status --long`` under the ``'errors'`` key.
+
+The same errors will also be located under the ``'errors'`` key nested under
+the module-level keys that store information related to each
+:ref:`stage of cloud-init`: ``init-local``, ``init``,
+``modules-config``, ``modules-final``.
+
+.. _recoverable_failure:
+
+Recoverable failure
+-------------------
+
+In the case that cloud-init is able to complete yet something went wrong,
+cloud-init has experienced a "recoverable failure". When this happens,
+the service will return with exit code 2, and error messages will be
+visible in the output of ``cloud-init status --long`` under the top
+level ``recoverable_errors`` and ``errors`` keys.
+
+To identify which stage an error came from, one can check under the
+module-level keys: ``init-local``, ``init``, ``modules-config``,
+``modules-final`` for the same error keys.
+
+See :ref:`this more detailed explanation` to learn how to
+use cloud-init's exported errors.
+
+Cloud-init error codes
+----------------------
+
+Cloud-init's ``status`` subcommand is useful for understanding which type of
+error cloud-init experienced while running. The return code will be one of
+the following (a quick way to capture this code is sketched below):
+
+.. code-block:: shell-session
+
+   0 - success
+   1 - unrecoverable error
+   2 - recoverable error
+
+If ``cloud-init status`` exits with exit code 1, cloud-init experienced
+critical failure and was unable to recover. In this case, something is likely
+seriously wrong with the system, or cloud-init has experienced a serious bug.
+If you believe that you have experienced a serious bug, please file a
+:ref:`bug report`.
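+The sketch below shows one way to capture this return code in a shell (a
+hypothetical session; the status output and code depend on the instance):
+
+.. code-block:: shell-session
+
+   $ # Print the status, then the exit code the status subcommand returned
+   $ cloud-init status --long; echo "exit code: $?"
+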
+
+If cloud-init exits with exit code 2, cloud-init was able to complete
+gracefully; however, something went wrong and the user should investigate.
+
+See :ref:`this more detailed explanation` for more information
+on cloud-init's status.
+
+Where to next?
+--------------
+
+See :ref:`our more detailed guide` for a detailed guide to
+debugging cloud-init.
diff -Nru cloud-init-23.4.4/doc/rtd/explanation/format.rst cloud-init-24.1.3/doc/rtd/explanation/format.rst
--- cloud-init-23.4.4/doc/rtd/explanation/format.rst 2024-02-27 15:17:52.000000000 +0000
+++ cloud-init-24.1.3/doc/rtd/explanation/format.rst 2024-03-27 13:14:04.000000000 +0000
@@ -33,9 +33,8 @@
using a MIME archive.

.. note::
-   New in ``cloud-init`` v. 18.4: Cloud config data can also render cloud
-   instance metadata variables using jinja templating. See
-   :ref:`instance_metadata` for more information.
+   Cloud config data can also render cloud instance metadata variables using
+   jinja templating. See :ref:`instance_metadata` for more information.

.. _user_data_script:
diff -Nru cloud-init-23.4.4/doc/rtd/explanation/index.rst cloud-init-24.1.3/doc/rtd/explanation/index.rst
--- cloud-init-23.4.4/doc/rtd/explanation/index.rst 2024-02-27 15:17:52.000000000 +0000
+++ cloud-init-24.1.3/doc/rtd/explanation/index.rst 2024-03-27 13:14:04.000000000 +0000
@@ -20,3 +20,5 @@
    security.rst
    analyze.rst
    kernel-cmdline.rst
+   failure_states.rst
+   exported_errors.rst
diff -Nru cloud-init-23.4.4/doc/rtd/explanation/instancedata.rst cloud-init-24.1.3/doc/rtd/explanation/instancedata.rst
--- cloud-init-23.4.4/doc/rtd/explanation/instancedata.rst 2024-02-27 15:17:52.000000000 +0000
+++ cloud-init-24.1.3/doc/rtd/explanation/instancedata.rst 2024-03-27 13:14:04.000000000 +0000
@@ -232,7 +232,7 @@
This is a cloud-init configuration key present in :file:`/etc/cloud/cloud.cfg`
which describes cloud-init's configured `default_user`, `distro`, `network`
-renderes, and `paths` that cloud-init will use. Not to be confused with the
+renderers, and `paths` that cloud-init will use. Not to be confused with the
underlying host ``sys_info`` key above.

``v1``
@@ -332,8 +332,8 @@
Example output:

- - ip-10-41-41-70
- -
+ - ``ip-10-41-41-70``
+ - ````

``v1.machine``
^^^^^^^^^^^^^^
@@ -445,7 +445,7 @@
"grub_dpkg",
"apt_pipelining",
"apt_configure",
-"ubuntu_advantage",
+"ubuntu_pro",
"ntp",
"timezone",
"disable_ec2_metadata",
@@ -462,7 +462,6 @@
"chef",
"mcollective",
"salt_minion",
-"rightscale_userdata",
"scripts_vendor",
"scripts_per_once",
"scripts_per_boot",
@@ -475,7 +474,6 @@
"power_state_change"
],
"cloud_init_modules": [
-"migrator",
"seed_random",
"bootcmd",
"write_files",
diff -Nru cloud-init-23.4.4/doc/rtd/howto/debug_user_data.rst cloud-init-24.1.3/doc/rtd/howto/debug_user_data.rst
--- cloud-init-23.4.4/doc/rtd/howto/debug_user_data.rst 2024-02-27 15:17:52.000000000 +0000
+++ cloud-init-24.1.3/doc/rtd/howto/debug_user_data.rst 2024-03-27 13:14:04.000000000 +0000
@@ -1,43 +1,55 @@
-How to debug user data
-======================
+.. _check_user_data_cloud_config:

-Two of the most common issues with cloud config user data are:
+How to validate user data cloud config
+======================================
+
+The two most common issues with cloud config user data are:

1. Incorrectly formatted YAML
-2. The first line does not contain ``#cloud-config``
+2. The first line does not start with ``#cloud-config``
+
Static user data validation
---------------------------

-To verify your cloud config is valid YAML you can use `validate-yaml.py`_.
-
-To ensure the keys and values in your user data are correct, you can run:
+Cloud-init is capable of validating cloud config user data directly from
+its datasource (i.e. on a running cloud instance). To do this, you can run:

.. code-block:: shell-session

   sudo cloud-init schema --system --annotate

-Or, to test YAML in a file:
+Or, to test YAML in a specific file:

.. code-block:: shell-session

   cloud-init schema -c test.yml --annotate

-Log analysis
-------------
-
-If you can log into your system, the best way to debug your system is to
-check the contents of the log files :file:`/var/log/cloud-init.log` and
-:file:`/var/log/cloud-init-output.log` for warnings, errors, and
-tracebacks. Tracebacks are always reportable bugs.
+Example output:

-To report any bugs you find, :ref:`refer to this guide `.
+.. code-block:: shell-session

-Validation service
-------------------
+   $ cloud-init schema --config-file=test.yaml --annotate
+   #cloud-config
+   users:
+     - name: holmanb		# E1,E2,E3
+       gecos: Brett Holman
+       primary_group: holmanb
+       lock_passwd: false
+       invalid_key: true
+
+   # Errors: -------------
+   # E1: Additional properties are not allowed ('invalid_key' was unexpected)
+   # E2: {'name': 'holmanb', 'gecos': 'Brett Holman', 'primary_group': 'holmanb', 'lock_passwd': False, 'invalid_key': True} is not of type 'array'
+   # E3: {'name': 'holmanb', 'gecos': 'Brett Holman', 'primary_group': 'holmanb', 'lock_passwd': False, 'invalid_key': True} is not of type 'string'
+
+Debugging
+---------
+
+If your user-data cloud config is correct according to the
+``cloud-init schema`` command, but you are still having issues, then please
+refer to our :ref:`debugging guide`.

-Another option to is to use the self-hosted HTTP `validation service`_,
-refer to its documentation for more info.
+To report any bugs you find, :ref:`refer to this guide `.

.. LINKS
.. _validate-yaml.py: https://github.com/canonical/cloud-init/blob/main/tools/validate-yaml.py
diff -Nru cloud-init-23.4.4/doc/rtd/howto/debugging.rst cloud-init-24.1.3/doc/rtd/howto/debugging.rst
--- cloud-init-23.4.4/doc/rtd/howto/debugging.rst 1970-01-01 00:00:00.000000000 +0000
+++ cloud-init-24.1.3/doc/rtd/howto/debugging.rst 2024-03-27 13:14:04.000000000 +0000
@@ -0,0 +1,183 @@
+.. _how_to_debug:
+
+How to debug cloud-init
+***********************
+
+There are several cloud-init :ref:`failure modes` that one may
+need to debug. Debugging is specific to the scenario, but the starting points
+are often similar:
+
+* :ref:`I cannot log in`
+* :ref:`Cloud-init did not run`
+* :ref:`Cloud-init did the unexpected`
+* :ref:`Cloud-init never finished running`
+
+.. _cannot_log_in:
+
+I can't log in to my instance
+=============================
+
+One of the more challenging scenarios to debug is when you don't have
+shell access to your instance. You have a few options:
+
+1. Acquire log messages from the serial console and check for any errors.
+
+2. To access instances without SSH available, create a user with password
+   access (using the user-data) and log in via the cloud serial port console.
+   This only works if ``cc_users_groups`` successfully ran.
+
+3. Try running the same user-data locally, such as in one of the
+   :ref:`tutorials`. Use LXD or QEMU locally to get a shell or
+   logs, then debug with :ref:`these steps`.
+
+4. Try copying the image to your local system, mounting the filesystem
+   locally, and inspecting the image logs for clues.
+
+.. _did_not_run:
+
+Cloud-init did not run
+======================
+
+1. Check the output of ``cloud-init status --long``
+
+   - what is the value of the ``'extended_status'`` key?
+   - what is the value of the ``'boot_status_code'`` key?
+
+   See :ref:`our reported status explanation` for more
+   information on the status.
+
+2. Check the contents of :file:`/run/cloud-init/ds-identify.log`
+
+   This log file is used when the platform that cloud-init is running on
+   :ref:`is detected`. This stage enables or disables cloud-init.
+
+3. Check the status of the services
+
+   .. code-block::
+
+      systemctl status cloud-init-local.service cloud-init.service\
+      cloud-config.service cloud-final.service
+
+   Cloud-init may have started to run, but not completed. This shows how many,
+   and which, cloud-init stages completed.
+
+.. _did_not_do_the_thing:
+
+Cloud-init ran, but didn't do what I want it to
+===============================================
+
+1. If you are using cloud-init's user data
+   :ref:`cloud config`, make sure
+   to :ref:`validate your user data cloud config`.
+
+2. Check for errors in ``cloud-init status --long``
+
+   - what is the value of the ``'errors'`` key?
+   - what is the value of the ``'recoverable_errors'`` key?
+
+   See :ref:`our guide on exported errors` for more
+   information on these exported errors.
+
+3. For more context on errors, check the log files:
+
+   - :file:`/var/log/cloud-init.log`
+   - :file:`/var/log/cloud-init-output.log`
+
+   Identify errors in the logs and the lines preceding these errors.
+
+   Ask yourself:
+
+   - According to the log files, what went wrong?
+   - How does the cloud-init error relate to the configuration provided
+     to this instance?
+   - What does the documentation say about the parts of the configuration that
+     relate to this error? Did a configuration module fail?
+   - What :ref:`failure state` is cloud-init in?
+
+
+.. _did_not_finish_running:
+
+Cloud-init never finished running
+=================================
+
+There are many reasons why cloud-init may fail to complete. Some reasons are
+internal to cloud-init, but in other cases, cloud-init's failure to
+complete may be a symptom of failure in other components of the
+system, or the result of user configuration.
+
+External reasons
+----------------
+
+- Failed dependent services in the boot.
+- Bugs in the kernel or drivers.
+- Bugs in external userspace tools that are called by ``cloud-init``.
+
+Internal reasons
+----------------
+
+- A command in ``bootcmd`` or ``runcmd`` that never completes (e.g., running
+  :command:`cloud-init status --wait` will deadlock).
+- Configurations that disable timeouts or set extremely high timeout values.
+
+To start debugging
+------------------
+
+1. Check ``dmesg`` for errors:
+
+   .. code-block::
+
+      dmesg -T | grep -i -e warning -e error -e fatal -e exception
+
+2. Investigate other systemd services that failed:
+
+   .. code-block::
+
+      systemctl --failed
+
+3. Check the output of ``cloud-init status --long``
+
+   - what is the value of the ``'extended_status'`` key?
+   - what is the value of the ``'boot_status_code'`` key?
+
+   See :ref:`our guide on exported errors` for more
+   information on these exported errors.
+
+4. Identify which cloud-init :ref:`boot stage` is currently
+   running:
+
+   .. code-block::
+
+      systemctl status cloud-init-local.service cloud-init.service\
+      cloud-config.service cloud-final.service
+
+   Cloud-init may have started to run, but not completed. This shows how many,
+   and which, cloud-init stages completed.
+
+5. Use the PID of the running service to find all running subprocesses.
+   Any running process that was spawned by cloud-init may be blocking
+   cloud-init from continuing.
+
+   .. code-block::
+
+      pstree
+
+   Ask yourself:
+
+   - Which process is still running?
+   - Why is this process still running?
+   - How does this process relate to the configuration that I provided?
+
+6. For more context on errors, check the log files:
+
+   - :file:`/var/log/cloud-init.log`
+   - :file:`/var/log/cloud-init-output.log`
+
+   Identify errors in the logs and the lines preceding these errors.
+
+   Ask yourself:
+
+   - According to the log files, what went wrong?
+   - How does the cloud-init error relate to the configuration provided to this
+     instance?
+   - What does the documentation say about the parts of the configuration that
+     relate to this error?
diff -Nru cloud-init-23.4.4/doc/rtd/howto/identify_datasource.rst cloud-init-24.1.3/doc/rtd/howto/identify_datasource.rst
--- cloud-init-23.4.4/doc/rtd/howto/identify_datasource.rst 2024-02-27 15:17:52.000000000 +0000
+++ cloud-init-24.1.3/doc/rtd/howto/identify_datasource.rst 2024-03-27 13:14:04.000000000 +0000
@@ -28,28 +28,3 @@
The ``force`` parameter allows the command to be run again since the instance
has already launched. The other options increase the verbosity of logging and
outputs the logs to :file:`STDERR`.
-
-How can I re-run datasource detection and ``cloud-init``?
----------------------------------------------------------
-
-If you are developing a new datasource or working on debugging an issue it
-may be useful to re-run datasource detection and the initial setup of
-``cloud-init``.
-
-.. warning::
-
-    **Do not run the following commands on production systems.**
-
-    These commands will re-run ``cloud-init`` as if this were first boot of a
-    system. At the very least, this will cycle SSH host keys but may do
-    substantially more.
-
-To re-run datasource detection, you must first force :file:`ds-identify` to
-re-run, then clean up any logs, and finally, re-run ``cloud-init``:
-
-.. code-block:: bash
-
-    sudo DI_LOG=stderr /usr/lib/cloud-init/ds-identify --force
-    sudo cloud-init clean --logs
-    sudo cloud-init init --local
-    sudo cloud-init init
diff -Nru cloud-init-23.4.4/doc/rtd/howto/index.rst cloud-init-24.1.3/doc/rtd/howto/index.rst
--- cloud-init-23.4.4/doc/rtd/howto/index.rst 2024-02-27 15:17:52.000000000 +0000
+++ cloud-init-24.1.3/doc/rtd/howto/index.rst 2024-03-27 13:14:04.000000000 +0000
@@ -18,10 +18,13 @@
.. toctree::
   :maxdepth: 1

-   Test cloud-init locally before deploying
+   Run cloud-init locally before deploying
+   Re-run cloud-init
   Change how often a module runs
-   Debug my user data
+   Validate my user data
+   Debug cloud-init
+   Check the status of cloud-init
   Report a bug
   Identify my datasource
-   Locate log, configuration, and data files
   Disable cloud-init
+   Test pre-release cloud-init on Ubuntu
diff -Nru cloud-init-23.4.4/doc/rtd/howto/locate_files.rst cloud-init-24.1.3/doc/rtd/howto/locate_files.rst
--- cloud-init-23.4.4/doc/rtd/howto/locate_files.rst 2024-02-27 15:17:52.000000000 +0000
+++ cloud-init-24.1.3/doc/rtd/howto/locate_files.rst 1970-01-01 00:00:00.000000000 +0000
@@ -1,72 +0,0 @@
-How to find files
-*****************
-
-Cloud-init log files
-====================
-
-``Cloud-init`` uses two files to log to:
-
-- :file:`/var/log/cloud-init-output.log`:
-  Captures the output from each stage of ``cloud-init`` when it runs.
-- :file:`/var/log/cloud-init.log`:
-  Very detailed log with debugging output, describing each action taken.
-- :file:`/run/cloud-init`: - Contains logs about how ``cloud-init`` enabled or disabled itself, as well as - what platforms/datasources were detected. These logs are most useful when - trying to determine what ``cloud-init`` did or did not run. - -Be aware that each time a system boots, new logs are appended to the files in -:file:`/var/log`. Therefore, the files may contain information from more -than one boot. - -When reviewing these logs, look for errors or Python tracebacks. - -Configuration files -=================== - -``Cloud-init`` configuration files are provided in two places: - -- :file:`/etc/cloud/cloud.cfg` -- :file:`/etc/cloud/cloud.cfg.d/*.cfg` - -These files can define the modules that run during instance initialisation, -the datasources to evaluate on boot, as well as other settings. - -See the :ref:`configuration sources explanation` and -:ref:`configuration reference` pages for more details. - -Data files -========== - -Inside the :file:`/var/lib/cloud/` directory there are two important -subdirectories: - -:file:`instance` ----------------- - -The :file:`/var/lib/cloud/instance` directory is a symbolic link that points -to the most recently used :file:`instance-id` directory. This folder contains -the information ``cloud-init`` received from datasources, including vendor and -user data. This can help to determine that the correct data was passed. - -It also contains the :file:`datasource` file that contains the full information -about which datasource was identified and used to set up the system. - -Finally, the :file:`boot-finished` file is the last thing that -``cloud-init`` creates. - -:file:`data` ------------- - -The :file:`/var/lib/cloud/data` directory contain information related to the -previous boot: - -* :file:`instance-id`: - ID of the instance as discovered by ``cloud-init``. Changing this file has - no effect. -* :file:`result.json`: - JSON file showing both the datasource used to set up the instance, and - whether any errors occurred. -* :file:`status.json`: - JSON file showing the datasource used, a breakdown of all four modules, - whether any errors occurred, and the start and stop times. diff -Nru cloud-init-23.4.4/doc/rtd/howto/predeploy_testing.rst cloud-init-24.1.3/doc/rtd/howto/predeploy_testing.rst --- cloud-init-23.4.4/doc/rtd/howto/predeploy_testing.rst 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/howto/predeploy_testing.rst 1970-01-01 00:00:00.000000000 +0000 @@ -1,141 +0,0 @@ -.. _predeploy_testing: - -How to test ``cloud-init`` locally before deploying -*************************************************** - -It's very likely that you will want to test ``cloud-init`` locally before -deploying it to the cloud. Fortunately, there are several different virtual -machines (VMs) and container tools that are ideal for this sort of local -testing. - -In this guide, we will show how to use three of the most popular tools: -`Multipass`_, `LXD`_ and `QEMU`_. - -Multipass -========= - -Multipass is a cross-platform tool for launching Ubuntu VMs across Linux, -Windows, and macOS. - -When a user launches a Multipass VM, user data can be passed by adding the -``--cloud-init`` flag and the appropriate YAML file containing the user data: - -.. code-block:: shell-session - - $ multipass launch bionic --name test-vm --cloud-init userdata.yaml - -Multipass will validate the YAML syntax of the cloud-config file before -attempting to start the VM! 
A nice addition which saves time when you're -experimenting and launching instances with various cloud-configs. - -Multipass *only* supports passing user data, and *only* as YAML cloud-config -files. Passing a script, a MIME archive, or any of the other user data formats -``cloud-init`` supports will result in an error from the YAML syntax validator. - -LXD -=== - -LXD offers a streamlined user experience for using Linux system containers. -With LXD, a user can pass: - -* user data, -* vendor data, -* metadata, and -* network configuration. - -The following command initialises a container with user data: - -.. code-block:: shell-session - - $ lxc init ubuntu-daily:bionic test-container - $ lxc config set test-container user.user-data - < userdata.yaml - $ lxc start test-container - -To avoid the extra commands this can also be done at launch: - -.. code-block:: shell-session - - $ lxc launch ubuntu-daily:bionic test-container --config=user.user-data="$(cat userdata.yaml)" - -Finally, a profile can be set up with the specific data if you need to -launch this multiple times: - -.. code-block:: shell-session - - $ lxc profile create dev-user-data - $ lxc profile set dev-user-data user.user-data - < cloud-init-config.yaml - $ lxc launch ubuntu-daily:bionic test-container -p default -p dev-user-data - -The above examples all show how to pass user data. To pass other types of -configuration data use the config option specified below: - -+----------------+---------------------------+ -| Data | Config option | -+================+===========================+ -| user data | cloud-init.user-data | -+----------------+---------------------------+ -| vendor data | cloud-init.vendor-data | -+----------------+---------------------------+ -| network config | cloud-init.network-config | -+----------------+---------------------------+ - -See the LXD `Instance Configuration`_ docs for more info about configuration -values or the LXD `Custom Network Configuration`_ document for more about -custom network config. - -QEMU -==== - -The :command:`cloud-localds` command from the `cloud-utils`_ package generates -a disk with user-supplied data. The ``NoCloud`` datasouce allows users to -provide their own user data, metadata, or network configuration directly to -an instance without running a network service. This is helpful for launching -local cloud images with QEMU, for example. - -The following is an example of creating the local disk using the -:command:`cloud-localds` command: - -.. code-block:: shell-session - - $ cat >user-data <` +format, you might wish to re-run just a single configuration module. +Cloud-init provides the ability to run a single module in isolation and +separately from boot. This command is: + +.. code-block:: shell-session + + $ sudo cloud-init single --name cc_ssh --frequency always + +Example output: + +.. code-block:: + + ... + Generating public/private ed25519 key pair + ... + +This subcommand is not called by the init system. It can be called manually to +load the configured datasource and run a single cloud-config module once, using +the cached user data and metadata after the instance has booted. + +.. note:: + + Each cloud-config module has a module ``FREQUENCY`` configured: ``PER_INSTANCE``, ``PER_BOOT``, ``PER_ONCE`` or ``PER_ALWAYS``. When a module is run by cloud-init, it stores a semaphore file in :file:`/var/lib/cloud/instance/sem/config_.` which marks when the module last successfully ran. 
Presence of this semaphore file prevents a module from running again if it has already been run. + +Inspect :file:`cloud-init.log` for output of what operations were performed as +a result. + +.. _partially_rerun_cloud_init: + +How to partially re-run cloud-init +================================== + +If the behavior you are testing runs on every boot, there are a couple +of ways to test this behavior. + +Manually run cloud-init stages +------------------------------ + +Note that during normal boot of cloud-init, the init system runs these +stages at specific points during boot. This means that running the code +manually after booting the system may cause the code to interact with +the system in a different way than it does while it boots. + +.. code-block:: shell-session + + cloud-init init --local + cloud-init init + cloud-init modules --mode=config + cloud-init modules --mode=final + +Reboot the instance +------------------- + +Rebooting the instance will take a little bit longer, however it will +make cloud-init stages run at the correct times during boot, so it will +behave more correctly. + +.. code-block:: shell-session + + reboot -h now diff -Nru cloud-init-23.4.4/doc/rtd/howto/run_cloud_init_locally.rst cloud-init-24.1.3/doc/rtd/howto/run_cloud_init_locally.rst --- cloud-init-23.4.4/doc/rtd/howto/run_cloud_init_locally.rst 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/howto/run_cloud_init_locally.rst 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,217 @@ +.. _run_cloud_init_locally: + +How to run ``cloud-init`` locally +********************************* + +It's very likely that you will want to test ``cloud-init`` locally before +deploying it to the cloud. Fortunately, there are several different virtual +machine (VM) and container tools that are ideal for this sort of local +testing. + +* :ref:`boot cloud-init with QEMU ` +* :ref:`boot cloud-init with LXD ` +* :ref:`boot cloud-init with Libvirt ` +* :ref:`boot cloud-init with Multipass ` + +.. _run_with_qemu: + +QEMU +==== + +`QEMU`_ is a general purpose computer hardware emulator that is capable of +running virtual machines with hardware acceleration as well as emulating the +instruction sets of different architectures than the host that you are +running on. + +The ``NoCloud`` datasource allows users to provide their own user data, +metadata, or network configuration directly to an instance without running a +network service. This is helpful for launching local cloud images with QEMU. + +Create your configuration +------------------------- + +We will leave the :file:`network-config` and :file:`meta-data` files empty, but +populate :file:`user-data` with a cloud-init configuration. You may edit the +:file:`network-config` and :file:`meta-data` files if you have a config to +provide. + +.. code-block:: shell-session + + $ touch network-config + $ touch meta-data + $ cat >user-data <user-data <`. + +Cloud-init status +----------------- + +To simplify this, cloud-init provides a tool, ``cloud-init status`` to +report the current status of cloud-init. + +.. code-block:: shell-session + + $ cloud-init status + "done" + +Cloud-init's extended status +---------------------------- + +Cloud-init is also capable of reporting when cloud-init has not been +able to complete the tasks described in a user configuration. If cloud-init +has experienced issues while running, the extended status will include the word +"degraded" in its status. 
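+
+As a quick check, one could gate follow-up automation on both completion and
+a non-degraded result. A minimal sketch (assuming the human-readable
+``--long`` output includes the extended status):
+
+.. code-block:: shell-session
+
+    $ cloud-init status --wait --long | grep -q degraded && echo "boot was degraded"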
+ +Cloud-init can report its internal state via the ``status --format json`` +subcommand under the ``extended_status`` key. + +.. code-block:: shell-session + :emphasize-lines: 7 + + $ cloud-init status --format json + { + "boot_status_code": "enabled-by-generator", + "datasource": "lxd", + "detail": "DataSourceLXD", + "errors": [], + "extended_status": "degraded done", + "init": { + "errors": [], + "finished": 1708550839.1837437, + "recoverable_errors": {}, + "start": 1708550838.6881146 + }, + "init-local": { + "errors": [], + "finished": 1708550838.0196638, + "recoverable_errors": {}, + "start": 1708550837.7719762 + }, + "last_update": "Wed, 21 Feb 2024 21:27:24 +0000", + "modules-config": { + "errors": [], + "finished": 1708550843.8297973, + "recoverable_errors": { + "WARNING": [ + "Removing /etc/apt/sources.list to favor deb822 source format" + ] + }, + "start": 1708550843.7163966 + }, + "modules-final": { + "errors": [], + "finished": 1708550844.0884337, + "recoverable_errors": {}, + "start": 1708550844.029698 + }, + "recoverable_errors": { + "WARNING": [ + "Removing /etc/apt/sources.list to favor deb822 source format" + ] + }, + "stage": null, + "status": "done" + } + + +See the list of all possible reported statuses: + +.. code-block:: shell-session + + "not started" + "running" + "done" + "error - done" + "error - running" + "degraded done" + "degraded running" + "disabled" + +Cloud-init enablement status +---------------------------- + +Separately from the current running status described above, cloud-init can also +report how it was disabled or enabled. This can be viewed by checking +the `boot_status_code` in ``cloud-init status --long``, which may +contain any of the following states: + +- ``'unknown'``: ``ds-identify`` has not run yet to determine if cloud-init + should be run during this boot +- ``'disabled-by-marker-file'``: :file:`/etc/cloud/cloud-init.disabled` exists + which prevents cloud-init from ever running +- ``'disabled-by-generator'``: ``ds-identify`` determined no applicable + cloud-init datasources +- ``'disabled-by-kernel-cmdline'``: kernel command line contained + cloud-init=disabled +- ``'disabled-by-environment-variable'``: environment variable + ``KERNEL_CMDLINE`` contained ``cloud-init=disabled`` +- ``'enabled-by-kernel-cmdline'``: kernel command line contained + cloud-init=enabled +- ``'enabled-by-generator'``: ``ds-identify`` detected possible cloud-init + datasources +- ``'enabled-by-sysvinit'``: enabled by default in SysV init environment + +See :ref:`our explanation of failure states` for more +information. diff -Nru cloud-init-23.4.4/doc/rtd/howto/ubuntu_test_prerelease.rst cloud-init-24.1.3/doc/rtd/howto/ubuntu_test_prerelease.rst --- cloud-init-23.4.4/doc/rtd/howto/ubuntu_test_prerelease.rst 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/howto/ubuntu_test_prerelease.rst 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,62 @@ +.. _ubuntu_test_pre_release: + +Test pre-release cloud-init +=========================== + +After the cloud-init team creates an upstream release, cloud-init will +be released in the -proposed APT repository for a +:ref:`period of testing`. Users are encouraged to test their +workloads on this pending release so that bugs can be caught and fixed prior +to becoming more broadly available via the -updates repository. This guide +describes how to test the pre-release package on Ubuntu. 
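+
+Before making any changes, it can be worth recording which cloud-init version
+is currently installed, so the upgrade can be confirmed later (a small sketch;
+``apt policy`` also shows which pocket each version comes from):
+
+.. code-block:: bash
+
+    apt policy cloud-init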
+
+Add the -proposed repository pocket
+-----------------------------------
+
+The -proposed repository pocket will contain the cloud-init package to be
+tested prior to release in the -updates pocket.
+
+.. code-block:: bash
+
+    echo "deb http://archive.ubuntu.com/ubuntu $(lsb_release -sc)-proposed main" >> /etc/apt/sources.list.d/proposed.list
+    apt update
+
+Install the pre-release cloud-init package
+------------------------------------------
+
+.. code-block:: bash
+
+    apt install cloud-init
+
+Test the package
+----------------
+
+Whatever workload you use cloud-init for in production is the best one
+to test. This ensures that you can discover and report any bugs that the
+cloud-init developers missed during testing before cloud-init gets
+released more broadly.
+
+If issues are found during testing, please file a `new cloud-init bug`_ and
+leave a message in the `#cloud-init IRC channel`_.
+
+Remove the proposed repository
+------------------------------
+
+Do this to avoid unintentionally installing other unreleased packages.
+
+.. code-block:: bash
+
+    rm -f /etc/apt/sources.list.d/proposed.list
+    apt update
+
+Remove artefacts and reboot
+---------------------------
+
+This will cause cloud-init to re-run as if this were a first boot.
+
+.. code-block:: bash
+
+    sudo cloud-init clean --logs --reboot
+
+.. _new cloud-init bug: https://github.com/canonical/cloud-init/issues
+.. _#cloud-init IRC channel: https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init
diff -Nru cloud-init-23.4.4/doc/rtd/reference/base_config_reference.rst cloud-init-24.1.3/doc/rtd/reference/base_config_reference.rst
--- cloud-init-23.4.4/doc/rtd/reference/base_config_reference.rst 2024-02-27 15:17:52.000000000 +0000
+++ cloud-init-24.1.3/doc/rtd/reference/base_config_reference.rst 2024-03-27 13:14:04.000000000 +0000
@@ -232,19 +232,24 @@
 
 ``datasource_list``
 ^^^^^^^^^^^^^^^^^^^
 
-Prioritised list of datasources that ``cloud-init`` will attempt to find on
-boot. By default, this will be defined in :file:`/etc/cloud/cloud.cfg.d`. There
-are two primary use cases for modifying the ``datasource_list``:
-
-1. Remove known invalid datasources. This may avoid long timeouts when
-   attempting to detect datasources on any system without a systemd-generator
-   hook that invokes ``ds-identify``.
-2. Override default datasource ordering to discover a different datasource
-   type than would typically be prioritised.
-
-If ``datasource_list`` has only a single entry (or a single entry + ``None``),
-`cloud-init` will automatically assume and use this datasource without
-attempting detection.
+This key contains a prioritised list of datasources that ``cloud-init``
+attempts to discover on boot. By default, this is defined in
+:file:`/etc/cloud/cloud.cfg.d`.
+
+There are a few reasons to modify the ``datasource_list``:
+
+1. Override the default datasource discovery priority order.
+2. Force cloud-init to use a specific datasource: a single entry in
+   the list (or a single entry and ``None``) will override datasource
+   discovery and force the specified datasource to run.
+3. Remove known invalid datasources: this might improve boot speed on distros
+   that do not use ``ds-identify`` to detect and select the datasource.
+
+.. warning::
+
+   This key is unique in that it uses a subset of YAML syntax. It **requires**
+   that the key and its contents, a list, share a single line - no
+   newlines.
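+
+For example, a sketch of forcing a single datasource via a drop-in file
+(the file name :file:`/etc/cloud/cloud.cfg.d/99-force-nocloud.cfg` is
+illustrative):
+
+.. code-block:: yaml
+
+   # Valid: the key and its list value share a single line
+   datasource_list: [ NoCloud, None ]
+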
``vendor_data``/``vendor_data2``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -298,7 +303,6 @@
 
 # The modules that run in the 'init' stage
 cloud_init_modules:
- - migrator
  - seed_random
  - bootcmd
  - write_files
@@ -316,7 +320,9 @@
 
 # The modules that run in the 'config' stage
 cloud_config_modules:
+ - wireguard
  - snap
+ - ubuntu_autoinstall
  - ssh_import_id
  - keyboard
  - locale
@@ -324,7 +330,7 @@
  - grub_dpkg
  - apt_pipelining
  - apt_configure
- - ubuntu_advantage
+ - ubuntu_pro
  - ntp
  - timezone
  - disable_ec2_metadata
@@ -341,10 +347,10 @@
  - write_files_deferred
  - puppet
  - chef
+ - ansible
  - mcollective
  - salt_minion
  - reset_rmc
- - rightscale_userdata
  - scripts_vendor
  - scripts_per_once
  - scripts_per_boot
@@ -373,7 +379,9 @@
    sudo: ["ALL=(ALL) NOPASSWD:ALL"]
    shell: /bin/bash
 network:
+   dhcp_client_priority: [dhclient, dhcpcd, udhcpc]
    renderers: ['netplan', 'eni', 'sysconfig']
+   activators: ['netplan', 'eni', 'network-manager', 'networkd']
 # Automatically discover the best ntp_client
 ntp_client: auto
 # Other config here will be given to the distro class and/or path classes
diff -Nru cloud-init-23.4.4/doc/rtd/reference/cli.rst cloud-init-24.1.3/doc/rtd/reference/cli.rst
--- cloud-init-23.4.4/doc/rtd/reference/cli.rst 2024-02-27 15:17:52.000000000 +0000
+++ cloud-init-24.1.3/doc/rtd/reference/cli.rst 2024-03-27 13:14:04.000000000 +0000
@@ -15,14 +15,12 @@
 
 .. code-block::
 
-   usage: cloud-init [-h] [--version] [--file FILES] [--debug] [--force]
+   usage: cloud-init [-h] [--version] [--debug] [--force]
                      {init,modules,single,query,features,analyze,devel,collect-logs,clean,status,schema}
                      ...
 
    options:
     -h, --help            show this help message and exit
     --version, -v         Show program's version number and exit.
-    --file FILES, -f FILES
-                          Use additional yaml configuration files.
     --debug, -d           Show additional pre-action logging (default: False).
     --force               Force running even if no datasource is found (use at
                           your own risk).
@@ -83,6 +81,33 @@
    config files for ssh daemon. Argument `network` removes all generated
    config files for network. `all` removes config files of all types.
 
+.. note::
+
+   Cloud-init provides the directory :file:`/etc/cloud/clean.d/` for third-party
+   applications which need additional configuration artifact cleanup from
+   the filesystem when the `clean` command is invoked.
+
+   The :command:`clean` operation is typically performed by image creators
+   when preparing a golden image for clone and redeployment. The clean command
+   removes any cloud-init semaphores, allowing cloud-init to treat the next
+   boot of this image as the "first boot". When the image is next booted,
+   cloud-init will perform all initial configuration based on any valid
+   datasource meta-data and user-data.
+
+   Any executable scripts in this subdirectory will be invoked in lexicographical
+   order with run-parts when running the :command:`clean` command.
+
+   Such scripts are typically named with a ##- prefix, like the following:
+   :file:`/etc/cloud/clean.d/99-live-installer`
+
+   An example of a script is:
+
+   .. code-block:: bash
+
+      sudo rm -rf /var/lib/installer_imgs/
+      sudo rm -rf /var/log/installer/
+
+
 .. _cli_collect_logs:
 
 :command:`collect-logs`
 -----------------------
@@ -135,11 +160,30 @@
 
 :command:`hotplug-hook`
 -----------------------
 
-Respond to newly added system devices by retrieving updated system metadata
-and bringing up/down the corresponding device. This command is intended to be
+Hotplug-related subcommands. This command is intended to be
 called via a ``systemd`` service and is not considered user-accessible except
 for debugging purposes.
+
+:command:`query`
+^^^^^^^^^^^^^^^^
+
+Query if hotplug is enabled for a given subsystem.
+
+:command:`handle`
+^^^^^^^^^^^^^^^^^
+
+Respond to newly added system devices by retrieving updated system metadata
+and bringing up/down the corresponding device.
+
+:command:`enable`
+^^^^^^^^^^^^^^^^^
+
+Enable hotplug for a given subsystem. This is a last-resort command for
+administrators to enable hotplug in running instances. The recommended
+method is configuring :ref:`events`, if not enabled by default in the active
+datasource.
+
 .. _cli_features:
 
 :command:`features`
@@ -173,6 +217,7 @@
    :file:`/var/lib/cloud/sem`.
 
 * :command:`--local`: Run *init-local* stage instead of *init*.
+* :command:`--file`: Use additional yaml configuration files.
 
 .. _cli_modules:
@@ -195,6 +240,7 @@
 * :command:`--mode [init|config|final]`: Run ``modules:init``,
   ``modules:config`` or ``modules:final`` ``cloud-init`` stages.
   See :ref:`boot_stages` for more info.
+* :command:`--file`: Use additional yaml configuration files.
 
 .. _cli_query:
@@ -333,11 +379,12 @@
 
 * :command:`--name`: The cloud-config module name to run.
 * :command:`--frequency`: Module frequency for this run.
-  One of (``always``|``once-per-instance``|``once``).
+  One of (``always``|``instance``|``once``).
 * :command:`--report`: Enable reporting.
+* :command:`--file`: Use additional yaml configuration files.
 
 The following example re-runs the ``cc_set_hostname`` module ignoring the
-module default frequency of ``once-per-instance``:
+module default frequency of ``instance``:
 
 .. code-block:: shell-session
diff -Nru cloud-init-23.4.4/doc/rtd/reference/datasources/ec2.rst cloud-init-24.1.3/doc/rtd/reference/datasources/ec2.rst
--- cloud-init-23.4.4/doc/rtd/reference/datasources/ec2.rst 2024-02-27 15:17:52.000000000 +0000
+++ cloud-init-24.1.3/doc/rtd/reference/datasources/ec2.rst 2024-03-27 13:14:04.000000000 +0000
@@ -86,8 +86,8 @@
 entry that successfully returns a 200 response for
 ``//meta-data/instance-id`` will be selected.
 
-Default: ['http://169.254.169.254', 'http://[fd00:ec2::254]',
-'http://instance-data:8773'].
+Default: [``'http://169.254.169.254'``, ``'http://[fd00:ec2::254]'``,
+``'http://instance-data.:8773'``].
 
 ``max_wait``
 ------------
@@ -150,4 +150,11 @@
   For example: the primary NIC will have a DHCP route-metric of 100,
   the next NIC will have 200.
+* For EC2 instances with multiple NICs, policy-based routing will be
+  configured on secondary NICs / secondary IPs to ensure outgoing packets
+  are routed via the correct interface.
+  This network configuration is only applied on distros using Netplan, and
+  only at first boot, but it can be configured to be applied on every boot
+  and when NICs are hotplugged; see :ref:`events`.
+
 .. _EC2 tags user guide: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS
diff -Nru cloud-init-23.4.4/doc/rtd/reference/datasources/lxd.rst cloud-init-24.1.3/doc/rtd/reference/datasources/lxd.rst
--- cloud-init-23.4.4/doc/rtd/reference/datasources/lxd.rst 2024-02-27 15:17:52.000000000 +0000
+++ cloud-init-24.1.3/doc/rtd/reference/datasources/lxd.rst 2024-03-27 13:14:04.000000000 +0000
@@ -24,8 +24,8 @@
 warnings from ``cloud-init``, and ``cloud-init`` will keep the
 originally-detected LXD datasource.
-The LXD datasource is detected as viable by ``ds-identify`` during ``systemd``
-generator time when either ``/dev/lxd/sock`` exists, or
+The LXD datasource is detected as viable by ``ds-identify`` during the
+:ref:`detect stage` when either ``/dev/lxd/sock`` exists or
 ``/sys/class/dmi/id/board_name`` matches "LXD".
 
 The LXD datasource provides ``cloud-init`` with the ability to react to
diff -Nru cloud-init-23.4.4/doc/rtd/reference/datasources/nocloud.rst cloud-init-24.1.3/doc/rtd/reference/datasources/nocloud.rst
--- cloud-init-23.4.4/doc/rtd/reference/datasources/nocloud.rst 2024-02-27 15:17:52.000000000 +0000
+++ cloud-init-24.1.3/doc/rtd/reference/datasources/nocloud.rst 2024-03-27 13:14:04.000000000 +0000
@@ -141,7 +141,7 @@
 .. code-block:: sh
 
    $ echo -e "instance-id: iid-local01\nlocal-hostname: cloudimg" > meta-data
-   $ echo -e "#cloud-config\npassword: passw0rd\nchpasswd: { expire: False }\nssh_pwauth: True\n" > user-data
+   $ echo -e "#cloud-config\npassword: passw0rd\nchpasswd: { expire: False }\nssh_pwauth: True\ncreate_hostname_file: true\n" > user-data
 
 2. At this stage you have three options:
diff -Nru cloud-init-23.4.4/doc/rtd/reference/datasources/oracle.rst cloud-init-24.1.3/doc/rtd/reference/datasources/oracle.rst
--- cloud-init-23.4.4/doc/rtd/reference/datasources/oracle.rst 2024-02-27 15:17:52.000000000 +0000
+++ cloud-init-24.1.3/doc/rtd/reference/datasources/oracle.rst 2024-03-27 13:14:04.000000000 +0000
@@ -39,6 +39,18 @@
 set to True on an OCI Bare Metal Machine, it will have no effect (though
 this may change in the future).
 
+``max_wait``
+------------
+
+An integer, defaulting to 30. The maximum time in seconds to wait for the
+metadata service to become available. If the metadata service is not
+available within this time, the datasource will fail.
+
+``timeout``
+-----------
+
+An integer, defaulting to 5. The time in seconds to wait for a response from
+the metadata service before retrying.
+
 Example configuration
 ---------------------
@@ -49,5 +61,7 @@
   datasource:
     Oracle:
       configure_secondary_nics: false
+      max_wait: 30
+      timeout: 5
 
 .. _Oracle Compute Infrastructure: https://cloud.oracle.com/
diff -Nru cloud-init-23.4.4/doc/rtd/reference/datasources/scaleway.rst cloud-init-24.1.3/doc/rtd/reference/datasources/scaleway.rst
--- cloud-init-23.4.4/doc/rtd/reference/datasources/scaleway.rst 1970-01-01 00:00:00.000000000 +0000
+++ cloud-init-24.1.3/doc/rtd/reference/datasources/scaleway.rst 2024-03-27 13:14:04.000000000 +0000
@@ -0,0 +1,56 @@
+.. _datasource_scaleway:
+
+Scaleway
+********
+The `Scaleway`_ datasource uses data provided by the Scaleway metadata service
+to do initial configuration of the network services.
+
+The metadata service is reachable at the following addresses:
+
+* IPv4: ``169.254.42.42``
+* IPv6: ``fd00:42::42``
+
+Configuration
+=============
+The Scaleway datasource may be configured in system configuration
+(in `/etc/cloud/cloud.cfg`) or by adding a file with the .cfg suffix containing
+the following information in the `/etc/cloud/cloud.cfg.d` directory::
+
+    datasource:
+      Scaleway:
+        retries: 3
+        timeout: 10
+        max_wait: 2
+        metadata_urls:
+          - alternate_url
+
+* ``retries``
+
+  Controls the maximum number of attempts to reach the metadata service.
+
+* ``timeout``
+
+  Controls the number of seconds to wait for a response from the metadata
+  service for one protocol.
+
+* ``max_wait``
+
+  Controls the number of seconds to wait for a response from the metadata
+  service for all protocols.
+ +* ``metadata_urls`` + + List of additional URLs to be used in an attempt to reach the metadata + service in addition to the existing ones. + +User Data +========= + +cloud-init fetches user data using the metadata service using the `/user_data` +endpoint. Scaleway's documentation provides a detailed description on how to +use `userdata`_. One can also interact with it using the `userdata api`_. + + +.. _Scaleway: https://www.scaleway.com +.. _userdata: https://www.scaleway.com/en/docs/compute/instances/api-cli/using-cloud-init/ +.. _userdata api: https://www.scaleway.com/en/developers/api/instance/#path-user-data-list-user-data diff -Nru cloud-init-23.4.4/doc/rtd/reference/datasources/smartos.rst cloud-init-24.1.3/doc/rtd/reference/datasources/smartos.rst --- cloud-init-23.4.4/doc/rtd/reference/datasources/smartos.rst 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/reference/datasources/smartos.rst 2024-03-27 13:14:04.000000000 +0000 @@ -164,7 +164,7 @@ You can control the ``disk_setup`` in 2 ways: -1. Through the datasource config, you can change the 'alias' of ``ephermeral0`` +1. Through the datasource config, you can change the 'alias' of ``ephemeral0`` to reference another device. The default is: .. code-block:: diff -Nru cloud-init-23.4.4/doc/rtd/reference/datasources/vmware.rst cloud-init-24.1.3/doc/rtd/reference/datasources/vmware.rst --- cloud-init-23.4.4/doc/rtd/reference/datasources/vmware.rst 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/reference/datasources/vmware.rst 2024-03-27 13:14:04.000000000 +0000 @@ -355,12 +355,12 @@ users: - default - name: akutz - primary_group: akutz - sudo: ALL=(ALL) NOPASSWD:ALL - groups: sudo, wheel - lock_passwd: true - ssh_authorized_keys: - - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDE0c5FczvcGSh/tG4iw+Fhfi/O5/EvUM/96js65tly4++YTXK1d9jcznPS5ruDlbIZ30oveCBd3kT8LLVFwzh6hepYTf0YmCTpF4eDunyqmpCXDvVscQYRXyasEm5olGmVe05RrCJSeSShAeptv4ueIn40kZKOghinGWLDSZG4+FFfgrmcMCpx5YSCtX2gvnEYZJr0czt4rxOZuuP7PkJKgC/mt2PcPjooeX00vAj81jjU2f3XKrjjz2u2+KIt9eba+vOQ6HiC8c2IzRkUAJ5i1atLy8RIbejo23+0P4N2jjk17QySFOVHwPBDTYb0/0M/4ideeU74EN/CgVsvO6JrLsPBR4dojkV5qNbMNxIVv5cUwIy2ThlLgqpNCeFIDLCWNZEFKlEuNeSQ2mPtIO7ETxEL2Cz5y/7AIuildzYMc6wi2bofRC8HmQ7rMXRWdwLKWsR0L7SKjHblIwarxOGqLnUI+k2E71YoP7SZSlxaKi17pqkr0OMCF+kKqvcvHAQuwGqyumTEWOlH6TCx1dSPrW+pVCZSHSJtSTfDW2uzL6y8k10MT06+pVunSrWo5LHAXcS91htHV1M1UrH/tZKSpjYtjMb5+RonfhaFRNzvj7cCE1f3Kp8UVqAdcGBTtReoE8eRUT63qIxjw03a7VwAyB2w+9cu1R9/vAo8SBeRqw== sakutz@gmail.com + primary_group: akutz + sudo: ALL=(ALL) NOPASSWD:ALL + groups: sudo, wheel + lock_passwd: true + ssh_authorized_keys: + - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDE0c5FczvcGSh/tG4iw+Fhfi/O5/EvUM/96js65tly4++YTXK1d9jcznPS5ruDlbIZ30oveCBd3kT8LLVFwzh6hepYTf0YmCTpF4eDunyqmpCXDvVscQYRXyasEm5olGmVe05RrCJSeSShAeptv4ueIn40kZKOghinGWLDSZG4+FFfgrmcMCpx5YSCtX2gvnEYZJr0czt4rxOZuuP7PkJKgC/mt2PcPjooeX00vAj81jjU2f3XKrjjz2u2+KIt9eba+vOQ6HiC8c2IzRkUAJ5i1atLy8RIbejo23+0P4N2jjk17QySFOVHwPBDTYb0/0M/4ideeU74EN/CgVsvO6JrLsPBR4dojkV5qNbMNxIVv5cUwIy2ThlLgqpNCeFIDLCWNZEFKlEuNeSQ2mPtIO7ETxEL2Cz5y/7AIuildzYMc6wi2bofRC8HmQ7rMXRWdwLKWsR0L7SKjHblIwarxOGqLnUI+k2E71YoP7SZSlxaKi17pqkr0OMCF+kKqvcvHAQuwGqyumTEWOlH6TCx1dSPrW+pVCZSHSJtSTfDW2uzL6y8k10MT06+pVunSrWo5LHAXcS91htHV1M1UrH/tZKSpjYtjMb5+RonfhaFRNzvj7cCE1f3Kp8UVqAdcGBTtReoE8eRUT63qIxjw03a7VwAyB2w+9cu1R9/vAo8SBeRqw== sakutz@gmail.com #. Please note this step requires that the VM be powered off. All of the commands below use the VMware CLI tool, `govc`_. 
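
   (As an illustrative sketch of that first step, powering the VM off with
   ``govc``; the ``GOVC_URL`` value and VM name here are placeholders:)

   .. code-block:: shell-session

       $ export GOVC_URL='https://user:pass@vcenter.example.com/sdk'
       $ govc vm.power -off my-vm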
diff -Nru cloud-init-23.4.4/doc/rtd/reference/datasources/wsl.rst cloud-init-24.1.3/doc/rtd/reference/datasources/wsl.rst
--- cloud-init-23.4.4/doc/rtd/reference/datasources/wsl.rst 1970-01-01 00:00:00.000000000 +0000
+++ cloud-init-24.1.3/doc/rtd/reference/datasources/wsl.rst 2024-03-27 13:14:04.000000000 +0000
@@ -0,0 +1,208 @@
+.. _datasource_wsl:
+
+WSL
+***
+
+The Windows Subsystem for Linux (WSL) somewhat resembles a container
+hypervisor. A Windows user may have as many Linux distro instances as they
+wish, either created by the distro-launcher workflow (for the distros delivered
+through MS Store) or by importing a tarball containing a root filesystem. This
+page assumes the reader is familiar with WSL. To learn more about that, please
+visit the `Microsoft documentation `_.
+
+Requirements
+==============
+
+1. **WSL interoperability must be enabled**. The datasource needs to execute
+   some Windows binaries to compute the possible locations of the user data
+   files.
+
+2. **WSL automount must be enabled**. The datasource needs to access files in
+   the Windows host filesystem.
+
+3. **The init system must be aware of cloud-init**. WSL has opt-in support for
+   systemd, thus for distros that rely on it, such as Ubuntu, cloud-init will
+   run automatically if systemd is enabled via ``/etc/wsl.conf``. The
+   Ubuntu applications distributed via Microsoft Store enable systemd on
+   first boot, so no action is required if the user sets up a new instance by
+   using them. Users of other distros may find it surprising that cloud-init
+   doesn't run automatically by default. At the time of this writing, only
+   systemd distros are supported by the WSL datasource, although there is
+   nothing hard-coded in the implementation code that requires it, so
+   non-systemd distros may find ways to run cloud-init and make it just work.
+
+Notice that requirements 1 and 2 are met by default, i.e., WSL ships with
+those features enabled; users can disable them, though, which would prevent
+the datasource from working.
+For more information about how to configure WSL,
+`check the official documentation `_.
+
+User data configuration
+========================
+
+The WSL datasource relies exclusively on the Windows filesystem as the provider
+of user data. Access to those files is provided by WSL itself unless disabled
+by the user, thus the datasource doesn't require any special component running
+on the Windows host to provide such data.
+
+User data can be supplied in any
+:ref:`format supported by cloud-init`, such as YAML
+cloud-config files or shell scripts. At runtime, the WSL datasource looks for
+user data in the following locations inside the Windows host filesystem, in the
+order specified below:
+
+1. ``%USERPROFILE%\.cloud-init\.user-data`` holds user data for a
+   specific instance configuration. The datasource resolves the name attributed
+   by WSL to the instance being initialized and looks for this file before any
+   of the subsequent alternatives. Example: ``sid-mlkit.user-data`` matches an
+   instance named ``Sid-MLKit``.
``%USERPROFILE%\.cloud-init\-all.user-data`` for the distro-specific + configuration, matched by the distro ID entry in ``/etc/os-release``, + regardless of the release version. Example: ``debian-all.user-data`` will + affect any instance created from any Debian GNU/Linux image, regardless of + which release, if a more specific configuration file does not match. + +4. ``%USERPROFILE%\.cloud-init\default.user-data`` for the configuration + affecting all instances, regardless of which distro and release version, if + a more specific configuration file does not match. That could be used, for + example, to automatically create a user with the same name across all WSL + instances a user may have. + +Only the first match is loaded, and no config merging is done, even in the +presence of errors. That avoids unexpected behaviour due to surprising merge +scenarios. Also, notice that the file name casing is irrelevant since both the +Windows file names, as well as the WSL distro names, are case-insensitive by +default. If none are found, cloud-init remains disabled. + +.. note:: + Some users may have configured case sensitivity for file names on Windows. + Note that user data files will still be matched case-insensitively. If there + are both `InstanceName.user-data` and `instancename.user-data`, which one + will be chosen is arbitrary and should not be relied on. Thus it's + recommended to avoid that scenario to prevent confusion. + +Since WSL instances are scoped by the Windows user, having the user data files +inside the ``%USERPROFILE%`` directory (typically ``C:\Users\``) +ensures that WSL instance initialization won't be subject to naming conflicts +if the Windows host is shared by multiple users. + + +Vendor and metadata +=================== + +The current implementation doesn't allow supplying vendor data. +The reasoning is that vendor data adds layering, thus complexity, for no real +benefit to the user. Supplying vendor data could be relevant to WSL itself, if +the subsystem was aware of cloud-init and intended to leverage it, which is not +the case to the best of our knowledge at the time of this writing. + +Most of what ``metadata`` is intended for is not applicable under WSL, such as +setting a hostname. Yet, the knowledge of ``metadata.instance-id`` is vital for +cloud-init. So, this datasource provides a default value but also supports +optionally sourcing metadata from a per-instance specific configuration file: +``%USERPROFILE%\.cloud-init\.meta-data``. If that file exists, it +is a YAML-formatted file minimally providing a value for instance ID +such as: ``instance-id: x-y-z``. Advanced users looking to share +snapshots or relaunch a snapshot where cloud-init is re-triggered, must run +``sudo cloud-init clean --logs`` on the instance before snapshot/export, or +create the appropriate ``.meta-data`` file containing ``instance-id: +some-new-instance-id``. + +Unsupported or restricted modules and features +=============================================== + +Certain features of cloud-init and its modules either require further +customization in the code to better fit the WSL platform or cannot be supported +at all due to the constraints of that platform. When writing user-data config +files, please check the following restrictions: + +* File paths in an include file must be Linux absolute paths. + + Users may be surprised with that requirement since the user data files are + inside the Windows file system. 
But remember that cloud-init is still running
+  inside a Linux instance, and the files referenced in the include user data
+  file will be read by cloud-init, thus they must be represented with paths
+  understandable inside the Linux instance. Most users will find their Windows
+  system drive mounted as `/mnt/c`, so let's consider that assumption in the
+  following example:
+
+``C:\Users\Me\.cloud-init\noble-cpp.user-data``
+
+.. code-block::
+
+    #include
+    /mnt/c/Users/me/.cloud-init/config.user-data
+    /mnt/c/Users/me/Downloads/cpp.yaml
+
+When initializing an instance named ``Noble-Cpp``, cloud-init will find that
+include file, referring to files inside the Windows file system, and will load
+them as expected. Loading would fail if the include file contained Windows
+paths instead.
+
+* Network configuration is not supported.
+
+  WSL has full control of the instances' networking features and configuration.
+  A limited set of options for networking is exposed to the user via
+  ``/etc/wsl.conf``. Those options don't fit well with the networking model
+  cloud-init expects or understands.
+
+* Set hostname.
+
+  WSL automatically assigns the instance hostname and any attempt to change it
+  will take effect only until the next boot, when WSL takes over again.
+  The user can set the desired hostname via ``/etc/wsl.conf``, if necessary.
+
+* Default user.
+
+  While creating users through cloud-init works as on any other platform, WSL
+  has the concept of the *default user*, which is the user logged in by
+  default. So, to create the default user with cloud-init, one must supply user
+  data to the :ref:`Users and Groups module ` and write the
+  entry in ``/etc/wsl.conf`` to make that user the default. See the example:
+
+.. code-block:: yaml
+
+    #cloud-config
+    users:
+    - name: j
+      gecos: Agent J
+      groups: users,sudo,netdev,audio
+      sudo: ALL=(ALL) NOPASSWD:ALL
+      shell: /bin/bash
+      lock_passwd: true
+
+    write_files:
+    - path: /etc/wsl.conf
+      append: true
+      contents: |
+        [user]
+        default=j
+
+* Disk setup, Growpart, Mounts and Resizefs.
+
+  The root filesystem must have the layout expected by WSL. Other mount points
+  may work, depending on how the hardware devices are exposed by the Windows
+  host, and fstab processing during boot is subject to configuration via
+  ``/etc/wsl.conf``, so users should expect limited functionality.
+
+* GRUB dpkg.
+
+  WSL controls the boot process, meaning that attempts to install and configure
+  GRUB as any other bootloader won't be effective.
+
+* Resolv conf and update etc/ hosts.
+
+  WSL automatically generates those files by default, unless configured to
+  behave otherwise in ``/etc/wsl.conf``. Overwriting may work, but only
+  until the next reboot.
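+
+As a closing reference for requirement 3 above, a minimal sketch of opting in
+to systemd via :file:`/etc/wsl.conf` (syntax taken from Microsoft's WSL
+documentation; verify it against your WSL version):
+
+.. code-block::
+
+    [boot]
+    systemd=true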
+ diff -Nru cloud-init-23.4.4/doc/rtd/reference/datasources.rst cloud-init-24.1.3/doc/rtd/reference/datasources.rst --- cloud-init-23.4.4/doc/rtd/reference/datasources.rst 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/reference/datasources.rst 2024-03-27 13:14:04.000000000 +0000 @@ -60,9 +60,11 @@ datasources/oracle.rst datasources/ovf.rst datasources/rbxcloud.rst + datasources/scaleway.rst datasources/smartos.rst datasources/upcloud.rst datasources/vmware.rst datasources/vultr.rst + datasources/wsl.rst datasources/zstack.rst diff -Nru cloud-init-23.4.4/doc/rtd/reference/faq.rst cloud-init-24.1.3/doc/rtd/reference/faq.rst --- cloud-init-23.4.4/doc/rtd/reference/faq.rst 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/reference/faq.rst 2024-03-27 13:14:04.000000000 +0000 @@ -15,54 +15,6 @@ - Find a bug? Check out the :ref:`reporting_bugs` topic to find out how to report one -Why did ``cloud-init`` never complete? -====================================== - -To check if ``cloud-init`` is running still, run: - -.. code-block:: shell-session - - cloud-init status - -To wait for ``cloud-init`` to complete, run: - -.. code-block:: shell-session - - cloud-init status --wait - -There are a number of reasons that ``cloud-init`` might never complete. This -list is not exhaustive, but attempts to enumerate potential causes: - -External reasons ----------------- - -- Failed dependent services in the boot. -- Bugs in the kernel or drivers. -- Bugs in external userspace tools that are called by ``cloud-init``. - -Internal reasons ----------------- - -- A command in ``bootcmd`` or ``runcmd`` that never completes (e.g., running - :command:`cloud-init status --wait` will wait forever on itself and never - complete). -- Non-standard configurations that disable timeouts or set extremely high - values ("never" is used in a loose sense here). - -Failing to complete on ``systemd`` ----------------------------------- - -``Cloud-init`` consists of multiple services on ``systemd``. If a service -that ``cloud-init`` depends on stalls, ``cloud-init`` will not continue. -If reporting a bug related to ``cloud-init`` failing to complete on -``systemd``, please make sure to include the following logs. - -.. code-block:: shell-session - - systemd-analyze critical-chain cloud-init.target - journalctl --boot=-1 - systemctl --failed - ``autoinstall``, ``preruncmd``, ``postruncmd`` ============================================== diff -Nru cloud-init-23.4.4/doc/rtd/reference/index.rst cloud-init-24.1.3/doc/rtd/reference/index.rst --- cloud-init-23.4.4/doc/rtd/reference/index.rst 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/reference/index.rst 2024-03-27 13:14:04.000000000 +0000 @@ -21,3 +21,6 @@ network-config.rst base_config_reference.rst datasource_dsname_map.rst + performance_analysis.rst + ubuntu_stable_release_updates.rst + user_files.rst diff -Nru cloud-init-23.4.4/doc/rtd/reference/merging.rst cloud-init-24.1.3/doc/rtd/reference/merging.rst --- cloud-init-23.4.4/doc/rtd/reference/merging.rst 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/reference/merging.rst 2024-03-27 13:14:04.000000000 +0000 @@ -3,19 +3,12 @@ Merging user data sections ************************** -The ability to merge user data sections is a feature that was implemented by -popular request. 
It was identified that there should be a way to specify how
+The ability to merge user data sections provides a way to specify how
 cloud-config YAML "dictionaries" provided as user data are handled when there
 are multiple YAML files to be merged together (e.g., when performing an
 #include).
 
-The previous merging algorithm was very simple and would only overwrite
-(and not append). So, it was decided to create a new and improved way to merge
-dictionaries (and their contained objects) together in a customisable way,
-thus allowing users who provide cloud-config user data to determine exactly
-how their objects will be merged.
-
-For example:
+For example, merging these two configurations:
 
 .. code-block:: yaml
@@ -29,17 +22,7 @@
    - bash3
    - bash4
 
-The previous way of merging the two objects above would result in a final
-cloud-config object that contains the following:
-
-.. code-block:: yaml
-
-   #cloud-config (merged)
-   runcmd:
-     - bash3
-     - bash4
-
-Typically this is not what users want - instead they would prefer:
+This yields the following merged config:
 
 .. code-block:: yaml
@@ -50,10 +33,6 @@
    - bash3
    - bash4
 
-This change makes it easier to combine the various cloud-config objects you
-have into a more useful list. In this way, we reduce the duplication necessary
-to accomplish the same result with the previous method.
-
 Built-in mergers
 ================
diff -Nru cloud-init-23.4.4/doc/rtd/reference/modules.rst cloud-init-24.1.3/doc/rtd/reference/modules.rst
--- cloud-init-23.4.4/doc/rtd/reference/modules.rst 2024-02-27 15:17:52.000000000 +0000
+++ cloud-init-24.1.3/doc/rtd/reference/modules.rst 2024-03-27 13:14:04.000000000 +0000
@@ -41,7 +41,6 @@
 .. automodule:: cloudinit.config.cc_locale
 .. automodule:: cloudinit.config.cc_lxd
 .. automodule:: cloudinit.config.cc_mcollective
-.. automodule:: cloudinit.config.cc_migrator
 .. automodule:: cloudinit.config.cc_mounts
 
 .. _mod-ntp:
@@ -54,7 +53,6 @@
 .. automodule:: cloudinit.config.cc_resizefs
 .. automodule:: cloudinit.config.cc_resolv_conf
 .. automodule:: cloudinit.config.cc_rh_subscription
-.. automodule:: cloudinit.config.cc_rightscale_userdata
 
 .. _mod-rsyslog:
@@ -81,8 +79,8 @@
 .. automodule:: cloudinit.config.cc_ssh_authkey_fingerprints
 .. automodule:: cloudinit.config.cc_ssh_import_id
 .. automodule:: cloudinit.config.cc_timezone
-.. automodule:: cloudinit.config.cc_ubuntu_advantage
 .. automodule:: cloudinit.config.cc_ubuntu_drivers
+.. automodule:: cloudinit.config.cc_ubuntu_pro
 .. automodule:: cloudinit.config.cc_update_etc_hosts
 .. automodule:: cloudinit.config.cc_update_hostname
diff -Nru cloud-init-23.4.4/doc/rtd/reference/network-config-format-v1.rst cloud-init-24.1.3/doc/rtd/reference/network-config-format-v1.rst
--- cloud-init-23.4.4/doc/rtd/reference/network-config-format-v1.rst 2024-02-27 15:17:52.000000000 +0000
+++ cloud-init-24.1.3/doc/rtd/reference/network-config-format-v1.rst 2024-03-27 13:14:04.000000000 +0000
@@ -82,6 +82,13 @@
 configuration time. It's possible to specify a value too large or too small
 for a device, in which case it may be ignored by the device.
 
+``accept-ra: ``
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``accept-ra`` key is a boolean value that specifies whether or not to
+accept Router Advertisements (RA) for this interface. Specifying ``accept-ra``
+is optional.
+
 Physical example
 ^^^^^^^^^^^^^^^^
@@ -252,8 +259,8 @@
 the following keys:
 
 - ``address``: List of IPv4 or IPv6 address of nameservers.
-- ``search``: List of hostnames to include in the :file:`resolv.conf` search
-  path.
+- ``search``: Optional.
List of hostnames to include in the :file:`resolv.conf` + search path. - ``interface``: Optional. Ties the nameserver definition to the specified interface. The value specified here must match the ``name`` of an interface defined in this config. If unspecified, this nameserver will be considered @@ -296,6 +303,8 @@ interface will be handled during boot. - ``address``: IPv4 or IPv6 address. It may include CIDR netmask notation. - ``netmask``: IPv4 subnet mask in dotted format or CIDR notation. +- ``broadcast`` : IPv4 broadcast address in dotted format. This is + only rendered if :file:`/etc/network/interfaces` is used. - ``gateway``: IPv4 address of the default gateway for this subnet. - ``dns_nameservers``: Specify a list of IPv4 dns server IPs to end up in :file:`resolv.conf`. diff -Nru cloud-init-23.4.4/doc/rtd/reference/network-config-format-v2.rst cloud-init-24.1.3/doc/rtd/reference/network-config-format-v2.rst --- cloud-init-23.4.4/doc/rtd/reference/network-config-format-v2.rst 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/reference/network-config-format-v2.rst 2024-03-27 13:14:04.000000000 +0000 @@ -71,7 +71,7 @@ These can dynamically come and go between reboots and even during runtime (hotplugging). In the generic case, they can be selected by ``match:`` rules on desired properties, such as name/name pattern, MAC address, -driver, or device paths. In general these will match any number of +or driver. In general these will match any number of devices (unless they refer to properties which are unique such as the full path or MAC address), so without further knowledge about the hardware, these will always be considered as a group. @@ -149,7 +149,7 @@ name: en*s0 ``set-name: <(scalar)>`` -^^^^^^^^^^^^^^^^^^^^^^^^ +------------------------ When matching on unique properties such as path or MAC, or with additional assumptions such as "there will only ever be one wifi device", match rules @@ -160,7 +160,7 @@ will show an error). ``wakeonlan: <(bool)>`` -^^^^^^^^^^^^^^^^^^^^^^^ +----------------------- Enable wake on LAN. Off by default. @@ -180,17 +180,17 @@ config to the instance. ``dhcp4: <(bool)>`` -^^^^^^^^^^^^^^^^^^^ +------------------- Enable DHCP for IPv4. Off by default. ``dhcp6: <(bool)>`` -^^^^^^^^^^^^^^^^^^^ +------------------- Enable DHCP for IPv6. Off by default. ``dhcp4-overrides and dhcp6-overrides: <(mapping)>`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +---------------------------------------------------- DHCP behaviour overrides. Overrides will only have an effect if the corresponding DHCP type is enabled. Refer to `Netplan#dhcp-overrides`_ @@ -231,7 +231,7 @@ use-routes: false ``addresses: <(sequence of scalars)>`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +-------------------------------------- Add static addresses to the interface in addition to the ones received through DHCP or RA. Each sequence entry is in CIDR notation, i.e., of the @@ -241,7 +241,7 @@ Example: ``addresses: [192.168.14.2/24, 2001:1::1/64]`` ``gateway4: or gateway6: <(scalar)>`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +------------------------------------- Deprecated, see `Netplan#default-routes`_. Set default gateway for IPv4/6, for manual address configuration. 
This @@ -252,14 +252,14 @@ Example for IPv6: ``gateway6: 2001:4::1`` ``mtu: `` -^^^^^^^^^^^^^^^^^^^^^^^^ +------------------------ The MTU key represents a device's Maximum Transmission Unit, the largest size packet or frame, specified in octets (eight-bit bytes), that can be sent in a packet- or frame-based network. Specifying ``mtu`` is optional. ``nameservers: <(mapping)>`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +---------------------------- Set DNS servers and search domains, for manual address configuration. There are two supported fields: ``addresses:`` is a list of IPv4 or IPv6 addresses @@ -272,7 +272,7 @@ addresses: [8.8.8.8, FEDC::1] ``routes: <(sequence of mapping)>`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +----------------------------------- Add device specific routes. Each mapping includes a ``to``, ``via`` key with an IPv4 or IPv6 address as value. ``metric`` is an optional value. @@ -285,16 +285,16 @@ metric: 3 Ethernets ---------- +========= Ethernet device definitions do not support any specific properties beyond the common ones described above. Bonds ------ +===== ``interfaces: <(sequence of scalars)>`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +--------------------------------------- All devices matching this ID list will be added to the bond. @@ -309,7 +309,7 @@ interfaces: [switchports] ``parameters: <(mapping)>`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^ +--------------------------- Customisation parameters for special bonding options. Time values are specified in seconds unless otherwise specified. @@ -450,10 +450,10 @@ Bridges -------- +======= ``interfaces: <(sequence of scalars)>`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +--------------------------------------- All devices matching this ID list will be added to the bridge. @@ -468,7 +468,7 @@ interfaces: [switchports] ``parameters: <(mapping)>`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^ +--------------------------- Customisation parameters for special bridging options. Time values are specified in seconds unless otherwise stated. @@ -523,15 +523,15 @@ used. VLANs ------ +===== ``id: <(scalar)>`` -^^^^^^^^^^^^^^^^^^ +------------------ VLAN ID, a number between 0 and 4094. ``link: <(scalar)>`` -^^^^^^^^^^^^^^^^^^^^ +-------------------- ID of the underlying device definition on which this VLAN gets created. diff -Nru cloud-init-23.4.4/doc/rtd/reference/performance_analysis.rst cloud-init-24.1.3/doc/rtd/reference/performance_analysis.rst --- cloud-init-23.4.4/doc/rtd/reference/performance_analysis.rst 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/reference/performance_analysis.rst 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,110 @@ +.. _performance: + +Performance analysis +******************** + +Occasionally, instances don't perform as well as expected, and so we provide +a simple tool to inspect which operations took the longest during boot and +setup. + +.. _boot_time_analysis: + +:command:`cloud-init analyze` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The `cloud-init` command has an analysis sub-command, :command:`analyze`, which +parses any :file:`cloud-init.log` file into formatted and sorted events. This +analysis reveals the most costly cloud-init operations and which configuration +options are responsible. These subcommands default to reading +:file:`/var/log/cloud-init.log`. + +:command:`analyze show` +^^^^^^^^^^^^^^^^^^^^^^^ + +Parse and organise :file:`cloud-init.log` events by stage and include each +sub-stage granularity with time delta reports. + +.. 
code-block:: shell-session + + $ cloud-init analyze show -i my-cloud-init.log + +Example output: + +.. code-block:: shell-session + + -- Boot Record 01 -- + The total time elapsed since completing an event is printed after the "@" + character. + The time the event takes is printed after the "+" character. + + Starting stage: modules-config + |`->config-snap_config ran successfully @05.47700s +00.00100s + |`->config-ssh-import-id ran successfully @05.47800s +00.00200s + |`->config-locale ran successfully @05.48000s +00.00100s + ... + + +:command:`analyze dump` +^^^^^^^^^^^^^^^^^^^^^^^ + +Parse :file:`cloud-init.log` into event records and return a list of +dictionaries that can be consumed for other reporting needs. + +.. code-block:: shell-session + + $ cloud-init analyze dump -i my-cloud-init.log + +Example output: + +.. code-block:: + + [ + { + "description": "running config modules", + "event_type": "start", + "name": "modules-config", + "origin": "cloudinit", + "timestamp": 1510807493.0 + },... + +:command:`analyze blame` +^^^^^^^^^^^^^^^^^^^^^^^^ + +Parse :file:`cloud-init.log` into event records and sort them based on the +highest time cost for a quick assessment of areas of cloud-init that may +need improvement. + +.. code-block:: shell-session + + $ cloud-init analyze blame -i my-cloud-init.log + +Example output: + +.. code-block:: + + -- Boot Record 11 -- + 00.01300s (modules-final/config-scripts-per-boot) + 00.00400s (modules-final/config-final-message) + ... + +:command:`analyze boot` +^^^^^^^^^^^^^^^^^^^^^^^ + +Make subprocess calls to the kernel in order to get relevant pre-cloud-init +timestamps, such as the kernel start, kernel finish boot, and cloud-init +start. + +.. code-block:: shell-session + + $ cloud-init analyze boot + +Example output: + +.. code-block:: + + -- Most Recent Boot Record -- + Kernel Started at: 2019-06-13 15:59:55.809385 + Kernel ended boot at: 2019-06-13 16:00:00.944740 + Kernel time to boot (seconds): 5.135355 + Cloud-init start: 2019-06-13 16:00:05.738396 + Time between Kernel boot and Cloud-init start (seconds): 4.793656 diff -Nru cloud-init-23.4.4/doc/rtd/reference/ubuntu_stable_release_updates.rst cloud-init-24.1.3/doc/rtd/reference/ubuntu_stable_release_updates.rst --- cloud-init-23.4.4/doc/rtd/reference/ubuntu_stable_release_updates.rst 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/reference/ubuntu_stable_release_updates.rst 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,52 @@ +.. _stable_release_updates: + +Stable Release Updates (SRU) +**************************** + +Once upstream cloud-init has released a new version, the Ubuntu Server team +backports cloud-init to previous releases via a special procedure called a +"Stable Release Update" (`SRU`_). This helps ensure that new versions of +cloud-init on existing releases of Ubuntu will not experience breaking +changes. Breaking changes are allowed when transitioning from one Ubuntu +series to the next (Focal -> Jammy). + +SRU package version +=================== + +Ubuntu cloud-init packages follow the `SRU release version`_ format. + +.. _sru_testing: + +SRU testing for cloud-init +========================== + +The cloud-init project has a specific process it follows when validating +a cloud-init SRU, which is documented in the `CloudinitUpdates`_ wiki page. 
+
+An SRU test of cloud-init performs the following:
+
+    For each Ubuntu SRU, the Ubuntu Server team validates the new
+    version of cloud-init on these platforms: **Amazon EC2, Azure, GCE,
+    OpenStack, Oracle, Softlayer (IBM), LXD using the integration test
+    suite.**
+
+Test process:
+-------------
+
+The `integration test suite`_ used for validation follows these steps:
+
+* :ref:`Install a pre-release version of cloud-init`
+  from the **-proposed** APT pocket (e.g., **jammy-proposed**).
+* Upgrade cloud-init and attempt a clean run of cloud-init to assert
+  that the new version works properly on the specific platform and Ubuntu
+  series.
+* Check for tracebacks and errors in behaviour.
+
+.. LINKS
+.. include:: ../links.txt
+.. _SRU: https://wiki.ubuntu.com/StableReleaseUpdates
+.. _CloudinitUpdates: https://wiki.ubuntu.com/CloudinitUpdates
+.. _new cloud-init bug: https://github.com/canonical/cloud-init/issues
+.. _#cloud-init IRC channel: https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init
+.. _integration test suite: https://github.com/canonical/cloud-init/tree/main/tests/integration_tests
+.. _SRU release version: https://github.com/canonical/ubuntu-maintainers-handbook/blob/main/VersionStrings.md#version-adding-a-change-in-ubuntu-as-a-stable-release-update
diff -Nru cloud-init-23.4.4/doc/rtd/reference/user_files.rst cloud-init-24.1.3/doc/rtd/reference/user_files.rst
--- cloud-init-23.4.4/doc/rtd/reference/user_files.rst	1970-01-01 00:00:00.000000000 +0000
+++ cloud-init-24.1.3/doc/rtd/reference/user_files.rst	2024-03-27 13:14:04.000000000 +0000
@@ -0,0 +1,68 @@
+.. _user_files:
+
+Log and configuration files
+*********************************
+
+Cloud-init uses the filesystem to read inputs and write outputs. These files
+are configuration and log files, respectively. If other methods of
+:ref:`debugging cloud-init` fail, then digging into log files is
+your next step in debugging.
+
+.. _log_files:
+
+Cloud-init log files
+====================
+
+Cloud-init's early boot logic runs before system loggers are available
+or filesystems are mounted. Runtime logs and early boot logs have different
+locations.
+
+Runtime logs
+------------
+
+While booting, ``cloud-init`` logs to two different files:
+
+
+- :file:`/var/log/cloud-init-output.log`:
+  Captures the output from each stage of ``cloud-init`` when it runs.
+- :file:`/var/log/cloud-init.log`:
+  Very detailed log with debugging output, describing each action taken.
+
+Be aware that each time a system boots, new logs are appended to the files in
+:file:`/var/log`. Therefore, the files may contain information from more
+than one boot.
+
+When reviewing these logs, look for errors or Python tracebacks.
+
+Early boot logs
+---------------
+
+Prior to initialization, ``cloud-init`` runs early detection and
+enablement / disablement logic.
+
+- :file:`/run/cloud-init/cloud-init-generator.log`:
+  On systemd systems, this log file describes early boot enablement of
+  cloud-init via the systemd generator. These logs are most useful if trying
+  to figure out why cloud-init did not run.
+- :file:`/run/cloud-init/ds-identify.log`:
+  Contains logs about platform / datasource detection. These logs are most
+  useful if cloud-init did not identify the correct datasource (cloud) to run
+  on.
+
+
+
+.. _configuration_files:
+
+Configuration files
+===================
+
+``Cloud-init`` configuration files are provided in two places:
+
+- :file:`/etc/cloud/cloud.cfg`
+- :file:`/etc/cloud/cloud.cfg.d/*.cfg`
+
+These files can define the modules that run during instance initialisation,
+the datasources to evaluate on boot, as well as other settings.
+
+See the :ref:`configuration sources explanation` and
+:ref:`configuration reference` pages for more details.
diff -Nru cloud-init-23.4.4/doc/rtd/spelling.py cloud-init-24.1.3/doc/rtd/spelling.py
--- cloud-init-23.4.4/doc/rtd/spelling.py	1970-01-01 00:00:00.000000000 +0000
+++ cloud-init-24.1.3/doc/rtd/spelling.py	2024-03-27 13:14:04.000000000 +0000
@@ -0,0 +1,79 @@
+import pathlib
+import re
+
+import enchant
+
+
+class WordListFilter(enchant.tokenize.Filter):
+    word_list = "spelling_word_list.txt"
+    regex_list = "spelling_regex_list.txt"
+
+    def __init__(self, *args, **kwargs):
+        """Use two files for ignoring correctly spelled words
+
+        - spelling_word_list.txt: a list of exact matches to ignore
+        - spelling_regex_list.txt: a list of regular expressions to ignore
+
+        Splits tokens on "/" and "-".
+        """
+        super().__init__(*args, **kwargs)
+        directory = pathlib.Path(__file__).parent
+        with open(directory.joinpath(self.word_list)) as f:
+            lines = f.read().splitlines()
+        self._validate_lines(lines)
+        self.word_set = set(lines)
+        print(f"Loaded {self.word_list}: {lines}")
+        with open(directory.joinpath(self.regex_list)) as f:
+            regex_lines = f.read().splitlines()
+        self.regex_set = set(regex_lines)
+        print(f"Loaded {self.regex_list}: {regex_lines}")
+
+    def _validate_lines(self, lines):
+        """Assert that the word_list file is legible and orderly"""
+        for line in lines:
+            if line != line.lower():
+                raise Exception(
+                    f"Uppercase characters in {self.word_list} detected. "
+                    "Please use lowercase characters for legibility."
+                )
+        if lines != sorted(lines):
+            first_misordered = next_item = previous_item = None
+            for item_a, item_b in zip(lines, sorted(lines)):
+                if first_misordered:
+                    next_item = item_a
+                    break
+                elif item_a != item_b:
+                    first_misordered = item_a
+                else:
+                    previous_item = item_a
+            unordered = (
+                f"[..., {previous_item}, {first_misordered}, "
+                f"{next_item}, ...]"
+            )
+            raise Exception(
+                f"Unsorted {self.word_list} detected. "
+                f"Please sort for legibility. 
Unordered list: {unordered}" + ) + + def _in_word_list(self, word): + """Lowercase match the set of words in spelling_word_list.txt""" + return word.lower() in self.word_set + + def _in_word_regex(self, word): + """Regex match the expressions in spelling_regex_list.txt""" + for regex in self.regex_set: + out = re.search(regex, word) + if out: + return True + + def _skip(self, word): + """Skip words and regex expressions in the allowlist files""" + return self._in_word_list(word) or self._in_word_regex(word) + + def _split(self, word): + """split words into sub-tokens on - and /""" + if "-" in word or "/" in word: + for i, token in enumerate(re.split("-|/", word)): + if self._skip(token): + continue + yield token, i diff -Nru cloud-init-23.4.4/doc/rtd/spelling_regex_list.txt cloud-init-24.1.3/doc/rtd/spelling_regex_list.txt --- cloud-init-23.4.4/doc/rtd/spelling_regex_list.txt 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/spelling_regex_list.txt 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,14 @@ +\.py$ +\.sources$ +\.list$ +\.yml$ +\.yaml$ +cloud-init +ami\- +ubuntu\: +IPv[46] +^/ +$/ +ecdsa-sha2-nistp +ed25519 +1.0/config/user diff -Nru cloud-init-23.4.4/doc/rtd/spelling_word_list.txt cloud-init-24.1.3/doc/rtd/spelling_word_list.txt --- cloud-init-23.4.4/doc/rtd/spelling_word_list.txt 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/spelling_word_list.txt 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,292 @@ +akamai +alibaba +almalinux +ami +analyze +ansible +apk +apport +ar +arg +args +artifacts +authkeys +autoinstaller +autospecced +avaliable +aways +aws +backend +backends +baseurl +behavior +bigstep +boolean +bootcmd +boothook +btrfs +busybox +byobu +cd +centos +chown +chrony +cleanup +cloudinit +cloudlinux +cloudplatform +conf +config +configdrive +configs +copybutton +cpu +csr +datasource +datasources +deadbeef +debconf +debian +devops +dhcp +dicts +dir +distro +dmesg +dmi +dns +doas +docstrings +dotnet +downstreams +dpkg +dropbearssh +dsa +dsmode +dsname +dss +eal +ec +ecdsa +ed +edwardo +errored +es +esm +etc +eth +ethernet +eurolinux +execve +exoscale +fabio +faillog +favor +favorite +fips +firstboot +flavors +flexibile +fqdn +freebsd +freenode +fs-freq +fstab +galic +gce +gotchas +gpart +growpart +gz +hacktoberfest +hetzner +honored +hostname +hpc +ids +inexhaustive +init +ip +ipv +javascript +jinja +joyent +js +json +juju +kenv +keygen +keytypes +kyler +labeled +lastlog +libvirt +linux +livepatch +localdomain +lxd +maipo +manpage +maxage +maxsize +mcollective +microos +miraclelinux +mkpasswd +mkswap +mntops +mountpoint +mountpoints +multipass +nameserver +nameservers +nd +neovim +netmask +netplan +networkd +nistp +nocloud +nonexistent +ntp +ntpd +ntpdate +openbsd +opendoas +openeuler +openmandriva +openntpd +openstack +opensuse +outscale +ovf +params +passno +passthrough +passw +pem +pid +pipelining +pki +playbook +plugins +postinstall +poweroff +ppc +pre +precompiled +preseed +proxmox +puppetlabs +puppetserver +py +pycloudlib +pytest +qcow +querystring +quickstart +rackspace +rb +rbx +rc +rd +readinessprobes +redhat +referesh +regex +renderes +repodata +repositoy +resizefs +resolv +restructuredtext +rhel +rhsm +rpctool +rsa +rsyslog +runcmd +scaleway +seedurl +serverurl +shortid +sigonly +sk +sle +sles +smtp +snapd +softlayer +somedir +spelunking +sr +sshd +ssk +ssl +st +stderr +stdin +subclassed +subiquity +subnet +subplatform +sudo +sysconfig +syslogd +systemd +tcp +teardown +th +timeframe +timesyncd +timezone +tinyssh +tlb +tmp +tmpfiles 
+tracebacks +transactional +ua +ubuntu +udev +udp +un +unconfigured +unentitled +unredacted +unrendered +url +urls +userdata +userspace +usr +util +validator +var +vcloud +ve +vendordata +veth +vfstype +virtuozzo +vm +vpc +vsphere +vultr +walkthrough +webserver +wg +wgx +whitepapers +whitespace +wifi +wireguard +xor +yakkety +yaml +zadara +zoneinfo +zstack +zypo +zypp +zypper diff -Nru cloud-init-23.4.4/doc/rtd/tutorial/qemu-script.sh cloud-init-24.1.3/doc/rtd/tutorial/qemu-script.sh --- cloud-init-23.4.4/doc/rtd/tutorial/qemu-script.sh 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/tutorial/qemu-script.sh 2024-03-27 13:14:04.000000000 +0000 @@ -21,7 +21,6 @@ cat << EOF > meta-data instance-id: someid/somehostname -local-hostname: jammy EOF touch vendor-data diff -Nru cloud-init-23.4.4/doc/rtd/tutorial/qemu.rst cloud-init-24.1.3/doc/rtd/tutorial/qemu.rst --- cloud-init-23.4.4/doc/rtd/tutorial/qemu.rst 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/doc/rtd/tutorial/qemu.rst 2024-03-27 13:14:04.000000000 +0000 @@ -143,7 +143,6 @@ $ cat << EOF > meta-data instance-id: someid/somehostname - local-hostname: jammy EOF diff -Nru cloud-init-23.4.4/doc-requirements.txt cloud-init-24.1.3/doc-requirements.txt --- cloud-init-23.4.4/doc-requirements.txt 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/doc-requirements.txt 2024-03-27 13:14:04.000000000 +0000 @@ -6,3 +6,4 @@ sphinx-design sphinx-copybutton sphinx-notfound-page +sphinxcontrib-spelling diff -Nru cloud-init-23.4.4/integration-requirements.txt cloud-init-24.1.3/integration-requirements.txt --- cloud-init-23.4.4/integration-requirements.txt 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/integration-requirements.txt 2024-03-27 13:14:04.000000000 +0000 @@ -11,3 +11,4 @@ packaging passlib +coverage==7.2.7 # Last version supported in Python 3.7 diff -Nru cloud-init-23.4.4/packages/bddeb cloud-init-24.1.3/packages/bddeb --- cloud-init-23.4.4/packages/bddeb 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/packages/bddeb 2024-03-27 13:14:04.000000000 +0000 @@ -90,8 +90,8 @@ # Fill in the change log template templater.render_to_file( - os.path.abspath(os.path.join( - find_root(), "packages", "debian", "changelog.in") + os.path.abspath( + os.path.join(find_root(), "packages", "debian", "changelog.in") ), os.path.abspath(os.path.join(deb_dir, "changelog")), params=templ_data, @@ -108,8 +108,9 @@ requires = ["cloud-utils | cloud-guest-utils"] if cloud_util_deps else [] # We consolidate all deps as Build-Depends as our package build runs all # tests so we need all runtime dependencies anyway. 
- # NOTE: python package was moved to the front after debuild -S would fail with - # 'Please add apropriate interpreter' errors (as in debian bug 861132) + # NOTE: python package was moved to the front after debuild -S would fail + # with 'Please add appropriate interpreter' errors + # (as in debian bug 861132) requires.extend(["python3"] + reqs + test_reqs) if templ_data["debian_release"] in ( "buster", @@ -127,11 +128,14 @@ if templ_data["debian_release"] == "bionic": # Bionic doesn't support debhelper-compat > 11 build_deps += ",debhelper-compat (= 11)" + elif templ_data["debian_release"] == "focal": + # Focal doesn't support debhelper-compat > 12 + build_deps += ",debhelper-compat (= 12)" else: build_deps += f",{debhelper_matches[-1]}" templater.render_to_file( - os.path.abspath(os.path.join( - find_root(), "packages", "debian", "control.in") + os.path.abspath( + os.path.join(find_root(), "packages", "debian", "control.in") ), os.path.abspath(os.path.join(deb_dir, "control")), params={"build_depends": build_deps}, @@ -178,14 +182,14 @@ "-v", "--verbose", dest="verbose", - help=("run verbosely" " (default: %(default)s)"), + help=("run verbosely (default: %(default)s)"), default=False, action="store_true", ) parser.add_argument( "--cloud-utils", dest="cloud_utils", - help=("depend on cloud-utils package" " (default: %(default)s)"), + help=("depend on cloud-utils package (default: %(default)s)"), default=False, action="store_true", ) @@ -193,7 +197,7 @@ parser.add_argument( "--init-system", dest="init_system", - help=("build deb with INIT_SYSTEM=xxx" " (default: %(default)s"), + help=("build deb with INIT_SYSTEM=xxx (default: %(default)s"), default=os.environ.get("INIT_SYSTEM", "systemd"), ) diff -Nru cloud-init-23.4.4/packages/brpm cloud-init-24.1.3/packages/brpm --- cloud-init-23.4.4/packages/brpm 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/packages/brpm 2024-03-27 13:14:04.000000000 +0000 @@ -14,29 +14,32 @@ top_dir = os.environ.get("CLOUD_INIT_TOP_D", None) if top_dir is None: top_dir = os.path.dirname( - os.path.dirname(os.path.abspath(sys.argv[0]))) - if os.path.isfile(os.path.join(top_dir, 'setup.py')): + os.path.dirname(os.path.abspath(sys.argv[0])) + ) + if os.path.isfile(os.path.join(top_dir, "setup.py")): return os.path.abspath(top_dir) - raise OSError(("Unable to determine where your cloud-init topdir is." - " set CLOUD_INIT_TOP_D?")) + raise OSError( + ( + "Unable to determine where your cloud-init topdir is." + " set CLOUD_INIT_TOP_D?" + ) + ) if "avoid-pep8-E402-import-not-top-of-file": # Use the util functions from cloudinit sys.path.insert(0, find_root()) - from cloudinit import subp - from cloudinit import templater - from cloudinit import util + from cloudinit import subp, templater, util # Subdirectories of the ~/rpmbuild dir -RPM_BUILD_SUBDIRS = ['BUILD', 'RPMS', 'SOURCES', 'SPECS', 'SRPMS'] +RPM_BUILD_SUBDIRS = ["BUILD", "RPMS", "SOURCES", "SPECS", "SRPMS"] def run_helper(helper, args=None, strip=True): if args is None: args = [] - cmd = [os.path.abspath(os.path.join(find_root(), 'tools', helper))] + args + cmd = [os.path.abspath(os.path.join(find_root(), "tools", helper))] + args (stdout, _stderr) = subp.subp(cmd) if strip: stdout = stdout.strip() @@ -44,25 +47,29 @@ def read_dependencies(distro): - """Returns the Python package depedencies from requirements.txt files. + """Returns the Python package dependencies from requirements.txt files. 
@returns a tuple of (build_deps, run_deps, test_deps) """ build_deps = run_helper( - 'read-dependencies',args=[ - '--distro', distro, '--build-requires']).splitlines() + "read-dependencies", args=["--distro", distro, "--build-requires"] + ).splitlines() run_deps = run_helper( - 'read-dependencies', args=[ - '--distro', distro, '--runtime-requires']).splitlines() + "read-dependencies", args=["--distro", distro, "--runtime-requires"] + ).splitlines() test_deps = run_helper( - 'read-dependencies', args=[ - '--requirements-file', 'test-requirements.txt', - '--system-pkg-names']).splitlines() + "read-dependencies", + args=[ + "--requirements-file", + "test-requirements.txt", + "--system-pkg-names", + ], + ).splitlines() return (build_deps, run_deps, test_deps) def read_version(): - return json.loads(run_helper('read-version', ['--json'])) + return json.loads(run_helper("read-version", ["--json"])) def generate_spec_contents(args, version_data, tmpl_fn, top_dir, arc_fn): @@ -71,72 +78,95 @@ subs = {} if args.sub_release is not None: - subs['subrelease'] = str(args.sub_release) + subs["subrelease"] = str(args.sub_release) else: - subs['subrelease'] = "" + subs["subrelease"] = "" - subs['archive_name'] = arc_fn - subs['source_name'] = os.path.basename(arc_fn).replace('.tar.gz', '') + subs["archive_name"] = arc_fn + subs["source_name"] = os.path.basename(arc_fn).replace(".tar.gz", "") subs.update(version_data) # rpm does not like '-' in the Version, so change # X.Y.Z-N-gHASH to X.Y.Z+N.gHASH - if "-" in version_data.get('version'): - ver, commits, ghash = version_data['version'].split("-") + if "-" in version_data.get("version"): + ver, commits, ghash = version_data["version"].split("-") rpm_upstream_version = "%s+%s.%s" % (ver, commits, ghash) else: - rpm_upstream_version = version_data['version'] - subs['rpm_upstream_version'] = rpm_upstream_version + rpm_upstream_version = version_data["version"] + subs["rpm_upstream_version"] = rpm_upstream_version build_deps, run_deps, test_deps = read_dependencies(distro=args.distro) - subs['buildrequires'] = build_deps + test_deps - subs['requires'] = run_deps + subs["buildrequires"] = build_deps + test_deps + subs["requires"] = run_deps - if args.boot == 'sysvinit': - subs['sysvinit'] = True + if args.boot == "sysvinit": + subs["sysvinit"] = True else: - subs['sysvinit'] = False + subs["sysvinit"] = False - if args.boot == 'systemd': - subs['systemd'] = True + if args.boot == "systemd": + subs["systemd"] = True else: - subs['systemd'] = False + subs["systemd"] = False - subs['init_sys'] = args.boot - subs['patches'] = [os.path.basename(p) for p in args.patches] + subs["init_sys"] = args.boot + subs["patches"] = [os.path.basename(p) for p in args.patches] return templater.render_from_file(tmpl_fn, params=subs) def main(): parser = argparse.ArgumentParser() - parser.add_argument("-d", "--distro", dest="distro", - help="select distro (default: %(default)s)", - metavar="DISTRO", default='redhat', - choices=('redhat', 'suse')) - parser.add_argument('--srpm', - help='Produce a source rpm', - action='store_true') - parser.add_argument("-b", "--boot", dest="boot", - help="select boot type (default: %(default)s)", - metavar="TYPE", default='sysvinit', - choices=('sysvinit', 'systemd')) - parser.add_argument("-v", "--verbose", dest="verbose", - help=("run verbosely" - " (default: %(default)s)"), - default=False, - action='store_true') - parser.add_argument('-s', "--sub-release", dest="sub_release", - metavar="RELEASE", - help=("a 'internal' release number to concat" 
-                        " with the bzr version number to form"
-                        " the final version number"),
-                        type=int,
-                        default=None)
-    parser.add_argument("-p", "--patch", dest="patches",
-                        help=("include the following patch when building"),
-                        default=[],
-                        action='append')
+    parser.add_argument(
+        "-d",
+        "--distro",
+        dest="distro",
+        help="select distro (default: %(default)s)",
+        metavar="DISTRO",
+        default="redhat",
+        choices=("redhat", "suse"),
+    )
+    parser.add_argument(
+        "--srpm", help="Produce a source rpm", action="store_true"
+    )
+    parser.add_argument(
+        "-b",
+        "--boot",
+        dest="boot",
+        help="select boot type (default: %(default)s)",
+        metavar="TYPE",
+        default="sysvinit",
+        choices=("sysvinit", "systemd"),
+    )
+    parser.add_argument(
+        "-v",
+        "--verbose",
+        dest="verbose",
+        help=("run verbosely (default: %(default)s)"),
+        default=False,
+        action="store_true",
+    )
+    parser.add_argument(
+        "-s",
+        "--sub-release",
+        dest="sub_release",
+        metavar="RELEASE",
+        help=(
+            "a 'internal' release number to concat"
+            " with the bzr version number to form"
+            " the final version number"
+        ),
+        type=int,
+        default=None,
+    )
+    parser.add_argument(
+        "-p",
+        "--patch",
+        dest="patches",
+        help=("include the following patch when building"),
+        default=[],
+        action="append",
+    )
     args = parser.parse_args()
     capture = True
     if args.verbose:
@@ -144,59 +174,82 @@
     workdir = None
     try:
-        workdir = tempfile.mkdtemp(prefix='rpmbuild')
-        os.environ['HOME'] = workdir
-        topdir = os.path.join(workdir, 'rpmbuild')
-        build_dirs = [os.path.join(topdir, dir)
-                      for dir in RPM_BUILD_SUBDIRS]
+        workdir = tempfile.mkdtemp(prefix="rpmbuild")
+        os.environ["HOME"] = workdir
+        topdir = os.path.join(workdir, "rpmbuild")
+        build_dirs = [os.path.join(topdir, dir) for dir in RPM_BUILD_SUBDIRS]
         util.ensure_dirs(build_dirs)
         version_data = read_version()

         # Archive the code
-        archive_fn = "cloud-init-%s.tar.gz" % version_data['version_long']
-        real_archive_fn = os.path.join(topdir, 'SOURCES', archive_fn)
+        archive_fn = "cloud-init-%s.tar.gz" % version_data["version_long"]
+        real_archive_fn = os.path.join(topdir, "SOURCES", archive_fn)
         archive_fn = run_helper(
-            'make-tarball', ['--long', '--output=' + real_archive_fn])
+            "make-tarball", ["--long", "--output=" + real_archive_fn]
+        )
         print("Archived the code in %r" % (real_archive_fn))

         # Form the spec file to be used
-        tmpl_fn = os.path.abspath(os.path.join(find_root(), 'packages',
-                                               args.distro, 'cloud-init.spec.in'))
-        contents = generate_spec_contents(args, version_data, tmpl_fn, topdir,
-                                          os.path.basename(archive_fn))
-        spec_fn = os.path.abspath(os.path.join(topdir, 'SPECS', 'cloud-init.spec'))
+        tmpl_fn = os.path.abspath(
+            os.path.join(
+                find_root(), "packages", args.distro, "cloud-init.spec.in"
+            )
+        )
+        contents = generate_spec_contents(
+            args, version_data, tmpl_fn, topdir, os.path.basename(archive_fn)
+        )
+        spec_fn = os.path.abspath(
+            os.path.join(topdir, "SPECS", "cloud-init.spec")
+        )
         util.write_file(spec_fn, contents)
         print("Created spec file at %r" % (spec_fn))
         for p in args.patches:
-            util.copy(p, os.path.abspath(os.path.join(topdir, 'SOURCES', os.path.basename(p))))
+            util.copy(
+                p,
+                os.path.abspath(
+                    os.path.join(topdir, "SOURCES", os.path.basename(p))
+                ),
+            )

         # Now build it!
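        # Note: rpmbuild's "-bs" flag builds only the source RPM (with
        # "--nodeps" skipping build-dependency checks), while "-ba" builds
        # both the binary and source packages; see rpmbuild(8).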
        print("Running 'rpmbuild' in %r" % (topdir))
         if args.srpm:
-            cmd = ['rpmbuild', '-bs', '--nodeps', spec_fn]
+            cmd = ["rpmbuild", "-bs", "--nodeps", spec_fn]
         else:
-            cmd = ['rpmbuild', '-ba', spec_fn]
+            cmd = ["rpmbuild", "-ba", spec_fn]
         subp.subp(cmd, capture=capture)

         # Copy the items built to our local dir
         globs = []
-        globs.extend(glob.glob("%s/*.rpm" %
-                               (os.path.abspath(os.path.join(topdir, 'RPMS', 'noarch')))))
-        globs.extend(glob.glob("%s/*.rpm" %
-                               (os.path.abspath(os.path.join(topdir, 'RPMS',
-                                                             'x86_64')))))
-        globs.extend(glob.glob("%s/*.rpm" %
-                               (os.path.abspath(os.path.join(topdir,
-                                                             'RPMS')))))
-        globs.extend(glob.glob("%s/*.rpm" %
-                               (os.path.abspath(os.path.join(topdir,
-                                                             'SRPMS')))))
+        globs.extend(
+            glob.glob(
+                "%s/*.rpm"
+                % (os.path.abspath(os.path.join(topdir, "RPMS", "noarch")))
+            )
+        )
+        globs.extend(
+            glob.glob(
+                "%s/*.rpm"
+                % (os.path.abspath(os.path.join(topdir, "RPMS", "x86_64")))
+            )
+        )
+        globs.extend(
+            glob.glob(
+                "%s/*.rpm" % (os.path.abspath(os.path.join(topdir, "RPMS")))
+            )
+        )
+        globs.extend(
+            glob.glob(
+                "%s/*.rpm" % (os.path.abspath(os.path.join(topdir, "SRPMS")))
+            )
+        )
         for rpm_fn in globs:
-            tgt_fn = os.path.abspath(os.path.join(os.getcwd(),
-                                                  os.path.basename(rpm_fn)))
+            tgt_fn = os.path.abspath(
+                os.path.join(os.getcwd(), os.path.basename(rpm_fn))
+            )
             shutil.move(rpm_fn, tgt_fn)
             print("Wrote out %s package %r" % (args.distro, tgt_fn))
     finally:
@@ -206,5 +259,5 @@
     return 0


-if __name__ == '__main__':
+if __name__ == "__main__":
     sys.exit(main())
diff -Nru cloud-init-23.4.4/packages/debian/cloud-init.logrotate cloud-init-24.1.3/packages/debian/cloud-init.logrotate
--- cloud-init-23.4.4/packages/debian/cloud-init.logrotate	1970-01-01 00:00:00.000000000 +0000
+++ cloud-init-24.1.3/packages/debian/cloud-init.logrotate	2024-03-27 13:14:04.000000000 +0000
@@ -0,0 +1,11 @@
+/var/log/cloud-init*.log
+{
+ su root root
+ missingok
+ nocreate
+ notifempty
+ rotate 6
+ compress
+ delaycompress
+ size 1M
+}
diff -Nru cloud-init-23.4.4/packages/redhat/cloud-init.spec.in cloud-init-24.1.3/packages/redhat/cloud-init.spec.in
--- cloud-init-23.4.4/packages/redhat/cloud-init.spec.in	2024-02-27 15:17:52.000000000 +0000
+++ cloud-init-24.1.3/packages/redhat/cloud-init.spec.in	2024-03-27 13:14:04.000000000 +0000
@@ -184,13 +184,12 @@
 %{_bindir}/cloud-id*

 # Docs
-%doc LICENSE ChangeLog TODO.rst requirements.txt
+%doc LICENSE ChangeLog requirements.txt
 %doc %{_defaultdocdir}/cloud-init/*

 # Configs
 %config(noreplace) %{_sysconfdir}/cloud/cloud.cfg
 %dir %{_sysconfdir}/cloud/clean.d
-%config(noreplace) %{_sysconfdir}/cloud/clean.d/README
 %dir %{_sysconfdir}/cloud/cloud.cfg.d
 %config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/*.cfg
 %config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/README
diff -Nru cloud-init-23.4.4/packages/suse/cloud-init.spec.in cloud-init-24.1.3/packages/suse/cloud-init.spec.in
--- cloud-init-23.4.4/packages/suse/cloud-init.spec.in	2024-02-27 15:17:52.000000000 +0000
+++ cloud-init-24.1.3/packages/suse/cloud-init.spec.in	2024-03-27 13:14:04.000000000 +0000
@@ -115,7 +115,6 @@

 # Configs
 %dir %{_sysconfdir}/cloud/clean.d
-%config(noreplace) %{_sysconfdir}/cloud/clean.d/README
 %config(noreplace) %{_sysconfdir}/cloud/cloud.cfg
 %dir %{_sysconfdir}/cloud/cloud.cfg.d
 %config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/*.cfg
diff -Nru cloud-init-23.4.4/pyproject.toml cloud-init-24.1.3/pyproject.toml
--- cloud-init-23.4.4/pyproject.toml	2024-02-27 15:17:52.000000000 +0000
+++ cloud-init-24.1.3/pyproject.toml	2024-03-27 13:14:04.000000000 +0000
@@ -4,6 +4,8
@@ [tool.black] line-length = 79 +include = '(brpm|bddeb|\.py)$' + [tool.isort] profile = "black" diff -Nru cloud-init-23.4.4/requirements.txt cloud-init-24.1.3/requirements.txt --- cloud-init-23.4.4/requirements.txt 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/requirements.txt 2024-03-27 13:14:04.000000000 +0000 @@ -28,7 +28,7 @@ jsonpatch # For validating cloud-config sections per schema definitions -jsonschema<=4.20.0 +jsonschema # Used by DataSourceVMware to inspect the host's network configuration during # the "setup()" function. diff -Nru cloud-init-23.4.4/setup.py cloud-init-24.1.3/setup.py --- cloud-init-23.4.4/setup.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/setup.py 2024-03-27 13:14:04.000000000 +0000 @@ -132,10 +132,16 @@ for f in glob("sysvinit/netbsd/*") if is_f(f) ], + "sysvinit_openbsd": lambda: [ + render_tmpl(f, mode=0o755) + for f in glob("sysvinit/openbsd/*") + if is_f(f) + ], "sysvinit_deb": lambda: [f for f in glob("sysvinit/debian/*") if is_f(f)], "sysvinit_openrc": lambda: [ - f for f in glob("sysvinit/gentoo/*") if is_f(f) + f for f in glob("sysvinit/openrc/*") if is_f(f) ], + "sysvinit_openrc.dep": lambda: ["tools/cloud-init-hotplugd"], "systemd": lambda: [ render_tmpl(f) for f in ( @@ -156,8 +162,10 @@ "sysvinit": "etc/rc.d/init.d", "sysvinit_freebsd": "usr/local/etc/rc.d", "sysvinit_netbsd": "usr/local/etc/rc.d", + "sysvinit_openbsd": "etc/rc.d", "sysvinit_deb": "etc/init.d", "sysvinit_openrc": "etc/init.d", + "sysvinit_openrc.dep": "usr/lib/cloud-init", "systemd": pkg_config_read("systemd", "systemdsystemunitdir"), "systemd.generators": pkg_config_read( "systemd", "systemdsystemgeneratordir" @@ -172,7 +180,7 @@ ETC = "etc" USR_LIB_EXEC = "usr/lib" LIB = "lib" -if os.uname()[0] in ["FreeBSD", "DragonFly"]: +if os.uname()[0] in ["FreeBSD", "DragonFly", "OpenBSD"]: USR = "usr/local" USR_LIB_EXEC = "usr/local/lib" elif os.path.isfile("/etc/redhat-release"): diff -Nru cloud-init-23.4.4/systemd/cloud-config.service.tmpl cloud-init-24.1.3/systemd/cloud-config.service.tmpl --- cloud-init-23.4.4/systemd/cloud-config.service.tmpl 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/systemd/cloud-config.service.tmpl 2024-03-27 13:14:04.000000000 +0000 @@ -2,7 +2,6 @@ [Unit] Description=Apply the settings specified in cloud-config After=network-online.target cloud-config.target -After=snapd.seeded.service Before=systemd-user-sessions.service Wants=network-online.target cloud-config.target ConditionPathExists=!/etc/cloud/cloud-init.disabled diff -Nru cloud-init-23.4.4/systemd/cloud-final.service.tmpl cloud-init-24.1.3/systemd/cloud-final.service.tmpl --- cloud-init-23.4.4/systemd/cloud-final.service.tmpl 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/systemd/cloud-final.service.tmpl 2024-03-27 13:14:04.000000000 +0000 @@ -1,7 +1,7 @@ ## template:jinja [Unit] Description=Execute cloud user/final scripts -After=network-online.target cloud-config.service rc-local.service +After=network-online.target time-sync.target cloud-config.service rc-local.service {% if variant in ["ubuntu", "unknown", "debian"] %} After=multi-user.target Before=apt-daily.service diff -Nru cloud-init-23.4.4/systemd/cloud-init-hotplugd.service cloud-init-24.1.3/systemd/cloud-init-hotplugd.service --- cloud-init-23.4.4/systemd/cloud-init-hotplugd.service 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/systemd/cloud-init-hotplugd.service 2024-03-27 13:14:04.000000000 +0000 @@ -1,6 +1,6 @@ # Paired with cloud-init-hotplugd.socket to read from the FIFO # 
/run/cloud-init/hook-hotplug-cmd which is created during a udev network -# add or remove event as processed by 10-cloud-init-hook-hotplug.rules. +# add or remove event as processed by 90-cloud-init-hook-hotplug.rules. # On start, read args from the FIFO, process and provide structured arguments # to `cloud-init devel hotplug-hook` which will setup or teardown network diff -Nru cloud-init-23.4.4/systemd/cloud-init-hotplugd.socket cloud-init-24.1.3/systemd/cloud-init-hotplugd.socket --- cloud-init-23.4.4/systemd/cloud-init-hotplugd.socket 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/systemd/cloud-init-hotplugd.socket 2024-03-27 13:14:04.000000000 +0000 @@ -1,6 +1,6 @@ # cloud-init-hotplugd.socket listens on the FIFO file # /run/cloud-init/hook-hotplug-cmd which is created during a udev network -# add or remove event as processed by 10-cloud-init-hook-hotplug.rules. +# add or remove event as processed by 90-cloud-init-hook-hotplug.rules. # Known bug with an enforcing SELinux policy: LP: #1936229 [Unit] diff -Nru cloud-init-23.4.4/sysvinit/freebsd/cloudinitlocal.tmpl cloud-init-24.1.3/sysvinit/freebsd/cloudinitlocal.tmpl --- cloud-init-23.4.4/sysvinit/freebsd/cloudinitlocal.tmpl 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/sysvinit/freebsd/cloudinitlocal.tmpl 2024-03-27 13:14:04.000000000 +0000 @@ -4,7 +4,7 @@ # PROVIDE: cloudinitlocal {# ``cloudinitlocal`` purposefully does not depend on ``dsidentify``. -That makes it easy for image builders to create images without ``dsidentify``. +That makes it easy for image builders to disable ``dsidentify``. #} # REQUIRE: ldconfig mountcritlocal # BEFORE: NETWORKING cloudinit cloudconfig cloudfinal diff -Nru cloud-init-23.4.4/sysvinit/gentoo/cloud-config cloud-init-24.1.3/sysvinit/gentoo/cloud-config --- cloud-init-23.4.4/sysvinit/gentoo/cloud-config 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/sysvinit/gentoo/cloud-config 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -#!/sbin/openrc-run - -depend() { - after cloud-init-local - after cloud-init - before cloud-final - provide cloud-config -} - -start() { - if grep -q 'cloud-init=disabled' /proc/cmdline; then - ewarn "$RC_SVCNAME is disabled via /proc/cmdline." - elif test -e /etc/cloud/cloud-init.disabled; then - ewarn "$RC_SVCNAME is disabled via cloud-init.disabled file" - else - cloud-init modules --mode config - fi - eend 0 -} diff -Nru cloud-init-23.4.4/sysvinit/gentoo/cloud-final cloud-init-24.1.3/sysvinit/gentoo/cloud-final --- cloud-init-23.4.4/sysvinit/gentoo/cloud-final 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/sysvinit/gentoo/cloud-final 1970-01-01 00:00:00.000000000 +0000 @@ -1,17 +0,0 @@ -#!/sbin/openrc-run - -depend() { - after cloud-config - provide cloud-final -} - -start() { - if grep -q 'cloud-init=disabled' /proc/cmdline; then - ewarn "$RC_SVCNAME is disabled via /proc/cmdline." 
- elif test -e /etc/cloud/cloud-init.disabled; then - ewarn "$RC_SVCNAME is disabled via cloud-init.disabled file" - else - cloud-init modules --mode final - fi - eend 0 -} diff -Nru cloud-init-23.4.4/sysvinit/gentoo/cloud-init cloud-init-24.1.3/sysvinit/gentoo/cloud-init --- cloud-init-23.4.4/sysvinit/gentoo/cloud-init 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/sysvinit/gentoo/cloud-init 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -#!/sbin/openrc-run -# add depends for network, dns, fs etc -depend() { - after cloud-init-local - after net - before cloud-config - provide cloud-init -} - -start() { - if grep -q 'cloud-init=disabled' /proc/cmdline; then - ewarn "$RC_SVCNAME is disabled via /proc/cmdline." - elif test -e /etc/cloud/cloud-init.disabled; then - ewarn "$RC_SVCNAME is disabled via cloud-init.disabled file" - else - cloud-init init - fi - eend 0 -} diff -Nru cloud-init-23.4.4/sysvinit/gentoo/cloud-init-local cloud-init-24.1.3/sysvinit/gentoo/cloud-init-local --- cloud-init-23.4.4/sysvinit/gentoo/cloud-init-local 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/sysvinit/gentoo/cloud-init-local 1970-01-01 00:00:00.000000000 +0000 @@ -1,20 +0,0 @@ -#!/sbin/openrc-run - -depend() { - after localmount - before net - before cloud-init - provide cloud-init-local -} - -start() { - if grep -q 'cloud-init=disabled' /proc/cmdline; then - ewarn "$RC_SVCNAME is disabled via /proc/cmdline." - elif test -e /etc/cloud/cloud-init.disabled; then - ewarn "$RC_SVCNAME is disabled via cloud-init.disabled file" - else - cloud-init init --local - fi - - eend 0 -} diff -Nru cloud-init-23.4.4/sysvinit/netbsd/cloudconfig.tmpl cloud-init-24.1.3/sysvinit/netbsd/cloudconfig.tmpl --- cloud-init-23.4.4/sysvinit/netbsd/cloudconfig.tmpl 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/sysvinit/netbsd/cloudconfig.tmpl 2024-03-27 13:14:04.000000000 +0000 @@ -11,9 +11,10 @@ start_cmd="start_cloud_init" start_cloud_init() { - test -e {{prefix}}/etc/cloud/cloud-init.disabled \ - && warn "cloud-init disabled by cloud-init.disabled file" \ - && exit 0 + if test -e {{prefix}}/etc/cloud/cloud-init.disabled ; then + warn "cloud-init disabled by cloud-init.disabled file" + exit 0 + fi {{prefix}}/bin/cloud-init modules --mode config } diff -Nru cloud-init-23.4.4/sysvinit/netbsd/cloudfinal.tmpl cloud-init-24.1.3/sysvinit/netbsd/cloudfinal.tmpl --- cloud-init-23.4.4/sysvinit/netbsd/cloudfinal.tmpl 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/sysvinit/netbsd/cloudfinal.tmpl 2024-03-27 13:14:04.000000000 +0000 @@ -10,9 +10,10 @@ start_cmd="start_cloud_init" start_cloud_init() { - test -e {{prefix}}/etc/cloud/cloud-init.disabled \ - && warn "cloud-init disabled by cloud-init.disabled file" \ - && exit 0 + if test -e {{prefix}}/etc/cloud/cloud-init.disabled ; then + warn "cloud-init disabled by cloud-init.disabled file" + exit 0 + fi {{prefix}}/bin/cloud-init modules --mode final } diff -Nru cloud-init-23.4.4/sysvinit/netbsd/cloudinit.tmpl cloud-init-24.1.3/sysvinit/netbsd/cloudinit.tmpl --- cloud-init-23.4.4/sysvinit/netbsd/cloudinit.tmpl 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/sysvinit/netbsd/cloudinit.tmpl 2024-03-27 13:14:04.000000000 +0000 @@ -10,9 +10,10 @@ start_cmd="start_cloud_init" start_cloud_init() { - test -e {{prefix}}/etc/cloud/cloud-init.disabled \ - && warn "cloud-init disabled by cloud-init.disabled file" \ - && exit 0 + if test -e {{prefix}}/etc/cloud/cloud-init.disabled ; then + warn "cloud-init disabled by cloud-init.disabled file" + exit 
0 + fi {{prefix}}/bin/cloud-init init } diff -Nru cloud-init-23.4.4/sysvinit/netbsd/cloudinitlocal.tmpl cloud-init-24.1.3/sysvinit/netbsd/cloudinitlocal.tmpl --- cloud-init-23.4.4/sysvinit/netbsd/cloudinitlocal.tmpl 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/sysvinit/netbsd/cloudinitlocal.tmpl 2024-03-27 13:14:04.000000000 +0000 @@ -16,9 +16,10 @@ start_cmd="start_cloud_init_local" start_cloud_init_local() { - test -e {{prefix}}/etc/cloud/cloud-init.disabled \ - && warn "cloud-init disabled by cloud-init.disabled file" \ - && exit 0 + if test -e {{prefix}}/etc/cloud/cloud-init.disabled; then + warn "cloud-init disabled by cloud-init.disabled file" + exit 0 + fi {{prefix}}/bin/cloud-init init -l } diff -Nru cloud-init-23.4.4/sysvinit/netbsd/dsidentify.tmpl cloud-init-24.1.3/sysvinit/netbsd/dsidentify.tmpl --- cloud-init-23.4.4/sysvinit/netbsd/dsidentify.tmpl 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/sysvinit/netbsd/dsidentify.tmpl 2024-03-27 13:14:04.000000000 +0000 @@ -11,9 +11,10 @@ start_cmd="start_dsidentify" start_dsidentify() { - test -e {{prefix}}/etc/cloud/cloud-init.disabled \ - && warn "cloud-init disabled by cloud-init.disabled file" \ - && exit 0 + if test -e {{prefix}}/etc/cloud/cloud-init.disabled ; then + warn "cloud-init disabled by cloud-init.disabled file" + exit 0 + fi {{prefix}}/lib/cloud-init/ds-identify } diff -Nru cloud-init-23.4.4/sysvinit/openbsd/cloudconfig.tmpl cloud-init-24.1.3/sysvinit/openbsd/cloudconfig.tmpl --- cloud-init-23.4.4/sysvinit/openbsd/cloudconfig.tmpl 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/sysvinit/openbsd/cloudconfig.tmpl 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,37 @@ +## template:jinja +#!/bin/ksh + +# PROVIDE: cloudconfig +# REQUIRE: cloudinit +# BEFORE: sshd + + +daemon="cloud-init" +daemon_execdir="{{prefix}}/bin" +daemon_flags="modules --mode config" +daemon_user=root + +. /etc/rc.d/rc.subr + +rc_bg="YES" # (undefined or "YES") +rc_usercheck="YES" # (undefined or "NO") + +rc_start() { + if test -e /etc/cloud/cloud-init.disabled; then + echo -n "cloud-init is disabled via cloud-init.disabled file." | logger -t ${daemon} + else + echo -n "Starting..." | logger -t ${daemon} + rc_exec "${daemon_execdir}/${daemon} ${daemon_flags}" + fi +} + +rc_check() { + pgrep -f "${daemon}" >/dev/null +} + +rc_stop() { + echo -n "Stopping..." | logger -t ${daemon} + pkill -f "${daemon}" >/dev/null +} + +rc_cmd "$1" diff -Nru cloud-init-23.4.4/sysvinit/openbsd/cloudfinal.tmpl cloud-init-24.1.3/sysvinit/openbsd/cloudfinal.tmpl --- cloud-init-23.4.4/sysvinit/openbsd/cloudfinal.tmpl 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/sysvinit/openbsd/cloudfinal.tmpl 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,35 @@ +## template:jinja +#!/bin/ksh + +# PROVIDE: cloudfinal +# REQUIRE: LOGIN cloudconfig + +daemon="cloud-init" +daemon_execdir="{{prefix}}/bin" +daemon_flags="modules --mode final" +daemon_user=root + +. /etc/rc.d/rc.subr + +rc_bg="YES" # (undefined or "YES") +rc_usercheck="YES" # (undefined or "NO") + +rc_start() { + if test -e /etc/cloud/cloud-init.disabled; then + echo -n "cloud-init is disabled via cloud-init.disabled file." | logger -t ${daemon} + else + echo -n "Starting..." | logger -t ${daemon} + rc_exec "${daemon_execdir}/${daemon} ${daemon_flags}" + fi +} + +rc_check() { + pgrep -f "${daemon}" >/dev/null +} + +rc_stop() { + echo -n "Stopping..." 
| logger -t ${daemon} + pkill -f "${daemon}" >/dev/null +} + +rc_cmd "$1" diff -Nru cloud-init-23.4.4/sysvinit/openbsd/cloudinit.tmpl cloud-init-24.1.3/sysvinit/openbsd/cloudinit.tmpl --- cloud-init-23.4.4/sysvinit/openbsd/cloudinit.tmpl 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/sysvinit/openbsd/cloudinit.tmpl 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,35 @@ +## template:jinja +#!/bin/ksh + +# PROVIDE: cloudinit +# REQUIRE: cloudinitlocal + +daemon="cloud-init" +daemon_execdir="{{prefix}}/bin" +daemon_flags="init" +daemon_user=root + +. /etc/rc.d/rc.subr + +rc_bg="YES" # (undefined or "YES") +rc_usercheck="YES" # (undefined or "NO") + +rc_start() { + if test -e /etc/cloud/cloud-init.disabled; then + echo -n "cloud-init is disabled via cloud-init.disabled file." | logger -t ${daemon} + else + echo -n "Starting..." | logger -t ${daemon} + rc_exec "${daemon_execdir}/${daemon} ${daemon_flags}" + fi +} + +rc_check() { + pgrep -f "${daemon}" >/dev/null +} + +rc_stop() { + echo -n "Stopping..." | logger -t ${daemon} + pkill -f "${daemon}" >/dev/null +} + +rc_cmd "$1" diff -Nru cloud-init-23.4.4/sysvinit/openbsd/cloudinitlocal.tmpl cloud-init-24.1.3/sysvinit/openbsd/cloudinitlocal.tmpl --- cloud-init-23.4.4/sysvinit/openbsd/cloudinitlocal.tmpl 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/sysvinit/openbsd/cloudinitlocal.tmpl 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,38 @@ +## template:jinja +#!/bin/ksh + +# PROVIDE: cloudinitlocal +# REQUIRE: NETWORKING + +# After NETWORKING because we don't want staticroute to wipe +# the route set by the DHCP client toward the meta-data server. + +daemon="cloud-init" +daemon_execdir="{{prefix}}/bin" +daemon_flags="init -l" +daemon_user=root + +. /etc/rc.d/rc.subr + +rc_bg="YES" # (undefined or "YES") +rc_usercheck="YES" + +rc_start() { + if test -e /etc/cloud/cloud-init.disabled; then + echo -n "cloud-init is disabled via cloud-init.disabled file." | logger -t ${daemon} + else + echo -n "Starting..." | logger -t ${daemon} + rc_exec "${daemon_execdir}/${daemon} ${daemon_flags}" + fi +} + +rc_check() { + pgrep -f "${daemon}" >/dev/null +} + +rc_stop() { + echo -n "Stopping..." | logger -t ${daemon} + pkill -f "${daemon}" >/dev/null +} + +rc_cmd "$1" diff -Nru cloud-init-23.4.4/sysvinit/openrc/cloud-config cloud-init-24.1.3/sysvinit/openrc/cloud-config --- cloud-init-23.4.4/sysvinit/openrc/cloud-config 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/sysvinit/openrc/cloud-config 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,22 @@ +#!/sbin/openrc-run + +description="cloud-init config stage" + +depend() { + after cloud-init-local + after cloud-init + before cloud-final + provide cloud-config +} + +start() { + if grep -q 'cloud-init=disabled' /proc/cmdline; then + ewarn "$RC_SVCNAME is disabled via /proc/cmdline." + elif test -e /etc/cloud/cloud-init.disabled; then + ewarn "$RC_SVCNAME is disabled via cloud-init.disabled file" + else + ebegin "cloud-init config" + cloud-init modules --mode config + eend $? 
+ fi +} diff -Nru cloud-init-23.4.4/sysvinit/openrc/cloud-final cloud-init-24.1.3/sysvinit/openrc/cloud-final --- cloud-init-23.4.4/sysvinit/openrc/cloud-final 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/sysvinit/openrc/cloud-final 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,20 @@ +#!/sbin/openrc-run + +description="cloud-init final stage" + +depend() { + after cloud-config + provide cloud-final +} + +start() { + if grep -q 'cloud-init=disabled' /proc/cmdline; then + ewarn "$RC_SVCNAME is disabled via /proc/cmdline." + elif test -e /etc/cloud/cloud-init.disabled; then + ewarn "$RC_SVCNAME is disabled via cloud-init.disabled file" + else + ebegin "cloud-init final" + cloud-init modules --mode final + eend $? + fi +} diff -Nru cloud-init-23.4.4/sysvinit/openrc/cloud-init cloud-init-24.1.3/sysvinit/openrc/cloud-init --- cloud-init-23.4.4/sysvinit/openrc/cloud-init 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/sysvinit/openrc/cloud-init 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,23 @@ +#!/sbin/openrc-run + +description="cloud-init network stage" + +# add depends for network, dns, fs etc +depend() { + after cloud-init-local + after net + before cloud-config + provide cloud-init +} + +start() { + if grep -q 'cloud-init=disabled' /proc/cmdline; then + ewarn "$RC_SVCNAME is disabled via /proc/cmdline." + elif test -e /etc/cloud/cloud-init.disabled; then + ewarn "$RC_SVCNAME is disabled via cloud-init.disabled file" + else + ebegin "cloud-init init" + cloud-init init + eend $? + fi +} diff -Nru cloud-init-23.4.4/sysvinit/openrc/cloud-init-ds-identify cloud-init-24.1.3/sysvinit/openrc/cloud-init-ds-identify --- cloud-init-23.4.4/sysvinit/openrc/cloud-init-ds-identify 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/sysvinit/openrc/cloud-init-ds-identify 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,21 @@ +#!/sbin/openrc-run + +description="cloud-init ds-identify" + +depend() { + after localmount + before net + before cloud-init-local +} + +start() { + if grep -q 'cloud-init=disabled' /proc/cmdline; then + ewarn "$RC_SVCNAME is disabled via /proc/cmdline." + elif test -e /etc/cloud/cloud-init.disabled; then + ewarn "$RC_SVCNAME is disabled via cloud-init.disabled file" + else + ebegin "$description" + /usr/lib/cloud-init/ds-identify + eend $? + fi +} diff -Nru cloud-init-23.4.4/sysvinit/openrc/cloud-init-hotplug cloud-init-24.1.3/sysvinit/openrc/cloud-init-hotplug --- cloud-init-23.4.4/sysvinit/openrc/cloud-init-hotplug 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/sysvinit/openrc/cloud-init-hotplug 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,23 @@ +#!/sbin/openrc-run + +description="cloud-init hotplug daemon" + +command="/usr/lib/cloud-init/cloud-init-hotplugd" +pidfile="/run/$RC_SVCNAME.pid" + +depend() { + after cloud-final +} + +start() { + if grep -q 'cloud-init=disabled' /proc/cmdline; then + ewarn "$RC_SVCNAME is disabled via /proc/cmdline." + elif test -e /etc/cloud/cloud-init.disabled; then + ewarn "$RC_SVCNAME is disabled via cloud-init.disabled file" + else + ebegin "$description" + start-stop-daemon --start --background --exec $command \ + --make-pidfile --pidfile $pidfile + eend $? 
+ fi +} diff -Nru cloud-init-23.4.4/sysvinit/openrc/cloud-init-local cloud-init-24.1.3/sysvinit/openrc/cloud-init-local --- cloud-init-23.4.4/sysvinit/openrc/cloud-init-local 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/sysvinit/openrc/cloud-init-local 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,22 @@ +#!/sbin/openrc-run + +description="cloud-init local stage" + +depend() { + after localmount + before net + before cloud-init + provide cloud-init-local +} + +start() { + if grep -q 'cloud-init=disabled' /proc/cmdline; then + ewarn "$RC_SVCNAME is disabled via /proc/cmdline." + elif test -e /etc/cloud/cloud-init.disabled; then + ewarn "$RC_SVCNAME is disabled via cloud-init.disabled file" + else + ebegin "cloud-init local" + cloud-init init --local + eend $? + fi +} diff -Nru cloud-init-23.4.4/templates/chrony.conf.cos.tmpl cloud-init-24.1.3/templates/chrony.conf.cos.tmpl --- cloud-init-23.4.4/templates/chrony.conf.cos.tmpl 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/templates/chrony.conf.cos.tmpl 2024-03-27 13:14:04.000000000 +0000 @@ -1,6 +1,6 @@ ## template:jinja # Welcome to the chrony configuration file. See chrony.conf(5) for more -# information about usuable directives. +# information about usable directives. {% if pools %}# pools {% endif %} diff -Nru cloud-init-23.4.4/templates/chrony.conf.debian.tmpl cloud-init-24.1.3/templates/chrony.conf.debian.tmpl --- cloud-init-23.4.4/templates/chrony.conf.debian.tmpl 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/templates/chrony.conf.debian.tmpl 2024-03-27 13:14:04.000000000 +0000 @@ -1,6 +1,6 @@ ## template:jinja # Welcome to the chrony configuration file. See chrony.conf(5) for more -# information about usuable directives. +# information about usable directives. {% if pools %}# pools {% endif %} {% for pool in pools -%} diff -Nru cloud-init-23.4.4/templates/chrony.conf.ubuntu.tmpl cloud-init-24.1.3/templates/chrony.conf.ubuntu.tmpl --- cloud-init-23.4.4/templates/chrony.conf.ubuntu.tmpl 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/templates/chrony.conf.ubuntu.tmpl 2024-03-27 13:14:04.000000000 +0000 @@ -1,6 +1,6 @@ ## template:jinja # Welcome to the chrony configuration file. See chrony.conf(5) for more -# information about usuable directives. +# information about usable directives. # Use servers from the NTP Pool Project. Approved by Ubuntu Technical Board # on 2011-02-08 (LP: #104525). 
See http://www.pool.ntp.org/join.html for diff -Nru cloud-init-23.4.4/templates/hosts.alpine.tmpl cloud-init-24.1.3/templates/hosts.alpine.tmpl --- cloud-init-23.4.4/templates/hosts.alpine.tmpl 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/templates/hosts.alpine.tmpl 2024-03-27 13:14:04.000000000 +0000 @@ -13,13 +13,12 @@ # /etc/cloud/cloud.cfg or cloud-config from user-data # # The following lines are desirable for IPv4 capable hosts -127.0.1.1 {{hostname}} {{fqdn}} -127.0.0.1 localhost localhost.localdomain -127.0.0.1 localhost4 localhost4.localdomain4 +127.0.0.1 localhost.localdomain localhost +127.0.0.1 localhost4.localdomain4 localhost4 +127.0.1.1 {{fqdn}} {{hostname}} # The following lines are desirable for IPv6 capable hosts -::1 {{hostname}} {{fqdn}} -::1 localhost6 localhost6.localdomain6 +::1 localhost6.localdomain6 localhost6 ff02::1 ip6-allnodes ff02::2 ip6-allrouters diff -Nru cloud-init-23.4.4/templates/hosts.mariner.tmpl cloud-init-24.1.3/templates/hosts.mariner.tmpl --- cloud-init-23.4.4/templates/hosts.mariner.tmpl 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/templates/hosts.mariner.tmpl 2024-03-27 13:14:04.000000000 +0000 @@ -1,22 +1,22 @@ -+## template:jinja -+{# -+This file /etc/cloud/templates/hosts.mariner.tmpl is only utilized -+if enabled in cloud-config. Specifically, in order to enable it -+you need to add the following to config: -+ manage_etc_hosts: True -+-#} -+# Your system has configured 'manage_etc_hosts' as True. -+# As a result, if you wish for changes to this file to persist -+# then you will need to either -+# a.) make changes to the master file in /etc/cloud/templates/hosts.mariner.tmpl -+# b.) change or remove the value of 'manage_etc_hosts' in -+# /etc/cloud/cloud.cfg or cloud-config from user-data -+# -+# The following lines are desirable for IPv4 capable hosts -+127.0.0.1 {{fqdn}} {{hostname}} -+127.0.0.1 localhost.localdomain localhost -+127.0.0.1 localhost4.localdomain4 localhost4 -+ -+# The following lines are desirable for IPv6 capable hosts -+::1 {{fqdn}} {{hostname}} -+::1 localhost6.localdomain6 localhost6 +## template:jinja +{# +This file /etc/cloud/templates/hosts.mariner.tmpl is only utilized +if enabled in cloud-config. Specifically, in order to enable it +you need to add the following to config: + manage_etc_hosts: True +-#} +# Your system has configured 'manage_etc_hosts' as True. +# As a result, if you wish for changes to this file to persist +# then you will need to either +# a.) make changes to the master file in /etc/cloud/templates/hosts.mariner.tmpl +# b.) 
change or remove the value of 'manage_etc_hosts' in +# /etc/cloud/cloud.cfg or cloud-config from user-data +# +# The following lines are desirable for IPv4 capable hosts +127.0.0.1 {{fqdn}} {{hostname}} +127.0.0.1 localhost.localdomain localhost +127.0.0.1 localhost4.localdomain4 localhost4 + +# The following lines are desirable for IPv6 capable hosts +::1 {{fqdn}} {{hostname}} +::1 localhost6.localdomain6 localhost6 diff -Nru cloud-init-23.4.4/templates/ntp.conf.ubuntu.tmpl cloud-init-24.1.3/templates/ntp.conf.ubuntu.tmpl --- cloud-init-23.4.4/templates/ntp.conf.ubuntu.tmpl 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/templates/ntp.conf.ubuntu.tmpl 2024-03-27 13:14:04.000000000 +0000 @@ -67,7 +67,7 @@ #disable auth #broadcastclient -#Changes recquired to use pps synchonisation as explained in documentation: +#Changes required to use pps synchronisation as explained in documentation: #http://www.ntp.org/ntpfaq/NTP-s-config-adv.htm#AEN3918 #server 127.127.8.1 mode 135 prefer # Meinberg GPS167 with PPS diff -Nru cloud-init-23.4.4/templates/sources.list.ubuntu.deb822.tmpl cloud-init-24.1.3/templates/sources.list.ubuntu.deb822.tmpl --- cloud-init-23.4.4/templates/sources.list.ubuntu.deb822.tmpl 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/templates/sources.list.ubuntu.deb822.tmpl 2024-03-27 13:14:04.000000000 +0000 @@ -1,61 +1,57 @@ ## template:jinja ## Note, this file is written by cloud-init on first boot of an instance ## modifications made here will not survive a re-bundle. -## if you wish to make changes you can: +## +## If you wish to make changes you can: ## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg ## or do the same in user-data ## b.) add supplemental sources in /etc/apt/sources.list.d ## c.) make changes to template file ## /etc/cloud/templates/sources.list.ubuntu.deb822.tmpl ## -## See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to -## newer versions of the distribution. + +# See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to +# newer versions of the distribution. + +## Ubuntu distribution repository ## -## The following settings can be tweaked to configure which packages to use -## from Ubuntu. -## Mirror your choices (except for URIs and Suites) in the security section -## below to ensure timely security updates. +## The following settings can be adjusted to configure which packages to use from Ubuntu. +## Mirror your choices (except for URIs and Suites) in the security section below to +## ensure timely security updates. ## ## Types: Append deb-src to enable the fetching of source package. ## URIs: A URL to the repository (you may add multiple URLs) ## Suites: The following additional suites can be configured -## -updates - Major bug fix updates produced after the final release -## of the distribution. -## -backports - software from this repository may not have been tested -## as extensively as that contained in the main release, -## although it includes newer versions of some -## applications which may provide useful features. -## Also, please note that software in backports WILL NOT -## receive any review or updates from the Ubuntu security -## team. -## Components: Aside from main, the following components can be added to the -## list: -## restricted - Software that may not be under a free license, or protected -## by patents. -## universe - Community maintained packages. Software from this repository -## is ENTIRELY UNSUPPORTED by the Ubuntu team. 
Also, please -## note that software in universe WILL NOT receive any +## -updates - Major bug fix updates produced after the final release of the +## distribution. +## -backports - software from this repository may not have been tested as +## extensively as that contained in the main release, although it includes +## newer versions of some applications which may provide useful features. +## Also, please note that software in backports WILL NOT receive any review +## or updates from the Ubuntu security team. +## Components: Aside from main, the following components can be added to the list +## restricted - Software that may not be under a free license, or protected by patents. +## universe - Community maintained packages. +## Software from this repository is only maintained and supported by Canonical +## for machines with Ubuntu Pro subscriptions. Without Ubuntu Pro, the Ubuntu +## community provides best-effort security maintenance. +## multiverse - Community maintained of restricted. Software from this repository is +## ENTIRELY UNSUPPORTED by the Ubuntu team, and may not be under a free +## licence. Please satisfy yourself as to your rights to use the software. +## Also, please note that software in multiverse WILL NOT receive any ## review or updates from the Ubuntu security team. -## multiverse - Community maintained of restricted. Software from this -## repository is ENTIRELY UNSUPPORTED by the Ubuntu team, and -## may not be under a free licence. Please satisfy yourself as -## to your rights to use the software. -## Also, please note that software in multiverse WILL NOT -## receive any review or updates from the Ubuntu security team. ## ## See the sources.list(5) manual page for further settings. -# Types: deb deb-src Types: deb URIs: {{mirror}} Suites: {{codename}} {{codename}}-updates {{codename}}-backports -Components: main restricted universe multiverse +Components: main universe restricted multiverse Signed-By: /usr/share/keyrings/ubuntu-archive-keyring.gpg ## Ubuntu security updates. Aside from URIs and Suites, ## this should mirror your choices in the previous section. 
-# Types: deb deb-src Types: deb URIs: {{security}} Suites: {{codename}}-security -Components: main restricted universe multiverse +Components: main universe restricted multiverse Signed-By: /usr/share/keyrings/ubuntu-archive-keyring.gpg diff -Nru cloud-init-23.4.4/test-requirements.txt cloud-init-24.1.3/test-requirements.txt --- cloud-init-23.4.4/test-requirements.txt 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/test-requirements.txt 2024-03-27 13:14:04.000000000 +0000 @@ -9,6 +9,6 @@ pytest-cov pytest-mock setuptools -jsonschema<=4.20.0 +jsonschema responses passlib diff -Nru cloud-init-23.4.4/tests/data/merge_sources/expected9.yaml cloud-init-24.1.3/tests/data/merge_sources/expected9.yaml --- cloud-init-23.4.4/tests/data/merge_sources/expected9.yaml 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/data/merge_sources/expected9.yaml 2024-03-27 13:14:04.000000000 +0000 @@ -2,4 +2,4 @@ phone_home: url: http://my.example.com/$INSTANCE_ID/$BLAH_BLAH - post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ] + post: [ pub_key_rsa, pub_key_ecdsa, instance_id ] diff -Nru cloud-init-23.4.4/tests/data/merge_sources/source9-1.yaml cloud-init-24.1.3/tests/data/merge_sources/source9-1.yaml --- cloud-init-23.4.4/tests/data/merge_sources/source9-1.yaml 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/data/merge_sources/source9-1.yaml 2024-03-27 13:14:04.000000000 +0000 @@ -2,4 +2,4 @@ phone_home: url: http://my.example.com/$INSTANCE_ID/ - post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ] + post: [ pub_key_rsa, pub_key_ecdsa, instance_id ] Binary files /tmp/tmp4yccu2hq/31PKxkcfK0/cloud-init-23.4.4/tests/data/net/dhcp/enp24s0.lease and /tmp/tmp4yccu2hq/iil4dsR_oD/cloud-init-24.1.3/tests/data/net/dhcp/enp24s0.lease differ Binary files /tmp/tmp4yccu2hq/31PKxkcfK0/cloud-init-23.4.4/tests/data/net/dhcp/eth0.lease and /tmp/tmp4yccu2hq/iil4dsR_oD/cloud-init-24.1.3/tests/data/net/dhcp/eth0.lease differ diff -Nru cloud-init-23.4.4/tests/helpers.py cloud-init-24.1.3/tests/helpers.py --- cloud-init-23.4.4/tests/helpers.py 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/tests/helpers.py 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,21 @@ +from pathlib import Path + +import cloudinit + + +def get_top_level_dir() -> Path: + """Return the absolute path to the top cloudinit project directory + + @return Path('') + """ + return Path(cloudinit.__file__).parent.parent.resolve() + + +def cloud_init_project_dir(sub_path: str) -> str: + """Get a path within the cloudinit project directory + + @return str of the combined path + + Example: cloud_init_project_dir("my/path") -> "/path/to/cloud-init/my/path" + """ + return str(get_top_level_dir() / sub_path) diff -Nru cloud-init-23.4.4/tests/integration_tests/assets/enable_coverage.py cloud-init-24.1.3/tests/integration_tests/assets/enable_coverage.py --- cloud-init-23.4.4/tests/integration_tests/assets/enable_coverage.py 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/assets/enable_coverage.py 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,28 @@ +from pathlib import Path + +services = [ + "cloud-init-local.service", + "cloud-init.service", + "cloud-config.service", + "cloud-final.service", +] +service_dir = Path("/lib/systemd/system/") + +# Check for the existence of the service files +for service in services: + if not (service_dir / service).is_file(): + print(f"Error: {service} does not exist in {service_dir}") + exit(1) + +# Prepend the ExecStart= line with 'python3 -m 
coverage run' +for service in services: + file_path = service_dir / service + content = file_path.read_text() + content = content.replace( + "ExecStart=/usr", + ( + "ExecStart=python3 -m coverage run " + "--source=/usr/lib/python3/dist-packages/cloudinit --append /usr" + ), + ) + file_path.write_text(content) diff -Nru cloud-init-23.4.4/tests/integration_tests/bugs/test_lp1835584.py cloud-init-24.1.3/tests/integration_tests/bugs/test_lp1835584.py --- cloud-init-23.4.4/tests/integration_tests/bugs/test_lp1835584.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/bugs/test_lp1835584.py 2024-03-27 13:14:04.000000000 +0000 @@ -94,7 +94,5 @@ # We can't use setup_image fixture here because we want to avoid # taking a snapshot or cleaning the booted machine after cloud-init # upgrade. - instance.install_new_cloud_init( - source, take_snapshot=False, clean=False - ) + instance.install_new_cloud_init(source, clean=False) _check_iid_insensitive_across_kernel_upgrade(instance) diff -Nru cloud-init-23.4.4/tests/integration_tests/clouds.py cloud-init-24.1.3/tests/integration_tests/clouds.py --- cloud-init-23.4.4/tests/integration_tests/clouds.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/clouds.py 2024-03-27 13:14:04.000000000 +0000 @@ -209,20 +209,24 @@ ) def _perform_launch( - self, *, launch_kwargs, wait=True, **kwargs + self, *, launch_kwargs, wait=True, enable_ipv6=True, **kwargs ) -> EC2Instance: """Use a dual-stack VPC for cloud-init integration testing.""" - if "vpc" not in launch_kwargs: - launch_kwargs["vpc"] = self.cloud_instance.get_or_create_vpc( - name="ec2-cloud-init-integration" - ) - # Enable IPv6 metadata at http://[fd00:ec2::254] - if "Ipv6AddressCount" not in launch_kwargs: - launch_kwargs["Ipv6AddressCount"] = 1 - if "MetadataOptions" not in launch_kwargs: - launch_kwargs["MetadataOptions"] = {} - if "HttpProtocolIpv6" not in launch_kwargs["MetadataOptions"]: - launch_kwargs["MetadataOptions"] = {"HttpProtocolIpv6": "enabled"} + if enable_ipv6: + if "vpc" not in launch_kwargs: + launch_kwargs["vpc"] = self.cloud_instance.get_or_create_vpc( + name="ec2-cloud-init-integration" + ) + + # Enable IPv6 metadata at http://[fd00:ec2::254] + if "Ipv6AddressCount" not in launch_kwargs: + launch_kwargs["Ipv6AddressCount"] = 1 + if "MetadataOptions" not in launch_kwargs: + launch_kwargs["MetadataOptions"] = {} + if "HttpProtocolIpv6" not in launch_kwargs["MetadataOptions"]: + launch_kwargs["MetadataOptions"] = { + "HttpProtocolIpv6": "enabled" + } pycloudlib_instance = self.cloud_instance.launch(**launch_kwargs) self._maybe_wait(pycloudlib_instance, wait) diff -Nru cloud-init-23.4.4/tests/integration_tests/cmd/test_clean.py cloud-init-24.1.3/tests/integration_tests/cmd/test_clean.py --- cloud-init-23.4.4/tests/integration_tests/cmd/test_clean.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/cmd/test_clean.py 2024-03-27 13:14:04.000000000 +0000 @@ -4,6 +4,7 @@ import pytest from tests.integration_tests.instances import IntegrationInstance +from tests.integration_tests.releases import IS_UBUNTU USER_DATA = """\ #cloud-config @@ -25,15 +26,37 @@ content: '#!/bin/sh\necho DID NOT RUN BECAUSE NO EXEC PERMS' permissions: '0644' owner: root:root +packages: +- logrotate """ @pytest.mark.user_data(USER_DATA) class TestCleanCommand: - def test_clean_by_param(self, class_client: IntegrationInstance): + @pytest.mark.skipif( + not IS_UBUNTU, reason="Hasn't been tested on other distros" + ) + def 
test_clean_rotated_logs(self, client: IntegrationInstance): + """Clean with log params alters expected files without error""" + assert client.execute("cloud-init status --wait --long").ok + assert client.execute("logrotate /etc/logrotate.d/cloud-init").ok + log_paths = ( + "/var/log/cloud-init.log", + "/var/log/cloud-init.log.1.gz", + "/var/log/cloud-init-output.log", + "/var/log/cloud-init-output.log.1.gz", + ) + + assert client.execute("cloud-init clean --logs").ok + for path in log_paths: + assert client.execute( + f"test -f {path}" + ).failed, f"Unexpected file found {path}" + + def test_clean_by_param(self, client: IntegrationInstance): """Clean with various params alters expected files without error""" - assert class_client.execute("cloud-init status --wait").ok - result = class_client.execute("cloud-init clean") + assert client.execute("cloud-init status --wait --long").ok + result = client.execute("cloud-init clean") assert ( result.ok ), "non-zero exit on cloud-init clean runparts of /etc/cloud/clean.d" @@ -48,38 +71,38 @@ "/etc/systemd/network/10-cloud-init-eth0.network", ) for path in log_paths + net_cfg_paths: - assert class_client.execute( + assert client.execute( f"test -f {path}" ).ok, f"Missing expected file {path}" # /etc/cloud/clean.d runparts scripts are run if executable assert result.stdout == "/etc/cloud/clean.d/runme.sh RAN" # Log files removed with --logs - assert class_client.execute("cloud-init clean --logs").ok + assert client.execute("cloud-init clean --logs").ok for path in log_paths: - assert class_client.execute( + assert client.execute( f"test -f {path}" ).failed, f"Unexpected file found {path}" for path in net_cfg_paths: - assert class_client.execute( + assert client.execute( f"test -f {path}" ).ok, f"Missing expected file {path}" - prev_machine_id = class_client.read_from_file("/etc/machine-id") + prev_machine_id = client.read_from_file("/etc/machine-id") assert re.match( r"^[a-f0-9]{32}$", prev_machine_id ), f"Unexpected machine-id format {prev_machine_id}" # --machine-id sets /etc/machine-id - assert class_client.execute("cloud-init clean --machine-id").ok - machine_id = class_client.read_from_file("/etc/machine-id") + assert client.execute("cloud-init clean --machine-id").ok + machine_id = client.read_from_file("/etc/machine-id") assert machine_id != prev_machine_id assert "uninitialized" == machine_id # --configs remove network scope - assert class_client.execute("cloud-init clean --configs network").ok + assert client.execute("cloud-init clean --configs network").ok for path in log_paths + net_cfg_paths: - assert class_client.execute( + assert client.execute( f"test -f {path}" ).failed, f"Unexpected file found {path}" diff -Nru cloud-init-23.4.4/tests/integration_tests/cmd/test_schema.py cloud-init-24.1.3/tests/integration_tests/cmd/test_schema.py --- cloud-init-23.4.4/tests/integration_tests/cmd/test_schema.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/cmd/test_schema.py 2024-03-27 13:14:04.000000000 +0000 @@ -4,6 +4,7 @@ import pytest from tests.integration_tests.instances import IntegrationInstance +from tests.integration_tests.releases import CURRENT_RELEASE, MANTIC from tests.integration_tests.util import verify_clean_log USER_DATA = """\ @@ -13,6 +14,48 @@ apt_reboot_if_required: false """ +NET_CFG_V1 = """\ +network: + version: 1 + config: + - type: physical + name: eth0 + subnets: + - type: dhcp +""" +NET_CFG_V1_INVALID = NET_CFG_V1.replace("config", "junk") +NET_V1_ANNOTATED = """\ +network: # E1,E2 + 
version: 1 + junk: + - type: physical + name: eth0 + subnets: + - type: dhcp + +# Errors: ------------- +# E1: 'config' is a required property +# E2: Additional properties are not allowed ('junk' was unexpected)""" + +NET_CFG_V2 = """\ +version: 2 +ethernets: + eth0: + dhcp4: true +""" +NET_CFG_V2_INVALID = NET_CFG_V2.replace("true", "bogus") +NET_V2_ANNOTATED = """\ +--- +network: + ethernets: + eth0: + dhcp4: bogus # E1 + version: 2 +... + +# Errors: ------------- +# E1: Invalid netplan schema. Error in network definition: invalid boolean value 'bogus'""" # noqa: E501 + @pytest.mark.user_data(USER_DATA) class TestSchemaDeprecations: @@ -24,6 +67,65 @@ assert "apt_update: Default: ``false``. Deprecated in version" in log assert "apt_upgrade: Default: ``false``. Deprecated in version" in log + def test_network_config_schema_validation( + self, class_client: IntegrationInstance + ): + content_responses = { + NET_CFG_V1: {"out": "Valid schema /root/net.yaml"}, + NET_CFG_V1_INVALID: { + "out": "Invalid network-config /root/net.yaml", + "err": ( + "network: Additional properties are not allowed" + " ('junk' was unexpected)" + ), + "annotate": NET_V1_ANNOTATED, + }, + } + if CURRENT_RELEASE >= MANTIC: + # Support for netplan API available + content_responses[NET_CFG_V2] = { + "out": "Valid schema /root/net.yaml" + } + content_responses[NET_CFG_V2_INVALID] = { + "out": "Invalid network-config /root/net.yaml", + "err": ( + "Cloud config schema errors: format-l5.c20:" + " Invalid netplan schema. Error in network definition:" + " invalid boolean value 'bogus'" + ), + "annotate": NET_V2_ANNOTATED, + } + else: + # No netplan API available skips validation + content_responses[NET_CFG_V2] = { + "out": ( + "Skipping network-config schema validation." + " No network schema for version: 2" + ) + } + content_responses[NET_CFG_V2_INVALID] = { + "out": ( + "Skipping network-config schema validation." 
+ " No network schema for version: 2" + ) + } + + for content, responses in content_responses.items(): + class_client.write_to_file("/root/net.yaml", content) + result = class_client.execute( + "cloud-init schema --schema-type network-config" + " --config-file /root/net.yaml" + ) + assert responses["out"] == result.stdout + if responses.get("err"): + assert responses["err"] in result.stderr + if responses.get("annotate"): + result = class_client.execute( + "cloud-init schema --schema-type network-config" + " --config-file /root/net.yaml --annotate" + ) + assert responses["annotate"] in result.stdout + def test_schema_deprecations(self, class_client: IntegrationInstance): """Test schema behavior with deprecated configs.""" user_data_fn = "/root/user-data" diff -Nru cloud-init-23.4.4/tests/integration_tests/cmd/test_status.py cloud-init-24.1.3/tests/integration_tests/cmd/test_status.py --- cloud-init-23.4.4/tests/integration_tests/cmd/test_status.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/cmd/test_status.py 2024-03-27 13:14:04.000000000 +0000 @@ -4,6 +4,7 @@ import pytest from tests.integration_tests.clouds import IntegrationCloud +from tests.integration_tests.decorators import retry from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.integration_settings import PLATFORM from tests.integration_tests.releases import CURRENT_RELEASE, IS_UBUNTU, JAMMY @@ -19,6 +20,12 @@ client.instance._wait_for_execute(old_boot_id=old_boot_id) +@retry(tries=30, delay=1) +def retry_read_from_file(client: IntegrationInstance, path: str): + """Retry read_from_file expecting it shortly""" + return client.read_from_file(path) + + @pytest.mark.skipif(not IS_UBUNTU, reason="Only ever tested on Ubuntu") @pytest.mark.skipif( PLATFORM != "lxd_container", @@ -82,3 +89,74 @@ assert "Invalid cloud-config provided" in json.loads(status_json)[ "recoverable_errors" ].get("WARNING").pop(0) + + +EARLY_BOOT_WAIT_USER_DATA = """\ +#cloud-config +runcmd: [systemctl enable before-cloud-init-local.service] +write_files: +- path: /waitoncloudinit.sh + permissions: '0755' + content: | + #!/bin/sh + if [ -f /var/lib/cloud/data/status.json ]; then + MARKER_FILE="/$1.start-hasstatusjson" + else + MARKER_FILE="/$1.start-nostatusjson" + fi + cloud-init status --wait --long > $1 + date +%s.%N > $MARKER_FILE +- path: /lib/systemd/system/before-cloud-init-local.service + permissions: '0644' + content: | + [Unit] + Description=BEFORE cloud-init local + DefaultDependencies=no + After=systemd-remount-fs.service + Before=cloud-init-local.service + Before=shutdown.target + Before=sysinit.target + Conflicts=shutdown.target + RequiresMountsFor=/var/lib/cloud + + [Service] + Type=simple + ExecStart=/waitoncloudinit.sh /before-local + RemainAfterExit=yes + TimeoutSec=0 + + # Output needs to appear in instance console output + StandardOutput=journal+console + + [Install] + WantedBy=cloud-init.target +""" # noqa: E501 + + +@pytest.mark.user_data(EARLY_BOOT_WAIT_USER_DATA) +@pytest.mark.lxd_use_exec +@pytest.mark.skipif( + PLATFORM not in ("lxd_container", "lxd_vm"), + reason="Requires use of lxd exec", +) +def test_status_block_through_all_boot_status(client): + """Assert early boot cloud-init status --wait does not exit early.""" + client.execute("cloud-init clean --logs --reboot") + wait_for_cloud_init(client).stdout.strip() + client.execute("cloud-init status --wait") + + # Assert that before-cloud-init-local.service started before + # cloud-init-local.service could 
create status.json + client.execute("test -f /before-local.start-hasstatusjson").failed + + early_unit_timestamp = retry_read_from_file( + client, "/before-local.start-nostatusjson" + ) + # Assert the file created at the end of + # before-cloud-init-local.service is newer than the last log entry in + # /var/log/cloud-init.log + events = json.loads(client.execute("cloud-init analyze dump").stdout) + final_cloud_init_event = events[-1]["timestamp"] + assert final_cloud_init_event < float( + early_unit_timestamp + ), "Systemd unit didn't block on cloud-init status --wait" diff -Nru cloud-init-23.4.4/tests/integration_tests/conftest.py cloud-init-24.1.3/tests/integration_tests/conftest.py --- cloud-init-23.4.4/tests/integration_tests/conftest.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/conftest.py 2024-03-27 13:14:04.000000000 +0000 @@ -3,11 +3,13 @@ import functools import logging import os +import shutil +import subprocess import sys from contextlib import contextmanager from pathlib import Path from tarfile import TarFile -from typing import Dict, Generator, Iterator, Type +from typing import Dict, Generator, Iterator, List, Type import pytest from pycloudlib.lxd.instance import LXDInstance @@ -115,11 +117,21 @@ So we can launch instances / run tests with the correct image """ source = get_validated_source(session_cloud) - if not source.installs_new_version(): + if not ( + source.installs_new_version() or integration_settings.INCLUDE_COVERAGE + ): return - log.info("Setting up environment for %s", session_cloud.datasource) + log.info("Setting up source image") client = session_cloud.launch() - client.install_new_cloud_init(source) + if source.installs_new_version(): + log.info("Installing cloud-init from %s", source.name) + client.install_new_cloud_init(source) + if integration_settings.INCLUDE_COVERAGE: + log.info("Installing coverage") + client.install_coverage() + # All done customizing the image, so snapshot it and make it global + snapshot_id = client.snapshot() + client.cloud.snapshot_id = snapshot_id # Even if we're keeping instances, we don't want to keep this # one around as it was just for image creation client.destroy() @@ -131,28 +143,35 @@ request.addfinalizer(session_cloud.delete_snapshot) -def _collect_logs( - instance: IntegrationInstance, node_id: str, test_failed: bool -): - """Collect logs from remote instance. 
- - Args: - instance: The current IntegrationInstance to collect logs from - node_id: The pytest representation of this test, E.g.: - tests/integration_tests/test_example.py::TestExample.test_example - test_failed: If test failed or not - """ - if any( - [ - integration_settings.COLLECT_LOGS == "NEVER", - integration_settings.COLLECT_LOGS == "ON_ERROR" - and not test_failed, - ] - ): - return +def _collect_logs(instance: IntegrationInstance, log_dir: Path): instance.execute( "cloud-init collect-logs -u -t /var/tmp/cloud-init.tar.gz" ) + log.info("Writing logs to %s", log_dir) + + tarball_path = log_dir / "cloud-init.tar.gz" + try: + instance.pull_file("/var/tmp/cloud-init.tar.gz", tarball_path) + except Exception as e: + log.error("Failed to pull logs: %s", e) + return + + tarball = TarFile.open(str(tarball_path)) + tarball.extractall(path=str(log_dir)) + tarball_path.unlink() + + +def _collect_coverage(instance: IntegrationInstance, log_dir: Path): + log.info("Writing coverage report to %s", log_dir) + try: + instance.pull_file("/.coverage", log_dir / ".coverage") + except Exception as e: + log.error("Failed to pull coverage for: %s", e) + + +def _setup_artifact_paths(node_id: str): + parent_dir = Path(integration_settings.LOCAL_LOG_PATH, session_start_time) + node_id_path = Path( node_id.replace( ".py", "" @@ -161,13 +180,9 @@ .replace("[", "-") # For parametrized names .replace("]", "") # For parameterized names ) - log_dir = ( - Path(integration_settings.LOCAL_LOG_PATH) - / session_start_time - / node_id_path - ) - log.info("Writing logs to %s", log_dir) + log_dir = parent_dir / node_id_path + # Create log dir if not exists if not log_dir.exists(): log_dir.mkdir(parents=True) @@ -175,18 +190,35 @@ last_symlink = Path(integration_settings.LOCAL_LOG_PATH) / "last" if os.path.islink(last_symlink): os.unlink(last_symlink) - os.symlink(log_dir.parent, last_symlink) + os.symlink(parent_dir, last_symlink) + return log_dir - tarball_path = log_dir / "cloud-init.tar.gz" - try: - instance.pull_file("/var/tmp/cloud-init.tar.gz", tarball_path) - except Exception as e: - log.error("Failed to pull logs: %s", e) + +def _collect_artifacts( + instance: IntegrationInstance, node_id: str, test_failed: bool +): + """Collect artifacts from remote instance. + + Args: + instance: The current IntegrationInstance to collect artifacts from + node_id: The pytest representation of this test, E.g.: + tests/integration_tests/test_example.py::TestExample.test_example + test_failed: If test failed or not + """ + should_collect_logs = integration_settings.COLLECT_LOGS == "ALWAYS" or ( + integration_settings.COLLECT_LOGS == "ON_ERROR" and test_failed + ) + should_collect_coverage = integration_settings.INCLUDE_COVERAGE + if not (should_collect_logs or should_collect_coverage): return - tarball = TarFile.open(str(tarball_path)) - tarball.extractall(path=str(log_dir)) - tarball_path.unlink() + log_dir = _setup_artifact_paths(node_id) + + if should_collect_logs: + _collect_logs(instance, log_dir) + + if should_collect_coverage: + _collect_coverage(instance, log_dir) @contextmanager @@ -237,7 +269,7 @@ previous_failures = request.session.testsfailed yield instance test_failed = request.session.testsfailed - previous_failures > 0 - _collect_logs(instance, request.node.nodeid, test_failed) + _collect_artifacts(instance, request.node.nodeid, test_failed) @pytest.fixture @@ -308,3 +340,52 @@ # If log_cli_level is available in this version of pytest and not set # to anything, set it to INFO. 
config.option.log_cli_level = "INFO" + + +def _copy_coverage_files(parent_dir: Path) -> List[Path]: + combined_files = [] + for dirpath in parent_dir.rglob("*"): + if (dirpath / ".coverage").exists(): + # Construct the new filename + relative_dir = dirpath.relative_to(parent_dir) + new_filename = ".coverage." + str(relative_dir).replace( + os.sep, "-" + ) + new_filepath = parent_dir / new_filename + + # Copy the file + shutil.copy(dirpath / ".coverage", new_filepath) + combined_files.append(new_filepath) + return combined_files + + +def _generate_coverage_report() -> None: + log.info("Generating coverage report") + parent_dir = Path(integration_settings.LOCAL_LOG_PATH, session_start_time) + coverage_files = _copy_coverage_files(parent_dir) + subprocess.run( + ["coverage", "combine"] + [str(f) for f in coverage_files], + check=True, + cwd=str(parent_dir), + stdout=subprocess.DEVNULL, + ) + html_dir = parent_dir / "html" + html_dir.mkdir() + subprocess.run( + [ + "coverage", + "html", + f"--data-file={parent_dir / '.coverage'}", + f"--directory={html_dir}", + "--ignore-errors", + ], + check=True, + stdout=subprocess.DEVNULL, + ) + log.info("Coverage report generated") + + +def pytest_sessionfinish(session, exitstatus) -> None: + if not integration_settings.INCLUDE_COVERAGE: + return + _generate_coverage_report() diff -Nru cloud-init-23.4.4/tests/integration_tests/datasources/test_azure.py cloud-init-24.1.3/tests/integration_tests/datasources/test_azure.py --- cloud-init-23.4.4/tests/integration_tests/datasources/test_azure.py 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/datasources/test_azure.py 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,47 @@ +import pytest +from pycloudlib.cloud import ImageType + +from tests.integration_tests.clouds import IntegrationCloud +from tests.integration_tests.conftest import get_validated_source +from tests.integration_tests.instances import IntegrationInstance +from tests.integration_tests.integration_settings import PLATFORM +from tests.integration_tests.releases import CURRENT_RELEASE + + +def _check_for_eject_errors( + instance: IntegrationInstance, +): + assert "sr0" not in instance.execute("mount") + log = instance.read_from_file("/var/log/cloud-init.log") + assert "Failed ejecting the provisioning iso" not in log + + +@pytest.mark.skipif(PLATFORM != "azure", reason="Test is Azure specific") +def test_azure_eject(session_cloud: IntegrationCloud): + """Integration test for GitHub #4732. + + Azure uses `eject` but that is not always available on minimal images. + Ensure udev's eject can be used on systemd-enabled systems. 
+ """ + with session_cloud.launch( + launch_kwargs={ + "image_id": session_cloud.cloud_instance.daily_image( + CURRENT_RELEASE.series, image_type=ImageType.MINIMAL + ) + } + ) as instance: + source = get_validated_source(session_cloud) + if source.installs_new_version(): + instance.install_new_cloud_init(source, clean=True) + snapshot_id = instance.snapshot() + try: + with session_cloud.launch( + launch_kwargs={ + "image_id": snapshot_id, + } + ) as snapshot_instance: + _check_for_eject_errors(snapshot_instance) + finally: + session_cloud.cloud_instance.delete_image(snapshot_id) + else: + _check_for_eject_errors(instance) diff -Nru cloud-init-23.4.4/tests/integration_tests/datasources/test_ec2_ipv6.py cloud-init-24.1.3/tests/integration_tests/datasources/test_ec2_ipv6.py --- cloud-init-23.4.4/tests/integration_tests/datasources/test_ec2_ipv6.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/datasources/test_ec2_ipv6.py 2024-03-27 13:14:04.000000000 +0000 @@ -41,10 +41,14 @@ assert client.execute("ip6tables -I OUTPUT -d fd00:ec2::254 -j REJECT").ok _test_crawl(client, "http://169.254.169.254") - # Force NoDHCPLeaseError (by removing dhclient) and assert ipv6 still works + # Force NoDHCPLeaseError (by removing dhcp clients) and assert ipv6 still + # works # Destructive test goes last # dhclient is at /sbin/dhclient on bionic but /usr/sbin/dhclient elseware - assert client.execute("rm $(which dhclient)").ok + for dhcp_client in ("dhclient", "dhcpcd"): + if client.execute(f"command -v {dhcp_client}").ok: + assert client.execute(f"rm $(command -v {dhcp_client})").ok + client.restart() log = client.read_from_file("/var/log/cloud-init.log") assert "Crawl of metadata service using link-local ipv6 took" in log diff -Nru cloud-init-23.4.4/tests/integration_tests/decorators.py cloud-init-24.1.3/tests/integration_tests/decorators.py --- cloud-init-23.4.4/tests/integration_tests/decorators.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/decorators.py 2024-03-27 13:14:04.000000000 +0000 @@ -20,7 +20,7 @@ last_error = None for _ in range(tries): try: - func(*args, **kwargs) + retval = func(*args, **kwargs) break except Exception as e: last_error = e @@ -28,6 +28,7 @@ else: if last_error: raise last_error + return retval return wrapper diff -Nru cloud-init-23.4.4/tests/integration_tests/instances.py cloud-init-24.1.3/tests/integration_tests/instances.py --- cloud-init-23.4.4/tests/integration_tests/instances.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/instances.py 2024-03-27 13:14:04.000000000 +0000 @@ -3,13 +3,17 @@ import os import uuid from enum import Enum +from pathlib import Path from tempfile import NamedTemporaryFile +from typing import Union from pycloudlib.instance import BaseInstance from pycloudlib.result import Result +from tests.helpers import cloud_init_project_dir from tests.integration_tests import integration_settings from tests.integration_tests.decorators import retry +from tests.integration_tests.util import ASSETS_DIR try: from typing import TYPE_CHECKING @@ -78,13 +82,21 @@ raise RuntimeError("Root user cannot run unprivileged") return self.instance.execute(command, use_sudo=use_sudo) - def pull_file(self, remote_path, local_path): + def pull_file( + self, + remote_path: Union[str, os.PathLike], + local_path: Union[str, os.PathLike], + ): # First copy to a temporary directory because of permissions issues tmp_path = _get_tmp_path() self.instance.execute("cp {} 
{}".format(str(remote_path), tmp_path)) self.instance.pull_file(tmp_path, str(local_path)) - def push_file(self, local_path, remote_path): + def push_file( + self, + local_path: Union[str, os.PathLike], + remote_path: Union[str, os.PathLike], + ): # First push to a temporary directory because of permissions issues tmp_path = _get_tmp_path() self.instance.push_file(str(local_path), tmp_path) @@ -124,10 +136,35 @@ log.info("Created new image: %s", image_id) return image_id + def install_coverage(self): + # Determine coverage version from integration-requirements.txt + integration_requirements = Path( + cloud_init_project_dir("integration-requirements.txt") + ).read_text() + coverage_version = "" + for line in integration_requirements.splitlines(): + if line.startswith("coverage=="): + coverage_version = line.split("==")[1] + break + else: + raise RuntimeError( + "Could not find coverage in integration-requirements.txt" + ) + + # Update and install coverage from pip + # We use pip because the versions between distros are incompatible + self._apt_update() + self.execute("apt-get install -qy python3-pip") + self.execute(f"pip3 install coverage=={coverage_version}") + self.push_file( + local_path=ASSETS_DIR / "enable_coverage.py", + remote_path="/var/tmp/enable_coverage.py", + ) + assert self.execute("python3 /var/tmp/enable_coverage.py").ok + def install_new_cloud_init( self, source: CloudInitSource, - take_snapshot=True, clean=True, ): if source == CloudInitSource.DEB_PACKAGE: @@ -148,15 +185,7 @@ log.info("Installed cloud-init version: %s", version) if clean: self.instance.clean() - if take_snapshot: - snapshot_id = self.snapshot() - self.cloud.snapshot_id = snapshot_id - - # assert with retry because we can compete with apt already running in the - # background and get: E: Could not get lock /var/lib/apt/lists/lock - open - # (11: Resource temporarily unavailable) - @retry(tries=30, delay=1) def install_proposed_image(self): log.info("Installing proposed image") assert self.execute( @@ -164,18 +193,17 @@ '$(lsb_release -sc)-proposed main" >> ' "/etc/apt/sources.list.d/proposed.list" ).ok - assert self.execute("apt-get update -q").ok + self._apt_update() assert self.execute( "apt-get install -qy cloud-init -t=$(lsb_release -sc)-proposed" ).ok - @retry(tries=30, delay=1) def install_ppa(self): log.info("Installing PPA") assert self.execute( "add-apt-repository {} -y".format(self.settings.CLOUD_INIT_SOURCE) ).ok - assert self.execute("apt-get update -q").ok + self._apt_update() assert self.execute("apt-get install -qy cloud-init").ok @retry(tries=30, delay=1) @@ -188,15 +216,46 @@ local_path=integration_settings.CLOUD_INIT_SOURCE, remote_path=remote_path, ) - assert self.execute("apt-get install -qy python3-passlib").ok - assert self.execute("dpkg -i {path}".format(path=remote_path)).ok + # Update APT cache so all package data is recent to avoid inability + # to install missing dependency errors due to stale cache. + self.execute("apt update") + # Use apt install instead of dpkg -i to pull in any changed pkg deps + assert self.execute( + f"apt install {remote_path} --yes --allow-downgrades" + ).ok @retry(tries=30, delay=1) def upgrade_cloud_init(self): log.info("Upgrading cloud-init to latest version in archive") - assert self.execute("apt-get update -q").ok + self._apt_update() assert self.execute("apt-get install -qy cloud-init").ok + def _apt_update(self): + """Run an apt update. + + `cloud-init single` allows us to ensure apt update is only run once + for this instance. 
It could be done with an lru_cache too, but + dogfooding is fun.""" + self.write_to_file( + "/tmp/update-ci.yaml", "#cloud-config\npackage_update: true" + ) + response = self.execute( + "cloud-init single --name package_update_upgrade_install " + "--frequency instance --file /tmp/update-ci.yaml" + ) + if not response.ok: + if response.stderr.startswith("usage:"): + # https://github.com/canonical/cloud-init/pull/4559 hasn't + # landed yet, so we need to use the old syntax + response = self.execute( + "cloud-init --file /tmp/update-ci.yaml single --name " + "package_update_upgrade_install --frequency instance " + ) + if response.stderr: + raise RuntimeError( + f"Failed to update packages: {response.stderr}" + ) + def ip(self) -> str: if self._ip: return self._ip diff -Nru cloud-init-23.4.4/tests/integration_tests/integration_settings.py cloud-init-24.1.3/tests/integration_tests/integration_settings.py --- cloud-init-23.4.4/tests/integration_tests/integration_settings.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/integration_settings.py 2024-03-27 13:14:04.000000000 +0000 @@ -81,6 +81,13 @@ COLLECT_LOGS = "ON_ERROR" LOCAL_LOG_PATH = "/tmp/cloud_init_test_logs" +# We default our coverage to False because it involves modifying the +# cloud-init systemd services, which is too intrusive of a change to +# enable by default. If changed to true, the test directory corresponding +# to the test run under LOCAL_LOG_PATH defined above will contain an +# `html` directory with the coverage report. +INCLUDE_COVERAGE = False + ################################################################## # USER SETTINGS OVERRIDES ################################################################## diff -Nru cloud-init-23.4.4/tests/integration_tests/modules/test_apt_functionality.py cloud-init-24.1.3/tests/integration_tests/modules/test_apt_functionality.py --- cloud-init-23.4.4/tests/integration_tests/modules/test_apt_functionality.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/modules/test_apt_functionality.py 2024-03-27 13:14:04.000000000 +0000 @@ -8,6 +8,7 @@ from cloudinit import gpg from cloudinit.config import cc_apt_configure from cloudinit.util import is_true +from tests.integration_tests.clouds import IntegrationCloud from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.integration_settings import PLATFORM from tests.integration_tests.releases import CURRENT_RELEASE, IS_UBUNTU @@ -336,6 +337,23 @@ assert 3 == sources_list.count(sec_deb_line) assert 3 == sources_list.count(sec_src_deb_line) + def test_no_duplicate_apt_sources(self, class_client: IntegrationInstance): + r = class_client.execute("apt-get update", use_sudo=True) + assert not re.match( + r"^W: Target Packages .+ is configured multiple times in", r.stderr + ) + + def test_disabled_apt_sources(self, class_client: IntegrationInstance): + feature_deb822 = is_true( + get_feature_flag_value(class_client, "APT_DEB822_SOURCE_LIST_FILE") + ) + if feature_deb822: + + assert ( + cc_apt_configure.UBUNTU_DEFAULT_APT_SOURCES_LIST.strip() + == class_client.read_from_file(ORIG_SOURCES_FILE) + ) + DEFAULT_DATA_WITH_URI = _DEFAULT_DATA.format( uri='uri: "http://something.random.invalid/ubuntu"' @@ -418,8 +436,6 @@ INSTALL_ANY_MISSING_RECOMMENDED_DEPENDENCIES = """\ #cloud-config -bootcmd: - - apt-get remove gpg -y apt: sources: test_keyserver: @@ -438,10 +454,24 @@ r" (gnupg software-properties-common|software-properties-common gnupg)" ) 
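Aside on the instances.py hunk above: the new `_apt_update` helper drives the apt cache refresh through `cloud-init single` so it runs at most once per instance. A minimal standalone sketch of the same invocation, assuming only that a `cloud-init` binary is on PATH; the temp-file path and error handling here are illustrative, not taken from the test suite:

    import subprocess

    # Minimal cloud-config that only requests an apt cache refresh.
    with open("/tmp/update-ci.yaml", "w") as f:
        f.write("#cloud-config\npackage_update: true\n")

    # `cloud-init single` runs exactly one config module; with
    # `--frequency instance`, repeat invocations are a no-op for the
    # same instance.
    result = subprocess.run(
        [
            "cloud-init", "single",
            "--name", "package_update_upgrade_install",
            "--frequency", "instance",
            "--file", "/tmp/update-ci.yaml",
        ],
        capture_output=True, text=True,
    )
    if result.returncode != 0 and result.stderr.startswith("usage:"):
        # Older cloud-init releases only accept --file before the
        # subcommand, hence the fallback mirrored from the diff above.
        result = subprocess.run(
            ["cloud-init", "--file", "/tmp/update-ci.yaml", "single",
             "--name", "package_update_upgrade_install",
             "--frequency", "instance"],
            capture_output=True, text=True,
        )
    # As in the helper above, any stderr output is treated as failure.
    if result.stderr:
        raise RuntimeError(f"Failed to update packages: {result.stderr}")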
+REMOVE_GPG_USERDATA = """ +#cloud-config +runcmd: + - DEBIAN_FRONTEND=noninteractive apt-get remove gpg -y +""" + @pytest.mark.skipif(not IS_UBUNTU, reason="Apt usage") -@pytest.mark.user_data(INSTALL_ANY_MISSING_RECOMMENDED_DEPENDENCIES) -def test_install_missing_deps(client: IntegrationInstance): - log = client.read_from_file("/var/log/cloud-init.log") - verify_clean_log(log) - assert re.search(RE_GPG_SW_PROPERTIES_INSTALLED, log) +def test_install_missing_deps(setup_image, session_cloud: IntegrationCloud): + # Two stage install: First stage: remove gpg noninteractively from image + instance1 = session_cloud.launch(user_data=REMOVE_GPG_USERDATA) + snapshot_id = instance1.snapshot() + instance1.destroy() + # Second stage: provide active apt user-data which will install missing gpg + with session_cloud.launch( + user_data=INSTALL_ANY_MISSING_RECOMMENDED_DEPENDENCIES, + launch_kwargs={"image_id": snapshot_id}, + ) as minimal_client: + log = minimal_client.read_from_file("/var/log/cloud-init.log") + verify_clean_log(log) + assert re.search(RE_GPG_SW_PROPERTIES_INSTALLED, log) diff -Nru cloud-init-23.4.4/tests/integration_tests/modules/test_ca_certs.py cloud-init-24.1.3/tests/integration_tests/modules/test_ca_certs.py --- cloud-init-23.4.4/tests/integration_tests/modules/test_ca_certs.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/modules/test_ca_certs.py 2024-03-27 13:14:04.000000000 +0000 @@ -125,7 +125,7 @@ "snap", "timezone", "ubuntu_autoinstall", - "ubuntu_advantage", + "ubuntu_pro", "ubuntu_drivers", "update_etc_hosts", "wireguard", diff -Nru cloud-init-23.4.4/tests/integration_tests/modules/test_cli.py cloud-init-24.1.3/tests/integration_tests/modules/test_cli.py --- cloud-init-23.4.4/tests/integration_tests/modules/test_cli.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/modules/test_cli.py 2024-03-27 13:14:04.000000000 +0000 @@ -7,6 +7,7 @@ from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.integration_settings import PLATFORM +from tests.integration_tests.releases import CURRENT_RELEASE VALID_USER_DATA = """\ #cloud-config @@ -44,10 +45,10 @@ assert result.ok assert "Valid schema user-data" in result.stdout.strip() result = client.execute("cloud-init status --long") - if not result.ok: - raise AssertionError( - f"Unexpected error from cloud-init status: {result}" - ) + assert 0 == result.return_code, ( + f"Unexpected exit {result.return_code} from cloud-init status:" + f" {result}" + ) @pytest.mark.skipif( @@ -67,8 +68,12 @@ " #cloud-boothook, #cloud-config" in result.stderr ) result = client.execute("cloud-init status --long") + if CURRENT_RELEASE.series in ("focal", "jammy", "lunar", "mantic"): + return_code = 0 # Stable releases don't change exit code behavior + else: + return_code = 2 # 23.4 and later will exit 2 on warnings assert ( - 2 == result.return_code + return_code == result.return_code ), f"Unexpected exit code {result.return_code}" @@ -79,8 +84,12 @@ PR #1175 """ result = client.execute("cloud-init status --long") + if CURRENT_RELEASE.series in ("focal", "jammy", "lunar", "mantic"): + return_code = 0 # Stable releases don't change exit code behavior + else: + return_code = 2 # 23.4 and later will exit 2 on warnings assert ( - 2 == result.return_code + return_code == result.return_code ), f"Unexpected exit code {result.return_code}" log = client.read_from_file("/var/log/cloud-init.log") warning = ( diff -Nru 
cloud-init-23.4.4/tests/integration_tests/modules/test_combined.py cloud-init-24.1.3/tests/integration_tests/modules/test_combined.py --- cloud-init-23.4.4/tests/integration_tests/modules/test_combined.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/modules/test_combined.py 2024-03-27 13:14:04.000000000 +0000 @@ -15,7 +15,6 @@ import pytest import cloudinit.config -from cloudinit.features import get_features from cloudinit.util import is_true from tests.integration_tests.decorators import retry from tests.integration_tests.instances import IntegrationInstance @@ -323,7 +322,13 @@ "/run/cloud-init/combined-cloud-config.json" ) data = json.loads(combined_json) - assert data["features"] == get_features() + expected_features = json.loads( + client.execute( + "python3 -c 'import json; from cloudinit import features; " + "print(json.dumps(features.get_features()))'" + ) + ) + assert data["features"] == expected_features assert data["system_info"]["default_user"]["name"] == "ubuntu" @pytest.mark.skipif( diff -Nru cloud-init-23.4.4/tests/integration_tests/modules/test_frequency_override.py cloud-init-24.1.3/tests/integration_tests/modules/test_frequency_override.py --- cloud-init-23.4.4/tests/integration_tests/modules/test_frequency_override.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/modules/test_frequency_override.py 2024-03-27 13:14:04.000000000 +0000 @@ -1,6 +1,7 @@ import pytest from tests.integration_tests.instances import IntegrationInstance +from tests.integration_tests.releases import CURRENT_RELEASE USER_DATA = """\ #cloud-config @@ -17,6 +18,17 @@ in client.read_from_file("/var/log/cloud-init.log") ) assert client.read_from_file("/var/tmp/hi").strip().count("hi") == 1 + if CURRENT_RELEASE.os == "ubuntu": + if CURRENT_RELEASE.series in ("focal", "jammy", "lunar", "mantic"): + # Stable series will block on snapd.seeded.service and create a + # semaphore file + assert client.execute("test -f /var/lib/cloud/snap-seeded.once").ok + else: + # Newer series will not block on snapd.seeded.service nor create a + # semaphore file + assert not client.execute( + "test -f /var/lib/cloud/snap-seeded.once" + ).ok # Change frequency of scripts_user to always config = client.read_from_file("/etc/cloud/cloud.cfg") diff -Nru cloud-init-23.4.4/tests/integration_tests/modules/test_hotplug.py cloud-init-24.1.3/tests/integration_tests/modules/test_hotplug.py --- cloud-init-23.4.4/tests/integration_tests/modules/test_hotplug.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/modules/test_hotplug.py 2024-03-27 13:14:04.000000000 +0000 @@ -1,12 +1,20 @@ +import contextlib import time from collections import namedtuple import pytest import yaml +from cloudinit.subp import subp +from tests.integration_tests.clouds import IntegrationCloud from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.integration_settings import PLATFORM -from tests.integration_tests.releases import CURRENT_RELEASE, FOCAL +from tests.integration_tests.releases import ( + CURRENT_RELEASE, + FOCAL, + UBUNTU_STABLE, +) +from tests.integration_tests.util import verify_clean_log USER_DATA = """\ #cloud-config @@ -15,11 +23,30 @@ when: ['hotplug'] """ +USER_DATA_HOTPLUG_DISABLED = """\ +#cloud-config +updates: + network: + when: ['boot-new-instance'] +""" + ip_addr = namedtuple("ip_addr", "interface state ip4 ip6") def _wait_till_hotplug_complete(client, expected_runs=1): for _ in range(60): + if 
client.execute("command -v systemctl").ok: + if "failed" == client.execute( + "systemctl is-active cloud-init-hotplugd.service" + ): + r = client.execute( + "systemctl status cloud-init-hotplugd.service" + ) + if not r.ok: + raise AssertionError( + "cloud-init-hotplugd.service failed: {r.stdout}" + ) + log = client.read_from_file("/var/log/cloud-init.log") if log.count("Exiting hotplug handler") == expected_runs: return log @@ -59,7 +86,7 @@ log = client.read_from_file("/var/log/cloud-init.log") assert "Exiting hotplug handler" not in log assert client.execute( - "test -f /etc/udev/rules.d/10-cloud-init-hook-hotplug.rules" + "test -f /etc/udev/rules.d/90-cloud-init-hook-hotplug.rules" ).ok # Add new NIC @@ -94,6 +121,79 @@ @pytest.mark.skipif( + PLATFORM not in ["lxd_container", "lxd_vm", "ec2", "openstack", "azure"], + reason=(f"HOTPLUG is not supported in {PLATFORM}."), +) +def _test_hotplug_enabled_by_cmd(client: IntegrationInstance): + assert "disabled" == client.execute( + "cloud-init devel hotplug-hook -s net query" + ) + ret = client.execute("cloud-init devel hotplug-hook -s net enable") + assert ret.ok, ret.stderr + log = client.read_from_file("/var/log/cloud-init.log") + assert ( + "hotplug-hook called with the following arguments: " + "{hotplug_action: enable" in log + ) + + assert "enabled" == client.execute( + "cloud-init devel hotplug-hook -s net query" + ) + log = client.read_from_file("/var/log/cloud-init.log") + assert ( + "hotplug-hook called with the following arguments: " + "{hotplug_action: query" in log + ) + assert client.execute( + "test -f /etc/udev/rules.d/90-cloud-init-hook-hotplug.rules" + ).ok + + +@pytest.mark.user_data(USER_DATA_HOTPLUG_DISABLED) +def test_hotplug_enable_cmd(client: IntegrationInstance): + _test_hotplug_enabled_by_cmd(client) + + +@pytest.mark.skipif( + PLATFORM != "ec2", + reason=( + f"Test was written for {PLATFORM} but can likely run on " + "other platforms." 
+ ), +) +@pytest.mark.user_data(USER_DATA_HOTPLUG_DISABLED) +def test_hotplug_enable_cmd_ec2(client: IntegrationInstance): + _test_hotplug_enabled_by_cmd(client) + ips_before = _get_ip_addr(client) + + # Add new NIC + added_ip = client.instance.add_network_interface() + _wait_till_hotplug_complete(client, expected_runs=4) + ips_after_add = _get_ip_addr(client) + new_addition = [ip for ip in ips_after_add if ip.ip4 == added_ip][0] + + assert len(ips_after_add) == len(ips_before) + 1 + assert added_ip not in [ip.ip4 for ip in ips_before] + assert added_ip in [ip.ip4 for ip in ips_after_add] + assert new_addition.state == "UP" + + netplan_cfg = client.read_from_file("/etc/netplan/50-cloud-init.yaml") + config = yaml.safe_load(netplan_cfg) + assert new_addition.interface in config["network"]["ethernets"] + + # Remove new NIC + client.instance.remove_network_interface(added_ip) + _wait_till_hotplug_complete(client, expected_runs=5) + ips_after_remove = _get_ip_addr(client) + assert len(ips_after_remove) == len(ips_before) + assert added_ip not in [ip.ip4 for ip in ips_after_remove] + + netplan_cfg = client.read_from_file("/etc/netplan/50-cloud-init.yaml") + config = yaml.safe_load(netplan_cfg) + assert new_addition.interface not in config["network"]["ethernets"] + + +@pytest.mark.skipif( PLATFORM != "openstack", reason=( f"Test was written for {PLATFORM} but can likely run on " @@ -105,7 +205,7 @@ log = client.read_from_file("/var/log/cloud-init.log") assert "Exiting hotplug handler" not in log assert client.execute( - "test -f /etc/udev/rules.d/10-cloud-init-hook-hotplug.rules" + "test -f /etc/udev/rules.d/90-cloud-init-hook-hotplug.rules" ).failed # Add new NIC @@ -124,3 +224,122 @@ assert "disabled" == client.execute( "cloud-init devel hotplug-hook -s net query" ) + + +@pytest.mark.skipif(PLATFORM != "ec2", reason="test is ec2 specific") +def test_multi_nic_hotplug(setup_image, session_cloud: IntegrationCloud): + """Tests that additional secondary NICs are routable from non-local + networks after the hotplug hook is executed when network updates + are configured on the HOTPLUG event.""" + ec2 = session_cloud.cloud_instance.client + with session_cloud.launch(launch_kwargs={}, user_data=USER_DATA) as client: + ips_before = _get_ip_addr(client) + instance_pub_ip = client.instance.ip + secondary_priv_ip = client.instance.add_network_interface() + response = ec2.describe_network_interfaces( + Filters=[ + { + "Name": "private-ip-address", + "Values": [secondary_priv_ip], + }, + ], + ) + nic_id = response["NetworkInterfaces"][0]["NetworkInterfaceId"] + + # Create Elastic IP + # Refactor after https://github.com/canonical/pycloudlib/issues/337 is + # completed + allocation = ec2.allocate_address(Domain="vpc") + try: + secondary_pub_ip = allocation["PublicIp"] + association = ec2.associate_address( + AllocationId=allocation["AllocationId"], + NetworkInterfaceId=nic_id, + ) + assert association["ResponseMetadata"]["HTTPStatusCode"] == 200 + + _wait_till_hotplug_complete(client) + + log_content = client.read_from_file("/var/log/cloud-init.log") + verify_clean_log(log_content) + + ips_after_add = _get_ip_addr(client) + + netplan_cfg = client.read_from_file( + "/etc/netplan/50-cloud-init.yaml" + ) + config = yaml.safe_load(netplan_cfg) + new_addition = [ + ip for ip in ips_after_add if ip.ip4 == secondary_priv_ip + ][0] + assert new_addition.interface in config["network"]["ethernets"] + new_nic_cfg = config["network"]["ethernets"][ + new_addition.interface + ] + assert "routing-policy" in new_nic_cfg + 
assert [{"from": secondary_priv_ip, "table": 101}] == new_nic_cfg[ + "routing-policy" + ] + + assert len(ips_after_add) == len(ips_before) + 1 + + # SSH over primary NIC works + subp("nc -w 5 -zv " + instance_pub_ip + " 22", shell=True) + + # THE TEST: SSH over secondary NIC works + subp("nc -w 5 -zv " + secondary_pub_ip + " 22", shell=True) + + # Remove new NIC + client.instance.remove_network_interface(secondary_priv_ip) + _wait_till_hotplug_complete(client, expected_runs=2) + + # SSH over primary NIC works + subp("nc -w 1 -zv " + instance_pub_ip + " 22", shell=True) + + ips_after_remove = _get_ip_addr(client) + assert len(ips_after_remove) == len(ips_before) + assert secondary_priv_ip not in [ip.ip4 for ip in ips_after_remove] + + netplan_cfg = client.read_from_file( + "/etc/netplan/50-cloud-init.yaml" + ) + config = yaml.safe_load(netplan_cfg) + assert new_addition.interface not in config["network"]["ethernets"] + + log_content = client.read_from_file("/var/log/cloud-init.log") + verify_clean_log(log_content) + finally: + with contextlib.suppress(Exception): + ec2.disassociate_address( + AssociationId=association["AssociationId"] + ) + with contextlib.suppress(Exception): + ec2.release_address(AllocationId=allocation["AllocationId"]) + + +@pytest.mark.skipif(PLATFORM != "ec2", reason="test is ec2 specific") +@pytest.mark.skipif( + CURRENT_RELEASE not in UBUNTU_STABLE, + reason="Docker repo does not contain pkgs for non stable releases.", +) +@pytest.mark.user_data(USER_DATA) +def test_no_hotplug_triggered_by_docker(client: IntegrationInstance): + # Install docker + r = client.execute("curl -fsSL https://get.docker.com | sh") + assert r.ok, r.stderr + + # Start and stop a container + r = client.execute("docker run -dit --name ff ubuntu:focal") + assert r.ok, r.stderr + r = client.execute("docker stop ff") + assert r.ok, r.stderr + + # Verify hotplug-hook was not called + log = client.read_from_file("/var/log/cloud-init.log") + assert "Exiting hotplug handler" not in log + assert "hotplug-hook" not in log + + # Verify hotplug was enabled + assert "enabled" == client.execute( + "cloud-init devel hotplug-hook -s net query" + ) diff -Nru cloud-init-23.4.4/tests/integration_tests/modules/test_keys_to_console.py cloud-init-24.1.3/tests/integration_tests/modules/test_keys_to_console.py --- cloud-init-23.4.4/tests/integration_tests/modules/test_keys_to_console.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/modules/test_keys_to_console.py 2024-03-27 13:14:04.000000000 +0000 @@ -11,13 +11,13 @@ BLACKLIST_USER_DATA = """\ #cloud-config -ssh_fp_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256] -ssh_key_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256] +ssh_fp_console_blacklist: [ecdsa-sha2-nistp256] +ssh_key_console_blacklist: [ecdsa-sha2-nistp256] """ BLACKLIST_ALL_KEYS_USER_DATA = """\ #cloud-config -ssh_fp_console_blacklist: [ssh-dsa, ssh-ecdsa, ssh-ed25519, ssh-rsa, ssh-dss, ecdsa-sha2-nistp256] +ssh_fp_console_blacklist: [ssh-ecdsa, ssh-ed25519, ssh-rsa, ecdsa-sha2-nistp256] """ # noqa: E501 DISABLED_USER_DATA = """\ @@ -40,7 +40,7 @@ class TestKeysToConsoleBlacklist: """Test that the blacklist options work as expected.""" - @pytest.mark.parametrize("key_type", ["DSA", "ECDSA"]) + @pytest.mark.parametrize("key_type", ["ECDSA"]) def test_excluded_keys(self, class_client, key_type): syslog = class_client.read_from_file("/var/log/syslog") assert "({})".format(key_type) not in syslog @@ -73,7 +73,7 @@ class TestKeysToConsoleDisabled: """Test 
that output can be fully disabled.""" - @pytest.mark.parametrize("key_type", ["DSA", "ECDSA", "ED25519", "RSA"]) + @pytest.mark.parametrize("key_type", ["ECDSA", "ED25519", "RSA"]) def test_keys_excluded(self, class_client, key_type): syslog = class_client.read_from_file("/var/log/syslog") assert "({})".format(key_type) not in syslog diff -Nru cloud-init-23.4.4/tests/integration_tests/modules/test_set_hostname.py cloud-init-24.1.3/tests/integration_tests/modules/test_set_hostname.py --- cloud-init-23.4.4/tests/integration_tests/modules/test_set_hostname.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/modules/test_set_hostname.py 2024-03-27 13:14:04.000000000 +0000 @@ -4,13 +4,13 @@ one updates the hostname and fqdn of the system. For both of these tests we will check is the changes requested by the user data are being respected after the system is boot. - -(This is ported from -``tests/cloud_tests/testcases/modules/set_hostname.yaml`` and -``tests/cloud_tests/testcases/modules/set_hostname_fqdn.yaml``.)""" +""" import pytest +from tests.integration_tests.integration_settings import PLATFORM +from tests.integration_tests.releases import CURRENT_RELEASE, NOBLE + USER_DATA_HOSTNAME = """\ #cloud-config hostname: cloudinit2 @@ -30,35 +30,100 @@ fqdn: cloudinit2.test.io """ +REQUIRES_FILE_FLAG = PLATFORM == "gce" and CURRENT_RELEASE >= NOBLE + @pytest.mark.ci class TestHostname: @pytest.mark.user_data(USER_DATA_HOSTNAME) def test_hostname(self, client): hostname_output = client.execute("hostname") - assert "cloudinit2" in hostname_output.strip() + if REQUIRES_FILE_FLAG: + assert "cloudinit2" not in hostname_output.strip() + else: + assert "cloudinit2" in hostname_output.strip(), CURRENT_RELEASE @pytest.mark.user_data(USER_DATA_PREFER_FQDN.format(True)) def test_prefer_fqdn(self, client): hostname_output = client.execute("hostname") - assert "cloudinit2.test.io" in hostname_output.strip() + if REQUIRES_FILE_FLAG: + assert "cloudinit2.test.io" not in hostname_output.strip() + else: + assert "cloudinit2.test.io" in hostname_output.strip() @pytest.mark.user_data(USER_DATA_PREFER_FQDN.format(False)) def test_prefer_short_hostname(self, client): hostname_output = client.execute("hostname") - assert "cloudinit1" in hostname_output.strip() + if REQUIRES_FILE_FLAG: + assert "cloudinit1" not in hostname_output.strip() + else: + assert "cloudinit1" in hostname_output.strip() @pytest.mark.user_data(USER_DATA_FQDN) def test_hostname_and_fqdn(self, client): hostname_output = client.execute("hostname") + fqdn_output = client.execute("hostname --fqdn") + host_output = client.execute("grep ^127 /etc/hosts") + + assert "127.0.0.1 localhost" in host_output + if REQUIRES_FILE_FLAG: + assert "cloudinit1" not in hostname_output.strip() + assert "cloudinit2.i9n.cloud-init.io" not in fqdn_output.strip() + assert ( + f"127.0.1.1 {fqdn_output} {hostname_output}" not in host_output + ) + else: + assert "cloudinit1" in hostname_output.strip() + assert "cloudinit2.i9n.cloud-init.io" in fqdn_output.strip() + assert f"127.0.1.1 {fqdn_output} {hostname_output}" in host_output + + +USER_DATA_HOSTNAME_FILE = """\ +#cloud-config +hostname: cloudinit2 +create_hostname_file: true +""" + +USER_DATA_FQDN_FILE = """\ +#cloud-config +manage_etc_hosts: true +hostname: cloudinit1 +fqdn: cloudinit2.i9n.cloud-init.io +create_hostname_file: true +""" + +USER_DATA_PREFER_FQDN_FILE = """\ +#cloud-config +prefer_fqdn_over_hostname: {} +hostname: cloudinit1 +fqdn: cloudinit2.test.io +create_hostname_file: 
true +""" + + +class TestCreateHostnameFile: + @pytest.mark.user_data(USER_DATA_HOSTNAME_FILE) + def test_hostname(self, client): + hostname_output = client.execute("hostname") + assert "cloudinit2" in hostname_output.strip() + + @pytest.mark.user_data(USER_DATA_PREFER_FQDN_FILE.format(True)) + def test_prefer_fqdn(self, client): + hostname_output = client.execute("hostname") + assert "cloudinit2.test.io" in hostname_output.strip() + + @pytest.mark.user_data(USER_DATA_PREFER_FQDN_FILE.format(False)) + def test_prefer_short_hostname(self, client): + hostname_output = client.execute("hostname") assert "cloudinit1" in hostname_output.strip() + @pytest.mark.user_data(USER_DATA_FQDN_FILE) + def test_hostname_and_fqdn(self, client): + hostname_output = client.execute("hostname") fqdn_output = client.execute("hostname --fqdn") - assert "cloudinit2.i9n.cloud-init.io" in fqdn_output.strip() - host_output = client.execute("grep ^127 /etc/hosts") - assert ( - "127.0.1.1 {} {}".format(fqdn_output, hostname_output) - in host_output - ) + + assert "cloudinit1" in hostname_output.strip() + assert "cloudinit2.i9n.cloud-init.io" in fqdn_output.strip() + assert f"127.0.1.1 {fqdn_output} {hostname_output}" in host_output assert "127.0.0.1 localhost" in host_output diff -Nru cloud-init-23.4.4/tests/integration_tests/modules/test_set_password.py cloud-init-24.1.3/tests/integration_tests/modules/test_set_password.py --- cloud-init-23.4.4/tests/integration_tests/modules/test_set_password.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/modules/test_set_password.py 2024-03-27 13:14:04.000000000 +0000 @@ -8,6 +8,7 @@ they use a mixin to share their test definitions, because we can (of course) only specify one user-data per instance. """ + import pytest import yaml @@ -162,9 +163,17 @@ def test_explicit_password_set_correctly(self, class_client): """Test that an explicitly-specified password is set correctly.""" + minor_version = int( + class_client.execute( + "python3 -c 'import sys;print(sys.version_info[1])'" + ).strip() + ) + if minor_version > 12: + pytest.xfail("Instance under test doesn't have 'crypt' in stdlib") shadow_users, _ = self._fetch_and_parse_etc_shadow(class_client) fmt_and_salt = shadow_users["tom"].rsplit("$", 1)[0] + GEN_CRYPT_CONTENT = ( "import crypt\n" f"print(crypt.crypt('mypassword123!', '{fmt_and_salt}'))\n" diff -Nru cloud-init-23.4.4/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py cloud-init-24.1.3/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py --- cloud-init-23.4.4/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py 2024-03-27 13:14:04.000000000 +0000 @@ -49,7 +49,6 @@ assert re.search(r"256 SHA256:.*(ECDSA)", syslog_output) is not None assert re.search(r"256 SHA256:.*(ED25519)", syslog_output) is not None - assert re.search(r"1024 SHA256:.*(DSA)", syslog_output) is None assert re.search(r"2048 SHA256:.*(RSA)", syslog_output) is None diff -Nru cloud-init-23.4.4/tests/integration_tests/modules/test_ssh_generate.py cloud-init-24.1.3/tests/integration_tests/modules/test_ssh_generate.py --- cloud-init-23.4.4/tests/integration_tests/modules/test_ssh_generate.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/modules/test_ssh_generate.py 2024-03-27 13:14:04.000000000 +0000 @@ -25,8 +25,6 @@ @pytest.mark.parametrize( "ssh_key_path", ( - 
"/etc/ssh/ssh_host_dsa_key.pub", - "/etc/ssh/ssh_host_dsa_key", "/etc/ssh/ssh_host_rsa_key.pub", "/etc/ssh/ssh_host_rsa_key", ), diff -Nru cloud-init-23.4.4/tests/integration_tests/modules/test_ssh_keys_provided.py cloud-init-24.1.3/tests/integration_tests/modules/test_ssh_keys_provided.py --- cloud-init-23.4.4/tests/integration_tests/modules/test_ssh_keys_provided.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/modules/test_ssh_keys_provided.py 2024-03-27 13:14:04.000000000 +0000 @@ -47,20 +47,6 @@ -----END RSA PRIVATE KEY----- rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgTLnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4+XnyVeNPjfBXw4IyXoqxhfIF16Azfk022iejgjiYssoUxH31M60OfqJhxo16dWEXdkKP1nac06VOt1zS5yEeooyvEuMJEJSsv3VR/7GKhMX3TVhEz5moLmVP3bIAvvoXio8X4urVC1R819QjDC86nlxwNks/GKPRi/IHO5tjJ72Eke7KNsm/vxHgkdX4vZaHNKhfdb/pavFXN5eoUaofz3hxw5oL/u2epI/pXyUhDp8Tb5wO6slykzcIlGCSd0YeO1TnljvViRx0uSxIy97N root@xenial-lxd rsa_certificate: ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgMpgBP4Phn3L8I7Vqh7lmHKcOfIokEvSEbHDw83Y3JloAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgTLnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4+XnyVeNPjfBXw4IyXoqxhfIF16Azfk022iejgjiYssoUxH31M60OfqJhxo16dWEXdkKP1nac06VOt1zS5yEeooyvEuMJEJSsv3VR/7GKhMX3TVhEz5moLmVP3bIAvvoXio8X4urVC1R819QjDC86nlxwNks/GKPRi/IHO5tjJ72Eke7KNsm/vxHgkdX4vZaHNKhfdb/pavFXN5eoUaofz3hxw5oL/u2epI/pXyUhDp8Tb5wO6slykzcIlGCSd0YeO1TnljvViRx0uSxIy97NAAAAAAAAAAAAAAACAAAACnhlbmlhbC1seGQAAAAAAAAAAF+vVEIAAAAAYY83bgAAAAAAAAAAAAAAAAAAADMAAAALc3NoLWVkMjU1MTkAAAAgz4SlDwbq53ZrRsnS6ISdwxgFDRpnEX44K8jFmLpI9NAAAABTAAAAC3NzaC1lZDI1NTE5AAAAQMWpiRWKNMFvRX0g6OQOELMqDhtNBpkIN92IyO25qiY2oDSd1NyVme6XnGDFt8CS7z5NufV04doP4aacLOBbQww= root@xenial-lxd - dsa_private: | - -----BEGIN DSA PRIVATE KEY----- - MIIBuwIBAAKBgQD5Fstc23IVSDe6k4DNP8smPKuEWUvHDTOGaXrhOVAfzZ6+jklP - 55mzvC7jO53PWWC31hq10xBoWdev0WtcNF9Tv+4bAa1263y51Rqo4GI7xx+xic1d - mLqqfYijBT9k48J/1tV0cs1Wjs6FP/IJTD/kYVC930JjYQMi722lBnUxsQIVAL7i - z3fTGKTvSzvW0wQlwnYpS2QFAoGANp+KdyS9V93HgxGQEN1rlj/TSv/a3EVdCKtE - nQf55aPHxDAVDVw5JtRh4pZbbRV4oGRPc9KOdjo5BU28vSM3Lmhkb+UaaDXwHkgI - nK193o74DKjADWZxuLyyiKHiMOhxozoxDfjWxs8nz6uqvSW0pr521EwIY6RajbED - nZ2a3GkCgYEAyoUomNRB6bmpsIfzt8zdtqLP5umIj2uhr9MVPL8/QdbxmJ72Z7pf - Q2z1B7QAdIBGOlqJXtlau7ABhWK29Efe+99ObyTSSdDc6RCDeAwUmBAiPRQhDH2E - wExw3doDSCUb28L1B50wBzQ8mC3KXp6C7IkBXWspb16DLHUHFSI8bkICFA5kVUcW - nCPOXEQsayANi8+Cb7BH - -----END DSA PRIVATE KEY----- - dsa_public: ssh-dss AAAAB3NzaC1kc3MAAACBAPkWy1zbchVIN7qTgM0/yyY8q4RZS8cNM4ZpeuE5UB/Nnr6OSU/nmbO8LuM7nc9ZYLfWGrXTEGhZ16/Ra1w0X1O/7hsBrXbrfLnVGqjgYjvHH7GJzV2Yuqp9iKMFP2Tjwn/W1XRyzVaOzoU/8glMP+RhUL3fQmNhAyLvbaUGdTGxAAAAFQC+4s930xik70s71tMEJcJ2KUtkBQAAAIA2n4p3JL1X3ceDEZAQ3WuWP9NK/9rcRV0Iq0SdB/nlo8fEMBUNXDkm1GHillttFXigZE9z0o52OjkFTby9IzcuaGRv5RpoNfAeSAicrX3ejvgMqMANZnG4vLKIoeIw6HGjOjEN+NbGzyfPq6q9JbSmvnbUTAhjpFqNsQOdnZrcaQAAAIEAyoUomNRB6bmpsIfzt8zdtqLP5umIj2uhr9MVPL8/QdbxmJ72Z7pfQ2z1B7QAdIBGOlqJXtlau7ABhWK29Efe+99ObyTSSdDc6RCDeAwUmBAiPRQhDH2EwExw3doDSCUb28L1B50wBzQ8mC3KXp6C7IkBXWspb16DLHUHFSI8bkI= root@xenial-lxd ed25519_private: | -----BEGIN OPENSSH PRIVATE KEY----- b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW @@ -88,16 +74,6 @@ "config_path,expected_out", ( ( - "/etc/ssh/ssh_host_dsa_key.pub", - "AAAAB3NzaC1kc3MAAACBAPkWy1zbchVIN7qTgM0/yyY8q4R" - "ZS8cNM4ZpeuE5UB/Nnr6OSU/nmbO8LuM", - ), - ( - "/etc/ssh/ssh_host_dsa_key", - "MIIBuwIBAAKBgQD5Fstc23IVSDe6k4DNP8smPKuEWUvHDTOGaXr" - "hOVAfzZ6+jklP", - ), - ( "/etc/ssh/ssh_host_rsa_key.pub", 
"AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgT" "LnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4", diff -Nru cloud-init-23.4.4/tests/integration_tests/modules/test_ubuntu_advantage.py cloud-init-24.1.3/tests/integration_tests/modules/test_ubuntu_advantage.py --- cloud-init-23.4.4/tests/integration_tests/modules/test_ubuntu_advantage.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/modules/test_ubuntu_advantage.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,243 +0,0 @@ -import json -import logging -import os - -import pytest -from pycloudlib.cloud import ImageType - -from tests.integration_tests.clouds import IntegrationCloud -from tests.integration_tests.conftest import get_validated_source -from tests.integration_tests.instances import ( - CloudInitSource, - IntegrationInstance, -) -from tests.integration_tests.integration_settings import PLATFORM -from tests.integration_tests.releases import ( - BIONIC, - CURRENT_RELEASE, - FOCAL, - IS_UBUNTU, - JAMMY, -) -from tests.integration_tests.util import verify_clean_log - -LOG = logging.getLogger("integration_testing.test_ubuntu_advantage") - -CLOUD_INIT_UA_TOKEN = os.environ.get("CLOUD_INIT_UA_TOKEN") - -ATTACH_FALLBACK = """\ -#cloud-config -ubuntu_advantage: - features: - disable_auto_attach: true - token: {token} -""" - -ATTACH = """\ -#cloud-config -ubuntu_advantage: - token: {token} - enable: - - esm-infra -""" - -PRO_AUTO_ATTACH_DISABLED = """\ -#cloud-config -ubuntu_advantage: - features: - disable_auto_attach: true -""" - -PRO_DAEMON_DISABLED = """\ -#cloud-config -# Disable UA daemon (only needed in GCE) -ubuntu_advantage: - features: - disable_auto_attach: true -bootcmd: -- sudo systemctl mask ubuntu-advantage.service -""" - -AUTO_ATTACH_CUSTOM_SERVICES = """\ -#cloud-config -ubuntu_advantage: - enable: - - livepatch -""" - - -def did_ua_service_noop(client: IntegrationInstance) -> bool: - ua_log = client.read_from_file("/var/log/ubuntu-advantage.log") - return ( - "Skipping auto-attach and deferring to cloud-init to setup and" - " configure auto-attach" in ua_log - ) - - -def is_attached(client: IntegrationInstance) -> bool: - status_resp = client.execute("sudo pro status --format json") - assert status_resp.ok - status = json.loads(status_resp.stdout) - return bool(status.get("attached")) - - -def get_services_status(client: IntegrationInstance) -> dict: - """Creates a map of service -> is_enable. - - pro status --format json contains a key with list of service objects like: - - { - ... - "services":[ - { - "available":"yes", - "blocked_by":[ - - ], - "description":"Common Criteria EAL2 Provisioning Packages", - "description_override":null, - "entitled":"yes", - "name":"cc-eal", - "status":"disabled", - "status_details":"CC EAL2 is not configured" - }, - ... - ] - } - - :return: Dict where the keys are ua service names and the values - are booleans representing if the service is enable or not. 
- """ - status_resp = client.execute("sudo pro status --format json") - assert status_resp.ok - status = json.loads(status_resp.stdout) - return { - svc["name"]: svc["status"] == "enabled" for svc in status["services"] - } - - -@pytest.mark.adhoc -@pytest.mark.skipif(not IS_UBUNTU, reason="Test is Ubuntu specific") -@pytest.mark.skipif( - not CLOUD_INIT_UA_TOKEN, reason="CLOUD_INIT_UA_TOKEN env var not provided" -) -class TestUbuntuAdvantage: - @pytest.mark.user_data(ATTACH_FALLBACK.format(token=CLOUD_INIT_UA_TOKEN)) - def test_valid_token(self, client: IntegrationInstance): - log = client.read_from_file("/var/log/cloud-init.log") - verify_clean_log(log) - assert is_attached(client) - - @pytest.mark.user_data(ATTACH.format(token=CLOUD_INIT_UA_TOKEN)) - def test_idempotency(self, client: IntegrationInstance): - log = client.read_from_file("/var/log/cloud-init.log") - verify_clean_log(log) - assert is_attached(client) - - # Clean reboot to change instance-id and trigger cc_ua in next boot - assert client.execute("cloud-init clean --logs").ok - client.restart() - - log = client.read_from_file("/var/log/cloud-init.log") - verify_clean_log(log) - assert is_attached(client) - - # Assert service-already-enabled handling for esm-infra. - # First totally destroy ubuntu-advantage-tools data and state. - # This is a hack but results in a system that thinks it - # is detached even though esm-infra is still enabled. - # When cloud-init runs again, it will successfully re-attach - # and then notice that esm-infra is already enabled. - client.execute("rm -rf /var/lib/ubuntu-advantage") - assert client.execute("cloud-init clean --logs").ok - client.restart() - log = client.read_from_file("/var/log/cloud-init.log") - verify_clean_log(log) - assert "Service `esm-infra` already enabled" in log - - -def maybe_install_cloud_init(session_cloud: IntegrationCloud): - source = get_validated_source(session_cloud) - - launch_kwargs = { - "image_id": session_cloud.cloud_instance.daily_image( - CURRENT_RELEASE.series, image_type=ImageType.PRO - ) - } - - if source is CloudInitSource.NONE: - LOG.info( - "No need to customize cloud-init version. Return without spawning" - " an extra instance" - ) - return launch_kwargs - - user_data = ( - PRO_DAEMON_DISABLED - if session_cloud.settings.PLATFORM == "gce" - else PRO_AUTO_ATTACH_DISABLED - ) - - with session_cloud.launch( - user_data=user_data, - launch_kwargs=launch_kwargs, - ) as client: - # TODO: Re-enable this check after cloud images contain - # cloud-init 23.4. - # Explanation: We have to include something under - # user-data.ubuntu_advantage to skip the automatic auto-attach - # (driven by ua-auto-attach.service and/or ubuntu-advantage.service) - # while customizing the instance but in cloud-init < 23.4, - # user-data.ubuntu_advantage requires a token key. - - # log = client.read_from_file("/var/log/cloud-init.log") - # verify_clean_log(log) - - assert not is_attached( - client - ), "Test precondition error. Instance is auto-attached." 
- - if session_cloud.settings.PLATFORM == "gce": - LOG.info( - "Restore `ubuntu-advantage.service` original status for next" - " boot" - ) - assert client.execute( - "sudo systemctl unmask ubuntu-advantage.service" - ).ok - - client.install_new_cloud_init(source) - client.destroy() - - return {"image_id": session_cloud.snapshot_id} - - -@pytest.mark.skipif( - not all([IS_UBUNTU, CURRENT_RELEASE in [BIONIC, FOCAL, JAMMY]]), - reason="Test runs on Ubuntu LTS releases only", -) -@pytest.mark.skipif( - PLATFORM not in ["azure", "ec2", "gce"], - reason=f"Pro isn't offered on {PLATFORM}.", -) -class TestUbuntuAdvantagePro: - def test_custom_services(self, session_cloud: IntegrationCloud): - launch_kwargs = maybe_install_cloud_init(session_cloud) - with session_cloud.launch( - user_data=AUTO_ATTACH_CUSTOM_SERVICES, - launch_kwargs=launch_kwargs, - ) as client: - log = client.read_from_file("/var/log/cloud-init.log") - verify_clean_log(log) - assert did_ua_service_noop(client) - assert is_attached(client) - services_status = get_services_status(client) - assert services_status.pop( - "livepatch" - ), "livepatch expected to be enabled" - enabled_services = { - svc for svc, status in services_status.items() if status - } - assert ( - not enabled_services - ), f"Only livepatch must be enabled. Found: {enabled_services}" diff -Nru cloud-init-23.4.4/tests/integration_tests/modules/test_ubuntu_pro.py cloud-init-24.1.3/tests/integration_tests/modules/test_ubuntu_pro.py --- cloud-init-23.4.4/tests/integration_tests/modules/test_ubuntu_pro.py 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/modules/test_ubuntu_pro.py 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,274 @@ +import json +import logging +import os + +import pytest +from pycloudlib.cloud import ImageType + +from tests.integration_tests.clouds import IntegrationCloud +from tests.integration_tests.conftest import get_validated_source +from tests.integration_tests.instances import ( + CloudInitSource, + IntegrationInstance, +) +from tests.integration_tests.integration_settings import PLATFORM +from tests.integration_tests.releases import ( + BIONIC, + CURRENT_RELEASE, + FOCAL, + IS_UBUNTU, + JAMMY, +) +from tests.integration_tests.util import verify_clean_log + +LOG = logging.getLogger("integration_testing.test_ubuntu_pro") + +CLOUD_INIT_UA_TOKEN = os.environ.get("CLOUD_INIT_UA_TOKEN") + +ATTACH_FALLBACK = """\ +#cloud-config +ubuntu_pro: + features: + disable_auto_attach: true + token: {token} +""" + +ATTACH = """\ +#cloud-config +ubuntu_pro: + token: {token} + enable: + - esm-infra +""" + +PRO_AUTO_ATTACH_DISABLED = """\ +#cloud-config +# ubuntu_advantage config kept as duplication until the release of this +# commit in proclient (ubuntu-advantage-tools v. 32): +# https://github.com/canonical/ubuntu-pro-client/commit/7bb69e3ad +# Without a top-level ubuntu_advantage key Pro will automatically attach +# instead of defer to cloud-init for all attach operations. +ubuntu_advantage: + features: + disable_auto_attach: true +ubuntu_pro: + features: + disable_auto_attach: true +""" + +PRO_DAEMON_DISABLED = """\ +#cloud-config +# Disable Pro daemon (only needed in GCE) +# Drop ubuntu_advantage key once ubuntu-advantage-tools v. 32 is SRU'd +ubuntu_advantage: + features: + disable_auto_attach: true +ubuntu_pro: + features: + disable_auto_attach: true +bootcmd: +- sudo systemctl mask ubuntu-advantage.service +""" + +AUTO_ATTACH_CUSTOM_SERVICES = """\ +#cloud-config +# Drop ubuntu_advantage key once ubuntu-advantage-tools v. 
32 is SRU'd +ubuntu_advantage: + enable: + - esm-infra +ubuntu_pro: + enable: + - esm-infra +""" + + +def did_ua_service_noop(client: IntegrationInstance) -> bool: + ua_log = client.read_from_file("/var/log/ubuntu-advantage.log") + return ( + "Skipping auto-attach and deferring to cloud-init to setup and" + " configure auto-attach" in ua_log + ) + + +def is_attached(client: IntegrationInstance) -> bool: + status_resp = client.execute("sudo pro status --format json") + assert status_resp.ok + status = json.loads(status_resp.stdout) + return bool(status.get("attached")) + + +def get_services_status(client: IntegrationInstance) -> dict: + """Creates a map of service -> is_enabled. + + pro status --format json contains a key with list of service objects like: + + { + ... + "services":[ + { + "available":"yes", + "blocked_by":[ + + ], + "description":"Common Criteria EAL2 Provisioning Packages", + "description_override":null, + "entitled":"yes", + "name":"cc-eal", + "status":"disabled", + "status_details":"CC EAL2 is not configured" + }, + ... + ] + } + + :return: Dict where the keys are ua service names and the values + are booleans representing if the service is enabled or not. + """ + status_resp = client.execute("sudo pro status --format json") + assert status_resp.ok + status = json.loads(status_resp.stdout) + return { + svc["name"]: svc["status"] in ("enabled", "warning") + for svc in status["services"] + } + + +@pytest.mark.adhoc +@pytest.mark.skipif(not IS_UBUNTU, reason="Test is Ubuntu specific") +@pytest.mark.skipif( + not CLOUD_INIT_UA_TOKEN, reason="CLOUD_INIT_UA_TOKEN env var not provided" +) +class TestUbuntuAdvantage: + @pytest.mark.user_data(ATTACH_FALLBACK.format(token=CLOUD_INIT_UA_TOKEN)) + def test_valid_token(self, client: IntegrationInstance): + log = client.read_from_file("/var/log/cloud-init.log") + verify_clean_log(log) + assert is_attached(client) + client.execute("pro detach") + # Replace ubuntu_pro with previously named ubuntu_advantage + client.execute( + "sed -i 's/ubuntu_pro$/ubuntu_advantage/' /etc/cloud/cloud.cfg" + ) + client.restart() + status_resp = client.execute("cloud-init status --format json") + status = json.loads(status_resp.stdout) + assert ( + "Module has been renamed from cc_ubuntu_advantage to cc_ubuntu_pro" + in "\n".join(status["recoverable_errors"]["DEPRECATED"]) + ) + assert is_attached(client) + + @pytest.mark.user_data(ATTACH.format(token=CLOUD_INIT_UA_TOKEN)) + def test_idempotency(self, client: IntegrationInstance): + log = client.read_from_file("/var/log/cloud-init.log") + verify_clean_log(log) + assert is_attached(client) + + # Clean reboot to change instance-id and trigger cc_ua in next boot + assert client.execute("cloud-init clean --logs").ok + client.restart() + + log = client.read_from_file("/var/log/cloud-init.log") + verify_clean_log(log) + assert is_attached(client) + + # Assert service-already-enabled handling for esm-infra. + # First totally destroy ubuntu-advantage-tools data and state. + # This is a hack but results in a system that thinks it + # is detached even though esm-infra is still enabled. + # When cloud-init runs again, it will successfully re-attach + # and then notice that esm-infra is already enabled.
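For reference, the mapping built by the get_services_status helper above, worked through with a fabricated three-service payload (real `pro status --format json` output carries many more fields per service). The membership test counts "warning" as enabled, presumably because newer Pro clients report "warning" for a service that is enabled but has a non-fatal issue:

    import json

    # Fabricated payload, illustrative only; fields other than "services",
    # "name" and "status" are omitted here.
    sample = '''{"services": [
        {"name": "esm-infra", "status": "enabled"},
        {"name": "esm-apps", "status": "warning"},
        {"name": "livepatch", "status": "disabled"}]}'''
    status = json.loads(sample)
    services = {
        svc["name"]: svc["status"] in ("enabled", "warning")
        for svc in status["services"]
    }
    assert services == {"esm-infra": True, "esm-apps": True, "livepatch": False}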
+ client.execute("rm -rf /var/lib/ubuntu-advantage") + assert client.execute("cloud-init clean --logs").ok + client.restart() + log = client.read_from_file("/var/log/cloud-init.log") + verify_clean_log(log) + assert "Service `esm-infra` already enabled" in log + + +def maybe_install_cloud_init(session_cloud: IntegrationCloud): + source = get_validated_source(session_cloud) + + launch_kwargs = { + "image_id": session_cloud.cloud_instance.daily_image( + CURRENT_RELEASE.series, image_type=ImageType.PRO + ) + } + + if source is CloudInitSource.NONE: + LOG.info( + "No need to customize cloud-init version. Return without spawning" + " an extra instance" + ) + return launch_kwargs + + user_data = ( + PRO_DAEMON_DISABLED + if session_cloud.settings.PLATFORM == "gce" + else PRO_AUTO_ATTACH_DISABLED + ) + + with session_cloud.launch( + user_data=user_data, + launch_kwargs=launch_kwargs, + ) as client: + # TODO: Re-enable this check after cloud images contain + # cloud-init 23.4. + # Explanation: We have to include something under + # user-data.ubuntu_pro to skip the automatic auto-attach + # (driven by ua-auto-attach.service and/or ubuntu-advantage.service) + # while customizing the instance but in cloud-init < 23.4, + # user-data.ubuntu_pro requires a token key. + + # log = client.read_from_file("/var/log/cloud-init.log") + # verify_clean_log(log) + + assert not is_attached( + client + ), "Test precondition error. Instance is auto-attached." + + if session_cloud.settings.PLATFORM == "gce": + LOG.info( + "Restore `ubuntu-advantage.service` original status for next" + " boot" + ) + assert client.execute( + "sudo systemctl unmask ubuntu-advantage.service" + ).ok + + client.install_new_cloud_init(source) + session_cloud.snapshot_id = client.snapshot() + client.destroy() + + return {"image_id": session_cloud.snapshot_id} + + +@pytest.mark.skipif( + not all([IS_UBUNTU, CURRENT_RELEASE in [BIONIC, FOCAL, JAMMY]]), + reason="Test runs on Ubuntu LTS releases only", +) +@pytest.mark.skipif( + PLATFORM not in ["azure", "ec2", "gce"], + reason=f"Pro isn't offered on {PLATFORM}.", +) +class TestUbuntuAdvantagePro: + def test_custom_services(self, session_cloud: IntegrationCloud): + launch_kwargs = maybe_install_cloud_init(session_cloud) + with session_cloud.launch( + user_data=AUTO_ATTACH_CUSTOM_SERVICES, + launch_kwargs=launch_kwargs, + ) as client: + log = client.read_from_file("/var/log/cloud-init.log") + verify_clean_log(log) + assert did_ua_service_noop(client) + assert is_attached(client) + services_status = get_services_status(client) + assert services_status.pop( + "esm-infra" + ), "esm-infra expected to be enabled" + enabled_services = { + svc for svc, status in services_status.items() if status + } + assert ( + not enabled_services + ), f"Only esm-infra must be enabled.
Found: {enabled_services}" diff -Nru cloud-init-23.4.4/tests/integration_tests/net/test_dhcp.py cloud-init-24.1.3/tests/integration_tests/net/test_dhcp.py --- cloud-init-23.4.4/tests/integration_tests/net/test_dhcp.py 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/net/test_dhcp.py 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,91 @@ +"""Integration tests related to cloud-init dhcp.""" + +import pytest + +from tests.integration_tests.integration_settings import PLATFORM +from tests.integration_tests.releases import CURRENT_RELEASE, IS_UBUNTU, NOBLE +from tests.integration_tests.util import verify_clean_log + + +@pytest.mark.skipif(not IS_UBUNTU, reason="ubuntu-specific tests") +@pytest.mark.skipif( + PLATFORM not in ["azure", "ec2", "gce", "openstack"], + reason="not all platforms require dhcp", +) +class TestDHCP: + """Integration tests relating to dhcp""" + + @pytest.mark.skipif( + CURRENT_RELEASE >= NOBLE, reason="noble and later use dhcpcd" + ) + def test_old_ubuntu_uses_isc_dhclient_by_default(self, client): + """verify that old releases use dhclient""" + log = client.read_from_file("/var/log/cloud-init.log") + assert "DHCP client selected: dhclient" in log + verify_clean_log(log) + + @pytest.mark.xfail( + reason=( + "Noble images have dhclient installed and ordered first in their" + "configuration. Until this changes, dhcpcd will not be used" + ) + ) + @pytest.mark.skipif( + CURRENT_RELEASE < NOBLE, reason="pre-noble uses dhclient" + ) + def test_noble_and_newer_uses_dhcpcd_by_default(self, client): + """verify that noble will use dhcpcd""" + log = client.read_from_file("/var/log/cloud-init.log") + assert "DHCP client selected: dhcpcd" in log + assert ( + ", DHCP is still running" not in log + ), "cloud-init leaked a dhcp daemon that is still running" + verify_clean_log(log) + + @pytest.mark.skipif( + CURRENT_RELEASE < NOBLE, + reason="earlier Ubuntu releases have a package named dhcpcd5", + ) + @pytest.mark.parametrize( + "dhcp_client, package", + [ + ("dhcpcd", "dhcpcd-base"), + ("udhcpc", "udhcpc"), + ], + ) + def test_noble_and_newer_force_client(self, client, dhcp_client, package): + """force noble to use dhcpcd and test that it worked""" + assert client.execute(f"apt update && apt install -yq {package}").ok + assert client.execute( + "sed -i 's|" + "dhcp_client_priority.*$" + f"|dhcp_client_priority: [{dhcp_client}]" + "|' /etc/cloud/cloud.cfg" + ).ok + client.execute("cloud-init clean --logs") + client.restart() + log = client.read_from_file("/var/log/cloud-init.log") + for line in log.split("\n"): + if "DHCP client selected" in line: + assert ( + f"DHCP client selected: {dhcp_client}" in line + ), f"Selected incorrect dhcp client: {line}" + break + else: + assert False, "No dhcp client selected" + assert "Received dhcp lease on" in log, "No lease received" + assert ( + ", DHCP is still running" not in log + ), "cloud-init leaked a dhcp daemon that is still running" + if not "ec2" == PLATFORM: + assert "Received dhcp lease on " in log, "EphemeralDHCPv4 failed" + if "azure" == PLATFORM: + if "udhcpc" == dhcp_client: + pytest.xfail( + "udhcpc implementation doesn't support azure, see GH-4765" + ) + assert ( + "Obtained DHCP lease on interface" in log + ), "Failed to get unknown option 245" + assert "'unknown-245'" in log, "Failed to get unknown option 245" + verify_clean_log(log) diff -Nru cloud-init-23.4.4/tests/integration_tests/releases.py cloud-init-24.1.3/tests/integration_tests/releases.py --- 
cloud-init-23.4.4/tests/integration_tests/releases.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/releases.py 2024-03-27 13:14:04.000000000 +0000 @@ -95,6 +95,10 @@ JAMMY = Release("ubuntu", "jammy", "22.04") KINETIC = Release("ubuntu", "kinetic", "22.10") LUNAR = Release("ubuntu", "lunar", "23.04") +MANTIC = Release("ubuntu", "mantic", "23.10") +NOBLE = Release("ubuntu", "noble", "24.04") + +UBUNTU_STABLE = (FOCAL, JAMMY, MANTIC) CURRENT_RELEASE = Release.from_os_image() IS_UBUNTU = CURRENT_RELEASE.os == "ubuntu" diff -Nru cloud-init-23.4.4/tests/integration_tests/reporting/test_webhook_reporting.py cloud-init-24.1.3/tests/integration_tests/reporting/test_webhook_reporting.py --- cloud-init-23.4.4/tests/integration_tests/reporting/test_webhook_reporting.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/reporting/test_webhook_reporting.py 2024-03-27 13:14:04.000000000 +0000 @@ -54,7 +54,7 @@ events = [json.loads(line) for line in server_output] # Only time this should be less is if we remove modules - assert len(events) > 56, events + assert len(events) > 52, events # Assert our first and last expected messages exist ds_events = [ diff -Nru cloud-init-23.4.4/tests/integration_tests/test_kernel_commandline_match.py cloud-init-24.1.3/tests/integration_tests/test_kernel_commandline_match.py --- cloud-init-23.4.4/tests/integration_tests/test_kernel_commandline_match.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/test_kernel_commandline_match.py 2024-03-27 13:14:04.000000000 +0000 @@ -6,7 +6,7 @@ from tests.integration_tests.conftest import get_validated_source from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.integration_settings import PLATFORM -from tests.integration_tests.util import wait_for_cloud_init +from tests.integration_tests.util import lxd_has_nocloud, wait_for_cloud_init log = logging.getLogger("integration_testing") @@ -67,17 +67,19 @@ @pytest.mark.skipif(PLATFORM != "lxd_vm", reason="Modifies grub config") @pytest.mark.lxd_use_exec @pytest.mark.parametrize( - "ds_str, configured", + "ds_str, configured, cmdline_configured", ( ( "ds=nocloud;s=http://my-url/;h=hostname", "DataSourceNoCloud [seed=None][dsmode=net]", + True, ), - ("ci.ds=openstack", "DataSourceOpenStack"), + ("ci.ds=openstack", "DataSourceOpenStack", True), + ("bonding.max_bonds=0", "lxd_or_nocloud", False), ), ) def test_lxd_datasource_kernel_override( - ds_str, configured, client: IntegrationInstance + ds_str, configured, cmdline_configured, client: IntegrationInstance ): """This test is twofold: it tests kernel commandline override, which also validates OpenStack Ironic requirements. OpenStack Ironic does not @@ -89,11 +91,24 @@ kernel commandline in Python code is required. 
""" + if configured == "lxd_or_nocloud": + configured = ( + "DataSourceNoCloud" if lxd_has_nocloud(client) else "DataSourceLXD" + ) override_kernel_cmdline(ds_str, client) - assert ( - "Machine is configured by the kernel commandline to run on single " - f"datasource {configured}" - ) in client.execute("cat /var/log/cloud-init.log") + if cmdline_configured: + assert ( + "Machine is configured by the kernel commandline to run on single " + f"datasource {configured}" + ) in client.execute("cat /var/log/cloud-init.log") + else: + # verify that no plat + log = client.execute("cat /var/log/cloud-init.log") + assert (f"Detected platform: {configured}") in log + assert ( + "Machine is configured by the kernel " + "commandline to run on single " + ) not in log GH_REPO_PATH = "https://raw.githubusercontent.com/canonical/cloud-init/main/" @@ -131,9 +146,7 @@ client.instance.execute_via_ssh = False # pyright: ignore assert wait_for_cloud_init(client, num_retries=60).ok if source.installs_new_version(): - client.install_new_cloud_init( - source, take_snapshot=False, clean=False - ) + client.install_new_cloud_init(source, clean=False) override_kernel_cmdline(ds_str, client) logs = client.execute("cat /var/log/cloud-init.log") diff -Nru cloud-init-23.4.4/tests/integration_tests/test_networking.py cloud-init-24.1.3/tests/integration_tests/test_networking.py --- cloud-init-23.4.4/tests/integration_tests/test_networking.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/test_networking.py 2024-03-27 13:14:04.000000000 +0000 @@ -1,9 +1,30 @@ """Networking-related tests.""" +import contextlib +import json + import pytest import yaml +from cloudinit.subp import subp +from tests.integration_tests import random_mac_address +from tests.integration_tests.clouds import IntegrationCloud from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.integration_settings import PLATFORM +from tests.integration_tests.releases import ( + CURRENT_RELEASE, + IS_UBUNTU, + JAMMY, + MANTIC, + NOBLE, +) +from tests.integration_tests.util import verify_clean_log + +# Older Ubuntu series didn't read cloud-init.* config keys +LXD_NETWORK_CONFIG_KEY = ( + "user.network-config" + if CURRENT_RELEASE < JAMMY + else "cloud-init.network-config" +) def _add_dummy_bridge_to_netplan(client: IntegrationInstance): @@ -91,3 +112,411 @@ client.execute("cat /etc/netplan/50-cloud-init.yaml") ) assert netplan != netplan_new, "changes expected in netplan config" + + +NET_V1_CONFIG = """ +config: +- name: eth0 + type: physical + mac_address: '{mac_addr}' + subnets: + - control: auto + type: dhcp +version: 1 +""" + + +NET_V2_MATCH_CONFIG = """ +version: 2 +ethernets: + eth0: + dhcp4: true + match: + macaddress: {mac_addr} + set-name: eth0 +""" + +EXPECTED_NETPLAN_HEADER = """\ +# This file is generated from information provided by the datasource. Changes +# to it will not persist across an instance reboot. 
To disable cloud-init's +# network configuration capabilities, write a file +# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: +# network: {config: disabled}""" + +EXPECTED_NET_CONFIG = """\ +network: + version: 2 + ethernets: + eth0: + dhcp4: true + set-name: eth0 + match: + macaddress: {mac_addr} +""" + +BAD_NETWORK_V2 = """\ +version: 2 +ethernets: + eth0: + dhcp4: badval + match: + {match_condition} +""" + + +@pytest.mark.skipif( + PLATFORM != "lxd_vm", + reason="Test requires custom networking provided by LXD", +) +@pytest.mark.parametrize( + "net_config", + ( + pytest.param(NET_V1_CONFIG, id="v1"), + pytest.param(NET_V2_MATCH_CONFIG, id="v2"), + ), +) +def test_netplan_rendering( + net_config, session_cloud: IntegrationCloud, setup_image +): + mac_addr = random_mac_address() + launch_kwargs = { + "config_dict": { + LXD_NETWORK_CONFIG_KEY: net_config.format(mac_addr=mac_addr), + "volatile.eth0.hwaddr": mac_addr, + }, + } + expected = yaml.safe_load(EXPECTED_NET_CONFIG) + expected["network"]["ethernets"]["eth0"]["match"] = { + "macaddress": mac_addr + } + with session_cloud.launch(launch_kwargs=launch_kwargs) as client: + result = client.execute("cat /etc/netplan/50-cloud-init.yaml") + assert result.stdout.startswith(EXPECTED_NETPLAN_HEADER) + assert expected == yaml.safe_load(result.stdout) + + +NET_V1_NAME_TOO_LONG = """\ +config: +- name: eth01234567890123 + type: physical + mac_address: '{mac_addr}' + subnets: + - control: auto + type: dhcp +version: 1 +""" + + +@pytest.mark.skipif( + PLATFORM != "lxd_vm", + reason="Test requires custom networking provided by LXD", +) +@pytest.mark.parametrize("net_config", (NET_V1_NAME_TOO_LONG,)) +def test_schema_warnings( + net_config, session_cloud: IntegrationCloud, setup_image +): + # TODO: This test takes a lot more time than it needs to. + # The default launch wait will wait until cloud-init done, but the + # init network stage will wait 2 minutes for network timeout. + # We could set wait=False and do our own waiting, but there's also the + # issue that `execute_via_ssh=False` on pycloudlib means we `sudo -u ubuntu` + # the exec commands, but the ubuntu user won't exist until + # after the init network stage runs.
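These LXD tests pair each injected network-config with a `volatile.eth0.hwaddr` key set to the same random MAC, so the v1/v2 `match` stanzas resolve to the instance's NIC. A plausible sketch of the `random_mac_address` helper they import, assuming it returns a locally administered unicast address (the real implementation may differ):

    import random

    def random_mac_address() -> str:
        # Assumed behavior of tests.integration_tests.random_mac_address:
        # six random octets, with the locally-administered bit set and the
        # multicast bit cleared so the address cannot collide with a real
        # vendor-assigned NIC address.
        octets = [random.randint(0x00, 0xFF) for _ in range(6)]
        octets[0] = (octets[0] | 0x02) & 0xFE
        return ":".join(f"{octet:02x}" for octet in octets)

    print(random_mac_address())  # e.g. 06:3f:9a:12:bc:7e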
+ mac_addr = random_mac_address() + launch_kwargs = { + "execute_via_ssh": False, + "config_dict": { + LXD_NETWORK_CONFIG_KEY: net_config.format(mac_addr=mac_addr), + "volatile.eth0.hwaddr": mac_addr, + }, + } + expected = yaml.safe_load(EXPECTED_NET_CONFIG) + expected["network"]["ethernets"]["eth0"]["match"] = {} + expected["network"]["ethernets"]["eth0"]["match"]["macaddress"] = mac_addr + with session_cloud.launch(launch_kwargs=launch_kwargs) as client: + result = client.execute("cloud-init status --format=json") + if CURRENT_RELEASE < NOBLE: + assert result.ok + assert result.return_code == 0 # Stable release still exit 0 + else: + assert result.failed + assert result.return_code == 2 # Warnings exit 2 after 23.4 + assert ( + 'eth01234567890123\\" is wrong: \\"name\\" not a valid ifname' + in result.stdout + ) + result = client.execute("cloud-init schema --system") + assert "Invalid network-config " in result.stdout + + +@pytest.mark.skipif( + not IS_UBUNTU, reason="Dependent on netplan API availability on Ubuntu" +) +@pytest.mark.skipif( + PLATFORM not in ("lxd_vm", "lxd_container"), + reason="Test requires lxc exec feature due to broken network config", +) +def test_invalid_network_v2_netplan( + session_cloud: IntegrationCloud, setup_image +): + mac_addr = random_mac_address() + + if PLATFORM == "lxd_vm": + config_dict = { + LXD_NETWORK_CONFIG_KEY: BAD_NETWORK_V2.format( + match_condition=f"macaddress: {mac_addr}" + ), + "volatile.eth0.hwaddr": mac_addr, + } + else: + config_dict = { + LXD_NETWORK_CONFIG_KEY: BAD_NETWORK_V2.format( + match_condition="name: eth0" + ) + } + + with session_cloud.launch( + launch_kwargs={ + "execute_via_ssh": False, + "config_dict": config_dict, + } + ) as client: + # Netplan python API only available on MANTIC and later + if CURRENT_RELEASE < MANTIC: + assert ( + "Skipping netplan schema validation. No netplan available" + ) in client.read_from_file("/var/log/cloud-init.log") + assert ( + "Skipping network-config schema validation. No network schema" + " for version: 2" + ) in client.execute("cloud-init schema --system") + else: + assert ( + "Invalid network-config provided: Please run " + "'sudo cloud-init schema --system' to see the schema errors." + ) in client.execute("cloud-init status --format=json") + assert ( + "Invalid network-config /var/lib/cloud/instances/" + in client.execute("cloud-init schema --system") + ) + assert ( + "# E1: Invalid netplan schema. 
Error in network definition:" + " invalid boolean value 'badval" + ) in client.execute("cloud-init schema --system --annotate") + + +@pytest.mark.skipif(PLATFORM != "ec2", reason="test is ec2 specific") +def test_ec2_multi_nic_reboot(setup_image, session_cloud: IntegrationCloud): + """Tests that additional secondary NICs and secondary IPs on them are + routable from non-local networks after a reboot event when network updates + are configured on every boot.""" + ec2 = session_cloud.cloud_instance.client + with session_cloud.launch(launch_kwargs={}, user_data=USER_DATA) as client: + # Add secondary NIC + secondary_priv_ip_0 = client.instance.add_network_interface() + response = ec2.describe_network_interfaces( + Filters=[ + { + "Name": "private-ip-address", + "Values": [secondary_priv_ip_0], + }, + ], + ) + nic_id = response["NetworkInterfaces"][0]["NetworkInterfaceId"] + # Add secondary IP to secondary NIC + association_0 = ec2.assign_private_ip_addresses( + NetworkInterfaceId=nic_id, SecondaryPrivateIpAddressCount=1 + ) + assert association_0["ResponseMetadata"]["HTTPStatusCode"] == 200 + secondary_priv_ip_1 = association_0["AssignedPrivateIpAddresses"][0][ + "PrivateIpAddress" + ] + + # Assing elastic IPs + # Refactor after https://github.com/canonical/pycloudlib/issues/337 is + # completed + allocation_0 = ec2.allocate_address(Domain="vpc") + allocation_1 = ec2.allocate_address(Domain="vpc") + try: + secondary_pub_ip_0 = allocation_0["PublicIp"] + secondary_pub_ip_1 = allocation_1["PublicIp"] + + association_0 = ec2.associate_address( + AllocationId=allocation_0["AllocationId"], + NetworkInterfaceId=nic_id, + PrivateIpAddress=secondary_priv_ip_0, + ) + assert association_0["ResponseMetadata"]["HTTPStatusCode"] == 200 + association_1 = ec2.associate_address( + AllocationId=allocation_1["AllocationId"], + NetworkInterfaceId=nic_id, + PrivateIpAddress=secondary_priv_ip_1, + ) + assert association_1["ResponseMetadata"]["HTTPStatusCode"] == 200 + + # Reboot to update network config + client.execute("cloud-init clean --logs") + client.restart() + + log_content = client.read_from_file("/var/log/cloud-init.log") + verify_clean_log(log_content) + + # SSH over primary NIC works + instance_pub_ip = client.instance.ip + subp("nc -w 5 -zv " + instance_pub_ip + " 22", shell=True) + + # SSH over secondary NIC works + subp("nc -w 5 -zv " + secondary_pub_ip_0 + " 22", shell=True) + subp("nc -w 5 -zv " + secondary_pub_ip_1 + " 22", shell=True) + finally: + with contextlib.suppress(Exception): + ec2.disassociate_address( + AssociationId=association_0["AssociationId"] + ) + with contextlib.suppress(Exception): + ec2.release_address(AllocationId=allocation_0["AllocationId"]) + with contextlib.suppress(Exception): + ec2.disassociate_address( + AssociationId=association_1["AssociationId"] + ) + with contextlib.suppress(Exception): + ec2.release_address(AllocationId=allocation_1["AllocationId"]) + + +@pytest.mark.adhoc # costly instance not available in all regions / azs +@pytest.mark.skipif(PLATFORM != "ec2", reason="test is ec2 specific") +def test_ec2_multi_network_cards(setup_image, session_cloud: IntegrationCloud): + """ + Tests that with an interface type with multiple network cards (non unique + device indexes). 
+ + https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html + https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/p5-efa.html + """ + ec2 = session_cloud.cloud_instance.client + + vpc = session_cloud.cloud_instance.get_or_create_vpc( + name="ec2-cloud-init-integration" + ) + [subnet_id] = [s.id for s in vpc.vpc.subnets.all()] + security_group_ids = [sg.id for sg in vpc.vpc.security_groups.all()] + + launch_kwargs = { + "InstanceType": "p5.48xlarge", + "NetworkInterfaces": [ + { + "NetworkCardIndex": 0, + "DeviceIndex": 0, + "InterfaceType": "efa", + "DeleteOnTermination": True, + "Groups": security_group_ids, + "SubnetId": subnet_id, + }, + { + "NetworkCardIndex": 1, + "DeviceIndex": 1, + "InterfaceType": "efa", + "DeleteOnTermination": True, + "Groups": security_group_ids, + "SubnetId": subnet_id, + }, + { + "NetworkCardIndex": 2, + "DeviceIndex": 1, + "InterfaceType": "efa", + "DeleteOnTermination": True, + "Groups": security_group_ids, + "SubnetId": subnet_id, + }, + ], + } + # Instances with this network setups do not get a public ip. + # Do not wait until we associate one to the primary interface so that we + # can interact with it. + with session_cloud.launch( + launch_kwargs=launch_kwargs, + user_data=USER_DATA, + enable_ipv6=False, + wait=False, + ) as client: + client.instance._instance.wait_until_running( + Filters=[ + { + "Name": "instance-id", + "Values": [client.instance.id], + } + ] + ) + + network_interfaces = iter( + ec2.describe_network_interfaces( + Filters=[ + { + "Name": "attachment.instance-id", + "Values": [client.instance.id], + } + ] + )["NetworkInterfaces"] + ) + nic_id_0 = next(network_interfaces)["NetworkInterfaceId"] + + try: + allocation_0 = ec2.allocate_address(Domain="vpc") + association_0 = ec2.associate_address( + AllocationId=allocation_0["AllocationId"], + NetworkInterfaceId=nic_id_0, + ) + assert association_0["ResponseMetadata"]["HTTPStatusCode"] == 200 + + result = client.execute( + "cloud-init query ds.meta-data.network.interfaces.macs" + ) + assert result.ok, result.stderr + for _macs, net_metadata in json.load(result.stdout): + assert "network-card" in net_metadata + + nic_id_1 = next(network_interfaces)["NetworkInterfaceId"] + allocation_1 = ec2.allocate_address(Domain="vpc") + association_1 = ec2.associate_address( + AllocationId=allocation_1["AllocationId"], + NetworkInterfaceId=nic_id_1, + ) + assert association_1["ResponseMetadata"]["HTTPStatusCode"] == 200 + + nic_id_2 = next(network_interfaces)["NetworkInterfaceId"] + allocation_2 = ec2.allocate_address(Domain="vpc") + association_2 = ec2.associate_address( + AllocationId=allocation_2["AllocationId"], + NetworkInterfaceId=nic_id_2, + ) + assert association_2["ResponseMetadata"]["HTTPStatusCode"] == 200 + + # Reboot to update network config + client.execute("cloud-init clean --logs") + client.restart() + + log_content = client.read_from_file("/var/log/cloud-init.log") + verify_clean_log(log_content) + + # SSH over secondary NICs works + subp("nc -w 5 -zv " + allocation_1["PublicIp"] + " 22", shell=True) + subp("nc -w 5 -zv " + allocation_2["PublicIp"] + " 22", shell=True) + finally: + with contextlib.suppress(Exception): + ec2.disassociate_address( + AssociationId=association_0["AssociationId"] + ) + with contextlib.suppress(Exception): + ec2.release_address(AllocationId=allocation_0["AllocationId"]) + with contextlib.suppress(Exception): + ec2.disassociate_address( + AssociationId=association_1["AssociationId"] + ) + with contextlib.suppress(Exception): + 
ec2.release_address(AllocationId=allocation_1["AllocationId"]) + with contextlib.suppress(Exception): + ec2.disassociate_address( + AssociationId=association_2["AssociationId"] + ) + with contextlib.suppress(Exception): + ec2.release_address(AllocationId=allocation_2["AllocationId"]) diff -Nru cloud-init-23.4.4/tests/integration_tests/test_upgrade.py cloud-init-24.1.3/tests/integration_tests/test_upgrade.py --- cloud-init-23.4.4/tests/integration_tests/test_upgrade.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/test_upgrade.py 2024-03-27 13:14:04.000000000 +0000 @@ -3,11 +3,17 @@ import os import pytest +import yaml from tests.integration_tests.clouds import IntegrationCloud from tests.integration_tests.conftest import get_validated_source from tests.integration_tests.integration_settings import PLATFORM -from tests.integration_tests.releases import CURRENT_RELEASE, FOCAL, IS_UBUNTU +from tests.integration_tests.releases import ( + CURRENT_RELEASE, + FOCAL, + IS_UBUNTU, + NOBLE, +) from tests.integration_tests.util import verify_clean_log LOG = logging.getLogger("integration_testing.test_upgrade") @@ -88,7 +94,7 @@ ) # Upgrade - instance.install_new_cloud_init(source, take_snapshot=False) + instance.install_new_cloud_init(source) # 'cloud-init init' helps us understand if our pickling upgrade paths # have broken across re-constitution of a cached datasource. Some @@ -137,7 +143,18 @@ assert post_json["v1"]["datasource"].startswith( "DataSourceAzure" ) - assert pre_network == post_network + if PLATFORM in ["gce", "qemu"] and CURRENT_RELEASE < NOBLE: + # GCE regenerates network config per boot AND + # GCE uses fallback config AND + # #4474 changed fallback configuration. + # Once the baseline includes #4474, this can be removed + pre_network = yaml.load(pre_network, Loader=yaml.Loader) + post_network = yaml.load(post_network, Loader=yaml.Loader) + for values in post_network["network"]["ethernets"].values(): + values.pop("dhcp6") + assert yaml.dump(pre_network) == yaml.dump(post_network) + else: + assert pre_network == post_network # Calculate and log all the boot numbers pre_analyze_totals = [ @@ -185,9 +202,9 @@ launch_kwargs = {"image_id": session_cloud.initial_image_id} with session_cloud.launch(launch_kwargs=launch_kwargs) as instance: - instance.install_new_cloud_init( - source, take_snapshot=False, clean=False - ) + instance.install_new_cloud_init(source, clean=False) + # Ensure we aren't looking at any prior warnings/errors from prior boot + instance.execute("rm /var/log/cloud-init.log") instance.restart() log = instance.read_from_file("/var/log/cloud-init.log") verify_clean_log(log) diff -Nru cloud-init-23.4.4/tests/integration_tests/util.py cloud-init-24.1.3/tests/integration_tests/util.py --- cloud-init-23.4.4/tests/integration_tests/util.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/integration_tests/util.py 2024-03-27 13:14:04.000000000 +0000 @@ -8,12 +8,15 @@ from functools import lru_cache from itertools import chain from pathlib import Path -from typing import Set +from typing import TYPE_CHECKING, Set import pytest from cloudinit.subp import subp -from tests.integration_tests.instances import IntegrationInstance + +if TYPE_CHECKING: + # instances.py has imports util.py, so avoid circular import + from tests.integration_tests.instances import IntegrationInstance log = logging.getLogger("integration_testing") key_pair = namedtuple("key_pair", "public_key private_key") @@ -67,6 +70,8 @@ # Ubuntu lxd storage "thinpool by 
default on Ubuntu due to LP #1982780", "WARNING]: Could not match supplied host pattern, ignoring:", + # https://bugs.launchpad.net/ubuntu/+source/netplan.io/+bug/2041727 + "Cannot call Open vSwitch: ovsdb-server.service is not running.", ] traceback_texts = [] if "install canonical-livepatch" in log: @@ -107,7 +112,7 @@ for traceback_text in traceback_texts: expected_tracebacks += log.count(traceback_text) - assert warning_count == expected_warnings, ( + assert warning_count <= expected_warnings, ( f"Unexpected warning count != {expected_warnings}. Found: " f"{re.findall('WARNING.*', log)}" ) @@ -168,7 +173,7 @@ # We're implementing our own here in case cloud-init status --wait # isn't working correctly (LP: #1966085) -def wait_for_cloud_init(client: IntegrationInstance, num_retries: int = 30): +def wait_for_cloud_init(client: "IntegrationInstance", num_retries: int = 30): last_exception = None for _ in range(num_retries): try: @@ -176,7 +181,7 @@ if ( result and result.ok - and ("running" not in result or "not run" not in result) + and ("running" not in result or "not started" not in result) ): return result except Exception as e: @@ -187,7 +192,7 @@ ) from last_exception -def get_console_log(client: IntegrationInstance): +def get_console_log(client: "IntegrationInstance"): try: console_log = client.instance.console_log() except NotImplementedError: @@ -198,7 +203,7 @@ @lru_cache() -def lxd_has_nocloud(client: IntegrationInstance) -> bool: +def lxd_has_nocloud(client: "IntegrationInstance") -> bool: # Bionic or Focal may be detected as NoCloud rather than LXD lxd_image_metadata = subp( ["lxc", "config", "metadata", "show", client.instance.name] @@ -206,7 +211,7 @@ return "/var/lib/cloud/seed/nocloud" in lxd_image_metadata.stdout -def get_feature_flag_value(client: IntegrationInstance, key): +def get_feature_flag_value(client: "IntegrationInstance", key): value = client.execute( 'python3 -c "from cloudinit import features; ' f'print(features.{key})"' diff -Nru cloud-init-23.4.4/tests/unittests/analyze/test_boot.py cloud-init-24.1.3/tests/unittests/analyze/test_boot.py --- cloud-init-23.4.4/tests/unittests/analyze/test_boot.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/analyze/test_boot.py 2024-03-27 13:14:04.000000000 +0000 @@ -28,17 +28,12 @@ class TestSystemCtlReader: - @pytest.mark.parametrize( - "args", - [ - pytest.param(["dummyProperty"], id="invalid_property"), - pytest.param( - ["dummyProperty", "dummyParameter"], id="invalid_parameter" - ), - ], - ) - def test_systemctl_invalid(self, args): - reader = SystemctlReader(*args) + def test_systemctl_invalid(self, mocker): + mocker.patch( + "cloudinit.analyze.show.subp.subp", + return_value=("", "something_invalid"), + ) + reader = SystemctlReader("dont", "care") with pytest.raises(RuntimeError): reader.parse_epoch_as_float() diff -Nru cloud-init-23.4.4/tests/unittests/analyze/test_show.py cloud-init-24.1.3/tests/unittests/analyze/test_show.py --- cloud-init-23.4.4/tests/unittests/analyze/test_show.py 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/analyze/test_show.py 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,24 @@ +from collections import namedtuple + +import pytest + +from cloudinit.analyze import analyze_show + + +@pytest.fixture +def mock_io(tmp_path): + """Mock args for configure_io function""" + infile = tmp_path / "infile" + outfile = tmp_path / "outfile" + return namedtuple("MockIO", ["infile", "outfile"])(infile, outfile) + + +class TestAnalyzeShow: + """Test 
analyze_show (and/or helpers) in cloudinit/analyze/__init__.py""" + + def test_empty_logfile(self, mock_io, capsys): + """Test analyze_show with an empty logfile""" + mock_io.infile.write_text("") + with pytest.raises(SystemExit): + analyze_show("dontcare", mock_io) + assert capsys.readouterr().err == f"Empty file {mock_io.infile}\n" diff -Nru cloud-init-23.4.4/tests/unittests/cmd/devel/test_hotplug_hook.py cloud-init-24.1.3/tests/unittests/cmd/devel/test_hotplug_hook.py --- cloud-init-23.4.4/tests/unittests/cmd/devel/test_hotplug_hook.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/cmd/devel/test_hotplug_hook.py 2024-03-27 13:14:04.000000000 +0000 @@ -4,14 +4,17 @@ import pytest -from cloudinit.cmd.devel.hotplug_hook import handle_hotplug +from cloudinit import settings +from cloudinit.cmd.devel.hotplug_hook import enable_hotplug, handle_hotplug from cloudinit.distros import Distro -from cloudinit.event import EventType +from cloudinit.event import EventScope, EventType from cloudinit.net.activators import NetworkActivator from cloudinit.net.network_state import NetworkState from cloudinit.sources import DataSource from cloudinit.stages import Init +M_PATH = "cloudinit.cmd.devel.hotplug_hook." + hotplug_args = namedtuple("hotplug_args", "udevaction, subsystem, devpath") FAKE_MAC = "11:22:33:44:55:66" @@ -244,3 +247,105 @@ call(10), call(30), ] + + +@pytest.mark.usefixtures("fake_filesystem") +class TestEnableHotplug: + @mock.patch(M_PATH + "util.write_file") + @mock.patch( + M_PATH + "util.read_hotplug_enabled_file", + return_value={"scopes": []}, + ) + @mock.patch(M_PATH + "install_hotplug") + def test_enabling( + self, + m_install_hotplug, + m_read_hotplug_enabled_file, + m_write_file, + mocks, + ): + mocks.m_init.datasource.get_supported_events.return_value = { + EventScope.NETWORK: {EventType.HOTPLUG} + } + mocks.m_init.paths.get_cpath.return_value = ( + "/var/lib/cloud/hotplug.enabled" + ) + + enable_hotplug(mocks.m_init, "net") + + assert [ + call([EventType.HOTPLUG]) + ] == mocks.m_init.datasource.get_supported_events.call_args_list + m_read_hotplug_enabled_file.assert_called_once() + assert [ + call( + settings.HOTPLUG_ENABLED_FILE, + '{"scopes": ["network"]}', + omode="w", + mode=0o640, + ) + ] == m_write_file.call_args_list + assert [ + call( + mocks.m_init.datasource, + network_hotplug_enabled=True, + cfg=mocks.m_init.cfg, + ) + ] == m_install_hotplug.call_args_list + + @pytest.mark.parametrize( + ["supported_events"], [({},), ({EventScope.NETWORK: {}},)] + ) + @mock.patch(M_PATH + "util.write_file") + @mock.patch( + M_PATH + "util.read_hotplug_enabled_file", + return_value={"scopes": []}, + ) + @mock.patch(M_PATH + "install_hotplug") + def test_hotplug_not_supported_in_ds( + self, + m_install_hotplug, + m_read_hotplug_enabled_file, + m_write_file, + supported_events, + mocks, + ): + mocks.m_init.datasource.get_supported_events.return_value = ( + supported_events + ) + enable_hotplug(mocks.m_init, "net") + + assert [ + call([EventType.HOTPLUG]) + ] == mocks.m_init.datasource.get_supported_events.call_args_list + assert [] == m_read_hotplug_enabled_file.call_args_list + assert [] == m_write_file.call_args_list + assert [] == m_install_hotplug.call_args_list + + @mock.patch(M_PATH + "util.write_file") + @mock.patch( + M_PATH + "util.read_hotplug_enabled_file", + return_value={"scopes": [EventScope.NETWORK.value]}, + ) + @mock.patch(M_PATH + "install_hotplug") + def test_hotplug_already_enabled_in_file( + self, + m_install_hotplug, + 
m_read_hotplug_enabled_file, + m_write_file, + mocks, + ): + mocks.m_init.datasource.get_supported_events.return_value = { + EventScope.NETWORK: {EventType.HOTPLUG} + } + mocks.m_init.paths.get_cpath.return_value = ( + "/var/lib/cloud/hotplug.enabled" + ) + enable_hotplug(mocks.m_init, "net") + + assert [ + call([EventType.HOTPLUG]) + ] == mocks.m_init.datasource.get_supported_events.call_args_list + m_read_hotplug_enabled_file.assert_called_once() + assert [] == m_write_file.call_args_list + assert [] == m_install_hotplug.call_args_list diff -Nru cloud-init-23.4.4/tests/unittests/cmd/devel/test_logs.py cloud-init-24.1.3/tests/unittests/cmd/devel/test_logs.py --- cloud-init-23.4.4/tests/unittests/cmd/devel/test_logs.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/cmd/devel/test_logs.py 2024-03-27 13:14:04.000000000 +0000 @@ -11,7 +11,7 @@ from cloudinit.cmd.devel import logs from cloudinit.cmd.devel.logs import ApportFile from cloudinit.subp import SubpResult, subp -from cloudinit.util import ensure_dir, load_file, write_file +from cloudinit.util import ensure_dir, load_text_file, write_file from tests.unittests.helpers import mock M_PATH = "cloudinit.cmd.devel.logs." @@ -40,8 +40,12 @@ m_getuid.return_value = 100 log1 = tmpdir.join("cloud-init.log") write_file(log1, "cloud-init-log") + log1_rotated = tmpdir.join("cloud-init.log.1.gz") + write_file(log1_rotated, "cloud-init-log-rotated") log2 = tmpdir.join("cloud-init-output.log") write_file(log2, "cloud-init-output-log") + log2_rotated = tmpdir.join("cloud-init-output.log.1.gz") + write_file(log2_rotated, "cloud-init-output-log-rotated") run_dir = tmpdir.join("run") write_file(run_dir.join("results.json"), "results") write_file( @@ -52,6 +56,12 @@ ) output_tarfile = str(tmpdir.join("logs.tgz")) + mocker.patch(M_PATH + "Init", autospec=True) + mocker.patch( + M_PATH + "get_config_logfiles", + return_value=[log1, log1_rotated, log2, log2_rotated], + ) + date = datetime.utcnow().date().strftime("%Y-%m-%d") date_logdir = "cloud-init-logs-{0}".format(date) @@ -98,7 +108,6 @@ M_PATH + "subprocess.call", side_effect=fake_subprocess_call ) mocker.patch(M_PATH + "sys.stderr", fake_stderr) - mocker.patch(M_PATH + "CLOUDINIT_LOGS", [log1, log2]) mocker.patch(M_PATH + "CLOUDINIT_RUN_DIR", run_dir) mocker.patch(M_PATH + "INSTALLER_APPORT_FILES", []) mocker.patch(M_PATH + "INSTALLER_APPORT_SENSITIVE_FILES", []) @@ -116,23 +125,31 @@ ), ( "Unexpected file found: %s" % INSTANCE_JSON_SENSITIVE_FILE ) - assert "0.7fake\n" == load_file( + assert "0.7fake\n" == load_text_file( os.path.join(out_logdir, "dpkg-version") ) - assert version_out == load_file(os.path.join(out_logdir, "version")) - assert "cloud-init-log" == load_file( + assert version_out == load_text_file( + os.path.join(out_logdir, "version") + ) + assert "cloud-init-log" == load_text_file( os.path.join(out_logdir, "cloud-init.log") ) - assert "cloud-init-output-log" == load_file( + assert "cloud-init-log-rotated" == load_text_file( + os.path.join(out_logdir, "cloud-init.log.1.gz") + ) + assert "cloud-init-output-log" == load_text_file( os.path.join(out_logdir, "cloud-init-output.log") ) - assert "dmesg-out\n" == load_file( + assert "cloud-init-output-log-rotated" == load_text_file( + os.path.join(out_logdir, "cloud-init-output.log.1.gz") + ) + assert "dmesg-out\n" == load_text_file( os.path.join(out_logdir, "dmesg.txt") ) - assert "journal-out\n" == load_file( + assert "journal-out\n" == load_text_file( os.path.join(out_logdir, "journal.txt") ) - assert "results" == 
load_file( + assert "results" == load_text_file( os.path.join(out_logdir, "run", "cloud-init", "results.json") ) fake_stderr.write.assert_any_call("Wrote %s\n" % output_tarfile) @@ -156,6 +173,12 @@ ) output_tarfile = str(tmpdir.join("logs.tgz")) + mocker.patch(M_PATH + "Init", autospec=True) + mocker.patch( + M_PATH + "get_config_logfiles", + return_value=[log1, log2], + ) + date = datetime.utcnow().date().strftime("%Y-%m-%d") date_logdir = "cloud-init-logs-{0}".format(date) @@ -200,7 +223,6 @@ M_PATH + "subprocess.call", side_effect=fake_subprocess_call ) mocker.patch(M_PATH + "sys.stderr", fake_stderr) - mocker.patch(M_PATH + "CLOUDINIT_LOGS", [log1, log2]) mocker.patch(M_PATH + "CLOUDINIT_RUN_DIR", run_dir) mocker.patch(M_PATH + "INSTALLER_APPORT_FILES", []) mocker.patch(M_PATH + "INSTALLER_APPORT_SENSITIVE_FILES", []) @@ -209,10 +231,10 @@ # unpack the tarfile and check file contents subp(["tar", "zxvf", output_tarfile, "-C", str(tmpdir)]) out_logdir = tmpdir.join(date_logdir) - assert "user-data" == load_file( + assert "user-data" == load_text_file( os.path.join(out_logdir, "user-data.txt") ) - assert "sensitive" == load_file( + assert "sensitive" == load_text_file( os.path.join( out_logdir, "run", @@ -231,15 +253,14 @@ "cloud-init? more like cloud-innit!\n", ), ( - ["ls", "/nonexistent-directory"], + ["sh", "-c", "echo test 1>&2; exit 42"], ( "Unexpected error while running command.\n" - "Command: ['ls', '/nonexistent-directory']\n" - "Exit code: 2\n" + "Command: ['sh', '-c', 'echo test 1>&2; exit 42']\n" + "Exit code: 42\n" "Reason: -\n" "Stdout: \n" - "Stderr: ls: cannot access '/nonexistent-directory': " - "No such file or directory" + "Stderr: test" ), None, ), @@ -264,19 +285,13 @@ ) assert expected_return_value == return_output - assert expected_file_contents == load_file(output_file) + assert expected_file_contents == load_text_file(output_file) @pytest.mark.parametrize( "cmd, expected_file_contents", [ (["echo", "cloud-init, shmoud-init"], "cloud-init, shmoud-init\n"), - ( - ["ls", "/nonexistent-directory"], - ( - "ls: cannot access '/nonexistent-directory': " - "No such file or directory\n" - ), - ), + (["sh", "-c", "echo test 1>&2; exit 42"], "test\n"), ], ) def test_stream_command_output_to_file( @@ -292,7 +307,7 @@ verbosity=1, ) - assert expected_file_contents == load_file(output_file) + assert expected_file_contents == load_text_file(output_file) class TestCollectInstallerLogs: diff -Nru cloud-init-23.4.4/tests/unittests/cmd/devel/test_render.py cloud-init-24.1.3/tests/unittests/cmd/devel/test_render.py --- cloud-init-23.4.4/tests/unittests/cmd/devel/test_render.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/cmd/devel/test_render.py 2024-03-27 13:14:04.000000000 +0000 @@ -6,6 +6,7 @@ from cloudinit.cmd.devel import render from cloudinit.helpers import Paths +from cloudinit.templater import JinjaSyntaxParsingException from cloudinit.util import ensure_dir, write_file from tests.unittests.helpers import mock, skipUnlessJinja @@ -148,3 +149,19 @@ write_file(instance_data, '{"my-var": "jinja worked"}') render.render_template(user_data, instance_data, False) assert "Unable to render user-data file" in caplog.text + + @skipUnlessJinja() + def test_invalid_jinja_syntax(self, caplog, tmpdir): + user_data = tmpdir.join("user-data") + write_file(user_data, "##template: jinja\nrendering: {{ my_var } }") + instance_data = tmpdir.join("instance-data") + write_file(instance_data, '{"my-var": "jinja worked"}') + assert render.render_template(user_data, 
instance_data, True) == 1 + assert ( + JinjaSyntaxParsingException.format_error_message( + syntax_error="unexpected '}'", + line_number=2, + line_content="rendering: {{ my_var } }", + ) + in caplog.text + ) diff -Nru cloud-init-23.4.4/tests/unittests/cmd/test_cloud_id.py cloud-init-24.1.3/tests/unittests/cmd/test_cloud_id.py --- cloud-init-23.4.4/tests/unittests/cmd/test_cloud_id.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/cmd/test_cloud_id.py 2024-03-27 13:14:04.000000000 +0000 @@ -12,8 +12,9 @@ M_PATH = "cloudinit.cmd.cloud_id." STATUS_DETAILS_DONE = status.StatusDetails( - status.UXAppStatus.DONE, - status.UXAppBootStatusCode.UNKNOWN, + status.RunningStatus.DONE, + status.ConditionStatus.PEACHY, + status.EnabledStatus.UNKNOWN, "DataSourceNoCloud somedetail", [], {}, @@ -22,8 +23,9 @@ {}, ) STATUS_DETAILS_DISABLED = status.StatusDetails( - status.UXAppStatus.DISABLED, - status.UXAppBootStatusCode.DISABLED_BY_GENERATOR, + status.RunningStatus.DISABLED, + status.ConditionStatus.PEACHY, + status.EnabledStatus.DISABLED_BY_GENERATOR, "DataSourceNoCloud somedetail", [], {}, @@ -31,9 +33,10 @@ "", {}, ) -STATUS_DETAILS_NOT_RUN = status.StatusDetails( - status.UXAppStatus.NOT_RUN, - status.UXAppBootStatusCode.UNKNOWN, +STATUS_DETAILS_NOT_STARTED = status.StatusDetails( + status.RunningStatus.NOT_STARTED, + status.ConditionStatus.PEACHY, + status.EnabledStatus.UNKNOWN, "", [], {}, @@ -42,8 +45,9 @@ {}, ) STATUS_DETAILS_RUNNING = status.StatusDetails( - status.UXAppStatus.RUNNING, - status.UXAppBootStatusCode.UNKNOWN, + status.RunningStatus.RUNNING, + status.ConditionStatus.PEACHY, + status.EnabledStatus.UNKNOWN, "", [], {}, @@ -54,8 +58,9 @@ STATUS_DETAILS_RUNNING_DS_NONE = status.StatusDetails( - status.UXAppStatus.RUNNING, - status.UXAppBootStatusCode.UNKNOWN, + status.RunningStatus.RUNNING, + status.ConditionStatus.PEACHY, + status.EnabledStatus.UNKNOWN, "", [], {}, @@ -221,19 +226,24 @@ "details, exit_code", ( (STATUS_DETAILS_DISABLED, 2), - (STATUS_DETAILS_NOT_RUN, 3), + (STATUS_DETAILS_NOT_STARTED, 3), (STATUS_DETAILS_RUNNING, 0), (STATUS_DETAILS_RUNNING_DS_NONE, 0), ), ) @mock.patch(M_PATH + "get_status_details") def test_cloud_id_unique_exit_codes_for_status( - self, get_status_details, details, exit_code, tmpdir, capsys + self, + get_status_details, + details: status.StatusDetails, + exit_code, + tmpdir, + capsys, ): """cloud-id returns unique exit codes for status.""" get_status_details.return_value = details instance_data = tmpdir.join("instance-data.json") - if details.status == cloud_id.UXAppStatus.RUNNING: + if details.running_status == cloud_id.RunningStatus.RUNNING: instance_data.write("{}") cmd = ["cloud-id", "--instance-data", instance_data.strpath, "--json"] with mock.patch("sys.argv", cmd): diff -Nru cloud-init-23.4.4/tests/unittests/cmd/test_main.py cloud-init-24.1.3/tests/unittests/cmd/test_main.py --- cloud-init-23.4.4/tests/unittests/cmd/test_main.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/cmd/test_main.py 2024-03-27 13:14:04.000000000 +0000 @@ -10,7 +10,7 @@ from cloudinit import safeyaml from cloudinit.cmd import main -from cloudinit.util import ensure_dir, load_file, write_file +from cloudinit.util import ensure_dir, load_text_file, write_file from tests.unittests.helpers import FilesystemMockingTestCase, wrap_and_call MyArgs = namedtuple("MyArgs", "debug files force local reporter subcommand") @@ -82,12 +82,12 @@ self.assertEqual( "iid-datasource-none\n", os.path.join( - 
load_file(os.path.join(self.new_root, instance_id_path)) + load_text_file(os.path.join(self.new_root, instance_id_path)) ), ) # modules are run (including write_files) self.assertEqual( - "blah", load_file(os.path.join(self.new_root, "etc/blah.ini")) + "blah", load_text_file(os.path.join(self.new_root, "etc/blah.ini")) ) expected_logs = [ "network config is disabled by fallback", # apply_network_config @@ -152,12 +152,12 @@ self.assertEqual( "iid-datasource-none\n", os.path.join( - load_file(os.path.join(self.new_root, instance_id_path)) + load_text_file(os.path.join(self.new_root, instance_id_path)) ), ) # modules are run (including write_files) self.assertEqual( - "blah", load_file(os.path.join(self.new_root, "etc/blah.ini")) + "blah", load_text_file(os.path.join(self.new_root, "etc/blah.ini")) ) expected_logs = [ "network config is disabled by fallback", # apply_network_config @@ -166,6 +166,19 @@ for log in expected_logs: self.assertIn(log, self.stderr.getvalue()) + @mock.patch("cloudinit.cmd.clean.get_parser") + @mock.patch("cloudinit.cmd.clean.handle_clean_args") + @mock.patch("cloudinit.log.configure_root_logger") + def test_main_sys_argv( + self, + _m_configure_root_logger, + _m_handle_clean_args, + m_clean_get_parser, + ): + with mock.patch("sys.argv", ["cloudinit", "--debug", "clean"]): + main.main() + m_clean_get_parser.assert_called_once() + class TestShouldBringUpInterfaces: @pytest.mark.parametrize( diff -Nru cloud-init-23.4.4/tests/unittests/cmd/test_query.py cloud-init-24.1.3/tests/unittests/cmd/test_query.py --- cloud-init-23.4.4/tests/unittests/cmd/test_query.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/cmd/test_query.py 2024-03-27 13:14:04.000000000 +0000 @@ -15,6 +15,7 @@ from cloudinit.cmd import query from cloudinit.helpers import Paths from cloudinit.sources import REDACT_SENSITIVE_VALUE +from cloudinit.templater import JinjaSyntaxParsingException from cloudinit.util import write_file from tests.unittests.helpers import mock @@ -165,7 +166,7 @@ vendor_data="vd", varname=None, ) - with mock.patch(M_PATH + "util.load_file") as m_load: + with mock.patch(M_PATH + "util.load_binary_file") as m_load: m_load.side_effect = OSError(errno.EACCES, "Not allowed") assert 1 == query.handle_args("anyname", args) msg = "No read permission on '%s'. 
Try sudo" % noread_fn @@ -565,5 +566,69 @@ ) with mock.patch("os.getuid") as m_getuid: m_getuid.return_value = 100 + assert 1 == query.handle_args("anyname", args) + assert expected_error in caplog.text + + @pytest.mark.parametrize( + "header_included", + [True, False], + ) + def test_handle_args_formats_jinja_successfully( + self, caplog, tmpdir, capsys, header_included + ): + """Test that rendering a jinja template works as expected.""" + instance_data = tmpdir.join("instance-data") + instance_data.write( + '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2": ' + '{"v2_2": "val2.2"}, "top": "gun"}' + ) + header = "## template: jinja\n" if header_included else "" + format = header + "v1_1: {{ v1.v1_1 }}" + expected = header + "v1_1: val1.1\n" + + args = self.Args( + debug=False, + dump_all=False, + format=format, + instance_data=instance_data.strpath, + list_keys=False, + user_data="ud", + vendor_data="vd", + varname=None, + ) + with mock.patch("os.getuid") as m_getuid: + m_getuid.return_value = 100 + assert 0 == query.handle_args("anyname", args) + out, _err = capsys.readouterr() + assert expected == out + + def test_handle_args_invalid_jinja_exception(self, caplog, tmpdir, capsys): + """Raise an error when a jinja syntax error is encountered.""" + instance_data = tmpdir.join("instance-data") + instance_data.write( + '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2": ' + '{"v2_2": "val2.2"}, "top": "gun"}' + ) + format = "v1_1: {{ v1.v1_1 } }" + expected_error = ( + "Failed to render templated data. " + + JinjaSyntaxParsingException.format_error_message( + syntax_error="unexpected '}'", + line_number=2, + line_content="v1_1: {{ v1.v1_1 } }", + ) + ) + args = self.Args( + debug=False, + dump_all=False, + format=format, + instance_data=instance_data.strpath, + list_keys=False, + user_data="ud", + vendor_data="vd", + varname=None, + ) + with mock.patch("os.getuid") as m_getuid: + m_getuid.return_value = 100 assert 1 == query.handle_args("anyname", args) assert expected_error in caplog.text diff -Nru cloud-init-23.4.4/tests/unittests/cmd/test_status.py cloud-init-24.1.3/tests/unittests/cmd/test_status.py --- cloud-init-23.4.4/tests/unittests/cmd/test_status.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/cmd/test_status.py 2024-03-27 13:14:04.000000000 +0000 @@ -12,10 +12,6 @@ from cloudinit import subp from cloudinit.atomic_helper import write_json from cloudinit.cmd import status -from cloudinit.cmd.status import ( - UXAppStatus, - _get_error_or_running_from_systemd, -) from cloudinit.subp import SubpResult from cloudinit.util import ensure_file from tests.unittests.helpers import wrap_and_call @@ -41,31 +37,41 @@ ) +EXAMPLE_STATUS_RUNNING: Dict[str, Dict] = { + "v1": { + "datasource": None, + "init-local": { + "start": 1669231096.9621563, + "finished": None, + "errors": [], + }, + "init": {"start": None, "finished": None, "errors": []}, + "modules-config": {"start": None, "finished": None, "errors": []}, + "modules-final": {"start": None, "finished": None, "errors": []}, + "stage": "init-local", + } +} + + class TestStatus: maxDiff = None @mock.patch( - M_PATH + "load_file", - return_value=( - '{"v1": {"datasource": null, "init": {"errors": [], "finished": ' - 'null, "start": null}, "init-local": {"errors": [], "finished": ' - 'null, "start": 1669231096.9621563}, "modules-config": ' - '{"errors": [], "finished": null, "start": null},' - '"modules-final": {"errors": [], "finished": null, ' - '"start": null}, "stage": "init-local"} }' - ), + M_PATH + 
"load_text_file", + return_value=json.dumps(EXAMPLE_STATUS_RUNNING), ) @mock.patch(M_PATH + "os.path.exists", return_value=True) + @mock.patch(M_PATH + "is_running", return_value=True) @mock.patch( M_PATH + "get_bootstatus", return_value=( - status.UXAppBootStatusCode.ENABLED_BY_GENERATOR, + status.EnabledStatus.ENABLED_BY_GENERATOR, "Cloud-init enabled by systemd cloud-init-generator", ), ) @mock.patch( - f"{M_PATH}_get_error_or_running_from_systemd", - return_value=None, + f"{M_PATH}systemd_failed", + return_value=False, ) def test_get_status_details_ds_none( self, @@ -78,20 +84,21 @@ paths = mock.Mock() paths.run_dir = str(tmpdir) assert status.StatusDetails( - status.UXAppStatus.RUNNING, - status.UXAppBootStatusCode.ENABLED_BY_GENERATOR, + status.RunningStatus.RUNNING, + status.ConditionStatus.PEACHY, + status.EnabledStatus.ENABLED_BY_GENERATOR, "Running in stage: init-local", [], {}, "Wed, 23 Nov 2022 19:18:16 +0000", None, # datasource { - "init": {"errors": [], "finished": None, "start": None}, "init-local": { "errors": [], "finished": None, "start": 1669231096.9621563, }, + "init": {"errors": [], "finished": None, "start": None}, "modules-config": { "errors": [], "finished": None, @@ -106,6 +113,49 @@ }, ) == status.get_status_details(paths) + @mock.patch( + M_PATH + "load_text_file", + return_value=json.dumps(EXAMPLE_STATUS_RUNNING), + ) + @mock.patch(M_PATH + "os.path.exists", return_value=True) + @mock.patch(M_PATH + "is_running", return_value=True) + @mock.patch( + M_PATH + "get_bootstatus", + return_value=( + status.EnabledStatus.ENABLED_BY_GENERATOR, + "Cloud-init enabled by systemd cloud-init-generator", + ), + ) + @mock.patch( + f"{M_PATH}systemd_failed", + return_value=True, + ) + @mock.patch( + f"{M_PATH}uses_systemd", + return_value=True, + ) + def test_get_status_systemd_failure( + self, + m_uses_systemd, + m_systemd_status, + m_boot_status, + m_is_running, + m_p_exists, + m_load_json, + tmpdir, + ): + paths = mock.Mock() + paths.run_dir = str(tmpdir) + details = status.get_status_details(paths) + assert details.running_status == status.RunningStatus.DONE + assert details.condition_status == status.ConditionStatus.ERROR + assert details.description == "Failed due to systemd unit failure" + assert details.errors == [ + "Failed due to systemd unit failure. Ensure all cloud-init " + "services are enabled, and check 'systemctl' or 'journalctl' " + "for more information." 
+ ] + @pytest.mark.parametrize( [ "ensured_file", @@ -121,7 +171,7 @@ lambda config: config.disable_file, False, "root=/dev/my-root not-important", - status.UXAppBootStatusCode.ENABLED_BY_SYSVINIT, + status.EnabledStatus.ENABLED_BY_SYSVINIT, "expected enabled cloud-init on sysvinit", "Cloud-init enabled on sysvinit", id="false_on_sysvinit", @@ -131,7 +181,7 @@ lambda config: config.disable_file, True, "root=/dev/my-root not-important", - status.UXAppBootStatusCode.DISABLED_BY_MARKER_FILE, + status.EnabledStatus.DISABLED_BY_MARKER_FILE, "expected disabled cloud-init", lambda config: f"Cloud-init disabled by {config.disable_file}", id="true_on_disable_file", @@ -141,7 +191,7 @@ lambda config: config.disable_file, True, "something cloud-init=enabled else", - status.UXAppBootStatusCode.ENABLED_BY_KERNEL_CMDLINE, + status.EnabledStatus.ENABLED_BY_KERNEL_CMDLINE, "expected enabled cloud-init", "Cloud-init enabled by kernel command line cloud-init=enabled", id="false_on_kernel_cmdline_enable", @@ -151,7 +201,7 @@ None, True, "something cloud-init=disabled else", - status.UXAppBootStatusCode.DISABLED_BY_KERNEL_CMDLINE, + status.EnabledStatus.DISABLED_BY_KERNEL_CMDLINE, "expected disabled cloud-init", "Cloud-init disabled by kernel parameter cloud-init=disabled", id="true_on_kernel_cmdline", @@ -161,7 +211,7 @@ lambda config: os.path.join(config.paths.run_dir, "disabled"), True, "something", - status.UXAppBootStatusCode.DISABLED_BY_GENERATOR, + status.EnabledStatus.DISABLED_BY_GENERATOR, "expected disabled cloud-init", "Cloud-init disabled by cloud-init-generator", id="true_when_generator_disables", @@ -171,7 +221,7 @@ lambda config: os.path.join(config.paths.run_dir, "enabled"), True, "something ignored", - status.UXAppBootStatusCode.ENABLED_BY_GENERATOR, + status.EnabledStatus.ENABLED_BY_GENERATOR, "expected enabled cloud-init", "Cloud-init enabled by systemd cloud-init-generator", id="false_when_enabled_in_systemd", @@ -216,7 +266,7 @@ def test_status_returns_not_run( self, m_read_cfg_paths, config: Config, capsys ): - """When status.json does not exist yet, return 'not run'.""" + """When status.json does not exist yet, return 'not started'.""" m_read_cfg_paths.return_value = config.paths assert not os.path.exists( config.status_file @@ -224,14 +274,14 @@ cmdargs = MyArgs(long=False, wait=False, format="tabular") retcode = wrap_and_call( M_NAME, - {"get_bootstatus": (status.UXAppBootStatusCode.UNKNOWN, "")}, + {"get_bootstatus": (status.EnabledStatus.UNKNOWN, "")}, status.handle_status_args, "ignored", cmdargs, ) assert retcode == 0 out, _err = capsys.readouterr() - assert out == "status: not run\n" + assert out == "status: not started\n" @mock.patch(M_PATH + "read_cfg_paths") def test_status_returns_disabled_long_on_presence_of_disable_file( @@ -239,20 +289,14 @@ ): """When cloudinit is disabled, return disabled reason.""" m_read_cfg_paths.return_value = config.paths - checked_files = [] - - def fakeexists(filepath): - checked_files.append(filepath) - status_file = os.path.join(config.paths.run_dir, "status.json") - return bool(not filepath == status_file) cmdargs = MyArgs(long=True, wait=False, format="tabular") retcode = wrap_and_call( M_NAME, { - "os.path.exists": {"side_effect": fakeexists}, + "os.path.exists": {"return_value": False}, "get_bootstatus": ( - status.UXAppBootStatusCode.DISABLED_BY_KERNEL_CMDLINE, + status.EnabledStatus.DISABLED_BY_KERNEL_CMDLINE, "disabled for some reason", ), }, @@ -261,16 +305,12 @@ cmdargs, ) assert retcode == 0 - assert checked_files == [ - 
os.path.join(config.paths.run_dir, "status.json") - ] expected = dedent( """\ status: disabled extended_status: disabled boot_status_code: disabled-by-kernel-cmdline - detail: - disabled for some reason + detail: disabled for some reason errors: [] recoverable_errors: {} """ @@ -292,7 +332,7 @@ # Report running when status.json exists but result.json does not. pytest.param( None, - status.UXAppBootStatusCode.UNKNOWN, + status.EnabledStatus.UNKNOWN, {}, lambda config: config.result_file, MyArgs(long=False, wait=False, format="tabular"), @@ -300,21 +340,10 @@ "status: running\n", id="running_on_no_results_json", ), - # Report running when status exists with an unfinished stage. - pytest.param( - lambda config: config.result_file, - status.UXAppBootStatusCode.ENABLED_BY_GENERATOR, - {"v1": {"init": {"start": 1, "finished": None}}}, - None, - MyArgs(long=False, wait=False, format="tabular"), - 0, - "status: running\n", - id="running", - ), # Report done results.json exists no stages are unfinished. pytest.param( lambda config: config.result_file, - status.UXAppBootStatusCode.ENABLED_BY_GENERATOR, + status.EnabledStatus.ENABLED_BY_GENERATOR, { "v1": { "stage": None, # No current stage running @@ -341,7 +370,7 @@ # Long format of done status includes datasource info. pytest.param( lambda config: config.result_file, - status.UXAppBootStatusCode.ENABLED_BY_GENERATOR, + status.EnabledStatus.ENABLED_BY_GENERATOR, { "v1": { "stage": None, @@ -363,19 +392,17 @@ extended_status: done boot_status_code: enabled-by-generator last_update: Thu, 01 Jan 1970 00:02:05 +0000 - detail: - DataSourceNoCloud [seed=/var/.../seed/nocloud-net]\ -[dsmode=net] + detail: DataSourceNoCloud [seed=/var/.../seed/nocloud-net][dsmode=net] errors: [] recoverable_errors: {} - """ + """ # noqa: E501 ), id="returns_done_long", ), # Reports error when any stage has errors. pytest.param( - None, - status.UXAppBootStatusCode.ENABLED_BY_GENERATOR, + lambda config: config.result_file, + status.EnabledStatus.ENABLED_BY_GENERATOR, { "v1": { "stage": None, @@ -397,7 +424,7 @@ # Long format of error status includes all error messages. pytest.param( None, - status.UXAppBootStatusCode.ENABLED_BY_KERNEL_CMDLINE, + status.EnabledStatus.ENABLED_BY_KERNEL_CMDLINE, { "v1": { "stage": None, @@ -424,24 +451,23 @@ dedent( """\ status: error - extended_status: error + extended_status: error - running boot_status_code: enabled-by-kernel-cmdline last_update: Thu, 01 Jan 1970 00:02:05 +0000 - detail: - DataSourceNoCloud [seed=/var/.../seed/nocloud-net][dsmode=net] + detail: DataSourceNoCloud [seed=/var/.../seed/nocloud-net][dsmode=net] errors: \t- error1 \t- error2 \t- error3 recoverable_errors: {} - """ + """ # noqa: E501 ), id="on_errors_long", ), # Long format reports the stage in which we are running. 
pytest.param( None, - status.UXAppBootStatusCode.ENABLED_BY_KERNEL_CMDLINE, + status.EnabledStatus.ENABLED_BY_KERNEL_CMDLINE, { "v1": { "stage": "init", @@ -458,8 +484,7 @@ extended_status: running boot_status_code: enabled-by-kernel-cmdline last_update: Thu, 01 Jan 1970 00:02:04 +0000 - detail: - Running in stage: init + detail: Running in stage: init errors: [] recoverable_errors: {} """ @@ -468,7 +493,7 @@ ), pytest.param( None, - status.UXAppBootStatusCode.ENABLED_BY_KERNEL_CMDLINE, + status.EnabledStatus.ENABLED_BY_KERNEL_CMDLINE, { "v1": { "stage": "init", @@ -505,7 +530,7 @@ ), pytest.param( None, - status.UXAppBootStatusCode.ENABLED_BY_KERNEL_CMDLINE, + status.EnabledStatus.ENABLED_BY_KERNEL_CMDLINE, { "v1": { "stage": "init", @@ -533,7 +558,7 @@ ), pytest.param( None, - status.UXAppBootStatusCode.ENABLED_BY_KERNEL_CMDLINE, + status.EnabledStatus.ENABLED_BY_KERNEL_CMDLINE, { "v1": { "stage": None, @@ -566,7 +591,7 @@ ), "errors": ["error1", "error2", "error3"], "status": "error", - "extended_status": "error", + "extended_status": "error - running", "init": { "finished": 125.678, "start": 124.567, @@ -585,7 +610,7 @@ ), pytest.param( lambda config: config.result_file, - status.UXAppBootStatusCode.ENABLED_BY_KERNEL_CMDLINE, + status.EnabledStatus.ENABLED_BY_KERNEL_CMDLINE, { "v1": { "stage": None, @@ -708,7 +733,7 @@ ) @mock.patch(M_PATH + "read_cfg_paths") @mock.patch( - f"{M_PATH}_get_error_or_running_from_systemd", + f"{M_PATH}systemd_failed", return_value=None, ) def test_status_output( @@ -716,7 +741,7 @@ m_get_systemd_status, m_read_cfg_paths, ensured_file: Optional[Callable], - bootstatus: status.UXAppBootStatusCode, + bootstatus: status.EnabledStatus, status_content: Dict, assert_file, cmdargs: MyArgs, @@ -752,7 +777,7 @@ @mock.patch(M_PATH + "read_cfg_paths") @mock.patch( - f"{M_PATH}_get_error_or_running_from_systemd", + f"{M_PATH}systemd_failed", return_value=None, ) def test_status_wait_blocks_until_done( @@ -793,7 +818,7 @@ M_NAME, { "sleep": {"side_effect": fake_sleep}, - "get_bootstatus": (status.UXAppBootStatusCode.UNKNOWN, ""), + "get_bootstatus": (status.EnabledStatus.UNKNOWN, ""), }, status.handle_status_args, "ignored", @@ -806,7 +831,7 @@ @mock.patch(M_PATH + "read_cfg_paths") @mock.patch( - f"{M_PATH}_get_error_or_running_from_systemd", + f"{M_PATH}systemd_failed", return_value=None, ) def test_status_wait_blocks_until_error( @@ -843,13 +868,14 @@ write_json(config.status_file, running_json) elif sleep_calls == 3: write_json(config.status_file, error_json) + write_json(config.result_file, "{}") cmdargs = MyArgs(long=False, wait=True, format="tabular") retcode = wrap_and_call( M_NAME, { "sleep": {"side_effect": fake_sleep}, - "get_bootstatus": (status.UXAppBootStatusCode.UNKNOWN, ""), + "get_bootstatus": (status.EnabledStatus.UNKNOWN, ""), }, status.handle_status_args, "ignored", @@ -862,7 +888,7 @@ @mock.patch(M_PATH + "read_cfg_paths") @mock.patch( - f"{M_PATH}_get_error_or_running_from_systemd", + f"{M_PATH}systemd_failed", return_value=None, ) def test_status_main( @@ -879,7 +905,7 @@ M_NAME, { "sys.argv": {"new": ["status"]}, - "get_bootstatus": (status.UXAppBootStatusCode.UNKNOWN, ""), + "get_bootstatus": (status.EnabledStatus.UNKNOWN, ""), }, status.main, ) @@ -888,14 +914,20 @@ assert out == "status: running\n" -class TestGetErrorOrRunningFromSystemd: +class TestSystemdFailed: @pytest.fixture(autouse=True) def common_mocks(self, mocker): mocker.patch("cloudinit.cmd.status.sleep") yield @pytest.mark.parametrize( - ["active_state", "unit_file_state", 
"sub_state", "main_pid", "status"], + [ + "active_state", + "unit_file_state", + "sub_state", + "main_pid", + "expected_failed", + ], [ # To cut down on the combination of states, I'm grouping # enabled, enabled-runtime, and static into an "enabled" state @@ -904,34 +936,39 @@ # different depending on the ActiveState they are mapped to. # Because of this I'm only testing SubState combinations seen # in real-world testing (or using "any" string if we dont care). - ("activating", "enabled", "start", "123", UXAppStatus.RUNNING), - ("activating", "enabled", "start", "123", UXAppStatus.RUNNING), - ("active", "enabled-runtime", "exited", "0", None), - ("active", "enabled", "exited", "0", None), - ("active", "enabled", "running", "345", UXAppStatus.RUNNING), - ("active", "enabled", "running", "0", None), - # Dead doesn't mean exited here. It means not run yet. - ("inactive", "static", "dead", "123", UXAppStatus.RUNNING), - ("reloading", "enabled", "start", "123", UXAppStatus.RUNNING), + ("activating", "enabled", "start", "123", False), + ("activating", "enabled", "start", "123", False), + ("active", "enabled-runtime", "exited", "0", False), + ("active", "enabled", "exited", "0", False), + ("active", "enabled", "running", "345", False), + ("active", "enabled", "running", "0", False), + # Dead doesn't mean exited here. It means not started yet. + ("inactive", "static", "dead", "123", False), + ("reloading", "enabled", "start", "123", False), ( "deactivating", "enabled-runtime", "any", "123", - UXAppStatus.RUNNING, + False, ), - ("failed", "static", "failed", "0", UXAppStatus.ERROR), + ("failed", "static", "failed", "0", True), # Try previous combinations again with "not enabled" states - ("activating", "linked", "start", "0", UXAppStatus.ERROR), - ("active", "linked-runtime", "exited", "0", UXAppStatus.ERROR), - ("inactive", "masked", "dead", "0", UXAppStatus.ERROR), - ("reloading", "masked-runtime", "start", "0", UXAppStatus.ERROR), - ("deactivating", "disabled", "any", "0", UXAppStatus.ERROR), - ("failed", "invalid", "failed", "0", UXAppStatus.ERROR), + ("activating", "linked", "start", "0", True), + ("active", "linked-runtime", "exited", "0", True), + ("inactive", "masked", "dead", "0", True), + ("reloading", "masked-runtime", "start", "0", True), + ("deactivating", "disabled", "any", "0", True), + ("failed", "invalid", "failed", "0", True), ], ) - def test_get_error_or_running_from_systemd( - self, active_state, unit_file_state, sub_state, main_pid, status + def test_systemd_failed( + self, + active_state, + unit_file_state, + sub_state, + main_pid, + expected_failed, ): with mock.patch( f"{M_PATH}subp.subp", @@ -943,25 +980,7 @@ stderr=None, ), ): - assert ( - _get_error_or_running_from_systemd(UXAppStatus.RUNNING, False) - == status - ) - - def test_exception_while_running(self, mocker, capsys): - m_subp = mocker.patch( - f"{M_PATH}subp.subp", - side_effect=subp.ProcessExecutionError( - "Message recipient disconnected from message bus without" - " replying" - ), - ) - assert ( - _get_error_or_running_from_systemd(UXAppStatus.RUNNING, wait=True) - is None - ) - assert 1 == m_subp.call_count - assert "Failed to get status" not in capsys.readouterr().err + assert status.systemd_failed(wait=False) == expected_failed def test_retry(self, mocker, capsys): m_subp = mocker.patch( @@ -982,10 +1001,7 @@ ), ], ) - assert ( - _get_error_or_running_from_systemd(UXAppStatus.ERROR, wait=True) - is UXAppStatus.RUNNING - ) + assert status.systemd_failed(wait=True) is False assert 3 == m_subp.call_count assert 
"Failed to get status" not in capsys.readouterr().err @@ -993,19 +1009,20 @@ m_subp = mocker.patch( f"{M_PATH}subp.subp", side_effect=subp.ProcessExecutionError( - "Message recipient disconnected from message bus without" - " replying" + stderr=( + "Message recipient disconnected from message bus without " + "replying" + ), ), ) mocker.patch("time.time", side_effect=[1, 2, 50]) - assert ( - _get_error_or_running_from_systemd(UXAppStatus.ERROR, wait=False) - is None - ) + assert status.systemd_failed(wait=False) is False assert 1 == m_subp.call_count assert ( "Failed to get status from systemd. " - "Cloud-init status may be inaccurate." + "Cloud-init status may be inaccurate. " + "Error from systemctl: Message recipient disconnected from " + "message bus without replying" ) in capsys.readouterr().err @@ -1025,9 +1042,9 @@ "Message recipient disconnected", stderr="oh noes!" ), ) - assert status.query_systemctl(["some", "args"], wait=False) == "" + with pytest.raises(subp.ProcessExecutionError): + status.query_systemctl(["some", "args"], wait=False) m_subp.assert_called_once_with(["systemctl", "some", "args"]) - assert "Error from systemctl: oh noes!" in capsys.readouterr().err def test_query_systemctl_wait_with_exception(self, mocker): m_sleep = mocker.patch(f"{M_PATH}sleep") @@ -1044,23 +1061,3 @@ assert status.query_systemctl(["some", "args"], wait=True) == "hello" assert m_subp.call_count == 4 assert m_sleep.call_count == 3 - - def test_query_systemctl_wait_with_exception_status(self, mocker): - m_sleep = mocker.patch(f"{M_PATH}sleep") - m_subp = mocker.patch( - f"{M_PATH}subp.subp", - side_effect=subp.ProcessExecutionError( - "Message recipient disconnected" - ), - ) - - assert ( - status.query_systemctl( - ["some", "args"], - wait=True, - existing_status=UXAppStatus.RUNNING, - ) - == "" - ) - assert m_subp.call_count == 1 - assert m_sleep.call_count == 0 diff -Nru cloud-init-23.4.4/tests/unittests/config/test_apt_conf_v1.py cloud-init-24.1.3/tests/unittests/config/test_apt_conf_v1.py --- cloud-init-23.4.4/tests/unittests/config/test_apt_conf_v1.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_apt_conf_v1.py 2024-03-27 13:14:04.000000000 +0000 @@ -33,7 +33,7 @@ self.assertTrue(os.path.isfile(self.pfile)) self.assertFalse(os.path.isfile(self.cfile)) - contents = util.load_file(self.pfile) + contents = util.load_text_file(self.pfile) self.assertTrue(self._search_apt_config(contents, "http", "myproxy")) def test_apt_http_proxy_written(self): @@ -43,7 +43,7 @@ self.assertTrue(os.path.isfile(self.pfile)) self.assertFalse(os.path.isfile(self.cfile)) - contents = util.load_file(self.pfile) + contents = util.load_text_file(self.pfile) self.assertTrue(self._search_apt_config(contents, "http", "myproxy")) def test_apt_all_proxy_written(self): @@ -64,7 +64,7 @@ self.assertTrue(os.path.isfile(self.pfile)) self.assertFalse(os.path.isfile(self.cfile)) - contents = util.load_file(self.pfile) + contents = util.load_text_file(self.pfile) for ptype, pval in values.items(): self.assertTrue(self._search_apt_config(contents, ptype, pval)) @@ -81,7 +81,7 @@ {"proxy": "foo"}, self.pfile, self.cfile ) self.assertTrue(os.path.isfile(self.pfile)) - contents = util.load_file(self.pfile) + contents = util.load_text_file(self.pfile) self.assertTrue(self._search_apt_config(contents, "http", "foo")) def test_config_written(self): @@ -93,7 +93,7 @@ self.assertTrue(os.path.isfile(self.cfile)) self.assertFalse(os.path.isfile(self.pfile)) - 
self.assertEqual(util.load_file(self.cfile), payload) + self.assertEqual(util.load_text_file(self.cfile), payload) def test_config_replaced(self): util.write_file(self.pfile, "content doesnt matter") @@ -101,7 +101,7 @@ {"conf": "foo"}, self.pfile, self.cfile ) self.assertTrue(os.path.isfile(self.cfile)) - self.assertEqual(util.load_file(self.cfile), "foo") + self.assertEqual(util.load_text_file(self.cfile), "foo") def test_config_deleted(self): # if no 'conf' is provided, delete any previously written file diff -Nru cloud-init-23.4.4/tests/unittests/config/test_apt_configure_sources_list_v1.py cloud-init-24.1.3/tests/unittests/config/test_apt_configure_sources_list_v1.py --- cloud-init-23.4.4/tests/unittests/config/test_apt_configure_sources_list_v1.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_apt_configure_sources_list_v1.py 2024-03-27 13:14:04.000000000 +0000 @@ -161,9 +161,18 @@ assert 0o644 == stat.S_IMODE(sources_file.stat().mode) self.subp.assert_called_once_with( - ["ps", "-o", "ppid,pid", "-C", "dirmngr", "-C", "gpg-agent"], + [ + "ps", + "-o", + "ppid,pid", + "-C", + "keyboxd", + "-C", + "dirmngr", + "-C", + "gpg-agent", + ], capture=True, - target=None, rcs=[0, 1], ) @@ -221,9 +230,18 @@ mockresolve.assert_any_call("http://does.not.exist") mockresolve.assert_any_call(mirrorcheck) self.subp.assert_called_once_with( - ["ps", "-o", "ppid,pid", "-C", "dirmngr", "-C", "gpg-agent"], + [ + "ps", + "-o", + "ppid,pid", + "-C", + "keyboxd", + "-C", + "dirmngr", + "-C", + "gpg-agent", + ], capture=True, - target=None, rcs=[0, 1], ) @@ -284,8 +302,17 @@ assert expected == sources_file.read() assert 0o644 == stat.S_IMODE(sources_file.stat().mode) self.subp.assert_called_once_with( - ["ps", "-o", "ppid,pid", "-C", "dirmngr", "-C", "gpg-agent"], + [ + "ps", + "-o", + "ppid,pid", + "-C", + "keyboxd", + "-C", + "dirmngr", + "-C", + "gpg-agent", + ], capture=True, - target=None, rcs=[0, 1], ) diff -Nru cloud-init-23.4.4/tests/unittests/config/test_apt_configure_sources_list_v3.py cloud-init-24.1.3/tests/unittests/config/test_apt_configure_sources_list_v3.py --- cloud-init-23.4.4/tests/unittests/config/test_apt_configure_sources_list_v3.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_apt_configure_sources_list_v3.py 2024-03-27 13:14:04.000000000 +0000 @@ -331,8 +331,17 @@ assert expected == sources_file.read() assert 0o644 == stat.S_IMODE(sources_file.stat().mode) self.subp.assert_called_once_with( - ["ps", "-o", "ppid,pid", "-C", "dirmngr", "-C", "gpg-agent"], + [ + "ps", + "-o", + "ppid,pid", + "-C", + "keyboxd", + "-C", + "dirmngr", + "-C", + "gpg-agent", + ], capture=True, - target=None, rcs=[0, 1], ) diff -Nru cloud-init-23.4.4/tests/unittests/config/test_apt_source_v1.py cloud-init-24.1.3/tests/unittests/config/test_apt_source_v1.py --- cloud-init-23.4.4/tests/unittests/config/test_apt_source_v1.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_apt_source_v1.py 2024-03-27 13:14:04.000000000 +0000 @@ -119,7 +119,7 @@ assert os.path.isfile(filename) - contents = util.load_file(filename) + contents = util.load_text_file(filename) assert re.search( r"%s %s %s %s\n" % ( @@ -166,7 +166,7 @@ self.apt_src_basic(apt_lists[0], cfg) # extra verify on two extra files of this test - contents = util.load_file(apt_lists[1]) + contents = util.load_text_file(apt_lists[1]) assert re.search( r"%s %s %s %s\n" % ( @@ -178,7 +178,7 @@ contents, flags=re.IGNORECASE, ) - contents = 
util.load_file(apt_lists[2]) + contents = util.load_text_file(apt_lists[2]) assert re.search( r"%s %s %s %s\n" % ( @@ -270,7 +270,7 @@ assert os.path.isfile(filename) - contents = util.load_file(filename) + contents = util.load_text_file(filename) assert re.search( r"%s %s %s %s\n" % ("deb", params["MIRROR"], params["RELEASE"], "multiverse"), @@ -295,7 +295,7 @@ # extra verify on two extra files of this test params = self._get_default_params() - contents = util.load_file(apt_lists[1]) + contents = util.load_text_file(apt_lists[1]) assert re.search( r"%s %s %s %s\n" % ("deb", params["MIRROR"], params["RELEASE"], "main"), @@ -303,7 +303,7 @@ flags=re.IGNORECASE, ) - contents = util.load_file(apt_lists[2]) + contents = util.load_text_file(apt_lists[2]) assert re.search( r"%s %s %s %s\n" % ("deb", params["MIRROR"], params["RELEASE"], "universe"), @@ -362,13 +362,13 @@ sources = cfg["apt"]["sources"] for src in sources: print(sources[src]) - calls.append(call(sources[src], cloud, None)) + calls.append(call(sources[src], cloud)) mockobj.assert_has_calls(calls, any_order=True) assert os.path.isfile(filename) - contents = util.load_file(filename) + contents = util.load_text_file(filename) assert re.search( r"%s %s %s %s\n" % ( @@ -429,7 +429,7 @@ } self.apt_src_keyid(apt_lists[0], [cfg1, cfg2, cfg3], 3) - contents = util.load_file(apt_lists[1]) + contents = util.load_text_file(apt_lists[1]) assert re.search( r"%s %s %s %s\n" % ( @@ -441,7 +441,7 @@ contents, flags=re.IGNORECASE, ) - contents = util.load_file(apt_lists[2]) + contents = util.load_text_file(apt_lists[2]) assert re.search( r"%s %s %s %s\n" % ( @@ -485,13 +485,13 @@ calls = [] for src in sources: print(sources[src]) - calls.append(call(sources[src], cloud, None)) + calls.append(call(sources[src], cloud)) mockobj.assert_has_calls(calls, any_order=True) assert os.path.isfile(filename) - contents = util.load_file(filename) + contents = util.load_text_file(filename) assert re.search( r"%s %s %s %s\n" % ( @@ -665,7 +665,6 @@ "--no-update", "ppa:smoser/cloud-init-test", ], - target=None, ), mock.call( [ @@ -673,12 +672,13 @@ "-o", "ppid,pid", "-C", + "keyboxd", + "-C", "dirmngr", "-C", "gpg-agent", ], capture=True, - target=None, rcs=[0, 1], ), ] @@ -712,7 +712,6 @@ "--no-update", "ppa:smoser/cloud-init-test", ], - target=None, ), call( [ @@ -720,7 +719,6 @@ "--no-update", "ppa:smoser/cloud-init-test2", ], - target=None, ), call( [ @@ -728,7 +726,6 @@ "--no-update", "ppa:smoser/cloud-init-test3", ], - target=None, ), ] mockobj.assert_has_calls(calls, any_order=True) diff -Nru cloud-init-23.4.4/tests/unittests/config/test_apt_source_v3.py cloud-init-24.1.3/tests/unittests/config/test_apt_source_v3.py --- cloud-init-23.4.4/tests/unittests/config/test_apt_source_v3.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_apt_source_v3.py 2024-03-27 13:14:04.000000000 +0000 @@ -4,7 +4,6 @@ Testing various config variations of the apt_source custom config This tries to call all in the new v3 format and cares about new features """ -import glob import logging import os import pathlib @@ -90,7 +89,6 @@ self._add_apt_sources( cfg, cloud=None, - target=tmpdir.strpath, template_params=params, aa_repo_match=self.matcher, ) @@ -99,7 +97,7 @@ os.path.isfile(filename) is True ), f"Missing expected file {filename}" - contents = util.load_file(filename) + contents = util.load_text_file(filename) assert re.search( r"%s %s %s %s\n" % ( @@ -153,7 +151,7 @@ self._apt_src_basic(self.aptlistfile, cfg, tmpdir) # extra verify on two 
extra files of this test - contents = util.load_file(self.aptlistfile2) + contents = util.load_text_file(self.aptlistfile2) assert re.search( r"%s %s %s %s\n" % ( @@ -165,7 +163,7 @@ contents, flags=re.IGNORECASE, ), f"Unexpected APT format of {self.aptlistfile2}: contents" - contents = util.load_file(self.aptlistfile3) + contents = util.load_text_file(self.aptlistfile3) assert re.search( r"%s %s %s %s\n" % ( @@ -186,14 +184,13 @@ self._add_apt_sources( cfg, cloud=None, - target=tmpdir.strpath, template_params=params, aa_repo_match=self.matcher, ) assert os.path.isfile(filename) is True, f"Unexpected file {filename}" - contents = util.load_file(filename) + contents = util.load_text_file(filename) assert re.search( r"%s %s %s %s\n" % ("deb", params["MIRROR"], params["RELEASE"], "multiverse"), @@ -211,7 +208,7 @@ cfg = { "ignored": { "source": "deb $MIRROR $RELEASE multiverse", - "filename": self.aptlistfile.replace(tmpdir.strpath, ""), + "filename": self.aptlistfile, } } # second file should overwrite the dict key @@ -226,14 +223,14 @@ # extra verify on two extra files of this test params = self._get_default_params() - contents = util.load_file(self.aptlistfile2) + contents = util.load_text_file(self.aptlistfile2) assert re.search( r"%s %s %s %s\n" % ("deb", params["MIRROR"], params["RELEASE"], "main"), contents, flags=re.IGNORECASE, ), f"Unexpected APT format {self.aptlistfile2}: {contents}" - contents = util.load_file(self.aptlistfile3) + contents = util.load_text_file(self.aptlistfile3) assert re.search( r"%s %s %s %s\n" % ("deb", params["MIRROR"], params["RELEASE"], "universe"), @@ -247,7 +244,7 @@ self.aptlistfile: {"source": "deb $MIRROR $RELEASE multiverse"}, "notused": { "source": "deb $MIRROR $RELEASE main", - "filename": self.aptlistfile2.replace(tmpdir.strpath, ""), + "filename": self.aptlistfile2, }, self.aptlistfile3: {"source": "deb $MIRROR $RELEASE universe"}, } @@ -263,7 +260,6 @@ self._add_apt_sources( cfg, cloud=None, - target=tmpdir.strpath, template_params=params, aa_repo_match=self.matcher, ) @@ -274,13 +270,13 @@ if is_hardened is not None: calls.append(call(cfg[key], None, hardened=is_hardened)) else: - calls.append(call(cfg[key], None, tmpdir.strpath)) + calls.append(call(cfg[key], None)) mockobj.assert_has_calls(calls, any_order=True) assert os.path.isfile(filename) is True - contents = util.load_file(filename) + contents = util.load_text_file(filename) assert re.search( r"%s %s %s %s\n" % ( @@ -303,7 +299,7 @@ "smoser/cloud-init-test/ubuntu" " xenial main" ), - "filename": self.aptlistfile.replace(tmpdir.strpath, ""), + "filename": self.aptlistfile, "keyid": "03683F77", } } @@ -329,7 +325,7 @@ " xenial universe" ), "keyid": "03683F77", - "filename": self.aptlistfile2.replace(tmpdir.strpath, ""), + "filename": self.aptlistfile2, }, self.aptlistfile3: { "source": ( @@ -338,13 +334,13 @@ "smoser/cloud-init-test/ubuntu" " xenial multiverse" ), - "filename": self.aptlistfile3.replace(tmpdir.strpath, ""), + "filename": self.aptlistfile3, "keyid": "03683F77", }, } self._apt_src_keyid(self.aptlistfile, cfg, 3, tmpdir) - contents = util.load_file(self.aptlistfile2) + contents = util.load_text_file(self.aptlistfile2) assert re.search( r"%s %s %s %s\n" % ( @@ -356,7 +352,7 @@ contents, flags=re.IGNORECASE, ) - contents = util.load_file(self.aptlistfile3) + contents = util.load_text_file(self.aptlistfile3) assert re.search( r"%s %s %s %s\n" % ( @@ -369,7 +365,7 @@ flags=re.IGNORECASE, ) - def test_apt_v3_src_key(self, mocker, tmpdir): + def test_apt_v3_src_key(self, mocker): 
"""test_apt_v3_src_key - Test source + key""" params = self._get_default_params() cfg = { @@ -380,7 +376,7 @@ "smoser/cloud-init-test/ubuntu" " xenial main" ), - "filename": self.aptlistfile.replace(tmpdir.strpath, ""), + "filename": self.aptlistfile, "key": "fakekey 4321", } } @@ -388,7 +384,6 @@ self._add_apt_sources( cfg, cloud=None, - target=tmpdir.strpath, template_params=params, aa_repo_match=self.matcher, ) @@ -402,7 +397,7 @@ ), ) mockobj.assert_has_calls(calls, any_order=True) - contents = util.load_file(self.aptlistfile) + contents = util.load_text_file(self.aptlistfile) assert re.search( r"%s %s %s %s\n" % ( @@ -415,7 +410,7 @@ flags=re.IGNORECASE, ) - def test_apt_v3_src_keyonly(self, tmpdir, mocker): + def test_apt_v3_src_keyonly(self, mocker): """test_apt_v3_src_keyonly - Test key without source""" params = self._get_default_params() cfg = {self.aptlistfile: {"key": "fakekey 4242"}} @@ -424,7 +419,6 @@ self._add_apt_sources( cfg, cloud=None, - target=tmpdir.strpath, template_params=params, aa_repo_match=self.matcher, ) @@ -442,7 +436,7 @@ # filename should be ignored on key only assert os.path.isfile(self.aptlistfile) is False - def test_apt_v3_src_keyidonly(self, tmpdir): + def test_apt_v3_src_keyidonly(self): """test_apt_v3_src_keyidonly - Test keyid without source""" params = self._get_default_params() cfg = {self.aptlistfile: {"keyid": "03683F77"}} @@ -453,7 +447,6 @@ self._add_apt_sources( cfg, cloud=None, - target=tmpdir.strpath, template_params=params, aa_repo_match=self.matcher, ) @@ -473,7 +466,7 @@ os.path.isfile(self.aptlistfile) is False ), f"Unexpected file {self.aptlistfile} found" - def apt_src_keyid_real(self, cfg, expectedkey, tmpdir, is_hardened=None): + def apt_src_keyid_real(self, cfg, expectedkey, is_hardened=None): """apt_src_keyid_real Test specification of a keyid without source including up to addition of the key (add_apt_key_raw mocked to keep the @@ -488,7 +481,6 @@ self._add_apt_sources( cfg, cloud=None, - target=tmpdir.strpath, template_params=params, aa_repo_match=self.matcher, ) @@ -500,28 +492,28 @@ if is_hardened is not None: mockkey.assert_called_with( expectedkey, - keycfg["keyfile"].replace(tmpdir.strpath, ""), + keycfg["keyfile"], hardened=is_hardened, ) # filename should be ignored on key only assert os.path.isfile(self.aptlistfile) is False - def test_apt_v3_src_keyid_real(self, tmpdir): + def test_apt_v3_src_keyid_real(self): """test_apt_v3_src_keyid_real - Test keyid including key add""" keyid = "03683F77" cfg = {self.aptlistfile: {"keyid": keyid, "keyfile": self.aptlistfile}} - self.apt_src_keyid_real(cfg, EXPECTEDKEY, tmpdir, is_hardened=False) + self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) - def test_apt_v3_src_longkeyid_real(self, tmpdir): + def test_apt_v3_src_longkeyid_real(self): """test_apt_v3_src_longkeyid_real Test long keyid including key add""" keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" cfg = {self.aptlistfile: {"keyid": keyid, "keyfile": self.aptlistfile}} - self.apt_src_keyid_real(cfg, EXPECTEDKEY, tmpdir, is_hardened=False) + self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) - def test_apt_v3_src_longkeyid_ks_real(self, tmpdir): + def test_apt_v3_src_longkeyid_ks_real(self): """test_apt_v3_src_longkeyid_ks_real Test long keyid from other ks""" keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" cfg = { @@ -532,9 +524,9 @@ } } - self.apt_src_keyid_real(cfg, EXPECTEDKEY, tmpdir) + self.apt_src_keyid_real(cfg, EXPECTEDKEY) - def test_apt_v3_src_keyid_keyserver(self, 
tmpdir): + def test_apt_v3_src_keyid_keyserver(self): """test_apt_v3_src_keyid_keyserver - Test custom keyserver""" keyid = "03683F77" params = self._get_default_params() @@ -557,7 +549,6 @@ self._add_apt_sources( cfg, cloud=None, - target=tmpdir.strpath, template_params=params, aa_repo_match=self.matcher, ) @@ -565,14 +556,14 @@ mockgetkey.assert_called_with("03683F77", "test.random.com") mockadd.assert_called_with( "fakekey", - self.aptlistfile.replace(tmpdir.strpath, ""), + self.aptlistfile, hardened=False, ) # filename should be ignored on key only assert os.path.isfile(self.aptlistfile) is False - def test_apt_v3_src_ppa(self, tmpdir): + def test_apt_v3_src_ppa(self): """test_apt_v3_src_ppa - Test specification of a ppa""" params = self._get_default_params() cfg = {self.aptlistfile: {"source": "ppa:smoser/cloud-init-test"}} @@ -581,7 +572,6 @@ self._add_apt_sources( cfg, cloud=None, - target=tmpdir.strpath, template_params=params, aa_repo_match=self.matcher, ) @@ -591,7 +581,6 @@ "--no-update", "ppa:smoser/cloud-init-test", ], - target=tmpdir.strpath, ) # adding ppa should ignore filename (uses add-apt-repository) @@ -599,7 +588,7 @@ os.path.isfile(self.aptlistfile) is False ), f"Unexpected file found {self.aptlistfile}" - def test_apt_v3_src_ppa_tri(self, tmpdir): + def test_apt_v3_src_ppa_tri(self): """test_apt_v3_src_ppa_tri - Test specification of multiple ppa's""" params = self._get_default_params() cfg = { @@ -612,7 +601,6 @@ self._add_apt_sources( cfg, cloud=None, - target=tmpdir.strpath, template_params=params, aa_repo_match=self.matcher, ) @@ -623,7 +611,6 @@ "--no-update", "ppa:smoser/cloud-init-test", ], - target=tmpdir.strpath, ), call( [ @@ -631,7 +618,6 @@ "--no-update", "ppa:smoser/cloud-init-test2", ], - target=tmpdir.strpath, ), call( [ @@ -639,7 +625,6 @@ "--no-update", "ppa:smoser/cloud-init-test3", ], - target=tmpdir.strpath, ), ] mockobj.assert_has_calls(calls, any_order=True) @@ -651,9 +636,9 @@ ), f"Unexpected file {path}" @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture") - def test_apt_v3_list_rename(self, m_get_dpkg_architecture, tmpdir): + def test_apt_v3_list_rename(self, m_get_dpkg_architecture): """test_apt_v3_list_rename - Test find mirror and apt list renaming""" - pre = tmpdir.join("/var/lib/apt/lists") + pre = cc_apt_configure.APT_LISTS # filenames are archive dependent arch = "s390x" @@ -690,62 +675,14 @@ mirrors["SECURITY"] == "http://testsec.ubuntu.com/%s/" % component ) - with mock.patch.object(os, "rename") as mockren: - with mock.patch.object(glob, "glob", return_value=[fromfn]): - cc_apt_configure.rename_apt_lists( - mirrors, tmpdir.strpath, arch - ) + with mock.patch.object(cc_apt_configure.os, "rename") as mockren: + with mock.patch.object( + cc_apt_configure.glob, "glob", return_value=[fromfn] + ): + cc_apt_configure.rename_apt_lists(mirrors, arch) mockren.assert_any_call(fromfn, tofn) - @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture") - def test_apt_v3_list_rename_non_slash( - self, m_get_dpkg_architecture, tmpdir - ): - target = tmpdir.join("rename_non_slash") - - apt_lists_d = target.join(cc_apt_configure.APT_LISTS).strpath - - arch = "amd64" - m_get_dpkg_architecture.return_value = arch - - mirror_path = "some/random/path/" - primary = "http://test.ubuntu.com/" + mirror_path - security = "http://test-security.ubuntu.com/" + mirror_path - mirrors = {"PRIMARY": primary, "SECURITY": security} - - # these match default archive prefixes - opri_pre = "archive.ubuntu.com_ubuntu_dists_xenial" - 
osec_pre = "security.ubuntu.com_ubuntu_dists_xenial" - # this one won't match and should not be renamed defaults. - other_pre = "dl.google.com_linux_chrome_deb_dists_stable" - # these are our new expected prefixes - npri_pre = "test.ubuntu.com_some_random_path_dists_xenial" - nsec_pre = "test-security.ubuntu.com_some_random_path_dists_xenial" - - files = [ - # orig prefix, new prefix, suffix - (opri_pre, npri_pre, "_main_binary-amd64_Packages"), - (opri_pre, npri_pre, "_main_binary-amd64_InRelease"), - (opri_pre, npri_pre, "-updates_main_binary-amd64_Packages"), - (opri_pre, npri_pre, "-updates_main_binary-amd64_InRelease"), - (other_pre, other_pre, "_main_binary-amd64_Packages"), - (other_pre, other_pre, "_Release"), - (other_pre, other_pre, "_Release.gpg"), - (osec_pre, nsec_pre, "_InRelease"), - (osec_pre, nsec_pre, "_main_binary-amd64_Packages"), - (osec_pre, nsec_pre, "_universe_binary-amd64_Packages"), - ] - - expected = sorted([npre + suff for opre, npre, suff in files]) - # create files - for opre, _npre, suff in files: - fpath = os.path.join(apt_lists_d, opre + suff) - util.write_file(fpath, content=fpath) - - cc_apt_configure.rename_apt_lists(mirrors, target.strpath, arch) - assert expected == sorted(os.listdir(apt_lists_d)) - @staticmethod def test_apt_v3_proxy(): """test_apt_v3_proxy - Test apt_*proxy configuration""" @@ -1288,7 +1225,7 @@ } with mock.patch.object(cc_apt_configure, "add_apt_key_raw") as mockadd: - cc_apt_configure.add_mirror_keys(cfg, None, tmpdir.strpath) + cc_apt_configure.add_mirror_keys(cfg, None) calls = [ mock.call("fakekey_primary", "primary", hardened=False), mock.call("fakekey_security", "security", hardened=False), @@ -1307,7 +1244,6 @@ ["debconf-set-selections"], data=selections + b"\n", capture=True, - target=None, ) assert [m_call, m_call] == m_subp.call_args_list @@ -1406,7 +1342,6 @@ @mock.patch("cloudinit.config.cc_apt_configure.subp.subp") def test_dpkg_reconfigure_does_reconfigure(self, m_subp, tmpdir): - target = tmpdir.strpath # due to the way the cleaners are called (via dictionary reference) # mocking clean_cloud_init directly does not work. 
So we mock @@ -1417,12 +1352,9 @@ values={"cloud-init": ci_cleaner}, clear=True, ): - cc_apt_configure.dpkg_reconfigure( - ["pkga", "cloud-init"], target=target - ) + cc_apt_configure.dpkg_reconfigure(["pkga", "cloud-init"]) # cloud-init is actually the only package we have a cleaner for # so for now, its the only one that should reconfigured - ci_cleaner.assert_called_with(target) assert m_subp.call_count == 1 found = m_subp.call_args_list[0][0][0] expected = [ diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_ansible.py cloud-init-24.1.3/tests/unittests/config/test_cc_ansible.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_ansible.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_ansible.py 2024-03-27 13:14:04.000000000 +0000 @@ -1,6 +1,6 @@ +import os import re from copy import deepcopy -from os import environ from textwrap import dedent from unittest import mock from unittest.mock import MagicMock @@ -298,7 +298,6 @@ M_PATH + "AnsiblePullDistro.is_installed", return_value=False, ) - mocker.patch.dict(M_PATH + "os.environ", clear=True) if exception: with raises(exception): cc_ansible.handle("", cfg, get_cloud(), None) @@ -385,21 +384,20 @@ """verify expected ansible invocation from userdata config""" pull_type = cfg["ansible"]["install_method"] distro = get_cloud().distro - with mock.patch.dict(M_PATH + "os.environ", clear=True): - ansible_pull = ( - cc_ansible.AnsiblePullPip(distro, "ansible") - if pull_type == "pip" - else cc_ansible.AnsiblePullDistro(distro) - ) + ansible_pull = ( + cc_ansible.AnsiblePullPip(distro, "ansible") + if pull_type == "pip" + else cc_ansible.AnsiblePullDistro(distro) + ) cc_ansible.run_ansible_pull( ansible_pull, deepcopy(cfg["ansible"]["pull"]) ) if pull_type != "pip": assert m_subp2.call_args[0][0] == expected - assert m_subp2.call_args[1]["env"].get("HOME") == environ.get( + assert m_subp2.call_args[1]["update_env"].get( "HOME" - ) + ) == os.environ.get("HOME", "/root") @mock.patch(M_PATH + "validate_config") def test_do_not_run(self, m_validate): @@ -435,5 +433,5 @@ if isinstance(m_subp.call_args.kwargs, dict): assert ( "/etc/ansible/ansible.cfg" - == m_subp.call_args.kwargs["env"]["ansible_config"] + == m_subp.call_args.kwargs["update_env"]["ansible_config"] ) diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_apk_configure.py cloud-init-24.1.3/tests/unittests/config/test_cc_apk_configure.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_apk_configure.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_apk_configure.py 2024-03-27 13:14:04.000000000 +0000 @@ -10,7 +10,7 @@ import pytest -from cloudinit import cloud, helpers, util +from cloudinit import cloud, helpers, temp_utils, util from cloudinit.config import cc_apk_configure from cloudinit.config.schema import ( SchemaValidationError, @@ -18,6 +18,7 @@ validate_cloudconfig_schema, ) from tests.unittests.helpers import ( + SCHEMA_EMPTY_ERROR, FilesystemMockingTestCase, mock, skipUnlessJsonSchema, @@ -59,6 +60,11 @@ self.name = "apk_configure" self.cloud = cloud.Cloud(None, self.paths, None, None, None) self.args = [] + temp_utils._TMPDIR = self.new_root + + def tearDown(self): + super().tearDown() + temp_utils._TMPDIR = None @mock.patch(CC_APK + "._write_repositories_file") def test_no_repo_settings(self, m_write_repos): @@ -107,7 +113,7 @@ ) ) - self.assertEqual(expected_content, util.load_file(REPO_FILE)) + self.assertEqual(expected_content, util.load_text_file(REPO_FILE)) def 
test_main_and_community_repos(self): """ @@ -142,7 +148,7 @@ ) ) - self.assertEqual(expected_content, util.load_file(REPO_FILE)) + self.assertEqual(expected_content, util.load_text_file(REPO_FILE)) def test_main_community_testing_repos(self): """ @@ -182,7 +188,7 @@ ) ) - self.assertEqual(expected_content, util.load_file(REPO_FILE)) + self.assertEqual(expected_content, util.load_text_file(REPO_FILE)) def test_edge_main_community_testing_repos(self): """ @@ -219,7 +225,7 @@ ) ) - self.assertEqual(expected_content, util.load_file(REPO_FILE)) + self.assertEqual(expected_content, util.load_text_file(REPO_FILE)) def test_main_community_testing_local_repos(self): """ @@ -266,7 +272,7 @@ ) ) - self.assertEqual(expected_content, util.load_file(REPO_FILE)) + self.assertEqual(expected_content, util.load_text_file(REPO_FILE)) def test_edge_main_community_testing_local_repos(self): """ @@ -310,7 +316,7 @@ ) ) - self.assertEqual(expected_content, util.load_file(REPO_FILE)) + self.assertEqual(expected_content, util.load_text_file(REPO_FILE)) class TestApkConfigureSchema: @@ -355,7 +361,7 @@ ( {"apk_repos": {"alpine_repo": {}}}, "apk_repos.alpine_repo: 'version' is a required property," - " apk_repos.alpine_repo: {} does not have enough properties", + f" apk_repos.alpine_repo: {{}} {SCHEMA_EMPTY_ERROR}", ), ( {"apk_repos": {"alpine_repo": True}}, @@ -368,7 +374,7 @@ ), ( {"apk_repos": {}}, - "apk_repos: {} does not have enough properties", + f"apk_repos: {{}} {SCHEMA_EMPTY_ERROR}", ), ( {"apk_repos": {"local_repo_base_url": None}}, diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_apt_configure.py cloud-init-24.1.3/tests/unittests/config/test_cc_apt_configure.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_apt_configure.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_apt_configure.py 2024-03-27 13:14:04.000000000 +0000 @@ -3,18 +3,23 @@ """ Tests for cc_apt_configure module """ import re +from pathlib import Path +from unittest import mock import pytest -from cloudinit.config import cc_apt_configure +from cloudinit import features +from cloudinit.config import cc_apt_configure as cc_apt from cloudinit.config.schema import ( SchemaValidationError, get_schema, validate_cloudconfig_schema, ) -from tests.unittests.helpers import skipUnlessJsonSchema +from tests.unittests.helpers import SCHEMA_EMPTY_ERROR, skipUnlessJsonSchema from tests.unittests.util import get_cloud +M_PATH = "cloudinit.config.cc_apt_configure." 
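[reviewer note, not part of the upstream patch] The new M_PATH constant above follows the mock-path convention this test suite already uses (compare M_PATH in tests/unittests/cmd/test_status.py and test_cloud_id.py): names are patched where the module under test looks them up, and the module prefix lives in a single string. A minimal sketch of the pattern, using only names visible in this hunk; the literal /etc paths stand in for the tmpdir-based paths the real test builds:

    from unittest import mock

    M_PATH = "cloudinit.config.cc_apt_configure."

    # Patch get_apt_cfg in the module under test, not where it is defined;
    # concatenating onto M_PATH keeps the prefix in one place.
    with mock.patch(M_PATH + "get_apt_cfg") as m_get_apt_cfg:
        m_get_apt_cfg.return_value = {
            "sourcelist": "/etc/apt/sources.list",
            "sourceparts": "/etc/apt/sources.list.d/",
        }
        # ... exercise cc_apt_configure code that calls get_apt_cfg() ...
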
+ class TestAPTConfigureSchema: @pytest.mark.parametrize( @@ -34,7 +39,7 @@ " ('boguskey' was unexpected)" ), ), - ({"apt": {}}, "apt: {} does not have enough properties"), + ({"apt": {}}, f"apt: {{}} {SCHEMA_EMPTY_ERROR}"), ( {"apt": {"preserve_sources_list": 1}}, "apt.preserve_sources_list: 1 is not of type 'boolean'", @@ -45,7 +50,7 @@ ), ( {"apt": {"disable_suites": []}}, - re.escape("apt.disable_suites: [] is too short"), + re.escape("apt.disable_suites: [] ") + SCHEMA_EMPTY_ERROR, ), ( {"apt": {"disable_suites": [1]}}, @@ -65,7 +70,7 @@ ), ( {"apt": {"primary": []}}, - re.escape("apt.primary: [] is too short"), + re.escape("apt.primary: [] ") + SCHEMA_EMPTY_ERROR, ), ( {"apt": {"primary": ["nonobj"]}}, @@ -102,7 +107,7 @@ ), ( {"apt": {"primary": [{"arches": ["amd64"], "search": []}]}}, - re.escape("apt.primary.0.search: [] is too short"), + re.escape("apt.primary.0.search: [] ") + SCHEMA_EMPTY_ERROR, ), ( { @@ -134,7 +139,7 @@ ), ( {"apt": {"debconf_selections": {}}}, - "apt.debconf_selections: {} does not have enough properties", + f"apt.debconf_selections: {{}} {SCHEMA_EMPTY_ERROR}", ), ( {"apt": {"sources_list": True}}, @@ -170,7 +175,7 @@ ), ( {"apt": {"sources": {"opaquekey": {}}}}, - "apt.sources.opaquekey: {} does not have enough properties", + f"apt.sources.opaquekey: {{}} {SCHEMA_EMPTY_ERROR}", ), ( {"apt": {"sources": {"opaquekey": {"boguskey": True}}}}, @@ -252,17 +257,91 @@ install_packages = mocker.patch.object( mycloud.distro, "install_packages" ) - matcher = re.compile(cc_apt_configure.ADD_APT_REPO_MATCH).search + matcher = re.compile(cc_apt.ADD_APT_REPO_MATCH).search def fake_which(cmd): if cmd in already_installed: return "foundit" return None - which = mocker.patch.object(cc_apt_configure.shutil, "which") + which = mocker.patch.object(cc_apt.shutil, "which") which.side_effect = fake_which - cc_apt_configure._ensure_dependencies(cfg, matcher, mycloud) + cc_apt._ensure_dependencies(cfg, matcher, mycloud) if expected_install: install_packages.assert_called_once_with(expected_install) else: install_packages.assert_not_called() + + +class TestAptConfigure: + @pytest.mark.parametrize( + "src_content,distro_name,expected_content", + ( + pytest.param( + "content", + "ubuntu", + cc_apt.UBUNTU_DEFAULT_APT_SOURCES_LIST, + id="ubuntu_replace_invalid_apt_source_list_with_default", + ), + pytest.param( + "content", + "debian", + None, + id="debian_remove_invalid_apt_source_list", + ), + pytest.param( + cc_apt.UBUNTU_DEFAULT_APT_SOURCES_LIST, + "ubuntu", + cc_apt.UBUNTU_DEFAULT_APT_SOURCES_LIST, + id="ubuntu_no_warning_when_existig_sources_list_content_allowed", + ), + ), + ) + @mock.patch(M_PATH + "get_apt_cfg") + def test_remove_source( + self, + m_get_apt_cfg, + src_content, + distro_name, + expected_content, + caplog, + tmpdir, + ): + m_get_apt_cfg.return_value = { + "sourcelist": f"{tmpdir}/etc/apt/sources.list", + "sourceparts": f"{tmpdir}/etc/apt/sources.list.d/", + } + cloud = get_cloud(distro_name) + features.APT_DEB822_SOURCE_LIST_FILE = True + sources_file = tmpdir.join("/etc/apt/sources.list") + deb822_sources_file = tmpdir.join( + f"/etc/apt/sources.list.d/{distro_name}.sources" + ) + Path(sources_file).parent.mkdir(parents=True, exist_ok=True) + sources_file.write(src_content) + + cfg = { + "sources_list": """\ +Types: deb +URIs: {{mirror}} +Suites: {{codename}} {{codename}}-updates {{codename}}-backports +Components: main restricted universe multiverse +Signed-By: /usr/share/keyrings/ubuntu-archive-keyring.gpg""" + } + cc_apt.generate_sources_list(cfg, "noble", {}, 
cloud) + if expected_content is None: + assert not sources_file.exists() + assert f"Removing {sources_file} to favor deb822" in caplog.text + else: + if src_content != expected_content: + assert ( + f"Replacing {sources_file} to favor deb822" in caplog.text + ) + + assert ( + cc_apt.UBUNTU_DEFAULT_APT_SOURCES_LIST == sources_file.read() + ) + assert ( + f"Removing {sources_file} to favor deb822" not in caplog.text + ) + assert deb822_sources_file.exists() diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_bootcmd.py cloud-init-24.1.3/tests/unittests/config/test_cc_bootcmd.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_bootcmd.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_bootcmd.py 2024-03-27 13:14:04.000000000 +0000 @@ -11,7 +11,12 @@ get_schema, validate_cloudconfig_schema, ) -from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema +from tests.unittests.helpers import ( + SCHEMA_EMPTY_ERROR, + CiTestCase, + mock, + skipUnlessJsonSchema, +) from tests.unittests.util import get_cloud @@ -92,7 +97,7 @@ with self.allow_subp(["/bin/sh"]): handle("cc_bootcmd", valid_config, cc, []) self.assertEqual( - my_id + " iid-datasource-none\n", util.load_file(out_file) + my_id + " iid-datasource-none\n", util.load_text_file(out_file) ) def test_handler_runs_bootcmd_script_with_error(self): @@ -128,12 +133,14 @@ "Cloud config schema errors: bootcmd: 1 is not of type" " 'array'", ), - ({"bootcmd": []}, re.escape("bootcmd: [] is too short")), ( {"bootcmd": []}, - re.escape( - "Cloud config schema errors: bootcmd: [] is too short" - ), + re.escape("bootcmd: [] ") + SCHEMA_EMPTY_ERROR, + ), + ( + {"bootcmd": []}, + re.escape("Cloud config schema errors: bootcmd: [] ") + + SCHEMA_EMPTY_ERROR, ), ( { diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_ca_certs.py cloud-init-24.1.3/tests/unittests/config/test_cc_ca_certs.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_ca_certs.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_ca_certs.py 2024-03-27 13:14:04.000000000 +0000 @@ -8,16 +8,18 @@ import pytest -from cloudinit import distros, helpers -from cloudinit import log as logger -from cloudinit import subp, util +from cloudinit import distros, helpers, subp, util from cloudinit.config import cc_ca_certs from cloudinit.config.schema import ( SchemaValidationError, get_schema, validate_cloudconfig_schema, ) -from tests.unittests.helpers import TestCase, skipUnlessJsonSchema +from tests.unittests.helpers import ( + SCHEMA_EMPTY_ERROR, + TestCase, + skipUnlessJsonSchema, +) from tests.unittests.util import get_cloud @@ -327,7 +329,7 @@ ) mock_load = mocks.enter_context( mock.patch.object( - util, "load_file", return_value=ca_certs_content + util, "load_text_file", return_value=ca_certs_content ) ) mock_subp = mocks.enter_context( @@ -339,7 +341,7 @@ cc_ca_certs.disable_default_ca_certs(distro_name, conf) - if distro_name == "rhel": + if distro_name in ["rhel", "photon"]: mock_delete.assert_has_calls( [ mock.call(conf["ca_cert_path"]), @@ -398,7 +400,7 @@ ), ( {"ca_certs": {}}, - re.escape("ca_certs: {} does not have enough properties"), + re.escape("ca_certs: {} ") + SCHEMA_EMPTY_ERROR, ), ( {"ca_certs": {"boguskey": 1}}, @@ -417,7 +419,7 @@ ), ( {"ca_certs": {"trusted": []}}, - re.escape("ca_certs.trusted: [] is too short"), + re.escape("ca_certs.trusted: [] ") + SCHEMA_EMPTY_ERROR, ), ), ) @@ -435,7 +437,6 @@ @mock.patch.object(cc_ca_certs, "update_ca_certs") def 
test_deprecate_key_warnings(self, update_ca_certs, caplog): """Assert warnings are logged for deprecated keys.""" - logger.setup_logging() cloud = get_cloud("ubuntu") cc_ca_certs.handle( "IGNORE", {"ca-certs": {"remove-defaults": False}}, cloud, [] diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_chef.py cloud-init-24.1.3/tests/unittests/config/test_cc_chef.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_chef.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_chef.py 2024-03-27 13:14:04.000000000 +0000 @@ -14,10 +14,11 @@ get_schema, validate_cloudconfig_schema, ) +from tests.helpers import cloud_init_project_dir from tests.unittests.helpers import ( + SCHEMA_EMPTY_ERROR, FilesystemMockingTestCase, ResponsesTestCase, - cloud_init_project_dir, mock, skipIf, skipUnlessJsonSchema, @@ -155,7 +156,7 @@ Chef::Log::Formatter.show_time = true encrypted_data_bag_secret "/etc/chef/encrypted_data_bag_secret" """ - tpl_file = util.load_file(CLIENT_TEMPL) + tpl_file = util.load_text_file(CLIENT_TEMPL) self.patchUtils(self.tmp) self.patchOS(self.tmp) @@ -175,7 +176,7 @@ cc_chef.handle("chef", cfg, get_cloud(), []) for d in cc_chef.CHEF_DIRS: self.assertTrue(os.path.isdir(d)) - c = util.load_file(cc_chef.CHEF_RB_PATH) + c = util.load_text_file(cc_chef.CHEF_RB_PATH) # the content of these keys is not expected to be rendered to tmpl unrendered_keys = ("validation_cert",) @@ -190,7 +191,7 @@ val = cfg["chef"].get(k, v) if isinstance(val, str): self.assertIn(val, c) - c = util.load_file(cc_chef.CHEF_FB_PATH) + c = util.load_text_file(cc_chef.CHEF_FB_PATH) self.assertEqual({}, json.loads(c)) def test_firstboot_json(self): @@ -208,7 +209,7 @@ }, } cc_chef.handle("chef", cfg, get_cloud(), []) - c = util.load_file(cc_chef.CHEF_FB_PATH) + c = util.load_text_file(cc_chef.CHEF_FB_PATH) self.assertEqual( { "run_list": ["a", "b", "c"], @@ -221,7 +222,7 @@ not os.path.isfile(CLIENT_TEMPL), CLIENT_TEMPL + " is not available" ) def test_template_deletes(self): - tpl_file = util.load_file(CLIENT_TEMPL) + tpl_file = util.load_text_file(CLIENT_TEMPL) self.patchUtils(self.tmp) self.patchOS(self.tmp) @@ -235,7 +236,7 @@ }, } cc_chef.handle("chef", cfg, get_cloud(), []) - c = util.load_file(cc_chef.CHEF_RB_PATH) + c = util.load_text_file(cc_chef.CHEF_RB_PATH) self.assertNotIn("json_attribs", c) self.assertNotIn("Formatter.show_time", c) @@ -244,7 +245,7 @@ ) def test_validation_cert_and_validation_key(self): # test validation_cert content is written to validation_key path - tpl_file = util.load_file(CLIENT_TEMPL) + tpl_file = util.load_text_file(CLIENT_TEMPL) self.patchUtils(self.tmp) self.patchOS(self.tmp) @@ -260,14 +261,14 @@ }, } cc_chef.handle("chef", cfg, get_cloud(), []) - content = util.load_file(cc_chef.CHEF_RB_PATH) + content = util.load_text_file(cc_chef.CHEF_RB_PATH) self.assertIn(v_path, content) - util.load_file(v_path) - self.assertEqual(v_cert, util.load_file(v_path)) + util.load_text_file(v_path) + self.assertEqual(v_cert, util.load_text_file(v_path)) def test_validation_cert_with_system(self): # test validation_cert content is not written over system file - tpl_file = util.load_file(CLIENT_TEMPL) + tpl_file = util.load_text_file(CLIENT_TEMPL) self.patchUtils(self.tmp) self.patchOS(self.tmp) @@ -285,10 +286,10 @@ util.write_file("/etc/cloud/templates/chef_client.rb.tmpl", tpl_file) util.write_file(v_path, expected_cert) cc_chef.handle("chef", cfg, get_cloud(), []) - content = util.load_file(cc_chef.CHEF_RB_PATH) + content = 
util.load_text_file(cc_chef.CHEF_RB_PATH) self.assertIn(v_path, content) - util.load_file(v_path) - self.assertEqual(expected_cert, util.load_file(v_path)) + util.load_text_file(v_path) + self.assertEqual(expected_cert, util.load_text_file(v_path)) @skipUnlessJsonSchema() @@ -306,7 +307,7 @@ ), ( {"chef": {}}, - re.escape(" chef: {} does not have enough properties"), + re.escape(" chef: {} ") + SCHEMA_EMPTY_ERROR, ), ( {"chef": {"boguskey": True}}, @@ -321,7 +322,7 @@ ), ( {"chef": {"directories": []}}, - re.escape("chef.directories: [] is too short"), + re.escape("chef.directories: [] ") + SCHEMA_EMPTY_ERROR, ), ( {"chef": {"directories": [1]}}, diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_disk_setup.py cloud-init-24.1.3/tests/unittests/config/test_cc_disk_setup.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_disk_setup.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_disk_setup.py 2024-03-27 13:14:04.000000000 +0000 @@ -33,7 +33,7 @@ ) def tearDown(self): - super(TestIsDiskUsed, self).tearDown() + super().tearDown() self.patches.close() def test_multiple_child_nodes_returns_true(self): @@ -65,7 +65,7 @@ ) def tearDown(self): - super(TestGetMbrHddSize, self).tearDown() + super().tearDown() self.patches.close() def _configure_subp_mock(self, hdd_size_in_bytes, sector_size_in_bytes): diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_growpart.py cloud-init-24.1.3/tests/unittests/config/test_cc_growpart.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_growpart.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_growpart.py 2024-03-27 13:14:04.000000000 +0000 @@ -20,6 +20,7 @@ get_schema, validate_cloudconfig_schema, ) +from cloudinit.subp import SubpResult from tests.unittests.helpers import ( TestCase, does_not_raise, @@ -135,29 +136,30 @@ def tearDown(self): self.tmpfile.close() os.remove(self.tmppath) + super().tearDown() @mock.patch.object(os.path, "isfile", return_value=False) - @mock.patch.dict("os.environ", clear=True) def test_no_resizers_auto_is_fine(self, m_isfile): with mock.patch.object( - subp, "subp", return_value=(HELP_GROWPART_NO_RESIZE, "") + subp, "subp", return_value=SubpResult(HELP_GROWPART_NO_RESIZE, "") ) as mockobj: config = {"growpart": {"mode": "auto"}} self.handle(self.name, config, self.cloud, self.args) mockobj.assert_has_calls( [ - mock.call(["growpart", "--help"], env={"LANG": "C"}), mock.call( - ["gpart", "help"], env={"LANG": "C"}, rcs=[0, 1] + ["growpart", "--help"], update_env={"LANG": "C"} + ), + mock.call( + ["gpart", "help"], update_env={"LANG": "C"}, rcs=[0, 1] ), ] ) - @mock.patch.dict("os.environ", clear=True) def test_no_resizers_mode_growpart_is_exception(self): with mock.patch.object( - subp, "subp", return_value=(HELP_GROWPART_NO_RESIZE, "") + subp, "subp", return_value=SubpResult(HELP_GROWPART_NO_RESIZE, "") ) as mockobj: config = {"growpart": {"mode": "growpart"}} self.assertRaises( @@ -170,13 +172,12 @@ ) mockobj.assert_called_once_with( - ["growpart", "--help"], env={"LANG": "C"} + ["growpart", "--help"], update_env={"LANG": "C"} ) - @mock.patch.dict("os.environ", clear=True) def test_mode_auto_prefers_growpart(self): with mock.patch.object( - subp, "subp", return_value=(HELP_GROWPART_RESIZE, "") + subp, "subp", return_value=SubpResult(HELP_GROWPART_RESIZE, "") ) as mockobj: ret = cc_growpart.resizer_factory( mode="auto", distro=mock.Mock(), devices=["/"] @@ -184,10 +185,9 @@ self.assertIsInstance(ret, 
cc_growpart.ResizeGrowPart) mockobj.assert_called_once_with( - ["growpart", "--help"], env={"LANG": "C"} + ["growpart", "--help"], update_env={"LANG": "C"} ) - @mock.patch.dict("os.environ", {"LANG": "cs_CZ.UTF-8"}, clear=True) @mock.patch.object(temp_utils, "mkdtemp", return_value="/tmp/much-random") @mock.patch.object(stat, "S_ISDIR", return_value=False) @mock.patch.object(os.path, "samestat", return_value=True) @@ -203,7 +203,7 @@ @mock.patch.object(os, "lstat", return_value="interesting metadata") def test_force_lang_check_tempfile(self, *args, **kwargs): with mock.patch.object( - subp, "subp", return_value=(HELP_GROWPART_RESIZE, "") + subp, "subp", return_value=SubpResult(HELP_GROWPART_RESIZE, "") ) as mockobj: ret = cc_growpart.resizer_factory( mode="auto", distro=mock.Mock(), devices=["/"] @@ -217,20 +217,19 @@ [ mock.call( ["growpart", "--dry-run", diskdev, partnum], - env={"LANG": "C", "TMPDIR": "/tmp"}, + update_env={"LANG": "C", "TMPDIR": "/tmp"}, ), mock.call( ["growpart", diskdev, partnum], - env={"LANG": "C", "TMPDIR": "/tmp"}, + update_env={"LANG": "C", "TMPDIR": "/tmp"}, ), ] ) - @mock.patch.dict("os.environ", clear=True) @mock.patch.object(os.path, "isfile", return_value=True) def test_mode_use_growfs_on_root(self, m_isfile): with mock.patch.object( - subp, "subp", return_value=("File not found", "") + subp, "subp", return_value=SubpResult("File not found", "") ) as mockobj: ret = cc_growpart.resizer_factory( mode="auto", distro=mock.Mock(), devices=["/"] @@ -239,14 +238,15 @@ mockobj.assert_has_calls( [ - mock.call(["growpart", "--help"], env={"LANG": "C"}), + mock.call( + ["growpart", "--help"], update_env={"LANG": "C"} + ), ] ) - @mock.patch.dict("os.environ", {"LANG": "cs_CZ.UTF-8"}, clear=True) def test_mode_auto_falls_back_to_gpart(self): with mock.patch.object( - subp, "subp", return_value=("", HELP_GPART) + subp, "subp", return_value=SubpResult("", HELP_GPART) ) as mockobj: ret = cc_growpart.resizer_factory( mode="auto", distro=mock.Mock(), devices=["/", "/opt"] @@ -255,18 +255,19 @@ mockobj.assert_has_calls( [ - mock.call(["growpart", "--help"], env={"LANG": "C"}), mock.call( - ["gpart", "help"], env={"LANG": "C"}, rcs=[0, 1] + ["growpart", "--help"], update_env={"LANG": "C"} + ), + mock.call( + ["gpart", "help"], update_env={"LANG": "C"}, rcs=[0, 1] ), ] ) @mock.patch.object(os.path, "isfile", return_value=True) - @mock.patch.dict("os.environ", {"LANG": "cs_CZ.UTF-8"}, clear=True) def test_mode_auto_falls_back_to_growfs(self, m_isfile): with mock.patch.object( - subp, "subp", return_value=("", HELP_GPART) + subp, "subp", return_value=SubpResult("", HELP_GPART) ) as mockobj: ret = cc_growpart.resizer_factory( mode="auto", distro=mock.Mock(), devices=["/"] @@ -275,7 +276,9 @@ mockobj.assert_has_calls( [ - mock.call(["growpart", "--help"], env={"LANG": "C"}), + mock.call( + ["growpart", "--help"], update_env={"LANG": "C"} + ), ] ) @@ -313,13 +316,15 @@ factory.assert_called_once_with( "auto", distro=self.distro, devices=["/"] ) - rsdevs.assert_called_once_with(myresizer, ["/"]) + rsdevs.assert_called_once_with(myresizer, ["/"], self.distro.name) class TestResize(unittest.TestCase): def setUp(self): super(TestResize, self).setUp() self.name = "growpart" + self.distro = mock.Mock() + self.distro.name = "ubuntu" self.log = logging.getLogger("TestResize") def test_simple_devices(self): @@ -364,7 +369,9 @@ cc_growpart.device_part_info = simple_device_part_info os.stat = mystat - resized = cc_growpart.resize_devices(myresizer(), devs + enoent) + resized = 
cc_growpart.resize_devices( + myresizer(), devs + enoent, self.distro.name + ) def find(name, res): for f in res: @@ -389,6 +396,22 @@ os.stat = real_stat +class TestGetSize: + @pytest.mark.parametrize( + "file_exists, expected", + ( + (False, None), + (True, 1), + ), + ) + def test_get_size_behaves(self, file_exists, expected, tmp_path): + """Ensure that get_size() doesn't raise exception""" + tmp_file = tmp_path / "tmp.txt" + if file_exists: + tmp_file.write_bytes(b"0") + assert expected == cc_growpart.get_size(tmp_file) + + class TestEncrypted: """Attempt end-to-end scenarios using encrypted devices. @@ -479,11 +502,16 @@ mocker.patch("pathlib.Path.exists", return_value=True) self.m_unlink = mocker.patch("pathlib.Path.unlink", autospec=True) + self.distro = mock.Mock() + self.distro.name = "ubuntu" + self.resizer = mock.Mock() self.resizer.resize = mock.Mock(return_value=(1024, 1024)) def test_resize_when_encrypted(self, common_mocks, caplog): - info = cc_growpart.resize_devices(self.resizer, ["/fake_encrypted"]) + info = cc_growpart.resize_devices( + self.resizer, ["/fake_encrypted"], self.distro.name + ) assert len(info) == 2 assert info[0][0] == "/dev/vdx1" assert info[0][2].startswith("no change necessary") @@ -501,7 +529,9 @@ self.assert_resize_and_cleanup() def test_resize_when_unencrypted(self, common_mocks): - info = cc_growpart.resize_devices(self.resizer, ["/"]) + info = cc_growpart.resize_devices( + self.resizer, ["/"], self.distro.name + ) assert len(info) == 1 assert info[0][0] == "/" assert "encrypted" not in info[0][2] @@ -514,7 +544,9 @@ "cloudinit.config.cc_growpart.subp.which", return_value=None, ) - info = cc_growpart.resize_devices(self.resizer, ["/fake_encrypted"]) + info = cc_growpart.resize_devices( + self.resizer, ["/fake_encrypted"], self.distro.name + ) assert len(info) == 1 assert "skipped as it is not encrypted" in info[0][2] @@ -530,7 +562,9 @@ "cloudinit.config.cc_growpart.subp.subp", side_effect=_subp_side_effect, ) - info = cc_growpart.resize_devices(self.resizer, ["/fake_encrypted"]) + info = cc_growpart.resize_devices( + self.resizer, ["/fake_encrypted"], self.distro.name + ) assert len(info) == 1 assert info[0][0] == "/fake_encrypted" assert info[0][1] == "FAILED" @@ -549,7 +583,9 @@ "cloudinit.config.cc_growpart.subp.subp", side_effect=_subp_side_effect, ) - info = cc_growpart.resize_devices(self.resizer, ["/fake_encrypted"]) + info = cc_growpart.resize_devices( + self.resizer, ["/fake_encrypted"], self.distro.name + ) assert len(info) == 1 assert info[0][0] == "/fake_encrypted" assert info[0][1] == "FAILED" @@ -562,7 +598,9 @@ # Note that this will be standard behavior after first boot # on a system with an encrypted root partition mocker.patch("pathlib.Path.open", side_effect=FileNotFoundError()) - info = cc_growpart.resize_devices(self.resizer, ["/fake_encrypted"]) + info = cc_growpart.resize_devices( + self.resizer, ["/fake_encrypted"], self.distro.name + ) assert len(info) == 2 assert info[0][0] == "/dev/vdx1" assert info[0][2].startswith("no change necessary") @@ -589,7 +627,9 @@ side_effect=_subp_side_effect, ) - info = cc_growpart.resize_devices(self.resizer, ["/fake_encrypted"]) + info = cc_growpart.resize_devices( + self.resizer, ["/fake_encrypted"], self.distro.name + ) assert len(info) == 2 assert info[0][0] == "/dev/vdx1" assert info[0][2].startswith("no change necessary") @@ -607,7 +647,9 @@ def test_resize_skipped(self, common_mocks, mocker, caplog): mocker.patch("pathlib.Path.exists", return_value=False) - info = 
cc_growpart.resize_devices(self.resizer, ["/fake_encrypted"]) + info = cc_growpart.resize_devices( + self.resizer, ["/fake_encrypted"], self.distro.name + ) assert len(info) == 2 assert info[1] == ( "/fake_encrypted", @@ -649,7 +691,7 @@ ), ) @mock.patch("cloudinit.util.is_BSD") - def test_device_part_into( + def test_device_part_info( self, m_is_BSD, is_BSD, devpath, expected, raised_exception ): m_is_BSD.return_value = is_BSD diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_install_hotplug.py cloud-init-24.1.3/tests/unittests/config/test_cc_install_hotplug.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_install_hotplug.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_install_hotplug.py 2024-03-27 13:14:04.000000000 +0000 @@ -10,6 +10,7 @@ handle, ) from cloudinit.event import EventScope, EventType +from cloudinit.sources.DataSourceEc2 import DataSourceEc2 @pytest.fixture() @@ -51,6 +52,7 @@ m_cloud.datasource.get_supported_events.return_value = { EventScope.NETWORK: {EventType.HOTPLUG} } + m_cloud.datasource.extra_hotplug_udev_rules = None if libexec_exists: libexecdir = "/usr/libexec/cloud-init" @@ -61,7 +63,7 @@ mocks.m_write.assert_called_once_with( filename=HOTPLUG_UDEV_PATH, content=HOTPLUG_UDEV_RULES_TEMPLATE.format( - libexecdir=libexecdir + extra_rules="", libexecdir=libexecdir ), ) assert mocks.m_subp.call_args_list == [ @@ -127,3 +129,43 @@ assert mocks.m_del.call_args_list == [] assert mocks.m_write.call_args_list == [] assert mocks.m_subp.call_args_list == [] + + def test_rules_installed_on_ec2(self, mocks): + mocks.m_which.return_value = "udevadm" + mocks.m_update_enabled.return_value = True + m_cloud = mock.MagicMock() + m_cloud.datasource.get_supported_events.return_value = { + EventScope.NETWORK: {EventType.HOTPLUG} + } + m_cloud.datasource.extra_hotplug_udev_rules = ( + DataSourceEc2.extra_hotplug_udev_rules + ) + + with mock.patch("os.path.exists", return_value=True): + handle(None, {}, m_cloud, None) + + udev_rules = """\ +# Installed by cloud-init due to network hotplug userdata +ACTION!="add|remove", GOTO="cloudinit_end" + +ENV{ID_NET_DRIVER}=="vif|ena|ixgbevf", GOTO="cloudinit_hook" +GOTO="cloudinit_end" + +LABEL="cloudinit_hook" +SUBSYSTEM=="net", RUN+="/usr/libexec/cloud-init/hook-hotplug" +LABEL="cloudinit_end" +""" + mocks.m_write.assert_called_once_with( + filename=HOTPLUG_UDEV_PATH, + content=udev_rules, + ) + assert mocks.m_subp.call_args_list == [ + mock.call( + [ + "udevadm", + "control", + "--reload-rules", + ] + ) + ] + assert mocks.m_del.call_args_list == [] diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_locale.py cloud-init-24.1.3/tests/unittests/config/test_cc_locale.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_locale.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_locale.py 2024-03-27 13:14:04.000000000 +0000 @@ -55,7 +55,7 @@ locale_configfile, ) - contents = util.load_file(cc.distro.locale_gen_fn) + contents = util.load_text_file(cc.distro.locale_gen_fn) self.assertIn("%s UTF-8" % locale, contents) m_subp.assert_called_with( ["localectl", "set-locale", locale], capture=False @@ -72,7 +72,7 @@ locale_conf = cc.distro.systemd_locale_conf_fn else: locale_conf = cc.distro.locale_conf_fn - contents = util.load_file(locale_conf, decode=False) + contents = util.load_binary_file(locale_conf) n_cfg = ConfigObj(BytesIO(contents)) if cc.distro.uses_systemd(): self.assertEqual({"LANG": cfg["locale"]}, dict(n_cfg)) @@ -91,7 +91,7 @@ 
locale_conf = cc.distro.locale_conf_fn keyname = "RC_LANG" - contents = util.load_file(locale_conf, decode=False) + contents = util.load_binary_file(locale_conf) n_cfg = ConfigObj(BytesIO(contents)) self.assertEqual({keyname: "en_US.UTF-8"}, dict(n_cfg)) diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_lxd.py cloud-init-24.1.3/tests/unittests/config/test_cc_lxd.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_lxd.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_lxd.py 2024-03-27 13:14:04.000000000 +0000 @@ -11,6 +11,8 @@ get_schema, validate_cloudconfig_schema, ) +from cloudinit.helpers import Paths +from cloudinit.util import del_file from tests.unittests import helpers as t_help from tests.unittests.util import get_cloud @@ -44,7 +46,9 @@ ) def test_lxd_init(self, maybe_clean, which, subp, exists, system_info): system_info.return_value = {"uname": [0, 1, "mykernel"]} - cc = get_cloud(mocked_distro=True) + tmpdir = self.tmp_dir() + sem_file = f"{tmpdir}/sem/snap_seeded.once" + cc = get_cloud(mocked_distro=True, paths=Paths({"cloud_dir": tmpdir})) install = cc.distro.install_packages for backend, cmd, package in BACKEND_DEF: @@ -84,21 +88,23 @@ if backend == "lvm": self.assertEqual( [ + mock.call(sem_file), mock.call( "/lib/modules/mykernel/" "kernel/drivers/md/dm-thin-pool.ko" - ) + ), ], exists.call_args_list, ) else: - self.assertEqual([], exists.call_args_list) + self.assertEqual([mock.call(sem_file)], exists.call_args_list) + del_file(sem_file) @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") @mock.patch("cloudinit.config.cc_lxd.subp") @mock.patch("cloudinit.config.cc_lxd.subp.which", return_value=False) def test_lxd_install(self, m_which, mock_subp, m_maybe_clean): - cc = get_cloud() + cc = get_cloud(paths=Paths({"cloud_dir": self.tmp_dir()})) cc.distro = mock.MagicMock() mock_subp.which.return_value = None cc_lxd.handle("cc_lxd", LXD_INIT_CFG, cc, []) @@ -112,7 +118,7 @@ @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") @mock.patch("cloudinit.config.cc_lxd.subp") def test_no_init_does_nothing(self, mock_subp, m_maybe_clean): - cc = get_cloud() + cc = get_cloud(paths=Paths({"cloud_dir": self.tmp_dir()})) cc.distro = mock.MagicMock() cc_lxd.handle("cc_lxd", {"lxd": {}}, cc, []) self.assertFalse(cc.distro.install_packages.called) @@ -122,16 +128,17 @@ @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") @mock.patch("cloudinit.config.cc_lxd.subp") def test_no_lxd_does_nothing(self, mock_subp, m_maybe_clean): - cc = get_cloud() + cc = get_cloud(paths=Paths({"cloud_dir": self.tmp_dir()})) cc.distro = mock.MagicMock() cc_lxd.handle("cc_lxd", {"package_update": True}, cc, []) self.assertFalse(cc.distro.install_packages.called) self.assertFalse(mock_subp.subp.called) self.assertFalse(m_maybe_clean.called) + @mock.patch("cloudinit.config.cc_lxd.util.wait_for_snap_seeded") @mock.patch("cloudinit.config.cc_lxd.subp") - def test_lxd_preseed(self, mock_subp): - cc = get_cloud() + def test_lxd_preseed(self, mock_subp, wait_for_snap_seeded): + cc = get_cloud(paths=Paths({"cloud_dir": self.tmp_dir()})) cc.distro = mock.MagicMock() cc_lxd.handle( "cc_lxd", @@ -146,6 +153,7 @@ ], mock_subp.subp.call_args_list, ) + wait_for_snap_seeded.assert_called_once_with(cc) def test_lxd_debconf_new_full(self): data = { @@ -385,7 +393,7 @@ # Require bridge.mode ({"lxd": {"bridge": {}}}, "bridge: 'mode' is a required property"), # Require init or bridge keys - ({"lxd": {}}, "lxd: {} does not have enough properties"), + 
({"lxd": {}}, f"lxd: {{}} {t_help.SCHEMA_EMPTY_ERROR}"), # Require some non-empty preseed config of type string ({"lxd": {"preseed": {}}}, "not of type 'string'"), ({"lxd": {"preseed": ""}}, None), diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_mcollective.py cloud-init-24.1.3/tests/unittests/config/test_cc_mcollective.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_mcollective.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_mcollective.py 2024-03-27 13:14:04.000000000 +0000 @@ -86,7 +86,7 @@ self.patchUtils(self.tmp) cc_mcollective.configure(cfg["mcollective"]["conf"]) - contents = util.load_file(cc_mcollective.SERVER_CFG, decode=False) + contents = util.load_binary_file(cc_mcollective.SERVER_CFG) contents = configobj.ConfigObj(BytesIO(contents)) self.assertEqual(expected, dict(contents)) @@ -97,7 +97,7 @@ self.assertTrue(os.path.exists(self.server_cfg)) self.assertTrue(os.path.exists(self.server_cfg + ".old")) self.assertEqual( - util.load_file(self.server_cfg + ".old"), STOCK_CONFIG + util.load_text_file(self.server_cfg + ".old"), STOCK_CONFIG ) def test_existing_updated(self): @@ -136,9 +136,11 @@ self.assertEqual(found["securityprovider"], "ssl") self.assertEqual( - util.load_file(self.pricert_file), cfg["private-cert"] + util.load_text_file(self.pricert_file), cfg["private-cert"] + ) + self.assertEqual( + util.load_text_file(self.pubcert_file), cfg["public-cert"] ) - self.assertEqual(util.load_file(self.pubcert_file), cfg["public-cert"]) class TestHandler(t_help.TestCase): @@ -147,7 +149,7 @@ def test_mcollective_install(self, mock_util, mock_subp): cc = get_cloud() cc.distro = t_help.mock.MagicMock() - mock_util.load_file.return_value = b"" + mock_util.load_binary_file.return_value = b"" mycfg = {"mcollective": {"conf": {"loglevel": "debug"}}} cc_mcollective.handle("cc_mcollective", mycfg, cc, []) self.assertTrue(cc.distro.install_packages.called) diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_mounts.py cloud-init-24.1.3/tests/unittests/config/test_cc_mounts.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_mounts.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_mounts.py 2024-03-27 13:14:04.000000000 +0000 @@ -583,9 +583,15 @@ "config, error_msg", [ # We expect to see one mount if provided in user-data. 
- ({"mounts": []}, re.escape("mounts: [] is too short")), + ( + {"mounts": []}, + re.escape("mounts: [] ") + test_helpers.SCHEMA_EMPTY_ERROR, + ), # Disallow less than 1 item per mount entry - ({"mounts": [[]]}, re.escape("mounts.0: [] is too short")), + ( + {"mounts": [[]]}, + re.escape("mounts.0: [] ") + test_helpers.SCHEMA_EMPTY_ERROR, + ), # Disallow more than 6 items per mount entry ({"mounts": [["1"] * 7]}, "mounts.0:.* is too long"), # Disallow mount_default_fields will anything other than 6 items diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_ntp.py cloud-init-24.1.3/tests/unittests/config/test_cc_ntp.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_ntp.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_ntp.py 2024-03-27 13:14:04.000000000 +0000 @@ -150,7 +150,7 @@ ) self.assertEqual( "servers []\npools ['10.0.0.1', '10.0.0.2']\n", - util.load_file(confpath), + util.load_text_file(confpath), ) def test_write_ntp_config_template_defaults_pools_w_empty_lists(self): @@ -174,7 +174,8 @@ template=None, ) self.assertEqual( - "servers []\npools {0}\n".format(pools), util.load_file(confpath) + "servers []\npools {0}\n".format(pools), + util.load_text_file(confpath), ) def test_defaults_pools_empty_lists_sles(self): @@ -199,7 +200,7 @@ self.assertIn("opensuse", pool) self.assertEqual( "servers []\npools {0}\n".format(default_pools), - util.load_file(confpath), + util.load_text_file(confpath), ) self.assertIn( "Adding distro default ntp pool servers: {0}".format( @@ -225,7 +226,7 @@ ) self.assertEqual( "[Time]\nNTP=%s %s \n" % (" ".join(servers), " ".join(pools)), - util.load_file(confpath), + util.load_text_file(confpath), ) def test_distro_ntp_client_configs(self): @@ -313,7 +314,7 @@ path=confpath, template_fn=template_fn, ) - content = util.load_file(confpath) + content = util.load_text_file(confpath) if client in ["ntp", "chrony"]: content_lines = content.splitlines() expected_servers = self._get_expected_servers( @@ -388,13 +389,13 @@ servers = cc_ntp.generate_server_names(mycloud.distro.name) self.assertEqual( "servers {0}\npools []\n".format(servers), - util.load_file(confpath), + util.load_text_file(confpath), ) else: pools = cc_ntp.generate_server_names(mycloud.distro.name) self.assertEqual( "servers []\npools {0}\n".format(pools), - util.load_file(confpath), + util.load_text_file(confpath), ) self.assertNotIn( "Invalid cloud-config provided:", self.logs.getvalue() @@ -417,7 +418,7 @@ cc_ntp.handle("cc_ntp", cfg, mycloud, []) self.assertEqual( "[Time]\nNTP=192.168.2.1 192.168.2.2 0.mypool.org \n", - util.load_file(confpath), + util.load_text_file(confpath), ) @mock.patch("cloudinit.config.cc_ntp.select_ntp_client") @@ -506,7 +507,7 @@ expected_service_call, capture=True, rcs=None ) - self.assertEqual(expected_content, util.load_file(confpath)) + self.assertEqual(expected_content, util.load_text_file(confpath)) @mock.patch("cloudinit.util.system_info") def test_opensuse_picks_chrony(self, m_sysinfo): @@ -674,7 +675,7 @@ cc_ntp.handle("notimportant", cfg, mycloud, None) self.assertEqual( "servers []\npools ['mypool.org']\n%s" % custom, - util.load_file(confpath), + util.load_text_file(confpath), ) @mock.patch("cloudinit.config.cc_ntp.supplemental_schema_validation") @@ -714,7 +715,7 @@ cc_ntp.handle("notimportant", {"ntp": cfg}, mycloud, None) self.assertEqual( "servers []\npools ['mypool.org']\n%s" % custom, - util.load_file(confpath), + util.load_text_file(confpath), ) m_schema.assert_called_with(expected_merged_cfg) diff 
-Nru cloud-init-23.4.4/tests/unittests/config/test_cc_package_update_upgrade_install.py cloud-init-24.1.3/tests/unittests/config/test_cc_package_update_upgrade_install.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_package_update_upgrade_install.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_package_update_upgrade_install.py 2024-03-27 13:14:04.000000000 +0000 @@ -5,7 +5,10 @@ import pytest from cloudinit import subp -from cloudinit.config.cc_package_update_upgrade_install import handle +from cloudinit.config.cc_package_update_upgrade_install import ( + REBOOT_FILES, + handle, +) from cloudinit.config.schema import ( SchemaValidationError, get_schema, @@ -13,9 +16,15 @@ ) from cloudinit.distros import PackageInstallerError from cloudinit.subp import SubpResult -from tests.unittests.helpers import skipUnlessJsonSchema +from tests.unittests.helpers import ( + SCHEMA_EMPTY_ERROR, + does_not_raise, + skipUnlessJsonSchema, +) from tests.unittests.util import get_cloud +M_PATH = "cloudinit.config.cc_package_update_upgrade_install." + @pytest.fixture def common_mocks(mocker): @@ -28,6 +37,105 @@ "cloudinit.distros.package_management.apt.Apt._apt_lock_available", return_value=True, ) + mocker.patch( + "cloudinit.distros.package_management.apt.Apt.available", + return_value=True, + ) + mocker.patch( + "cloudinit.distros.package_management.snap.Snap.available", + return_value=True, + ) + + +class TestRebootIfRequired: + @pytest.mark.parametrize( + "cloud_cfg,reboot_file,expectation", + ( + pytest.param( + {"package_reboot_if_required": True}, + "/run/reboot-needed", + does_not_raise(), + id="no_reboot_when_no_package_changes", + ), + pytest.param( + {"package_reboot_if_required": True, "package_upgrade": True}, + "/run/reboot-needed", + pytest.raises( + RuntimeError + ), # _fire_reboot raises RuntimeError + id="perform_reboot_on_package_upgrade_and_suse_reboot_marker", + ), + pytest.param( + {"package_reboot_if_required": True, "package_upgrade": True}, + "", # No reboot-needed flag file present + does_not_raise(), + id="no_reboot_on_package_upgrade_and_no_reboot_required_file", + ), + pytest.param( + {"package_reboot_if_required": True, "package_upgrade": True}, + "/var/run/reboot-required", + pytest.raises( + RuntimeError + ), # _fire_reboot raises RuntimeError + id="perform_reboot_on_package_upgrade_and_reboot_marker", + ), + pytest.param( + {"package_reboot_if_required": True, "packages": ["sl"]}, + "/var/run/reboot-required", + pytest.raises( + RuntimeError + ), # _fire_reboot raises RuntimeError + id="perform_reboot_on_packages_and_reboot_marker", + ), + ), + ) + def test_wb_only_reboot_on_reboot_when_configured_and_required( + self, cloud_cfg, reboot_file, expectation, common_mocks, caplog + ): + """Only reboot when packages are updated and reboot_if_required. 
+ + Whitebox testing because _fire_reboot will not actually reboot the + system and we expect to fall back to a raised RuntimeError in testing + + NOOP when any of the following are not true: + - no reboot_if_required: true config + - no reboot-required flag files exist + - no packages were changed by cloud-init via upgrade or packages cfg + """ + + def _isfile(filename: str): + return filename == reboot_file + + cloud = get_cloud("ubuntu") + + subp_call = None + sleep_count = 0 + if cloud_cfg.get("package_reboot_if_required"): + if reboot_file in REBOOT_FILES: + if cloud_cfg.get("package_upgrade") or cloud_cfg.get( + "packages" + ): + sleep_count = 6 + # Expect a RuntimeError after sleeps because of mocked + # subp and not really rebooting the system + subp_call = ["/sbin/reboot"] + + caplog.set_level(logging.WARNING) + with mock.patch( + "cloudinit.subp.subp", return_value=("fakeout", "fakeerr") + ) as m_subp: + with mock.patch("os.path.isfile", side_effect=_isfile): + with mock.patch(M_PATH + "time.sleep") as m_sleep: + with mock.patch(M_PATH + "flush_loggers"): + with expectation: + handle("", cloud_cfg, cloud, []) + assert sleep_count == m_sleep.call_count + if subp_call: + assert ( + f"Rebooting after upgrade or install per {reboot_file}" + in caplog.text + ) + m_subp.assert_called_with(subp_call) class TestMultiplePackageManagers: @@ -175,7 +283,7 @@ assert caplog.records[-3].levelname == "WARNING" assert ( caplog.records[-3].message - == "Failed to install packages: ['pkg1']" + == "Failure when attempting to install packages: ['pkg1']" ) @@ -188,7 +296,7 @@ # packages list with three entries (2 required) ({"packages": ["p1", ["p2", "p3", "p4"]]}, ""), # empty packages list - ({"packages": []}, "is too short"), + ({"packages": []}, SCHEMA_EMPTY_ERROR), ( {"apt_update": False}, ( diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_phone_home.py cloud-init-24.1.3/tests/unittests/config/test_cc_phone_home.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_phone_home.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_phone_home.py 2024-03-27 13:14:04.000000000 +0000 @@ -18,7 +18,7 @@ @pytest.fixture(autouse=True) def common_mocks(mocker): - mocker.patch("cloudinit.util.load_file", side_effect=count()) + mocker.patch("cloudinit.util.load_text_file", side_effect=count()) @mock.patch("cloudinit.url_helper.readurl") @@ -29,10 +29,9 @@ assert m_readurl.call_args == mock.call( "myurl", data={ - "pub_key_dsa": "0", - "pub_key_rsa": "1", - "pub_key_ecdsa": "2", - "pub_key_ed25519": "3", + "pub_key_rsa": "0", + "pub_key_ecdsa": "1", + "pub_key_ed25519": "2", "instance_id": "iid-datasource-none", "hostname": "hostname", "fqdn": "hostname", @@ -98,11 +97,11 @@ "config", [ # phone_home definition with url - {"phone_home": {"post": ["pub_key_dsa"]}}, + {"phone_home": {"post": ["pub_key_rsa"]}}, # post using string other than "all" - {"phone_home": {"url": "test_url", "post": "pub_key_dsa"}}, + {"phone_home": {"url": "test_url", "post": "pub_key_rsa"}}, # post using list with misspelled entry - {"phone_home": {"url": "test_url", "post": ["pub_kye_dsa"]}}, + {"phone_home": {"url": "test_url", "post": ["pub_kye_rsa"]}}, ], ) @skipUnlessJsonSchema() diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_puppet.py cloud-init-24.1.3/tests/unittests/config/test_cc_puppet.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_puppet.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_puppet.py 2024-03-27 
13:14:04.000000000 +0000 @@ -261,7 +261,7 @@ util.write_file(self.conf, "[agent]\nserver = origpuppet\nother = 3") self.cloud.distro = mock.MagicMock() cc_puppet.handle("notimportant", cfg, self.cloud, None) - content = util.load_file(self.conf) + content = util.load_text_file(self.conf) expected = "[agent]\nserver = puppetserver.example.org\nother = 3\n\n" self.assertEqual(expected, content) @@ -298,7 +298,7 @@ } } cc_puppet.handle("notimportant", cfg, self.cloud, None) - content = util.load_file(self.csr_attributes_path) + content = util.load_text_file(self.csr_attributes_path) expected = textwrap.dedent( """\ custom_attributes: diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_resizefs.py cloud-init-24.1.3/tests/unittests/config/test_cc_resizefs.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_resizefs.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_resizefs.py 2024-03-27 13:14:04.000000000 +0000 @@ -13,6 +13,7 @@ _resize_xfs, _resize_zfs, can_skip_resize, + get_device_info_from_zpool, handle, maybe_get_writable_device_path, ) @@ -25,12 +26,14 @@ from tests.unittests.helpers import ( CiTestCase, mock, + readResource, skipUnlessJsonSchema, util, wrap_and_call, ) LOG = logging.getLogger(__name__) +M_PATH = "cloudinit.config.cc_resizefs." class TestResizefs(CiTestCase): @@ -180,7 +183,7 @@ @mock.patch("cloudinit.util.is_container", return_value=False) @mock.patch("cloudinit.util.parse_mount") - @mock.patch("cloudinit.util.get_device_info_from_zpool") + @mock.patch("cloudinit.config.cc_resizefs.get_device_info_from_zpool") @mock.patch("cloudinit.util.get_mount_info") def test_handle_zfs_root( self, mount_info, zpool_info, parse_mount, is_container @@ -204,7 +207,7 @@ @mock.patch("cloudinit.util.is_container", return_value=False) @mock.patch("cloudinit.util.get_mount_info") - @mock.patch("cloudinit.util.get_device_info_from_zpool") + @mock.patch("cloudinit.config.cc_resizefs.get_device_info_from_zpool") @mock.patch("cloudinit.util.parse_mount") def test_handle_modern_zfsroot( self, mount_info, zpool_info, parse_mount, is_container @@ -517,3 +520,48 @@ else: with pytest.raises(SchemaValidationError, match=error_msg): validate_cloudconfig_schema(config, get_schema(), strict=True) + + +class TestZpool: + @mock.patch(M_PATH + "os") + @mock.patch("cloudinit.subp.subp") + def test_get_device_info_from_zpool(self, zpool_output, m_os): + # mock /dev/zfs exists + m_os.path.exists.return_value = True + # mock subp command from util.get_mount_info_fs_on_zpool + zpool_output.return_value = ( + readResource("zpool_status_simple.txt"), + "", + ) + ret = get_device_info_from_zpool("vmzroot") + assert "gpt/system" == ret + m_os.path.exists.assert_called_with("/dev/zfs") + + @mock.patch(M_PATH + "os") + @mock.patch("cloudinit.subp.subp", return_value=("", "")) + def test_get_device_info_from_zpool_no_dev_zfs(self, m_os, m_subp): + # mock /dev/zfs missing + m_os.path.exists.return_value = False + assert not get_device_info_from_zpool("vmzroot") + + @mock.patch(M_PATH + "os") + @mock.patch("cloudinit.subp.subp") + def test_get_device_info_from_zpool_handles_no_zpool(self, m_sub, m_os): + """Handle case where there is no zpool command""" + # mock /dev/zfs exists + m_os.path.exists.return_value = True + m_sub.side_effect = ProcessExecutionError("No zpool cmd") + assert not get_device_info_from_zpool("vmzroot") + + @mock.patch(M_PATH + "os") + @mock.patch("cloudinit.subp.subp") + def test_get_device_info_from_zpool_on_error(self, zpool_output, m_os): + # 
mock /dev/zfs exists + m_os.path.exists.return_value = True + + # mock subp command from get_mount_info_fs_on_zpool + zpool_output.return_value = ( + readResource("zpool_status_simple.txt"), + "error", + ) + assert not get_device_info_from_zpool("vmzroot") diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_resolv_conf.py cloud-init-24.1.3/tests/unittests/config/test_cc_resolv_conf.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_resolv_conf.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_resolv_conf.py 2024-03-27 13:14:04.000000000 +0000 @@ -17,9 +17,9 @@ get_schema, validate_cloudconfig_schema, ) +from tests.helpers import cloud_init_project_dir from tests.unittests.helpers import ( FilesystemMockingTestCase, - cloud_init_project_dir, skipUnlessJsonSchema, ) from tests.unittests.util import MockDistro diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_rsyslog.py cloud-init-24.1.3/tests/unittests/config/test_cc_rsyslog.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_rsyslog.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_rsyslog.py 2024-03-27 13:14:04.000000000 +0000 @@ -115,7 +115,7 @@ fname = os.path.join(self.tmp, "foo.cfg") self.assertEqual([fname], changed) - self.assertEqual(util.load_file(fname), cfgline + "\n") + self.assertEqual(util.load_text_file(fname), cfgline + "\n") def test_multiple_files(self): configs = [ @@ -139,11 +139,11 @@ self.assertEqual([f[0] for f in expected], changed) actual = [] for fname, _content in expected: - util.load_file(fname) + util.load_text_file(fname) actual.append( ( fname, - util.load_file(fname), + util.load_text_file(fname), ) ) self.assertEqual(expected, actual) @@ -159,7 +159,7 @@ self.assertEqual([fname], changed) expected_content = "\n".join([c for c in configs]) + "\n" - found_content = util.load_file(fname) + found_content = util.load_text_file(fname) self.assertEqual(expected_content, found_content) def test_multiline_content(self): @@ -171,7 +171,7 @@ fname = os.path.join(self.tmp, "default.cfg") expected_content = "\n".join([c for c in configs]) - found_content = util.load_file(fname) + found_content = util.load_text_file(fname) self.assertEqual(expected_content, found_content) diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_runcmd.py cloud-init-24.1.3/tests/unittests/config/test_cc_runcmd.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_runcmd.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_runcmd.py 2024-03-27 13:14:04.000000000 +0000 @@ -14,6 +14,7 @@ validate_cloudconfig_schema, ) from tests.unittests.helpers import ( + SCHEMA_EMPTY_ERROR, FilesystemMockingTestCase, skipUnlessJsonSchema, ) @@ -75,7 +76,9 @@ self.new_root, "var/lib/cloud/instances/iid-datasource-none/scripts/runcmd", ) - self.assertEqual("#!/bin/sh\n'ls' '/'\n", util.load_file(runcmd_file)) + self.assertEqual( + "#!/bin/sh\n'ls' '/'\n", util.load_text_file(runcmd_file) + ) file_stat = os.stat(runcmd_file) self.assertEqual(0o700, stat.S_IMODE(file_stat.st_mode)) @@ -90,7 +93,7 @@ ({"runcmd": ["echo bye", "echo bye"]}, None), # Invalid schemas ({"runcmd": 1}, "1 is not of type 'array'"), - ({"runcmd": []}, r"runcmd: \[\] is too short"), + ({"runcmd": []}, rf"runcmd: \[\] {SCHEMA_EMPTY_ERROR}"), ( { "runcmd": [ diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_seed_random.py cloud-init-24.1.3/tests/unittests/config/test_cc_seed_random.py --- 
cloud-init-23.4.4/tests/unittests/config/test_cc_seed_random.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_seed_random.py 2024-03-27 13:14:04.000000000 +0000 @@ -42,6 +42,7 @@ def tearDown(self): apply_patches([i for i in reversed(self.unapply)]) util.del_file(self._seed_file) + super().tearDown() def apply_patches(self, patches): ret = apply_patches(patches) @@ -72,7 +73,7 @@ } } cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), []) - contents = util.load_file(self._seed_file) + contents = util.load_text_file(self._seed_file) self.assertEqual("tiny-tim-was-here", contents) def test_append_random_unknown_encoding(self): @@ -103,7 +104,7 @@ } } cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), []) - contents = util.load_file(self._seed_file) + contents = util.load_text_file(self._seed_file) self.assertEqual("tiny-toe", contents) def test_append_random_gz(self): @@ -116,7 +117,7 @@ } } cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), []) - contents = util.load_file(self._seed_file) + contents = util.load_text_file(self._seed_file) self.assertEqual("big-toe", contents) def test_append_random_base64(self): @@ -129,7 +130,7 @@ } } cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), []) - contents = util.load_file(self._seed_file) + contents = util.load_text_file(self._seed_file) self.assertEqual("bubbles", contents) def test_append_random_b64(self): @@ -142,7 +143,7 @@ } } cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), []) - contents = util.load_file(self._seed_file) + contents = util.load_text_file(self._seed_file) self.assertEqual("kit-kat", contents) def test_append_random_metadata(self): @@ -154,7 +155,7 @@ } c = get_cloud("ubuntu", metadata={"random_seed": "-so-was-josh"}) cc_seed_random.handle("test", cfg, c, []) - contents = util.load_file(self._seed_file) + contents = util.load_text_file(self._seed_file) self.assertEqual("tiny-tim-was-here-so-was-josh", contents) def test_seed_command_provided_and_available(self): @@ -209,7 +210,7 @@ # this just instists that the first time subp was called, # RANDOM_SEED_FILE was in the environment set up correctly - subp_env = [f["env"] for f in self.subp_called] + subp_env = [f["update_env"] for f in self.subp_called] self.assertEqual(subp_env[0].get("RANDOM_SEED_FILE"), self._seed_file) diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_set_hostname.py cloud-init-24.1.3/tests/unittests/config/test_cc_set_hostname.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_set_hostname.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_set_hostname.py 2024-03-27 13:14:04.000000000 +0000 @@ -46,7 +46,7 @@ cc = cloud.Cloud(ds, paths, {}, distro, None) self.patchUtils(self.tmp) cc_set_hostname.handle("cc_set_hostname", cfg, cc, []) - contents = util.load_file("/etc/hostname") + contents = util.load_text_file("/etc/hostname") self.assertEqual("blah.yahoo.com", contents.strip()) @mock.patch("cloudinit.distros.Distro.uses_systemd", return_value=False) @@ -62,7 +62,7 @@ cc = cloud.Cloud(ds, paths, {}, distro, None) self.patchUtils(self.tmp) cc_set_hostname.handle("cc_set_hostname", cfg, cc, []) - contents = util.load_file("/etc/sysconfig/network", decode=False) + contents = util.load_binary_file("/etc/sysconfig/network") n_cfg = ConfigObj(BytesIO(contents)) self.assertEqual({"HOSTNAME": "blah"}, dict(n_cfg)) @@ -75,7 +75,7 @@ cc = cloud.Cloud(ds, paths, {}, distro, None) self.patchUtils(self.tmp) cc_set_hostname.handle("cc_set_hostname", 
cfg, cc, []) - contents = util.load_file("/etc/sysconfig/network", decode=False) + contents = util.load_binary_file("/etc/sysconfig/network") n_cfg = ConfigObj(BytesIO(contents)) self.assertEqual({"HOSTNAME": "blah.blah.blah.yahoo.com"}, dict(n_cfg)) @@ -90,7 +90,7 @@ cc = cloud.Cloud(ds, paths, {}, distro, None) self.patchUtils(self.tmp) cc_set_hostname.handle("cc_set_hostname", cfg, cc, []) - contents = util.load_file("/etc/hostname") + contents = util.load_text_file("/etc/hostname") self.assertEqual("blah", contents.strip()) @mock.patch("cloudinit.distros.Distro.uses_systemd", return_value=False) @@ -104,7 +104,7 @@ cc = cloud.Cloud(ds, paths, {}, distro, None) self.patchUtils(self.tmp) cc_set_hostname.handle("cc_set_hostname", cfg, cc, []) - contents = util.load_file(distro.hostname_conf_fn) + contents = util.load_text_file(distro.hostname_conf_fn) self.assertEqual("blah", contents.strip()) @mock.patch("cloudinit.distros.photon.subp.subp") @@ -166,7 +166,7 @@ cc_set_hostname.handle( "cc_set_hostname", {"hostname": "hostname1.me.com"}, cc, [] ) - contents = util.load_file("/etc/hostname") + contents = util.load_text_file("/etc/hostname") self.assertEqual("hostname1", contents.strip()) cc_set_hostname.handle( "cc_set_hostname", {"hostname": "hostname1.me.com"}, cc, [] @@ -178,7 +178,7 @@ cc_set_hostname.handle( "cc_set_hostname", {"hostname": "hostname2.me.com"}, cc, [] ) - contents = util.load_file("/etc/hostname") + contents = util.load_text_file("/etc/hostname") self.assertEqual("hostname2", contents.strip()) self.assertIn( "Non-persistently setting the system hostname to hostname2", @@ -199,7 +199,7 @@ util.write_file("/etc/hostname", "") cc_set_hostname.handle("cc_set_hostname", {}, cc, []) - contents = util.load_file("/etc/hostname") + contents = util.load_text_file("/etc/hostname") self.assertEqual("", contents.strip()) @mock.patch("cloudinit.util.get_hostname", return_value="localhost") @@ -218,7 +218,7 @@ cc_set_hostname.handle( "cc_set_hostname", {"hostname": "localhost"}, cc, [] ) - contents = util.load_file("/etc/hostname") + contents = util.load_text_file("/etc/hostname") self.assertEqual("localhost", contents.strip()) def test_error_on_distro_set_hostname_errors(self): @@ -256,7 +256,7 @@ prev_fn = Path(cc.get_cpath("data")) / "set-hostname" prev_fn.touch() cc_set_hostname.handle("cc_set_hostname", cfg, cc, []) - contents = util.load_file("/etc/hostname") + contents = util.load_text_file("/etc/hostname") self.assertEqual("blah", contents.strip()) def test_create_hostname_file_false(self): @@ -272,7 +272,7 @@ self.patchUtils(self.tmp) cc_set_hostname.handle("cc_set_hostname", cfg, cc, []) with self.assertRaises(FileNotFoundError): - util.load_file("/etc/hostname") + util.load_text_file("/etc/hostname") def test_create_hostname_file_false_arch(self): cfg = { @@ -287,7 +287,7 @@ self.patchUtils(self.tmp) cc_set_hostname.handle("cc_set_hostname", cfg, cc, []) with self.assertRaises(FileNotFoundError): - util.load_file("/etc/hostname") + util.load_text_file("/etc/hostname") def test_create_hostname_file_false_alpine(self): cfg = { @@ -302,7 +302,7 @@ self.patchUtils(self.tmp) cc_set_hostname.handle("cc_set_hostname", cfg, cc, []) with self.assertRaises(FileNotFoundError): - util.load_file("/etc/hostname") + util.load_text_file("/etc/hostname") def test_create_hostname_file_false_gentoo(self): cfg = { @@ -317,7 +317,7 @@ self.patchUtils(self.tmp) cc_set_hostname.handle("cc_set_hostname", cfg, cc, []) with self.assertRaises(FileNotFoundError): - util.load_file("/etc/hostname") + 
util.load_text_file("/etc/hostname") def test_create_hostname_file_false_photon(self): cfg = { @@ -332,7 +332,7 @@ self.patchUtils(self.tmp) cc_set_hostname.handle("cc_set_hostname", cfg, cc, []) with self.assertRaises(FileNotFoundError): - util.load_file("/etc/hostname") + util.load_text_file("/etc/hostname") def test_create_hostname_file_false_rhel(self): cfg = { @@ -347,4 +347,4 @@ self.patchUtils(self.tmp) cc_set_hostname.handle("cc_set_hostname", cfg, cc, []) with self.assertRaises(FileNotFoundError): - util.load_file("/etc/hostname") + util.load_text_file("/etc/hostname") diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_set_passwords.py cloud-init-24.1.3/tests/unittests/config/test_cc_set_passwords.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_set_passwords.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_set_passwords.py 2024-03-27 13:14:04.000000000 +0000 @@ -12,7 +12,11 @@ get_schema, validate_cloudconfig_schema, ) -from tests.unittests.helpers import does_not_raise, skipUnlessJsonSchema +from tests.unittests.helpers import ( + SCHEMA_EMPTY_ERROR, + does_not_raise, + skipUnlessJsonSchema, +) from tests.unittests.util import get_cloud MODPATH = "cloudinit.config.cc_set_passwords." @@ -718,7 +722,8 @@ ( {"chpasswd": {"list": []}}, pytest.raises( - SchemaValidationError, match=r"\[\] is too short" + SchemaValidationError, + match=rf"\[\] {SCHEMA_EMPTY_ERROR}", ), ), ], diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_snap.py cloud-init-24.1.3/tests/unittests/config/test_cc_snap.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_snap.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_snap.py 2024-03-27 13:14:04.000000000 +0000 @@ -2,8 +2,6 @@ import logging import os -import re -from io import StringIO import pytest @@ -14,7 +12,12 @@ get_schema, validate_cloudconfig_schema, ) -from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema +from tests.unittests.helpers import ( + SCHEMA_EMPTY_ERROR, + CiTestCase, + mock, + skipUnlessJsonSchema, +) from tests.unittests.util import get_cloud M_PATH = "cloudinit.config.cc_snap." 
@@ -133,7 +136,9 @@ ] == m_subp.call_args_list compare_file = tmpdir.join("comparison") util.write_file(compare_file, "\n".join(assertions).encode("utf-8")) - assert util.load_file(compare_file) == util.load_file(assert_file) + assert util.load_text_file(compare_file) == util.load_text_file( + assert_file + ) @mock.patch("cloudinit.config.cc_snap.subp.subp") def test_add_assertions_adds_assertions_as_dict( @@ -161,7 +166,9 @@ compare_file = tmpdir.join("comparison") combined = "\n".join(assertions.values()) util.write_file(compare_file, combined.encode("utf-8")) - assert util.load_file(compare_file) == util.load_file(assert_file) + assert util.load_text_file(compare_file) == util.load_text_file( + assert_file + ) class TestRunCommands(CiTestCase): @@ -188,71 +195,34 @@ str(context_manager.exception), ) - def test_run_command_logs_commands_and_exit_codes_to_stderr(self): - """All exit codes are logged to stderr.""" - outfile = self.tmp_path("output.log", dir=self.tmp) - - cmd1 = 'echo "HI" >> %s' % outfile - cmd2 = "bogus command" - cmd3 = 'echo "MOM" >> %s' % outfile - commands = [cmd1, cmd2, cmd3] - - mock_path = "cloudinit.config.cc_snap.sys.stderr" - with mock.patch(mock_path, new_callable=StringIO) as m_stderr: - with self.assertRaises(RuntimeError) as context_manager: - run_commands(commands=commands) - - self.assertIsNotNone( - re.search( - r"bogus: (command )?not found", str(context_manager.exception) - ), - msg="Expected bogus command not found", - ) - expected_stderr_log = "\n".join( - [ - "Begin run command: {cmd}".format(cmd=cmd1), - "End run command: exit(0)", - "Begin run command: {cmd}".format(cmd=cmd2), - "ERROR: End run command: exit(127)", - "Begin run command: {cmd}".format(cmd=cmd3), - "End run command: exit(0)\n", - ] - ) - self.assertEqual(expected_stderr_log, m_stderr.getvalue()) - - def test_run_command_as_lists(self): - """When commands are specified as a list, run them in order.""" - outfile = self.tmp_path("output.log", dir=self.tmp) - - cmd1 = 'echo "HI" >> %s' % outfile - cmd2 = 'echo "MOM" >> %s' % outfile - commands = [cmd1, cmd2] - mock_path = "cloudinit.config.cc_snap.sys.stderr" - with mock.patch(mock_path, new_callable=StringIO): - run_commands(commands=commands) - - self.assertIn( - "DEBUG: Running user-provided snap commands", self.logs.getvalue() - ) - self.assertEqual("HI\nMOM\n", util.load_file(outfile)) - self.assertIn( - "WARNING: Non-snap commands in snap config:", self.logs.getvalue() - ) - def test_run_command_dict_sorted_as_command_script(self): +@pytest.mark.allow_all_subp +class TestCommands: + def test_run_command_dict_sorted_as_command_script(self, caplog, tmp_path): """When commands are a dict, sort them and run.""" - outfile = self.tmp_path("output.log", dir=self.tmp) - cmd1 = 'echo "HI" >> %s' % outfile - cmd2 = 'echo "MOM" >> %s' % outfile + outfile = f"{tmp_path}/output.log" + cmd1 = f'echo "HI" >> {outfile}' + cmd2 = f'echo "MOM" >> {outfile}' commands = {"02": cmd1, "01": cmd2} - mock_path = "cloudinit.config.cc_snap.sys.stderr" - with mock.patch(mock_path, new_callable=StringIO): - run_commands(commands=commands) + run_commands(commands=commands) - expected_messages = ["DEBUG: Running user-provided snap commands"] + expected_messages = ["Running user-provided snap commands"] for message in expected_messages: - self.assertIn(message, self.logs.getvalue()) - self.assertEqual("MOM\nHI\n", util.load_file(outfile)) + assert message in caplog.text + assert "MOM\nHI\n" == util.load_text_file(outfile) + + def test_run_command_as_lists(self, 
caplog, tmp_path): + """When commands are specified as a list, run them in order.""" + outfile = "output.log" + + cmd1 = f'echo "HI" >> {tmp_path}/{outfile}' + cmd2 = f'echo "MOM" >> {tmp_path}/{outfile}' + commands = [cmd1, cmd2] + run_commands(commands=commands) + + assert "Running user-provided snap commands" in caplog.text + assert "HI\nMOM\n" == util.load_text_file(f"{tmp_path}/{outfile}") + assert "Non-snap commands in snap config:" in caplog.text @skipUnlessJsonSchema() @@ -288,15 +258,18 @@ {"snap": {"commands": ["ls"], "invalid-key": ""}}, "Additional properties are not allowed", ), - ({"snap": {}}, "{} does not have enough properties"), + ({"snap": {}}, f"{{}} {SCHEMA_EMPTY_ERROR}"), ( {"snap": {"commands": "broken"}}, "'broken' is not of type 'object', 'array'", ), - ({"snap": {"commands": []}}, r"snap.commands: \[\] is too short"), + ( + {"snap": {"commands": []}}, + rf"snap.commands: \[\] {SCHEMA_EMPTY_ERROR}", + ), ( {"snap": {"commands": {}}}, - r"snap.commands: {} does not have enough properties", + rf"snap.commands: {{}} {SCHEMA_EMPTY_ERROR}", ), ({"snap": {"commands": [123]}}, ""), ({"snap": {"commands": {"01": 123}}}, ""), @@ -311,10 +284,10 @@ {"snap": {"assertions": "broken"}}, "'broken' is not of type 'object', 'array'", ), - ({"snap": {"assertions": []}}, r"\[\] is too short"), + ({"snap": {"assertions": []}}, rf"\[\] {SCHEMA_EMPTY_ERROR}"), ( {"snap": {"assertions": {}}}, - r"\{} does not have enough properties", + rf"\{{}} {SCHEMA_EMPTY_ERROR}", ), ], ) @@ -328,8 +301,11 @@ class TestHandle: + @mock.patch("cloudinit.util.wait_for_snap_seeded") @mock.patch("cloudinit.config.cc_snap.subp.subp") - def test_handle_adds_assertions(self, m_subp, fake_cloud, tmpdir): + def test_handle_adds_assertions( + self, m_subp, wait_for_snap_seeded, fake_cloud, tmpdir + ): """Any configured snap assertions are provided to add_assertions.""" assert_file = os.path.join( fake_cloud.paths.get_ipath_cur(), "snapd.assertions" @@ -341,4 +317,7 @@ handle("snap", cfg=cfg, cloud=fake_cloud, args=None) content = "\n".join(cfg["snap"]["assertions"]) util.write_file(compare_file, content.encode("utf-8")) - assert util.load_file(compare_file) == util.load_file(assert_file) + assert util.load_text_file(compare_file) == util.load_text_file( + assert_file + ) + wait_for_snap_seeded.assert_called_once_with(fake_cloud) diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_ssh.py cloud-init-24.1.3/tests/unittests/config/test_cc_ssh.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_ssh.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_ssh.py 2024-03-27 13:14:04.000000000 +0000 @@ -20,15 +20,11 @@ LOG = logging.getLogger(__name__) MODPATH = "cloudinit.config.cc_ssh." 
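# (Illustrative aside on the deletion just below: the removed filter
# used `name not in "dsa"`, a *substring* test against the string
# "dsa", not membership in a collection. It behaved correctly only
# because "dsa" itself was the sole generated key name that is a
# substring of "dsa". With DSA host-key generation dropped, both the
# filter and its pitfall go away. The distinction, demonstrated:
_legacy_names = ["dsa", "rsa", "ecdsa", "ed25519"]
assert [n for n in _legacy_names if n not in "dsa"] == ["rsa", "ecdsa", "ed25519"]
assert [n for n in _legacy_names if n not in ("dsa",)] == ["rsa", "ecdsa", "ed25519"]
# Both spellings filter "dsa" here, but the substring form would
# silently drop any future key name that happened to be a substring
# of "dsa". End of aside.)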
-KEY_NAMES_NO_DSA = [ - name for name in cc_ssh.GENERATE_KEY_NAMES if name not in "dsa" -] @pytest.fixture(scope="function") def publish_hostkey_test_setup(tmpdir): test_hostkeys = { - "dsa": ("ssh-dss", "AAAAB3NzaC1kc3MAAACB"), "ecdsa": ("ecdsa-sha2-nistp256", "AAAAE2VjZ"), "ed25519": ("ssh-ed25519", "AAAAC3NzaC1lZDI"), "rsa": ("ssh-rsa", "AAAAB3NzaC1yc2EAAA"), @@ -128,12 +124,11 @@ if not m_fips(): expected_calls = [ mock.call("/etc/ssh/ssh_host_rsa_key"), - mock.call("/etc/ssh/ssh_host_dsa_key"), mock.call("/etc/ssh/ssh_host_ecdsa_key"), mock.call("/etc/ssh/ssh_host_ed25519_key"), ] else: - # Enabled fips doesn't generate dsa or ed25519 + # Enabled fips doesn't generate ed25519 expected_calls = [ mock.call("/etc/ssh/ssh_host_rsa_key"), mock.call("/etc/ssh/ssh_host_ecdsa_key"), @@ -228,10 +223,10 @@ @pytest.mark.parametrize( "cfg, expected_key_types", [ - pytest.param({}, KEY_NAMES_NO_DSA, id="default"), + pytest.param({}, cc_ssh.GENERATE_KEY_NAMES, id="default"), pytest.param( {"ssh_publish_hostkeys": {"enabled": True}}, - KEY_NAMES_NO_DSA, + cc_ssh.GENERATE_KEY_NAMES, id="config_enable", ), pytest.param( @@ -491,10 +486,6 @@ ( ({"ssh_authorized_keys": ["key1", "key2"]}, None), ( - {"ssh_keys": {"dsa_private": "key1", "rsa_public": "key2"}}, - None, - ), - ( {"ssh_keys": {"rsa_a": "key"}}, "'rsa_a' does not match any of the regexes", ), diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_timezone.py cloud-init-24.1.3/tests/unittests/config/test_cc_timezone.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_timezone.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_timezone.py 2024-03-27 13:14:04.000000000 +0000 @@ -42,9 +42,9 @@ cc_timezone.handle("cc_timezone", cfg, cc, []) - contents = util.load_file("/etc/sysconfig/clock", decode=False) + contents = util.load_binary_file("/etc/sysconfig/clock") n_cfg = ConfigObj(BytesIO(contents)) self.assertEqual({"TIMEZONE": cfg["timezone"]}, dict(n_cfg)) - contents = util.load_file("/etc/localtime") + contents = util.load_text_file("/etc/localtime") self.assertEqual(dummy_contents, contents.strip()) diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_ubuntu_advantage.py cloud-init-24.1.3/tests/unittests/config/test_cc_ubuntu_advantage.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_ubuntu_advantage.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_ubuntu_advantage.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,1412 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
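# (Illustrative aside: everything below is the wholesale removal of
# test_cc_ubuntu_advantage.py; in 24.1 the module was renamed to
# ubuntu_pro, so equivalent coverage continues under the new name.
# The deleted file's `fake_uaclient` fixture remains a pattern worth
# noting: it stubs the optional uaclient dependency by planting mocks
# in sys.modules before anything imports it, roughly
#     mocker.patch.dict("sys.modules")       # restored on teardown
#     sys.modules["uaclient"] = mock.Mock()  # imports now hit the stub
# which lets the tests run without the real uaclient package
# installed.)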
-import json -import logging -import re -import sys -from collections import namedtuple - -import pytest - -from cloudinit import subp -from cloudinit.config.cc_ubuntu_advantage import ( - _attach, - _auto_attach, - _should_auto_attach, - configure_ua, - handle, - maybe_install_ua_tools, - set_ua_config, - validate_schema_features, -) -from cloudinit.config.schema import ( - SchemaValidationError, - get_schema, - validate_cloudconfig_schema, -) -from tests.unittests.helpers import does_not_raise, mock, skipUnlessJsonSchema -from tests.unittests.util import get_cloud - -# Module path used in mocks -MPATH = "cloudinit.config.cc_ubuntu_advantage" - - -class FakeUserFacingError(Exception): - def __init__(self, msg: str): - self.msg = msg - - -class FakeAlreadyAttachedError(FakeUserFacingError): - pass - - -class FakeAlreadyAttachedOnPROError(FakeUserFacingError): - pass - - -@pytest.fixture -def fake_uaclient(mocker): - """Mocks `uaclient` module""" - - mocker.patch.dict("sys.modules") - m_uaclient = mock.Mock() - - sys.modules["uaclient"] = m_uaclient - - # Exceptions - _exceptions = namedtuple( - "exceptions", - [ - "UserFacingError", - "AlreadyAttachedError", - ], - )( - FakeUserFacingError, - FakeAlreadyAttachedError, - ) - sys.modules["uaclient.api.exceptions"] = _exceptions - - -@pytest.mark.usefixtures("fake_uaclient") -@mock.patch(f"{MPATH}.subp.subp") -class TestConfigureUA: - def test_configure_ua_attach_error(self, m_subp): - """Errors from pro attach command are raised.""" - m_subp.side_effect = subp.ProcessExecutionError( - "Invalid token SomeToken" - ) - match = ( - "Failure attaching Ubuntu Advantage:\nUnexpected error while" - " running command.\nCommand: -\nExit code: -\nReason: -\n" - "Stdout: Invalid token REDACTED\nStderr: -" - ) - with pytest.raises(RuntimeError, match=match): - configure_ua(token="SomeToken") - - @pytest.mark.parametrize( - "kwargs, call_args_list, log_record_tuples", - [ - # When token is provided, attach to pro using the token. - pytest.param( - {"token": "SomeToken"}, - [ - mock.call( - ["pro", "attach", "SomeToken"], - logstring=["pro", "attach", "REDACTED"], - rcs={0, 2}, - ) - ], - [ - ( - MPATH, - logging.DEBUG, - "Attaching to Ubuntu Advantage. pro attach REDACTED", - ) - ], - id="with_token", - ), - # When services is an empty list, do not auto-enable attach. - pytest.param( - {"token": "SomeToken", "enable": []}, - [ - mock.call( - ["pro", "attach", "SomeToken"], - logstring=["pro", "attach", "REDACTED"], - rcs={0, 2}, - ) - ], - [ - ( - MPATH, - logging.DEBUG, - "Attaching to Ubuntu Advantage. pro attach REDACTED", - ) - ], - id="with_empty_services", - ), - # When services a list, only enable specific services. - pytest.param( - {"token": "SomeToken", "enable": ["fips"]}, - [ - mock.call( - ["pro", "attach", "--no-auto-enable", "SomeToken"], - logstring=[ - "pro", - "attach", - "--no-auto-enable", - "REDACTED", - ], - rcs={0, 2}, - ), - mock.call( - [ - "pro", - "enable", - "--assume-yes", - "--format", - "json", - "fips", - ], - capture=True, - rcs={0, 1}, - ), - ], - [ - ( - MPATH, - logging.DEBUG, - "Attaching to Ubuntu Advantage. 
pro attach" - " --no-auto-enable REDACTED", - ) - ], - id="with_specific_services", - ), - # When services a string, treat as singleton list and warn - pytest.param( - {"token": "SomeToken", "enable": "fips"}, - [ - mock.call( - ["pro", "attach", "--no-auto-enable", "SomeToken"], - logstring=[ - "pro", - "attach", - "--no-auto-enable", - "REDACTED", - ], - rcs={0, 2}, - ), - mock.call( - [ - "pro", - "enable", - "--assume-yes", - "--format", - "json", - "fips", - ], - capture=True, - rcs={0, 1}, - ), - ], - [ - ( - MPATH, - logging.DEBUG, - "Attaching to Ubuntu Advantage. pro attach" - " --no-auto-enable REDACTED", - ), - ( - MPATH, - logging.WARNING, - "ubuntu_advantage: enable should be a list, not a " - "string; treating as a single enable", - ), - ], - id="with_string_services", - ), - # When services not string or list, warn but still attach - pytest.param( - {"token": "SomeToken", "enable": {"deffo": "wont work"}}, - [ - mock.call( - ["pro", "attach", "SomeToken"], - logstring=["pro", "attach", "REDACTED"], - rcs={0, 2}, - ) - ], - [ - ( - MPATH, - logging.DEBUG, - "Attaching to Ubuntu Advantage. pro attach REDACTED", - ), - ( - MPATH, - logging.WARNING, - "ubuntu_advantage: enable should be a list, not a" - " dict; skipping enabling services", - ), - ], - id="with_weird_services", - ), - ], - ) - @mock.patch(f"{MPATH}.maybe_install_ua_tools", mock.MagicMock()) - def test_configure_ua_attach( - self, m_subp, kwargs, call_args_list, log_record_tuples, caplog - ): - m_subp.return_value = subp.SubpResult(json.dumps({"errors": []}), "") - configure_ua(**kwargs) - assert call_args_list == m_subp.call_args_list - for record_tuple in log_record_tuples: - assert record_tuple in caplog.record_tuples - - def test_configure_ua_already_attached(self, m_subp, caplog): - """pro is already attached to an subscription""" - m_subp.rcs = 2 - configure_ua(token="SomeToken") - assert m_subp.call_args_list == [ - mock.call( - ["pro", "attach", "SomeToken"], - logstring=["pro", "attach", "REDACTED"], - rcs={0, 2}, - ) - ] - assert ( - MPATH, - logging.DEBUG, - "Attaching to Ubuntu Advantage. 
pro attach REDACTED", - ) in caplog.record_tuples - - def test_configure_ua_attach_on_service_enabled( - self, m_subp, caplog, fake_uaclient - ): - """retry enabling an already enabled service""" - - def fake_subp(cmd, capture=None, rcs=None, logstring=None): - fail_cmds = [ - "pro", - "enable", - "--assume-yes", - "--format", - "json", - "livepatch", - ] - if cmd == fail_cmds and capture: - response = { - "errors": [ - { - "message": "Does not matter", - "message_code": "service-already-enabled", - "service": cmd[-1], - "type": "service", - } - ] - } - return subp.SubpResult(json.dumps(response), "") - - m_subp.side_effect = fake_subp - - configure_ua(token="SomeToken", enable=["livepatch"]) - assert m_subp.call_args_list == [ - mock.call( - ["pro", "attach", "--no-auto-enable", "SomeToken"], - logstring=["pro", "attach", "--no-auto-enable", "REDACTED"], - rcs={0, 2}, - ), - mock.call( - [ - "pro", - "enable", - "--assume-yes", - "--format", - "json", - "livepatch", - ], - capture=True, - rcs={0, 1}, - ), - ] - assert ( - MPATH, - logging.DEBUG, - "Service `livepatch` already enabled.", - ) in caplog.record_tuples - - def test_configure_ua_attach_on_service_error(self, m_subp, caplog): - """all services should be enabled and then any failures raised""" - - def fake_subp(cmd, capture=None, rcs=None, logstring=None): - fail_cmd = [ - "pro", - "enable", - "--assume-yes", - "--format", - "json", - ] - if cmd[: len(fail_cmd)] == fail_cmd and capture: - response = { - "errors": [ - { - "message": f"Invalid {svc} credentials", - "message_code": "some-code", - "service": svc, - "type": "service", - } - for svc in ["esm", "cc"] - ] - + [ - { - "message": "Cannot enable unknown service 'asdf'", - "message_code": "invalid-service-or-failure", - "service": None, - "type": "system", - } - ] - } - return subp.SubpResult(json.dumps(response), "") - return subp.SubpResult(json.dumps({"errors": []}), "") - - m_subp.side_effect = fake_subp - - with pytest.raises( - RuntimeError, - match=re.escape( - "Failure enabling Ubuntu Advantage service(s): esm, cc" - ), - ): - configure_ua( - token="SomeToken", enable=["esm", "cc", "fips", "asdf"] - ) - assert m_subp.call_args_list == [ - mock.call( - ["pro", "attach", "--no-auto-enable", "SomeToken"], - logstring=["pro", "attach", "--no-auto-enable", "REDACTED"], - rcs={0, 2}, - ), - mock.call( - [ - "pro", - "enable", - "--assume-yes", - "--format", - "json", - "esm", - "cc", - "fips", - "asdf", - ], - capture=True, - rcs={0, 1}, - ), - ] - assert ( - MPATH, - logging.WARNING, - "Failure enabling `esm`: Invalid esm credentials", - ) in caplog.record_tuples - assert ( - MPATH, - logging.WARNING, - "Failure enabling `cc`: Invalid cc credentials", - ) in caplog.record_tuples - assert ( - MPATH, - logging.WARNING, - "Failure of type `system`: Cannot enable unknown service 'asdf'", - ) in caplog.record_tuples - assert 'Failure enabling "fips"' not in caplog.text - - def test_ua_enable_unexpected_error_codes(self, m_subp): - def fake_subp(cmd, capture=None, **kwargs): - if cmd[:2] == ["pro", "enable"] and capture: - raise subp.ProcessExecutionError(exit_code=255) - return subp.SubpResult(json.dumps({"errors": []}), "") - - m_subp.side_effect = fake_subp - - with pytest.raises( - RuntimeError, - match=re.escape("Error while enabling service(s): esm"), - ): - configure_ua(token="SomeToken", enable=["esm"]) - - def test_ua_enable_non_json_response(self, m_subp): - def fake_subp(cmd, capture=None, **kwargs): - if cmd[:2] == ["pro", "enable"] and capture: - return 
subp.SubpResult("I dream to be a Json", "") - return subp.SubpResult(json.dumps({"errors": []}), "") - - m_subp.side_effect = fake_subp - - with pytest.raises( - RuntimeError, - match=re.escape("UA response was not json: I dream to be a Json"), - ): - configure_ua(token="SomeToken", enable=["esm"]) - - -class TestUbuntuAdvantageSchema: - @pytest.mark.parametrize( - "config, expectation", - [ - ({"ubuntu_advantage": {}}, does_not_raise()), - # Strict keys - pytest.param( - {"ubuntu_advantage": {"token": "win", "invalidkey": ""}}, - pytest.raises( - SchemaValidationError, - match=re.escape( - "ubuntu_advantage: Additional properties are not" - " allowed ('invalidkey" - ), - ), - id="additional_properties", - ), - pytest.param( - { - "ubuntu_advantage": { - "features": {"disable_auto_attach": True} - } - }, - does_not_raise(), - id="disable_auto_attach", - ), - pytest.param( - { - "ubuntu_advantage": { - "features": {"disable_auto_attach": False}, - "enable": ["fips"], - "enable_beta": ["realtime-kernel"], - "token": "", - } - }, - does_not_raise(), - id="pro_custom_services", - ), - pytest.param( - { - "ubuntu_advantage": { - "enable": ["fips"], - "enable_beta": ["realtime-kernel"], - "token": "", - } - }, - does_not_raise(), - id="non_pro_beta_services", - ), - pytest.param( - { - "ubuntu_advantage": { - "features": {"asdf": False}, - "enable": ["fips"], - "enable_beta": ["realtime-kernel"], - "token": "", - } - }, - pytest.raises( - SchemaValidationError, - match=re.escape( - "ubuntu_advantage.features: Additional properties are" - " not allowed ('asdf'" - ), - ), - id="pro_additional_features", - ), - pytest.param( - { - "ubuntu_advantage": { - "enable": ["fips"], - "token": "", - "config": { - "http_proxy": "http://some-proxy:8088", - "https_proxy": "https://some-proxy:8088", - "global_apt_https_proxy": "https://some-global-apt-proxy:8088/", # noqa: E501 - "global_apt_http_proxy": "http://some-global-apt-proxy:8088/", # noqa: E501 - "ua_apt_http_proxy": "http://10.0.10.10:3128", - "ua_apt_https_proxy": "https://10.0.10.10:3128", - }, - } - }, - does_not_raise(), - id="ua_config_valid_set", - ), - pytest.param( - { - "ubuntu_advantage": { - "enable": ["fips"], - "token": "", - "config": { - "http_proxy": None, - "https_proxy": None, - "global_apt_https_proxy": None, - "global_apt_http_proxy": None, - "ua_apt_http_proxy": None, - "ua_apt_https_proxy": None, - }, - } - }, - does_not_raise(), - id="ua_config_valid_unset", - ), - pytest.param( - { - "ubuntu_advantage": { - "enable": ["fips"], - "token": "", - "config": ["http_proxy=http://some-proxy:8088"], - } - }, - pytest.raises( - SchemaValidationError, - match=re.escape( - "errors: ubuntu_advantage.config:" - " ['http_proxy=http://some-proxy:8088']" - ), - ), - id="ua_config_invalid_type", - ), - pytest.param( - { - "ubuntu_advantage": { - "enable": ["fips"], - "token": "", - "config": { - "http_proxy": 8888, - "https_proxy": ["http://some-proxy:8088"], - }, - } - }, - pytest.raises( - SchemaValidationError, - match=re.escape( - "errors: ubuntu_advantage.config.http_proxy: 8888" - " is not of type 'string', 'null'," - " ubuntu_advantage.config.https_proxy:" - " ['http://some-proxy:8088']" - ), - ), - id="ua_config_invalid_type", - ), - pytest.param( - { - "ubuntu_advantage": { - "enable": ["fips"], - "token": "", - "config": { - "http_proxy": "http://some-proxy:8088", - "hola": "adios", - }, - } - }, - does_not_raise(), - id="ua_config_unknown_props_allowed", - ), - ], - ) - @skipUnlessJsonSchema() - def test_schema_validation(self, 
config, expectation, caplog): - with expectation: - validate_cloudconfig_schema(config, get_schema(), strict=True) - - @pytest.mark.parametrize( - "ua_section, expectation, log_msgs", - [ - ({}, does_not_raise(), None), - ({"features": {}}, does_not_raise(), None), - ( - {"features": {"disable_auto_attach": True}}, - does_not_raise(), - None, - ), - ( - {"features": {"disable_auto_attach": False}}, - does_not_raise(), - None, - ), - ( - {"features": [0, 1]}, - pytest.raises( - RuntimeError, - match=( - "'ubuntu_advantage.features' should be a dict," - " not a list" - ), - ), - ["'ubuntu_advantage.features' should be a dict, not a list\n"], - ), - ( - {"features": {"disable_auto_attach": [0, 1]}}, - pytest.raises( - RuntimeError, - match=( - "'ubuntu_advantage.features.disable_auto_attach'" - " should be a bool, not a list" - ), - ), - [ - "'ubuntu_advantage.features.disable_auto_attach' should be" - " a bool, not a list\n" - ], - ), - ], - ) - def test_validate_schema_features( - self, ua_section, expectation, log_msgs, caplog - ): - with expectation: - validate_schema_features(ua_section) - if log_msgs is not None: - for log_msg in log_msgs: - assert log_msg in caplog.text - else: - assert not caplog.text - - -class TestHandle: - - cloud = get_cloud() - - @pytest.mark.parametrize( - [ - "cfg", - "cloud", - "log_record_tuples", - "maybe_install_call_args_list", - "set_ua_config_call_args_list", - "configure_ua_call_args_list", - ], - [ - # When no ua-related configuration is provided, nothing happens. - pytest.param( - {}, - None, - [ - ( - MPATH, - logging.DEBUG, - "Skipping module named nomatter, no 'ubuntu_advantage'" - " configuration found", - ) - ], - [], - [], - [], - id="no_config", - ), - # If ubuntu_advantage is provided, try installing ua-tools package. - pytest.param( - {"ubuntu_advantage": {"token": "valid"}}, - cloud, - [], - [mock.call(cloud)], - [mock.call(None)], - None, - id="tries_to_install_ubuntu_advantage_tools", - ), - # If ubuntu_advantage config provided, configure it. - pytest.param( - { - "ubuntu_advantage": { - "token": "valid", - "config": {"http_proxy": "http://proxy.org"}, - } - }, - cloud, - [], - None, - [mock.call({"http_proxy": "http://proxy.org"})], - None, - id="set_ua_config", - ), - # All ubuntu_advantage config keys are passed to configure_ua. - pytest.param( - {"ubuntu_advantage": {"token": "token", "enable": ["esm"]}}, - cloud, - [], - [mock.call(cloud)], - [mock.call(None)], - [mock.call(token="token", enable=["esm"])], - id="passes_credentials_and_services_to_configure_ua", - ), - # Warning when ubuntu-advantage key is present with new config - pytest.param( - {"ubuntu-advantage": {"token": "token", "enable": ["esm"]}}, - None, - [ - ( - MPATH, - logging.WARNING, - 'Deprecated configuration key "ubuntu-advantage"' - " provided. 
Expected underscore delimited " - '"ubuntu_advantage"; will attempt to continue.', - ) - ], - None, - [mock.call(None)], - [mock.call(token="token", enable=["esm"])], - id="warns_on_deprecated_ubuntu_advantage_key_w_config", - ), - # Warning with beta services during attach - pytest.param( - { - "ubuntu_advantage": { - "token": "token", - "enable": ["esm"], - "enable_beta": ["realtime-kernel"], - } - }, - None, - [ - ( - MPATH, - logging.DEBUG, - "Ignoring `ubuntu_advantage.enable_beta` services in" - " UA attach: realtime-kernel", - ) - ], - None, - [mock.call(None)], - [mock.call(token="token", enable=["esm"])], - id="warns_on_enable_beta_in_attach", - ), - # ubuntu_advantage should be preferred over ubuntu-advantage - pytest.param( - { - "ubuntu-advantage": {"token": "nope", "enable": ["wrong"]}, - "ubuntu_advantage": {"token": "token", "enable": ["esm"]}, - }, - None, - [ - ( - MPATH, - logging.WARNING, - 'Deprecated configuration key "ubuntu-advantage"' - " provided. Expected underscore delimited " - '"ubuntu_advantage"; will attempt to continue.', - ) - ], - None, - [mock.call(None)], - [mock.call(token="token", enable=["esm"])], - id="prefers_new_style_config", - ), - ], - ) - @mock.patch(f"{MPATH}._should_auto_attach", return_value=False) - @mock.patch(f"{MPATH}._auto_attach") - @mock.patch(f"{MPATH}.configure_ua") - @mock.patch(f"{MPATH}.set_ua_config") - @mock.patch(f"{MPATH}.maybe_install_ua_tools") - def test_handle_attach( - self, - m_maybe_install_ua_tools, - m_set_ua_config, - m_configure_ua, - m_auto_attach, - m_should_auto_attach, - cfg, - cloud, - log_record_tuples, - maybe_install_call_args_list, - set_ua_config_call_args_list, - configure_ua_call_args_list, - caplog, - ): - """Non-Pro schemas and instance.""" - handle("nomatter", cfg=cfg, cloud=cloud, args=None) - for record_tuple in log_record_tuples: - assert record_tuple in caplog.record_tuples - if maybe_install_call_args_list is not None: - assert ( - maybe_install_call_args_list - == m_maybe_install_ua_tools.call_args_list - ) - if set_ua_config_call_args_list is not None: - assert ( - set_ua_config_call_args_list == m_set_ua_config.call_args_list - ) - if configure_ua_call_args_list is not None: - assert configure_ua_call_args_list == m_configure_ua.call_args_list - assert [] == m_auto_attach.call_args_list - - @pytest.mark.parametrize( - [ - "cfg", - "cloud", - "log_record_tuples", - "auto_attach_side_effect", - "should_auto_attach", - "auto_attach_call_args_list", - "attach_call_args_list", - "expectation", - ], - [ - # When auto_attach successes, no call to configure_ua. - pytest.param( - { - "ubuntu_advantage": { - "features": {"disable_auto_attach": False} - } - }, - cloud, - [], - None, # auto_attach successes - True, # Pro instance - [ - mock.call({"features": {"disable_auto_attach": False}}) - ], # auto_attach_call_args_list - [], # attach_call_args_list - does_not_raise(), - id="auto_attach_success", - ), - # When auto_attach fails in a Pro instance, no call to - # configure_ua. - pytest.param( - { - "ubuntu_advantage": { - "features": {"disable_auto_attach": False} - } - }, - cloud, - [], - RuntimeError("Auto attach error"), - True, # Pro instance - [ - mock.call({"features": {"disable_auto_attach": False}}) - ], # auto_attach_call_args_list - [], # attach_call_args_list - pytest.raises(RuntimeError, match="Auto attach error"), - id="auto_attach_error", - ), - # In a non-Pro instance with token, fallback to normal attach. 
- pytest.param( - { - "ubuntu_advantage": { - "features": {"disable_auto_attach": False}, - "token": "token", - } - }, - cloud, - [], - None, - False, # non-Pro instance - [], # auto_attach_call_args_list - [ - mock.call( - { - "features": {"disable_auto_attach": False}, - "token": "token", - }, - ) - ], # attach_call_args_list - does_not_raise(), - id="not_pro_with_token", - ), - # In a non-Pro instance with enable, fallback to normal attach. - pytest.param( - {"ubuntu_advantage": {"enable": ["esm"]}}, - cloud, - [], - None, - False, # non-Pro instance - [], # auto_attach_call_args_list - [ - mock.call( - { - "enable": ["esm"], - }, - ) - ], # attach_call_args_list - does_not_raise(), - id="not_pro_with_enable", - ), - ], - ) - @mock.patch(f"{MPATH}._should_auto_attach") - @mock.patch(f"{MPATH}._auto_attach") - @mock.patch(f"{MPATH}._attach") - def test_handle_auto_attach_vs_attach( - self, - m_attach, - m_auto_attach, - m_should_auto_attach, - cfg, - cloud, - log_record_tuples, - auto_attach_side_effect, - should_auto_attach, - auto_attach_call_args_list, - attach_call_args_list, - expectation, - caplog, - ): - m_should_auto_attach.return_value = should_auto_attach - if auto_attach_side_effect is not None: - m_auto_attach.side_effect = auto_attach_side_effect - - with expectation: - handle("nomatter", cfg=cfg, cloud=cloud, args=None) - - for record_tuple in log_record_tuples: - assert record_tuple in caplog.record_tuples - if attach_call_args_list is not None: - assert attach_call_args_list == m_attach.call_args_list - else: - assert [] == m_attach.call_args_list - assert auto_attach_call_args_list == m_auto_attach.call_args_list - - @pytest.mark.parametrize("is_pro", [False, True]) - @pytest.mark.parametrize( - "cfg", - [ - ( - { - "ubuntu_advantage": { - "features": {"disable_auto_attach": False}, - } - } - ), - ( - { - "ubuntu_advantage": { - "features": {"disable_auto_attach": True}, - } - } - ), - ], - ) - @mock.patch(f"{MPATH}._should_auto_attach") - @mock.patch(f"{MPATH}._auto_attach") - @mock.patch(f"{MPATH}._attach") - def test_no_fallback_attach( - self, - m_attach, - m_auto_attach, - m_should_auto_attach, - cfg, - is_pro, - ): - """Checks that attach is not called in the case where we want only to - enable or disable pro auto-attach. - """ - m_should_auto_attach.return_value = is_pro - handle("nomatter", cfg=cfg, cloud=self.cloud, args=None) - assert not m_attach.call_args_list - - @pytest.mark.parametrize( - "cfg, handle_kwargs, match", - [ - pytest.param( - {"ubuntu-advantage": {"commands": "nogo"}}, - dict(cloud=None, args=None), - ( - 'Deprecated configuration "ubuntu-advantage: commands" ' - 'provided. Expected "token"' - ), - id="key_dashed", - ), - pytest.param( - {"ubuntu_advantage": {"commands": "nogo"}}, - dict(cloud=None, args=None), - ( - 'Deprecated configuration "ubuntu-advantage: commands" ' - 'provided. 
Expected "token"' - ), - id="key_underscore", - ), - ], - ) - @mock.patch("%s.configure_ua" % MPATH) - def test_handle_error_on_deprecated_commands_key_dashed( - self, m_configure_ua, cfg, handle_kwargs, match - ): - with pytest.raises(RuntimeError, match=match): - handle("nomatter", cfg=cfg, **handle_kwargs) - assert 0 == m_configure_ua.call_count - - @pytest.mark.parametrize( - "cfg, match", - [ - pytest.param( - {"ubuntu_advantage": [0, 1]}, - "'ubuntu_advantage' should be a dict, not a list", - id="on_non_dict_config", - ), - pytest.param( - {"ubuntu_advantage": {"features": [0, 1]}}, - "'ubuntu_advantage.features' should be a dict, not a list", - id="on_non_dict_ua_section", - ), - ], - ) - def test_handle_errors(self, cfg, match): - with pytest.raises(RuntimeError, match=match): - handle( - "nomatter", - cfg=cfg, - cloud=self.cloud, - args=None, - ) - - @mock.patch(f"{MPATH}.subp.subp") - def test_ua_config_error_invalid_url(self, m_subp, caplog): - """Errors from pro config command are raised.""" - cfg = { - "ubuntu_advantage": { - "token": "SomeToken", - "config": {"http_proxy": "not-a-valid-url"}, - } - } - m_subp.side_effect = subp.ProcessExecutionError( - 'Failure enabling "http_proxy"' - ) - with pytest.raises( - ValueError, - match=re.escape( - "Invalid ubuntu_advantage configuration:\nExpected URL scheme" - " http/https for ua:config:http_proxy" - ), - ): - handle( - "nomatter", - cfg=cfg, - cloud=self.cloud, - args=None, - ) - assert not caplog.text - - @mock.patch(f"{MPATH}._should_auto_attach", return_value=False) - @mock.patch(f"{MPATH}.subp.subp") - def test_fallback_to_attach_no_token( - self, m_subp, m_should_auto_attach, caplog - ): - cfg = {"ubuntu_advantage": {"enable": ["esm"]}} - with pytest.raises( - RuntimeError, - match=re.escape( - "`ubuntu_advantage.token` required in non-Pro Ubuntu" - " instances." - ), - ): - handle( - "nomatter", - cfg=cfg, - cloud=self.cloud, - args=None, - ) - assert [] == m_subp.call_args_list - assert ( - "`ubuntu_advantage.token` required in non-Pro Ubuntu" - " instances.\n" - ) in caplog.text - - -class TestShouldAutoAttach: - def test_should_auto_attach_error(self, caplog, fake_uaclient): - m_should_auto_attach = mock.Mock() - m_should_auto_attach.should_auto_attach.side_effect = ( - FakeUserFacingError("Some error") # noqa: E501 - ) - sys.modules[ - "uaclient.api.u.pro.attach.auto.should_auto_attach.v1" - ] = m_should_auto_attach - assert not _should_auto_attach({}) - assert "Error during `should_auto_attach`: Some error" in caplog.text - assert ( - "Unable to determine if this is an Ubuntu Pro instance." - " Fallback to normal UA attach." 
in caplog.text - ) - - @pytest.mark.parametrize( - "ua_section, expected_result", - [ - ({}, None), - ({"features": {"disable_auto_attach": False}}, None), - # The user explicitly disables auto-attach, therefore we do not do - # it: - ({"features": {"disable_auto_attach": True}}, False), - ], - ) - def test_happy_path( - self, ua_section, expected_result, caplog, fake_uaclient - ): - m_should_auto_attach = mock.Mock() - sys.modules[ - "uaclient.api.u.pro.attach.auto.should_auto_attach.v1" - ] = m_should_auto_attach - should_auto_attach_value = object() - m_should_auto_attach.should_auto_attach.return_value.should_auto_attach = ( # noqa: E501 - should_auto_attach_value - ) - if expected_result is None: # UA API does respond - assert should_auto_attach_value == _should_auto_attach(ua_section) - assert ( - "Checking if the instance can be attached to Ubuntu Pro took" - in caplog.text - ) - else: # cloud-init does respond - assert expected_result == _should_auto_attach(ua_section) - assert not caplog.text - - -class TestAutoAttach: - - ua_section: dict = {} - - def test_full_auto_attach_error(self, caplog, mocker, fake_uaclient): - mocker.patch.dict("sys.modules") - sys.modules["uaclient.config"] = mock.Mock() - m_full_auto_attach = mock.Mock() - m_full_auto_attach.full_auto_attach.side_effect = FakeUserFacingError( - "Some error" - ) - sys.modules[ - "uaclient.api.u.pro.attach.auto.full_auto_attach.v1" - ] = m_full_auto_attach - expected_msg = "Error during `full_auto_attach`: Some error" - with pytest.raises(RuntimeError, match=re.escape(expected_msg)): - _auto_attach(self.ua_section) - assert expected_msg in caplog.text - - def test_happy_path(self, caplog, mocker, fake_uaclient): - mocker.patch.dict("sys.modules") - sys.modules["uaclient.config"] = mock.Mock() - sys.modules[ - "uaclient.api.u.pro.attach.auto.full_auto_attach.v1" - ] = mock.Mock() - _auto_attach(self.ua_section) - assert "Attaching to Ubuntu Pro took" in caplog.text - - -class TestAttach: - @mock.patch(f"{MPATH}.configure_ua") - def test_attach_without_token_raises_error(self, m_configure_ua): - with pytest.raises( - RuntimeError, - match=( - "`ubuntu_advantage.token` required in non-Pro Ubuntu" - " instances." - ), - ): - _attach({"enable": ["esm"]}) - assert [] == m_configure_ua.call_args_list - - -@mock.patch(f"{MPATH}.subp.which") -class TestMaybeInstallUATools: - @pytest.mark.parametrize( - [ - "which_return", - "update_side_effect", - "install_side_effect", - "expectation", - "log_msg", - ], - [ - # Do nothing if ubuntu-advantage-tools already exists. 
- pytest.param( - "/usr/bin/ua", # already installed - RuntimeError("Some apt error"), - None, - does_not_raise(), # No RuntimeError - None, - id="noop_when_ua_tools_present", - ), - # logs and raises apt update errors - pytest.param( - None, - RuntimeError("Some apt error"), - None, - pytest.raises(RuntimeError, match="Some apt error"), - "Package update failed\nTraceback", - id="raises_update_errors", - ), - # logs and raises package install errors - pytest.param( - None, - None, - RuntimeError("Some install error"), - pytest.raises(RuntimeError, match="Some install error"), - "Failed to install ubuntu-advantage-tools\n", - id="raises_install_errors", - ), - ], - ) - def test_maybe_install_ua_tools( - self, - m_which, - which_return, - update_side_effect, - install_side_effect, - expectation, - log_msg, - caplog, - ): - m_which.return_value = which_return - cloud = mock.MagicMock() - if install_side_effect is None: - cloud.distro.update_package_sources.side_effect = ( - update_side_effect - ) - else: - cloud.distro.update_package_sources.return_value = None - cloud.distro.install_packages.side_effect = install_side_effect - with expectation: - maybe_install_ua_tools(cloud=cloud) - if log_msg is not None: - assert log_msg in caplog.text - - def test_maybe_install_ua_tools_happy_path(self, m_which): - """maybe_install_ua_tools installs ubuntu-advantage-tools.""" - m_which.return_value = None - cloud = mock.MagicMock() # No errors raised - maybe_install_ua_tools(cloud=cloud) - assert [ - mock.call() - ] == cloud.distro.update_package_sources.call_args_list - assert [ - mock.call(["ubuntu-advantage-tools"]) - ] == cloud.distro.install_packages.call_args_list - - -@mock.patch(f"{MPATH}.subp.subp") -class TestSetUAConfig: - def test_valid_config(self, m_subp, caplog): - ua_config = { - "http_proxy": "http://some-proxy:8088", - "https_proxy": "https://user:pass@some-proxy:8088", - "global_apt_https_proxy": "https://some-global-apt-proxy:8088/", - "global_apt_http_proxy": "http://some-global-apt-proxy:8088/", - "ua_apt_http_proxy": "http://10.0.10.10:3128", - "ua_apt_https_proxy": "https://10.0.10.10:3128", - } - set_ua_config(ua_config) - for ua_arg, redacted_arg in [ - ( - "http_proxy=http://some-proxy:8088", - "http_proxy=REDACTED", - ), - ( - "https_proxy=https://user:pass@some-proxy:8088", - "https_proxy=REDACTED", - ), - ( - "global_apt_https_proxy=https://some-global-apt-proxy:8088/", - "global_apt_https_proxy=REDACTED", - ), - ( - "global_apt_http_proxy=http://some-global-apt-proxy:8088/", - "global_apt_http_proxy=REDACTED", - ), - ( - "ua_apt_http_proxy=http://10.0.10.10:3128", - "ua_apt_http_proxy=REDACTED", - ), - ( - "ua_apt_https_proxy=https://10.0.10.10:3128", - "ua_apt_https_proxy=REDACTED", - ), - ]: - assert ( - mock.call( - ["pro", "config", "set", ua_arg], - logstring=["pro", "config", "set", redacted_arg], - ) - in m_subp.call_args_list - ) - assert f"Enabling UA config {redacted_arg}\n" in caplog.text - assert ua_arg not in caplog.text - - assert 6 == m_subp.call_count - - def test_ua_config_unset(self, m_subp, caplog): - ua_config = { - "https_proxy": "https://user:pass@some-proxy:8088", - "http_proxy": None, - } - set_ua_config(ua_config) - for call in [ - mock.call(["pro", "config", "unset", "http_proxy"]), - mock.call( - [ - "pro", - "config", - "set", - "https_proxy=https://user:pass@some-proxy:8088", - ], - logstring=["pro", "config", "set", "https_proxy=REDACTED"], - ), - ]: - assert call in m_subp.call_args_list - assert 2 == m_subp.call_count - assert "Enabling UA 
config https_proxy=REDACTED\n" in caplog.text - assert "https://user:pass@some-proxy:8088" not in caplog.text - assert "Disabling UA config for http_proxy\n" in caplog.text - - def test_ua_config_error_non_string_values(self, m_subp, caplog): - """ValueError raised for any values expected as string type.""" - ua_config = { - "global_apt_http_proxy": "noscheme", - "http_proxy": ["no-proxy"], - "https_proxy": 3.14, - } - match = re.escape( - "Invalid ubuntu_advantage configuration:\n" - "Expected URL scheme http/https for" - " ua:config:global_apt_http_proxy\n" - "Expected a URL for ua:config:http_proxy\n" - "Expected a URL for ua:config:https_proxy" - ) - with pytest.raises(ValueError, match=match): - set_ua_config(ua_config) - assert 0 == m_subp.call_count - assert not caplog.text - - def test_ua_config_unknown_prop(self, m_subp, caplog): - """On unknown config props, a log is issued and the prop is set.""" - ua_config = {"asdf": "qwer"} - set_ua_config(ua_config) - assert [ - mock.call( - ["pro", "config", "set", "asdf=qwer"], - logstring=["pro", "config", "set", "asdf=REDACTED"], - ) - ] == m_subp.call_args_list - assert "qwer" not in caplog.text - assert ( - "Not validating unknown ubuntu_advantage.config.asdf property\n" - in caplog.text - ) - - def test_ua_config_wrong_type(self, m_subp, caplog): - ua_config = ["asdf", "qwer"] - with pytest.raises( - RuntimeError, - match=( - "ubuntu_advantage: config should be a dict, not" - " a list; skipping enabling config parameters" - ), - ): - set_ua_config(ua_config) - assert 0 == m_subp.call_count - assert not caplog.text - - def test_set_ua_config_error(self, m_subp, caplog): - ua_config = { - "https_proxy": "https://user:pass@some-proxy:8088", - } - # Simulate UA error - m_subp.side_effect = subp.ProcessExecutionError( - "Invalid proxy: https://user:pass@some-proxy:8088" - ) - with pytest.raises( - RuntimeError, - match=re.escape( - "Failure enabling/disabling Ubuntu Advantage config(s):" - ' "https_proxy"' - ), - ): - set_ua_config(ua_config) - assert 1 == m_subp.call_count - assert "https://user:pass@some-proxy:8088" not in caplog.text - assert "Enabling UA config https_proxy=REDACTED\n" in caplog.text - assert 'Failure enabling/disabling "https_proxy":\n' in caplog.text - - def test_unset_ua_config_error(self, m_subp, caplog): - ua_config = {"https_proxy": None} - # Simulate UA error - m_subp.side_effect = subp.ProcessExecutionError( - "Error unsetting https_proxy" - ) - with pytest.raises( - RuntimeError, - match=re.escape( - "Failure enabling/disabling Ubuntu Advantage config(s): " - '"https_proxy"' - ), - ): - set_ua_config(ua_config) - assert 1 == m_subp.call_count - assert "https://user:pass@some-proxy:8088" not in caplog.text - assert "Disabling UA config for https_proxy\n" in caplog.text - assert 'Failure enabling/disabling "https_proxy":\n' in caplog.text diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_ubuntu_autoinstall.py cloud-init-24.1.3/tests/unittests/config/test_cc_ubuntu_autoinstall.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_ubuntu_autoinstall.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_ubuntu_autoinstall.py 2024-03-27 13:14:04.000000000 +0000 @@ -11,6 +11,7 @@ get_schema, validate_cloudconfig_schema, ) +from cloudinit.helpers import Paths from tests.unittests.helpers import skipUnlessJsonSchema from tests.unittests.util import get_cloud @@ -64,18 +65,20 @@ cc_ubuntu_autoinstall.validate_config_schema(src_cfg) +@mock.patch(MODPATH + 
"util.wait_for_snap_seeded") @mock.patch(MODPATH + "subp") class TestHandleAutoinstall: """Test cc_ubuntu_autoinstall handling of config.""" @pytest.mark.parametrize( - "cfg,snap_list,subp_calls,logs", + "cfg,snap_list,subp_calls,logs,snap_wait_called", [ pytest.param( {}, SAMPLE_SNAP_LIST_OUTPUT, [], ["Skipping module named name, no 'autoinstall' key"], + False, id="skip_no_cfg", ), pytest.param( @@ -87,6 +90,7 @@ " installer snap packages to be present: subiquity," " ubuntu-desktop-installer" ], + True, id="valid_autoinstall_schema_checks_snaps", ), pytest.param( @@ -97,6 +101,7 @@ "Valid autoinstall schema. Config will be processed by" " subiquity" ], + True, id="valid_autoinstall_schema_sees_subiquity", ), pytest.param( @@ -107,19 +112,33 @@ "Valid autoinstall schema. Config will be processed by" " ubuntu-desktop-installer" ], + True, id="valid_autoinstall_schema_sees_desktop_installer", ), ], ) def test_handle_autoinstall_cfg( - self, subp, cfg, snap_list, subp_calls, logs, caplog + self, + subp, + wait_for_snap_seeded, + cfg, + snap_list, + subp_calls, + logs, + snap_wait_called, + caplog, + tmpdir, ): subp.return_value = snap_list, "" - cloud = get_cloud(distro="ubuntu") + cloud = get_cloud(distro="ubuntu", paths=Paths({"cloud_dir": tmpdir})) cc_ubuntu_autoinstall.handle("name", cfg, cloud, None) assert subp_calls == subp.call_args_list for log in logs: assert log in caplog.text + if snap_wait_called: + wait_for_snap_seeded.assert_called_once_with(cloud) + else: + wait_for_snap_seeded.assert_not_called() class TestAutoInstallSchema: diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_ubuntu_pro.py cloud-init-24.1.3/tests/unittests/config/test_cc_ubuntu_pro.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_ubuntu_pro.py 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_ubuntu_pro.py 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,1438 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+import json +import logging +import re +import sys +from collections import namedtuple + +import jsonschema +import pytest + +from cloudinit import subp +from cloudinit.config.cc_ubuntu_pro import ( + _attach, + _auto_attach, + _should_auto_attach, + configure_pro, + handle, + maybe_install_ua_tools, + set_pro_config, + validate_schema_features, +) +from cloudinit.config.schema import ( + SchemaValidationError, + get_schema, + validate_cloudconfig_schema, +) +from cloudinit.util import Version +from tests.unittests.helpers import does_not_raise, mock, skipUnlessJsonSchema +from tests.unittests.util import get_cloud + +# Module path used in mocks +MPATH = "cloudinit.config.cc_ubuntu_pro" + + +class FakeUserFacingError(Exception): + def __init__(self, msg: str): + self.msg = msg + + +class FakeAlreadyAttachedError(FakeUserFacingError): + pass + + +class FakeAlreadyAttachedOnPROError(FakeUserFacingError): + pass + + +@pytest.fixture +def fake_uaclient(mocker): + """Mocks `uaclient` module""" + + mocker.patch.dict("sys.modules") + m_uaclient = mock.Mock() + + sys.modules["uaclient"] = m_uaclient + + # Exceptions + _exceptions = namedtuple( + "exceptions", + [ + "UserFacingError", + "AlreadyAttachedError", + ], + )( + FakeUserFacingError, + FakeAlreadyAttachedError, + ) + sys.modules["uaclient.api.exceptions"] = _exceptions + + +@pytest.mark.usefixtures("fake_uaclient") +@mock.patch(f"{MPATH}.subp.subp") +class TestConfigurePro: + def test_configure_pro_attach_error(self, m_subp): + """Errors from pro attach command are raised.""" + m_subp.side_effect = subp.ProcessExecutionError( + "Invalid token SomeToken" + ) + match = ( + "Failure attaching Ubuntu Pro:\nUnexpected error while" + " running command.\nCommand: -\nExit code: -\nReason: -\n" + "Stdout: Invalid token REDACTED\nStderr: -" + ) + with pytest.raises(RuntimeError, match=match): + configure_pro(token="SomeToken") + + @pytest.mark.parametrize( + "kwargs, call_args_list, log_record_tuples", + [ + # When token is provided, attach to pro using the token. + pytest.param( + {"token": "SomeToken"}, + [ + mock.call( + ["pro", "attach", "SomeToken"], + logstring=["pro", "attach", "REDACTED"], + rcs={0, 2}, + ) + ], + [ + ( + MPATH, + logging.DEBUG, + "Attaching to Ubuntu Pro. pro attach REDACTED", + ) + ], + id="with_token", + ), + # When services is an empty list, do not auto-enable attach. + pytest.param( + {"token": "SomeToken", "enable": []}, + [ + mock.call( + ["pro", "attach", "SomeToken"], + logstring=["pro", "attach", "REDACTED"], + rcs={0, 2}, + ) + ], + [ + ( + MPATH, + logging.DEBUG, + "Attaching to Ubuntu Pro. pro attach REDACTED", + ) + ], + id="with_empty_services", + ), + # When services is a list, only enable specific services. + pytest.param( + {"token": "SomeToken", "enable": ["fips"]}, + [ + mock.call( + ["pro", "attach", "--no-auto-enable", "SomeToken"], + logstring=[ + "pro", + "attach", + "--no-auto-enable", + "REDACTED", + ], + rcs={0, 2}, + ), + mock.call( + [ + "pro", + "enable", + "--assume-yes", + "--format", + "json", + "fips", + ], + capture=True, + rcs={0, 1}, + ), + ], + [ + ( + MPATH, + logging.DEBUG, + "Attaching to Ubuntu Pro. 
pro attach" + " --no-auto-enable REDACTED", + ) + ], + id="with_specific_services", + ), + # When services a string, treat as singleton list and warn + pytest.param( + {"token": "SomeToken", "enable": "fips"}, + [ + mock.call( + ["pro", "attach", "--no-auto-enable", "SomeToken"], + logstring=[ + "pro", + "attach", + "--no-auto-enable", + "REDACTED", + ], + rcs={0, 2}, + ), + mock.call( + [ + "pro", + "enable", + "--assume-yes", + "--format", + "json", + "fips", + ], + capture=True, + rcs={0, 1}, + ), + ], + [ + ( + MPATH, + logging.DEBUG, + "Attaching to Ubuntu Pro. pro attach" + " --no-auto-enable REDACTED", + ), + ( + MPATH, + logging.WARNING, + "ubuntu_pro: enable should be a list, not a " + "string; treating as a single enable", + ), + ], + id="with_string_services", + ), + # When services not string or list, warn but still attach + pytest.param( + {"token": "SomeToken", "enable": {"deffo": "wont work"}}, + [ + mock.call( + ["pro", "attach", "SomeToken"], + logstring=["pro", "attach", "REDACTED"], + rcs={0, 2}, + ) + ], + [ + ( + MPATH, + logging.DEBUG, + "Attaching to Ubuntu Pro. pro attach REDACTED", + ), + ( + MPATH, + logging.WARNING, + "ubuntu_pro: enable should be a list, not a" + " dict; skipping enabling services", + ), + ], + id="with_weird_services", + ), + ], + ) + @mock.patch(f"{MPATH}.maybe_install_ua_tools", mock.MagicMock()) + def test_configure_pro_attach( + self, m_subp, kwargs, call_args_list, log_record_tuples, caplog + ): + m_subp.return_value = subp.SubpResult(json.dumps({"errors": []}), "") + configure_pro(**kwargs) + assert call_args_list == m_subp.call_args_list + for record_tuple in log_record_tuples: + assert record_tuple in caplog.record_tuples + + def test_configure_pro_already_attached(self, m_subp, caplog): + """pro is already attached to an subscription""" + m_subp.rcs = 2 + configure_pro(token="SomeToken") + assert m_subp.call_args_list == [ + mock.call( + ["pro", "attach", "SomeToken"], + logstring=["pro", "attach", "REDACTED"], + rcs={0, 2}, + ) + ] + assert ( + MPATH, + logging.DEBUG, + "Attaching to Ubuntu Pro. 
pro attach REDACTED", + ) in caplog.record_tuples + + def test_configure_pro_attach_on_service_enabled( + self, m_subp, caplog, fake_uaclient + ): + """retry enabling an already enabled service""" + + def fake_subp(cmd, capture=None, rcs=None, logstring=None): + fail_cmds = [ + "pro", + "enable", + "--assume-yes", + "--format", + "json", + "livepatch", + ] + if cmd == fail_cmds and capture: + response = { + "errors": [ + { + "message": "Does not matter", + "message_code": "service-already-enabled", + "service": cmd[-1], + "type": "service", + } + ] + } + return subp.SubpResult(json.dumps(response), "") + + m_subp.side_effect = fake_subp + + configure_pro(token="SomeToken", enable=["livepatch"]) + assert m_subp.call_args_list == [ + mock.call( + ["pro", "attach", "--no-auto-enable", "SomeToken"], + logstring=["pro", "attach", "--no-auto-enable", "REDACTED"], + rcs={0, 2}, + ), + mock.call( + [ + "pro", + "enable", + "--assume-yes", + "--format", + "json", + "livepatch", + ], + capture=True, + rcs={0, 1}, + ), + ] + assert ( + MPATH, + logging.DEBUG, + "Service `livepatch` already enabled.", + ) in caplog.record_tuples + + def test_configure_pro_attach_on_service_error(self, m_subp, caplog): + """all services should be enabled and then any failures raised""" + + def fake_subp(cmd, capture=None, rcs=None, logstring=None): + fail_cmd = [ + "pro", + "enable", + "--assume-yes", + "--format", + "json", + ] + if cmd[: len(fail_cmd)] == fail_cmd and capture: + response = { + "errors": [ + { + "message": f"Invalid {svc} credentials", + "message_code": "some-code", + "service": svc, + "type": "service", + } + for svc in ["esm", "cc"] + ] + + [ + { + "message": "Cannot enable unknown service 'asdf'", + "message_code": "invalid-service-or-failure", + "service": None, + "type": "system", + } + ] + } + return subp.SubpResult(json.dumps(response), "") + return subp.SubpResult(json.dumps({"errors": []}), "") + + m_subp.side_effect = fake_subp + + with pytest.raises( + RuntimeError, + match=re.escape("Failure enabling Ubuntu Pro service(s): esm, cc"), + ): + configure_pro( + token="SomeToken", enable=["esm", "cc", "fips", "asdf"] + ) + assert m_subp.call_args_list == [ + mock.call( + ["pro", "attach", "--no-auto-enable", "SomeToken"], + logstring=["pro", "attach", "--no-auto-enable", "REDACTED"], + rcs={0, 2}, + ), + mock.call( + [ + "pro", + "enable", + "--assume-yes", + "--format", + "json", + "esm", + "cc", + "fips", + "asdf", + ], + capture=True, + rcs={0, 1}, + ), + ] + assert ( + MPATH, + logging.WARNING, + "Failure enabling `esm`: Invalid esm credentials", + ) in caplog.record_tuples + assert ( + MPATH, + logging.WARNING, + "Failure enabling `cc`: Invalid cc credentials", + ) in caplog.record_tuples + assert ( + MPATH, + logging.WARNING, + "Failure of type `system`: Cannot enable unknown service 'asdf'", + ) in caplog.record_tuples + assert 'Failure enabling "fips"' not in caplog.text + + def test_pro_enable_unexpected_error_codes(self, m_subp): + def fake_subp(cmd, capture=None, **kwargs): + if cmd[:2] == ["pro", "enable"] and capture: + raise subp.ProcessExecutionError(exit_code=255) + return subp.SubpResult(json.dumps({"errors": []}), "") + + m_subp.side_effect = fake_subp + + with pytest.raises( + RuntimeError, + match=re.escape("Error while enabling service(s): esm"), + ): + configure_pro(token="SomeToken", enable=["esm"]) + + def test_pro_enable_non_json_response(self, m_subp): + def fake_subp(cmd, capture=None, **kwargs): + if cmd[:2] == ["pro", "enable"] and capture: + return 
subp.SubpResult("I dream to be a Json", "") + return subp.SubpResult(json.dumps({"errors": []}), "") + + m_subp.side_effect = fake_subp + + with pytest.raises( + RuntimeError, + match=re.escape("Pro response was not json: I dream to be a Json"), + ): + configure_pro(token="SomeToken", enable=["esm"]) + + +JSONSCHEMA_SKIP_REASON = ( + "deprecation unraised as jsonschema ver can't merge $defs and inline keys" +) + + +class TestUbuntuProSchema: + @pytest.mark.parametrize( + "config, expectation, skip_reason", + [ + pytest.param({"ubuntu_pro": {}}, does_not_raise(), ""), + pytest.param( + {"ubuntu_advantage": {}}, + pytest.raises( + SchemaValidationError, + match=re.escape( + "ubuntu_advantage: Deprecated in version 24.1." + " Use ``ubuntu_pro`` instead" + ), + ), + # If __version__ no longer exists on jsonschema, that means + # we're using a high enough version of jsonschema to not need + # to skip this test. + JSONSCHEMA_SKIP_REASON + if Version.from_str(getattr(jsonschema, "__version__", "999")) + < Version(4) + else "", + id="deprecation_of_ubuntu_advantage_skip_old_json", + ), + # Strict keys + pytest.param( + {"ubuntu_pro": {"token": "win", "invalidkey": ""}}, + pytest.raises( + SchemaValidationError, + match=re.escape( + "ubuntu_pro: Additional properties are not" + " allowed ('invalidkey" + ), + ), + "", + id="additional_properties", + ), + pytest.param( + {"ubuntu_pro": {"features": {"disable_auto_attach": True}}}, + does_not_raise(), + "", + id="disable_auto_attach", + ), + pytest.param( + { + "ubuntu_pro": { + "features": {"disable_auto_attach": False}, + "enable": ["fips"], + "enable_beta": ["realtime-kernel"], + "token": "", + } + }, + does_not_raise(), + "", + id="pro_custom_services", + ), + pytest.param( + { + "ubuntu_pro": { + "enable": ["fips"], + "enable_beta": ["realtime-kernel"], + "token": "", + } + }, + does_not_raise(), + "", + id="non_pro_beta_services", + ), + pytest.param( + { + "ubuntu_pro": { + "features": {"asdf": False}, + "enable": ["fips"], + "enable_beta": ["realtime-kernel"], + "token": "", + } + }, + pytest.raises( + SchemaValidationError, + match=re.escape( + "ubuntu_pro.features: Additional properties are" + " not allowed ('asdf'" + ), + ), + "", + id="pro_additional_features", + ), + pytest.param( + { + "ubuntu_pro": { + "enable": ["fips"], + "token": "", + "config": { + "http_proxy": "http://some-proxy:8088", + "https_proxy": "https://some-proxy:8088", + "global_apt_https_proxy": "https://some-global-apt-proxy:8088/", # noqa: E501 + "global_apt_http_proxy": "http://some-global-apt-proxy:8088/", # noqa: E501 + "ua_apt_http_proxy": "http://10.0.10.10:3128", + "ua_apt_https_proxy": "https://10.0.10.10:3128", + }, + } + }, + does_not_raise(), + "", + id="pro_config_valid_set", + ), + pytest.param( + { + "ubuntu_pro": { + "enable": ["fips"], + "token": "", + "config": { + "http_proxy": None, + "https_proxy": None, + "global_apt_https_proxy": None, + "global_apt_http_proxy": None, + "ua_apt_http_proxy": None, + "ua_apt_https_proxy": None, + }, + } + }, + does_not_raise(), + "", + id="pro_config_valid_unset", + ), + pytest.param( + { + "ubuntu_pro": { + "enable": ["fips"], + "token": "", + "config": ["http_proxy=http://some-proxy:8088"], + } + }, + pytest.raises( + SchemaValidationError, + match=re.escape( + "errors: ubuntu_pro.config:" + " ['http_proxy=http://some-proxy:8088']" + ), + ), + "", + id="pro_config_invalid_type", + ), + pytest.param( + { + "ubuntu_pro": { + "enable": ["fips"], + "token": "", + "config": { + "http_proxy": 8888, + "https_proxy": 
["http://some-proxy:8088"], + }, + } + }, + pytest.raises( + SchemaValidationError, + match=re.escape( + "errors: ubuntu_pro.config.http_proxy: 8888" + " is not of type 'string', 'null'," + " ubuntu_pro.config.https_proxy:" + " ['http://some-proxy:8088']" + ), + ), + "", + id="pro_config_invalid_proxy_type", + ), + pytest.param( + { + "ubuntu_pro": { + "enable": ["fips"], + "token": "", + "config": { + "http_proxy": "http://some-proxy:8088", + "hola": "adios", + }, + } + }, + does_not_raise(), + "", + id="pro_config_unknown_props_allowed", + ), + ], + ) + @skipUnlessJsonSchema() + def test_schema_validation(self, config, expectation, skip_reason, caplog): + if skip_reason: + pytest.skip(skip_reason) + with expectation: + validate_cloudconfig_schema(config, get_schema(), strict=True) + + @pytest.mark.parametrize( + "ua_section, expectation, log_msgs", + [ + ({}, does_not_raise(), None), + ({"features": {}}, does_not_raise(), None), + ( + {"features": {"disable_auto_attach": True}}, + does_not_raise(), + None, + ), + ( + {"features": {"disable_auto_attach": False}}, + does_not_raise(), + None, + ), + ( + {"features": [0, 1]}, + pytest.raises( + RuntimeError, + match=( + "'ubuntu_pro.features' should be a dict, not a list" + ), + ), + ["'ubuntu_pro.features' should be a dict, not a list\n"], + ), + ( + {"features": {"disable_auto_attach": [0, 1]}}, + pytest.raises( + RuntimeError, + match=( + "'ubuntu_pro.features.disable_auto_attach'" + " should be a bool, not a list" + ), + ), + [ + "'ubuntu_pro.features.disable_auto_attach' should be" + " a bool, not a list\n" + ], + ), + ], + ) + def test_validate_schema_features( + self, ua_section, expectation, log_msgs, caplog + ): + with expectation: + validate_schema_features(ua_section) + if log_msgs is not None: + for log_msg in log_msgs: + assert log_msg in caplog.text + else: + assert not caplog.text + + +class TestHandle: + + cloud = get_cloud() + + @pytest.mark.parametrize( + [ + "cfg", + "cloud", + "log_record_tuples", + "maybe_install_call_args_list", + "set_pro_config_call_args_list", + "configure_pro_call_args_list", + ], + [ + # When no ua-related configuration is provided, nothing happens. + pytest.param( + {}, + None, + [ + ( + MPATH, + logging.DEBUG, + "Skipping module named nomatter, no 'ubuntu_pro'" + " configuration found", + ) + ], + [], + [], + [], + id="no_config", + ), + # If ubuntu_pro is provided, try installing ua-tools package. + pytest.param( + {"ubuntu_pro": {"token": "valid"}}, + cloud, + [], + [mock.call(cloud)], + [mock.call(None)], + None, + id="tries_to_install_ubuntu_advantage_tools", + ), + # If ubuntu_pro config provided, configure it. + pytest.param( + { + "ubuntu_pro": { + "token": "valid", + "config": {"http_proxy": "http://proxy.org"}, + } + }, + cloud, + [], + None, + [mock.call({"http_proxy": "http://proxy.org"})], + None, + id="set_pro_config", + ), + # All ubuntu_pro config keys are passed to configure_pro. + pytest.param( + {"ubuntu_pro": {"token": "token", "enable": ["esm"]}}, + cloud, + [], + [mock.call(cloud)], + [mock.call(None)], + [mock.call(token="token", enable=["esm"])], + id="passes_credentials_and_services_to_configure_pro", + ), + # Warning when ubuntu-advantage key is present with new config + pytest.param( + {"ubuntu-advantage": {"token": "token", "enable": ["esm"]}}, + None, + [ + ( + MPATH, + logging.WARNING, + "Deprecated configuration key(s) provided:" + ' ubuntu-advantage. 
Expected "ubuntu_pro"; will' + " attempt to continue.", + ), + ], + None, + [mock.call(None)], + [mock.call(token="token", enable=["esm"])], + id="warns_on_deprecated_ubuntu_pro_key_w_config", + ), + # Warning with beta services during attach + pytest.param( + { + "ubuntu_pro": { + "token": "token", + "enable": ["esm"], + "enable_beta": ["realtime-kernel"], + } + }, + None, + [ + ( + MPATH, + logging.DEBUG, + "Ignoring `ubuntu_pro.enable_beta` services in" + " Pro attach: realtime-kernel", + ) + ], + None, + [mock.call(None)], + [mock.call(token="token", enable=["esm"])], + id="warns_on_enable_beta_in_attach", + ), + # ubuntu_pro should be preferred over ubuntu-advantage + pytest.param( + { + "ubuntu-advantage": {"token": "nope", "enable": ["wrong"]}, + "ubuntu_pro": {"token": "token", "enable": ["esm"]}, + }, + None, + [ + ( + MPATH, + logging.WARNING, + "Deprecated configuration key(s) provided:" + ' ubuntu-advantage. Expected "ubuntu_pro"; will' + " attempt to continue.", + ), + ( + MPATH, + logging.WARNING, + "Ignoring deprecated key ubuntu-advantage and" + " preferring ubuntu_pro config", + ), + ], + None, + [mock.call(None)], + [mock.call(token="token", enable=["esm"])], + id="prefers_new_style_config", + ), + ], + ) + @mock.patch(f"{MPATH}._should_auto_attach", return_value=False) + @mock.patch(f"{MPATH}._auto_attach") + @mock.patch(f"{MPATH}.configure_pro") + @mock.patch(f"{MPATH}.set_pro_config") + @mock.patch(f"{MPATH}.maybe_install_ua_tools") + def test_handle_attach( + self, + m_maybe_install_ua_tools, + m_set_pro_config, + m_configure_pro, + m_auto_attach, + m_should_auto_attach, + cfg, + cloud, + log_record_tuples, + maybe_install_call_args_list, + set_pro_config_call_args_list, + configure_pro_call_args_list, + caplog, + ): + """Non-Pro schemas and instance.""" + handle("nomatter", cfg=cfg, cloud=cloud, args=None) + for record_tuple in log_record_tuples: + assert record_tuple in caplog.record_tuples + if maybe_install_call_args_list is not None: + assert ( + maybe_install_call_args_list + == m_maybe_install_ua_tools.call_args_list + ) + if set_pro_config_call_args_list is not None: + assert ( + set_pro_config_call_args_list + == m_set_pro_config.call_args_list + ) + if configure_pro_call_args_list is not None: + assert ( + configure_pro_call_args_list == m_configure_pro.call_args_list + ) + assert [] == m_auto_attach.call_args_list + + @pytest.mark.parametrize( + [ + "cfg", + "cloud", + "log_record_tuples", + "auto_attach_side_effect", + "should_auto_attach", + "auto_attach_call_args_list", + "attach_call_args_list", + "expectation", + ], + [ + # When auto_attach successes, no call to configure_pro. + pytest.param( + {"ubuntu_pro": {"features": {"disable_auto_attach": False}}}, + cloud, + [], + None, # auto_attach successes + True, # Pro instance + [ + mock.call({"features": {"disable_auto_attach": False}}) + ], # auto_attach_call_args_list + [], # attach_call_args_list + does_not_raise(), + id="auto_attach_success", + ), + # When auto_attach fails in a Pro instance, no call to + # configure_pro. + pytest.param( + {"ubuntu_pro": {"features": {"disable_auto_attach": False}}}, + cloud, + [], + RuntimeError("Auto attach error"), + True, # Pro instance + [ + mock.call({"features": {"disable_auto_attach": False}}) + ], # auto_attach_call_args_list + [], # attach_call_args_list + pytest.raises(RuntimeError, match="Auto attach error"), + id="auto_attach_error", + ), + # In a non-Pro instance with token, fallback to normal attach. 
+ pytest.param( + { + "ubuntu_pro": { + "features": {"disable_auto_attach": False}, + "token": "token", + } + }, + cloud, + [], + None, + False, # non-Pro instance + [], # auto_attach_call_args_list + [ + mock.call( + { + "features": {"disable_auto_attach": False}, + "token": "token", + }, + ) + ], # attach_call_args_list + does_not_raise(), + id="not_pro_with_token", + ), + # In a non-Pro instance with enable, fallback to normal attach. + pytest.param( + {"ubuntu_pro": {"enable": ["esm"]}}, + cloud, + [], + None, + False, # non-Pro instance + [], # auto_attach_call_args_list + [ + mock.call( + { + "enable": ["esm"], + }, + ) + ], # attach_call_args_list + does_not_raise(), + id="not_pro_with_enable", + ), + ], + ) + @mock.patch(f"{MPATH}._should_auto_attach") + @mock.patch(f"{MPATH}._auto_attach") + @mock.patch(f"{MPATH}._attach") + def test_handle_auto_attach_vs_attach( + self, + m_attach, + m_auto_attach, + m_should_auto_attach, + cfg, + cloud, + log_record_tuples, + auto_attach_side_effect, + should_auto_attach, + auto_attach_call_args_list, + attach_call_args_list, + expectation, + caplog, + ): + m_should_auto_attach.return_value = should_auto_attach + if auto_attach_side_effect is not None: + m_auto_attach.side_effect = auto_attach_side_effect + + with expectation: + handle("nomatter", cfg=cfg, cloud=cloud, args=None) + + for record_tuple in log_record_tuples: + assert record_tuple in caplog.record_tuples + if attach_call_args_list is not None: + assert attach_call_args_list == m_attach.call_args_list + else: + assert [] == m_attach.call_args_list + assert auto_attach_call_args_list == m_auto_attach.call_args_list + + @pytest.mark.parametrize("is_pro", [False, True]) + @pytest.mark.parametrize( + "cfg", + [ + ( + { + "ubuntu_pro": { + "features": {"disable_auto_attach": False}, + } + } + ), + ( + { + "ubuntu_pro": { + "features": {"disable_auto_attach": True}, + } + } + ), + ], + ) + @mock.patch(f"{MPATH}._should_auto_attach") + @mock.patch(f"{MPATH}._auto_attach") + @mock.patch(f"{MPATH}._attach") + def test_no_fallback_attach( + self, + m_attach, + m_auto_attach, + m_should_auto_attach, + cfg, + is_pro, + ): + """Checks that attach is not called in the case where we want only to + enable or disable pro auto-attach. + """ + m_should_auto_attach.return_value = is_pro + handle("nomatter", cfg=cfg, cloud=self.cloud, args=None) + assert not m_attach.call_args_list + + @pytest.mark.parametrize( + "cfg, handle_kwargs, match", + [ + pytest.param( + {"ubuntu-advantage": {"commands": "nogo"}}, + dict(cloud=None, args=None), + ( + 'Deprecated configuration "ubuntu-advantage: commands" ' + 'provided. Expected "token"' + ), + id="key_dashed", + ), + pytest.param( + {"ubuntu_pro": {"commands": "nogo"}}, + dict(cloud=None, args=None), + ( + 'Deprecated configuration "ubuntu-advantage: commands" ' + 'provided. 
Expected "token"' + ), + id="key_underscore", + ), + ], + ) + @mock.patch("%s.configure_pro" % MPATH) + def test_handle_error_on_deprecated_commands_key_dashed( + self, m_configure_pro, cfg, handle_kwargs, match + ): + with pytest.raises(RuntimeError, match=match): + handle("nomatter", cfg=cfg, **handle_kwargs) + assert 0 == m_configure_pro.call_count + + @pytest.mark.parametrize( + "cfg, match", + [ + pytest.param( + {"ubuntu_pro": [0, 1]}, + "'ubuntu_pro' should be a dict, not a list", + id="on_non_dict_config", + ), + pytest.param( + {"ubuntu_pro": {"features": [0, 1]}}, + "'ubuntu_pro.features' should be a dict, not a list", + id="on_non_dict_ua_section", + ), + ], + ) + def test_handle_errors(self, cfg, match): + with pytest.raises(RuntimeError, match=match): + handle( + "nomatter", + cfg=cfg, + cloud=self.cloud, + args=None, + ) + + @mock.patch(f"{MPATH}.subp.subp") + def test_pro_config_error_invalid_url(self, m_subp, caplog): + """Errors from pro config command are raised.""" + cfg = { + "ubuntu_pro": { + "token": "SomeToken", + "config": {"http_proxy": "not-a-valid-url"}, + } + } + m_subp.side_effect = subp.ProcessExecutionError( + 'Failure enabling "http_proxy"' + ) + with pytest.raises( + ValueError, + match=re.escape( + "Invalid ubuntu_pro configuration:\nExpected URL scheme" + " http/https for ua:config:http_proxy" + ), + ): + handle( + "nomatter", + cfg=cfg, + cloud=self.cloud, + args=None, + ) + assert not caplog.text + + @mock.patch(f"{MPATH}._should_auto_attach", return_value=False) + @mock.patch(f"{MPATH}.subp.subp") + def test_fallback_to_attach_no_token( + self, m_subp, m_should_auto_attach, caplog + ): + cfg = {"ubuntu_pro": {"enable": ["esm"]}} + with pytest.raises( + RuntimeError, + match=re.escape( + "`ubuntu_pro.token` required in non-Pro Ubuntu instances." + ), + ): + handle( + "nomatter", + cfg=cfg, + cloud=self.cloud, + args=None, + ) + assert [] == m_subp.call_args_list + assert ( + "`ubuntu_pro.token` required in non-Pro Ubuntu instances.\n" + ) in caplog.text + + +class TestShouldAutoAttach: + def test_should_auto_attach_error(self, caplog, fake_uaclient): + m_should_auto_attach = mock.Mock() + m_should_auto_attach.should_auto_attach.side_effect = ( + FakeUserFacingError("Some error") # noqa: E501 + ) + sys.modules[ + "uaclient.api.u.pro.attach.auto.should_auto_attach.v1" + ] = m_should_auto_attach + assert not _should_auto_attach({}) + assert "Error during `should_auto_attach`: Some error" in caplog.text + assert ( + "Unable to determine if this is an Ubuntu Pro instance." + " Fallback to normal Pro attach." 
in caplog.text + ) + + @pytest.mark.parametrize( + "ua_section, expected_result", + [ + ({}, None), + ({"features": {"disable_auto_attach": False}}, None), + # The user explicitly disables auto-attach, therefore we do not do + # it: + ({"features": {"disable_auto_attach": True}}, False), + ], + ) + def test_happy_path( + self, ua_section, expected_result, caplog, fake_uaclient + ): + m_should_auto_attach = mock.Mock() + sys.modules[ + "uaclient.api.u.pro.attach.auto.should_auto_attach.v1" + ] = m_should_auto_attach + should_auto_attach_value = object() + m_should_auto_attach.should_auto_attach.return_value.should_auto_attach = ( # noqa: E501 + should_auto_attach_value + ) + if expected_result is None: # Pro API does respond + assert should_auto_attach_value == _should_auto_attach(ua_section) + assert ( + "Checking if the instance can be attached to Ubuntu Pro took" + in caplog.text + ) + else: # cloud-init does respond + assert expected_result == _should_auto_attach(ua_section) + assert not caplog.text + + +class TestAutoAttach: + + ua_section: dict = {} + + def test_full_auto_attach_error(self, caplog, mocker, fake_uaclient): + mocker.patch.dict("sys.modules") + sys.modules["uaclient.config"] = mock.Mock() + m_full_auto_attach = mock.Mock() + m_full_auto_attach.full_auto_attach.side_effect = FakeUserFacingError( + "Some error" + ) + sys.modules[ + "uaclient.api.u.pro.attach.auto.full_auto_attach.v1" + ] = m_full_auto_attach + expected_msg = "Error during `full_auto_attach`: Some error" + with pytest.raises(RuntimeError, match=re.escape(expected_msg)): + _auto_attach(self.ua_section) + assert expected_msg in caplog.text + + def test_happy_path(self, caplog, mocker, fake_uaclient): + mocker.patch.dict("sys.modules") + sys.modules["uaclient.config"] = mock.Mock() + sys.modules[ + "uaclient.api.u.pro.attach.auto.full_auto_attach.v1" + ] = mock.Mock() + _auto_attach(self.ua_section) + assert "Attaching to Ubuntu Pro took" in caplog.text + + +class TestAttach: + @mock.patch(f"{MPATH}.configure_pro") + def test_attach_without_token_raises_error(self, m_configure_pro): + with pytest.raises( + RuntimeError, + match=("`ubuntu_pro.token` required in non-Pro Ubuntu instances."), + ): + _attach({"enable": ["esm"]}) + assert [] == m_configure_pro.call_args_list + + +@mock.patch(f"{MPATH}.subp.which") +class TestMaybeInstallUATools: + @pytest.mark.parametrize( + [ + "which_return", + "update_side_effect", + "install_side_effect", + "expectation", + "log_msg", + ], + [ + # Do nothing if ubuntu-advantage-tools already exists. 
+ pytest.param( + "/usr/bin/ua", # already installed + RuntimeError("Some apt error"), + None, + does_not_raise(), # No RuntimeError + None, + id="noop_when_ua_tools_present", + ), + # logs and raises apt update errors + pytest.param( + None, + RuntimeError("Some apt error"), + None, + pytest.raises(RuntimeError, match="Some apt error"), + "Package update failed\nTraceback", + id="raises_update_errors", + ), + # logs and raises package install errors + pytest.param( + None, + None, + RuntimeError("Some install error"), + pytest.raises(RuntimeError, match="Some install error"), + "Failed to install ubuntu-advantage-tools\n", + id="raises_install_errors", + ), + ], + ) + def test_maybe_install_ua_tools( + self, + m_which, + which_return, + update_side_effect, + install_side_effect, + expectation, + log_msg, + caplog, + ): + m_which.return_value = which_return + cloud = mock.MagicMock() + if install_side_effect is None: + cloud.distro.update_package_sources.side_effect = ( + update_side_effect + ) + else: + cloud.distro.update_package_sources.return_value = None + cloud.distro.install_packages.side_effect = install_side_effect + with expectation: + maybe_install_ua_tools(cloud=cloud) + if log_msg is not None: + assert log_msg in caplog.text + + def test_maybe_install_ua_tools_happy_path(self, m_which): + """maybe_install_ua_tools installs ubuntu-advantage-tools.""" + m_which.return_value = None + cloud = mock.MagicMock() # No errors raised + maybe_install_ua_tools(cloud=cloud) + assert [ + mock.call() + ] == cloud.distro.update_package_sources.call_args_list + assert [ + mock.call(["ubuntu-advantage-tools"]) + ] == cloud.distro.install_packages.call_args_list + + +@mock.patch(f"{MPATH}.subp.subp") +class TestSetProConfig: + def test_valid_config(self, m_subp, caplog): + pro_config = { + "http_proxy": "http://some-proxy:8088", + "https_proxy": "https://user:pass@some-proxy:8088", + "global_apt_https_proxy": "https://some-global-apt-proxy:8088/", + "global_apt_http_proxy": "http://some-global-apt-proxy:8088/", + "ua_apt_http_proxy": "http://10.0.10.10:3128", + "ua_apt_https_proxy": "https://10.0.10.10:3128", + } + set_pro_config(pro_config) + for ua_arg, redacted_arg in [ + ( + "http_proxy=http://some-proxy:8088", + "http_proxy=REDACTED", + ), + ( + "https_proxy=https://user:pass@some-proxy:8088", + "https_proxy=REDACTED", + ), + ( + "global_apt_https_proxy=https://some-global-apt-proxy:8088/", + "global_apt_https_proxy=REDACTED", + ), + ( + "global_apt_http_proxy=http://some-global-apt-proxy:8088/", + "global_apt_http_proxy=REDACTED", + ), + ( + "ua_apt_http_proxy=http://10.0.10.10:3128", + "ua_apt_http_proxy=REDACTED", + ), + ( + "ua_apt_https_proxy=https://10.0.10.10:3128", + "ua_apt_https_proxy=REDACTED", + ), + ]: + assert ( + mock.call( + ["pro", "config", "set", ua_arg], + logstring=["pro", "config", "set", redacted_arg], + ) + in m_subp.call_args_list + ) + assert f"Enabling Pro config {redacted_arg}\n" in caplog.text + assert ua_arg not in caplog.text + + assert 6 == m_subp.call_count + + def test_pro_config_unset(self, m_subp, caplog): + pro_config = { + "https_proxy": "https://user:pass@some-proxy:8088", + "http_proxy": None, + } + set_pro_config(pro_config) + for call in [ + mock.call(["pro", "config", "unset", "http_proxy"]), + mock.call( + [ + "pro", + "config", + "set", + "https_proxy=https://user:pass@some-proxy:8088", + ], + logstring=["pro", "config", "set", "https_proxy=REDACTED"], + ), + ]: + assert call in m_subp.call_args_list + assert 2 == m_subp.call_count + assert 
"Enabling Pro config https_proxy=REDACTED\n" in caplog.text + assert "https://user:pass@some-proxy:8088" not in caplog.text + assert "Disabling Pro config for http_proxy\n" in caplog.text + + def test_pro_config_error_non_string_values(self, m_subp, caplog): + """ValueError raised for any values expected as string type.""" + pro_config = { + "global_apt_http_proxy": "noscheme", + "http_proxy": ["no-proxy"], + "https_proxy": 3.14, + } + match = re.escape( + "Invalid ubuntu_pro configuration:\n" + "Expected URL scheme http/https for" + " ua:config:global_apt_http_proxy\n" + "Expected a URL for ua:config:http_proxy\n" + "Expected a URL for ua:config:https_proxy" + ) + with pytest.raises(ValueError, match=match): + set_pro_config(pro_config) + assert 0 == m_subp.call_count + assert not caplog.text + + def test_pro_config_unknown_prop(self, m_subp, caplog): + """On unknown config props, a log is issued and the prop is set.""" + pro_config = {"asdf": "qwer"} + set_pro_config(pro_config) + assert [ + mock.call( + ["pro", "config", "set", "asdf=qwer"], + logstring=["pro", "config", "set", "asdf=REDACTED"], + ) + ] == m_subp.call_args_list + assert "qwer" not in caplog.text + assert ( + "Not validating unknown ubuntu_pro.config.asdf property\n" + in caplog.text + ) + + def test_pro_config_wrong_type(self, m_subp, caplog): + pro_config = ["asdf", "qwer"] + with pytest.raises( + RuntimeError, + match=( + "ubuntu_pro: config should be a dict, not" + " a list; skipping enabling config parameters" + ), + ): + set_pro_config(pro_config) + assert 0 == m_subp.call_count + assert not caplog.text + + def test_set_pro_config_error(self, m_subp, caplog): + pro_config = { + "https_proxy": "https://user:pass@some-proxy:8088", + } + # Simulate Pro error + m_subp.side_effect = subp.ProcessExecutionError( + "Invalid proxy: https://user:pass@some-proxy:8088" + ) + with pytest.raises( + RuntimeError, + match=re.escape( + "Failure enabling/disabling Ubuntu Pro config(s):" + ' "https_proxy"' + ), + ): + set_pro_config(pro_config) + assert 1 == m_subp.call_count + assert "https://user:pass@some-proxy:8088" not in caplog.text + assert "Enabling Pro config https_proxy=REDACTED\n" in caplog.text + assert 'Failure enabling/disabling "https_proxy":\n' in caplog.text + + def test_unset_pro_config_error(self, m_subp, caplog): + pro_config = {"https_proxy": None} + # Simulate Pro error + m_subp.side_effect = subp.ProcessExecutionError( + "Error unsetting https_proxy" + ) + with pytest.raises( + RuntimeError, + match=re.escape( + "Failure enabling/disabling Ubuntu Pro config(s): " + '"https_proxy"' + ), + ): + set_pro_config(pro_config) + assert 1 == m_subp.call_count + assert "https://user:pass@some-proxy:8088" not in caplog.text + assert "Disabling Pro config for https_proxy\n" in caplog.text + assert 'Failure enabling/disabling "https_proxy":\n' in caplog.text diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_update_etc_hosts.py cloud-init-24.1.3/tests/unittests/config/test_cc_update_etc_hosts.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_update_etc_hosts.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_update_etc_hosts.py 2024-03-27 13:14:04.000000000 +0000 @@ -46,7 +46,7 @@ cc = cloud.Cloud(ds, paths, {}, distro, None) self.patchUtils(self.tmp) cc_update_etc_hosts.handle("test", cfg, cc, []) - contents = util.load_file("%s/etc/hosts" % self.tmp) + contents = util.load_text_file("%s/etc/hosts" % self.tmp) if "127.0.1.1\tcloud-init.test.us\tcloud-init" not in 
contents: self.assertIsNone("No entry for 127.0.1.1 in etc/hosts") if "192.168.1.1\tblah.blah.us\tblah" not in contents: @@ -69,7 +69,7 @@ cc = cloud.Cloud(ds, paths, {}, distro, None) self.patchUtils(self.tmp) cc_update_etc_hosts.handle("test", cfg, cc, []) - contents = util.load_file("%s/etc/hosts" % self.tmp) + contents = util.load_text_file("%s/etc/hosts" % self.tmp) if "127.0.1.1 cloud-init.test.us cloud-init" not in contents: self.assertIsNone("No entry for 127.0.1.1 in etc/hosts") if "::1 cloud-init.test.us cloud-init" not in contents: diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_write_files.py cloud-init-24.1.3/tests/unittests/config/test_cc_write_files.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_write_files.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_write_files.py 2024-03-27 13:14:04.000000000 +0000 @@ -18,6 +18,7 @@ validate_cloudconfig_schema, ) from tests.unittests.helpers import ( + SCHEMA_EMPTY_ERROR, CiTestCase, FilesystemMockingTestCase, skipUnlessJsonSchema, @@ -81,7 +82,7 @@ [{"content": expected, "path": filename}], self.owner, ) - self.assertEqual(util.load_file(filename), expected) + self.assertEqual(util.load_text_file(filename), expected) def test_append(self): self.patchUtils(self.tmp) @@ -95,14 +96,14 @@ [{"content": added, "path": filename, "append": "true"}], self.owner, ) - self.assertEqual(util.load_file(filename), expected) + self.assertEqual(util.load_text_file(filename), expected) def test_yaml_binary(self): self.patchUtils(self.tmp) data = util.load_yaml(YAML_TEXT) write_files("testname", data["write_files"], self.owner) for path, content in YAML_CONTENT_EXPECTED.items(): - self.assertEqual(util.load_file(path), content) + self.assertEqual(util.load_text_file(path), content) def test_all_decodings(self): self.patchUtils(self.tmp) @@ -137,7 +138,7 @@ write_files("test_decoding", files, self.owner) for path, content in expected: - self.assertEqual(util.load_file(path, decode=False), content) + self.assertEqual(util.load_binary_file(path), content) # make sure we actually wrote *some* files. 
flen_expected = len(gz_aliases + gz_b64_aliases + b64_aliases) * len( @@ -161,7 +162,7 @@ } cc = self.tmp_cloud("ubuntu") handle("ignored", cfg, cc, []) - assert content == util.load_file(file_path) + assert content == util.load_text_file(file_path) self.assertNotIn( "Unknown encoding type text/plain", self.logs.getvalue() ) @@ -173,7 +174,7 @@ cc = self.tmp_cloud("ubuntu") handle("cc_write_file", config, cc, []) with self.assertRaises(FileNotFoundError): - util.load_file(file_path) + util.load_text_file(file_path) class TestDecodePerms(CiTestCase): @@ -222,7 +223,10 @@ [ # Top-level write_files type validation ({"write_files": 1}, "write_files: 1 is not of type 'array'"), - ({"write_files": []}, re.escape("write_files: [] is too short")), + ( + {"write_files": []}, + re.escape("write_files: [] ") + SCHEMA_EMPTY_ERROR, + ), ( {"write_files": [{}]}, "write_files.0: 'path' is a required property", diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_write_files_deferred.py cloud-init-24.1.3/tests/unittests/config/test_cc_write_files_deferred.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_write_files_deferred.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_write_files_deferred.py 2024-03-27 13:14:04.000000000 +0000 @@ -45,9 +45,9 @@ } cc = self.tmp_cloud("ubuntu") handle("cc_write_files_deferred", config, cc, []) - self.assertEqual(util.load_file("/tmp/deferred.file"), expected) + self.assertEqual(util.load_text_file("/tmp/deferred.file"), expected) with self.assertRaises(FileNotFoundError): - util.load_file("/tmp/not_deferred.file") + util.load_text_file("/tmp/not_deferred.file") class TestWriteFilesDeferredSchema: diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_yum_add_repo.py cloud-init-24.1.3/tests/unittests/config/test_cc_yum_add_repo.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_yum_add_repo.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_yum_add_repo.py 2024-03-27 13:14:04.000000000 +0000 @@ -43,7 +43,7 @@ self.patchUtils(self.tmp) cc_yum_add_repo.handle("yum_add_repo", cfg, None, []) self.assertRaises( - IOError, util.load_file, "/etc/yum.repos.d/epel_testing.repo" + IOError, util.load_text_file, "/etc/yum.repos.d/epel_testing.repo" ) def test_write_config(self): @@ -62,7 +62,7 @@ self.patchUtils(self.tmp) self.patchOS(self.tmp) cc_yum_add_repo.handle("yum_add_repo", cfg, None, []) - contents = util.load_file("/etc/yum.repos.d/epel-testing.repo") + contents = util.load_text_file("/etc/yum.repos.d/epel-testing.repo") parser = configparser.ConfigParser() parser.read_string(contents) expected = { @@ -102,7 +102,9 @@ } self.patchUtils(self.tmp) cc_yum_add_repo.handle("yum_add_repo", cfg, None, []) - contents = util.load_file("/etc/yum.repos.d/puppetlabs-products.repo") + contents = util.load_text_file( + "/etc/yum.repos.d/puppetlabs-products.repo" + ) parser = configparser.ConfigParser() parser.read_string(contents) expected = { @@ -139,7 +141,7 @@ ), ( {"yum_repos": {}}, - re.escape("yum_repos: {} does not have enough properties"), + re.escape("yum_repos: {} ") + helpers.SCHEMA_EMPTY_ERROR, ), # baseurl required ( diff -Nru cloud-init-23.4.4/tests/unittests/config/test_cc_zypper_add_repo.py cloud-init-24.1.3/tests/unittests/config/test_cc_zypper_add_repo.py --- cloud-init-23.4.4/tests/unittests/config/test_cc_zypper_add_repo.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_cc_zypper_add_repo.py 2024-03-27 13:14:04.000000000 
+0000 @@ -29,7 +29,7 @@ self.patchUtils(self.tmp) cc_zypper_add_repo._write_repos(cfg["repos"], "/etc/zypp/repos.d") self.assertRaises( - IOError, util.load_file, "/etc/zypp/repos.d/foo.repo" + IOError, util.load_text_file, "/etc/zypp/repos.d/foo.repo" ) def test_write_repos(self): @@ -60,7 +60,7 @@ } root_d = self.tmp_dir() cc_zypper_add_repo._write_repos(cfg["repos"], root_d) - contents = util.load_file("%s/testing-foo.repo" % root_d) + contents = util.load_text_file("%s/testing-foo.repo" % root_d) parser = configparser.ConfigParser() parser.read_string(contents) expected = { @@ -87,7 +87,7 @@ self.reRoot(root_d) cc_zypper_add_repo._write_zypp_config(cfg["config"]) cfg_out = os.path.join(root_d, self.zypp_conf) - contents = util.load_file(cfg_out) + contents = util.load_text_file(cfg_out) expected = [ "# Zypp config", "# Added via cloud.cfg", @@ -113,7 +113,7 @@ self.reRoot(root_d) cc_zypper_add_repo._write_zypp_config(cfg["config"]) cfg_out = os.path.join(root_d, self.zypp_conf) - contents = util.load_file(cfg_out) + contents = util.load_text_file(cfg_out) expected = [ "# Zypp config", "# Added via cloud.cfg", @@ -136,7 +136,7 @@ self.reRoot(root_d) cc_zypper_add_repo._write_zypp_config(cfg.get("config", {})) cfg_out = os.path.join(root_d, self.zypp_conf) - contents = util.load_file(cfg_out) + contents = util.load_text_file(cfg_out) self.assertEqual(contents, "# No data") def test_empty_config_value_no_new_data(self): @@ -149,7 +149,7 @@ self.reRoot(root_d) cc_zypper_add_repo._write_zypp_config(cfg.get("config", {})) cfg_out = os.path.join(root_d, self.zypp_conf) - contents = util.load_file(cfg_out) + contents = util.load_text_file(cfg_out) self.assertEqual(contents, "# No data") def test_handler_full_setup(self): @@ -164,7 +164,7 @@ self.reRoot(root_d) cc_zypper_add_repo.handle("zypper_add_repo", cfg, None, []) cfg_out = os.path.join(root_d, self.zypp_conf) - contents = util.load_file(cfg_out) + contents = util.load_text_file(cfg_out) expected = [ "# Zypp config", "# Added via cloud.cfg", @@ -191,7 +191,7 @@ self.reRoot(root_d) cc_zypper_add_repo._write_zypp_config(cfg.get("config", {})) cfg_out = os.path.join(root_d, self.zypp_conf) - contents = util.load_file(cfg_out) + contents = util.load_text_file(cfg_out) self.assertEqual(contents, "# No data") def test_no_repo_data(self): diff -Nru cloud-init-23.4.4/tests/unittests/config/test_modules.py cloud-init-24.1.3/tests/unittests/config/test_modules.py --- cloud-init-23.4.4/tests/unittests/config/test_modules.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_modules.py 2024-03-27 13:14:04.000000000 +0000 @@ -15,7 +15,8 @@ from cloudinit.distros import ALL_DISTROS from cloudinit.settings import FREQUENCIES from cloudinit.stages import Init -from tests.unittests.helpers import cloud_init_project_dir, mock +from tests.helpers import cloud_init_project_dir +from tests.unittests.helpers import mock M_PATH = "cloudinit.config.modules." 
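The load_file renames swept through above follow cloud-init 24.1's split of util.load_file into explicit text and binary readers. A minimal sketch of the two call shapes, assuming only the signatures these tests exercise (load_text_file returns str, load_binary_file returns bytes; the 23.4 equivalents were load_file(path) and load_file(path, decode=False)):

    import tempfile

    from cloudinit import util

    with tempfile.NamedTemporaryFile("w", delete=False) as f:
        f.write("127.0.0.1 localhost\n")
        path = f.name

    assert isinstance(util.load_text_file(path), str)      # decoded text
    assert isinstance(util.load_binary_file(path), bytes)  # raw bytes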
diff -Nru cloud-init-23.4.4/tests/unittests/config/test_schema.py cloud-init-24.1.3/tests/unittests/config/test_schema.py --- cloud-init-23.4.4/tests/unittests/config/test_schema.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/config/test_schema.py 2024-03-27 13:14:04.000000000 +0000 @@ -23,6 +23,7 @@ VERSIONED_USERDATA_SCHEMA_FILE, MetaSchema, SchemaProblem, + SchemaType, SchemaValidationError, annotated_cloudconfig_file, get_jsonschema_validator, @@ -32,6 +33,7 @@ handle_schema_args, load_doc, main, + netplan_validate_network_schema, validate_cloudconfig_file, validate_cloudconfig_metaschema, validate_cloudconfig_schema, @@ -40,12 +42,13 @@ from cloudinit.safeyaml import load, load_with_marks from cloudinit.settings import FREQUENCIES from cloudinit.sources import DataSourceNotFoundException -from cloudinit.util import load_file, write_file +from cloudinit.templater import JinjaSyntaxParsingException +from cloudinit.util import load_text_file, write_file +from tests.helpers import cloud_init_project_dir from tests.hypothesis import given from tests.hypothesis_jsonschema import from_schema from tests.unittests.helpers import ( CiTestCase, - cloud_init_project_dir, does_not_raise, mock, skipUnlessHypothesisJsonSchema, @@ -135,7 +138,7 @@ r"https:\/\/raw.githubusercontent.com\/canonical\/" r"cloud-init\/main\/cloudinit\/config\/schemas\/", f"file://{schema_dir}/", - load_file(version_schemafile), + load_text_file(version_schemafile), ) ) if error_msg: @@ -245,7 +248,6 @@ {"$ref": "#/$defs/cc_locale"}, {"$ref": "#/$defs/cc_lxd"}, {"$ref": "#/$defs/cc_mcollective"}, - {"$ref": "#/$defs/cc_migrator"}, {"$ref": "#/$defs/cc_mounts"}, {"$ref": "#/$defs/cc_ntp"}, {"$ref": "#/$defs/cc_package_update_upgrade_install"}, @@ -268,8 +270,8 @@ {"$ref": "#/$defs/cc_ssh_import_id"}, {"$ref": "#/$defs/cc_ssh"}, {"$ref": "#/$defs/cc_timezone"}, - {"$ref": "#/$defs/cc_ubuntu_advantage"}, {"$ref": "#/$defs/cc_ubuntu_drivers"}, + {"$ref": "#/$defs/cc_ubuntu_pro"}, {"$ref": "#/$defs/cc_update_etc_hosts"}, {"$ref": "#/$defs/cc_update_hostname"}, {"$ref": "#/$defs/cc_users_groups"}, @@ -329,11 +331,96 @@ self.assertTrue(isinstance(exception, ValueError)) +class FakeNetplanParserException(Exception): + def __init__(self, filename, line, column, message): + self.filename = filename + self.line = line + self.column = column + self.message = message + + +class TestNetplanValidateNetworkSchema: + """Tests for netplan_validate_network_schema. + + Heavily mocked because github.com/canonical/netplan project does not + have a pyproject.toml or setup.py or pypi release that allows us to + define tox unittest dependencies. + """ + + @pytest.mark.parametrize( + "config,expected_log", + ( + ({}, ""), + ({"version": 1}, ""), + ( + {"version": 2}, + "Skipping netplan schema validation. No netplan available", + ), + ( + {"network": {"version": 2}}, + "Skipping netplan schema validation. 
No netplan available", + ), + ), + ) + def test_network_config_schema_validation_false_when_skipped( + self, config, expected_log, caplog + ): + """netplan_validate_network_schema returns false when skipped.""" + with mock.patch.dict("sys.modules"): + sys.modules.pop("netplan", None) + assert False is netplan_validate_network_schema(config) + assert expected_log in caplog.text + + @pytest.mark.parametrize( + "error,error_log", + ( + (None, ""), + ( + FakeNetplanParserException( + "net.yaml", + line=1, + column=12, + message="incorrect YAML value: yes for dhcp value", + ), + r"Invalid network-config provided:.*format-l1.c12: Invalid" + " netplan schema. incorrect YAML value: yes for dhcp value", + ), + ), + ) + def test_network_config_schema_validation( + self, error, error_log, caplog, tmpdir + ): + + fake_tmpdir = tmpdir.join("mkdtmp") + + class FakeParser: + def load_yaml_hierarchy(self, parse_dir): + # Since we mocked mkdtemp to tmpdir, assert we pass tmpdir + assert parse_dir == fake_tmpdir + if error: + raise error + + # Mock expected imports + with mock.patch.dict( + "sys.modules", + netplan=mock.MagicMock( + NetplanParserException=FakeNetplanParserException, + Parser=FakeParser, + ), + ): + with mock.patch( + "cloudinit.config.schema.mkdtemp", + return_value=fake_tmpdir.strpath, + ): + with caplog.at_level(logging.WARNING): + assert netplan_validate_network_schema({"version": 2}) + if error_log: + assert re.match(error_log, caplog.records[0].msg, re.DOTALL) + + class TestValidateCloudConfigSchema: """Tests for validate_cloudconfig_schema.""" - with_logs = True - @pytest.mark.parametrize( "schema, call_count", ((None, 1), ({"properties": {"p1": {"type": "string"}}}, 0)), @@ -355,7 +442,7 @@ def test_validateconfig_schema_non_strict_emits_warnings(self, caplog): """When strict is False validate_cloudconfig_schema emits warnings.""" schema = {"properties": {"p1": {"type": "string"}}} - validate_cloudconfig_schema({"p1": -1}, schema, strict=False) + validate_cloudconfig_schema({"p1": -1}, schema=schema, strict=False) [(module, log_level, log_msg)] = caplog.record_tuples assert "cloudinit.config.schema" == module assert logging.WARNING == log_level @@ -373,7 +460,7 @@ } validate_cloudconfig_schema( {"hashed-password": "secret"}, - schema, + schema=schema, strict=False, log_details=False, ) @@ -402,7 +489,7 @@ """When strict is True validate_cloudconfig_schema raises errors.""" schema = {"properties": {"p1": {"type": "string"}}} with pytest.raises(SchemaValidationError) as context_mgr: - validate_cloudconfig_schema({"p1": -1}, schema, strict=True) + validate_cloudconfig_schema({"p1": -1}, schema=schema, strict=True) assert ( "Cloud config schema errors: p1: -1 is not of type 'string'" == (str(context_mgr.value)) @@ -413,7 +500,9 @@ """With strict True, validate_cloudconfig_schema errors on format.""" schema = {"properties": {"p1": {"type": "string", "format": "email"}}} with pytest.raises(SchemaValidationError) as context_mgr: - validate_cloudconfig_schema({"p1": "-1"}, schema, strict=True) + validate_cloudconfig_schema( + {"p1": "-1"}, schema=schema, strict=True + ) assert "Cloud config schema errors: p1: '-1' is not a 'email'" == ( str(context_mgr.value) ) @@ -424,7 +513,10 @@ schema = {"properties": {"p1": {"type": "string", "format": "email"}}} with pytest.raises(SchemaValidationError) as context_mgr: validate_cloudconfig_schema( - {"p1": "-1"}, schema, strict=True, strict_metaschema=True + {"p1": "-1"}, + schema=schema, + strict=True, + strict_metaschema=True, ) assert "Cloud config 
schema errors: p1: '-1' is not a 'email'" == str( context_mgr.value @@ -441,7 +533,7 @@ """ schema = {"properties": {"p1": {"types": "string", "format": "email"}}} validate_cloudconfig_schema( - {"p1": "-1"}, schema, strict_metaschema=True + {"p1": "-1"}, schema=schema, strict_metaschema=True ) assert ( "Meta-schema validation failed, attempting to validate config" @@ -656,7 +748,7 @@ ): validate_cloudconfig_schema( config, - schema, + schema=schema, strict_metaschema=True, log_deprecations=log_deprecations, ) @@ -698,7 +790,7 @@ # Some module examples reference keys defined in multiple schemas supplemental_schemas = { "cc_landscape": ["cc_apt_configure"], - "cc_ubuntu_advantage": ["cc_power_state_change"], + "cc_ubuntu_pro": ["cc_power_state_change"], "cc_update_hostname": ["cc_set_hostname"], "cc_users_groups": ["cc_ssh_import_id"], "cc_disk_setup": ["cc_mounts"], @@ -713,7 +805,7 @@ ] ) schema["properties"].update(supplemental_props) - validate_cloudconfig_schema(config_load, schema, strict=True) + validate_cloudconfig_schema(config_load, schema=schema, strict=True) @pytest.mark.usefixtures("fake_filesystem") @@ -826,6 +918,41 @@ " Nothing to validate" in out ) + @pytest.mark.parametrize("annotate", (True, False)) + def test_validateconfig_file_raises_jinja_syntax_error( + self, annotate, tmpdir, mocker, capsys + ): + """ """ + # will throw error because of space between last two }'s + invalid_jinja_template = "## template: jinja\na:b\nc:{{ d } }" + mocker.patch("os.path.exists", return_value=True) + mocker.patch( + "cloudinit.util.load_text_file", + return_value=invalid_jinja_template, + ) + mocker.patch( + "cloudinit.handlers.jinja_template.load_text_file", + return_value='{"c": "d"}', + ) + config_file = tmpdir.join("my.yaml") + config_file.write(invalid_jinja_template) + with pytest.raises(SystemExit) as context_manager: + validate_cloudconfig_file(config_file.strpath, {}, annotate) + assert 1 == context_manager.value.code + + _out, err = capsys.readouterr() + expected = ( + "Error:\n" + "Failed to render templated user-data. " + + JinjaSyntaxParsingException.format_error_message( + syntax_error="unexpected '}'", + line_number=3, + line_content="c:{{ d } }", + ) + + "\n" + ) + assert expected == err + class TestSchemaDocMarkdown: """Tests for get_meta_doc.""" @@ -1751,15 +1878,158 @@ assert "\nNTP\n---\n" in out assert "\nRuncmd\n------\n" in out - def test_main_validates_config_file(self, _read_cfg_paths, tmpdir, capsys): + @pytest.mark.parametrize( + "schema_type,content,expected", + ( + (None, b"#cloud-config\nntp:", "Valid schema"), + ("cloud-config", b"#cloud-config\nntp:", "Valid schema"), + ( + "network-config", + ( + b"network: {'version': 2, 'ethernets':" + b" {'eth0': {'dhcp': true}}}" + ), + "Skipping network-config schema validation. 
No network schema" + " for version: 2", + ), + ( + "network-config", + ( + b"network:\n version: 1\n config:\n - type: physical\n" + b" name: eth0\n subnets:\n - type: dhcp\n" + ), + "Valid schema", + ), + ), + ) + def test_main_validates_config_file( + self, + _read_cfg_paths, + schema_type, + content, + expected, + tmpdir, + capsys, + caplog, + ): """When --config-file parameter is provided, main validates schema.""" myyaml = tmpdir.join("my.yaml") myargs = ["mycmd", "--config-file", myyaml.strpath] - myyaml.write(b"#cloud-config\nntp:") # shortest ntp schema + if schema_type: + myargs += ["--schema-type", schema_type] + myyaml.write(content) # shortest ntp schema with mock.patch("sys.argv", myargs): - assert 0 == main(), "Expected 0 exit code" + # Always assert we have no netplan module which triggers + # schema skip of network-config version: 2 until cloud-init + # grows internal schema-network-config-v2.json. + with mock.patch.dict("sys.modules", netplan=ImportError()): + assert 0 == main(), "Expected 0 exit code" out, _err = capsys.readouterr() - assert f"Valid schema {myyaml}\n" == out + assert expected in out + + @pytest.mark.parametrize( + "update_path_content_by_key, expected_keys", + ( + pytest.param( + {}, + { + "ud_key": "cloud_config", + "vd_key": "vendor_cloud_config", + "vd2_key": "vendor2_cloud_config", + "net_key": "network_config", + }, + id="prefer_processed_data_when_present_and_non_empty", + ), + pytest.param( + { + "cloud_config": "", + "vendor_cloud_config": "", + "vendor2_cloud_config": "", + }, + { + "ud_key": "userdata_raw", + "vd_key": "vendordata_raw", + "vd2_key": "vendordata2_raw", + "net_key": "network_config", + }, + id="prefer_raw_data_when_processed_is_empty", + ), + pytest.param( + {"cloud_config": "", "userdata_raw": ""}, + { + "ud_key": "cloud_config", + "vd_key": "vendor_cloud_config", + "vd2_key": "vendor2_cloud_config", + "net_key": "network_config", + }, + id="prefer_processed_vd_file_path_when_raw_and_processed_empty", + ), + ), + ) + @mock.patch(M_PATH + "read_cfg_paths") + @mock.patch(M_PATH + "os.getuid", return_value=0) + def test_main_processed_data_preference_over_raw_data( + self, + _read_cfg_paths, + _getuid, + read_cfg_paths, + update_path_content_by_key, + expected_keys, + paths, + capsys, + ): + """""" + paths.get_ipath = paths.get_ipath_cur + read_cfg_paths.return_value = paths + path_content_by_key = { + "cloud_config": "#cloud-config\n{}", + "vendor_cloud_config": "#cloud-config\n{}", + "vendor2_cloud_config": "#cloud-config\n{}", + "vendordata_raw": "#cloud-config\n{}", + "vendordata2_raw": "#cloud-config\n{}", + "network_config": "{version: 1, config: []}", + "userdata_raw": "#cloud-config\n{}", + } + expected_paths = dict( + (key, paths.get_ipath_cur(expected_keys[key])) + for key in expected_keys + ) + path_content_by_key.update(update_path_content_by_key) + for path_key, path_content in path_content_by_key.items(): + write_file(paths.get_ipath_cur(path_key), path_content) + data_types = "user-data, vendor-data, vendor2-data, network-config" + ud_msg = " Valid schema user-data" + if ( + not path_content_by_key["cloud_config"] + and not path_content_by_key["userdata_raw"] + ): + ud_msg = ( + f"Empty 'cloud-config' found at {expected_paths['ud_key']}." + " Nothing to validate." + ) + + expected = dedent( + f"""\ + Found cloud-config data types: {data_types} + + 1. user-data at {expected_paths["ud_key"]}: + {ud_msg} + + 2. vendor-data at {expected_paths['vd_key']}: + Valid schema vendor-data + + 3. 
vendor2-data at {expected_paths['vd2_key']}: + Valid schema vendor2-data + + 4. network-config at {expected_paths['net_key']}: + Valid schema network-config + """ + ) + myargs = ["mycmd", "--system"] + with mock.patch("sys.argv", myargs): + main() + out, _err = capsys.readouterr() + assert expected == out @pytest.mark.parametrize( "net_config,net_output,error_raised", @@ -1779,7 +2049,7 @@ id="netv2_validation_is_skipped", ), pytest.param( - "network:\n", + "network: {}\n", "Skipping network-config schema validation on empty config.", does_not_raise(), id="empty_net_validation_is_skipped", @@ -1791,6 +2061,14 @@ pytest.raises(SystemExit), id="netv1_schema_errors_handled", ), + pytest.param( + "network:\n version: 1\n config:\n - type: physical\n" + " name: eth01234567890123\n subnets:\n" + " - type: dhcp\n", + " Invalid network-config {network_file}", + pytest.raises(SystemExit), + id="netv1_schema_error_on_nic_name_length", + ), ), ) @mock.patch(M_PATH + "read_cfg_paths") @@ -1820,8 +2098,12 @@ write_file(network_file, net_config) myargs = ["mycmd", "--system"] with error_raised: - with mock.patch("sys.argv", myargs): - main() + # Always assert we have no netplan module which triggers + # schema skip of network-config version: 2 until cloud-init + # grows internal schema-network-config-v2.json. + with mock.patch.dict("sys.modules", netplan=ImportError()): + with mock.patch("sys.argv", myargs): + main() out, _err = capsys.readouterr() net_output = net_output.format(network_file=network_file) @@ -1880,7 +2162,7 @@ class TestSchemaDocExamples: schema = get_schema() - net_schema = get_schema(schema_type="network-config") + net_schema = get_schema(schema_type=SchemaType.NETWORK_CONFIG) @pytest.mark.parametrize("example_path", _get_meta_doc_examples()) @skipUnlessJsonSchema() @@ -1953,18 +2235,16 @@ @skipUnlessJsonSchema() class TestNetworkSchema: - net_schema = get_schema(schema_type="network-config") + net_schema = get_schema(schema_type=SchemaType.NETWORK_CONFIG) @pytest.mark.parametrize( - "src_config, expectation", + "src_config, expectation, log", ( pytest.param( {"network": {"config": [], "version": 2}}, - pytest.raises( - SchemaValidationError, - match=re.escape("network.version: 2 is not one of [1]"), - ), - id="net_v2_invalid", + does_not_raise(), + "Skipping netplan schema validation. 
No netplan available", + id="net_v2_skipped", ), pytest.param( {"network": {"version": 1}}, @@ -1972,11 +2252,13 @@ SchemaValidationError, match=re.escape("'config' is a required property"), ), + "", id="config_key_required", ), pytest.param( {"network": {"version": 1, "config": []}}, does_not_raise(), + "", id="config_key_required", ), pytest.param( @@ -1993,6 +2275,7 @@ " not valid under any of the given schemas" ), ), + "", id="unknown_config_type_item", ), pytest.param( @@ -2001,6 +2284,7 @@ SchemaValidationError, match=r"network.config.0: 'name' is a required property.*", ), + "", id="physical_requires_name_property", ), pytest.param( @@ -2011,6 +2295,7 @@ } }, does_not_raise(), + "", id="physical_with_name_succeeds", ), pytest.param( @@ -2026,6 +2311,7 @@ SchemaValidationError, match=r"Additional properties are not allowed.*", ), + "", id="physical_no_additional_properties", ), pytest.param( @@ -2036,6 +2322,7 @@ } }, does_not_raise(), + "", id="physical_with_all_known_properties", ), pytest.param( @@ -2046,18 +2333,35 @@ } }, does_not_raise(), + "", id="bond_with_all_known_properties", ), + pytest.param( + { + "network": { + "version": 1, + "config": [ + {"type": "physical", "name": "eth0", "mtu": None}, + {"type": "nameserver", "address": "8.8.8.8"}, + ], + } + }, + does_not_raise(), + "", + id="GH-4710_mtu_none_and_str_address", + ), ), ) - def test_network_schema(self, src_config, expectation): + def test_network_schema(self, src_config, expectation, log, caplog): with expectation: validate_cloudconfig_schema( config=src_config, schema=self.net_schema, - schema_type="netork-config", + schema_type=SchemaType.NETWORK_CONFIG, strict=True, ) + if log: + assert log in caplog.text class TestStrictMetaschema: @@ -2323,9 +2627,9 @@ """ # noqa: E501 ), - "", - does_not_raise(), - id="root_annotate_unique_errors_no_exception", + """Error: Invalid schema: user-data\n\n""", + pytest.raises(SystemExit), + id="root_annotate_errors_with_exception", ), pytest.param( 0, @@ -2414,7 +2718,7 @@ False, dedent( """\ - Invalid UNKNOWN_CONFIG_HEADER {cfg_file} + Invalid user-data {cfg_file} """ # noqa: E501 ), dedent( @@ -2422,7 +2726,7 @@ Error: Cloud config schema errors: format-l1.c1: Unrecognized user-data header in {cfg_file}: "#bogus-config". Expected first line to be one of: #!, ## template: jinja, #cloud-boothook, #cloud-config, #cloud-config-archive, #cloud-config-jsonp, #include, #include-once, #part-handler - Error: Invalid schema: UNKNOWN_CONFIG_HEADER + Error: Invalid schema: user-data """ # noqa: E501 ), diff -Nru cloud-init-23.4.4/tests/unittests/conftest.py cloud-init-24.1.3/tests/unittests/conftest.py --- cloud-init-23.4.4/tests/unittests/conftest.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/conftest.py 2024-03-27 13:14:04.000000000 +0000 @@ -30,7 +30,8 @@ util: [ ("write_file", 1), ("append_file", 1), - ("load_file", 1), + ("load_binary_file", 1), + ("load_text_file", 1), ("ensure_dir", 1), ("chmod", 1), ("delete_dir_contents", 1), @@ -61,6 +62,26 @@ mocker.patch.object(mod, f, trap_func) +@pytest.fixture(scope="session", autouse=True) +def disable_sysfs_net(request, tmpdir_factory): + """Avoid tests which read the undertying host's /syc/class/net. + + To allow unobscured reads of /sys/class/net on the host we can + parametrize the fixture with: + + @pytest.mark.parametrize("disable_sysfs_net", [False], indirect=True) + """ + if hasattr(request, "param") and getattr(request, "param") is False: + # Test disabled this fixture, perform no mocks. 
+ yield + return + mock_sysfs = f"{tmpdir_factory.mktemp('sysfs')}/" + with mock.patch( + "cloudinit.net.get_sys_class_path", return_value=mock_sysfs + ): + yield mock_sysfs + + @pytest.fixture(autouse=True) def disable_dns_lookup(request): if "allow_dns_lookup" in request.keywords: @@ -75,6 +96,16 @@ ): yield + +@pytest.fixture() +def dhclient_exists(): + with mock.patch( + "cloudinit.net.dhcp.subp.which", + return_value="/sbin/dhclient", + autospec=True, + ): + yield + log.configure_root_logger() diff -Nru cloud-init-23.4.4/tests/unittests/distros/package_management/test_apt.py cloud-init-24.1.3/tests/unittests/distros/package_management/test_apt.py --- cloud-init-23.4.4/tests/unittests/distros/package_management/test_apt.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/distros/package_management/test_apt.py 2024-03-27 13:14:04.000000000 +0000 @@ -7,6 +7,8 @@ from cloudinit import subp from cloudinit.distros.package_management.apt import APT_GET_COMMAND, Apt +M_PATH = "cloudinit.distros.package_management.apt.Apt." + @mock.patch.dict("os.environ", {}, clear=True) @mock.patch("cloudinit.distros.debian.subp.which", return_value=True) @@ -22,7 +24,7 @@ expected_call = { "args": ["eatmydata"] + list(APT_GET_COMMAND) + ["update"], "capture": False, - "env": {"DEBIAN_FRONTEND": "noninteractive"}, + "update_env": {"DEBIAN_FRONTEND": "noninteractive"}, } assert m_subp.call_args == mock.call(**expected_call) @@ -86,3 +88,27 @@ ) with pytest.raises(TimeoutError): apt._wait_for_apt_command("stub", {"args": "stub2"}, timeout=5) + + def test_search_stem(self, m_subp, m_which, mocker): + """Test that packages containing `-`, `^`, `/`, or `=` are handled correctly.""" + mocker.patch(f"{M_PATH}update_package_sources") + mocker.patch( + f"{M_PATH}get_all_packages", + return_value=["cloud-init", "pkg2", "pkg3", "pkg4", "pkg5"], + ) + m_install = mocker.patch(f"{M_PATH}run_package_command") + + apt = Apt(runner=mock.Mock()) + apt.install_packages( + ["cloud-init", "pkg2-", "pkg3/jammy-updates", "pkg4=1.2", "pkg5^"] + ) + m_install.assert_called_with( + "install", + pkgs=[ + "cloud-init", + "pkg2-", + "pkg3/jammy-updates", + "pkg4=1.2", + "pkg5^", + ], + ) diff -Nru cloud-init-23.4.4/tests/unittests/distros/test__init__.py cloud-init-24.1.3/tests/unittests/distros/test__init__.py --- cloud-init-23.4.4/tests/unittests/distros/test__init__.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/distros/test__init__.py 2024-03-27 13:14:04.000000000 +0000 @@ -8,6 +8,8 @@ import pytest from cloudinit import distros, util +from cloudinit.distros.ubuntu import Distro +from cloudinit.net.dhcp import Dhcpcd, IscDhclient, Udhcpc from tests.unittests import helpers M_PATH = "cloudinit.distros."
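The test_search_stem case above pins down how Apt.install_packages forwards specifiers carrying apt suffixes: version pins (=), release targets (/), and trailing removal or regex markers (- and ^). A rough sketch of the stem extraction such a check implies; package_stem is an illustrative name, not a helper from the diff:

    import re

    def package_stem(spec: str) -> str:
        # Drop anything after a release target or version pin, then strip
        # trailing remove/regex markers to recover the bare name that an
        # availability check would compare against get_all_packages().
        return re.split(r"[/=]", spec, maxsplit=1)[0].rstrip("-^")

    assert package_stem("pkg3/jammy-updates") == "pkg3"
    assert package_stem("pkg4=1.2") == "pkg4"
    assert package_stem("pkg2-") == "pkg2"
    assert package_stem("pkg5^") == "pkg5"
    assert package_stem("cloud-init") == "cloud-init"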
@@ -67,7 +69,7 @@ self.patchOS(self.tmp) self.patchUtils(self.tmp) d.write_doas_rules(user, rules) - contents = util.load_file(d.doas_fn) + contents = util.load_text_file(d.doas_fn) return contents, cls, d def _write_load_sudoers(self, _user, rules): @@ -78,7 +80,7 @@ self.patchOS(self.tmp) self.patchUtils(self.tmp) d.write_sudo_rules("harlowja", rules) - contents = util.load_file(d.ci_sudoers_fn) + contents = util.load_text_file(d.ci_sudoers_fn) return contents, cls, d def _count_in(self, lines_look_for, text_content): @@ -119,7 +121,7 @@ d = self._write_load_doas("harlowja", rules)[2] # write to doas.conf again - should not create duplicate rules d.write_doas_rules("harlowja", rules) - contents = util.load_file(d.doas_fn) + contents = util.load_text_file(d.doas_fn) expected = [ "permit nopass harlowja cmd ls", "permit nopass harlowja cmd pwd", @@ -193,7 +195,7 @@ d = self._write_load_sudoers("harlowja", rules)[2] # write to sudoers again - should not create duplicate rules d.write_sudo_rules("harlowja", rules) - contents = util.load_file(d.ci_sudoers_fn) + contents = util.load_text_file(d.ci_sudoers_fn) expected = [ "harlowja ALL=(ALL:ALL) ALL", "harlowja B-ALL=(ALL:ALL) ALL", @@ -213,7 +215,7 @@ self.patchOS(self.tmp) self.patchUtils(self.tmp) d.ensure_sudo_dir("/b") - contents = util.load_file("/etc/sudoers") + contents = util.load_text_file("/etc/sudoers") self.assertIn("includedir /b", contents) self.assertTrue(os.path.isdir("/b")) @@ -224,7 +226,7 @@ self.patchUtils(self.tmp) util.write_file("/etc/sudoers", "josh, josh\n") d.ensure_sudo_dir("/b") - contents = util.load_file("/etc/sudoers") + contents = util.load_text_file("/etc/sudoers") self.assertIn("includedir /b", contents) self.assertTrue(os.path.isdir("/b")) self.assertIn("josh", contents) @@ -238,7 +240,7 @@ for char in ["#", "@"]: util.write_file("/etc/sudoers", "{}includedir /b".format(char)) d.ensure_sudo_dir("/b") - contents = util.load_file("/etc/sudoers") + contents = util.load_text_file("/etc/sudoers") self.assertIn("includedir /b", contents) self.assertTrue(os.path.isdir("/b")) self.assertEqual(1, contents.count("includedir /b")) @@ -501,3 +503,72 @@ assert "/tmp" == tmp_path else: assert "/usr_lib_exec/cloud-init/clouddir" == tmp_path + + +@pytest.mark.parametrize( + "chosen_client, config, which_override", + [ + pytest.param( + IscDhclient, + {"network": {"dhcp_client_priority": ["dhclient"]}}, + None, + id="single_client_is_found_from_config_dhclient", + ), + pytest.param( + Udhcpc, + {"network": {"dhcp_client_priority": ["udhcpc"]}}, + None, + id="single_client_is_found_from_config_udhcpc", + ), + pytest.param( + Dhcpcd, + {"network": {"dhcp_client_priority": ["dhcpcd"]}}, + None, + id="single_client_is_found_from_config_dhcpcd", + ), + pytest.param( + Dhcpcd, + {"network": {"dhcp_client_priority": ["dhcpcd", "dhclient"]}}, + None, + id="first_client_is_found_from_config_dhcpcd", + ), + pytest.param( + Udhcpc, + { + "network": { + "dhcp_client_priority": ["udhcpc", "dhcpcd", "dhclient"] + } + }, + None, + id="first_client_is_found_from_config_udhcpc", + ), + pytest.param( + Dhcpcd, + {"network": {"dhcp_client_priority": []}}, + None, + id="first_client_is_found_no_config_dhcpcd", + ), + pytest.param( + Dhcpcd, + { + "network": { + "dhcp_client_priority": ["udhcpc", "dhcpcd", "dhclient"] + } + }, + [False, False, True, True], + id="second_client_is_found_from_config_dhcpcd", + ), + ], +) +class TestDHCP: + @mock.patch("cloudinit.net.dhcp.subp.which") + def test_dhcp_configuration( + self, m_which, chosen_client, 
config, which_override + ): + """check that, when a user provides a configuration at + network.dhcp_client_priority, the correct client is chosen + """ + m_which.side_effect = which_override + distro = Distro("", {}, {}) + distro._cfg = config + assert isinstance(distro.dhcp_client, chosen_client) diff -Nru cloud-init-23.4.4/tests/unittests/distros/test_arch.py cloud-init-24.1.3/tests/unittests/distros/test_arch.py --- cloud-init-23.4.4/tests/unittests/distros/test_arch.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/distros/test_arch.py 2024-03-27 13:14:04.000000000 +0000 @@ -1,9 +1,8 @@ # This file is part of cloud-init. See LICENSE file for license information. from cloudinit import util -from cloudinit.distros.arch import _render_network from tests.unittests.distros import _get_distro -from tests.unittests.helpers import CiTestCase, dir2dict +from tests.unittests.helpers import CiTestCase class TestArch(CiTestCase): @@ -12,43 +11,4 @@ hostname = "myhostname" hostfile = self.tmp_path("hostfile") distro._write_hostname(hostname, hostfile) - self.assertEqual(hostname + "\n", util.load_file(hostfile)) - - -class TestRenderNetwork(CiTestCase): - def test_basic_static(self): - """Just the most basic static config. - - note 'lo' should not be rendered as an interface.""" - entries = { - "eth0": { - "auto": True, - "dns-nameservers": ["8.8.8.8"], - "bootproto": "static", - "address": "10.0.0.2", - "gateway": "10.0.0.1", - "netmask": "255.255.255.0", - }, - "lo": {"auto": True}, - } - target = self.tmp_dir() - devs = _render_network(entries, target=target) - files = dir2dict(target, prefix=target) - self.assertEqual(["eth0"], devs) - self.assertEqual( - { - "/etc/netctl/eth0": "\n".join( - [ - "Address=10.0.0.2/255.255.255.0", - "Connection=ethernet", - "DNS=('8.8.8.8')", - "Gateway=10.0.0.1", - "IP=static", - "Interface=eth0", - "", - ] - ), - "/etc/resolv.conf": "nameserver 8.8.8.8\n", - }, - files, - ) + self.assertEqual(hostname + "\n", util.load_text_file(hostfile)) diff -Nru cloud-init-23.4.4/tests/unittests/distros/test_bsd_utils.py cloud-init-24.1.3/tests/unittests/distros/test_bsd_utils.py --- cloud-init-23.4.4/tests/unittests/distros/test_bsd_utils.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/distros/test_bsd_utils.py 2024-03-27 13:14:04.000000000 +0000 @@ -18,7 +18,7 @@ self.addCleanup(patches.close) self.load_file = patches.enter_context( - mock.patch.object(bsd_utils.util, "load_file") + mock.patch.object(bsd_utils.util, "load_text_file") ) self.write_file = patches.enter_context( diff -Nru cloud-init-23.4.4/tests/unittests/distros/test_gentoo.py cloud-init-24.1.3/tests/unittests/distros/test_gentoo.py --- cloud-init-23.4.4/tests/unittests/distros/test_gentoo.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/distros/test_gentoo.py 2024-03-27 13:14:04.000000000 +0000 @@ -11,7 +11,9 @@ hostname = "myhostname" hostfile = self.tmp_path("hostfile") distro._write_hostname(hostname, hostfile) - self.assertEqual('hostname="myhostname"\n', util.load_file(hostfile)) + self.assertEqual( + 'hostname="myhostname"\n', util.load_text_file(hostfile) + ) def test_write_existing_hostname_with_comments(self): distro = _get_distro("gentoo") @@ -22,5 +24,5 @@ distro._write_hostname(hostname, hostfile) self.assertEqual( '#This is the hostname\nhostname="myhostname"\n', - util.load_file(hostfile), + util.load_text_file(hostfile), ) diff -Nru cloud-init-23.4.4/tests/unittests/distros/test_init.py 
cloud-init-24.1.3/tests/unittests/distros/test_init.py --- cloud-init-23.4.4/tests/unittests/distros/test_init.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/distros/test_init.py 2024-03-27 13:14:04.000000000 +0000 @@ -256,6 +256,17 @@ class TestInstall: """Tests for cloudinit.distros.Distro.install_packages.""" + @pytest.fixture(autouse=True) + def ensure_available(self, mocker): + mocker.patch( + "cloudinit.distros.package_management.apt.Apt.available", + return_value=True, + ) + mocker.patch( + "cloudinit.distros.package_management.snap.Snap.available", + return_value=True, + ) + @pytest.fixture def m_apt_install(self, mocker): return mocker.patch( @@ -318,7 +329,7 @@ ) with pytest.raises( PackageInstallerError, - match="Failed to install the following packages: \\['pkg3'\\]", + match="Failed to install the following packages: {'pkg3'}", ): _get_distro("debian").install_packages( [{"apt": ["pkg1"]}, "pkg2", {"snap": ["pkg3"]}] @@ -339,3 +350,120 @@ assert "pkg3" not in apt_install_args m_snap_install.assert_not_called() + + def test_specific_package_manager_fail_doesnt_retry( + self, mocker, m_snap_install + ): + """Test fail from package manager doesn't retry as generic.""" + m_apt_install = mocker.patch( + "cloudinit.distros.package_management.apt.Apt.install_packages", + return_value=["pkg1"], + ) + with pytest.raises(PackageInstallerError): + _get_distro("ubuntu").install_packages([{"apt": ["pkg1"]}]) + apt_install_args = m_apt_install.call_args_list[0][0][0] + assert "pkg1" in apt_install_args + m_snap_install.assert_not_called() + + def test_no_attempt_if_no_package_manager( + self, mocker, m_apt_install, m_snap_install, caplog + ): + """Test that no attempt is made if there are no package manager.""" + mocker.patch( + "cloudinit.distros.package_management.apt.Apt.available", + return_value=False, + ) + mocker.patch( + "cloudinit.distros.package_management.snap.Snap.available", + return_value=False, + ) + with pytest.raises(PackageInstallerError): + _get_distro("ubuntu").install_packages( + ["pkg1", "pkg2", {"other": "pkg3"}] + ) + m_apt_install.assert_not_called() + m_snap_install.assert_not_called() + + assert "Package manager 'apt' not available" in caplog.text + assert "Package manager 'snap' not available" in caplog.text + + @pytest.mark.parametrize( + "distro,pkg_list,apt_available,apt_failed,snap_failed,total_failed", + [ + pytest.param( + "debian", + ["pkg1", {"apt": ["pkg2"], "snap": ["pkg3"]}], + False, + [], + ["pkg1", "pkg3"], + ["pkg1", "pkg2", "pkg3"], + id="debian_no_apt", + ), + pytest.param( + "debian", + ["pkg1", {"apt": ["pkg2"], "snap": ["pkg3"]}], + True, + ["pkg2"], + ["pkg3"], + ["pkg2", "pkg3"], + id="debian_with_apt", + ), + pytest.param( + "ubuntu", + ["pkg1", {"apt": ["pkg2"], "snap": ["pkg3"]}], + False, + [], + [], + ["pkg2"], + id="ubuntu_no_apt", + ), + pytest.param( + "ubuntu", + ["pkg1", {"apt": ["pkg2"], "snap": ["pkg3"]}], + True, + ["pkg1"], + ["pkg3"], + ["pkg3"], + id="ubuntu_with_apt", + ), + ], + ) + def test_failed( + self, + distro, + pkg_list, + apt_available, + apt_failed, + snap_failed, + total_failed, + mocker, + m_apt_install, + m_snap_install, + ): + """Test that failed packages are properly tracked. + + We need to ensure that the failed packages are properly tracked: + 1. When package install fails normally + 2. When package manager is not available + 3. When package manager is not explicitly supported by the distro + + So test various combinations of these scenarios. 
+ """ + mocker.patch( + "cloudinit.distros.package_management.apt.Apt.available", + return_value=apt_available, + ) + mocker.patch( + "cloudinit.distros.package_management.apt.Apt.install_packages", + return_value=apt_failed, + ) + mocker.patch( + "cloudinit.distros.package_management.snap.Snap.install_packages", + return_value=snap_failed, + ) + with pytest.raises(PackageInstallerError) as exc: + _get_distro(distro).install_packages(pkg_list) + message = exc.value.args[0] + assert "Failed to install the following packages" in message + for pkg in total_failed: + assert pkg in message diff -Nru cloud-init-23.4.4/tests/unittests/distros/test_netconfig.py cloud-init-24.1.3/tests/unittests/distros/test_netconfig.py --- cloud-init-23.4.4/tests/unittests/distros/test_netconfig.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/distros/test_netconfig.py 2024-03-27 13:14:04.000000000 +0000 @@ -1012,43 +1012,6 @@ def netplan_path(self): return "/etc/netplan/50-cloud-init.yaml" - def test_apply_network_config_v1_without_netplan(self): - # Note that this is in fact an invalid netctl config: - # "Address=None/None" - # But this is what the renderer has been writing out for a long time, - # and the test's purpose is to assert that the netctl renderer is - # still being used in absence of netplan, not the correctness of the - # rendered netctl config. - expected_cfgs = { - self.netctl_path("eth0"): dedent( - """\ - Address=192.168.1.5/255.255.255.0 - Connection=ethernet - DNS=() - Gateway=192.168.1.254 - IP=static - Interface=eth0 - """ - ), - self.netctl_path("eth1"): dedent( - """\ - Address=None/None - Connection=ethernet - DNS=() - Gateway= - IP=dhcp - Interface=eth1 - """ - ), - } - - self._apply_and_verify( - self.distro.apply_network_config, - V1_NET_CFG, - expected_cfgs=expected_cfgs.copy(), - with_netplan=False, - ) - def test_apply_network_config_v1_with_netplan(self): expected_cfgs = { self.netplan_path(): dedent( diff -Nru cloud-init-23.4.4/tests/unittests/distros/test_opensuse.py cloud-init-24.1.3/tests/unittests/distros/test_opensuse.py --- cloud-init-23.4.4/tests/unittests/distros/test_opensuse.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/distros/test_opensuse.py 2024-03-27 13:14:04.000000000 +0000 @@ -14,7 +14,7 @@ return_value=("/dev/sda1", "xfs", "/"), ) @mock.patch( - "cloudinit.distros.opensuse.util.load_file", + "cloudinit.distros.opensuse.util.load_text_file", return_value="foo\n/dev/sda1 / xfs rw,bar\n", ) @mock.patch( @@ -33,7 +33,7 @@ return_value=("/dev/sda1", "xfs", "/"), ) @mock.patch( - "cloudinit.distros.opensuse.util.load_file", + "cloudinit.distros.opensuse.util.load_text_file", return_value="foo\n/dev/sda1 / xfs rw,bar\n", ) @mock.patch( @@ -58,7 +58,7 @@ return_value=("/dev/sda1", "xfs", "/"), ) @mock.patch( - "cloudinit.distros.opensuse.util.load_file", + "cloudinit.distros.opensuse.util.load_text_file", return_value="foo\n/dev/sda1 / xfs rw,bar\n", ) @mock.patch( @@ -77,7 +77,7 @@ return_value=("/dev/sda1", "xfs", "/"), ) @mock.patch( - "cloudinit.distros.opensuse.util.load_file", + "cloudinit.distros.opensuse.util.load_text_file", return_value="foo\n/dev/sda1 / xfs rw,bar\n", ) @mock.patch( @@ -102,7 +102,7 @@ return_value=("/dev/sda1", "xfs", "/"), ) @mock.patch( - "cloudinit.distros.opensuse.util.load_file", + "cloudinit.distros.opensuse.util.load_text_file", return_value="foo\n/dev/sda1 / xfs rw,bar\n", ) @mock.patch( @@ -128,7 +128,7 @@ return_value=("/dev/sda1", "btrfs", "/"), ) @mock.patch( - 
"cloudinit.distros.opensuse.util.load_file", + "cloudinit.distros.opensuse.util.load_text_file", return_value="foo\n/dev/sda1 / btrfs rw,bar\n", ) @mock.patch("cloudinit.distros.opensuse.os.path.exists", return_value=True) @@ -150,7 +150,7 @@ return_value=("/dev/sda1", "btrfs", "/"), ) @mock.patch( - "cloudinit.distros.opensuse.util.load_file", + "cloudinit.distros.opensuse.util.load_text_file", return_value="foo\n/dev/sda1 / btrfs rw,bar\n", ) @mock.patch("cloudinit.distros.opensuse.os.path.exists", return_value=True) @@ -175,7 +175,7 @@ return_value=("/dev/sda1", "btrfs", "/"), ) @mock.patch( - "cloudinit.distros.opensuse.util.load_file", + "cloudinit.distros.opensuse.util.load_text_file", return_value="foo\n/dev/sda1 / btrf rw,bar\n", ) @mock.patch("cloudinit.distros.opensuse.os.path.exists", return_value=True) @@ -197,7 +197,7 @@ return_value=("/dev/sda1", "btrfs", "/"), ) @mock.patch( - "cloudinit.distros.opensuse.util.load_file", + "cloudinit.distros.opensuse.util.load_text_file", return_value="foo\n/dev/sda1 / btrfs rw,bar\n", ) @mock.patch("cloudinit.distros.opensuse.os.path.exists", return_value=True) @@ -222,7 +222,7 @@ return_value=("/dev/sda1", "btrfs", "/"), ) @mock.patch( - "cloudinit.distros.opensuse.util.load_file", + "cloudinit.distros.opensuse.util.load_text_file", return_value="foo\n/dev/sda1 / btrfs rw,bar\n", ) @mock.patch("cloudinit.distros.opensuse.os.path.exists", return_value=True) @@ -248,7 +248,7 @@ return_value=("/dev/sda1", "btrfs", "/"), ) @mock.patch( - "cloudinit.distros.opensuse.util.load_file", + "cloudinit.distros.opensuse.util.load_text_file", return_value="foo\n/dev/sda1 / btrfs ro,bar\n", ) @mock.patch( @@ -270,7 +270,7 @@ return_value=("/dev/sda1", "btrfs", "/"), ) @mock.patch( - "cloudinit.distros.opensuse.util.load_file", + "cloudinit.distros.opensuse.util.load_text_file", return_value="foo\n/dev/sda1 / btrfs rw,bar\n", ) @mock.patch( @@ -293,7 +293,7 @@ return_value=("/dev/sda1", "xfs", "/"), ) @mock.patch( - "cloudinit.distros.opensuse.util.load_file", + "cloudinit.distros.opensuse.util.load_text_file", return_value="foo\n/dev/sda1 / xfs ro,bar\n", ) @mock.patch("cloudinit.distros.opensuse.os.path.exists", return_value=True) @@ -314,7 +314,7 @@ return_value=("/dev/sda1", "btrfs", "/"), ) @mock.patch( - "cloudinit.distros.opensuse.util.load_file", + "cloudinit.distros.opensuse.util.load_text_file", return_value="foo\n/dev/sda1 / btrfs ro,bar\n", ) @mock.patch("cloudinit.distros.opensuse.os.path.exists", return_value=True) diff -Nru cloud-init-23.4.4/tests/unittests/distros/test_photon.py cloud-init-24.1.3/tests/unittests/distros/test_photon.py --- cloud-init-23.4.4/tests/unittests/distros/test_photon.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/distros/test_photon.py 2024-03-27 13:14:04.000000000 +0000 @@ -29,7 +29,7 @@ hostname = "myhostname" hostfile = self.tmp_path("previous-hostname") self.distro._write_hostname(hostname, hostfile) - self.assertEqual(hostname, util.load_file(hostfile)) + self.assertEqual(hostname, util.load_text_file(hostfile)) ret = self.distro._read_hostname(hostfile) self.assertEqual(ret, hostname) diff -Nru cloud-init-23.4.4/tests/unittests/filters/test_launch_index.py cloud-init-24.1.3/tests/unittests/filters/test_launch_index.py --- cloud-init-23.4.4/tests/unittests/filters/test_launch_index.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/filters/test_launch_index.py 2024-03-27 13:14:04.000000000 +0000 @@ -38,7 +38,7 @@ msg1_msgs = [m for m in 
filterfalse(ud.is_skippable, msg1_msgs)] msg2_msgs = [m for m in msg2.walk()] msg2_msgs = [m for m in filterfalse(ud.is_skippable, msg2_msgs)] - for i in range(0, len(msg2_msgs)): + for i in range(len(msg2_msgs)): m1_msg = msg1_msgs[i] m2_msg = msg2_msgs[i] if m1_msg.get_charset() != m2_msg.get_charset(): diff -Nru cloud-init-23.4.4/tests/unittests/helpers.py cloud-init-24.1.3/tests/unittests/helpers.py --- cloud-init-23.4.4/tests/unittests/helpers.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/helpers.py 2024-03-27 13:14:04.000000000 +0000 @@ -12,7 +12,6 @@ import time import unittest from contextlib import ExitStack, contextmanager -from pathlib import Path from typing import ClassVar, List, Union from unittest import mock from unittest.util import strclass @@ -20,7 +19,6 @@ import responses -import cloudinit from cloudinit import atomic_helper, cloud, distros from cloudinit import helpers as ch from cloudinit import subp, util @@ -30,6 +28,7 @@ ) from cloudinit.sources import DataSourceNone from cloudinit.templater import JINJA_AVAILABLE +from tests.helpers import cloud_init_project_dir from tests.hypothesis_jsonschema import HAS_HYPOTHESIS_JSONSCHEMA _real_subp = subp.subp @@ -47,6 +46,14 @@ HAS_APT_PKG = False +# Used by tests to verify the error message when a jsonschema structure +# is empty but should not be. +# Version 4.20.0 of jsonschema changed the error messages for empty structures. +SCHEMA_EMPTY_ERROR = ( + "(is too short|should be non-empty|does not have enough properties)" +) + + # Makes the old path start # with new base instead of whatever # it previously had @@ -69,7 +76,7 @@ nam = am if am == -1: nam = len(n_args) - for i in range(0, nam): + for i in range(nam): path = args[i] # patchOS() wraps various os and os.path functions, however in # Python 3 some of these now accept file-descriptors (integers). 
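Note on the SCHEMA_EMPTY_ERROR regex added above: jsonschema 4.20.0 changed how empty structures are reported, so tests match any of the known wordings rather than pinning one library version. A minimal sketch of the pattern in use (the schema, payload, and test name below are illustrative, not part of this diff):

    import jsonschema
    import pytest

    # Tolerate both the pre-4.20.0 wording ("is too short") and the
    # newer one ("should be non-empty") for empty JSON structures.
    SCHEMA_EMPTY_ERROR = (
        "(is too short|should be non-empty|does not have enough properties)"
    )

    def test_empty_array_is_rejected():
        schema = {"type": "array", "minItems": 1}
        # pytest.raises(match=...) runs re.search against str(excinfo.value),
        # so the alternation matches whichever message this version emits.
        with pytest.raises(
            jsonschema.exceptions.ValidationError, match=SCHEMA_EMPTY_ERROR
        ):
            jsonschema.validate([], schema)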
@@ -153,6 +160,7 @@ handler.setFormatter(formatter) self.old_handlers = self.logger.handlers self.logger.handlers = [handler] + self.old_level = logging.root.level if self.allowed_subp is True: subp.subp = _real_subp else: @@ -194,7 +202,7 @@ if self.with_logs: # Remove the handler we setup logging.getLogger().handlers = self.old_handlers - logging.getLogger().setLevel(logging.NOTSET) + logging.getLogger().setLevel(self.old_level) subp.subp = _real_subp super(CiTestCase, self).tearDown() @@ -285,7 +293,8 @@ util: [ ("write_file", 1), ("append_file", 1), - ("load_file", 1), + ("load_binary_file", 1), + ("load_text_file", 1), ("ensure_dir", 1), ("chmod", 1), ("delete_dir_contents", 1), @@ -481,7 +490,7 @@ for fname in files: fpath = os.path.join(root, fname) key = fpath[len(prefix) :] - flist[key] = util.load_file(fpath) + flist[key] = util.load_text_file(fpath) return flist @@ -596,24 +605,6 @@ mock.Mock.assert_not_called = __mock_assert_not_called # type: ignore -def get_top_level_dir() -> Path: - """Return the absolute path to the top cloudinit project directory - - @return Path('') - """ - return Path(cloudinit.__file__).parent.parent.resolve() - - -def cloud_init_project_dir(sub_path: str) -> str: - """Get a path within the cloudinit project directory - - @return str of the combined path - - Example: cloud_init_project_dir("my/path") -> "/path/to/cloud-init/my/path" - """ - return str(get_top_level_dir() / sub_path) - - @contextmanager def does_not_raise(): """Context manager to parametrize tests raising and not raising exceptions diff -Nru cloud-init-23.4.4/tests/unittests/net/test_dhcp.py cloud-init-24.1.3/tests/unittests/net/test_dhcp.py --- cloud-init-23.4.4/tests/unittests/net/test_dhcp.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/net/test_dhcp.py 2024-03-27 13:14:04.000000000 +0000 @@ -2,12 +2,18 @@ import os import signal +import socket +import subprocess from textwrap import dedent import pytest import responses +from cloudinit.distros import alpine, amazon, centos, debian, freebsd, rhel +from cloudinit.distros.ubuntu import Distro from cloudinit.net.dhcp import ( + DHCLIENT_FALLBACK_LEASE_DIR, + Dhcpcd, InvalidDHCPLeaseFileError, IscDhclient, NoDHCPLeaseError, @@ -18,7 +24,8 @@ networkd_load_leases, ) from cloudinit.net.ephemeral import EphemeralDHCPv4 -from cloudinit.util import ensure_file, subp, write_file +from cloudinit.subp import SubpResult +from cloudinit.util import ensure_file, load_binary_file, subp, write_file from tests.unittests.helpers import ( CiTestCase, ResponsesTestCase, @@ -30,34 +37,74 @@ PID_F = "/run/dhclient.pid" LEASE_F = "/run/dhclient.lease" DHCLIENT = "/sbin/dhclient" +ib_address_prefix = "00:00:00:00:00:00:00:00:00:00:00:00" +@pytest.mark.parametrize( + "server_address,lease_file_content", + ( + pytest.param(None, None, id="no_server_addr_on_absent_lease_file"), + pytest.param(None, "", id="no_server_addr_on_empty_lease_file"), + pytest.param( + None, + "lease {\n fixed-address: 10.1.2.3;\n}\n", + id="no_server_addr_when_no_server_ident", + ), + pytest.param( + "10.4.5.6", + "lease {\n fixed-address: 10.1.2.3;\n" + " option dhcp-server-identifier 10.4.5.6;\n" + " option dhcp-renewal-time 1800;\n}\n", + id="server_addr_found_when_server_ident_present", + ), + ), +) +class TestParseDHCPServerFromLeaseFile: + @pytest.mark.usefixtures("dhclient_exists") + def test_find_server_address_when_present( + self, server_address, lease_file_content, tmp_path + ): + """Test that we return None in the case of no file or file 
contains no + server address, otherwise return the address. + """ + dhclient = IscDhclient() + dhclient.lease_file = tmp_path / "dhcp.leases" + if lease_file_content: + dhclient.lease_file.write_text(lease_file_content) + if server_address: + assert server_address == dhclient.get_newest_lease("eth0").get( + "dhcp-server-identifier" + ) + else: + assert None is dhclient.get_newest_lease("eth0").get( + "dhcp-server-identifier" + ) + + +@pytest.mark.usefixtures("dhclient_exists") class TestParseDHCPLeasesFile(CiTestCase): def test_parse_empty_lease_file_errors(self): - """parse_dhcp_lease_file errors when file content is empty.""" - empty_file = self.tmp_path("leases") - ensure_file(empty_file) - with self.assertRaises(InvalidDHCPLeaseFileError) as context_manager: - IscDhclient.parse_dhcp_lease_file(empty_file) - error = context_manager.exception - self.assertIn("Cannot parse empty dhcp lease file", str(error)) + """get_newest_lease errors when file content is empty.""" + client = IscDhclient() + client.lease_file = self.tmp_path("leases") + ensure_file(client.lease_file) + assert not client.get_newest_lease("eth0") def test_parse_malformed_lease_file_content_errors(self): - """IscDhclient.parse_dhcp_lease_file errors when file content isn't + """IscDhclient.get_newest_lease errors when file content isn't dhcp leases. """ - non_lease_file = self.tmp_path("leases") - write_file(non_lease_file, "hi mom.") - with self.assertRaises(InvalidDHCPLeaseFileError) as context_manager: - IscDhclient.parse_dhcp_lease_file(non_lease_file) - error = context_manager.exception - self.assertIn("Cannot parse dhcp lease file", str(error)) + client = IscDhclient() + client.lease_file = self.tmp_path("leases") + write_file(client.lease_file, "hi mom.") + assert not client.get_newest_lease("eth0") def test_parse_multiple_leases(self): - """IscDhclient.parse_dhcp_lease_file returns a list of all leases + """IscDhclient().get_newest_lease returns the latest lease within. """ - lease_file = self.tmp_path("leases") + client = IscDhclient() + client.lease_file = self.tmp_path("leases") content = dedent( """ lease { @@ -78,38 +125,30 @@ } """ ) - expected = [ - { - "interface": "wlp3s0", - "fixed-address": "192.168.2.74", - "subnet-mask": "255.255.255.0", - "routers": "192.168.2.1", - "renew": "4 2017/07/27 18:02:30", - "expire": "5 2017/07/28 07:08:15", - "filename": "http://192.168.2.50/boot.php?mac=${netX}", - }, - { - "interface": "wlp3s0", - "fixed-address": "192.168.2.74", - "filename": "http://192.168.2.50/boot.php?mac=${netX}", - "subnet-mask": "255.255.255.0", - "routers": "192.168.2.1", - }, - ] - write_file(lease_file, content) - self.assertCountEqual( - expected, IscDhclient.parse_dhcp_lease_file(lease_file) - ) + expected = { + "interface": "wlp3s0", + "fixed-address": "192.168.2.74", + "filename": "http://192.168.2.50/boot.php?mac=${netX}", + "subnet-mask": "255.255.255.0", + "routers": "192.168.2.1", + } + write_file(client.lease_file, content) + got = client.get_newest_lease("eth0") + self.assertCountEqual(got, expected) +@pytest.mark.usefixtures("dhclient_exists") class TestDHCPRFC3442(CiTestCase): def test_parse_lease_finds_rfc3442_classless_static_routes(self): - """IscDhclient.parse_dhcp_lease_file returns + """IscDhclient().get_newest_lease() returns rfc3442-classless-static-routes. 
""" - lease_file = self.tmp_path("leases") - content = dedent( - """ + client = IscDhclient() + client.lease_file = self.tmp_path("leases") + write_file( + client.lease_file, + dedent( + """ lease { interface "wlp3s0"; fixed-address 192.168.2.74; @@ -119,30 +158,27 @@ renew 4 2017/07/27 18:02:30; expire 5 2017/07/28 07:08:15; } - """ - ) - expected = [ - { - "interface": "wlp3s0", - "fixed-address": "192.168.2.74", - "subnet-mask": "255.255.255.0", - "routers": "192.168.2.1", - "rfc3442-classless-static-routes": "0,130,56,240,1", - "renew": "4 2017/07/27 18:02:30", - "expire": "5 2017/07/28 07:08:15", - } - ] - write_file(lease_file, content) - self.assertCountEqual( - expected, IscDhclient.parse_dhcp_lease_file(lease_file) + """ + ), ) + expected = { + "interface": "wlp3s0", + "fixed-address": "192.168.2.74", + "subnet-mask": "255.255.255.0", + "routers": "192.168.2.1", + "rfc3442-classless-static-routes": "0,130,56,240,1", + "renew": "4 2017/07/27 18:02:30", + "expire": "5 2017/07/28 07:08:15", + } + self.assertCountEqual(expected, client.get_newest_lease("eth0")) def test_parse_lease_finds_classless_static_routes(self): """ - IscDhclient.parse_dhcp_lease_file returns classless-static-routes + IscDhclient().get_newest_lease returns classless-static-routes for Centos lease format. """ - lease_file = self.tmp_path("leases") + client = IscDhclient() + client.lease_file = self.tmp_path("leases") content = dedent( """ lease { @@ -156,38 +192,31 @@ } """ ) - expected = [ - { - "interface": "wlp3s0", - "fixed-address": "192.168.2.74", - "subnet-mask": "255.255.255.0", - "routers": "192.168.2.1", - "classless-static-routes": "0 130.56.240.1", - "renew": "4 2017/07/27 18:02:30", - "expire": "5 2017/07/28 07:08:15", - } - ] - write_file(lease_file, content) - self.assertCountEqual( - expected, IscDhclient.parse_dhcp_lease_file(lease_file) - ) + expected = { + "interface": "wlp3s0", + "fixed-address": "192.168.2.74", + "subnet-mask": "255.255.255.0", + "routers": "192.168.2.1", + "classless-static-routes": "0 130.56.240.1", + "renew": "4 2017/07/27 18:02:30", + "expire": "5 2017/07/28 07:08:15", + } + write_file(client.lease_file, content) + self.assertCountEqual(expected, client.get_newest_lease("eth0")) @mock.patch("cloudinit.net.ephemeral.EphemeralIPv4Network") @mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery") def test_obtain_lease_parses_static_routes(self, m_maybe, m_ipv4): """EphemeralDHPCv4 parses rfc3442 routes for EphemeralIPv4Network""" - lease = [ - { - "interface": "wlp3s0", - "fixed-address": "192.168.2.74", - "subnet-mask": "255.255.255.0", - "routers": "192.168.2.1", - "rfc3442-classless-static-routes": "0,130,56,240,1", - "renew": "4 2017/07/27 18:02:30", - "expire": "5 2017/07/28 07:08:15", - } - ] - m_maybe.return_value = lease + m_maybe.return_value = { + "interface": "wlp3s0", + "fixed-address": "192.168.2.74", + "subnet-mask": "255.255.255.0", + "routers": "192.168.2.1", + "rfc3442-classless-static-routes": "0,130,56,240,1", + "renew": "4 2017/07/27 18:02:30", + "expire": "5 2017/07/28 07:08:15", + } distro = MockDistro() eph = EphemeralDHCPv4(distro) eph.obtain_lease() @@ -208,18 +237,15 @@ EphemeralDHPCv4 parses rfc3442 routes for EphemeralIPv4Network for Centos Lease format """ - lease = [ - { - "interface": "wlp3s0", - "fixed-address": "192.168.2.74", - "subnet-mask": "255.255.255.0", - "routers": "192.168.2.1", - "classless-static-routes": "0 130.56.240.1", - "renew": "4 2017/07/27 18:02:30", - "expire": "5 2017/07/28 07:08:15", - } - ] - 
m_maybe.return_value = lease + m_maybe.return_value = { + "interface": "wlp3s0", + "fixed-address": "192.168.2.74", + "subnet-mask": "255.255.255.0", + "routers": "192.168.2.1", + "classless-static-routes": "0 130.56.240.1", + "renew": "4 2017/07/27 18:02:30", + "expire": "5 2017/07/28 07:08:15", + } distro = MockDistro() eph = EphemeralDHCPv4(distro) eph.obtain_lease() @@ -349,29 +375,13 @@ ) -class TestDHCPDiscoveryClean(CiTestCase): - with_logs = True - ib_address_prefix = "00:00:00:00:00:00:00:00:00:00:00:00" - - @mock.patch("cloudinit.net.dhcp.find_fallback_nic") - def test_no_fallback_nic_found(self, m_fallback_nic): - """Log and do nothing when nic is absent and no fallback is found.""" - m_fallback_nic.return_value = None # No fallback nic found - - with pytest.raises(NoDHCPLeaseInterfaceError): - maybe_perform_dhcp_discovery(MockDistro()) - - self.assertIn( - "Skip dhcp_discovery: Unable to find fallback nic.", - self.logs.getvalue(), - ) - - @mock.patch("cloudinit.net.dhcp.find_fallback_nic", return_value="eth9") +class TestDHCPDiscoveryClean: + @mock.patch("cloudinit.distros.net.find_fallback_nic", return_value="eth9") @mock.patch("cloudinit.net.dhcp.os.remove") @mock.patch("cloudinit.net.dhcp.subp.subp") @mock.patch("cloudinit.net.dhcp.subp.which") - def test_dhclient_exits_with_error( - self, m_which, m_subp, m_remove, m_fallback + def test_dhcpcd_exits_with_error( + self, m_which, m_subp, m_remove, m_fallback, caplog ): """Log and do nothing when nic is absent and no fallback is found.""" m_subp.side_effect = [ @@ -380,62 +390,44 @@ ] with pytest.raises(NoDHCPLeaseError): - maybe_perform_dhcp_discovery(MockDistro()) + maybe_perform_dhcp_discovery(Distro("fake but not", {}, None)) - self.assertIn( - "DHCP client selected: dhclient", - self.logs.getvalue(), - ) + assert "DHCP client selected: dhcpcd" in caplog.text - @mock.patch("cloudinit.net.dhcp.find_fallback_nic", return_value="eth9") + @mock.patch("cloudinit.distros.net.find_fallback_nic", return_value="eth9") @mock.patch("cloudinit.net.dhcp.os.remove") @mock.patch("cloudinit.net.dhcp.subp.subp") @mock.patch("cloudinit.net.dhcp.subp.which") - def test_dhcp_client_failover(self, m_which, m_subp, m_remove, m_fallback): - """Log and do nothing when nic is absent and no fallback is found.""" + def test_dhcp_client_failover( + self, m_which, m_subp, m_remove, m_fallback, caplog + ): + """Log and do nothing when nic is absent and no fallback client is + found.""" m_subp.side_effect = [ ("", ""), subp.ProcessExecutionError(exit_code=-5), ] - m_which.side_effect = [False, True] + m_which.side_effect = [False, False, False, False] with pytest.raises(NoDHCPLeaseError): - maybe_perform_dhcp_discovery(MockDistro()) - - self.assertIn( - "DHCP client not found: dhclient", - self.logs.getvalue(), - ) - self.assertIn( - "DHCP client not found: dhcpcd", - self.logs.getvalue(), - ) - - @mock.patch("cloudinit.net.dhcp.find_fallback_nic", return_value=None) - def test_provided_nic_does_not_exist(self, m_fallback_nic): - """When the provided nic doesn't exist, log a message and no-op.""" - with pytest.raises(NoDHCPLeaseInterfaceError): - maybe_perform_dhcp_discovery(MockDistro(), "idontexist") + maybe_perform_dhcp_discovery(Distro("somename", {}, None)) - self.assertIn( - "Skip dhcp_discovery: nic idontexist not found in get_devicelist.", - self.logs.getvalue(), - ) + assert "DHCP client not found: dhclient" in caplog.text + assert "DHCP client not found: dhcpcd" in caplog.text + assert "DHCP client not found: udhcpc" in caplog.text 
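Note on the test conversions above: pytest's caplog fixture replaces CiTestCase's with_logs/self.logs buffer, so log assertions become plain substring checks against caplog.text. A minimal standalone sketch of that idiom (the logger name, helper function, and message are illustrative stand-ins, not the real cloud-init code paths):

    import logging

    LOG = logging.getLogger("cloudinit.net.dhcp")

    def select_dhcp_client():
        # Hypothetical stand-in for the client-selection logic under test.
        LOG.warning("DHCP client not found: dhclient")

    def test_warns_when_client_missing(caplog):
        # caplog.text concatenates every captured record, so the old
        # self.assertIn(..., self.logs.getvalue()) becomes a substring check.
        with caplog.at_level(logging.WARNING):
            select_dhcp_client()
        assert "DHCP client not found: dhclient" in caplog.text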
@mock.patch("cloudinit.net.dhcp.subp.which") - @mock.patch("cloudinit.net.dhcp.find_fallback_nic") - def test_absent_dhclient_command(self, m_fallback, m_which): + @mock.patch("cloudinit.distros.net.find_fallback_nic") + def test_absent_dhclient_command(self, m_fallback, m_which, caplog): """When dhclient doesn't exist in the OS, log the issue and no-op.""" m_fallback.return_value = "eth9" m_which.return_value = None # dhclient isn't found - with pytest.raises(NoDHCPLeaseMissingDhclientError): - maybe_perform_dhcp_discovery(MockDistro()) + maybe_perform_dhcp_discovery(Distro("whoa", {}, None)) - self.assertIn( - "Skip dhclient configuration: No dhclient command found.", - self.logs.getvalue(), - ) + assert "DHCP client not found: dhclient" in caplog.text + assert "DHCP client not found: dhcpcd" in caplog.text + assert "DHCP client not found: udhcpc" in caplog.text @mock.patch("cloudinit.net.dhcp.os.remove") @mock.patch("time.sleep", mock.MagicMock()) @@ -444,7 +436,7 @@ @mock.patch("cloudinit.net.dhcp.subp.which", return_value="/sbin/dhclient") @mock.patch("cloudinit.net.dhcp.util.wait_for_files", return_value=False) def test_dhcp_discovery_warns_invalid_pid( - self, m_wait, m_which, m_subp, m_kill, m_remove + self, m_wait, m_which, m_subp, m_kill, m_remove, caplog ): """dhcp_discovery logs a warning when pidfile contains invalid content. @@ -464,60 +456,48 @@ ) with mock.patch( - "cloudinit.util.load_file", return_value=lease_content + "cloudinit.util.load_text_file", return_value=lease_content ): - self.assertCountEqual( - [ - { - "interface": "eth9", - "fixed-address": "192.168.2.74", - "subnet-mask": "255.255.255.0", - "routers": "192.168.2.1", - } - ], - IscDhclient.parse_dhcp_lease_file("lease"), - ) - with self.assertRaises(InvalidDHCPLeaseFileError): - with mock.patch("cloudinit.util.load_file", return_value=""): + assert { + "interface": "eth9", + "fixed-address": "192.168.2.74", + "subnet-mask": "255.255.255.0", + "routers": "192.168.2.1", + } == IscDhclient().get_newest_lease("eth0") + with pytest.raises(InvalidDHCPLeaseFileError): + with mock.patch("cloudinit.util.load_text_file", return_value=""): IscDhclient().dhcp_discovery("eth9", distro=MockDistro()) - self.assertIn( - "dhclient(pid=, parentpid=unknown) failed " - "to daemonize after 10.0 seconds", - self.logs.getvalue(), + assert ( + "dhclient(pid=, parentpid=unknown) failed to daemonize after" + " 10.0 seconds" in caplog.text ) m_kill.assert_not_called() @mock.patch("cloudinit.net.dhcp.os.remove") - @mock.patch("cloudinit.net.dhcp.util.get_proc_ppid") @mock.patch("cloudinit.net.dhcp.os.kill") @mock.patch("cloudinit.net.dhcp.util.wait_for_files") @mock.patch("cloudinit.net.dhcp.subp.which", return_value="/sbin/dhclient") @mock.patch("cloudinit.net.dhcp.subp.subp") def test_dhcp_discovery_waits_on_lease_and_pid( - self, m_subp, m_which, m_wait, m_kill, m_getppid, m_remove + self, m_subp, m_which, m_wait, m_kill, m_remove, caplog ): """dhcp_discovery waits for the presence of pidfile and dhcp.leases.""" m_subp.return_value = ("", "") # Don't create pid or leases file m_wait.return_value = [PID_F] # Return the missing pidfile wait for - m_getppid.return_value = 1 # Indicate that dhclient has daemonized - self.assertEqual( - [], IscDhclient().dhcp_discovery("eth9", distro=MockDistro()) - ) - self.assertEqual( - mock.call([PID_F, LEASE_F], maxwait=5, naplen=0.01), - m_wait.call_args_list[0], - ) - self.assertIn( - "WARNING: dhclient did not produce expected files: dhclient.pid", - self.logs.getvalue(), + assert {} == 
IscDhclient().dhcp_discovery("eth9", distro=MockDistro()) + m_wait.assert_called_once_with( + [PID_F, LEASE_F], maxwait=5, naplen=0.01 + ) + assert ( + "dhclient did not produce expected files: dhclient.pid" + in caplog.text ) m_kill.assert_not_called() @mock.patch("cloudinit.net.dhcp.is_ib_interface", return_value=False) @mock.patch("cloudinit.net.dhcp.os.remove") - @mock.patch("cloudinit.net.dhcp.util.get_proc_ppid") @mock.patch("cloudinit.net.dhcp.os.kill") @mock.patch("cloudinit.net.dhcp.subp.subp") @mock.patch("cloudinit.net.dhcp.subp.which", return_value="/sbin/dhclient") @@ -528,7 +508,6 @@ m_which, m_subp, m_kill, - m_getppid, m_remove, mocked_is_ib_interface, ): @@ -548,22 +527,15 @@ """ ) my_pid = 1 - m_getppid.return_value = 1 # Indicate that dhclient has daemonized - with mock.patch( - "cloudinit.util.load_file", side_effect=["1", lease_content] + "cloudinit.util.load_text_file", side_effect=["1", lease_content] ): - self.assertCountEqual( - [ - { - "interface": "eth9", - "fixed-address": "192.168.2.74", - "subnet-mask": "255.255.255.0", - "routers": "192.168.2.1", - } - ], - IscDhclient().dhcp_discovery("eth9", distro=MockDistro()), - ) + assert { + "interface": "eth9", + "fixed-address": "192.168.2.74", + "subnet-mask": "255.255.255.0", + "routers": "192.168.2.1", + } == IscDhclient().dhcp_discovery("eth9", distro=MockDistro()) # Interface was brought up before dhclient called m_subp.assert_has_calls( [ @@ -597,7 +569,6 @@ ) @mock.patch("cloudinit.net.dhcp.is_ib_interface", return_value=True) @mock.patch("cloudinit.net.dhcp.os.remove") - @mock.patch("cloudinit.net.dhcp.util.get_proc_ppid", return_value=1) @mock.patch("cloudinit.net.dhcp.os.kill") @mock.patch("cloudinit.net.dhcp.subp.which", return_value="/sbin/dhclient") @mock.patch("cloudinit.net.dhcp.subp.subp", return_value=("", "")) @@ -608,7 +579,6 @@ m_subp, m_which, m_kill, - m_getppid, m_remove, mocked_is_ib_interface, get_interface_mac, @@ -631,19 +601,14 @@ ) my_pid = 1 with mock.patch( - "cloudinit.util.load_file", side_effect=["1", lease_content] + "cloudinit.util.load_text_file", side_effect=["1", lease_content] ): - self.assertCountEqual( - [ - { - "interface": "ib0", - "fixed-address": "192.168.2.74", - "subnet-mask": "255.255.255.0", - "routers": "192.168.2.1", - } - ], - IscDhclient().dhcp_discovery("ib0", distro=MockDistro()), - ) + assert { + "interface": "ib0", + "fixed-address": "192.168.2.74", + "subnet-mask": "255.255.255.0", + "routers": "192.168.2.1", + } == IscDhclient().dhcp_discovery("ib0", distro=MockDistro()) # Interface was brought up before dhclient called m_subp.assert_has_calls( [ @@ -679,20 +644,18 @@ ) @mock.patch("cloudinit.net.dhcp.os.remove") - @mock.patch("cloudinit.net.dhcp.util.get_proc_ppid") @mock.patch("cloudinit.net.dhcp.os.kill") @mock.patch("cloudinit.net.dhcp.subp.subp") @mock.patch("cloudinit.net.dhcp.subp.which", return_value="/sbin/dhclient") @mock.patch("cloudinit.util.wait_for_files") def test_dhcp_output_error_stream( - self, m_wait, m_which, m_subp, m_kill, m_getppid, m_remove + self, m_wait, m_which, m_subp, m_kill, m_remove, tmpdir ): """ "dhcp_log_func is called with the output and error streams of dhclient when the callable is passed.""" dhclient_err = "FAKE DHCLIENT ERROR" dhclient_out = "FAKE DHCLIENT OUT" m_subp.return_value = (dhclient_out, dhclient_err) - tmpdir = self.tmp_dir() lease_content = dedent( """ lease { @@ -708,11 +671,10 @@ pid_file = os.path.join(tmpdir, "dhclient.pid") my_pid = 1 write_file(pid_file, "%d\n" % my_pid) - m_getppid.return_value = 1 # 
Indicate that dhclient has daemonized def dhcp_log_func(out, err): - self.assertEqual(out, dhclient_out) - self.assertEqual(err, dhclient_err) + assert out == dhclient_out + assert err == dhclient_err IscDhclient().dhcp_discovery( "eth9", dhcp_log_func=dhcp_log_func, distro=MockDistro() @@ -859,12 +821,11 @@ ): """No EphemeralDhcp4 network setup when connectivity_url succeeds.""" url = "http://example.org/index.html" - fake_lease = { + m_dhcp.return_value = { "interface": "eth9", "fixed-address": "192.168.2.2", "subnet-mask": "255.255.0.0", } - m_dhcp.return_value = [fake_lease] m_subp.return_value = ("", "") self.responses.add(responses.GET, url, body=b"", status=404) @@ -872,7 +833,7 @@ MockDistro(), connectivity_url_data={"url": url}, ) as lease: - self.assertEqual(fake_lease, lease) + self.assertEqual(m_dhcp.return_value, lease) # Ensure that dhcp discovery occurs m_dhcp.assert_called_once() @@ -935,30 +896,12 @@ with_logs = True maxDiff = None - @mock.patch("cloudinit.net.dhcp.subp.which") - @mock.patch("cloudinit.net.dhcp.find_fallback_nic") - def test_absent_udhcpc_command(self, m_fallback, m_which): - """When dhclient doesn't exist in the OS, log the issue and no-op.""" - m_fallback.return_value = "eth9" - m_which.return_value = None # udhcpc isn't found - - distro = MockDistro() - distro.dhcp_client_priority = [Udhcpc] - - with pytest.raises(NoDHCPLeaseMissingDhclientError): - maybe_perform_dhcp_discovery(distro) - - self.assertIn( - "Skip udhcpc configuration: No udhcpc command found.", - self.logs.getvalue(), - ) - @mock.patch("cloudinit.net.dhcp.is_ib_interface", return_value=False) @mock.patch("cloudinit.net.dhcp.subp.which", return_value="/sbin/udhcpc") @mock.patch("cloudinit.net.dhcp.os.remove") @mock.patch("cloudinit.net.dhcp.subp.subp") @mock.patch("cloudinit.util.load_json") - @mock.patch("cloudinit.util.load_file") + @mock.patch("cloudinit.util.load_text_file") @mock.patch("cloudinit.util.write_file") def test_udhcpc_discovery( self, @@ -980,18 +923,13 @@ "static_routes": "10.240.0.1/32 0.0.0.0 0.0.0.0/0 10.240.0.1", } self.assertEqual( - [ - { - "fixed-address": "192.168.2.74", - "interface": "eth9", - "routers": "192.168.2.1", - "static_routes": [ - ("10.240.0.1/32", "0.0.0.0"), - ("0.0.0.0/0", "10.240.0.1"), - ], - "subnet-mask": "255.255.255.0", - } - ], + { + "fixed-address": "192.168.2.74", + "interface": "eth9", + "routers": "192.168.2.1", + "static_routes": "10.240.0.1/32 0.0.0.0 0.0.0.0/0 10.240.0.1", + "subnet-mask": "255.255.255.0", + }, Udhcpc().dhcp_discovery("eth9", distro=MockDistro()), ) # Interface was brought up before dhclient called @@ -1023,12 +961,15 @@ ) @mock.patch("cloudinit.net.dhcp.is_ib_interface", return_value=True) - @mock.patch("cloudinit.net.dhcp.get_ib_interface_hwaddr") + @mock.patch( + "cloudinit.net.dhcp.get_interface_mac", + return_value="%s:AA:AA:AA:00:00:AA:AA:AA" % ib_address_prefix, + ) @mock.patch("cloudinit.net.dhcp.subp.which", return_value="/sbin/udhcpc") @mock.patch("cloudinit.net.dhcp.os.remove") @mock.patch("cloudinit.net.dhcp.subp.subp") @mock.patch("cloudinit.util.load_json") - @mock.patch("cloudinit.util.load_file") + @mock.patch("cloudinit.util.load_text_file") @mock.patch("cloudinit.util.write_file") def test_udhcpc_discovery_ib( self, @@ -1050,20 +991,14 @@ "routers": "192.168.2.1", "static_routes": "10.240.0.1/32 0.0.0.0 0.0.0.0/0 10.240.0.1", } - m_get_ib_interface_hwaddr.return_value = "00:21:28:00:01:cf:4b:01" self.assertEqual( - [ - { - "fixed-address": "192.168.2.74", - "interface": "ib0", - "routers": 
"192.168.2.1", - "static_routes": [ - ("10.240.0.1/32", "0.0.0.0"), - ("0.0.0.0/0", "10.240.0.1"), - ], - "subnet-mask": "255.255.255.0", - } - ], + { + "fixed-address": "192.168.2.74", + "interface": "ib0", + "routers": "192.168.2.1", + "static_routes": "10.240.0.1/32 0.0.0.0 0.0.0.0/0 10.240.0.1", + "subnet-mask": "255.255.255.0", + }, Udhcpc().dhcp_discovery("ib0", distro=MockDistro()), ) # Interface was brought up before dhclient called @@ -1086,7 +1021,7 @@ "-f", "-v", "-x", - "0x3d:0021280001cf4b01", + "0x3d:20AAAAAA0000AAAAAA", ], update_env={ "LEASE_FILE": "/var/tmp/cloud-init/ib0.lease.json" @@ -1095,3 +1030,311 @@ ), ] ) + + +class TestISCDHClient(CiTestCase): + @mock.patch( + "os.listdir", + return_value=( + "some_file", + # rhel style lease file + "dhclient-0-u-u-i-d-enp2s0f0.lease", + "some_other_file", + ), + ) + @mock.patch("os.path.getmtime", return_value=123.45) + def test_get_newest_lease_file_from_distro_rhel(self, *_): + """ + Test that an rhel style lease has been found + """ + self.assertEqual( + "/var/lib/NetworkManager/dhclient-0-u-u-i-d-enp2s0f0.lease", + IscDhclient.get_newest_lease_file_from_distro(rhel.Distro), + ) + + @mock.patch( + "os.listdir", + return_value=( + "some_file", + # amazon linux style + "dhclient--eth0.leases", + "some_other_file", + ), + ) + @mock.patch("os.path.getmtime", return_value=123.45) + def test_get_newest_lease_file_from_distro_amazonlinux(self, *_): + """ + Test that an amazon style lease has been found + """ + self.assertEqual( + "/var/lib/dhcp/dhclient--eth0.leases", + IscDhclient.get_newest_lease_file_from_distro(amazon.Distro), + ) + + @mock.patch( + "os.listdir", + return_value=( + "some_file", + # freebsd style lease file + "dhclient.leases.vtynet0", + "some_other_file", + ), + ) + @mock.patch("os.path.getmtime", return_value=123.45) + def test_get_newest_lease_file_from_distro_freebsd(self, *_): + """ + Test that an freebsd style lease has been found + """ + self.assertEqual( + "/var/db/dhclient.leases.vtynet0", + IscDhclient.get_newest_lease_file_from_distro(freebsd.Distro), + ) + + @mock.patch( + "os.listdir", + return_value=( + "some_file", + # alpine style lease file + "dhclient.leases", + "some_other_file", + ), + ) + @mock.patch("os.path.getmtime", return_value=123.45) + def test_get_newest_lease_file_from_distro_alpine(self, *_): + """ + Test that an alpine style lease has been found + """ + self.assertEqual( + "/var/lib/dhcp/dhclient.leases", + IscDhclient.get_newest_lease_file_from_distro(alpine.Distro), + ) + + @mock.patch( + "os.listdir", + return_value=( + "some_file", + # debian style lease file + "dhclient.eth0.leases", + "some_other_file", + ), + ) + @mock.patch("os.path.getmtime", return_value=123.45) + def test_get_newest_lease_file_from_distro_debian(self, *_): + """ + Test that an debian style lease has been found + """ + self.assertEqual( + "/var/lib/dhcp/dhclient.eth0.leases", + IscDhclient.get_newest_lease_file_from_distro(debian.Distro), + ) + + # If argument to listdir is '/var/lib/NetworkManager' + # then mock an empty reply + # otherwise mock a reply with leasefile + @mock.patch( + "os.listdir", + side_effect=lambda x: ( + [] + if x == "/var/lib/NetworkManager" + else ["some_file", "!@#$-eth0.lease", "some_other_file"] + ), + ) + @mock.patch("os.path.getmtime", return_value=123.45) + def test_fallback_when_nothing_found(self, *_): + """ + This tests a situation where Distro provides lease information + but the lease wasn't found on that location + """ + self.assertEqual( + 
os.path.join(DHCLIENT_FALLBACK_LEASE_DIR, "!@#$-eth0.lease"), + IscDhclient.get_newest_lease_file_from_distro( + rhel.Distro("", {}, {}) + ), + ) + + @mock.patch( + "os.listdir", + return_value=( + "some_file", + "totally_not_a_leasefile", + "some_other_file", + ), + ) + @mock.patch("os.path.getmtime", return_value=123.45) + def test_get_newest_lease_file_from_distro_notfound(self, *_): + """ + Test the case when no leases were found + """ + # Any Distro would suffice for the absence test, choose Centos then. + self.assertEqual( + None, + IscDhclient.get_newest_lease_file_from_distro(centos.Distro), + ) + + +class TestDhcpcd: + def test_parse_lease_dump(self): + lease = dedent( + """ + broadcast_address='192.168.15.255' + dhcp_lease_time='3600' + dhcp_message_type='5' + dhcp_server_identifier='192.168.0.1' + domain_name='us-east-2.compute.internal' + domain_name_servers='192.168.0.2' + host_name='ip-192-168-0-212' + interface_mtu='9001' + ip_address='192.168.0.212' + network_number='192.168.0.0' + routers='192.168.0.1' + subnet_cidr='20' + subnet_mask='255.255.240.0' + """ + ) + with mock.patch("cloudinit.net.dhcp.util.load_binary_file"): + parsed_lease = Dhcpcd.parse_dhcpcd_lease(lease, "eth0") + assert "eth0" == parsed_lease["interface"] + assert "192.168.15.255" == parsed_lease["broadcast-address"] + assert "192.168.0.212" == parsed_lease["fixed-address"] + assert "255.255.240.0" == parsed_lease["subnet-mask"] + assert "192.168.0.1" == parsed_lease["routers"] + + @pytest.mark.parametrize( + "lease_file, option_245", + ( + pytest.param("enp24s0.lease", None, id="no option 245"), + pytest.param( + "eth0.lease", + socket.inet_aton("168.63.129.16"), + id="a valid option 245", + ), + ), + ) + def test_parse_raw_lease(self, lease_file, option_245): + lease = load_binary_file(f"tests/data/net/dhcp/{lease_file}") + assert option_245 == Dhcpcd.parse_unknown_options_from_packet( + lease, 245 + ) + + def test_parse_classless_static_routes(self): + lease = dedent( + """ + broadcast_address='10.0.0.255' + classless_static_routes='0.0.0.0/0 10.0.0.1 168.63.129.16/32""" + """ 10.0.0.1 169.254.169.254/32 10.0.0.1' + dhcp_lease_time='4294967295' + dhcp_message_type='5' + dhcp_rebinding_time='4294967295' + dhcp_renewal_time='4294967295' + dhcp_server_identifier='168.63.129.16' + domain_name='ilo2tr0xng2exgucxg20yx0tjb.gx.internal.cloudapp.net' + domain_name_servers='168.63.129.16' + ip_address='10.0.0.5' + network_number='10.0.0.0' + routers='10.0.0.1' + server_name='DSM111070915004' + subnet_cidr='24' + subnet_mask='255.255.255.0' + """ + ) + with mock.patch("cloudinit.net.dhcp.util.load_binary_file"): + parsed_lease = Dhcpcd.parse_dhcpcd_lease(lease, "eth0") + assert [ + ("0.0.0.0/0", "10.0.0.1"), + ("168.63.129.16/32", "10.0.0.1"), + ("169.254.169.254/32", "10.0.0.1"), + ] == Dhcpcd.parse_static_routes(parsed_lease["static_routes"]) + + @mock.patch("cloudinit.net.dhcp.is_ib_interface", return_value=True) + @mock.patch("cloudinit.net.dhcp.subp.which", return_value="/sbin/dhcpcd") + @mock.patch("cloudinit.net.dhcp.os.killpg") + @mock.patch("cloudinit.net.dhcp.subp.subp") + @mock.patch("cloudinit.util.load_json") + @mock.patch("cloudinit.util.load_binary_file") + @mock.patch("cloudinit.util.write_file") + def test_dhcpcd_discovery_ib( + self, + m_write_file, + m_load_file, + m_loadjson, + m_subp, + m_remove, + m_which, + m_is_ib_interface, + ): + """dhcp_discovery runs dhcpcd and parses the dhcp leases.""" + m_subp.return_value = SubpResult("a=b", "") + Dhcpcd().dhcp_discovery("ib0", distro=MockDistro()) +
# Interface was brought up before dhclient called + m_subp.assert_has_calls( + [ + mock.call( + ["ip", "link", "set", "dev", "ib0", "up"], + ), + mock.call( + [ + "/sbin/dhcpcd", + "--ipv4only", + "--waitip", + "--persistent", + "--noarp", + "--script=/bin/true", + "--clientid", + "ib0", + ], + timeout=Dhcpcd.timeout, + ), + ] + ) + + @mock.patch("cloudinit.net.dhcp.subp.which", return_value="/sbin/dhcpcd") + @mock.patch("cloudinit.net.dhcp.os.killpg") + @mock.patch("cloudinit.net.dhcp.subp.subp") + @mock.patch("cloudinit.util.load_json") + @mock.patch("cloudinit.util.load_binary_file") + @mock.patch("cloudinit.util.write_file") + def test_dhcpcd_discovery_timeout( + self, + m_write_file, + m_load_file, + m_loadjson, + m_subp, + m_remove, + m_which, + ): + """Verify dhcpcd timeout results in NoDHCPLeaseError exception.""" + m_subp.side_effect = [ + SubpResult("a=b", ""), + subprocess.TimeoutExpired( + "/sbin/dhcpcd", timeout=6, output="testout", stderr="testerr" + ), + ] + with pytest.raises(NoDHCPLeaseError): + Dhcpcd().dhcp_discovery("eth0", distro=MockDistro()) + + m_subp.assert_has_calls( + [ + mock.call( + ["ip", "link", "set", "dev", "eth0", "up"], + ), + mock.call( + [ + "/sbin/dhcpcd", + "--ipv4only", + "--waitip", + "--persistent", + "--noarp", + "--script=/bin/true", + "eth0", + ], + timeout=Dhcpcd.timeout, + ), + ] + ) + + +class TestMaybePerformDhcpDiscovery: + def test_none_and_missing_fallback(self): + with pytest.raises(NoDHCPLeaseInterfaceError): + distro = mock.Mock(fallback_interface=None) + maybe_perform_dhcp_discovery(distro, None) diff -Nru cloud-init-23.4.4/tests/unittests/net/test_dns.py cloud-init-24.1.3/tests/unittests/net/test_dns.py --- cloud-init-23.4.4/tests/unittests/net/test_dns.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/net/test_dns.py 2024-03-27 13:14:04.000000000 +0000 @@ -29,4 +29,6 @@ """ ) ) - assert "10.0.0.3" in state.dns_nameservers + assert ( + "10.0.0.3" in next(state.iter_interfaces())["dns"]["nameservers"] + ) diff -Nru cloud-init-23.4.4/tests/unittests/net/test_init.py cloud-init-24.1.3/tests/unittests/net/test_init.py --- cloud-init-23.4.4/tests/unittests/net/test_init.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/net/test_init.py 2024-03-27 13:14:04.000000000 +0000 @@ -17,49 +17,55 @@ from cloudinit.net.ephemeral import EphemeralIPv4Network, EphemeralIPv6Network from cloudinit.subp import ProcessExecutionError from cloudinit.util import ensure_file, write_file -from tests.unittests.helpers import CiTestCase, ResponsesTestCase +from tests.unittests.helpers import ( + CiTestCase, + ResponsesTestCase, + random_string, +) from tests.unittests.util import MockDistro -class TestSysDevPath(CiTestCase): +class TestSysDevPath: def test_sys_dev_path(self): """sys_dev_path returns a path under SYS_CLASS_NET for a device.""" dev = "something" path = "attribute" - expected = net.SYS_CLASS_NET + dev + "/" + path - self.assertEqual(expected, net.sys_dev_path(dev, path)) + expected = net.get_sys_class_path() + dev + "/" + path + assert expected == net.sys_dev_path(dev, path) def test_sys_dev_path_without_path(self): """When path param isn't provided it defaults to empty string.""" dev = "something" - expected = net.SYS_CLASS_NET + dev + "/" - self.assertEqual(expected, net.sys_dev_path(dev)) - + expected = net.get_sys_class_path() + dev + "/" + assert expected == net.sys_dev_path(dev) -class TestReadSysNet(CiTestCase): - with_logs = True - def setUp(self): - super(TestReadSysNet, self).setUp() - 
sys_mock = mock.patch("cloudinit.net.get_sys_class_path") - self.m_sys_path = sys_mock.start() - self.sysdir = self.tmp_dir() + "/" - self.m_sys_path.return_value = self.sysdir - self.addCleanup(sys_mock.stop) +class TestReadSysNet: + @pytest.fixture(autouse=True) + @pytest.mark.parametrize( + "disable_sysfs_net", [False], indirect=["disable_sysfs_net"] + ) + def setup(self, disable_sysfs_net, tmpdir_factory): + # We mock invididual numbered tmpdirs here because these tests write + # to the sysfs directory and stale test artifacts break later tests. + mock_sysfs = f"{tmpdir_factory.mktemp('sysfs', numbered=True)}/" + with mock.patch( + "cloudinit.net.get_sys_class_path", return_value=mock_sysfs + ): + self.sysdir = mock_sysfs + yield def test_read_sys_net_strips_contents_of_sys_path(self): """read_sys_net strips whitespace from the contents of a sys file.""" content = "some stuff with trailing whitespace\t\r\n" write_file(os.path.join(self.sysdir, "dev", "attr"), content) - self.assertEqual(content.strip(), net.read_sys_net("dev", "attr")) + assert content.strip() == net.read_sys_net("dev", "attr") def test_read_sys_net_reraises_oserror(self): """read_sys_net raises OSError/IOError when file doesn't exist.""" # Non-specific Exception because versions of python OSError vs IOError. - with self.assertRaises(Exception) as context_manager: # noqa: H202 + with pytest.raises(Exception, match="No such file or directory"): net.read_sys_net("dev", "attr") - error = context_manager.exception - self.assertIn("No such file or directory", str(error)) def test_read_sys_net_handles_error_with_on_enoent(self): """read_sys_net handles OSError/IOError with on_enoent if provided.""" @@ -70,30 +76,27 @@ net.read_sys_net("dev", "attr", on_enoent=on_enoent) error = handled_errors[0] - self.assertIsInstance(error, Exception) - self.assertIn("No such file or directory", str(error)) + assert isinstance(error, Exception) + assert "No such file or directory" in str(error) def test_read_sys_net_translates_content(self): """read_sys_net translates content when translate dict is provided.""" content = "you're welcome\n" write_file(os.path.join(self.sysdir, "dev", "attr"), content) translate = {"you're welcome": "de nada"} - self.assertEqual( - "de nada", net.read_sys_net("dev", "attr", translate=translate) + assert "de nada" == net.read_sys_net( + "dev", "attr", translate=translate ) - def test_read_sys_net_errors_on_translation_failures(self): + def test_read_sys_net_errors_on_translation_failures(self, caplog): """read_sys_net raises a KeyError and logs details on failure.""" content = "you're welcome\n" write_file(os.path.join(self.sysdir, "dev", "attr"), content) - with self.assertRaises(KeyError) as context_manager: + with pytest.raises(KeyError, match='"you\'re welcome"'): net.read_sys_net("dev", "attr", translate={}) - error = context_manager.exception - self.assertEqual('"you\'re welcome"', str(error)) - self.assertIn( + assert ( "Found unexpected (not translatable) value 'you're welcome' in " - "'{0}dev/attr".format(self.sysdir), - self.logs.getvalue(), + "'{0}dev/attr".format(self.sysdir) in caplog.text ) def test_read_sys_net_handles_handles_with_onkeyerror(self): @@ -107,63 +110,63 @@ net.read_sys_net("dev", "attr", translate={}, on_keyerror=on_keyerror) error = handled_errors[0] - self.assertIsInstance(error, KeyError) - self.assertEqual('"you\'re welcome"', str(error)) + assert isinstance(error, KeyError) + assert '"you\'re welcome"' == str(error) def 
test_read_sys_net_safe_false_on_translate_failure(self): """read_sys_net_safe returns False on translation failures.""" content = "you're welcome\n" write_file(os.path.join(self.sysdir, "dev", "attr"), content) - self.assertFalse(net.read_sys_net_safe("dev", "attr", translate={})) + assert not net.read_sys_net_safe("dev", "attr", translate={}) def test_read_sys_net_safe_returns_false_on_noent_failure(self): """read_sys_net_safe returns False on file not found failures.""" - self.assertFalse(net.read_sys_net_safe("dev", "attr")) + assert not net.read_sys_net_safe("dev", "attr") def test_read_sys_net_int_returns_none_on_error(self): """read_sys_net_safe returns None on failures.""" - self.assertFalse(net.read_sys_net_int("dev", "attr")) + assert not net.read_sys_net_int("dev", "attr") def test_read_sys_net_int_returns_none_on_valueerror(self): """read_sys_net_safe returns None when content is not an int.""" write_file(os.path.join(self.sysdir, "dev", "attr"), "NOTINT\n") - self.assertFalse(net.read_sys_net_int("dev", "attr")) + assert not net.read_sys_net_int("dev", "attr") def test_read_sys_net_int_returns_integer_from_content(self): """read_sys_net_safe returns None on failures.""" write_file(os.path.join(self.sysdir, "dev", "attr"), "1\n") - self.assertEqual(1, net.read_sys_net_int("dev", "attr")) + assert 1 == net.read_sys_net_int("dev", "attr") def test_is_up_true(self): """is_up is True if sys/net/devname/operstate is 'up' or 'unknown'.""" for state in ["up", "unknown"]: write_file(os.path.join(self.sysdir, "eth0", "operstate"), state) - self.assertTrue(net.is_up("eth0")) + assert net.is_up("eth0") def test_is_up_false(self): """is_up is False if sys/net/devname/operstate is 'down' or invalid.""" for state in ["down", "incomprehensible"]: write_file(os.path.join(self.sysdir, "eth0", "operstate"), state) - self.assertFalse(net.is_up("eth0")) + assert not net.is_up("eth0") def test_is_bridge(self): """is_bridge is True when /sys/net/devname/bridge exists.""" - self.assertFalse(net.is_bridge("eth0")) + assert not net.is_bridge("eth0") ensure_file(os.path.join(self.sysdir, "eth0", "bridge")) - self.assertTrue(net.is_bridge("eth0")) + assert net.is_bridge("eth0") def test_is_bond(self): """is_bond is True when /sys/net/devname/bonding exists.""" - self.assertFalse(net.is_bond("eth0")) + assert not net.is_bond("eth0") ensure_file(os.path.join(self.sysdir, "eth0", "bonding")) - self.assertTrue(net.is_bond("eth0")) + assert net.is_bond("eth0") def test_get_master(self): """get_master returns the path when /sys/net/devname/master exists.""" - self.assertIsNone(net.get_master("enP1s1")) + assert net.get_master("enP1s1") is None master_path = os.path.join(self.sysdir, "enP1s1", "master") ensure_file(master_path) - self.assertEqual(master_path, net.get_master("enP1s1")) + assert master_path == net.get_master("enP1s1") def test_master_is_bridge_or_bond(self): bridge_mac = "aa:bb:cc:aa:bb:cc" @@ -173,8 +176,8 @@ write_file(os.path.join(self.sysdir, "eth1", "address"), bridge_mac) write_file(os.path.join(self.sysdir, "eth2", "address"), bond_mac) - self.assertFalse(net.master_is_bridge_or_bond("eth1")) - self.assertFalse(net.master_is_bridge_or_bond("eth2")) + assert not net.master_is_bridge_or_bond("eth1") + assert not net.master_is_bridge_or_bond("eth2") # masters without bridge/bonding => False write_file(os.path.join(self.sysdir, "br0", "address"), bridge_mac) @@ -183,15 +186,15 @@ os.symlink("../br0", os.path.join(self.sysdir, "eth1", "master")) os.symlink("../bond0", os.path.join(self.sysdir, 
"eth2", "master")) - self.assertFalse(net.master_is_bridge_or_bond("eth1")) - self.assertFalse(net.master_is_bridge_or_bond("eth2")) + assert not net.master_is_bridge_or_bond("eth1") + assert not net.master_is_bridge_or_bond("eth2") # masters with bridge/bonding => True write_file(os.path.join(self.sysdir, "br0", "bridge"), "") write_file(os.path.join(self.sysdir, "bond0", "bonding"), "") - self.assertTrue(net.master_is_bridge_or_bond("eth1")) - self.assertTrue(net.master_is_bridge_or_bond("eth2")) + assert net.master_is_bridge_or_bond("eth1") + assert net.master_is_bridge_or_bond("eth2") def test_master_is_openvswitch(self): ovs_mac = "bb:cc:aa:bb:cc:aa" @@ -199,7 +202,7 @@ # No master => False write_file(os.path.join(self.sysdir, "eth1", "address"), ovs_mac) - self.assertFalse(net.master_is_bridge_or_bond("eth1")) + assert not net.master_is_bridge_or_bond("eth1") # masters without ovs-system => False write_file(os.path.join(self.sysdir, "ovs-system", "address"), ovs_mac) @@ -208,7 +211,7 @@ "../ovs-system", os.path.join(self.sysdir, "eth1", "master") ) - self.assertFalse(net.master_is_openvswitch("eth1")) + assert not net.master_is_openvswitch("eth1") # masters with ovs-system => True os.symlink( @@ -216,15 +219,15 @@ os.path.join(self.sysdir, "eth1", "upper_ovs-system"), ) - self.assertTrue(net.master_is_openvswitch("eth1")) + assert net.master_is_openvswitch("eth1") def test_is_vlan(self): """is_vlan is True when /sys/net/devname/uevent has DEVTYPE=vlan.""" ensure_file(os.path.join(self.sysdir, "eth0", "uevent")) - self.assertFalse(net.is_vlan("eth0")) + assert not net.is_vlan("eth0") content = "junk\nDEVTYPE=vlan\njunk\n" write_file(os.path.join(self.sysdir, "eth0", "uevent"), content) - self.assertTrue(net.is_vlan("eth0")) + assert net.is_vlan("eth0") class TestGenerateFallbackConfig(CiTestCase): @@ -261,6 +264,7 @@ "eth1": { "match": {"macaddress": mac}, "dhcp4": True, + "dhcp6": True, "set-name": "eth1", } }, @@ -278,6 +282,7 @@ "eth0": { "match": {"macaddress": mac}, "dhcp4": True, + "dhcp6": True, "set-name": "eth0", } }, @@ -293,6 +298,7 @@ "ethernets": { "eth0": { "dhcp4": True, + "dhcp6": True, "match": {"macaddress": mac}, "set-name": "eth0", } @@ -359,6 +365,7 @@ "ethernets": { "ens3": { "dhcp4": True, + "dhcp6": True, "match": {"name": "ens3"}, "set-name": "ens3", } @@ -1453,134 +1460,121 @@ net.extract_physdevs({"version": 3, "awesome_config": []}) -class TestNetFailOver(CiTestCase): - def setUp(self): - super(TestNetFailOver, self).setUp() - self.add_patch("cloudinit.net.util", "m_util") - self.add_patch("cloudinit.net.read_sys_net", "m_read_sys_net") - self.add_patch("cloudinit.net.device_driver", "m_device_driver") +class TestNetFailOver: + @pytest.fixture(autouse=True) + def setup(self, mocker): + mocker.patch("cloudinit.net.util") + self.device_driver = mocker.patch("cloudinit.net.device_driver") + self.read_sys_net = mocker.patch("cloudinit.net.read_sys_net") def test_get_dev_features(self): - devname = self.random_string() - features = self.random_string() - self.m_read_sys_net.return_value = features - - self.assertEqual(features, net.get_dev_features(devname)) - self.assertEqual(1, self.m_read_sys_net.call_count) - self.assertEqual( - mock.call(devname, "device/features"), - self.m_read_sys_net.call_args_list[0], - ) + devname = random_string() + features = random_string() + self.read_sys_net.return_value = features + + assert features == net.get_dev_features(devname) + assert 1 == self.read_sys_net.call_count + self.read_sys_net.assert_called_once_with(devname, 
"device/features") def test_get_dev_features_none_returns_empty_string(self): - devname = self.random_string() - self.m_read_sys_net.side_effect = Exception("error") - self.assertEqual("", net.get_dev_features(devname)) - self.assertEqual(1, self.m_read_sys_net.call_count) - self.assertEqual( - mock.call(devname, "device/features"), - self.m_read_sys_net.call_args_list[0], - ) + devname = random_string() + self.read_sys_net.side_effect = Exception("error") + assert "" == net.get_dev_features(devname) + assert 1 == self.read_sys_net.call_count + self.read_sys_net.assert_called_once_with(devname, "device/features") @mock.patch("cloudinit.net.get_dev_features") def test_has_netfail_standby_feature(self, m_dev_features): - devname = self.random_string() + devname = random_string() standby_features = ("0" * 62) + "1" + "0" m_dev_features.return_value = standby_features - self.assertTrue(net.has_netfail_standby_feature(devname)) + assert net.has_netfail_standby_feature(devname) @mock.patch("cloudinit.net.get_dev_features") def test_has_netfail_standby_feature_short_is_false(self, m_dev_features): - devname = self.random_string() - standby_features = self.random_string() + devname = random_string() + standby_features = random_string() m_dev_features.return_value = standby_features - self.assertFalse(net.has_netfail_standby_feature(devname)) + assert not net.has_netfail_standby_feature(devname) @mock.patch("cloudinit.net.get_dev_features") def test_has_netfail_standby_feature_not_present_is_false( self, m_dev_features ): - devname = self.random_string() + devname = random_string() standby_features = "0" * 64 m_dev_features.return_value = standby_features - self.assertFalse(net.has_netfail_standby_feature(devname)) + assert not net.has_netfail_standby_feature(devname) @mock.patch("cloudinit.net.get_dev_features") def test_has_netfail_standby_feature_no_features_is_false( self, m_dev_features ): - devname = self.random_string() + devname = random_string() standby_features = None m_dev_features.return_value = standby_features - self.assertFalse(net.has_netfail_standby_feature(devname)) + assert not net.has_netfail_standby_feature(devname) @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") def test_is_netfail_master(self, m_exists, m_standby): - devname = self.random_string() + devname = random_string() driver = "virtio_net" m_exists.return_value = False # no master sysfs attr m_standby.return_value = True # has standby feature flag - self.assertTrue(net.is_netfail_master(devname, driver)) + assert net.is_netfail_master(devname, driver) @mock.patch("cloudinit.net.sys_dev_path") def test_is_netfail_master_checks_master_attr(self, m_sysdev): - devname = self.random_string() + devname = random_string() driver = "virtio_net" - m_sysdev.return_value = self.random_string() - self.assertFalse(net.is_netfail_master(devname, driver)) - self.assertEqual(1, m_sysdev.call_count) - self.assertEqual( - mock.call(devname, path="master"), m_sysdev.call_args_list[0] - ) + m_sysdev.return_value = random_string() + assert not net.is_netfail_master(devname, driver) + assert 1 == m_sysdev.call_count + m_sysdev.assert_called_once_with(devname, path="master") @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") def test_is_netfail_master_wrong_driver(self, m_exists, m_standby): - devname = self.random_string() - driver = self.random_string() - self.assertFalse(net.is_netfail_master(devname, driver)) + devname = random_string() 
+ driver = random_string() + assert not net.is_netfail_master(devname, driver) @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") def test_is_netfail_master_has_master_attr(self, m_exists, m_standby): - devname = self.random_string() + devname = random_string() driver = "virtio_net" m_exists.return_value = True # has master sysfs attr - self.assertFalse(net.is_netfail_master(devname, driver)) + assert not net.is_netfail_master(devname, driver) @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") def test_is_netfail_master_no_standby_feat(self, m_exists, m_standby): - devname = self.random_string() + devname = random_string() driver = "virtio_net" m_exists.return_value = False # no master sysfs attr m_standby.return_value = False # no standby feature flag - self.assertFalse(net.is_netfail_master(devname, driver)) + assert not net.is_netfail_master(devname, driver) @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") @mock.patch("cloudinit.net.sys_dev_path") def test_is_netfail_primary(self, m_sysdev, m_exists, m_standby): - devname = self.random_string() - driver = self.random_string() # device not virtio_net - master_devname = self.random_string() + devname = random_string() + driver = random_string() # device not virtio_net + master_devname = random_string() m_sysdev.return_value = "%s/%s" % ( - self.random_string(), + random_string(), master_devname, ) m_exists.return_value = True # has master sysfs attr - self.m_device_driver.return_value = "virtio_net" # master virtio_net + self.device_driver.return_value = "virtio_net" # master virtio_net m_standby.return_value = True # has standby feature flag - self.assertTrue(net.is_netfail_primary(devname, driver)) - self.assertEqual(1, self.m_device_driver.call_count) - self.assertEqual( - mock.call(master_devname), self.m_device_driver.call_args_list[0] - ) - self.assertEqual(1, m_standby.call_count) - self.assertEqual( - mock.call(master_devname), m_standby.call_args_list[0] - ) + assert net.is_netfail_primary(devname, driver) + self.device_driver.assert_called_once_with(master_devname) + assert 1 == m_standby.call_count + m_standby.assert_called_once_with(master_devname) @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") @@ -1588,18 +1582,18 @@ def test_is_netfail_primary_wrong_driver( self, m_sysdev, m_exists, m_standby ): - devname = self.random_string() + devname = random_string() driver = "virtio_net" - self.assertFalse(net.is_netfail_primary(devname, driver)) + assert not net.is_netfail_primary(devname, driver) @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") @mock.patch("cloudinit.net.sys_dev_path") def test_is_netfail_primary_no_master(self, m_sysdev, m_exists, m_standby): - devname = self.random_string() - driver = self.random_string() # device not virtio_net + devname = random_string() + driver = random_string() # device not virtio_net m_exists.return_value = False # no master sysfs attr - self.assertFalse(net.is_netfail_primary(devname, driver)) + assert not net.is_netfail_primary(devname, driver) @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") @@ -1607,16 +1601,16 @@ def test_is_netfail_primary_bad_master( self, m_sysdev, m_exists, m_standby ): - devname = self.random_string() - driver = self.random_string() # device not virtio_net - 
master_devname = self.random_string() + devname = random_string() + driver = random_string() # device not virtio_net + master_devname = random_string() m_sysdev.return_value = "%s/%s" % ( - self.random_string(), + random_string(), master_devname, ) m_exists.return_value = True # has master sysfs attr - self.m_device_driver.return_value = "XXXX" # master not virtio_net - self.assertFalse(net.is_netfail_primary(devname, driver)) + self.device_driver.return_value = "XXXX" # master not virtio_net + assert not net.is_netfail_primary(devname, driver) @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") @@ -1624,77 +1618,77 @@ def test_is_netfail_primary_no_standby( self, m_sysdev, m_exists, m_standby ): - devname = self.random_string() - driver = self.random_string() # device not virtio_net - master_devname = self.random_string() + devname = random_string() + driver = random_string() # device not virtio_net + master_devname = random_string() m_sysdev.return_value = "%s/%s" % ( - self.random_string(), + random_string(), master_devname, ) m_exists.return_value = True # has master sysfs attr - self.m_device_driver.return_value = "virtio_net" # master virtio_net + self.device_driver.return_value = "virtio_net" # master virtio_net m_standby.return_value = False # master has no standby feature flag - self.assertFalse(net.is_netfail_primary(devname, driver)) + assert not net.is_netfail_primary(devname, driver) @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") def test_is_netfail_standby(self, m_exists, m_standby): - devname = self.random_string() + devname = random_string() driver = "virtio_net" m_exists.return_value = True # has master sysfs attr m_standby.return_value = True # has standby feature flag - self.assertTrue(net.is_netfail_standby(devname, driver)) + assert net.is_netfail_standby(devname, driver) @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") def test_is_netfail_standby_wrong_driver(self, m_exists, m_standby): - devname = self.random_string() - driver = self.random_string() - self.assertFalse(net.is_netfail_standby(devname, driver)) + devname = random_string() + driver = random_string() + assert not net.is_netfail_standby(devname, driver) @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") def test_is_netfail_standby_no_master(self, m_exists, m_standby): - devname = self.random_string() + devname = random_string() driver = "virtio_net" m_exists.return_value = False # has master sysfs attr - self.assertFalse(net.is_netfail_standby(devname, driver)) + assert not net.is_netfail_standby(devname, driver) @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") def test_is_netfail_standby_no_standby_feature(self, m_exists, m_standby): - devname = self.random_string() + devname = random_string() driver = "virtio_net" m_exists.return_value = True # has master sysfs attr m_standby.return_value = False # has standby feature flag - self.assertFalse(net.is_netfail_standby(devname, driver)) + assert not net.is_netfail_standby(devname, driver) @mock.patch("cloudinit.net.is_netfail_standby") @mock.patch("cloudinit.net.is_netfail_primary") def test_is_netfailover_primary(self, m_primary, m_standby): - devname = self.random_string() - driver = self.random_string() + devname = random_string() + driver = random_string() m_primary.return_value = True 
m_standby.return_value = False - self.assertTrue(net.is_netfailover(devname, driver)) + assert net.is_netfailover(devname, driver) @mock.patch("cloudinit.net.is_netfail_standby") @mock.patch("cloudinit.net.is_netfail_primary") def test_is_netfailover_standby(self, m_primary, m_standby): - devname = self.random_string() - driver = self.random_string() + devname = random_string() + driver = random_string() m_primary.return_value = False m_standby.return_value = True - self.assertTrue(net.is_netfailover(devname, driver)) + assert net.is_netfailover(devname, driver) @mock.patch("cloudinit.net.is_netfail_standby") @mock.patch("cloudinit.net.is_netfail_primary") def test_is_netfailover_returns_false(self, m_primary, m_standby): - devname = self.random_string() - driver = self.random_string() + devname = random_string() + driver = random_string() m_primary.return_value = False m_standby.return_value = False - self.assertFalse(net.is_netfailover(devname, driver)) + assert not net.is_netfailover(devname, driver) class TestOpenvswitchIsInstalled: diff -Nru cloud-init-23.4.4/tests/unittests/net/test_network_state.py cloud-init-24.1.3/tests/unittests/net/test_network_state.py --- cloud-init-23.4.4/tests/unittests/net/test_network_state.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/net/test_network_state.py 2024-03-27 13:14:04.000000000 +0000 @@ -4,7 +4,7 @@ import pytest -from cloudinit import log, safeyaml, util +from cloudinit import safeyaml, util from cloudinit.net import network_state from cloudinit.net.netplan import Renderer as NetplanRenderer from cloudinit.net.renderers import NAME_TO_RENDERER @@ -214,8 +214,6 @@ In netplan targets we perform a passthrough and the warning is not needed. """ - log.setup_logging() - util.deprecate._log = set() # type: ignore ncfg = safeyaml.load( cfg.format( @@ -258,7 +256,10 @@ # If an interface was specified, DNS should be part of the interface for iface in config.iter_interfaces(): if iface["name"] == "eth1": - assert iface["dns"]["addresses"] == ["192.168.1.1", "8.8.8.8"] + assert iface["dns"]["nameservers"] == [ + "192.168.1.1", + "8.8.8.8", + ] assert iface["dns"]["search"] == ["spam.local"] else: assert "dns" not in iface @@ -287,15 +288,11 @@ "search": ["foo.local", "bar.local"], } - # Ensure DNS defined on interface also exists globally (since there - # is no global DNS definitions in v2) - assert ["4.4.4.4", "8.8.8.8"] == sorted(config.dns_nameservers) - assert [ - "bar.local", - "eggs.local", - "foo.local", - "spam.local", - ] == sorted(config.dns_searchdomains) + # Ensure DNS defined on interface does not exist globally + for server in ["4.4.4.4", "8.8.8.8"]: + assert server not in config.dns_nameservers + for search in ["bar.local", "eggs.local", "foo.local", "spam.local"]: + assert search not in config.dns_searchdomains class TestNetworkStateHelperFunctions(CiTestCase): diff -Nru cloud-init-23.4.4/tests/unittests/reporting/test_reporting_hyperv.py cloud-init-24.1.3/tests/unittests/reporting/test_reporting_hyperv.py --- cloud-init-23.4.4/tests/unittests/reporting/test_reporting_hyperv.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/reporting/test_reporting_hyperv.py 2024-03-27 13:14:04.000000000 +0000 @@ -10,7 +10,7 @@ from cloudinit import util from cloudinit.reporting import events, instantiated_handler_registry -from cloudinit.reporting.handlers import HyperVKvpReportingHandler, LogHandler +from cloudinit.reporting.handlers import HyperVKvpReportingHandler from cloudinit.sources.helpers 
import azure from tests.unittests.helpers import CiTestCase @@ -232,74 +232,6 @@ finally: instantiated_handler_registry.unregister_item( "telemetry", force=False - ) - - @mock.patch("cloudinit.sources.helpers.azure.report_compressed_event") - @mock.patch("cloudinit.sources.helpers.azure.report_diagnostic_event") - @mock.patch("cloudinit.subp.subp") - def test_push_log_to_kvp_exception_handling(self, m_subp, m_diag, m_com): - reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) - try: - instantiated_handler_registry.register_item("telemetry", reporter) - log_file = self.tmp_path("cloud-init.log") - azure.MAX_LOG_TO_KVP_LENGTH = 100 - azure.LOG_PUSHED_TO_KVP_INDEX_FILE = self.tmp_path( - "log_pushed_to_kvp" - ) - with open(log_file, "w") as f: - log_content = "A" * 50 + "B" * 100 - f.write(log_content) - - m_com.side_effect = Exception("Mock Exception") - azure.push_log_to_kvp(log_file) - - # exceptions will trigger diagnostic reporting calls - self.assertEqual(m_diag.call_count, 3) - finally: - instantiated_handler_registry.unregister_item( - "telemetry", force=False - ) - - @mock.patch("cloudinit.subp.subp") - @mock.patch.object(LogHandler, "publish_event") - def test_push_log_to_kvp(self, publish_event, m_subp): - reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) - try: - instantiated_handler_registry.register_item("telemetry", reporter) - log_file = self.tmp_path("cloud-init.log") - azure.MAX_LOG_TO_KVP_LENGTH = 100 - azure.LOG_PUSHED_TO_KVP_INDEX_FILE = self.tmp_path( - "log_pushed_to_kvp" - ) - with open(log_file, "w") as f: - log_content = "A" * 50 + "B" * 100 - f.write(log_content) - azure.push_log_to_kvp(log_file) - - with open(log_file, "a") as f: - extra_content = "C" * 10 - f.write(extra_content) - azure.push_log_to_kvp(log_file) - - # make sure dmesg is called every time - m_subp.assert_called_with(["dmesg"], capture=True, decode=False) - - for call_arg in publish_event.call_args_list: - event = call_arg[0][0] - self.assertNotEqual( - event.event_type, azure.COMPRESSED_EVENT_TYPE - ) - self.validate_compressed_kvps( - reporter, - 2, - [ - log_content[-azure.MAX_LOG_TO_KVP_LENGTH :].encode(), - extra_content.encode(), - ], - ) - finally: - instantiated_handler_registry.unregister_item( - "telemetry", force=False ) def validate_compressed_kvps(self, reporter, count, values): diff -Nru cloud-init-23.4.4/tests/unittests/runs/test_merge_run.py cloud-init-24.1.3/tests/unittests/runs/test_merge_run.py --- cloud-init-23.4.4/tests/unittests/runs/test_merge_run.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/runs/test_merge_run.py 2024-03-27 13:14:04.000000000 +0000 @@ -55,5 +55,5 @@ self.assertTrue(len(failures) == 0) self.assertTrue(os.path.exists("/etc/blah.ini")) self.assertIn("write_files", which_ran) - contents = util.load_file("/etc/blah.ini") + contents = util.load_text_file("/etc/blah.ini") self.assertEqual(contents, "blah") diff -Nru cloud-init-23.4.4/tests/unittests/runs/test_simple_run.py cloud-init-24.1.3/tests/unittests/runs/test_simple_run.py --- cloud-init-23.4.4/tests/unittests/runs/test_simple_run.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/runs/test_simple_run.py 2024-03-27 13:14:04.000000000 +0000 @@ -77,7 +77,7 @@ initer.apply_network_config(False) self.assertEqual( f"{atomic_helper.json_dumps(netcfg)}\n", - util.load_file("/var/lib/cloud/instance/network-config.json"), + util.load_text_file("/var/lib/cloud/instance/network-config.json"), ) def 
test_none_ds_runs_modules_which_do_not_define_distros(self): @@ -100,7 +100,7 @@ self.assertTrue(len(failures) == 0) self.assertTrue(os.path.exists("/etc/blah.ini")) self.assertIn("write_files", which_ran) - contents = util.load_file("/etc/blah.ini") + contents = util.load_text_file("/etc/blah.ini") self.assertEqual(contents, "blah") self.assertNotIn( "Skipping modules ['write_files'] because they are not verified on" diff -Nru cloud-init-23.4.4/tests/unittests/sources/azure/test_errors.py cloud-init-24.1.3/tests/unittests/sources/azure/test_errors.py --- cloud-init-23.4.4/tests/unittests/sources/azure/test_errors.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/sources/azure/test_errors.py 2024-03-27 13:14:04.000000000 +0000 @@ -3,6 +3,7 @@ import base64 import datetime from unittest import mock +from xml.etree import ElementTree import pytest import requests @@ -25,12 +26,13 @@ yield timestamp -@pytest.fixture() -def fake_vm_id(): - vm_id = "fake-vm-id" - with mock.patch.object(errors.identity, "query_vm_id", autospec=True) as m: - m.return_value = vm_id - yield vm_id +@pytest.fixture(autouse=True) +def fake_vm_id(mocker): + vm_id = "foo" + mocker.patch( + "cloudinit.sources.azure.identity.query_vm_id", return_value=vm_id + ) + yield vm_id def quote_csv_value(value: str) -> str: @@ -121,7 +123,7 @@ assert error.as_encoded_report() == "|".join(data) -def test_dhcp_lease(): +def test_dhcp_lease(mocker): error = errors.ReportableErrorDhcpLease(duration=5.6, interface="foo") assert error.reason == "failure to obtain DHCP lease" @@ -129,7 +131,7 @@ assert error.supporting_data["interface"] == "foo" -def test_dhcp_interface_not_found(): +def test_dhcp_interface_not_found(mocker): error = errors.ReportableErrorDhcpInterfaceNotFound(duration=5.6) assert error.reason == "failure to find DHCP interface" @@ -206,6 +208,24 @@ assert error.supporting_data["exception"] == repr(exception) +def test_ovf_parsing_exception(): + error = None + try: + ElementTree.fromstring("', diff -Nru cloud-init-23.4.4/tests/unittests/sources/test_azure.py cloud-init-24.1.3/tests/unittests/sources/test_azure.py --- cloud-init-23.4.4/tests/unittests/sources/test_azure.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/sources/test_azure.py 2024-03-27 13:14:04.000000000 +0000 @@ -455,6 +458,11 @@ "<PreprovisionedVMType>%s</PreprovisionedVMType>" % preprovisioned_vm_type ) + if provision_guest_proxy_agent is not None: + content.append( + "<ProvisionGuestProxyAgent>%s</ProvisionGuestProxyAgent>" + % provision_guest_proxy_agent + ) content += [ "", "", @@ -1058,7 +1066,6 @@ mock.MagicMock(), ) ) - super(TestAzureDataSource, self).setUp() def apply_patches(self, patches): for module, name, new in patches: @@ -1441,6 +1448,7 @@ expected_cfg = { "PreprovisionedVMType": None, "PreprovisionedVm": False, + "ProvisionGuestProxyAgent": False, "system_info": {"default_user": {"name": "myuser"}}, } expected_metadata = { @@ -1477,13 +1485,12 @@ """crawl_metadata raises an exception on invalid ovf-env.xml.""" data = {"ovfcontent": "BOGUS", "sys_cfg": {}} dsrc = self._get_ds(data) - error_msg = ( - "BrokenAzureDataSource: Invalid ovf-env.xml:" - " syntax error: line 1, column 0" - ) - with self.assertRaises(InvalidMetaDataException) as cm: + error_msg = "error parsing ovf-env.xml: syntax error: line 1, column 0" + with self.assertRaises( + errors.ReportableErrorOvfParsingException + ) as cm: dsrc.crawl_metadata() - self.assertEqual(str(cm.exception), error_msg) + self.assertEqual(cm.exception.reason, error_msg) def test_crawl_metadata_call_imds_once_no_reprovision(self): """If reprovisioning, report ready at the end""" @@ -1877,7 +1884,7 @@ ovf_env_path = os.path.join(self.waagent_d, "ovf-env.xml") # The XML should not be same since the user password is redacted - on_disk_ovf = load_file(ovf_env_path) + on_disk_ovf = load_text_file(ovf_env_path) self.xml_notequals(data["ovfcontent"],
on_disk_ovf) # Make sure that the redacted password on disk is not used by CI @@ -1900,7 +1907,7 @@ # we expect that the ovf-env.xml file is copied there. ovf_env_path = os.path.join(self.waagent_d, "ovf-env.xml") self.assertTrue(os.path.exists(ovf_env_path)) - self.xml_equals(xml, load_file(ovf_env_path)) + self.xml_equals(xml, load_text_file(ovf_env_path)) def test_ovf_can_include_unicode(self): xml = construct_ovf_env() @@ -2009,7 +2016,7 @@ # mock crawl metadata failure to cause report failure m_crawl_metadata.side_effect = Exception - test_lease_dhcp_option_245 = "01:02:03:04" + test_lease_dhcp_option_245 = "1.2.3.4" test_lease = { "unknown-245": test_lease_dhcp_option_245, "interface": "eth0", @@ -2315,11 +2322,13 @@ ovf_path = os.path.join(self.source_dir, "ovf-env.xml") with open(ovf_path, "wb") as stream: stream.write(b"invalid xml") - with self.assertRaises(dsaz.BrokenAzureDataSource) as context_manager: + with self.assertRaises( + errors.ReportableErrorOvfParsingException + ) as context_manager: dsaz.load_azure_ds_dir(self.source_dir) self.assertEqual( - "Invalid ovf-env.xml: syntax error: line 1, column 0", - str(context_manager.exception), + "error parsing ovf-env.xml: syntax error: line 1, column 0", + context_manager.exception.reason, ) @@ -2327,7 +2336,9 @@ def test_invalid_xml_raises_non_azure_ds(self): invalid_xml = "" + construct_ovf_env() self.assertRaises( - dsaz.BrokenAzureDataSource, dsaz.read_azure_ovf, invalid_xml + errors.ReportableErrorOvfParsingException, + dsaz.read_azure_ovf, + invalid_xml, ) def test_load_with_pubkeys(self): @@ -2815,6 +2826,14 @@ self.assertTrue(cfg["PreprovisionedVm"]) self.assertEqual("Savable", cfg["PreprovisionedVMType"]) + def test_read_azure_ovf_with_proxy_guest_agent(self): + """The read_azure_ovf method should set ProvisionGuestProxyAgent + cfg flag to True.""" + content = construct_ovf_env(provision_guest_proxy_agent=True) + ret = dsaz.read_azure_ovf(content) + cfg = ret[2] + self.assertTrue(cfg["ProvisionGuestProxyAgent"]) + @pytest.mark.parametrize( "ovf_cfg,imds_md,pps_type", @@ -3298,7 +3317,9 @@ ): lease = { "interface": "fakeEth0", - "unknown-245": "10:ff:fe:fd", + "unknown-245": dhcp.IscDhclient.get_ip_from_lease_value( + "10:ff:fe:fd" + ), } mock_ephemeral_dhcp_v4.return_value.obtain_lease.side_effect = [lease] @@ -3637,6 +3658,7 @@ mock_kvp_report_success_to_host, mock_netlink, mock_readurl, + mock_report_dmesg_to_kvp, mock_subp_subp, mock_timestamp, mock_util_ensure_dir, @@ -3666,6 +3688,7 @@ self.mock_kvp_report_success_to_host = mock_kvp_report_success_to_host self.mock_netlink = mock_netlink self.mock_readurl = mock_readurl + self.mock_report_dmesg_to_kvp = mock_report_dmesg_to_kvp self.mock_subp_subp = mock_subp_subp self.mock_timestmp = mock_timestamp self.mock_util_ensure_dir = mock_util_ensure_dir @@ -3755,6 +3778,7 @@ assert self.mock_azure_get_metadata_from_fabric.mock_calls == [ mock.call( endpoint="10.11.12.13", + distro=self.azure_ds.distro, iso_dev="/dev/sr0", pubkey_info=None, ) @@ -3771,6 +3795,9 @@ assert len(self.mock_kvp_report_failure_to_host.mock_calls) == 0 assert len(self.mock_kvp_report_success_to_host.mock_calls) == 1 + # Verify dmesg reported via KVP. 
+ assert len(self.mock_report_dmesg_to_kvp.mock_calls) == 1 + @pytest.mark.parametrize("pps_type", ["Savable", "Running"]) def test_stale_pps(self, pps_type): imds_md_source = copy.deepcopy(self.imds_md) @@ -3917,11 +3944,13 @@ assert self.mock_azure_get_metadata_from_fabric.mock_calls == [ mock.call( endpoint="10.11.12.13", + distro=self.azure_ds.distro, iso_dev="/dev/sr0", pubkey_info=None, ), mock.call( endpoint="10.11.12.13", + distro=self.azure_ds.distro, iso_dev=None, pubkey_info=None, ), @@ -3944,6 +3973,9 @@ assert len(self.mock_kvp_report_failure_to_host.mock_calls) == 0 assert len(self.mock_kvp_report_success_to_host.mock_calls) == 2 + # Verify dmesg reported via KVP. + assert len(self.mock_report_dmesg_to_kvp.mock_calls) == 2 + def test_savable_pps(self): imds_md_source = copy.deepcopy(self.imds_md) imds_md_source["extended"]["compute"]["ppsType"] = "Savable" @@ -4034,11 +4066,13 @@ assert self.mock_azure_get_metadata_from_fabric.mock_calls == [ mock.call( endpoint="10.11.12.13", + distro=self.azure_ds.distro, iso_dev="/dev/sr0", pubkey_info=None, ), mock.call( endpoint="10.11.12.13", + distro=self.azure_ds.distro, iso_dev=None, pubkey_info=None, ), @@ -4062,6 +4096,9 @@ assert len(self.mock_kvp_report_failure_to_host.mock_calls) == 0 assert len(self.mock_kvp_report_success_to_host.mock_calls) == 2 + # Verify dmesg reported via KVP. + assert len(self.mock_report_dmesg_to_kvp.mock_calls) == 2 + @pytest.mark.parametrize( "fabric_side_effect", [ @@ -4187,11 +4224,13 @@ assert self.mock_azure_get_metadata_from_fabric.mock_calls == [ mock.call( endpoint="10.11.12.13", + distro=self.azure_ds.distro, iso_dev="/dev/sr0", pubkey_info=None, ), mock.call( endpoint="10.11.12.13", + distro=self.azure_ds.distro, iso_dev=None, pubkey_info=None, ), @@ -4275,6 +4314,7 @@ assert self.mock_azure_get_metadata_from_fabric.mock_calls == [ mock.call( endpoint="10.11.12.13", + distro=self.azure_ds.distro, iso_dev="/dev/sr0", pubkey_info=None, ), @@ -4384,6 +4424,7 @@ assert self.mock_azure_get_metadata_from_fabric.mock_calls == [ mock.call( endpoint="10.11.12.13", + distro=self.azure_ds.distro, iso_dev="/dev/sr0", pubkey_info=None, ) @@ -4522,13 +4563,14 @@ class TestReportFailure: @pytest.mark.parametrize("kvp_enabled", [False, True]) - def report_host_only_kvp_enabled( + def test_report_host_only_kvp_enabled( self, azure_ds, kvp_enabled, mock_azure_report_failure_to_fabric, mock_kvp_report_failure_to_host, mock_kvp_report_success_to_host, + mock_report_dmesg_to_kvp, ): mock_kvp_report_failure_to_host.return_value = kvp_enabled error = errors.ReportableError(reason="foo") @@ -4538,6 +4580,7 @@ assert mock_kvp_report_failure_to_host.mock_calls == [mock.call(error)] assert mock_kvp_report_success_to_host.mock_calls == [] assert mock_azure_report_failure_to_fabric.mock_calls == [] + assert mock_report_dmesg_to_kvp.mock_calls == [mock.call()] class TestValidateIMDSMetadata: diff -Nru cloud-init-23.4.4/tests/unittests/sources/test_azure_helper.py cloud-init-24.1.3/tests/unittests/sources/test_azure_helper.py --- cloud-init-23.4.4/tests/unittests/sources/test_azure_helper.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/sources/test_azure_helper.py 2024-03-27 13:14:04.000000000 +0000 @@ -11,12 +11,14 @@ import requests from cloudinit import url_helper +from cloudinit.net import dhcp from cloudinit.sources.azure import errors from cloudinit.sources.helpers import azure as azure_helper from cloudinit.sources.helpers.azure import WALinuxAgentShim as wa_shim -from cloudinit.util 
import load_file +from cloudinit.util import load_text_file from tests.unittests.helpers import CiTestCase, ExitStack, mock from tests.unittests.sources.test_azure import construct_ovf_env +from tests.unittests.util import MockDistro GOAL_STATE_TEMPLATE = """\ @@ -106,6 +108,15 @@ MOCKPATH = "cloudinit.sources.helpers.azure." +@pytest.fixture(autouse=True) +def fake_vm_id(mocker): + vm_id = "foo" + mocker.patch( + "cloudinit.sources.azure.identity.query_vm_id", return_value=vm_id + ) + yield vm_id + + @pytest.fixture def mock_readurl(): with mock.patch(MOCKPATH + "url_helper.readurl", autospec=True) as m: @@ -141,12 +152,12 @@ ) def test_get_ip_from_lease_value(self, encoded_address, ip_address): assert ( - azure_helper.get_ip_from_lease_value(encoded_address) == ip_address + dhcp.IscDhclient.get_ip_from_lease_value(encoded_address) + == ip_address ) class TestGoalStateParsing(CiTestCase): - default_parameters = { "incarnation": 1, "container_id": "MyContainerId", @@ -242,7 +253,6 @@ class TestAzureEndpointHttpClient(CiTestCase): - regular_headers = { "x-ms-agent-name": "WALinuxAgent", "x-ms-version": "2012-11-30", @@ -524,8 +534,8 @@ @unittest.skip("todo move to cloud_test") def test_pubkey_extract(self): - cert = load_file(self._data_file("pubkey_extract_cert")) - good_key = load_file(self._data_file("pubkey_extract_ssh_key")) + cert = load_text_file(self._data_file("pubkey_extract_cert")) + good_key = load_text_file(self._data_file("pubkey_extract_ssh_key")) sslmgr = azure_helper.OpenSSLManager() key = sslmgr._get_ssh_key_from_cert(cert) self.assertEqual(good_key, key) @@ -542,8 +552,10 @@ from certs are extracted and that fingerprints are converted to the form specified in the ovf-env.xml file. """ - cert_contents = load_file(self._data_file("parse_certificates_pem")) - fingerprints = load_file( + cert_contents = load_text_file( + self._data_file("parse_certificates_pem") + ) + fingerprints = load_text_file( self._data_file("parse_certificates_fingerprints") ).splitlines() mock_decrypt_certs.return_value = cert_contents @@ -556,7 +568,6 @@ class TestGoalStateHealthReporter(CiTestCase): - maxDiff = None default_parameters = { @@ -1001,16 +1012,19 @@ self.GoalState.return_value.instance_id = self.test_instance_id def test_eject_iso_is_called(self): + mock_distro = MockDistro() shim = wa_shim(endpoint="test_endpoint") with mock.patch.object( shim, "eject_iso", autospec=True ) as m_eject_iso: - shim.register_with_azure_and_fetch_data(iso_dev="/dev/sr0") - m_eject_iso.assert_called_once_with("/dev/sr0") + shim.register_with_azure_and_fetch_data( + distro=mock_distro, iso_dev="/dev/sr0" + ) + m_eject_iso.assert_called_once_with("/dev/sr0", distro=mock_distro) def test_http_client_does_not_use_certificate_for_report_ready(self): shim = wa_shim(endpoint="test_endpoint") - shim.register_with_azure_and_fetch_data() + shim.register_with_azure_and_fetch_data(distro=None) self.assertEqual( [mock.call(None)], self.AzureEndpointHttpClient.call_args_list ) @@ -1024,7 +1038,7 @@ def test_correct_url_used_for_goalstate_during_report_ready(self): shim = wa_shim(endpoint="test_endpoint") - shim.register_with_azure_and_fetch_data() + shim.register_with_azure_and_fetch_data(distro=None) m_get = self.AzureEndpointHttpClient.return_value.get self.assertEqual( [mock.call("http://test_endpoint/machine/?comp=goalstate")], @@ -1076,7 +1090,9 @@ } sslmgr = self.OpenSSLManager.return_value sslmgr.parse_certificates.return_value = certs - data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) + data = 
shim.register_with_azure_and_fetch_data( + distro=None, pubkey_info=mypk + ) self.assertEqual( [mock.call(self.GoalState.return_value.certificates_xml)], sslmgr.parse_certificates.call_args_list, @@ -1089,12 +1105,14 @@ mypk = [{"fingerprint": "fp1", "path": "path1"}] self.GoalState.return_value.certificates_xml = None shim = wa_shim(endpoint="test_endpoint") - data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) + data = shim.register_with_azure_and_fetch_data( + distro=None, pubkey_info=mypk + ) self.assertEqual([], data) def test_correct_url_used_for_report_ready(self): shim = wa_shim(endpoint="test_endpoint") - shim.register_with_azure_and_fetch_data() + shim.register_with_azure_and_fetch_data(distro=None) expected_url = "http://test_endpoint/machine?comp=health" self.assertEqual( [mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)], @@ -1112,7 +1130,7 @@ def test_goal_state_values_used_for_report_ready(self): shim = wa_shim(endpoint="test_endpoint") - shim.register_with_azure_and_fetch_data() + shim.register_with_azure_and_fetch_data(distro=None) posted_document = ( self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"] ) @@ -1132,7 +1150,7 @@ def test_xml_elems_in_report_ready_post(self): shim = wa_shim(endpoint="test_endpoint") - shim.register_with_azure_and_fetch_data() + shim.register_with_azure_and_fetch_data(distro=None) health_document = get_formatted_health_report_xml_bytes( incarnation=escape(self.test_incarnation), container_id=escape(self.test_container_id), @@ -1170,7 +1188,7 @@ self, m_goal_state_health_reporter ): shim = wa_shim(endpoint="test_endpoint") - shim.register_with_azure_and_fetch_data() + shim.register_with_azure_and_fetch_data(distro=None) self.assertEqual( 1, m_goal_state_health_reporter.return_value.send_ready_signal.call_count, # noqa: E501 @@ -1204,14 +1222,14 @@ def test_openssl_manager_not_instantiated_by_shim_report_status(self): shim = wa_shim(endpoint="test_endpoint") - shim.register_with_azure_and_fetch_data() + shim.register_with_azure_and_fetch_data(distro=None) shim.register_with_azure_and_report_failure(description="TestDesc") shim.clean_up() self.OpenSSLManager.assert_not_called() def test_clean_up_after_report_ready(self): shim = wa_shim(endpoint="test_endpoint") - shim.register_with_azure_and_fetch_data() + shim.register_with_azure_and_fetch_data(distro=None) shim.clean_up() self.OpenSSLManager.return_value.clean_up.assert_not_called() @@ -1227,7 +1245,7 @@ ) shim = wa_shim(endpoint="test_endpoint") self.assertRaises( - url_helper.UrlError, shim.register_with_azure_and_fetch_data + url_helper.UrlError, shim.register_with_azure_and_fetch_data, None ) def test_fetch_goalstate_during_report_failure_raises_exc_on_get_exc(self): @@ -1245,7 +1263,7 @@ self.GoalState.side_effect = url_helper.UrlError("retry", code=404) shim = wa_shim(endpoint="test_endpoint") self.assertRaises( - url_helper.UrlError, shim.register_with_azure_and_fetch_data + url_helper.UrlError, shim.register_with_azure_and_fetch_data, None ) def test_fetch_goalstate_during_report_failure_raises_exc_on_parse_exc( @@ -1265,7 +1283,7 @@ ) shim = wa_shim(endpoint="test_endpoint") self.assertRaises( - url_helper.UrlError, shim.register_with_azure_and_fetch_data + url_helper.UrlError, shim.register_with_azure_and_fetch_data, None ) def test_failure_to_send_report_failure_health_doc_bubbles_up(self): @@ -1291,14 +1309,18 @@ ) def test_data_from_shim_returned(self): - ret = azure_helper.get_metadata_from_fabric(endpoint="test_endpoint") + ret = 
azure_helper.get_metadata_from_fabric( + distro=None, endpoint="test_endpoint" + ) self.assertEqual( self.m_shim.return_value.register_with_azure_and_fetch_data.return_value, # noqa: E501 ret, ) def test_success_calls_clean_up(self): - azure_helper.get_metadata_from_fabric(endpoint="test_endpoint") + azure_helper.get_metadata_from_fabric( + distro=None, endpoint="test_endpoint" + ) self.assertEqual(1, self.m_shim.return_value.clean_up.call_count) def test_failure_in_registration_propagates_exc_and_calls_clean_up(self): @@ -1309,6 +1331,7 @@ url_helper.UrlError, azure_helper.get_metadata_from_fabric, "test_endpoint", + None, ) self.assertEqual(1, self.m_shim.return_value.clean_up.call_count) @@ -1316,6 +1339,7 @@ m_pubkey_info = mock.MagicMock() azure_helper.get_metadata_from_fabric( endpoint="test_endpoint", + distro=None, pubkey_info=m_pubkey_info, iso_dev="/dev/sr0", ) @@ -1324,12 +1348,16 @@ self.m_shim.return_value.register_with_azure_and_fetch_data.call_count, # noqa: E501 ) self.assertEqual( - mock.call(iso_dev="/dev/sr0", pubkey_info=m_pubkey_info), + mock.call( + distro=None, iso_dev="/dev/sr0", pubkey_info=m_pubkey_info + ), self.m_shim.return_value.register_with_azure_and_fetch_data.call_args, # noqa: E501 ) def test_instantiates_shim_with_kwargs(self): - azure_helper.get_metadata_from_fabric(endpoint="test_endpoint") + azure_helper.get_metadata_from_fabric( + endpoint="test_endpoint", distro=None + ) self.assertEqual(1, self.m_shim.call_count) self.assertEqual( mock.call(endpoint="test_endpoint"), @@ -1543,19 +1571,23 @@ [ ( construct_ovf_env(username=None), - "No ovf-env.xml configuration for 'UserName'", + "unexpected metadata parsing ovf-env.xml: " + "missing configuration for 'UserName'", ), ( construct_ovf_env(hostname=None), - "No ovf-env.xml configuration for 'HostName'", + "unexpected metadata parsing ovf-env.xml: " + "missing configuration for 'HostName'", ), ], ) def test_missing_required_fields(self, ovf, error): - with pytest.raises(azure_helper.BrokenAzureDataSource) as exc_info: + with pytest.raises( + errors.ReportableErrorOvfInvalidMetadata + ) as exc_info: azure_helper.OvfEnvXml.parse_text(ovf) - assert str(exc_info.value) == error + assert str(exc_info.value.reason) == error def test_multiple_sections_fails(self): ovf = """\ @@ -1575,13 +1607,15 @@ """ - with pytest.raises(azure_helper.BrokenAzureDataSource) as exc_info: + with pytest.raises( + errors.ReportableErrorOvfInvalidMetadata + ) as exc_info: azure_helper.OvfEnvXml.parse_text(ovf) assert ( - str(exc_info.value) - == "Multiple configuration matches in ovf-exml.xml " "for 'ProvisioningSection' (2)" + exc_info.value.reason + == "unexpected metadata parsing ovf-env.xml: " + "multiple configuration matches for 'ProvisioningSection' (2)" ) def test_multiple_properties_fails(self): @@ -1607,13 +1641,15 @@ """ - with pytest.raises(azure_helper.BrokenAzureDataSource) as exc_info: + with pytest.raises( + errors.ReportableErrorOvfInvalidMetadata + ) as exc_info: azure_helper.OvfEnvXml.parse_text(ovf) assert ( - str(exc_info.value) - == "Multiple configuration matches in ovf-exml.xml " - "for 'HostName' (2)" + exc_info.value.reason + == "unexpected metadata parsing ovf-env.xml: " + "multiple configuration matches for 'HostName' (2)" ) def test_non_azure_ovf(self): @@ -1632,19 +1668,58 @@ ) @pytest.mark.parametrize( - "ovf,error", + "ovf,reason", [ - ("", "Invalid ovf-env.xml: no element found: line 1, column 0"), + ( + "", + "error parsing ovf-env.xml: " + "no element found: line 1, column 0", + ), ( "<!!!!>", -
"Invalid ovf-env.xml: not well-formed (invalid token): " - "line 1, column 2", + "error parsing ovf-env.xml: " + "not well-formed (invalid token): line 1, column 2", + ), + ( + "badxml", + "error parsing ovf-env.xml: syntax error: line 1, column 0", ), - ("badxml", "Invalid ovf-env.xml: syntax error: line 1, column 0"), ], ) - def test_invalid_xml(self, ovf, error): - with pytest.raises(azure_helper.BrokenAzureDataSource) as exc_info: + def test_invalid_xml(self, ovf, reason): + with pytest.raises( + errors.ReportableErrorOvfParsingException + ) as exc_info: azure_helper.OvfEnvXml.parse_text(ovf) - assert str(exc_info.value) == error + assert exc_info.value.reason == reason + + +class TestReportDmesgToKvp: + @mock.patch.object( + azure_helper.subp, "subp", return_value=("dmesg test", "") + ) + @mock.patch.object(azure_helper, "report_compressed_event") + def test_report_dmesg_to_kvp( + self, mock_report_compressed_event, mock_subp + ): + azure_helper.report_dmesg_to_kvp() + + assert mock_subp.mock_calls == [ + mock.call(["dmesg"], decode=False, capture=True) + ] + assert mock_report_compressed_event.mock_calls == [ + mock.call("dmesg", "dmesg test") + ] + + @mock.patch.object(azure_helper.subp, "subp", side_effect=[Exception()]) + @mock.patch.object(azure_helper, "report_compressed_event") + def test_report_dmesg_to_kvp_dmesg_error( + self, mock_report_compressed_event, mock_subp + ): + azure_helper.report_dmesg_to_kvp() + + assert mock_subp.mock_calls == [ + mock.call(["dmesg"], decode=False, capture=True) + ] + assert mock_report_compressed_event.mock_calls == [] diff -Nru cloud-init-23.4.4/tests/unittests/sources/test_bigstep.py cloud-init-24.1.3/tests/unittests/sources/test_bigstep.py --- cloud-init-23.4.4/tests/unittests/sources/test_bigstep.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/sources/test_bigstep.py 2024-03-27 13:14:04.000000000 +0000 @@ -22,7 +22,7 @@ class TestBigstep: @pytest.mark.parametrize("custom_paths", [False, True]) - @mock.patch(M_PATH + "util.load_file", return_value=IMDS_URL) + @mock.patch(M_PATH + "util.load_text_file", return_value=IMDS_URL) @responses.activate def test_get_data_honor_cloud_dir(self, m_load_file, custom_paths, tmpdir): responses.add(responses.GET, IMDS_URL, body=METADATA_BODY) diff -Nru cloud-init-23.4.4/tests/unittests/sources/test_cloudstack.py cloud-init-24.1.3/tests/unittests/sources/test_cloudstack.py --- cloud-init-23.4.4/tests/unittests/sources/test_cloudstack.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/sources/test_cloudstack.py 2024-03-27 13:14:04.000000000 +0000 @@ -1,17 +1,334 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+from textwrap import dedent -import os -import time +import pytest -from cloudinit import helpers, util -from cloudinit.net.dhcp import IscDhclient +from cloudinit import helpers +from cloudinit.distros import rhel, ubuntu +from cloudinit.sources import DataSourceHostname from cloudinit.sources.DataSourceCloudStack import DataSourceCloudStack from tests.unittests.helpers import CiTestCase, ExitStack, mock +from tests.unittests.util import MockDistro -MOD_PATH = "cloudinit.sources.DataSourceCloudStack" +SOURCES_PATH = "cloudinit.sources" +MOD_PATH = SOURCES_PATH + ".DataSourceCloudStack" DS_PATH = MOD_PATH + ".DataSourceCloudStack" +DHCP_MOD_PATH = "cloudinit.net.dhcp" +@pytest.mark.usefixtures("dhclient_exists") +class TestCloudStackHostname(CiTestCase): + def setUp(self): + super(TestCloudStackHostname, self).setUp() + self.patches = ExitStack() + self.addCleanup(self.patches.close) + self.hostname = "vm-hostname" + self.networkd_domainname = "networkd.local" + self.isc_dhclient_domainname = "dhclient.local" + + # Mock the parent class get_hostname() method to return + # a non-fqdn hostname + get_hostname_parent = mock.MagicMock( + return_value=DataSourceHostname(self.hostname, True) + ) + self.patches.enter_context( + mock.patch( + SOURCES_PATH + ".DataSource.get_hostname", get_hostname_parent + ) + ) + self.patches.enter_context( + mock.patch( + DHCP_MOD_PATH + ".util.load_text_file", + return_value=dedent( + """ + lease { + interface "eth0"; + fixed-address 10.0.0.5; + server-name "DSM111070915004"; + option subnet-mask 255.255.255.0; + option dhcp-lease-time 4294967295; + option routers 10.0.0.1; + option dhcp-message-type 5; + option dhcp-server-identifier 168.63.129.16; + option domain-name-servers 168.63.129.16; + option dhcp-renewal-time 4294967295; + option rfc3442-classless-static-routes """ + """0,10,0,0,1,32,168,63,129,16,10,0,0,1,32,169,254,""" + """169,254,10,0,0,1; + option unknown-245 a8:3f:81:10; + option dhcp-rebinding-time 4294967295; + """ + """renew 0 2160/02/17 02:22:33; + rebind 0 2160/02/17 02:22:33; + expire 0 2160/02/17 02:22:33; + } + """ + ), + ) + ) + + # Mock cloudinit.net.dhcp.networkd_get_option_from_leases() method \ + # result since we don't have a DHCP client running + networkd_get_option_from_leases = mock.MagicMock( + return_value=self.networkd_domainname + ) + self.patches.enter_context( + mock.patch( + DHCP_MOD_PATH + ".networkd_get_option_from_leases", + networkd_get_option_from_leases, + ) + ) + + # Mock cloudinit.net.dhcp.get_newest_lease_file_from_distro() method \ + # result since we don't have a DHCP client running + isc_dhclient_get_newest_lease_file_from_distro = mock.MagicMock( + return_value="/var/lib/NetworkManager/dhclient-u-u-i-d-eth0.lease" + ) + self.patches.enter_context( + mock.patch( + DHCP_MOD_PATH + + ".IscDhclient.get_newest_lease_file_from_distro", + isc_dhclient_get_newest_lease_file_from_distro, + ) + ) + + # Mock cloudinit.net.dhcp.networkd_get_option_from_leases() method \ + # result since we don't have a DHCP client running + lease = { + "interface": "eth0", + "fixed-address": "192.168.0.1", + "subnet-mask": "255.255.255.0", + "routers": "192.168.0.1", + "domain-name": self.isc_dhclient_domainname, + "renew": "4 2017/07/27 18:02:30", + "expire": "5 2017/07/28 07:08:15", + } + get_newest_lease = mock.MagicMock(return_value=lease) + + self.patches.enter_context( + mock.patch( + DHCP_MOD_PATH + ".IscDhclient.get_newest_lease", + get_newest_lease, + ) + ) + + self.patches.enter_context( + mock.patch( + DHCP_MOD_PATH + 
".IscDhclient.parse_leases", + mock.MagicMock(return_value=[lease]), + ) + ) + + # Mock get_vr_address() method as it relies to + # parsing DHCP/networkd files + self.patches.enter_context( + mock.patch( + MOD_PATH + ".get_vr_address", + mock.MagicMock(return_value="192.168.0.1"), + ) + ) + + self.tmp = self.tmp_dir() + + def test_get_domainname_networkd(self): + """ + Test if DataSourceCloudStack._get_domainname() + gets domain name from systemd-networkd leases. + """ + ds = DataSourceCloudStack( + {}, ubuntu.Distro, helpers.Paths({"run_dir": self.tmp}) + ) + result = ds._get_domainname() + self.assertEqual(self.networkd_domainname, result) + + def test_get_domainname_isc_dhclient(self): + """ + Test if DataSourceCloudStack._get_domainname() + gets domain name from isc-dhcp-client leases + """ + + # Override systemd-networkd reply mock to None + # to force the code to fallback to IscDhclient + get_networkd_domain = mock.MagicMock(return_value=None) + self.patches.enter_context( + mock.patch( + DHCP_MOD_PATH + ".networkd_get_option_from_leases", + get_networkd_domain, + ) + ) + + ds = DataSourceCloudStack( + {}, rhel.Distro, helpers.Paths({"run_dir": self.tmp}) + ) + with mock.patch( + MOD_PATH + ".util.load_text_file", + return_value=dedent( + """ + lease { + interface "eth0"; + fixed-address 10.0.0.5; + server-name "DSM111070915004"; + option subnet-mask 255.255.255.0; + option dhcp-lease-time 4294967295; + option routers 10.0.0.1; + option dhcp-message-type 5; + option dhcp-server-identifier 168.63.129.16; + option domain-name-servers 168.63.129.16; + option dhcp-renewal-time 4294967295; + option rfc3442-classless-static-routes """ + """0,10,0,0,1,32,168,63,129,16,10,0,0,1,32,169,254,""" + """169,254,10,0,0,1; + option unknown-245 a8:3f:81:10; + option dhcp-rebinding-time 4294967295; + """ + f"option domain-name {self.isc_dhclient_domainname};" + """renew 0 2160/02/17 02:22:33; + rebind 0 2160/02/17 02:22:33; + expire 0 2160/02/17 02:22:33; + } + """ + ), + ): + result = ds._get_domainname() + self.assertEqual(self.isc_dhclient_domainname, result) + + def test_get_hostname_non_fqdn(self): + """ + Test get_hostname() method implementation + with fqdn parameter=False. + It should call the parent class method and should + return its response intact. + """ + expected = DataSourceHostname(self.hostname, True) + + ds = DataSourceCloudStack( + {}, ubuntu.Distro, helpers.Paths({"run_dir": self.tmp}) + ) + result = ds.get_hostname(fqdn=False) + self.assertTupleEqual(expected, result) + + def test_get_hostname_fqdn(self): + """ + Test get_hostname() method implementation + with fqdn parameter=True. + It should look for domain name in DHCP leases. + """ + expected = DataSourceHostname( + self.hostname + "." + self.networkd_domainname, True + ) + + ds = DataSourceCloudStack( + {}, ubuntu.Distro, helpers.Paths({"run_dir": self.tmp}) + ) + result = ds.get_hostname(fqdn=True) + self.assertTupleEqual(expected, result) + + def test_get_hostname_fqdn_fallback(self): + """ + Test get_hostname() when some error happens + during domainname discovery. + + We mock both systemd-networkd discovery as None, + And the IscDhclient not having domain-name option + in the lease. + + It should return the hostname without domainname + in such cases. 
+ """ + expected = DataSourceHostname(self.hostname, True) + + # Override systemd-networkd reply mock to None + # to force the code to fallback to IscDhclient + get_networkd_domain = mock.MagicMock(return_value=None) + self.patches.enter_context( + mock.patch( + DHCP_MOD_PATH + ".networkd_get_option_from_leases", + get_networkd_domain, + ) + ) + + self.patches.enter_context( + mock.patch( + "cloudinit.distros.net.find_fallback_nic", + return_value="eth0", + ) + ) + + self.patches.enter_context( + mock.patch( + MOD_PATH + + ".dhcp.IscDhclient.get_newest_lease_file_from_distro", + return_value=True, + ) + ) + + self.patches.enter_context( + mock.patch( + MOD_PATH + ".dhcp.IscDhclient.parse_leases", return_value=[] + ) + ) + + lease = { + "interface": "eth0", + "fixed-address": "192.168.0.1", + "subnet-mask": "255.255.255.0", + "routers": "192.168.0.1", + "renew": "4 2017/07/27 18:02:30", + "expire": "5 2017/07/28 07:08:15", + } + self.patches.enter_context( + mock.patch( + DHCP_MOD_PATH + ".IscDhclient.get_newest_lease", + return_value=lease, + ) + ) + self.patches.enter_context( + mock.patch( + DHCP_MOD_PATH + ".Dhcpcd.get_newest_lease", return_value=lease + ) + ) + + self.patches.enter_context( + mock.patch( + DHCP_MOD_PATH + ".util.load_text_file", + return_value=dedent( + """ + lease { + interface "eth0"; + fixed-address 10.0.0.5; + server-name "DSM111070915004"; + option subnet-mask 255.255.255.0; + option dhcp-lease-time 4294967295; + option routers 10.0.0.1; + option dhcp-message-type 5; + option dhcp-server-identifier 168.63.129.16; + option domain-name-servers 168.63.129.16; + option dhcp-renewal-time 4294967295; + option rfc3442-classless-static-routes """ + """0,10,0,0,1,32,168,63,129,16,10,0,0,1,32,169,254,""" + """169,254,10,0,0,1; + option unknown-245 a8:3f:81:10; + option dhcp-rebinding-time 4294967295; + """ + """renew 0 2160/02/17 02:22:33; + rebind 0 2160/02/17 02:22:33; + expire 0 2160/02/17 02:22:33; + } + """ + ), + ) + ) + + ds = DataSourceCloudStack( + {}, ubuntu.Distro("", {}, {}), helpers.Paths({"run_dir": self.tmp}) + ) + ds.distro.fallback_interface = "eth0" + with mock.patch(MOD_PATH + ".util.load_text_file"): + result = ds.get_hostname(fqdn=True) + self.assertTupleEqual(expected, result) + + +@pytest.mark.usefixtures("dhclient_exists") class TestCloudStackPasswordFetching(CiTestCase): def setUp(self): super(TestCloudStackPasswordFetching, self).setUp() @@ -21,11 +338,26 @@ self.patches.enter_context(mock.patch("{0}.ec2".format(mod_name))) self.patches.enter_context(mock.patch("{0}.uhelp".format(mod_name))) default_gw = "192.201.20.0" - get_latest_lease = mock.MagicMock(return_value=None) + + get_newest_lease_file_from_distro = mock.MagicMock(return_value=None) self.patches.enter_context( mock.patch( - mod_name + ".dhcp.IscDhclient.get_latest_lease", - get_latest_lease, + DHCP_MOD_PATH + ".IscDhclient.get_newest_lease", + return_value={ + "interface": "eth0", + "fixed-address": "192.168.0.1", + "subnet-mask": "255.255.255.0", + "routers": "192.168.0.1", + "renew": "4 2017/07/27 18:02:30", + "expire": "5 2017/07/28 07:08:15", + }, + ) + ) + self.patches.enter_context( + mock.patch( + DHCP_MOD_PATH + + ".IscDhclient.get_newest_lease_file_from_distro", + get_newest_lease_file_from_distro, ) ) @@ -60,7 +392,7 @@ def test_empty_password_doesnt_create_config(self): self._set_password_server_response("") ds = DataSourceCloudStack( - {}, None, helpers.Paths({"run_dir": self.tmp}) + {}, MockDistro(), helpers.Paths({"run_dir": self.tmp}) ) ds.get_data() self.assertEqual({}, 
ds.get_config_obj()) @@ -68,7 +400,7 @@ def test_saved_password_doesnt_create_config(self): self._set_password_server_response("saved_password") ds = DataSourceCloudStack( - {}, None, helpers.Paths({"run_dir": self.tmp}) + {}, MockDistro(), helpers.Paths({"run_dir": self.tmp}) ) ds.get_data() self.assertEqual({}, ds.get_config_obj()) @@ -79,7 +411,7 @@ password = "SekritSquirrel" self._set_password_server_response(password) ds = DataSourceCloudStack( - {}, None, helpers.Paths({"run_dir": self.tmp}) + {}, MockDistro(), helpers.Paths({"run_dir": self.tmp}) ) ds.get_data() self.assertEqual(password, ds.get_config_obj()["password"]) @@ -88,8 +420,9 @@ def test_bad_request_doesnt_stop_ds_from_working(self, m_wait): m_wait.return_value = True self._set_password_server_response("bad_request") + # with mock.patch(DHCP_MOD_PATH + ".util.load_text_file"): ds = DataSourceCloudStack( - {}, None, helpers.Paths({"run_dir": self.tmp}) + {}, MockDistro(), helpers.Paths({"run_dir": self.tmp}) ) self.assertTrue(ds.get_data()) @@ -108,7 +441,7 @@ password = "SekritSquirrel" subp = self._set_password_server_response(password) ds = DataSourceCloudStack( - {}, None, helpers.Paths({"run_dir": self.tmp}) + {}, MockDistro(), helpers.Paths({"run_dir": self.tmp}) ) ds.get_data() self.assertRequestTypesSent( @@ -118,7 +451,7 @@ def _check_password_not_saved_for(self, response_string): subp = self._set_password_server_response(response_string) ds = DataSourceCloudStack( - {}, None, helpers.Paths({"run_dir": self.tmp}) + {}, MockDistro(), helpers.Paths({"run_dir": self.tmp}) ) with mock.patch(DS_PATH + ".wait_for_metadata_service") as m_wait: m_wait.return_value = True @@ -133,77 +466,3 @@ def test_password_not_saved_if_bad_request(self): self._check_password_not_saved_for("bad_request") - - -class TestGetLatestLease(CiTestCase): - def _populate_dir_list(self, bdir, files): - """populate_dir_list([(name, data), (name, data)]) - - writes files to bdir, and updates timestamps to ensure - that their mtime increases with each file.""" - - start = int(time.time()) - for num, fname in enumerate(reversed(files)): - fpath = os.path.sep.join((bdir, fname)) - util.write_file(fpath, fname.encode()) - os.utime(fpath, (start - num, start - num)) - - def _pop_and_test(self, files, expected): - lease_d = self.tmp_dir() - self._populate_dir_list(lease_d, files) - self.assertEqual( - self.tmp_path(expected, lease_d), - IscDhclient.get_latest_lease(lease_d), - ) - - def test_skips_dhcpv6_files(self): - """files started with dhclient6 should be skipped.""" - expected = "dhclient.lease" - self._pop_and_test([expected, "dhclient6.lease"], expected) - - def test_selects_dhclient_dot_files(self): - """files named dhclient.lease or dhclient.leases should be used. - - Ubuntu names files dhclient.eth0.leases dhclient6.leases and - sometimes dhclient.leases.""" - self._pop_and_test(["dhclient.lease"], "dhclient.lease") - self._pop_and_test(["dhclient.leases"], "dhclient.leases") - - def test_selects_dhclient_dash_files(self): - """files named dhclient-lease or dhclient-leases should be used. - - Redhat/Centos names files with dhclient--eth0.lease (centos 7) or - dhclient-eth0.leases (centos 6). 
- """ - self._pop_and_test(["dhclient-eth0.lease"], "dhclient-eth0.lease") - self._pop_and_test(["dhclient--eth0.lease"], "dhclient--eth0.lease") - - def test_ignores_by_extension(self): - """only .lease or .leases file should be considered.""" - - self._pop_and_test( - [ - "dhclient.lease", - "dhclient.lease.bk", - "dhclient.lease-old", - "dhclient.leaselease", - ], - "dhclient.lease", - ) - - def test_selects_newest_matching(self): - """If multiple files match, the newest written should be used.""" - lease_d = self.tmp_dir() - valid_1 = "dhclient.leases" - valid_2 = "dhclient.lease" - valid_1_path = self.tmp_path(valid_1, lease_d) - valid_2_path = self.tmp_path(valid_2, lease_d) - - self._populate_dir_list(lease_d, [valid_1, valid_2]) - self.assertEqual(valid_2_path, IscDhclient.get_latest_lease(lease_d)) - - # now update mtime on valid_2 to be older than valid_1 and re-check. - mtime = int(os.path.getmtime(valid_1_path)) - 1 - os.utime(valid_2_path, (mtime, mtime)) - - self.assertEqual(valid_1_path, IscDhclient.get_latest_lease(lease_d)) diff -Nru cloud-init-23.4.4/tests/unittests/sources/test_common.py cloud-init-24.1.3/tests/unittests/sources/test_common.py --- cloud-init-23.4.4/tests/unittests/sources/test_common.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/sources/test_common.py 2024-03-27 13:14:04.000000000 +0000 @@ -33,6 +33,7 @@ from cloudinit.sources import DataSourceUpCloud as UpCloud from cloudinit.sources import DataSourceVMware as VMware from cloudinit.sources import DataSourceVultr as Vultr +from cloudinit.sources import DataSourceWSL as WSL from tests.unittests import helpers as test_helpers DEFAULT_LOCAL = [ @@ -45,7 +46,7 @@ Hetzner.DataSourceHetzner, IBMCloud.DataSourceIBMCloud, LXD.DataSourceLXD, - MAAS.DataSourceMAAS, + MAAS.DataSourceMAASLocal, NoCloud.DataSourceNoCloud, OpenNebula.DataSourceOpenNebula, Oracle.DataSourceOracle, @@ -60,6 +61,7 @@ VMware.DataSourceVMware, NWCS.DataSourceNWCS, Akamai.DataSourceAkamaiLocal, + WSL.DataSourceWSL, ] DEFAULT_NETWORK = [ @@ -74,6 +76,7 @@ MAAS.DataSourceMAAS, NoCloud.DataSourceNoCloudNet, OpenStack.DataSourceOpenStack, + Oracle.DataSourceOracleNet, OVF.DataSourceOVFNet, UpCloud.DataSourceUpCloud, Akamai.DataSourceAkamai, diff -Nru cloud-init-23.4.4/tests/unittests/sources/test_ec2.py cloud-init-24.1.3/tests/unittests/sources/test_ec2.py --- cloud-init-23.4.4/tests/unittests/sources/test_ec2.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/sources/test_ec2.py 2024-03-27 13:14:04.000000000 +0000 @@ -10,8 +10,10 @@ import responses from cloudinit import helpers +from cloudinit.net import activators from cloudinit.sources import DataSourceEc2 as ec2 from tests.unittests import helpers as test_helpers +from tests.unittests.util import MockDistro DYNAMIC_METADATA = { "instance-identity": { @@ -170,6 +172,28 @@ "vpc-ipv4-cidr-blocks": "172.31.0.0/16", } +NIC2_MD_IPV4_IPV6_MULTI_IP = { + "device-number": "1", + "interface-id": "eni-043cdce36ded5e79f", + "ipv6s": [ + "2600:1f16:292:100:c187:593c:4349:136", + "2600:1f16:292:100:f153:12a3:c37c:11f9", + ], + "local-hostname": "ip-172-31-47-221.us-east-2.compute.internal", + "local-ipv4s": "172.31.47.221", + "mac": "0a:75:69:92:e2:16", + "owner-id": "329910648901", + "security-group-ids": "sg-0d68fef37d8cc9b77", + "security-groups": "launch-wizard-17", + "subnet-id": "subnet-9d7ba0d1", + "subnet-ipv4-cidr-block": "172.31.32.0/20", + "subnet-ipv6-cidr-blocks": "2600:1f16:292:100::/64", + "vpc-id": "vpc-a07f62c8", + 
"vpc-ipv4-cidr-block": "172.31.0.0/16", + "vpc-ipv4-cidr-blocks": "172.31.0.0/16", + "vpc-ipv6-cidr-blocks": "2600:1f16:292:100::/56", +} + SECONDARY_IP_METADATA_2018_09_24 = { "ami-id": "ami-0986c2ac728528ac2", "ami-launch-index": "0", @@ -210,6 +234,7 @@ "services": {"domain": "amazonaws.com", "partition": "aws"}, } +M_PATH = "cloudinit.sources.DataSourceEc2." M_PATH_NET = "cloudinit.sources.DataSourceEc2.net." TAGS_METADATA_2021_03_23: dict = { @@ -342,9 +367,11 @@ p.start() self.addCleanup(p.stop) - def _setup_ds(self, sys_cfg, platform_data, md, md_version=None): + def _setup_ds( + self, sys_cfg, platform_data, md, md_version=None, distro=None + ): self.uris = [] - distro = mock.MagicMock() + distro = distro or mock.MagicMock() distro.get_tmp_exec_path = self.tmp_dir paths = helpers.Paths({"run_dir": self.tmp}) if sys_cfg is None: @@ -479,6 +506,10 @@ with mock.patch(patch_path) as m_get_interfaces_by_mac: with mock.patch(find_fallback_path) as m_find_fallback: with mock.patch(get_interface_mac_path) as m_get_mac: + dhcp_client = ds.distro.dhcp_client + dhcp_client.dhcp_discovery.return_value = { + "routers": "172.31.1.0" + } m_get_interfaces_by_mac.return_value = {mac1: "eth9"} m_find_fallback.return_value = "eth9" m_get_mac.return_value = mac1 @@ -577,7 +608,7 @@ ) mac1 = "06:17:04:d7:26:09" # Defined in DEFAULT_METADATA get_interface_mac_path = M_PATH_NET + "get_interfaces_by_mac" - ds.fallback_nic = "eth9" + ds.distro.fallback_nic = "eth9" with mock.patch(get_interface_mac_path) as m_get_interfaces_by_mac: m_get_interfaces_by_mac.return_value = {mac1: "eth9"} nc = ds.network_config # Will re-crawl network metadata @@ -846,7 +877,7 @@ @mock.patch("cloudinit.net.ephemeral.EphemeralIPv6Network") @mock.patch("cloudinit.net.ephemeral.EphemeralIPv4Network") - @mock.patch("cloudinit.net.find_fallback_nic") + @mock.patch("cloudinit.distros.net.find_fallback_nic") @mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery") @mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD") def test_ec2_local_performs_dhcp_on_non_bsd( @@ -861,20 +892,19 @@ m_fallback_nic.return_value = "eth9" m_is_bsd.return_value = False - m_dhcp.return_value = [ - { - "interface": "eth9", - "fixed-address": "192.168.2.9", - "routers": "192.168.2.1", - "subnet-mask": "255.255.255.0", - "broadcast-address": "192.168.2.255", - } - ] + m_dhcp.return_value = { + "interface": "eth9", + "fixed-address": "192.168.2.9", + "routers": "192.168.2.1", + "subnet-mask": "255.255.255.0", + "broadcast-address": "192.168.2.255", + } self.datasource = ec2.DataSourceEc2Local ds = self._setup_ds( platform_data=self.valid_platform_data, sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, md={"md": DEFAULT_METADATA}, + distro=MockDistro("", {}, {}), ) ret = ds.get_data() @@ -906,7 +936,6 @@ class TestGetSecondaryAddresses(test_helpers.CiTestCase): - mac = "06:17:04:d7:26:ff" with_logs = True @@ -951,6 +980,144 @@ self.assertIn(log, logs) +class TestBuildNicOrder: + @pytest.mark.parametrize( + ["macs_metadata", "macs", "expected"], + [ + pytest.param({}, [], {}, id="all_empty"), + pytest.param( + {}, ["0a:f7:8d:96:f2:a1"], {}, id="empty_macs_metadata" + ), + pytest.param( + { + "0a:0d:dd:44:cd:7b": { + "device-number": "0", + "mac": "0a:0d:dd:44:cd:7b", + } + }, + [], + {}, + id="empty_macs", + ), + pytest.param( + { + "0a:0d:dd:44:cd:7b": { + "mac": "0a:0d:dd:44:cd:7b", + }, + "0a:f7:8d:96:f2:a1": { + "mac": "0a:f7:8d:96:f2:a1", + }, + }, + ["0a:f7:8d:96:f2:a1", "0a:0d:dd:44:cd:7b"], + {"0a:f7:8d:96:f2:a1": 0, 
"0a:0d:dd:44:cd:7b": 1}, + id="no-device-number-info", + ), + pytest.param( + { + "0a:0d:dd:44:cd:7b": { + "mac": "0a:0d:dd:44:cd:7b", + }, + "0a:f7:8d:96:f2:a1": { + "mac": "0a:f7:8d:96:f2:a1", + }, + }, + ["0a:f7:8d:96:f2:a1"], + {"0a:f7:8d:96:f2:a1": 0}, + id="no-device-number-info-subset", + ), + pytest.param( + { + "0a:0d:dd:44:cd:7b": { + "device-number": "0", + "mac": "0a:0d:dd:44:cd:7b", + }, + "0a:f7:8d:96:f2:a1": { + "device-number": "1", + "mac": "0a:f7:8d:96:f2:a1", + }, + }, + ["0a:f7:8d:96:f2:a1", "0a:0d:dd:44:cd:7b"], + {"0a:0d:dd:44:cd:7b": 0, "0a:f7:8d:96:f2:a1": 1}, + id="device-numbers", + ), + pytest.param( + { + "0a:0d:dd:44:cd:7b": { + "network-card": "0", + "device-number": "0", + "mac": "0a:0d:dd:44:cd:7b", + }, + "0a:f7:8d:96:f2:a1": { + "network-card": "1", + "device-number": "1", + "mac": "0a:f7:8d:96:f2:a1", + }, + "0a:f7:8d:96:f2:a2": { + "network-card": "2", + "device-number": "1", + "mac": "0a:f7:8d:96:f2:a1", + }, + }, + [ + "0a:f7:8d:96:f2:a1", + "0a:0d:dd:44:cd:7b", + "0a:f7:8d:96:f2:a2", + ], + { + "0a:0d:dd:44:cd:7b": 0, + "0a:f7:8d:96:f2:a1": 1, + "0a:f7:8d:96:f2:a2": 2, + }, + id="network-cardes", + ), + pytest.param( + { + "0a:0d:dd:44:cd:7b": { + "network-card": "0", + "device-number": "0", + "mac": "0a:0d:dd:44:cd:7b", + }, + "0a:f7:8d:96:f2:a1": { + "network-card": "1", + "device-number": "1", + "mac": "0a:f7:8d:96:f2:a1", + }, + "0a:f7:8d:96:f2:a2": { + "device-number": "1", + "mac": "0a:f7:8d:96:f2:a1", + }, + }, + [ + "0a:f7:8d:96:f2:a1", + "0a:0d:dd:44:cd:7b", + "0a:f7:8d:96:f2:a2", + ], + { + "0a:0d:dd:44:cd:7b": 0, + "0a:f7:8d:96:f2:a1": 1, + "0a:f7:8d:96:f2:a2": 2, + }, + id="network-card-partially-missing", + ), + pytest.param( + { + "0a:0d:dd:44:cd:7b": { + "mac": "0a:0d:dd:44:cd:7b", + }, + "0a:f7:8d:96:f2:a1": { + "mac": "0a:f7:8d:96:f2:a1", + }, + }, + ["0a:f7:8d:96:f2:a9"], + {}, + id="macs-not-in-md", + ), + ], + ) + def test_build_nic_order(self, macs_metadata, macs, expected): + assert expected == ec2._build_nic_order(macs_metadata, macs) + + class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase): def setUp(self): super(TestConvertEc2MetadataNetworkConfig, self).setUp() @@ -980,10 +1147,11 @@ } }, } + distro = mock.Mock() self.assertEqual( expected, ec2.convert_ec2_metadata_network_config( - self.network_metadata, macs_to_nics + self.network_metadata, distro, macs_to_nics ), ) @@ -1005,10 +1173,11 @@ } }, } + distro = mock.Mock() self.assertEqual( expected, ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, macs_to_nics + network_metadata_ipv6, distro, macs_to_nics ), ) @@ -1030,10 +1199,11 @@ } }, } + distro = mock.Mock() self.assertEqual( expected, ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, macs_to_nics + network_metadata_ipv6, distro, macs_to_nics ), ) @@ -1056,10 +1226,11 @@ } }, } + distro = mock.Mock() self.assertEqual( expected, ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, macs_to_nics, fallback_nic="eth9" + network_metadata_ipv6, distro, macs_to_nics ), ) @@ -1082,15 +1253,18 @@ } }, } + distro = mock.Mock() self.assertEqual( expected, ec2.convert_ec2_metadata_network_config( - network_metadata_both, macs_to_nics + network_metadata_both, distro, macs_to_nics ), ) - def test_convert_ec2_metadata_network_config_handles_multiple_nics(self): - """DHCP route-metric increases on secondary NICs for IPv4 and IPv6.""" + def test_convert_ec2_metadata_network_config_multi_nics_ipv4(self): + """DHCP route-metric increases on secondary NICs for IPv4 and IPv6. 
+ Source-routing configured for secondary NICs (routing-policy and extra + routing table).""" mac2 = "06:17:04:d7:26:08" macs_to_nics = {self.mac1: "eth9", mac2: "eth10"} network_metadata_both = copy.deepcopy(self.network_metadata) @@ -1115,15 +1289,110 @@ "match": {"macaddress": mac2}, "set-name": "eth10", "dhcp4": True, - "dhcp4-overrides": {"route-metric": 200}, + "dhcp4-overrides": { + "route-metric": 200, + "use-routes": True, + }, "dhcp6": False, + "routes": [ + # via DHCP gateway + {"to": "0.0.0.0/0", "via": "172.31.1.0", "table": 101}, + # to NIC2_MD["subnet-ipv4-cidr-block"] + {"to": "172.31.32.0/20", "table": 101}, + ], + "routing-policy": [ + # NIC2_MD["local-ipv4s"] + {"from": "172.31.47.221", "table": 101} + ], + }, + }, + } + distro = mock.Mock() + distro.network_activator = activators.NetplanActivator + distro.dhcp_client.dhcp_discovery.return_value = { + "routers": "172.31.1.0" + } + self.assertEqual( + expected, + ec2.convert_ec2_metadata_network_config( + network_metadata_both, distro, macs_to_nics + ), + ) + + def test_convert_ec2_metadata_network_config_multi_nics_ipv4_ipv6_multi_ip( + self, + ): + """DHCP route-metric increases on secondary NICs for IPv4 and IPv6. + Source-routing configured for secondary NICs (routing-policy and extra + routing table).""" + mac2 = "06:17:04:d7:26:08" + macs_to_nics = {self.mac1: "eth9", mac2: "eth10"} + network_metadata_both = copy.deepcopy(self.network_metadata) + # Add 2nd nic info + network_metadata_both["interfaces"]["macs"][ + mac2 + ] = NIC2_MD_IPV4_IPV6_MULTI_IP + nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1] + nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" + nic1_metadata.pop("public-ipv4s") # No public-ipv4 IPs in cfg + nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc + expected = { + "version": 2, + "ethernets": { + "eth9": { + "dhcp4": True, + "dhcp4-overrides": {"route-metric": 100}, + "dhcp6": True, + "match": {"macaddress": "06:17:04:d7:26:09"}, + "set-name": "eth9", + "dhcp6-overrides": {"route-metric": 100}, + }, + "eth10": { + "dhcp4": True, + "dhcp4-overrides": { + "route-metric": 200, + "use-routes": True, + }, + "dhcp6": True, + "match": {"macaddress": "06:17:04:d7:26:08"}, + "set-name": "eth10", + "routes": [ + # via DHCP gateway + {"to": "0.0.0.0/0", "via": "172.31.1.0", "table": 101}, + # to NIC2_MD["subnet-ipv4-cidr-block"] + {"to": "172.31.32.0/20", "table": 101}, + # to NIC2_MD["subnet-ipv6-cidr-blocks"] + {"to": "2600:1f16:292:100::/64", "table": 101}, + ], + "routing-policy": [ + # NIC2_MD["local-ipv4s"] + {"from": "172.31.47.221", "table": 101}, + { + "from": "2600:1f16:292:100:c187:593c:4349:136", + "table": 101, + }, + { + "from": "2600:1f16:292:100:f153:12a3:c37c:11f9", + "table": 101, + }, + ], + "dhcp6-overrides": { + "route-metric": 200, + "use-routes": True, + }, + "addresses": ["2600:1f16:292:100:f153:12a3:c37c:11f9/128"], }, }, } + distro = mock.Mock() + distro.network_activator = activators.NetplanActivator + distro.dhcp_client.dhcp_discovery.return_value = { + "routers": "172.31.1.0" + } self.assertEqual( expected, ec2.convert_ec2_metadata_network_config( - network_metadata_both, macs_to_nics + network_metadata_both, distro, macs_to_nics ), ) @@ -1144,10 +1413,11 @@ } }, } + distro = mock.Mock() self.assertEqual( expected, ec2.convert_ec2_metadata_network_config( - network_metadata_both, macs_to_nics + network_metadata_both, distro, macs_to_nics ), ) @@ -1165,11 +1435,14 @@ }, } patch_path = M_PATH_NET + "get_interfaces_by_mac" + 
distro = mock.Mock() with mock.patch(patch_path) as m_get_interfaces_by_mac: m_get_interfaces_by_mac.return_value = {self.mac1: "eth9"} self.assertEqual( expected, - ec2.convert_ec2_metadata_network_config(self.network_metadata), + ec2.convert_ec2_metadata_network_config( + self.network_metadata, distro + ), ) diff -Nru cloud-init-23.4.4/tests/unittests/sources/test_gce.py cloud-init-24.1.3/tests/unittests/sources/test_gce.py --- cloud-init-23.4.4/tests/unittests/sources/test_gce.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/sources/test_gce.py 2024-03-27 13:14:04.000000000 +0000 @@ -405,9 +405,8 @@ autospec=True, ) @mock.patch(M_PATH + "net.find_candidate_nics", return_value=["ens4"]) - @mock.patch(M_PATH + "DataSourceGCELocal.fallback_interface") def test_local_datasource_uses_ephemeral_dhcp( - self, _m_fallback, _m_find_candidate_nics, m_dhcp + self, _m_find_candidate_nics, m_dhcp ): self._set_mock_metadata() distro = mock.MagicMock() @@ -424,9 +423,8 @@ autospec=True, ) @mock.patch(M_PATH + "net.find_candidate_nics") - @mock.patch(M_PATH + "DataSourceGCELocal.fallback_interface") def test_local_datasource_tries_on_multi_nic( - self, _m_fallback, m_find_candidate_nics, m_dhcp, m_read_md + self, m_find_candidate_nics, m_dhcp, m_read_md ): self._set_mock_metadata() distro = mock.MagicMock() @@ -464,7 +462,7 @@ mock.call(distro, iface="ens0p5"), mock.call(distro, iface="ens0p6"), ] - assert ds._fallback_interface == "ens0p6" + assert ds.distro.fallback_interface == "ens0p6" assert ds.metadata == "md" assert ds.userdata_raw == "ud" diff -Nru cloud-init-23.4.4/tests/unittests/sources/test_init.py cloud-init-24.1.3/tests/unittests/sources/test_init.py --- cloud-init-23.4.4/tests/unittests/sources/test_init.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/sources/test_init.py 2024-03-27 13:14:04.000000000 +0000 @@ -6,6 +6,7 @@ import stat from cloudinit import importer, util +from cloudinit.distros import ubuntu from cloudinit.event import EventScope, EventType from cloudinit.helpers import Paths from cloudinit.sources import ( @@ -73,7 +74,7 @@ def setUp(self): super(TestDataSource, self).setUp() self.sys_cfg = {"datasource": {"_undef": {"key1": False}}} - self.distro = "distrotest" # generally should be a Distro object + self.distro = ubuntu.Distro("somedistro", {}, {}) self.paths = Paths({}) self.datasource = DataSource(self.sys_cfg, self.distro, self.paths) @@ -201,28 +202,28 @@ for log in expected_logs: self.assertIn(log, logs) - @mock.patch("cloudinit.sources.net.find_fallback_nic") + @mock.patch("cloudinit.distros.net.find_fallback_nic") def test_fallback_interface_is_discovered(self, m_get_fallback_nic): """The fallback_interface is discovered via find_fallback_nic.""" m_get_fallback_nic.return_value = "nic9" - self.assertEqual("nic9", self.datasource.fallback_interface) + self.assertEqual("nic9", self.datasource.distro.fallback_interface) @mock.patch("cloudinit.sources.net.find_fallback_nic") def test_fallback_interface_logs_undiscovered(self, m_get_fallback_nic): """Log a warning when fallback_interface can not discover the nic.""" - self.datasource._cloud_name = "MySupahCloud" m_get_fallback_nic.return_value = None # Couldn't discover nic - self.assertIsNone(self.datasource.fallback_interface) + self.assertIsNone(self.datasource.distro.fallback_interface) self.assertEqual( - "WARNING: Did not find a fallback interface on MySupahCloud.\n", + "WARNING: Did not find a fallback interface on distro: " + "somedistro.\n", 
self.logs.getvalue(), ) @mock.patch("cloudinit.sources.net.find_fallback_nic") def test_wb_fallback_interface_is_cached(self, m_get_fallback_nic): """The fallback_interface is cached and won't be rediscovered.""" - self.datasource._fallback_interface = "nic10" - self.assertEqual("nic10", self.datasource.fallback_interface) + self.datasource.distro.fallback_interface = "nic10" + self.assertEqual("nic10", self.datasource.distro.fallback_interface) m_get_fallback_nic.assert_not_called() def test__get_data_unimplemented(self): @@ -395,7 +396,7 @@ ): datasource.get_data() json_file = Paths({"run_dir": tmp}).get_runpath("instance_data") - content = util.load_file(json_file) + content = util.load_text_file(json_file) expected = { "base64_encoded_keys": [], "merged_cfg": REDACT_SENSITIVE_VALUE, @@ -500,7 +501,7 @@ with mock.patch("cloudinit.util.system_info", return_value=sys_info): datasource.get_data() json_file = Paths({"run_dir": tmp}).get_runpath("instance_data") - redacted = util.load_json(util.load_file(json_file)) + redacted = util.load_json(util.load_text_file(json_file)) expected = { "base64_encoded_keys": [], "merged_cfg": REDACT_SENSITIVE_VALUE, @@ -618,7 +619,7 @@ sensitive_json_file = Paths({"run_dir": tmp}).get_runpath( "instance_data_sensitive" ) - content = util.load_file(sensitive_json_file) + content = util.load_text_file(sensitive_json_file) expected = { "base64_encoded_keys": [], "merged_cfg": { @@ -699,7 +700,7 @@ ) datasource.get_data() json_file = paths.get_runpath("instance_data") - content = util.load_file(json_file) + content = util.load_text_file(json_file) expected_metadata = { "key1": "val1", "key2": { @@ -726,11 +727,11 @@ datasource.ec2_metadata = UNSET datasource.get_data() json_file = paths.get_runpath("instance_data") - instance_data = util.load_json(util.load_file(json_file)) + instance_data = util.load_json(util.load_text_file(json_file)) self.assertNotIn("ec2_metadata", instance_data["ds"]) datasource.ec2_metadata = {"ec2stuff": "is good"} datasource.persist_instance_data() - instance_data = util.load_json(util.load_file(json_file)) + instance_data = util.load_json(util.load_text_file(json_file)) self.assertEqual( {"ec2stuff": "is good"}, instance_data["ds"]["ec2_metadata"] ) @@ -756,7 +757,7 @@ "cloudinit.sources.canonical_cloud_id", return_value="my-cloud" ): datasource.get_data() - self.assertEqual("my-cloud\n", util.load_file(cloud_id_link)) + self.assertEqual("my-cloud\n", util.load_text_file(cloud_id_link)) # A symlink with the generic /run/cloud-init/cloud-id # link is present self.assertTrue(util.is_link(cloud_id_link)) @@ -768,12 +769,12 @@ "cloudinit.sources.canonical_cloud_id", return_value="my-cloud2" ): datasource.persist_instance_data() - self.assertEqual("my-cloud2\n", util.load_file(cloud_id2_file)) + self.assertEqual("my-cloud2\n", util.load_text_file(cloud_id2_file)) # Previous cloud-id- file removed self.assertFalse(os.path.exists(cloud_id_file)) # Generic link persisted which contains canonical-cloud-id as content self.assertTrue(util.is_link(cloud_id_link)) - self.assertEqual("my-cloud2\n", util.load_file(cloud_id_link)) + self.assertEqual("my-cloud2\n", util.load_text_file(cloud_id_link)) def test_persist_instance_data_writes_network_json_when_set(self): """When network_data.json class attribute is set, persist to json.""" @@ -788,11 +789,11 @@ ) datasource.get_data() json_file = paths.get_runpath("instance_data") - instance_data = util.load_json(util.load_file(json_file)) + instance_data = util.load_json(util.load_text_file(json_file)) 
self.assertNotIn("network_json", instance_data["ds"]) datasource.network_json = {"network_json": "is good"} datasource.persist_instance_data() - instance_data = util.load_json(util.load_file(json_file)) + instance_data = util.load_json(util.load_text_file(json_file)) self.assertEqual( {"network_json": "is good"}, instance_data["ds"]["network_json"] ) @@ -837,7 +838,7 @@ ) self.assertTrue(datasource.get_data()) json_file = paths.get_runpath("instance_data") - content = util.load_file(json_file) + content = util.load_text_file(json_file) instance_json = util.load_json(content) self.assertCountEqual( ["ds/meta_data/key2/key2.1"], instance_json["base64_encoded_keys"] diff -Nru cloud-init-23.4.4/tests/unittests/sources/test_opennebula.py cloud-init-24.1.3/tests/unittests/sources/test_opennebula.py --- cloud-init-23.4.4/tests/unittests/sources/test_opennebula.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/sources/test_opennebula.py 2024-03-27 13:14:04.000000000 +0000 @@ -67,7 +67,7 @@ def tearDown(self): ds.switch_user_cmd = self.switch_user_cmd_real - super(TestOpenNebulaDataSource, self).tearDown() + super().tearDown() def test_get_data_non_contextdisk(self): orig_find_devs_with = util.find_devs_with diff -Nru cloud-init-23.4.4/tests/unittests/sources/test_openstack.py cloud-init-24.1.3/tests/unittests/sources/test_openstack.py --- cloud-init-23.4.4/tests/unittests/sources/test_openstack.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/sources/test_openstack.py 2024-03-27 13:14:04.000000000 +0000 @@ -338,16 +338,14 @@ ds_os_local = ds.DataSourceOpenStackLocal( settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp}) ) - ds_os_local._fallback_interface = "eth9" # Monkey patch for dhcp - m_dhcp.return_value = [ - { - "interface": "eth9", - "fixed-address": "192.168.2.9", - "routers": "192.168.2.1", - "subnet-mask": "255.255.255.0", - "broadcast-address": "192.168.2.255", - } - ] + distro.fallback_interface = "eth9" # Monkey patch for dhcp + m_dhcp.return_value = { + "interface": "eth9", + "fixed-address": "192.168.2.9", + "routers": "192.168.2.1", + "subnet-mask": "255.255.255.0", + "broadcast-address": "192.168.2.255", + } self.assertIsNone(ds_os_local.version) with test_helpers.mock.patch.object( diff -Nru cloud-init-23.4.4/tests/unittests/sources/test_oracle.py cloud-init-24.1.3/tests/unittests/sources/test_oracle.py --- cloud-init-23.4.4/tests/unittests/sources/test_oracle.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/sources/test_oracle.py 2024-03-27 13:14:04.000000000 +0000 @@ -4,6 +4,7 @@ import copy import json import logging +from itertools import count from unittest import mock import pytest @@ -14,7 +15,6 @@ from cloudinit.sources.DataSourceOracle import OpcMetadata from cloudinit.url_helper import UrlError from tests.unittests import helpers as test_helpers -from tests.unittests.helpers import does_not_raise DS_PATH = "cloudinit.sources.DataSourceOracle" @@ -730,84 +730,70 @@ assert instance_data == metadata.instance_data assert vnics_data == metadata.vnics_data - # No need to actually wait between retries in the tests @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None) - @pytest.mark.parametrize( - "v2_failure_count,v1_failure_count,expected_body,expectation", - [ - (1, 0, json.loads(OPC_V2_METADATA), does_not_raise()), - (2, 0, json.loads(OPC_V2_METADATA), does_not_raise()), - (3, 0, json.loads(OPC_V1_METADATA), does_not_raise()), - (3, 1, json.loads(OPC_V1_METADATA), 
does_not_raise()), - (3, 2, json.loads(OPC_V1_METADATA), does_not_raise()), - (3, 3, None, pytest.raises(UrlError)), - ], - ) - def test_retries( - self, - v2_failure_count, - v1_failure_count, - expected_body, - expectation, - mocked_responses, - ): - # Workaround https://github.com/getsentry/responses/pull/171 - # This mocking can be unrolled when Bionic is EOL - url_v2_call_count = 0 - - def url_v2_callback(request): - nonlocal url_v2_call_count - url_v2_call_count += 1 - if url_v2_call_count <= v2_failure_count: - return ( - 404, - request.headers, - f"403 Client Error: Forbidden for url: {url_v2}", - ) - return 200, request.headers, OPC_V2_METADATA - - url_v2 = "http://169.254.169.254/opc/v2/instance/" - mocked_responses.add_callback( - responses.GET, url_v2, callback=url_v2_callback + @mock.patch("cloudinit.url_helper.time.time", side_effect=count(0, 1)) + @mock.patch("cloudinit.url_helper.readurl", side_effect=UrlError) + def test_retry(self, m_readurl, m_time): + # Since wait_for_url has its own retry tests, just verify that we + # attempted to contact both endpoints multiple times + oracle.read_opc_metadata() + assert len(m_readurl.call_args_list) > 3 + assert ( + m_readurl.call_args_list[0][0][0] + == "http://169.254.169.254/opc/v2/instance/" ) - - # Workaround https://github.com/getsentry/responses/pull/171 - # This mocking can be unrolled when Bionic is EOL - url_v1_call_count = 0 - - def url_v1_callback(request): - nonlocal url_v1_call_count - url_v1_call_count += 1 - if url_v1_call_count <= v1_failure_count: - return ( - 404, - request.headers, - f"403 Client Error: Forbidden for url: {url_v1}", - ) - return 200, request.headers, OPC_V1_METADATA - - url_v1 = "http://169.254.169.254/opc/v1/instance/" - mocked_responses.add_callback( - responses.GET, url_v1, callback=url_v1_callback + assert ( + m_readurl.call_args_list[1][0][0] + == "http://169.254.169.254/opc/v1/instance/" + ) + assert ( + m_readurl.call_args_list[2][0][0] + == "http://169.254.169.254/opc/v2/instance/" + ) + assert ( + m_readurl.call_args_list[3][0][0] + == "http://169.254.169.254/opc/v1/instance/" ) - with expectation: - assert expected_body == oracle.read_opc_metadata().instance_data + @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None) + @mock.patch("cloudinit.url_helper.time.time", side_effect=[0, 11]) + @mock.patch( + "cloudinit.sources.DataSourceOracle.wait_for_url", + return_value=("http://hi", b'{"some": "value"}'), + ) + def test_fetch_vnics_max_wait(self, m_wait_for_url, m_time): + oracle.read_opc_metadata(fetch_vnics_data=True) + assert m_wait_for_url.call_count == 2 + # 19 because start time was 0, next time was 11 and max wait is 30 + assert m_wait_for_url.call_args_list[-1][1]["max_wait"] == 19 + + @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None) + @mock.patch("cloudinit.url_helper.time.time", side_effect=[0, 1000]) + @mock.patch( + "cloudinit.sources.DataSourceOracle.wait_for_url", + return_value=("http://hi", b'{"some": "value"}'), + ) + def test_attempt_vnics_after_max_wait_expire(self, m_wait_for_url, m_time): + oracle.read_opc_metadata(fetch_vnics_data=True) + assert m_wait_for_url.call_count == 2 + assert m_wait_for_url.call_args_list[-1][1]["max_wait"] < 0 # No need to actually wait between retries in the tests @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None) def test_fetch_vnics_error(self, caplog): - def mocked_fetch(*args, path="instance", **kwargs): - if path == "vnics": - raise UrlError("cause") + def m_wait(*args, **kwargs): + for url in 
args[0]: + if "vnics" in url: + return False, None + return ("http://localhost", b"{}") - with mock.patch(DS_PATH + "._fetch", side_effect=mocked_fetch): + with mock.patch(DS_PATH + ".wait_for_url", side_effect=m_wait): opc_metadata = oracle.read_opc_metadata(fetch_vnics_data=True) assert None is opc_metadata.vnics_data assert ( logging.WARNING, "Failed to fetch IMDS network configuration!", - ) == caplog.record_tuples[-2][1:] + ) == caplog.record_tuples[-1][1:], caplog.record_tuples @pytest.mark.parametrize( diff -Nru cloud-init-23.4.4/tests/unittests/sources/test_scaleway.py cloud-init-24.1.3/tests/unittests/sources/test_scaleway.py --- cloud-init-23.4.4/tests/unittests/sources/test_scaleway.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/sources/test_scaleway.py 2024-03-27 13:14:04.000000000 +0000 @@ -10,6 +10,7 @@ from requests.exceptions import ConnectionError, ConnectTimeout from cloudinit import helpers, settings, sources +from cloudinit.distros import ubuntu from cloudinit.sources import DataSourceScaleway from tests.unittests.helpers import CiTestCase, ResponsesTestCase, mock @@ -190,7 +191,7 @@ class TestDataSourceScaleway(ResponsesTestCase): def setUp(self): tmp = self.tmp_dir() - distro = mock.MagicMock() + distro = ubuntu.Distro("", {}, {}) distro.get_tmp_exec_path = self.tmp_dir self.datasource = DataSourceScaleway.DataSourceScaleway( settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": tmp}) @@ -217,7 +218,7 @@ return_value=True, ) self.add_patch( - "cloudinit.sources.DataSourceScaleway.net.find_fallback_nic", + "cloudinit.distros.net.find_fallback_nic", "_m_find_fallback_nic", return_value="scalewaynic0", ) @@ -475,6 +476,7 @@ self.assertIsNone(self.datasource.get_userdata_raw()) self.assertIsNone(self.datasource.get_vendordata_raw()) + @mock.patch("cloudinit.url_helper.time.sleep", lambda x: None) @mock.patch("cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4") def test_metadata_connection_errors_legacy_ipv4_url(self, dhcpv4): """ @@ -496,11 +498,6 @@ callback=ConnectionError, ) self.datasource._set_metadata_url(self.datasource.metadata_urls) - if sys.version_info.minor >= 7: - self.responses.assert_call_count( - f"{self.datasource.metadata_urls[0]}/", - self.datasource.retries, - ) self.assertEqual(self.datasource.metadata, {}) self.assertIsNone(self.datasource.get_userdata_raw()) self.assertIsNone(self.datasource.get_vendordata_raw()) @@ -701,7 +698,7 @@ ], ) - @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic") + @mock.patch("cloudinit.distros.net.find_fallback_nic") @mock.patch("cloudinit.util.get_cmdline") def test_legacy_network_config_ok(self, m_get_cmdline, fallback_nic): """ @@ -726,7 +723,7 @@ } self.assertEqual(netcfg, resp) - @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic") + @mock.patch("cloudinit.distros.net.find_fallback_nic") @mock.patch("cloudinit.util.get_cmdline") def test_legacy_network_config_ipv6_ok(self, m_get_cmdline, fallback_nic): """ @@ -769,7 +766,7 @@ } self.assertEqual(netcfg, resp) - @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic") + @mock.patch("cloudinit.distros.net.find_fallback_nic") @mock.patch("cloudinit.util.get_cmdline") def test_legacy_network_config_existing(self, m_get_cmdline, fallback_nic): """ @@ -782,7 +779,7 @@ netcfg = self.datasource.network_config self.assertEqual(netcfg, "0xdeadbeef") - @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic") + @mock.patch("cloudinit.distros.net.find_fallback_nic") 
@mock.patch("cloudinit.util.get_cmdline") def test_legacy_network_config_unset(self, m_get_cmdline, fallback_nic): """ @@ -810,7 +807,7 @@ self.assertEqual(netcfg, resp) @mock.patch("cloudinit.sources.DataSourceScaleway.LOG.warning") - @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic") + @mock.patch("cloudinit.distros.net.find_fallback_nic") @mock.patch("cloudinit.util.get_cmdline") def test_legacy_network_config_cached_none( self, m_get_cmdline, fallback_nic, logwarning @@ -843,7 +840,7 @@ sources.UNSET, ) - @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic") + @mock.patch("cloudinit.distros.net.find_fallback_nic") @mock.patch("cloudinit.util.get_cmdline") def test_ipmob_primary_ipv4_config_ok(self, m_get_cmdline, fallback_nic): """ @@ -872,7 +869,7 @@ self.assertEqual(netcfg, resp) - @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic") + @mock.patch("cloudinit.distros.net.find_fallback_nic") @mock.patch("cloudinit.util.get_cmdline") def test_ipmob_additional_ipv4_config_ok( self, m_get_cmdline, fallback_nic @@ -914,7 +911,7 @@ } self.assertEqual(netcfg, resp) - @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic") + @mock.patch("cloudinit.distros.net.find_fallback_nic") @mock.patch("cloudinit.util.get_cmdline") def test_ipmob_primary_ipv6_config_ok(self, m_get_cmdline, fallback_nic): """ @@ -952,7 +949,7 @@ self.assertEqual(netcfg, resp) - @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic") + @mock.patch("cloudinit.distros.net.find_fallback_nic") @mock.patch("cloudinit.util.get_cmdline") def test_ipmob_primary_ipv4_v6_config_ok( self, m_get_cmdline, fallback_nic diff -Nru cloud-init-23.4.4/tests/unittests/sources/test_smartos.py cloud-init-24.1.3/tests/unittests/sources/test_smartos.py --- cloud-init-23.4.4/tests/unittests/sources/test_smartos.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/sources/test_smartos.py 2024-03-27 13:14:04.000000000 +0000 @@ -1374,7 +1374,7 @@ # os.kill() rather than mdata_proc.terminate() to avoid console spam. os.kill(self.mdata_proc.pid, signal.SIGKILL) self.mdata_proc.join() - super(TestSerialConcurrency, self).tearDown() + super().tearDown() def start_mdata_loop(self): """ @@ -1385,7 +1385,7 @@ are testing to be sure that cloud-init and mdata-get respect each others locks. """ - rcs = list(range(0, 256)) + rcs = list(range(256)) while True: subp(["mdata-get", "sdc:routes"], rcs=rcs) @@ -1402,7 +1402,7 @@ # 10 times at roughly the same time as cloud-init fetched each key # once. cloud-init would regularly see failures before making it # through all keys once. - for _ in range(0, 3): + for _ in range(3): for key in keys: # We don't care about the return value, just that it doesn't # thrown any exceptions. 
diff -Nru cloud-init-23.4.4/tests/unittests/sources/test_upcloud.py cloud-init-24.1.3/tests/unittests/sources/test_upcloud.py --- cloud-init-23.4.4/tests/unittests/sources/test_upcloud.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/sources/test_upcloud.py 2024-03-27 13:14:04.000000000 +0000 @@ -226,15 +226,13 @@ mock_readmd.return_value = UC_METADATA.copy() m_fallback_nic.return_value = "eth1" - m_dhcp.return_value = [ - { - "interface": "eth1", - "fixed-address": "10.6.3.27", - "routers": "10.6.0.1", - "subnet-mask": "22", - "broadcast-address": "10.6.3.255", - } - ] + m_dhcp.return_value = { + "interface": "eth1", + "fixed-address": "10.6.3.27", + "routers": "10.6.0.1", + "subnet-mask": "22", + "broadcast-address": "10.6.3.255", + } ds = self.get_ds() diff -Nru cloud-init-23.4.4/tests/unittests/sources/test_vmware.py cloud-init-24.1.3/tests/unittests/sources/test_vmware.py --- cloud-init-23.4.4/tests/unittests/sources/test_vmware.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/sources/test_vmware.py 2024-03-27 13:14:04.000000000 +0000 @@ -238,7 +238,7 @@ def tearDown(self): del os.environ[DataSourceVMware.VMX_GUESTINFO] - return super(TestDataSourceVMwareEnvVars, self).tearDown() + return super().tearDown() def create_system_files(self): rootd = self.tmp_dir() diff -Nru cloud-init-23.4.4/tests/unittests/sources/test_wsl.py cloud-init-24.1.3/tests/unittests/sources/test_wsl.py --- cloud-init-23.4.4/tests/unittests/sources/test_wsl.py 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/sources/test_wsl.py 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,396 @@ +# Copyright (C) 2024 Canonical Ltd. +# +# Author: Carlos Nihelton +# +# This file is part of cloud-init. See LICENSE file for license information. +import logging +from copy import deepcopy +from email.mime.multipart import MIMEMultipart +from pathlib import PurePath +from typing import cast + +import pytest + +from cloudinit import util +from cloudinit.sources import DataSourceWSL as wsl +from tests.unittests.helpers import does_not_raise, mock + +INSTANCE_NAME = "Noble-MLKit" +GOOD_MOUNTS = { + "none": { + "fstype": "tmpfs", + "mountpoint": "/mnt/wsl", + "opts": "rw,relatime", + }, + "/dev/sdd": { + "fstype": "ext4", + "mountpoint": "/", + "opts": "rw,relatime,...", + }, + "sysfs": { + "fstype": "sysfs", + "mountpoint": "/sys", + "opts": "rw,nosuid...", + }, + "C:\\": { + "fstype": "9p", + "mountpoint": "/mnt/c", + "opts": "rw,noatime,dirsync,aname=drvfs;path=C:\\;...", + }, + "D:\\": { + "fstype": "9p", + "mountpoint": "/mnt/d", + "opts": "rw,noatime,dirsync,aname=drvfs;path=D:\\;...", + }, + "hugetlbfs": { + "fstype": "hugetlbfs", + "mountpoint": "/dev/hugepages", + "opts": "rw,relatime...", + }, +} +SAMPLE_LINUX_DISTRO = ("ubuntu", "24.04", "noble") +SAMPLE_LINUX_DISTRO_NO_VERSION_ID = ("debian", "", "trixie") + + +class TestWSLHelperFunctions: + @mock.patch("cloudinit.util.subp.subp") + def test_instance_name(self, m_subp): + m_subp.return_value = util.subp.SubpResult( + f"//wsl.localhost/{INSTANCE_NAME}/", "" + ) + + assert INSTANCE_NAME == wsl.instance_name() + + @mock.patch("cloudinit.util.mounts") + def test_mounted_drives(self, m_mounts): + # A good output + m_mounts.return_value = deepcopy(GOOD_MOUNTS) + mounts = wsl.mounted_win_drives() + assert ["/mnt/c", "/mnt/d"] == mounts + + # no more drvfs in C:\ options + m_mounts.return_value["C:\\"]["opts"] = "rw,relatime..."
+ mounts = wsl.mounted_win_drives() + assert ["/mnt/d"] == mounts + + # fstype mismatch for D:\ + m_mounts.return_value["D:\\"]["fstype"] = "zfs" + mounts = wsl.mounted_win_drives() + assert [] == mounts + + @mock.patch("os.access") + @mock.patch("cloudinit.util.mounts") + def test_cmd_exe_ok(self, m_mounts, m_os_access): + """ + Validates the happy path, when we find the Windows system drive and + cmd.exe is executable. + """ + m_mounts.return_value = deepcopy(GOOD_MOUNTS) + m_os_access.return_value = True + cmd = wsl.cmd_executable() + # To please pyright not to complain about optional member access. + assert cmd is not None + assert None is not cmd.relative_to(GOOD_MOUNTS["C:\\"]["mountpoint"]) + + @mock.patch("os.access") + @mock.patch("cloudinit.util.mounts") + def test_cmd_not_executable(self, m_mounts, m_os_access): + """ + When the cmd.exe found is not executable, then IOError is raised. + """ + m_mounts.return_value = deepcopy(GOOD_MOUNTS) + m_os_access.return_value = True + cmd = wsl.cmd_executable() + # To please pyright not to complain about optional member access. + assert cmd is not None + assert None is not cmd.relative_to(GOOD_MOUNTS["C:\\"]["mountpoint"]) + + m_os_access.return_value = False + with pytest.raises(IOError): + wsl.cmd_executable() + + @mock.patch("os.access") + @mock.patch("cloudinit.util.mounts") + def test_cmd_exe_no_win_mounts(self, m_mounts, m_os_access): + """ + When no Windows drives are found, then IOError is raised. + """ + m_os_access.return_value = True + + m_mounts.return_value = deepcopy(GOOD_MOUNTS) + m_mounts.return_value.pop("C:\\") + m_mounts.return_value.pop("D:\\") + with pytest.raises(IOError): + wsl.cmd_executable() + + @pytest.mark.parametrize( + "linux_distro_value,files", + ( + ( + SAMPLE_LINUX_DISTRO, + [ + f"{INSTANCE_NAME}.user-data", + "ubuntu-24.04.user-data", + "ubuntu-all.user-data", + "default.user-data", + ], + ), + ( + SAMPLE_LINUX_DISTRO_NO_VERSION_ID, + [ + f"{INSTANCE_NAME}.user-data", + "debian-trixie.user-data", + "debian-all.user-data", + "default.user-data", + ], + ), + ), + ) + @mock.patch("cloudinit.util.get_linux_distro") + def test_candidate_files(self, m_gld, linux_distro_value, files): + """ + Validate the candidate file names for holding user-data and their + order of precedence. + """ + m_gld.return_value = linux_distro_value + assert files == wsl.candidate_user_data_file_names(INSTANCE_NAME) + + @pytest.mark.parametrize( + "md_content,raises,errors,warnings,md_expected", + ( + pytest.param( + None, + does_not_raise(), + [], + [], + {"instance-id": "iid-datasource-wsl"}, + id="default_md_on_no_md_file", + ), + pytest.param( + "{}", + pytest.raises( + ValueError, + match=( + "myinstance.meta-data does not contain instance-id key" + ), + ), + ["myinstance.meta-data does not contain instance-id key"], + [], + "", + id="error_on_md_missing_instance_id_key", + ), + pytest.param( + "{", + pytest.raises( + ValueError, + match=( + "myinstance.meta-data does not contain instance-id key" + ), + ), + ["myinstance.meta-data does not contain instance-id key"], + ["Failed loading yaml blob. Invalid format at line 1"], + "", + id="error_on_md_invalid_yaml", + ), + ), + ) + def test_load_instance_metadata( + self, md_content, raises, errors, warnings, md_expected, tmpdir, caplog + ): + """meta-data file is optional.
Errors are raised on invalid content.""" + if md_content is not None: + tmpdir.join("myinstance.meta-data").write(md_content) + with caplog.at_level(logging.WARNING): + with raises: + assert md_expected == wsl.load_instance_metadata( + PurePath(tmpdir), "myinstance" + ) + warning_logs = "\n".join( + [ + x.message + for x in caplog.records + if x.levelno == logging.WARNING + ] + ) + error_logs = "\n".join( + [ + x.message + for x in caplog.records + if x.levelno == logging.ERROR + ] + ) + if warnings: + for warning in warnings: + assert warning in warning_logs + else: + assert "" == warning_logs + if errors: + for error in errors: + assert error in error_logs + else: + assert "" == error_logs + + +SAMPLE_CFG = {"datasource_list": ["NoCloud", "WSL"]} + + +def join_payloads_from_content_type( + part: MIMEMultipart, content_type: str +) -> str: + """ + Helper function to decode and join all parts of a multipart MIME + message matched by the content type. + """ + content = "" + for p in part.walk(): + if p.get_content_type() == content_type: + content = content + str(p.get_payload(decode=True)) + + return content + + +class TestWSLDataSource: + @mock.patch("cloudinit.sources.DataSourceWSL.instance_name") + @mock.patch("cloudinit.sources.DataSourceWSL.cloud_init_data_dir") + def test_metadata_id_default(self, m_seed_dir, m_iname, tmpdir, paths): + """ + Validates that instance-id is properly set, independent of the existence + of user-data. + """ + m_iname.return_value = INSTANCE_NAME + m_seed_dir.return_value = PurePath(tmpdir) + + ds = wsl.DataSourceWSL( + sys_cfg=SAMPLE_CFG, + distro=None, + paths=paths, + ) + ds.get_data() + + assert ds.get_instance_id() == wsl.DEFAULT_INSTANCE_ID + + @mock.patch("cloudinit.sources.DataSourceWSL.instance_name") + @mock.patch("cloudinit.sources.DataSourceWSL.cloud_init_data_dir") + def test_metadata_id(self, m_seed_dir, m_iname, tmpdir, paths): + """ + Validates that instance-id is properly set, independent of the existence + of user-data.
+ """ + m_iname.return_value = INSTANCE_NAME + m_seed_dir.return_value = PurePath(tmpdir) + SAMPLE_ID = "Nice-ID" + tmpdir.join(f"{INSTANCE_NAME}.meta-data").write( + f'{{"instance-id":"{SAMPLE_ID}"}}', + ) + + ds = wsl.DataSourceWSL( + sys_cfg=SAMPLE_CFG, + distro=None, + paths=paths, + ) + ds.get_data() + + assert ds.get_instance_id() == SAMPLE_ID + + @mock.patch("cloudinit.util.lsb_release") + @mock.patch("cloudinit.sources.DataSourceWSL.instance_name") + @mock.patch("cloudinit.sources.DataSourceWSL.cloud_init_data_dir") + def test_get_data_cc(self, m_seed_dir, m_iname, m_gld, paths, tmpdir): + m_gld.return_value = SAMPLE_LINUX_DISTRO + m_iname.return_value = INSTANCE_NAME + m_seed_dir.return_value = PurePath(tmpdir) + tmpdir.join(f"{INSTANCE_NAME}.user-data").write( + "#cloud-config\nwrite_files:\n- path: /etc/wsl.conf" + ) + + ds = wsl.DataSourceWSL( + sys_cfg=SAMPLE_CFG, + distro=None, + paths=paths, + ) + + assert ds.get_data() is True + ud = ds.get_userdata() + + assert ud is not None + userdata = join_payloads_from_content_type( + cast(MIMEMultipart, ud), "text/cloud-config" + ) + assert userdata is not None + assert "wsl.conf" in cast(str, userdata) + + @mock.patch("cloudinit.util.lsb_release") + @mock.patch("cloudinit.sources.DataSourceWSL.instance_name") + @mock.patch("cloudinit.sources.DataSourceWSL.cloud_init_data_dir") + def test_get_data_sh(self, m_seed_dir, m_iname, m_gld, tmpdir, paths): + m_gld.return_value = SAMPLE_LINUX_DISTRO + m_iname.return_value = INSTANCE_NAME + m_seed_dir.return_value = PurePath(tmpdir) + COMMAND = "echo Hello cloud-init on WSL!" + tmpdir.join(f"{INSTANCE_NAME}.user-data").write( + f"#!/bin/sh\n{COMMAND}\n" + ) + ds = wsl.DataSourceWSL( + sys_cfg=SAMPLE_CFG, + distro=None, + paths=paths, + ) + + assert ds.get_data() is True + ud = ds.get_userdata() + + assert ud is not None + userdata = cast( + str, + join_payloads_from_content_type( + cast(MIMEMultipart, ud), "text/x-shellscript" + ), + ) + assert COMMAND in userdata + + @mock.patch("cloudinit.util.get_linux_distro") + @mock.patch("cloudinit.sources.DataSourceWSL.instance_name") + @mock.patch("cloudinit.sources.DataSourceWSL.cloud_init_data_dir") + def test_data_precedence(self, m_seed_dir, m_iname, m_gld, tmpdir, paths): + m_gld.return_value = SAMPLE_LINUX_DISTRO + m_iname.return_value = INSTANCE_NAME + m_seed_dir.return_value = PurePath(tmpdir) + # This is the most specific: should win over the other user-data files. + # Also, notice the file name casing: should be irrelevant. 
+ tmpdir.join("ubuntu-24.04.user-data").write( + "#cloud-config\nwrite_files:\n- path: /etc/wsl.conf" + ) + + distro_file = tmpdir.join(".cloud-init", "Ubuntu-all.user-data") + distro_file.dirpath().mkdir() + distro_file.write("#!/bin/sh\n\necho Hello World\n") + + generic_file = tmpdir.join(".cloud-init", "default.user-data") + generic_file.write("#cloud-config\npackages:\n- g++-13\n") + + ds = wsl.DataSourceWSL( + sys_cfg=SAMPLE_CFG, + distro=None, + paths=paths, + ) + + assert ds.get_data() is True + ud = ds.get_userdata() + + assert ud is not None + userdata = cast( + str, + join_payloads_from_content_type( + cast(MIMEMultipart, ud), "text/cloud-config" + ), + ) + assert "wsl.conf" in userdata + assert "packages" not in userdata + shell_script = cast( + str, + join_payloads_from_content_type( + cast(MIMEMultipart, ud), "text/x-shellscript" + ), + ) + + assert "" == shell_script diff -Nru cloud-init-23.4.4/tests/unittests/sources/vmware/test_vmware_config_file.py cloud-init-24.1.3/tests/unittests/sources/vmware/test_vmware_config_file.py --- cloud-init-23.4.4/tests/unittests/sources/vmware/test_vmware_config_file.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/sources/vmware/test_vmware_config_file.py 2024-03-27 13:14:04.000000000 +0000 @@ -25,7 +25,8 @@ get_network_data_from_vmware_cust_cfg, get_non_network_data_from_vmware_cust_cfg, ) -from tests.unittests.helpers import CiTestCase, cloud_init_project_dir +from tests.helpers import cloud_init_project_dir +from tests.unittests.helpers import CiTestCase logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) logger = logging.getLogger(__name__) diff -Nru cloud-init-23.4.4/tests/unittests/test__init__.py cloud-init-24.1.3/tests/unittests/test__init__.py --- cloud-init-23.4.4/tests/unittests/test__init__.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/test__init__.py 2024-03-27 13:14:04.000000000 +0000 @@ -293,7 +293,7 @@ lvl, msg = main.attempt_cmdline_url( fpath, network=True, cmdline=cmdline ) - assert util.load_file(fpath, decode=False) == payload + assert util.load_binary_file(fpath) == payload assert logging.INFO == lvl assert url in msg @@ -310,7 +310,7 @@ lvl, msg = main.attempt_cmdline_url( fpath, network=True, cmdline=cmdline ) - assert util.load_file(fpath, decode=False) == payload + assert util.load_binary_file(fpath) == payload assert logging.INFO == lvl assert url in msg diff -Nru cloud-init-23.4.4/tests/unittests/test_builtin_handlers.py cloud-init-24.1.3/tests/unittests/test_builtin_handlers.py --- cloud-init-23.4.4/tests/unittests/test_builtin_handlers.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/test_builtin_handlers.py 2024-03-27 13:14:04.000000000 +0000 @@ -189,7 +189,7 @@ instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE) util.write_file(instance_json, atomic_helper.json_dumps({})) h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler]) - with mock.patch(self.mpath + "load_file") as m_load: + with mock.patch(self.mpath + "load_text_file") as m_load: with self.assertRaises(JinjaLoadError) as context_manager: m_load.side_effect = OSError(errno.EACCES, "Not allowed") h.handle_part( @@ -239,7 +239,7 @@ self.logs.getvalue(), ) self.assertEqual( - "#!/bin/bash\necho himom", util.load_file(script_file) + "#!/bin/bash\necho himom", util.load_text_file(script_file) ) @skipUnlessJinja() @@ -450,6 +450,8 @@ payload=payload, frequency=None, ) - assert payload == 
util.load_file(f"{handler.boothook_dir}/part-001") - assert "id:i-testing\n" == util.load_file(f"{tmpdir}/boothook") + assert payload == util.load_text_file( + f"{handler.boothook_dir}/part-001" + ) + assert "id:i-testing\n" == util.load_text_file(f"{tmpdir}/boothook") assert "id:i-testing\n" == capfd.readouterr().out diff -Nru cloud-init-23.4.4/tests/unittests/test_cli.py cloud-init-24.1.3/tests/unittests/test_cli.py --- cloud-init-23.4.4/tests/unittests/test_cli.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/test_cli.py 2024-03-27 13:14:04.000000000 +0000 @@ -2,12 +2,13 @@ import contextlib import io +import logging import os from collections import namedtuple import pytest -from cloudinit import helpers, log +from cloudinit import helpers from cloudinit.cmd import main as cli from tests.unittests import helpers as test_helpers @@ -29,7 +30,6 @@ if not sysv_args: sysv_args = ["cloud-init"] try: - log.setup_logging() return cli.main(sysv_args=sysv_args) except SystemExit as e: return e.code @@ -172,6 +172,45 @@ for subcommand in expected_subcommands: assert subcommand in err + @pytest.mark.parametrize( + "subcommand,log_to_stderr,mocks", + ( + ("init", False, [mock.patch("cloudinit.cmd.main.status_wrapper")]), + ( + "modules", + False, + [mock.patch("cloudinit.cmd.main.status_wrapper")], + ), + ( + "schema", + True, + [ + mock.patch( + "cloudinit.stages.Init._read_cfg", return_value={} + ), + mock.patch("cloudinit.config.schema.handle_schema_args"), + ], + ), + ), + ) + @mock.patch("cloudinit.cmd.main.setup_basic_logging") + def test_subcommands_log_to_stderr_via_setup_basic_logging( + self, setup_basic_logging, subcommand, log_to_stderr, mocks + ): + """setup_basic_logging is called for modules to use stderr + + Subcommands with exception of 'init' and 'modules' use + setup_basic_logging to direct logged errors to stderr. 
+ """ + with contextlib.ExitStack() as mockstack: + for mymock in mocks: + mockstack.enter_context(mymock) + self._call_main(["cloud-init", subcommand]) + if log_to_stderr: + setup_basic_logging.assert_called_once_with(logging.WARNING) + else: + setup_basic_logging.assert_not_called() + @pytest.mark.parametrize("subcommand", ["init", "modules"]) @mock.patch("cloudinit.cmd.main.status_wrapper") def test_modules_subcommand_parser(self, m_status_wrapper, subcommand): diff -Nru cloud-init-23.4.4/tests/unittests/test_conftest.py cloud-init-24.1.3/tests/unittests/test_conftest.py --- cloud-init-23.4.4/tests/unittests/test_conftest.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/test_conftest.py 2024-03-27 13:14:04.000000000 +0000 @@ -1,6 +1,7 @@ import pytest from cloudinit import subp +from conftest import UnexpectedSubpError from tests.unittests.helpers import CiTestCase @@ -8,7 +9,7 @@ """Test that the disable_subp_usage fixture behaves as expected.""" def test_using_subp_raises_assertion_error(self): - with pytest.raises(AssertionError): + with pytest.raises(UnexpectedSubpError): subp.subp(["some", "args"]) def test_typeerrors_on_incorrect_usage(self): @@ -17,6 +18,13 @@ # pylint: disable=no-value-for-parameter subp.subp() + def test_subp_exception_escapes_exception_handling(self): + with pytest.raises(UnexpectedSubpError): + try: + subp.subp(["some", "args"]) + except Exception: + pytest.fail("Unexpected exception raised") + @pytest.mark.allow_all_subp def test_subp_usage_can_be_reenabled(self): subp.subp(["whoami"]) @@ -25,14 +33,14 @@ def test_subp_usage_can_be_conditionally_reenabled(self): # The two parameters test each potential invocation with a single # argument - with pytest.raises(AssertionError) as excinfo: + with pytest.raises(UnexpectedSubpError) as excinfo: subp.subp(["some", "args"]) assert "allowed: whoami" in str(excinfo.value) subp.subp(["whoami"]) @pytest.mark.allow_subp_for("whoami", "bash") def test_subp_usage_can_be_conditionally_reenabled_for_multiple_cmds(self): - with pytest.raises(AssertionError) as excinfo: + with pytest.raises(UnexpectedSubpError) as excinfo: subp.subp(["some", "args"]) assert "allowed: whoami,bash" in str(excinfo.value) subp.subp(["bash", "-c", "true"]) @@ -41,7 +49,7 @@ @pytest.mark.allow_all_subp @pytest.mark.allow_subp_for("bash") def test_both_marks_raise_an_error(self): - with pytest.raises(AssertionError, match="marked both"): + with pytest.raises(UnexpectedSubpError, match="marked both"): subp.subp(["bash"]) diff -Nru cloud-init-23.4.4/tests/unittests/test_data.py cloud-init-24.1.3/tests/unittests/test_data.py --- cloud-init-23.4.4/tests/unittests/test_data.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/test_data.py 2024-03-27 13:14:04.000000000 +0000 @@ -82,7 +82,9 @@ init_tmp.fetch() with mock.patch.object(init_tmp, "_reset"): init_tmp.consume_data() - cc_contents = util.load_file(init_tmp.paths.get_ipath("cloud_config")) + cc_contents = util.load_text_file( + init_tmp.paths.get_ipath("cloud_config") + ) cc = util.load_yaml(cc_contents) assert len(cc) == 2 assert cc["baz"] == "qux" @@ -215,7 +217,9 @@ init_tmp.fetch() with mock.patch.object(init_tmp, "_reset"): init_tmp.consume_data() - cc_contents = util.load_file(init_tmp.paths.get_ipath("cloud_config")) + cc_contents = util.load_text_file( + init_tmp.paths.get_ipath("cloud_config") + ) cc = util.load_yaml(cc_contents) assert len(cc) == 1 assert cc["a"] == "c" @@ -248,7 +252,9 @@ init_tmp.fetch() with 
mock.patch.object(init_tmp, "_reset"): init_tmp.consume_data() - cc_contents = util.load_file(init_tmp.paths.get_ipath("cloud_config")) + cc_contents = util.load_text_file( + init_tmp.paths.get_ipath("cloud_config") + ) cc = util.load_yaml(cc_contents) assert len(cc) == 1 assert cc["a"] == "c" @@ -392,7 +398,7 @@ cloud_cfg.handle_part( None, handlers.CONTENT_END, None, None, None, None ) - contents = util.load_file(paths.get_ipath("cloud_config")) + contents = util.load_text_file(paths.get_ipath("cloud_config")) contents = util.load_yaml(contents) assert contents["run"], ["b", "c", "stuff", "morestuff"] assert contents["a"] == "be" @@ -441,7 +447,9 @@ init_tmp.fetch() with mock.patch.object(init_tmp, "_reset"): init_tmp.consume_data() - contents = util.load_file(init_tmp.paths.get_ipath("cloud_config")) + contents = util.load_text_file( + init_tmp.paths.get_ipath("cloud_config") + ) contents = util.load_yaml(contents) assert isinstance(contents, dict) is True assert len(contents) == 3 @@ -519,7 +527,7 @@ } loaded_json = util.load_json( - util.load_file( + util.load_text_file( init_tmp.paths.get_runpath("instance_data_sensitive") ) ) @@ -527,7 +535,9 @@ expected["_doc"] = stages.COMBINED_CLOUD_CONFIG_DOC assert expected == util.load_json( - util.load_file(init_tmp.paths.get_runpath("combined_cloud_config")) + util.load_text_file( + init_tmp.paths.get_runpath("combined_cloud_config") + ) ) def test_mime_text_x_shellscript(self, init_tmp, caplog): @@ -685,7 +695,9 @@ with mock.patch.object(init_tmp, "_reset") as _reset: init_tmp.consume_data() assert _reset.call_count == 1 - cc_contents = util.load_file(init_tmp.paths.get_ipath("cloud_config")) + cc_contents = util.load_text_file( + init_tmp.paths.get_ipath("cloud_config") + ) cc = util.load_yaml(cc_contents) assert cc.get("included") is True @@ -711,7 +723,7 @@ assert _reset.call_count == 1 with pytest.raises(FileNotFoundError): - util.load_file(init_tmp.paths.get_ipath("cloud_config")) + util.load_text_file(init_tmp.paths.get_ipath("cloud_config")) @responses.activate @mock.patch("cloudinit.url_helper.time.sleep") @@ -750,7 +762,9 @@ "403 Client Error: Forbidden for url: %s" % bad_url in caplog.text ) - cc_contents = util.load_file(init_tmp.paths.get_ipath("cloud_config")) + cc_contents = util.load_text_file( + init_tmp.paths.get_ipath("cloud_config") + ) cc = util.load_yaml(cc_contents) assert cc.get("bad") is None assert cc.get("included") is True diff -Nru cloud-init-23.4.4/tests/unittests/test_dmi.py cloud-init-24.1.3/tests/unittests/test_dmi.py --- cloud-init-23.4.4/tests/unittests/test_dmi.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/test_dmi.py 2024-03-27 13:14:04.000000000 +0000 @@ -6,6 +6,7 @@ import pytest from cloudinit import dmi, subp, util +from cloudinit.subp import SubpResult from tests.unittests import helpers @@ -22,6 +23,10 @@ self.addCleanup(p.stop) self._m_is_FreeBSD = p.start() + p = mock.patch("cloudinit.dmi.is_OpenBSD", return_value=False) + self.addCleanup(p.stop) + self._m_is_OpenBSD = p.start() + def _create_sysfs_parent_directory(self): util.ensure_dir(os.path.join("sys", "class", "dmi", "id")) @@ -37,10 +42,10 @@ function fakes the results of dmidecode to test the results. 
""" - def _dmidecode_subp(cmd): + def _dmidecode_subp(cmd) -> SubpResult: if cmd[-1] != key: raise subp.ProcessExecutionError() - return (content, error) + return SubpResult(content, error) self.patched_funcs.enter_context( mock.patch("cloudinit.dmi.subp.which", side_effect=lambda _: True) @@ -55,15 +60,30 @@ function fakes the results of kenv to test the results. """ - def _kenv_subp(cmd): + def _kenv_subp(cmd) -> SubpResult: if cmd[-1] != dmi.DMIDECODE_TO_KERNEL[key].freebsd: raise subp.ProcessExecutionError() - return (content, error) + return SubpResult(content, error) self.patched_funcs.enter_context( mock.patch("cloudinit.dmi.subp.subp", side_effect=_kenv_subp) ) + def _configure_sysctl_return(self, key, content, error=None): + """ + In order to test an OpenBSD system call outs to sysctl, this + function fakes the results of kenv to test the results. + """ + + def _sysctl_subp(cmd) -> SubpResult: + if cmd[-1] != dmi.DMIDECODE_TO_KERNEL[key].openbsd: + raise subp.ProcessExecutionError() + return SubpResult(content, error) + + self.patched_funcs.enter_context( + mock.patch("cloudinit.dmi.subp.subp", side_effect=_sysctl_subp) + ) + def patch_mapping(self, new_mapping): self.patched_funcs.enter_context( mock.patch("cloudinit.dmi.DMIDECODE_TO_KERNEL", new_mapping) @@ -71,7 +91,7 @@ def test_sysfs_used_with_key_in_mapping_and_file_on_disk(self): self.patch_mapping( - {"mapped-key": dmi.KernelNames("mapped-value", None)} + {"mapped-key": dmi.KernelNames("mapped-value", None, None)} ) expected_dmi_value = "sys-used-correctly" self._create_sysfs_file("mapped-value", expected_dmi_value) @@ -149,7 +169,7 @@ # first verify we get the value if not in container self._m_is_container.return_value = False - key, val = ("system-product-name", "my_product") + key, val = "system-product-name", "my_product" self._create_sysfs_file("product_name", val) self.assertEqual(val, dmi.read_dmi_data(key)) @@ -167,13 +187,19 @@ def test_freebsd_uses_kenv(self): """On a FreeBSD system, kenv is called.""" self._m_is_FreeBSD.return_value = True - key, val = ("system-product-name", "my_product") + key, val = "system-product-name", "my_product" self._configure_kenv_return(key, val) self.assertEqual(dmi.read_dmi_data(key), val) + def test_openbsd_uses_kenv(self): + """On a OpenBSD system, sysctl is called.""" + self._m_is_OpenBSD.return_value = True + key, val = "system-product-name", "my_product" + self._configure_sysctl_return(key, val) + self.assertEqual(dmi.read_dmi_data(key), val) -class TestSubDMIVars: +class TestSubDMIVars: DMI_SRC = ( "dmi.nope__dmi.system-uuid__/__dmi.uuid____dmi.smbios.system.uuid__" ) diff -Nru cloud-init-23.4.4/tests/unittests/test_ds_identify.py cloud-init-24.1.3/tests/unittests/test_ds_identify.py --- cloud-init-23.4.4/tests/unittests/test_ds_identify.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/test_ds_identify.py 2024-03-27 13:14:04.000000000 +0000 @@ -3,6 +3,7 @@ import copy import os from collections import namedtuple +from pathlib import Path from textwrap import dedent from uuid import uuid4 @@ -12,25 +13,27 @@ from cloudinit.sources import DataSourceIBMCloud as ds_ibm from cloudinit.sources import DataSourceOracle as ds_oracle from cloudinit.sources import DataSourceSmartOS as ds_smartos +from tests.helpers import cloud_init_project_dir from tests.unittests.helpers import ( CiTestCase, - cloud_init_project_dir, dir2dict, populate_dir, populate_dir_with_ts, ) -UNAME_MYSYS = ( - "Linux bart 4.4.0-62-generic #83-Ubuntu " - "SMP Wed Jan 18 14:10:15 UTC 
2017 x86_64 GNU/Linux" -) +UNAME_MYSYS = "Linux #83-Ubuntu SMP Wed Jan 18 14:10:15 UTC 2017 x86_64" UNAME_PPC64EL = ( - "Linux diamond 4.4.0-83-generic #106-Ubuntu SMP " - "Mon Jun 26 17:53:54 UTC 2017 " - "ppc64le ppc64le ppc64le GNU/Linux" + "Linux #106-Ubuntu SMP mon Jun 26 17:53:54 UTC 2017 " + "ppc64le ppc64le ppc64le" ) UNAME_FREEBSD = ( - "FreeBSD fbsd12-1 12.1-RELEASE-p10 FreeBSD 12.1-RELEASE-p10 GENERIC amd64" + "FreeBSD FreeBSD 14.0-RELEASE-p3 releng/14.0-n265398-20fae1e1699" + "GENERIC-MMCCAM amd64" +) +UNAME_OPENBSD = "OpenBSD GENERIC.MP#1397 amd64" +UNAME_WSL = ( + "Linux 5.15.133.1-microsoft-standard-WSL2 #1 SMP Thu Oct 5 21:02:42 " + "UTC 2023 x86_64" ) BLKID_EFI_ROOT = """ @@ -106,8 +109,37 @@ # currenty' SmartOS hypervisor "bhyve" is unknown by systemd-detect-virt. MOCK_VIRT_IS_VM_OTHER = {"name": "detect_virt", "RET": "vm-other", "ret": 0} MOCK_VIRT_IS_XEN = {"name": "detect_virt", "RET": "xen", "ret": 0} +MOCK_VIRT_IS_WSL = {"name": "detect_virt", "RET": "wsl", "ret": 0} MOCK_UNAME_IS_PPC64 = {"name": "uname", "out": UNAME_PPC64EL, "ret": 0} MOCK_UNAME_IS_FREEBSD = {"name": "uname", "out": UNAME_FREEBSD, "ret": 0} +MOCK_UNAME_IS_OPENBSD = {"name": "uname", "out": UNAME_OPENBSD, "ret": 0} +MOCK_UNAME_IS_WSL = {"name": "uname", "out": UNAME_WSL, "ret": 0} +MOCK_WSL_INSTANCE_DATA = { + "name": "Noble-MLKit", + "distro": "ubuntu", + "version": "24.04", + "os_release": dedent( + """\ + PRETTY_NAME="Ubuntu Noble Numbat (development branch)" + NAME="Ubuntu" + VERSION_ID="24.04" + VERSION="24.04 (Noble Numbat)" + VERSION_CODENAME=noble + ID=ubuntu + ID_LIKE=debian + UBUNTU_CODENAME=noble + LOGO=ubuntu-logo + """ + ), + "os_release_no_version_id": dedent( + """\ + PRETTY_NAME="Debian GNU/Linux trixie/sid" + NAME="Debian GNU/Linux" + VERSION_CODENAME="trixie" + ID=debian + """ + ), +} shell_true = 0 shell_false = 1 @@ -198,7 +230,17 @@ }, ] - written = [d["name"] for d in mocks] + uname = "Linux" + runpath = "run" + written = [] + for d in mocks: + written.append(d["name"]) + if d["name"] == "uname": + uname = d["out"].split(" ")[0] + # set runpath so that BSDs use /var/run rather than /run + if uname != "Linux": + runpath = "var/run" + for data in mocks: mocklines.append(write_mock(data)) for d in default_mocks: @@ -221,9 +263,9 @@ err = e.stderr cfg = None - cfg_out = os.path.join(rootd, "run/cloud-init/cloud.cfg") + cfg_out = os.path.join(rootd, runpath, "cloud-init/cloud.cfg") if os.path.exists(cfg_out): - contents = util.load_file(cfg_out) + contents = util.load_text_file(cfg_out) try: cfg = safeyaml.load(contents) except Exception as e: @@ -292,11 +334,8 @@ "KERNEL_CMDLINE", "VIRT", "UNAME_KERNEL_NAME", - "UNAME_KERNEL_RELEASE", "UNAME_KERNEL_VERSION", "UNAME_MACHINE", - "UNAME_NODENAME", - "UNAME_OPERATING_SYSTEM", "DSNAME", "DSLIST", "MODE", @@ -318,9 +357,9 @@ stricter identifiers). Since the MAAS datasource is at the begining of the list, this is particularly troublesome and more concerning than NoCloud false positives, for example. 
+ """ config = "LXD-kvm-not-MAAS-1" self._test_ds_found(config) - """ def test_maas_not_detected_2(self): """Don't incorrectly identify maas @@ -583,7 +622,7 @@ mydata = copy.deepcopy(VALID_CFG["Ec2-hvm"]) cfgpath = "etc/cloud/cloud.cfg.d/myds.cfg" mydata["files"][cfgpath] = 'datasource_list: ["NoCloud"]\n' - self._check_via_dict(mydata, rc=RC_FOUND, dslist=["NoCloud", DS_NONE]) + self._check_via_dict(mydata, rc=RC_FOUND, dslist=["NoCloud"]) def test_configured_list_with_none(self): """When datasource_list already contains None, None is not added. @@ -979,6 +1018,7 @@ """Test *BSD code paths FreeBSD doesn't have /sys so we use kenv(1) here. + OpenBSD uses sysctl(8). Other BSD systems fallback to dmidecode(8). BSDs also doesn't have systemd-detect-virt(8), so we use sysctl(8) to query kern.vm_guest, and optionally map it""" @@ -990,6 +1030,13 @@ """ self._test_ds_found("Hetzner-kenv") + def test_dmi_sysctl(self): + """Test that sysctl(8) works on systems which don't have /sys + + This will be used on OpenBSD systems. + """ + self._test_ds_found("Hetzner-sysctl") + def test_dmi_dmidecode(self): """Test that dmidecode(8) works on systems which don't have /sys @@ -1055,6 +1102,100 @@ self._check_via_dict(mycfg, rc=RC_NOT_FOUND) +class TestWSL(DsIdentifyBase): + def test_not_found_virt(self): + """Simple negative test for WSL due other virt.""" + self._test_ds_not_found("Not-WSL") + + def test_no_fs_mounts(self): + """Negative test by lack of host filesystem mount points.""" + self._test_ds_not_found("WSL-no-host-mounts") + + def test_no_cloudinitdir(self): + """Negative test by not finding %USERPROFILE%/.cloud-init.""" + data = copy.deepcopy(VALID_CFG["WSL-supported"]) + data["mocks"].append( + { + "name": "WSL_cloudinit_dir_in", + "ret": 1, + "RET": "", + }, + ) + return self._check_via_dict(data, RC_NOT_FOUND) + + def test_empty_cloudinitdir(self): + """Negative test by lack of host filesystem mount points.""" + data = copy.deepcopy(VALID_CFG["WSL-supported"]) + cloudinitdir = self.tmp_dir() + data["mocks"].append( + { + "name": "WSL_cloudinit_dir_in", + "ret": 0, + "RET": cloudinitdir, + }, + ) + return self._check_via_dict(data, RC_NOT_FOUND) + + def test_found_via_userdata_version_codename(self): + """WLS datasource detected by VERSION_CODENAME when no VERSION_ID""" + data = copy.deepcopy(VALID_CFG["WSL-supported-debian"]) + cloudinitdir = self.tmp_dir() + data["mocks"].append( + { + "name": "WSL_cloudinit_dir_in", + "ret": 0, + "RET": cloudinitdir, + }, + ) + filename = os.path.join(cloudinitdir, "debian-trixie.user-data") + Path(filename).touch() + self._check_via_dict(data, RC_FOUND, dslist=[data.get("ds"), DS_NONE]) + Path(filename).unlink() + + def test_found_via_userdata(self): + """ + WSL datasource is found on applicable userdata files in cloudinitdir. 
+ """ + data = copy.deepcopy(VALID_CFG["WSL-supported"]) + cloudinitdir = self.tmp_dir() + data["mocks"].append( + { + "name": "WSL_cloudinit_dir_in", + "ret": 0, + "RET": cloudinitdir, + }, + ) + userdata_files = [ + os.path.join( + cloudinitdir, MOCK_WSL_INSTANCE_DATA["name"] + ".user-data" + ), + os.path.join( + cloudinitdir, + "%s-%s.user-data" + % ( + MOCK_WSL_INSTANCE_DATA["distro"], + MOCK_WSL_INSTANCE_DATA["version"], + ), + ), + os.path.join( + cloudinitdir, + MOCK_WSL_INSTANCE_DATA["distro"] + "-all.user-data", + ), + os.path.join(cloudinitdir, "default.user-data"), + ] + + for filename in userdata_files: + Path(filename).touch() + self._check_via_dict( + data, RC_FOUND, dslist=[data.get("ds"), DS_NONE] + ) + # Delete one by one + Path(filename).unlink() + + # Until there is none, making the datasource no longer viable. + return self._check_via_dict(data, RC_NOT_FOUND) + + def blkid_out(disks=None): """Convert a list of disk dictionaries into blkid content.""" if disks is None: @@ -1574,6 +1715,13 @@ {"name": "get_kenv_field", "ret": 0, "RET": "Hetzner"}, ], }, + "Hetzner-sysctl": { + "ds": "Hetzner", + "mocks": [ + MOCK_UNAME_IS_OPENBSD, + {"name": "get_sysctl_field", "ret": 0, "RET": "Hetzner"}, + ], + }, "Hetzner-dmidecode": { "ds": "Hetzner", "mocks": [{"name": "dmi_decode", "ret": 0, "RET": "Hetzner"}], @@ -1724,10 +1872,7 @@ { "name": "uname", "ret": 0, - "out": ( - "Linux d43da87a-daca-60e8-e6d4-d2ed372662a3 4.3.0 " - "BrandZ virtual linux x86_64 GNU/Linux" - ), + "out": ("Linux BrandZ virtual linux x86_64"), }, {"name": "blkid", "ret": 2, "out": ""}, ], @@ -2069,4 +2214,73 @@ P_SYS_VENDOR: "3DS Outscale\n", }, }, + "Not-WSL": { + "ds": "WSL", + "mocks": [ + MOCK_VIRT_IS_KVM, + ], + }, + "WSL-no-host-mounts": { + "ds": "WSL", + "mocks": [ + MOCK_VIRT_IS_WSL, + MOCK_UNAME_IS_WSL, + ], + "files": { + "proc/mounts": ( + "/dev/sdd / ext4 rw,errors=remount-ro,data=ordered 0 0\n" + "cgroup2 /sys/fs/cgroup cgroup2 rw,nosuid,nodev,noexec0 0\n" + "snapfuse /snap/core22/1033 fuse.snapfuse ro,nodev,user_id=0," + "group_id=0,allow_other 0 0" + ), + }, + }, + "WSL-supported": { + "ds": "WSL", + "mocks": [ + MOCK_VIRT_IS_WSL, + MOCK_UNAME_IS_WSL, + { + "name": "WSL_instance_name", + "ret": 0, + "RET": MOCK_WSL_INSTANCE_DATA["name"], + }, + ], + "files": { + "proc/mounts": ( + "/dev/sdd / ext4 rw,errors=remount-ro,data=ordered 0 0\n" + "cgroup2 /sys/fs/cgroup cgroup2 rw,nosuid,nodev,noexec0 0\n" + "C:\\134 /mnt/c 9p rw,dirsync,aname=drvfs;path=C:\\;uid=0;" + "gid=0;symlinkroot=/mnt/...\n" + "snapfuse /snap/core22/1033 fuse.snapfuse ro,nodev,user_id=0," + "group_id=0,allow_other 0 0" + ), + "etc/os-release": MOCK_WSL_INSTANCE_DATA["os_release"], + }, + }, + "WSL-supported-debian": { + "ds": "WSL", + "mocks": [ + MOCK_VIRT_IS_WSL, + MOCK_UNAME_IS_WSL, + { + "name": "WSL_instance_name", + "ret": 0, + "RET": MOCK_WSL_INSTANCE_DATA["name"], + }, + ], + "files": { + "proc/mounts": ( + "/dev/sdd / ext4 rw,errors=remount-ro,data=ordered 0 0\n" + "cgroup2 /sys/fs/cgroup cgroup2 rw,nosuid,nodev,noexec0 0\n" + "C:\\134 /mnt/c 9p rw,dirsync,aname=drvfs;path=C:\\;uid=0;" + "gid=0;symlinkroot=/mnt/...\n" + "snapfuse /snap/core22/1033 fuse.snapfuse ro,nodev,user_id=0," + "group_id=0,allow_other 0 0" + ), + "etc/os-release": MOCK_WSL_INSTANCE_DATA[ + "os_release_no_version_id" + ], + }, + }, } diff -Nru cloud-init-23.4.4/tests/unittests/test_helpers.py cloud-init-24.1.3/tests/unittests/test_helpers.py --- cloud-init-23.4.4/tests/unittests/test_helpers.py 2024-02-27 15:17:52.000000000 +0000 +++ 
cloud-init-24.1.3/tests/unittests/test_helpers.py 2024-03-27 13:14:04.000000000 +0000 @@ -6,7 +6,8 @@ from pathlib import Path from cloudinit import sources -from tests.unittests import helpers as test_helpers +from tests.helpers import cloud_init_project_dir, get_top_level_dir +from tests.unittests.helpers import ResourceUsingTestCase class MyDataSource(sources.DataSource): @@ -16,7 +17,7 @@ return self._instance_id -class TestPaths(test_helpers.ResourceUsingTestCase): +class TestPaths(ResourceUsingTestCase): def test_get_ipath_and_instance_id_with_slashes(self): myds = MyDataSource(sys_cfg={}, distro=None, paths={}) myds._instance_id = "/foo/bar" @@ -37,7 +38,7 @@ class Testcloud_init_project_dir: - top_dir = test_helpers.get_top_level_dir() + top_dir = get_top_level_dir() @staticmethod def _get_top_level_dir_alt_implementation(): @@ -61,6 +62,6 @@ """ assert ( str(Path(self.top_dir, "test")) - == test_helpers.cloud_init_project_dir("test") + == cloud_init_project_dir("test") == str(Path(self._get_top_level_dir_alt_implementation(), "test")) ) diff -Nru cloud-init-23.4.4/tests/unittests/test_log.py cloud-init-24.1.3/tests/unittests/test_log.py --- cloud-init-23.4.4/tests/unittests/test_log.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/test_log.py 2024-03-27 13:14:04.000000000 +0000 @@ -62,7 +62,6 @@ class TestDeprecatedLogs: def test_deprecated_log_level(self, caplog): logger = logging.getLogger() - log.setup_logging() logger.deprecated("deprecated message") assert "DEPRECATED" == caplog.records[0].levelname assert "deprecated message" in caplog.text diff -Nru cloud-init-23.4.4/tests/unittests/test_merging.py cloud-init-24.1.3/tests/unittests/test_merging.py --- cloud-init-23.4.4/tests/unittests/test_merging.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/test_merging.py 2024-03-27 13:14:04.000000000 +0000 @@ -70,7 +70,7 @@ if t in [dict, list, tuple]: if t in [dict]: amount = rand.randint(0, 5) - keys = [_random_str(rand) for _i in range(0, amount)] + keys = [_random_str(rand) for _i in range(amount)] base = {} for k in keys: try: @@ -80,7 +80,7 @@ elif t in [list, tuple]: base = [] amount = rand.randint(0, 5) - for _i in range(0, amount): + for _i in range(amount): try: base.append(_make_dict(current_depth + 1, max_depth, rand)) except _NoMoreException: @@ -122,8 +122,8 @@ for i in sorted(source_ids.keys()): source_file_contents = [] for fn in sorted(source_ids[i]): - source_file_contents.append([fn, util.load_file(fn)]) - expected = util.load_yaml(util.load_file(expected_files[i])) + source_file_contents.append([fn, util.load_text_file(fn)]) + expected = util.load_yaml(util.load_text_file(expected_files[i])) entry = [source_file_contents, [expected, expected_files[i]]] tests.append(entry) return tests @@ -270,7 +270,7 @@ [ ({"merge_how": "list()+dict()+str()"}, None), ({"merge_type": "list()+dict()+str()"}, None), - ({"merge_how": []}, "\\[\\] is too short"), + ({"merge_how": []}, f"\\[\\] {helpers.SCHEMA_EMPTY_ERROR}"), ( {"merge_how": {"name": "list", "settings": ["append"]}}, "is not of type", diff -Nru cloud-init-23.4.4/tests/unittests/test_net.py cloud-init-24.1.3/tests/unittests/test_net.py --- cloud-init-23.4.4/tests/unittests/test_net.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/test_net.py 2024-03-27 13:14:04.000000000 +0000 @@ -13,7 +13,7 @@ import pytest from yaml.serializer import Serializer -from cloudinit import distros, log, net +from cloudinit import distros, net from cloudinit 
import safeyaml as yaml from cloudinit import subp, temp_utils, util from cloudinit.net import ( @@ -646,6 +646,7 @@ may-fail=false address1=172.19.1.34/22 route1=0.0.0.0/0,172.19.3.254 +dns=172.19.0.12; """.lstrip(), ), @@ -1280,8 +1281,6 @@ """\ auto lo iface lo inet loopback - dns-nameservers 8.8.8.8 8.8.4.4 - dns-search wark.maas iface eth1 inet manual @@ -1477,11 +1476,11 @@ [ipv4] method=auto - may-fail=false + may-fail=true [ipv6] method=auto - may-fail=false + may-fail=true """ ), @@ -1650,11 +1649,11 @@ [ipv6] method=auto - may-fail=false + may-fail=true [ipv4] method=auto - may-fail=false + may-fail=true """ ), @@ -2797,6 +2796,8 @@ [ipv4] method=auto may-fail=false + dns=8.8.8.8;4.4.4.4;8.8.4.4; + dns-search=barley.maas;wark.maas;foobar.maas; """ ), @@ -2822,6 +2823,8 @@ method=manual may-fail=false address1=192.168.200.7/24 + dns=8.8.8.8;4.4.4.4;8.8.4.4; + dns-search=barley.maas;wark.maas;foobar.maas; """ ), @@ -2846,6 +2849,8 @@ [ipv4] method=auto may-fail=false + dns=8.8.8.8;4.4.4.4;8.8.4.4; + dns-search=barley.maas;wark.maas;foobar.maas; """ ), @@ -2930,12 +2935,15 @@ method=manual may-fail=false address1=192.168.14.2/24 + dns=8.8.8.8;4.4.4.4;8.8.4.4; + dns-search=barley.maas;wark.maas;foobar.maas; [ipv6] method=manual may-fail=false address1=2001:1::1/64 route1=::/0,2001:4800:78ff:1b::1 + dns-search=barley.maas;wark.maas;foobar.maas; """ ), @@ -2962,9 +2970,9 @@ may-fail=false address1=192.168.0.2/24 gateway=192.168.0.1 + address2=192.168.2.10/24 dns=192.168.0.10;10.23.23.134; dns-search=barley.maas;sacchromyces.maas;brettanomyces.maas; - address2=192.168.2.10/24 """ ), @@ -2990,6 +2998,7 @@ [ipv6] method=auto may-fail=false + dns-search=barley.maas;wark.maas;foobar.maas; """ ), @@ -4131,6 +4140,170 @@ ), }, }, + "v1-dns": { + "expected_networkd": textwrap.dedent( + """\ + [Address] + Address=192.168.1.20/16 + + [Match] + MACAddress=11:22:33:44:55:66 + Name=interface0 + + [Network] + DHCP=no + DNS=1.1.1.1 3.3.3.3 + Domains=aaaa cccc + + [Route] + Gateway=192.168.1.1 + """ + ), + "expected_eni": textwrap.dedent( + """\ + # This file is generated from information provided by the datasource. Changes + # to it will not persist across an instance reboot. To disable cloud-init's + # network configuration capabilities, write a file + # /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: + # network: {config: disabled} + auto lo + iface lo inet loopback + dns-nameservers 2.2.2.2 + dns-search bbbb + + iface lo inet6 loopback + dns-nameservers FEDC::1 + dns-search bbbb + + auto interface0 + iface interface0 inet static + address 192.168.1.20/16 + dns-nameservers 1.1.1.1 3.3.3.3 + dns-search aaaa cccc + gateway 192.168.1.1 + """ # noqa: E501 + ), + "expected_netplan": textwrap.dedent( + """\ + # This file is generated from information provided by the datasource. Changes + # to it will not persist across an instance reboot. To disable cloud-init's + # network configuration capabilities, write a file + # /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: + # network: {config: disabled} + network: + version: 2 + ethernets: + interface0: + addresses: + - 192.168.1.20/16 + match: + macaddress: 11:22:33:44:55:66 + nameservers: + addresses: + - 1.1.1.1 + - 3.3.3.3 + search: + - aaaa + - cccc + routes: + - to: default + via: 192.168.1.1 + set-name: interface0 + """ # noqa: E501 + ), + "expected_sysconfig_opensuse": { + "ifcfg-interface0": textwrap.dedent( + """\ + # Created by cloud-init automatically, do not edit. 
+ # + BOOTPROTO=static + IPADDR=192.168.1.20 + LLADDR=11:22:33:44:55:66 + NETMASK=255.255.0.0 + STARTMODE=auto + """ + ) + }, + "expected_sysconfig_rhel": { + "ifcfg-eth0": textwrap.dedent( + """\ + # Created by cloud-init automatically, do not edit. + # + BOOTPROTO=none + DEFROUTE=yes + DEVICE=interface0 + DNS1=1.1.1.1 + DNS2=3.3.3.3 + DOMAIN=aaaa cccc + GATEWAY=192.168.1.1 + HWADDR=11:22:33:44:55:66 + IPADDR=192.168.1.20 + NETMASK=255.255.0.0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + "expected_network_manager": { + "cloud-init-interface0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init interface0 + uuid=8b6862ed-dbd6-5830-93f7-a91451c13828 + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=11:22:33:44:55:66 + + [ipv4] + method=manual + may-fail=false + address1=192.168.1.20/16 + gateway=192.168.1.1 + dns=3.3.3.3;1.1.1.1; + dns-search=cccc;aaaa; + + """ + ) + }, + "yaml": textwrap.dedent( + """\ + version: 1 + config: + - type: physical + name: interface0 + mac_address: "11:22:33:44:55:66" + subnets: + - type: static + address: 192.168.1.20/16 + gateway: 192.168.1.1 + dns_nameservers: + - 3.3.3.3 + dns_search: + - cccc + - type: nameserver + interface: interface0 + address: + - 1.1.1.1 + search: + - aaaa + - type: nameserver + address: + - 2.2.2.2 + - FEDC::1 + search: + - bbbb + """ + ), + }, "v2-dev-name-via-mac-lookup": { "expected_sysconfig_rhel": { "ifcfg-eth0": textwrap.dedent( @@ -4154,6 +4327,320 @@ """ ), }, + "v2-mixed-routes": { + "expected_network_manager": { + "cloud-init-eth0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth0 + uuid=1dd9a779-d327-56e1-8454-c65e2556c12c + autoconnect-priority=120 + type=ethernet + interface-name=eth0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mtu=500 + + [ipv4] + method=auto + may-fail=true + route1=169.254.42.42/32,62.210.0.1 + route1_options=mtu=400 + route2=169.254.42.43/32,62.210.0.2 + route2_options=mtu=200 + address1=192.168.1.20/16 + dns=8.8.8.8; + dns-search=lab;home; + + [ipv6] + route1=::/0,fe80::dc00:ff:fe20:186 + route1_options=mtu=300 + route2=fe80::dc00:ff:fe20:188/64,fe80::dc00:ff:fe20:187 + route2_options=mtu=100 + method=auto + may-fail=true + address1=2001:bc8:1210:232:dc00:ff:fe20:185/64 + dns=FEDC::1; + dns-search=lab;home; + + """ + ) + }, + "yaml": textwrap.dedent( + """\ + version: 2 + ethernets: + eth0: + dhcp4: true + dhcp6: true + mtu: 500 + nameservers: + search: [lab, home] + addresses: [8.8.8.8, "FEDC::1"] + routes: + - to: 169.254.42.42/32 + via: 62.210.0.1 + mtu: 400 + - via: fe80::dc00:ff:fe20:186 + to: ::/0 + mtu: 300 + - to: 169.254.42.43/32 + via: 62.210.0.2 + mtu: 200 + - via: fe80::dc00:ff:fe20:187 + to: fe80::dc00:ff:fe20:188 + mtu: 100 + addresses: + - 192.168.1.20/16 + - 2001:bc8:1210:232:dc00:ff:fe20:185/64 + """ + ), + }, + "v2-dns": { + "expected_networkd": textwrap.dedent( + """\ + [Address] + Address=192.168.1.20/16 + + [Address] + Address=2001:bc8:1210:232:dc00:ff:fe20:185/64 + + [Match] + Name=eth0 + + [Network] + DHCP=no + DNS=8.8.8.8 FEDC::1 + Domains=lab home + """ + ), + "expected_eni": textwrap.dedent( + """\ + # This file is generated from information provided by the datasource. Changes + # to it will not persist across an instance reboot. 
To disable cloud-init's + # network configuration capabilities, write a file + # /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: + # network: {config: disabled} + auto lo + iface lo inet loopback + + auto eth0 + iface eth0 inet static + address 192.168.1.20/16 + dns-nameservers 8.8.8.8 + dns-search lab home + + # control-alias eth0 + iface eth0 inet6 static + address 2001:bc8:1210:232:dc00:ff:fe20:185/64 + dns-nameservers FEDC::1 + dns-search lab home + """ # noqa: E501 + ), + "expected_sysconfig_opensuse": { + "ifcfg-eth0": textwrap.dedent( + """\ + # Created by cloud-init automatically, do not edit. + # + BOOTPROTO=static + IPADDR=192.168.1.20 + IPADDR6=2001:bc8:1210:232:dc00:ff:fe20:185/64 + NETMASK=255.255.0.0 + STARTMODE=auto + """ + ) + }, + "expected_sysconfig_rhel": { + "ifcfg-eth0": textwrap.dedent( + """\ + # Created by cloud-init automatically, do not edit. + # + BOOTPROTO=none + DEVICE=eth0 + DNS1=8.8.8.8 + DNS2=FEDC::1 + DOMAIN="lab home" + IPADDR=192.168.1.20 + IPV6ADDR=2001:bc8:1210:232:dc00:ff:fe20:185/64 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + NETMASK=255.255.0.0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ) + }, + "expected_network_manager": { + "cloud-init-eth0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth0 + uuid=1dd9a779-d327-56e1-8454-c65e2556c12c + autoconnect-priority=120 + type=ethernet + interface-name=eth0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + + [ipv4] + method=manual + may-fail=false + address1=192.168.1.20/16 + dns=8.8.8.8; + dns-search=lab;home; + + [ipv6] + method=manual + may-fail=false + address1=2001:bc8:1210:232:dc00:ff:fe20:185/64 + dns=FEDC::1; + dns-search=lab;home; + + """ + ) + }, + "yaml": textwrap.dedent( + """\ + version: 2 + ethernets: + eth0: + nameservers: + search: [lab, home] + addresses: [8.8.8.8, "FEDC::1"] + addresses: + - 192.168.1.20/16 + - 2001:bc8:1210:232:dc00:ff:fe20:185/64 + """ + ), + }, + "v2-dns-no-if-ips": { + "expected_network_manager": { + "cloud-init-eth0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth0 + uuid=1dd9a779-d327-56e1-8454-c65e2556c12c + autoconnect-priority=120 + type=ethernet + interface-name=eth0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + + [ipv4] + method=auto + may-fail=true + dns=8.8.8.8; + dns-search=lab;home; + + [ipv6] + method=auto + may-fail=true + dns=FEDC::1; + dns-search=lab;home; + + """ + ) + }, + "yaml": textwrap.dedent( + """\ + version: 2 + ethernets: + eth0: + dhcp4: true + dhcp6: true + nameservers: + search: [lab, home] + addresses: [8.8.8.8, "FEDC::1"] + """ + ), + }, + "v2-dns-no-dhcp": { + "expected_network_manager": { + "cloud-init-eth0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth0 + uuid=1dd9a779-d327-56e1-8454-c65e2556c12c + autoconnect-priority=120 + type=ethernet + interface-name=eth0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + + """ + ) + }, + "yaml": textwrap.dedent( + """\ + version: 2 + ethernets: + eth0: + nameservers: + search: [lab, home] + addresses: [8.8.8.8, "FEDC::1"] + """ + ), + }, + "v2-route-no-gateway": { + "expected_network_manager": { + "cloud-init-eth0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init eth0 + uuid=1dd9a779-d327-56e1-8454-c65e2556c12c + autoconnect-priority=120 + type=ethernet + interface-name=eth0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + + [ipv4] + method=auto + may-fail=false + route1=0.0.0.0/0 + + """ + ) + }, + "yaml": textwrap.dedent( + """\ + version: 2 + ethernets: + eth0: + dhcp4: true + routes: + - to: "0.0.0.0/0" + """ + ), + }, } @@ -4339,6 +4826,7 @@ "ethernets": { "eth0": { "dhcp4": True, + "dhcp6": True, "set-name": "eth0", "match": { "macaddress": "00:11:22:33:44:55", @@ -4423,6 +4911,9 @@ auto eth0 iface eth0 inet dhcp + +# control-alias eth0 +iface eth0 inet6 dhcp """ self.assertEqual(expected.lstrip(), contents.lstrip()) @@ -4512,6 +5003,9 @@ auto eth1 iface eth1 inet dhcp + +# control-alias eth1 +iface eth1 inet6 dhcp """ self.assertEqual(expected.lstrip(), contents.lstrip()) @@ -4735,7 +5229,9 @@ # BOOTPROTO=dhcp DEVICE=eth1000 +DHCPV6C=yes HWADDR=07-1c-c6-75-a4-be +IPV6INIT=yes NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet @@ -5228,6 +5724,20 @@ self.assertEqual([], dhcp_found) + @pytest.mark.xfail(reason="sysconfig should render interface-level DNS") + def test_v1_dns(self): + entry = NETWORK_CONFIGS["v1-dns"] + found = self._render_and_read(network_config=yaml.load(entry["yaml"])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + # TODO: verify resolv.conf + + def test_v2_dns(self): + entry = NETWORK_CONFIGS["v2-dns"] + found = self._render_and_read(network_config=yaml.load(entry["yaml"])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + def test_netplan_dhcp_false_no_dhcp_in_sysconfig(self): """netplan cfg with dhcp[46]: False should not have bootproto=dhcp""" @@ -5503,7 +6013,6 @@ """ # noqa: E501 ), } - log.setup_logging() found = self._render_and_read(network_config=v2_data) self._compare_files_to_expected(expected, found) @@ -5646,7 +6155,8 @@ expected_content = """ # Created by cloud-init automatically, do not edit. 
# -BOOTPROTO=dhcp4 +BOOTPROTO=dhcp +DHCLIENT6_MODE=managed LLADDR=07-1c-c6-75-a4-be STARTMODE=auto """.lstrip() @@ -5918,6 +6428,19 @@ self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) + def test_v1_dns(self): + entry = NETWORK_CONFIGS["v1-dns"] + found = self._render_and_read(network_config=yaml.load(entry["yaml"])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + # TODO: verify resolv.conf + + def test_v2_dns(self): + entry = NETWORK_CONFIGS["v2-dns"] + found = self._render_and_read(network_config=yaml.load(entry["yaml"])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + @mock.patch( "cloudinit.net.is_openvswitch_internal_interface", @@ -6032,7 +6555,11 @@ [ipv4] method=auto - may-fail=false + may-fail=true + + [ipv6] + method=auto + may-fail=true """ ), @@ -6253,6 +6780,48 @@ entry[self.expected_name], self.expected_conf_d, found ) + def test_v1_dns(self): + entry = NETWORK_CONFIGS["v1-dns"] + found = self._render_and_read(network_config=yaml.load(entry["yaml"])) + self._compare_files_to_expected( + entry[self.expected_name], self.expected_conf_d, found + ) + + def test_v2_mixed_routes(self): + entry = NETWORK_CONFIGS["v2-mixed-routes"] + found = self._render_and_read(network_config=yaml.load(entry["yaml"])) + self._compare_files_to_expected( + entry[self.expected_name], self.expected_conf_d, found + ) + + def test_v2_dns(self): + entry = NETWORK_CONFIGS["v2-dns"] + found = self._render_and_read(network_config=yaml.load(entry["yaml"])) + self._compare_files_to_expected( + entry[self.expected_name], self.expected_conf_d, found + ) + + def test_v2_dns_no_ips(self): + entry = NETWORK_CONFIGS["v2-dns-no-if-ips"] + found = self._render_and_read(network_config=yaml.load(entry["yaml"])) + self._compare_files_to_expected( + entry[self.expected_name], self.expected_conf_d, found + ) + + def test_v2_dns_no_dhcp(self): + entry = NETWORK_CONFIGS["v2-dns-no-dhcp"] + found = self._render_and_read(network_config=yaml.load(entry["yaml"])) + self._compare_files_to_expected( + entry[self.expected_name], self.expected_conf_d, found + ) + + def test_v2_route_no_gateway(self): + entry = NETWORK_CONFIGS["v2-route-no-gateway"] + found = self._render_and_read(network_config=yaml.load(entry["yaml"])) + self._compare_files_to_expected( + entry[self.expected_name], self.expected_conf_d, found + ) + @mock.patch( "cloudinit.net.is_openvswitch_internal_interface", @@ -6298,6 +6867,9 @@ auto eth1000 iface eth1000 inet dhcp + +# control-alias eth1000 +iface eth1000 inet6 dhcp """ self.assertEqual(expected.lstrip(), contents.lstrip()) @@ -6357,6 +6929,7 @@ ethernets: eth1000: dhcp4: true + dhcp6: true match: macaddress: 07-1c-c6-75-a4-be set-name: eth1000 @@ -7418,6 +7991,17 @@ files["/etc/netplan/50-cloud-init.yaml"].splitlines(), ) + @pytest.mark.xfail( + reason="netplan should render interface-level nameservers" + ) + def testsimple_render_v1_dns(self): + entry = NETWORK_CONFIGS["v1-dns"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) + self.assertEqual( + entry["expected_netplan"].splitlines(), + files["/etc/netplan/50-cloud-init.yaml"].splitlines(), + ) + def test_render_output_has_yaml_no_aliases(self): entry = { "yaml": V1_NAMESERVER_ALIAS, @@ -7792,6 +8376,24 @@ files["/etc/network/interfaces"].splitlines(), ) + @pytest.mark.xfail(reason="GH-4219") + def test_v1_dns(self): + entry = NETWORK_CONFIGS["v1-dns"] + files = 
self._render_and_read(network_config=yaml.load(entry["yaml"])) + self.assertEqual( + entry["expected_eni"].splitlines(), + files["/etc/network/interfaces"].splitlines(), + ) + + @pytest.mark.xfail(reason="GH-4219") + def test_v2_dns(self): + entry = NETWORK_CONFIGS["v2-dns"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) + self.assertEqual( + entry["expected_eni"].splitlines(), + files["/etc/network/interfaces"].splitlines(), + ) + class TestNetworkdNetRendering(CiTestCase): def create_conf_dict(self, contents): @@ -7856,7 +8458,7 @@ Name=eth1000 MACAddress=07-1c-c6-75-a4-be [Network] - DHCP=ipv4""" + DHCP=yes""" ).rstrip(" ") expected = self.create_conf_dict(expected.splitlines()) @@ -8055,6 +8657,35 @@ self.compare_dicts(actual, expected) + @pytest.mark.xfail( + reason="DNS and Domains getting rendered on multiple lines" + ) + @mock.patch("cloudinit.net.util.chownbyname", return_value=True) + def test_v1_dns(self, m_chown): + nwk_fn = "/etc/systemd/network/10-cloud-init-eth0.network" + entry = NETWORK_CONFIGS["v1-dns"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) + + actual = self.create_conf_dict(files[nwk_fn].splitlines()) + expected = self.create_conf_dict( + entry["expected_networkd"].splitlines() + ) + + self.compare_dicts(actual, expected) + + @mock.patch("cloudinit.net.util.chownbyname", return_value=True) + def test_v2_dns(self, m_chown): + nwk_fn = "/etc/systemd/network/10-cloud-init-eth0.network" + entry = NETWORK_CONFIGS["v2-dns"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) + + actual = self.create_conf_dict(files[nwk_fn].splitlines()) + expected = self.create_conf_dict( + entry["expected_networkd"].splitlines() + ) + + self.compare_dicts(actual, expected) + class TestRenderersSelect: @pytest.mark.parametrize( diff -Nru cloud-init-23.4.4/tests/unittests/test_net_activators.py cloud-init-24.1.3/tests/unittests/test_net_activators.py --- cloud-init-23.4.4/tests/unittests/test_net_activators.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/test_net_activators.py 2024-03-27 13:14:04.000000000 +0000 @@ -322,3 +322,106 @@ activator.bring_down_interface("eth0") assert len(m_subp.call_args_list) == 1 assert m_subp.call_args_list[0] == expected_call_list[0] + + +class TestNetworkManagerActivatorBringUp: + @patch("cloudinit.subp.subp", return_value=("", "")) + @patch( + "cloudinit.net.network_manager.available_nm_ifcfg_rh", + return_value=True, + ) + @patch("os.path.isfile") + @patch("os.path.exists", return_value=True) + def test_bring_up_interface_no_nm_conn( + self, m_exists, m_isfile, m_plugin, m_subp + ): + """ + There is no network manager connection file but ifcfg-rh plugin is + present and ifcfg interface config files are also present. In this + case, we should use ifcfg files. 
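+        The expected nmcli sequence asserted below is "connection load"
+        on the ifcfg path followed by "connection up filename" on the
+        same path.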
+ """ + + def fake_isfile_no_nmconn(filename): + return False if filename.endswith(".nmconnection") else True + + m_isfile.side_effect = fake_isfile_no_nmconn + + expected_call_list = [ + ( + ( + [ + "nmcli", + "connection", + "load", + "".join( + [ + "/etc/sysconfig/network-scripts/ifcfg-eth0", + ] + ), + ], + ), + {}, + ), + ( + ( + [ + "nmcli", + "connection", + "up", + "filename", + "".join( + [ + "/etc/sysconfig/network-scripts/ifcfg-eth0", + ] + ), + ], + ), + {}, + ), + ] + + index = 0 + assert NetworkManagerActivator.bring_up_interface("eth0") + for call in m_subp.call_args_list: + assert call == expected_call_list[index] + index += 1 + + @patch("cloudinit.subp.subp", return_value=("", "")) + @patch( + "cloudinit.net.network_manager.available_nm_ifcfg_rh", + return_value=False, + ) + @patch("os.path.isfile") + @patch("os.path.exists", return_value=True) + def test_bring_up_interface_no_plugin_no_nm_conn( + self, m_exists, m_isfile, m_plugin, m_subp + ): + """ + The ifcfg-rh plugin is absent and nmconnection file is also + not present. In this case, we can't use ifcfg file and the + interface bring up should fail. + """ + + def fake_isfile_no_nmconn(filename): + return False if filename.endswith(".nmconnection") else True + + m_isfile.side_effect = fake_isfile_no_nmconn + assert not NetworkManagerActivator.bring_up_interface("eth0") + + @patch("cloudinit.subp.subp", return_value=("", "")) + @patch( + "cloudinit.net.network_manager.available_nm_ifcfg_rh", + return_value=True, + ) + @patch("os.path.isfile", return_value=False) + @patch("os.path.exists", return_value=True) + def test_bring_up_interface_no_conn_file( + self, m_exists, m_isfile, m_plugin, m_subp + ): + """ + Neither network manager connection files are present nor + ifcfg files are present. Even if ifcfg-rh plugin is present, + we can not bring up the interface. So bring_up_interface() + should fail. 
+ """ + assert not NetworkManagerActivator.bring_up_interface("eth0") diff -Nru cloud-init-23.4.4/tests/unittests/test_render_template.py cloud-init-24.1.3/tests/unittests/test_render_template.py --- cloud-init-23.4.4/tests/unittests/test_render_template.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/test_render_template.py 2024-03-27 13:14:04.000000000 +0000 @@ -5,7 +5,7 @@ import pytest from cloudinit import subp, templater, util -from tests.unittests.helpers import cloud_init_project_dir +from tests.helpers import cloud_init_project_dir # TODO(Look to align with tools.render-template or cloudinit.distos.OSFAMILIES) DISTRO_VARIANTS = [ diff -Nru cloud-init-23.4.4/tests/unittests/test_ssh_util.py cloud-init-24.1.3/tests/unittests/test_ssh_util.py --- cloud-init-23.4.4/tests/unittests/test_ssh_util.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/test_ssh_util.py 2024-03-27 13:14:04.000000000 +0000 @@ -58,18 +58,6 @@ # the testdata for OpenSSH, and their private keys are available # https://github.com/openssh/openssh-portable/tree/master/regress/unittests/sshkey/testdata VALID_CONTENT = { - "dsa": ( - "AAAAB3NzaC1kc3MAAACBAIrjOQSlSea19bExXBMBKBvcLhBoVvNBjCppNzllipF" - "W4jgIOMcNanULRrZGjkOKat6MWJNetSbV1E6IOFDQ16rQgsh/OvYU9XhzM8seLa" - "A21VszZuhIV7/2DE3vxu7B54zVzueG1O1Deq6goQCRGWBUnqO2yluJiG4HzrnDa" - "jzRAAAAFQDMPO96qXd4F5A+5b2f2MO7SpVomQAAAIBpC3K2zIbDLqBBs1fn7rsv" - "KcJvwihdlVjG7UXsDB76P2GNqVG+IlYPpJZ8TO/B/fzTMtrdXp9pSm9OY1+BgN4" - "REsZ2WNcvfgY33aWaEM+ieCcQigvxrNAF2FTVcbUIIxAn6SmHuQSWrLSfdHc8H7" - "hsrgeUPPdzjBD/cv2ZmqwZ1AAAAIAplIsScrJut5wJMgyK1JG0Kbw9JYQpLe95P" - "obB069g8+mYR8U0fysmTEdR44mMu0VNU5E5OhTYoTGfXrVrkR134LqFM2zpVVbE" - "JNDnIqDHxTkc6LY2vu8Y2pQ3/bVnllZZOda2oD5HQ7ovygQa6CH+fbaZHbdDUX/" - "5z7u2rVAlDw==" - ), "ecdsa": ( "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBITrGBB3cgJ" "J7fPxvtMW9H3oRisNpJ3OAslxZeyP7I0A9BPAW0RQIwHVtVnM7zrp4nI+JLZov/" @@ -180,35 +168,6 @@ "AAAAGnNrLXNzaC1lZDI1NTE5QG9wZW5zc2guY29tAAAAICFo/k5LU8863u66YC9" "eUO2170QduohPURkQnbLa/dczAAAABHNzaDo=" ), - "ssh-dss-cert-v01@openssh.com": ( - "AAAAHHNzaC1kc3MtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgdTlbNU9Hn9Qng3F" - "HxwH971bxCIoq1ern/QWFFDWXgmYAAACBAPqS600VGwdPAQC/p3f0uGyrLVql0c" - "Fn1zYd/JGvtabKnIYjLaYprje/NcjwI3CZFJiz4Dp3S8kLs+X5/1DMn/Tg1Y4D4" - "yLB+6vCtHcJF7rVBFhvw/KZwc7G54ez3khyOtsg82fzpyOc8/mq+/+C5TMKO7DD" - "jMF0k5emWKCsa3ZfAAAAFQCjA/+dKkMu4/CWjJPtfl7YNaStNQAAAIEA7uX1BVV" - "tJKjLmWrpw62+l/xSXA5rr7MHBuWjiCYV3VHBfXJaQDyRDtGuEJKDwdzqYgacpG" - "ApGWL/cuBtJ9nShsUl6GRG0Ra03g+Hx9VR5LviJBsjAVB4qVgciU1NGga0Bt2Le" - "cd1X4EGQRBzVXeuOpiqGM6jP/I2yDMs0Pboet0AAACBAOdXpyfmobEBaOqZAuvg" - "j1P0uhjG2P31Ufurv22FWPBU3A9qrkxbOXwE0LwvjCvrsQV/lrYhJz/tiys40Ve" - "ahulWZE5SAHMXGIf95LiLSgaXMjko7joot+LK84ltLymwZ4QMnYjnZSSclf1Uuy" - "QMcUtb34+I0u9Ycnyhp2mSFsQtAAAAAAAAAAYAAAACAAAABmp1bGl1cwAAABIAA" - "AAFaG9zdDEAAAAFaG9zdDIAAAAANowB8AAAAABNHmBwAAAAAAAAAAAAAAAAAAAA" - "MwAAAAtzc2gtZWQyNTUxOQAAACBThupGO0X+FLQhbz8CoKPwc7V3JNsQuGtlsgN" - "+F7SMGQAAAFMAAAALc3NoLWVkMjU1MTkAAABAh/z1LIdNL1b66tQ8t9DY9BTB3B" - "QKpTKmc7ezyFKLwl96yaIniZwD9Ticdbe/8i/Li3uCFE3EAt8NAIv9zff8Bg==" - ), - "ssh-dss": ( - "AAAAB3NzaC1kc3MAAACBAPqS600VGwdPAQC/p3f0uGyrLVql0cFn1zYd/JGvtab" - "KnIYjLaYprje/NcjwI3CZFJiz4Dp3S8kLs+X5/1DMn/Tg1Y4D4yLB+6vCtHcJF7" - "rVBFhvw/KZwc7G54ez3khyOtsg82fzpyOc8/mq+/+C5TMKO7DDjMF0k5emWKCsa" - "3ZfAAAAFQCjA/+dKkMu4/CWjJPtfl7YNaStNQAAAIEA7uX1BVVtJKjLmWrpw62+" - "l/xSXA5rr7MHBuWjiCYV3VHBfXJaQDyRDtGuEJKDwdzqYgacpGApGWL/cuBtJ9n" - 
"ShsUl6GRG0Ra03g+Hx9VR5LviJBsjAVB4qVgciU1NGga0Bt2Lecd1X4EGQRBzVX" - "euOpiqGM6jP/I2yDMs0Pboet0AAACBAOdXpyfmobEBaOqZAuvgj1P0uhjG2P31U" - "furv22FWPBU3A9qrkxbOXwE0LwvjCvrsQV/lrYhJz/tiys40VeahulWZE5SAHMX" - "GIf95LiLSgaXMjko7joot+LK84ltLymwZ4QMnYjnZSSclf1UuyQMcUtb34+I0u9" - "Ycnyhp2mSFsQt" - ), "ssh-ed25519-cert-v01@openssh.com": ( "AAAAIHNzaC1lZDI1NTE5LWNlcnQtdjAxQG9wZW5zc2guY29tAAAAIIxzuxl4z3u" "wAIslne8Huft+1n1IhHAlNbWZkQyyECCGAAAAIFOG6kY7Rf4UtCFvPwKgo/BztX" @@ -398,7 +357,7 @@ """new entries with the same base64 should replace old.""" orig_entries = [ " ".join(("rsa", VALID_CONTENT["rsa"], "orig_comment1")), - " ".join(("dsa", VALID_CONTENT["dsa"], "orig_comment2")), + " ".join(("ecdsa", VALID_CONTENT["ecdsa"], "orig_comment2")), ] expected = "\n".join([new_entries[0], orig_entries[1]]) + "\n" @@ -412,7 +371,7 @@ assert expected == found -@mock.patch(M_PATH + "util.load_file") +@mock.patch(M_PATH + "util.load_text_file") @mock.patch(M_PATH + "os.path.isfile") class TestParseSSHConfig: @pytest.mark.parametrize( @@ -579,7 +538,7 @@ util.write_file(mycfg, self.cfgdata) ret = ssh_util.update_ssh_config({"MyKey": "NEW_VAL"}, mycfg) assert True is ret - found = util.load_file(mycfg) + found = util.load_text_file(mycfg) assert self.cfgdata.replace("ORIG_VAL", "NEW_VAL") == found # assert there is a newline at end of file (LP: #1677205) assert "\n" == found[-1] @@ -590,7 +549,7 @@ with patch("cloudinit.ssh_util.util.write_file") as m_write_file: ret = ssh_util.update_ssh_config({"MyKey": "ORIG_VAL"}, mycfg) assert False is ret - assert self.cfgdata == util.load_file(mycfg) + assert self.cfgdata == util.load_text_file(mycfg) m_write_file.assert_not_called() def test_without_include(self, tmpdir): @@ -598,7 +557,7 @@ cfg = "X Y" util.write_file(mycfg, cfg) assert ssh_util.update_ssh_config({"key": "value"}, mycfg) - assert "X Y\nkey value\n" == util.load_file(mycfg) + assert "X Y\nkey value\n" == util.load_text_file(mycfg) expected_conf_file = f"{mycfg}.d/50-cloud-init.conf" assert not os.path.isfile(expected_conf_file) @@ -613,14 +572,14 @@ expected_conf_file = f"{mycfg}.d/50-cloud-init.conf" assert os.path.isfile(expected_conf_file) assert 0o600 == stat.S_IMODE(os.stat(expected_conf_file).st_mode) - assert "key value\n" == util.load_file(expected_conf_file) + assert "key value\n" == util.load_text_file(expected_conf_file) def test_with_commented_include(self, tmpdir): mycfg = tmpdir.join("sshd_config") cfg = f"# Include {mycfg}.d/*.conf" util.write_file(mycfg, cfg) assert ssh_util.update_ssh_config({"key": "value"}, mycfg) - assert f"{cfg}\nkey value\n" == util.load_file(mycfg) + assert f"{cfg}\nkey value\n" == util.load_text_file(mycfg) expected_conf_file = f"{mycfg}.d/50-cloud-init.conf" assert not os.path.isfile(expected_conf_file) @@ -629,7 +588,7 @@ cfg = f"Include other_{mycfg}.d/*.conf" util.write_file(mycfg, cfg) assert ssh_util.update_ssh_config({"key": "value"}, mycfg) - assert f"{cfg}\nkey value\n" == util.load_file(mycfg) + assert f"{cfg}\nkey value\n" == util.load_text_file(mycfg) expected_conf_file = f"{mycfg}.d/50-cloud-init.conf" assert not os.path.isfile(expected_conf_file) assert not os.path.isfile(f"other_{mycfg}.d/50-cloud-init.conf") @@ -646,7 +605,7 @@ ssh_util.append_ssh_config( [("MyKey", "NEW_VAL"), ("MyKey", "NEW_VAL_2")], mycfg ) - found = util.load_file(mycfg) + found = util.load_text_file(mycfg) expected_cfg = dedent( """\ #Option val @@ -852,9 +811,9 @@ home, "authorized_keys", "rsa", keys ) - # /tmp/home/bobby/.ssh/user_keys = dsa + # /tmp/home/bobby/.ssh/user_keys 
= ed25519 user_keys = self.create_user_authorized_file( - home, "user_keys", "dsa", keys + home, "user_keys", "ed25519", keys ) # /tmp/sshd_config @@ -920,9 +879,9 @@ home, "authorized_keys", "rsa", keys ) - # /tmp/home/bobby/.ssh/user_keys = dsa + # /tmp/home/bobby/.ssh/user_keys = ed25519 user_keys = self.create_user_authorized_file( - home, "user_keys", "dsa", keys + home, "user_keys", "ed25519", keys ) authorized_keys_global = self.create_global_authorized_file( @@ -1153,9 +1112,9 @@ self.create_user_authorized_file( home_bobby, "authorized_keys2", "rsa", keys ) - # /tmp/home/bobby/.ssh/user_keys3 = dsa + # /tmp/home/bobby/.ssh/user_keys3 = ed25519 user_keys = self.create_user_authorized_file( - home_bobby, "user_keys3", "dsa", keys + home_bobby, "user_keys3", "ed25519", keys ) # /tmp/home/suzie/.ssh/authorized_keys2 = rsa @@ -1233,9 +1192,9 @@ authorized_keys = self.create_user_authorized_file( home_bobby, "authorized_keys2", "rsa", keys ) - # /tmp/home/bobby/.ssh/user_keys3 = dsa + # /tmp/home/bobby/.ssh/user_keys3 = ecdsa user_keys = self.create_user_authorized_file( - home_bobby, "user_keys3", "dsa", keys + home_bobby, "user_keys3", "ecdsa", keys ) # /tmp/home/badguy/home/bobby = "" @@ -1326,10 +1285,10 @@ authorized_keys = self.create_user_authorized_file( home_bobby, "authorized_keys", "rsa", keys ) - # /tmp/etc/ssh/userkeys/bobby = dsa + # /tmp/etc/ssh/userkeys/bobby = ecdsa # assume here that we can bypass userkeys, despite permissions self.create_global_authorized_file( - "etc/ssh/userkeys/bobby", "dsa", keys, tmpdir + "etc/ssh/userkeys/bobby", "ecdsa", keys, tmpdir ) # /tmp/home/badguy/.ssh/authorized_keys = ssh-xmss@openssh.com @@ -1419,10 +1378,10 @@ self.create_user_authorized_file( home_bobby, "authorized_keys", "rsa", keys ) - # /tmp/etc/ssh/userkeys/bobby = dsa + # /tmp/etc/ssh/userkeys/bobby = ed25519 # assume here that we can bypass userkeys, despite permissions authorized_keys = self.create_global_authorized_file( - "etc/ssh/userkeys/bobby", "dsa", keys, tmpdir + "etc/ssh/userkeys/bobby", "ed25519", keys, tmpdir ) # /tmp/home/badguy/.ssh/authorized_keys = ssh-xmss@openssh.com diff -Nru cloud-init-23.4.4/tests/unittests/test_stages.py cloud-init-24.1.3/tests/unittests/test_stages.py --- cloud-init-23.4.4/tests/unittests/test_stages.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/test_stages.py 2024-03-27 13:14:04.000000000 +0000 @@ -9,7 +9,8 @@ from cloudinit import sources, stages from cloudinit.event import EventScope, EventType -from cloudinit.sources import NetworkConfigSource +from cloudinit.helpers import Paths +from cloudinit.sources import DataSource, NetworkConfigSource from cloudinit.util import sym_link, write_file from tests.unittests.helpers import mock from tests.unittests.util import TEST_INSTANCE_ID, FakeDataSource @@ -17,6 +18,41 @@ M_PATH = "cloudinit.stages." 
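The TestUpdateEventEnabled class added below parametrizes both the datasource updates config and the content returned by util.read_hotplug_enabled_file, asserting that the enabled-file scopes decide whether hotplug events fire. As a rough orientation, here is a minimal sketch of the file-driven check those cases exercise (hotplug_allowed and enabled_file are illustrative names, not cloud-init's API; the real stages.update_event_enabled also weighs user config and datasource defaults):

    from cloudinit.event import EventScope, EventType

    def hotplug_allowed(datasource, scope, enabled_file):
        # Hotplug must be among the update events the datasource supports
        # for this scope...
        supported = datasource.supported_update_events.get(scope, set())
        if EventType.HOTPLUG not in supported:
            return False
        # ...and the scope must be listed in the hotplug enabled-file,
        # e.g. {"scopes": ["network"]} enables, {"scopes": []} disables.
        return scope.value in enabled_file.get("scopes", [])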
+class TestUpdateEventEnabled: + @pytest.mark.parametrize( + "cfg", + [ + {}, + {"updates": {}}, + {"updates": {"when": ["boot"]}}, + {"updates": {"when": ["hotplug"]}}, + {"updates": {"when": ["boot", "hotplug"]}}, + ], + ) + @pytest.mark.parametrize( + ["enabled_file_content", "enabled"], + [ + ({"scopes": ["network"]}, True), + ({"scopes": []}, False), + ], + ) + @mock.patch(M_PATH + "util.read_hotplug_enabled_file") + def test_hotplug_added_by_file( + self, m_read_hotplug_enabled_file, cfg, enabled_file_content, enabled + ): + m_datasource = mock.MagicMock(spec=DataSource) + m_datasource.paths = mock.MagicMock(spec=Paths) + m_datasource.default_update_events = {} + m_datasource.supported_update_events = { + EventScope.NETWORK: [EventType.HOTPLUG] + } + m_read_hotplug_enabled_file.return_value = enabled_file_content + cfg = {} + assert enabled is stages.update_event_enabled( + m_datasource, cfg, EventType.HOTPLUG, EventScope.NETWORK + ) + + class TestInit: @pytest.fixture(autouse=True) def setup(self, tmpdir): @@ -441,6 +477,10 @@ assert not self.tmpdir.join(path).exists() @mock.patch("cloudinit.distros.ubuntu.Distro") + @mock.patch.dict( + sources.DataSource.default_update_events, + {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}, + ) def test_apply_network_on_same_instance_id(self, m_ubuntu, caplog): """Only call distro.networking.apply_network_config_names on same instance id.""" @@ -608,8 +648,9 @@ yield init @mock.patch(M_PATH + "util.ensure_file") + @mock.patch(f"{M_PATH}Init._read_cfg") def test_ensure_file_not_called_if_no_log_file_configured( - self, m_ensure_file, init + self, m_read_cfg, m_ensure_file, init ): """If no log file is configured, we should not ensure its existence.""" init._cfg = {} diff -Nru cloud-init-23.4.4/tests/unittests/test_subp.py cloud-init-24.1.3/tests/unittests/test_subp.py --- cloud-init-23.4.4/tests/unittests/test_subp.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/test_subp.py 2024-03-27 13:14:04.000000000 +0000 @@ -9,7 +9,8 @@ from unittest import mock from cloudinit import subp, util -from tests.unittests.helpers import CiTestCase, get_top_level_dir +from tests.helpers import get_top_level_dir +from tests.unittests.helpers import CiTestCase BASH = subp.which("bash") BOGUS_COMMAND = "this-is-not-expected-to-be-a-program-name" @@ -118,7 +119,7 @@ (out, _err) = subp.subp(cmd.encode("utf-8"), shell=True) self.assertEqual("", out) self.assertEqual("", _err) - self.assertEqual("HI MOM\n", util.load_file(tmp_file)) + self.assertEqual("HI MOM\n", util.load_text_file(tmp_file)) def test_subp_handles_strings(self): """subp can run a string command if shell is True.""" @@ -127,7 +128,7 @@ (out, _err) = subp.subp(cmd, shell=True) self.assertEqual("", out) self.assertEqual("", _err) - self.assertEqual("HI MOM\n", util.load_file(tmp_file)) + self.assertEqual("HI MOM\n", util.load_text_file(tmp_file)) def test_subp_handles_utf8(self): # The given bytes contain utf-8 accented characters as seen in e.g. 
@@ -191,17 +192,6 @@ out, _err = subp.subp(self.printenv + ["FOO"], capture=True) self.assertEqual("FOO=BAR", out.splitlines()[0]) - def test_subp_env_and_update_env(self): - out, _err = subp.subp( - self.printenv + ["FOO", "HOME", "K1", "K2"], - capture=True, - env={"FOO": "BAR"}, - update_env={"HOME": "/myhome", "K2": "V2"}, - ) - self.assertEqual( - ["FOO=BAR", "HOME=/myhome", "K1=", "K2=V2"], out.splitlines() - ) - def test_subp_update_env(self): extra = {"FOO": "BAR", "HOME": "/root", "K1": "V1"} with mock.patch.dict("os.environ", values=extra): @@ -230,20 +220,6 @@ (noshebang,), ) - def test_subp_combined_stderr_stdout(self): - """Providing combine_capture as True redirects stderr to stdout.""" - data = b"hello world" - (out, err) = subp.subp( - self.stdin2err, - capture=True, - combine_capture=True, - decode=False, - env={"LANG": "C"}, - data=data, - ) - self.assertEqual(b"", err) - self.assertEqual(data, out) - def test_returns_none_if_no_capture(self): (out, err) = subp.subp(self.stdin2out, data=b"", capture=False) self.assertIsNone(err) @@ -263,6 +239,14 @@ self.assertTrue(isinstance(cm.exception.stdout, str)) self.assertTrue(isinstance(cm.exception.stderr, str)) + def test_exception_invalid_command(self): + args = [None, "first", "arg", "missing"] + with self.assertRaises( + subp.ProcessExecutionError, msg="Running invalid command" + ): + with self.allow_subp(args): + subp.subp(args) + def test_bunch_of_slashes_in_path(self): self.assertEqual( "/target/my/path/", subp.target_path("/target/", "//my/path/") @@ -313,38 +297,3 @@ decode=False, ) self.assertEqual(self.utf8_valid, out) - - def test_bogus_command_logs_status_messages(self): - """status_cb gets status messages logs on bogus commands provided.""" - logs = [] - - def status_cb(log): - logs.append(log) - - with self.assertRaises(subp.ProcessExecutionError): - subp.subp([BOGUS_COMMAND], status_cb=status_cb) - - expected = [ - "Begin run command: {cmd}\n".format(cmd=BOGUS_COMMAND), - "ERROR: End run command: invalid command provided\n", - ] - self.assertEqual(expected, logs) - - def test_command_logs_exit_codes_to_status_cb(self): - """status_cb gets status messages containing command exit code.""" - logs = [] - - def status_cb(log): - logs.append(log) - - with self.assertRaises(subp.ProcessExecutionError): - subp.subp([BASH, "-c", "exit 2"], status_cb=status_cb) - subp.subp([BASH, "-c", "exit 0"], status_cb=status_cb) - - expected = [ - "Begin run command: %s -c exit 2\n" % BASH, - "ERROR: End run command: exit(2)\n", - "Begin run command: %s -c exit 0\n" % BASH, - "End run command: exit(0)\n", - ] - self.assertEqual(expected, logs) diff -Nru cloud-init-23.4.4/tests/unittests/test_templating.py cloud-init-24.1.3/tests/unittests/test_templating.py --- cloud-init-23.4.4/tests/unittests/test_templating.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/test_templating.py 2024-03-27 13:14:04.000000000 +0000 @@ -6,8 +6,11 @@ import textwrap +import pytest + from cloudinit import templater -from cloudinit.util import load_file, write_file +from cloudinit.templater import JinjaSyntaxParsingException +from cloudinit.util import load_binary_file, write_file from tests.unittests import helpers as test_helpers @@ -123,7 +126,7 @@ content=self.add_header("jinja", self.jinja_utf8).encode("utf-8"), ) templater.render_to_file(tmpl_fn, out_fn, {"name": "bob"}) - result = load_file(out_fn, decode=False).decode("utf-8") + result = load_binary_file(out_fn).decode("utf-8") self.assertEqual(result, 
self.jinja_utf8_rbob) def test_jinja_nonascii_render_from_file(self): @@ -167,3 +170,136 @@ ).strip(), expected_result, ) + + +class TestJinjaSyntaxParsingException: + def test_jinja_syntax_parsing_exception_message(self): + """ + Test that the message of the JinjaSyntaxParsingException is written and + formatted as expected, and that the template is filled in correctly. + """ + jinja_template = ( + "## template: jinja\n" + "#cloud-config\n" + "runcmd:\n" + "{% if 1 == 1 % }\n" + ' - echo "1 is equal to 1"\n' + "{% endif %}\n" + ) + expected_error_msg = ( + "Unable to parse Jinja template due to syntax error: " + "unexpected '}' on line 4: {% if 1 == 1 % }" + ) + with pytest.raises(JinjaSyntaxParsingException) as excinfo: + templater.render_string(jinja_template, {}) + assert str(excinfo.value) == expected_error_msg + + @pytest.mark.parametrize( + "line_no,replace_tuple,syntax_error", + ( + ( + 4, + ("%}", "% }"), + "unexpected '}'", + ), + ( + 6, + ("%}", "% }"), + "expected token 'end of statement block', got '%'", + ), + ( + 8, + ("%}", "% }"), + "expected token 'end of statement block', got '%'", + ), + ( + 4, + ("%}", "}}"), + "unexpected '}'", + ), + ( + 6, + ("%}", "}}"), + "unexpected '}'", + ), + ( + 8, + ("%}", "}}"), + "unexpected '}'", + ), + ( + 4, + ("==", "="), + "expected token 'end of statement block', got '='", + ), + ( + 7, + ("}}", "} }"), + "unexpected '}'", + ), + ), + ) + def test_functionality_for_various_syntax_errors( + self, line_no, replace_tuple, syntax_error + ): + """ + Test a variety of jinja syntax errors and make sure the exceptions + are raised with the correct syntax error, line number, and line content + as expected. + """ + jinja_template = ( + "## template: jinja\n" + "#cloud-config\n" + "runcmd:\n" + '{% if v1.cloud_name == "unknown" %}\n' + ' - echo "Cloud name is unknown"\n' + "{% else %}\n" + ' - echo "Cloud name is known: {{ v1.cloud_name }}"\n' + "{% endif %}\n" + ) + # replace "%}" in line_no with "% }" + jinja_template = jinja_template.replace( + jinja_template.split("\n")[line_no - 1], + jinja_template.split("\n")[line_no - 1].replace(*replace_tuple), + ) + + with pytest.raises(JinjaSyntaxParsingException) as excinfo: + templater.render_string(jinja_template, {}) + error: JinjaSyntaxParsingException = excinfo.value + assert error.lineno == line_no + assert error.message == syntax_error + assert ( + error.source.splitlines()[line_no - 2] # -2 because of header + == jinja_template.splitlines()[line_no - 1] + ) + + def test_format_error_message_with_content_line(self): + expected_error_msg = ( + "Unable to parse Jinja template due to syntax error: " + "unexpected '}' on line 4: {% if 1 == 1 % }" + ) + error_msg = JinjaSyntaxParsingException.format_error_message( + syntax_error="unexpected '}'", + line_number=4, + line_content="{% if 1 == 1 % }", + ) + assert error_msg == expected_error_msg + + @pytest.mark.parametrize( + "line_content", + ( + "", + None, + ), + ) + def test_format_error_message_without_content_line(self, line_content): + expected_error_msg = ( + "Unable to parse Jinja template due to syntax error: " + "unexpected '}' on line 4" + ) + error_msg = JinjaSyntaxParsingException.format_error_message( + syntax_error="unexpected '}'", + line_number=4, + line_content=line_content, + ) + assert error_msg == expected_error_msg diff -Nru cloud-init-23.4.4/tests/unittests/test_upgrade.py cloud-init-24.1.3/tests/unittests/test_upgrade.py --- cloud-init-23.4.4/tests/unittests/test_upgrade.py 2024-02-27 15:17:52.000000000 +0000 +++ 
cloud-init-24.1.3/tests/unittests/test_upgrade.py 2024-03-27 13:14:04.000000000 +0000 @@ -13,13 +13,15 @@ import operator import pathlib +from unittest import mock import pytest -from cloudinit.sources import pkl_load +from cloudinit import importer, settings, sources, type_utils from cloudinit.sources.DataSourceAzure import DataSourceAzure from cloudinit.sources.DataSourceNoCloud import DataSourceNoCloud from tests.unittests.helpers import resourceLocation +from tests.unittests.util import MockDistro DSNAME_TO_CLASS = { "Azure": DataSourceAzure, @@ -28,6 +30,137 @@ class TestUpgrade: + # Expect the following "gaps" in unpickling per-datasource. + # The presence of these attributes existed in 20.1. + ds_expected_unpickle_attrs = { + "AltCloud": {"seed", "supported_seed_starts"}, + "AliYun": {"identity", "metadata_address", "default_update_events"}, + "Azure": { + "_ephemeral_dhcp_ctx", + "_iso_dev", + "_network_config", + "_reported_ready_marker_file", + "_route_configured_for_imds", + "_route_configured_for_wireserver", + "_wireserver_endpoint", + "cfg", + "seed", + "seed_dir", + }, + "CloudSigma": {"cepko", "ssh_public_key"}, + "CloudStack": { + "api_ver", + "cfg", + "metadata_address", + "seed_dir", + "vr_addr", + }, + "ConfigDrive": { + "_network_config", + "ec2_metadata", + "files", + "known_macs", + "network_eni", + "network_json", + "seed_dir", + "source", + "version", + }, + "DigitalOcean": { + "_network_config", + "metadata_address", + "metadata_full", + "retries", + "timeout", + "use_ip4LL", + "wait_retry", + }, + "Ec2": {"identity", "metadata_address"}, + "Exoscale": { + "api_version", + "extra_config", + "metadata_url", + "password_server_port", + "url_retries", + "url_timeout", + }, + "GCE": {"default_user", "metadata_address"}, + "Hetzner": { + "_network_config", + "dsmode", + "metadata_address", + "metadata_full", + "retries", + "timeout", + "userdata_address", + "wait_retry", + }, + "IBMCloud": {"source", "_network_config", "network_json", "platform"}, + "RbxCloud": {"cfg", "gratuitous_arp", "seed"}, + "Scaleway": { + "_network_config", + "metadata_url", + "retries", + "timeout", + }, + "Joyent": { + "_network_config", + "network_data", + "routes_data", + "script_base_d", + }, + "MAAS": {"base_url", "seed_dir"}, + "NoCloud": { + "_network_eni", + "_network_config", + "supported_seed_starts", + "seed_dir", + "seed", + "seed_dirs", + }, + "NWCS": { + "_network_config", + "dsmode", + "metadata_address", + "metadata_full", + "retries", + "timeout", + "wait_retry", + }, + "OpenNebula": {"network", "seed", "seed_dir"}, + "OpenStack": { + "ec2_metadata", + "files", + "metadata_address", + "network_json", + "ssl_details", + "version", + }, + "OVF": { + "cfg", + "environment", + "_network_config", + "seed", + "seed_dir", + "supported_seed_starts", + }, + "UpCloud": { + "_network_config", + "metadata_address", + "metadata_full", + "retries", + "timeout", + "wait_retry", + }, + "Vultr": {"netcfg"}, + "VMware": { + "data_access_method", + "rpctool", + "rpctool_fn", + }, + "WSL": {"instance_name"}, + } + @pytest.fixture( params=pathlib.Path(resourceLocation("old_pickles")).glob("*.pkl"), scope="class", @@ -39,7 +172,102 @@ Test implementations _must not_ modify the ``previous_obj_pkl`` which they are passed, as that will affect tests that run after them. 
""" - return pkl_load(str(request.param)) + return sources.pkl_load(str(request.param)) + + @pytest.mark.parametrize( + "mode", + ( + [sources.DEP_FILESYSTEM], + [sources.DEP_FILESYSTEM, sources.DEP_NETWORK], + ), + ) + @mock.patch.object( + importer, + "match_case_insensitive_module_name", + lambda name: f"DataSource{name}", + ) + def test_all_ds_init_vs_unpickle_attributes( + self, mode, mocker, paths, tmpdir + ): + """Unpickle resets any instance attributes created in __init__ + + This test asserts that deserialization of a datasource cache + does proper initialization of any 'new' instance attributes + created as a side-effect of the __init__ method. + + Without proper _unpickle coverage for newly introduced attributes, + the new deserialized instance will hit AttributeErrors at runtime. + """ + # Load all cloud-init init-local time-frame DataSource classes + for ds_class in sources.list_sources( + settings.CFG_BUILTIN["datasource_list"], + mode, + [type_utils.obj_name(sources)], + ): + # Expected common instance attrs from __init__ that are typically + # handled via existing _unpickling and setup in _get_data + common_instance_attrs = { + "paths", + "vendordata2", + "sys_cfg", + "ud_proc", + "vendordata", + "vendordata2_raw", + "ds_cfg", + "distro", + "userdata", + "userdata_raw", + "metadata", + "vendordata_raw", + } + # Grab initial specific-class attributes from magic method + class_attrs = set(ds_class.__dict__) + + # Mock known subp calls from some datasource __init__ setup + mocker.patch("cloudinit.util.is_container", return_value=False) + mocker.patch("cloudinit.dmi.read_dmi_data", return_value="") + mocker.patch("cloudinit.subp.subp", return_value=("", "")) + + # Initialize the class to grab the instance attributes from + # instance.__dict__ magic method. + ds = ds_class(sys_cfg={}, distro=MockDistro(), paths=paths) + + if getattr(ds.__class__.__bases__[0], "dsname", None) == ds.dsname: + # We are a subclass in a different boot mode (Local/Net) and + # share a common parent with class atttributes + class_attrs.update(ds.__class__.__bases__[0].__dict__) + + # Determine new instance attributes created by __init__ + # by calling the __dict__ magic method on the instance. + # Then, subtract common_instance_attrs and + # ds_expected_unpickle_attrs from the list of current attributes. + # What's left is our 'new' instance attributes added as a + # side-effect of __init__. + init_attrs = ( + set(ds.__dict__) + - class_attrs + - common_instance_attrs + - self.ds_expected_unpickle_attrs.get(ds_class.dsname, set()) + ) + + # Remove all side-effect attributes added by __init__ + for side_effect_attr in init_attrs: + delattr(ds, side_effect_attr) + + # Pickle the version of the DataSource with all init_attrs removed + sources.pkl_store(ds, tmpdir.join(f"{ds.dsname}.obj.pkl")) + + # Reload the pickled bare-bones datasource to ensure all instance + # attributes are reconstituted by _unpickle helpers. 
+ ds2 = sources.pkl_load(tmpdir.join(f"{ds.dsname}.obj.pkl")) + unpickled_attrs = ( + set(ds2.__dict__) - class_attrs - common_instance_attrs + ) + missing_unpickled_attrs = init_attrs - unpickled_attrs + assert not missing_unpickled_attrs, ( + f"New {ds_class.dsname} attributes need unpickle coverage:" + f" {missing_unpickled_attrs}" + ) def test_pkl_load_defines_all_init_side_effect_attributes( self, previous_obj_pkl diff -Nru cloud-init-23.4.4/tests/unittests/test_url_helper.py cloud-init-24.1.3/tests/unittests/test_url_helper.py --- cloud-init-23.4.4/tests/unittests/test_url_helper.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/test_url_helper.py 2024-03-27 13:14:04.000000000 +0000 @@ -1,9 +1,12 @@ # This file is part of cloud-init. See LICENSE file for license information. +# pylint: disable=attribute-defined-outside-init import logging +import re from functools import partial from threading import Event from time import process_time +from unittest.mock import ANY, call import pytest import requests @@ -444,20 +447,72 @@ """Assert expected call intervals occur""" stagger = 0.1 with mock.patch(M_PATH + "_run_func_with_delay") as delay_func: + + def identity_of_first_arg(x, _): + return x + dual_stack( - lambda x, _y: x, + identity_of_first_arg, ["you", "and", "me", "and", "dog"], stagger_delay=stagger, timeout=1, ) - # ensure that stagger delay for each subsequent call is: + # ensure that stagger delay for each call is made with args: # [ 0 * N, 1 * N, 2 * N, 3 * N, 4 * N, 5 * N] where N = stagger # it appears that without an explicit wait/join we can't assert # number of calls - for delay, call_item in enumerate(delay_func.call_args_list): - _, kwargs = call_item - assert stagger * delay == kwargs.get("delay") + calls = [ + call( + func=identity_of_first_arg, + addr="you", + timeout=1, + event=ANY, + delay=stagger * 0, + ), + call( + func=identity_of_first_arg, + addr="and", + timeout=1, + event=ANY, + delay=stagger * 1, + ), + call( + func=identity_of_first_arg, + addr="me", + timeout=1, + event=ANY, + delay=stagger * 2, + ), + call( + func=identity_of_first_arg, + addr="and", + timeout=1, + event=ANY, + delay=stagger * 3, + ), + call( + func=identity_of_first_arg, + addr="dog", + timeout=1, + event=ANY, + delay=stagger * 4, + ), + ] + num_calls = 0 + for call_instance in calls: + if call_instance in delay_func.call_args_list: + num_calls += 1 + + # we can't know the order of the submitted functions' execution + # we can't know how many of the submitted functions get called + # in advance + # + # we _do_ know what the possible arg combinations are + # we _do_ know from the mocked function how many got called + # assert that all calls that occurred had known valid arguments + # by checking for the correct number of matches + assert num_calls == len(delay_func.call_args_list) ADDR1 = "https://addr1/" @@ -470,6 +525,21 @@ fail = "FAIL" event = Event() + @pytest.fixture + def retry_mocks(self, mocker): + self.mock_time_value = 0 + m_readurl = mocker.patch( + f"{M_PATH}readurl", side_effect=self.readurl_side_effect + ) + m_sleep = mocker.patch( + f"{M_PATH}time.sleep", side_effect=self.sleep_side_effect + ) + mocker.patch(f"{M_PATH}time.time", side_effect=self.time_side_effect) + + yield m_readurl, m_sleep + + self.mock_time_value = 0 + @classmethod def response_wait(cls, _request): cls.event.wait(0.1) @@ -530,7 +600,7 @@ assert response.encode() == response_contents @responses.activate - def test_timeout(self): + def test_timeout(self, caplog): """If no 
endpoint responds in time, expect no response""" self.event.clear() @@ -540,7 +610,7 @@ responses.GET, address, callback=( - self.response_wait + requests.ConnectTimeout if "sleep" in address else self.response_nowait ), @@ -558,3 +628,74 @@ self.event.set() assert not url assert not response_contents + assert re.search( + r"open 'https:\/\/sleep1\/'.*Timed out", caplog.text, re.DOTALL + ) + + def test_explicit_arguments(self, retry_mocks): + """Ensure that explicit arguments are respected""" + m_readurl, m_sleep = retry_mocks + wait_for_url( + urls=["http://localhost/"], + max_wait=23, + timeout=5, + sleep_time=3, + ) + + assert len(m_readurl.call_args_list) == 3 + assert len(m_sleep.call_args_list) == 2 + + for readurl_call in m_readurl.call_args_list: + assert readurl_call[1]["timeout"] == 5 + for sleep_call in m_sleep.call_args_list: + assert sleep_call[0][0] == 3 + + # Call 1 starts 0 + # Call 2 starts at 8-ish after 5 second timeout and 3 second sleep + # Call 3 starts at 16-ish for same reasons + # The 5 second timeout puts us at 21-ish and now we break + # because 21-ish + the sleep time puts us over max wait of 23 + assert pytest.approx(self.mock_time_value) == 21 + + def test_shortened_timeout(self, retry_mocks): + """Test that we shorten the last timeout to align with max_wait""" + m_readurl, _m_sleep = retry_mocks + wait_for_url( + urls=["http://localhost/"], max_wait=10, timeout=9, sleep_time=0 + ) + + assert len(m_readurl.call_args_list) == 2 + assert m_readurl.call_args_list[-1][1]["timeout"] == pytest.approx(1) + + def test_default_sleep_time(self, retry_mocks): + """Test default sleep behavior when not specified""" + _m_readurl, m_sleep = retry_mocks + wait_for_url( + urls=["http://localhost/"], + max_wait=50, + timeout=1, + ) + + expected_sleep_times = [1] * 5 + [2] * 5 + [3] * 5 + actual_sleep_times = [ + m_sleep.call_args_list[i][0][0] + for i in range(len(m_sleep.call_args_list)) + ] + assert actual_sleep_times == expected_sleep_times + + # These side effect methods are a way of having a somewhat predictable + # output for time.time(). Otherwise, we have to track too many calls + # to time.time() and unrelated changes to code being called could cause + # these tests to fail. 
+ # 0.0000001 is added to simulate additional execution time but keep it + # small enough for pytest.approx() to work + def sleep_side_effect(self, sleep_time): + self.mock_time_value += sleep_time + 0.0000001 + + def time_side_effect(self): + return self.mock_time_value + + def readurl_side_effect(self, *args, **kwargs): + if "timeout" in kwargs: + self.mock_time_value += kwargs["timeout"] + 0.0000001 + raise UrlError("test") diff -Nru cloud-init-23.4.4/tests/unittests/test_util.py cloud-init-24.1.3/tests/unittests/test_util.py --- cloud-init-23.4.4/tests/unittests/test_util.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/test_util.py 2024-03-27 13:14:04.000000000 +0000 @@ -23,6 +23,7 @@ import yaml from cloudinit import atomic_helper, features, importer, subp, url_helper, util +from cloudinit.distros import Distro from cloudinit.helpers import Paths from cloudinit.sources import DataSourceHostname from cloudinit.subp import SubpResult @@ -443,18 +444,20 @@ assert is_rw is False def test_read_conf(self, mocker): - mocker.patch("cloudinit.util.load_file", return_value='{"a": "b"}') + mocker.patch( + "cloudinit.util.load_text_file", return_value='{"a": "b"}' + ) assert util.read_conf("any") == {"a": "b"} @skipUnlessJinja() def test_read_conf_with_template(self, mocker, caplog): mocker.patch("os.path.exists", return_value=True) mocker.patch( - "cloudinit.util.load_file", + "cloudinit.util.load_text_file", return_value='## template: jinja\n{"a": "{{c}}"}', ) mocker.patch( - "cloudinit.handlers.jinja_template.load_file", + "cloudinit.handlers.jinja_template.load_text_file", return_value='{"c": "d"}', ) @@ -466,14 +469,14 @@ ) in caplog.text @skipUnlessJinja() - def test_read_conf_with_failed_template(self, mocker, caplog): + def test_read_conf_with_failed_config_json(self, mocker, caplog): mocker.patch("os.path.exists", return_value=True) mocker.patch( - "cloudinit.util.load_file", + "cloudinit.util.load_text_file", return_value='## template: jinja\n{"a": "{{c}}"', # missing } ) mocker.patch( - "cloudinit.handlers.jinja_template.load_file", + "cloudinit.handlers.jinja_template.load_text_file", return_value='{"c": "d"}', ) conf = util.read_conf("cfg_path", instance_data_file="vars_path") @@ -481,20 +484,48 @@ assert conf == {} @skipUnlessJinja() - def test_read_conf_with_failed_vars(self, mocker, caplog): + def test_read_conf_with_failed_instance_data_json(self, mocker, caplog): mocker.patch("os.path.exists", return_value=True) mocker.patch( - "cloudinit.util.load_file", + "cloudinit.util.load_text_file", return_value='## template: jinja\n{"a": "{{c}}"}', ) mocker.patch( - "cloudinit.handlers.jinja_template.load_file", + "cloudinit.handlers.jinja_template.load_text_file", return_value='{"c": "d"', # missing } ) conf = util.read_conf("cfg_path", instance_data_file="vars_path") assert "Could not apply Jinja template" in caplog.text assert conf == {"a": "{{c}}"} + @pytest.mark.parametrize( + "template", + [ + '{"a": "{{c} } }"', + '{"a": "{{c} } "', + "{% if c %} C is present {% else % } C is NOT present {% endif %}", + ], + ) + @skipUnlessJinja() + def test_read_conf_with_config_invalid_jinja_syntax( + self, mocker, caplog, template + ): + mocker.patch("os.path.exists", return_value=True) + mocker.patch( + "cloudinit.util.load_text_file", + return_value="## template: jinja\n" + template, + ) + mocker.patch( + "cloudinit.handlers.jinja_template.load_text_file", + return_value='{"c": "d"}', + ) + conf = util.read_conf("cfg_path", instance_data_file="vars_path") + assert 
( + "Failed to render templated yaml config file 'cfg_path'" + in caplog.text + ) + assert conf == {} + @mock.patch( M_PATH + "read_conf", side_effect=(OSError(errno.EACCES, "Not allowed"), {"0": "0"}), @@ -515,10 +546,7 @@ confs[i].write("{}") assert {"0": "0"} == util.read_conf_d(tmpdir) assert ( - caplog.text.count( - f"REDACTED config part {tmpdir}/conf-1.cfg for non-root user" - ) - == 1 + caplog.text.count(f"REDACTED config part {tmpdir}/conf-1.cfg") == 1 ) assert m_read_conf.call_count == 2 out, err = capsys.readouterr() @@ -559,12 +587,7 @@ if create_confd: confd_fn = tmpdir.mkdir("conf.cfg.d") util.read_conf_with_confd(conf_fn) - assert ( - caplog.text.count( - f"REDACTED config part {conf_fn} for non-root user" - ) - == 1 - ) + assert caplog.text.count(f"REDACTED config part {conf_fn}") == 1 assert m_read_conf.call_count == 1 out, err = capsys.readouterr() assert not out @@ -617,7 +640,7 @@ util.sym_link(target2, link, force=True) self.assertTrue(os.path.exists(link)) - self.assertEqual("hello2", util.load_file(link)) + self.assertEqual("hello2", util.load_text_file(link)) def test_sym_link_dangling_link(self): tmpd = self.tmp_dir() @@ -945,7 +968,7 @@ if path == "/etc/redhat-release": return 1 - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists): """Verify we get the correct name if the os-release file has the distro name in quotes""" @@ -954,7 +977,7 @@ dist = util.get_linux_distro() self.assertEqual(("sles", "12.3", platform.machine()), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_distro_bare_name(self, m_os_release, m_path_exists): """Verify we get the correct name if the os-release file does not have the distro name in quotes""" @@ -982,7 +1005,7 @@ dist = util.get_linux_distro() self.assertEqual(("freebsd", "12.0-RELEASE-p10", ""), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_centos6(self, m_os_release, m_path_exists): """Verify we get the correct name and release name on CentOS 6.""" m_os_release.return_value = REDHAT_RELEASE_CENTOS_6 @@ -990,7 +1013,7 @@ dist = util.get_linux_distro() self.assertEqual(("centos", "6.10", "Final"), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_centos7_redhat_release(self, m_os_release, m_exists): """Verify the correct release info on CentOS 7 without os-release.""" m_os_release.return_value = REDHAT_RELEASE_CENTOS_7 @@ -998,7 +1021,7 @@ dist = util.get_linux_distro() self.assertEqual(("centos", "7.5.1804", "Core"), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_redhat7_osrelease(self, m_os_release, m_path_exists): """Verify redhat 7 read from os-release.""" m_os_release.return_value = OS_RELEASE_REDHAT_7 @@ -1006,7 +1029,7 @@ dist = util.get_linux_distro() self.assertEqual(("redhat", "7.5", "Maipo"), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_redhat7_rhrelease(self, m_os_release, m_path_exists): """Verify redhat 7 read from redhat-release.""" m_os_release.return_value = REDHAT_RELEASE_REDHAT_7 @@ -1014,7 +1037,7 @@ dist = util.get_linux_distro() self.assertEqual(("redhat", "7.5", "Maipo"), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_redhat6_rhrelease(self, m_os_release, m_path_exists): """Verify redhat 6 read 
from redhat-release.""" m_os_release.return_value = REDHAT_RELEASE_REDHAT_6 @@ -1022,7 +1045,7 @@ dist = util.get_linux_distro() self.assertEqual(("redhat", "6.10", "Santiago"), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_copr_centos(self, m_os_release, m_path_exists): """Verify we get the correct name and release name on COPR CentOS.""" m_os_release.return_value = OS_RELEASE_CENTOS @@ -1030,7 +1053,7 @@ dist = util.get_linux_distro() self.assertEqual(("centos", "7", "Core"), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_almalinux8_rhrelease(self, m_os_release, m_path_exists): """Verify almalinux 8 read from redhat-release.""" m_os_release.return_value = REDHAT_RELEASE_ALMALINUX_8 @@ -1038,7 +1061,7 @@ dist = util.get_linux_distro() self.assertEqual(("almalinux", "8.3", "Purple Manul"), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_almalinux8_osrelease(self, m_os_release, m_path_exists): """Verify almalinux 8 read from os-release.""" m_os_release.return_value = OS_RELEASE_ALMALINUX_8 @@ -1046,7 +1069,7 @@ dist = util.get_linux_distro() self.assertEqual(("almalinux", "8.3", "Purple Manul"), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_eurolinux7_rhrelease(self, m_os_release, m_path_exists): """Verify eurolinux 7 read from redhat-release.""" m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_7 @@ -1054,7 +1077,7 @@ dist = util.get_linux_distro() self.assertEqual(("eurolinux", "7.9", "Minsk"), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_eurolinux7_osrelease(self, m_os_release, m_path_exists): """Verify eurolinux 7 read from os-release.""" m_os_release.return_value = OS_RELEASE_EUROLINUX_7 @@ -1062,7 +1085,7 @@ dist = util.get_linux_distro() self.assertEqual(("eurolinux", "7.9", "Minsk"), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_eurolinux8_rhrelease(self, m_os_release, m_path_exists): """Verify eurolinux 8 read from redhat-release.""" m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_8 @@ -1070,7 +1093,7 @@ dist = util.get_linux_distro() self.assertEqual(("eurolinux", "8.4", "Vaduz"), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_eurolinux8_osrelease(self, m_os_release, m_path_exists): """Verify eurolinux 8 read from os-release.""" m_os_release.return_value = OS_RELEASE_EUROLINUX_8 @@ -1078,7 +1101,7 @@ dist = util.get_linux_distro() self.assertEqual(("eurolinux", "8.4", "Vaduz"), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_miraclelinux8_rhrelease( self, m_os_release, m_path_exists ): @@ -1088,7 +1111,7 @@ dist = util.get_linux_distro() self.assertEqual(("miracle", "8.4", "Peony"), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_miraclelinux8_osrelease( self, m_os_release, m_path_exists ): @@ -1098,7 +1121,7 @@ dist = util.get_linux_distro() self.assertEqual(("miraclelinux", "8", "Peony"), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_rocky8_rhrelease(self, m_os_release, m_path_exists): """Verify rocky linux 8 read from redhat-release.""" m_os_release.return_value = REDHAT_RELEASE_ROCKY_8 @@ -1106,7 +1129,7 @@ dist = 
util.get_linux_distro() self.assertEqual(("rocky", "8.3", "Green Obsidian"), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_rocky8_osrelease(self, m_os_release, m_path_exists): """Verify rocky linux 8 read from os-release.""" m_os_release.return_value = OS_RELEASE_ROCKY_8 @@ -1114,7 +1137,7 @@ dist = util.get_linux_distro() self.assertEqual(("rocky", "8.3", "Green Obsidian"), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_virtuozzo8_rhrelease(self, m_os_release, m_path_exists): """Verify virtuozzo linux 8 read from redhat-release.""" m_os_release.return_value = REDHAT_RELEASE_VIRTUOZZO_8 @@ -1122,7 +1145,7 @@ dist = util.get_linux_distro() self.assertEqual(("virtuozzo", "8", "Virtuozzo Linux"), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_virtuozzo8_osrelease(self, m_os_release, m_path_exists): """Verify virtuozzo linux 8 read from os-release.""" m_os_release.return_value = OS_RELEASE_VIRTUOZZO_8 @@ -1130,7 +1153,7 @@ dist = util.get_linux_distro() self.assertEqual(("virtuozzo", "8", "Virtuozzo Linux"), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_cloud8_rhrelease(self, m_os_release, m_path_exists): """Verify cloudlinux 8 read from redhat-release.""" m_os_release.return_value = REDHAT_RELEASE_CLOUDLINUX_8 @@ -1138,7 +1161,7 @@ dist = util.get_linux_distro() self.assertEqual(("cloudlinux", "8.4", "Valery Rozhdestvensky"), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_cloud8_osrelease(self, m_os_release, m_path_exists): """Verify cloudlinux 8 read from os-release.""" m_os_release.return_value = OS_RELEASE_CLOUDLINUX_8 @@ -1146,7 +1169,7 @@ dist = util.get_linux_distro() self.assertEqual(("cloudlinux", "8.4", "Valery Rozhdestvensky"), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_debian(self, m_os_release, m_path_exists): """Verify we get the correct name and release name on Debian.""" m_os_release.return_value = OS_RELEASE_DEBIAN @@ -1154,7 +1177,7 @@ dist = util.get_linux_distro() self.assertEqual(("debian", "9", "stretch"), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_openeuler(self, m_os_release, m_path_exists): """Verify get the correct name and release name on Openeuler.""" m_os_release.return_value = OS_RELEASE_OPENEULER_20 @@ -1162,7 +1185,7 @@ dist = util.get_linux_distro() self.assertEqual(("openEuler", "20.03", "LTS-SP2"), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_opencloudos(self, m_os_release, m_path_exists): """Verify get the correct name and release name on OpenCloudOS.""" m_os_release.return_value = OS_RELEASE_OPENCLOUDOS_8 @@ -1170,7 +1193,7 @@ dist = util.get_linux_distro() self.assertEqual(("OpenCloudOS", "8.6", ""), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_tencentos(self, m_os_release, m_path_exists): """Verify get the correct name and release name on TencentOS.""" m_os_release.return_value = OS_RELEASE_TENCENTOS_3 @@ -1178,7 +1201,7 @@ dist = util.get_linux_distro() self.assertEqual(("TencentOS", "3.1", ""), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_opensuse(self, m_os_release, m_path_exists): """Verify we 
get the correct name and machine arch on openSUSE prior to openSUSE Leap 15. @@ -1188,7 +1211,7 @@ dist = util.get_linux_distro() self.assertEqual(("opensuse", "42.3", platform.machine()), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_opensuse_l15(self, m_os_release, m_path_exists): """Verify we get the correct name and machine arch on openSUSE for openSUSE Leap 15.0 and later. @@ -1198,7 +1221,7 @@ dist = util.get_linux_distro() self.assertEqual(("opensuse-leap", "15.0", platform.machine()), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_opensuse_tw(self, m_os_release, m_path_exists): """Verify we get the correct name and machine arch on openSUSE for openSUSE Tumbleweed @@ -1210,7 +1233,7 @@ ("opensuse-tumbleweed", "20180920", platform.machine()), dist ) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_photon_os_release(self, m_os_release, m_path_exists): """Verify we get the correct name and machine arch on PhotonOS""" m_os_release.return_value = OS_RELEASE_PHOTON @@ -1218,7 +1241,7 @@ dist = util.get_linux_distro() self.assertEqual(("photon", "4.0", "VMware Photon OS/Linux"), dist) - @mock.patch("cloudinit.util.load_file") + @mock.patch("cloudinit.util.load_text_file") def test_get_linux_mariner_os_release(self, m_os_release, m_path_exists): """Verify we get the correct name and machine arch on MarinerOS""" m_os_release.return_value = OS_RELEASE_MARINER @@ -1226,7 +1249,7 @@ dist = util.get_linux_distro() self.assertEqual(("mariner", "2.0", ""), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_openmandriva(self, m_os_release, m_path_exists): """Verify we get the correct name and machine arch on OpenMandriva""" m_os_release.return_value = OS_RELEASE_OPENMANDRIVA @@ -1234,7 +1257,7 @@ dist = util.get_linux_distro() self.assertEqual(("openmandriva", "4.90", "nickel"), dist) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_get_linux_cos(self, m_os_release, m_path_exists): """Verify we get the correct name and machine arch on COS""" m_os_release.return_value = OS_RELEASE_COS @@ -1997,7 +2020,7 @@ pytest.param("1", True, id="true_when_fips_enabled_no_newline"), ), ) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_text_file") def test_fips_enabled_based_on_proc_crypto( self, load_file, fips_enabled_content, expected, tmpdir ): @@ -2154,54 +2177,6 @@ expected = ("none", "tmpfs", "/run/lock") self.assertEqual(expected, util.parse_mount_info("/run/lock", lines)) - @mock.patch(M_PATH + "os") - @mock.patch("cloudinit.subp.subp") - def test_get_device_info_from_zpool(self, zpool_output, m_os): - # mock /dev/zfs exists - m_os.path.exists.return_value = True - # mock subp command from util.get_mount_info_fs_on_zpool - zpool_output.return_value = ( - helpers.readResource("zpool_status_simple.txt"), - "", - ) - # save function return values and do asserts - ret = util.get_device_info_from_zpool("vmzroot") - self.assertEqual("gpt/system", ret) - self.assertIsNotNone(ret) - m_os.path.exists.assert_called_with("/dev/zfs") - - @mock.patch(M_PATH + "os") - def test_get_device_info_from_zpool_no_dev_zfs(self, m_os): - # mock /dev/zfs missing - m_os.path.exists.return_value = False - # save function return values and do asserts - ret = util.get_device_info_from_zpool("vmzroot") - self.assertIsNone(ret) - - @mock.patch(M_PATH + "os") - 
@mock.patch("cloudinit.subp.subp") - def test_get_device_info_from_zpool_handles_no_zpool(self, m_sub, m_os): - """Handle case where there is no zpool command""" - # mock /dev/zfs exists - m_os.path.exists.return_value = True - m_sub.side_effect = subp.ProcessExecutionError("No zpool cmd") - ret = util.get_device_info_from_zpool("vmzroot") - self.assertIsNone(ret) - - @mock.patch(M_PATH + "os") - @mock.patch("cloudinit.subp.subp") - def test_get_device_info_from_zpool_on_error(self, zpool_output, m_os): - # mock /dev/zfs exists - m_os.path.exists.return_value = True - # mock subp command from util.get_mount_info_fs_on_zpool - zpool_output.return_value = ( - helpers.readResource("zpool_status_simple.txt"), - "error", - ) - # save function return values and do asserts - ret = util.get_device_info_from_zpool("vmzroot") - self.assertIsNone(ret) - @mock.patch("cloudinit.subp.subp") def test_parse_mount_with_ext(self, mount_out): mount_out.return_value = ( @@ -2316,6 +2291,30 @@ ), ) + def test_output_logs_parsed_when_teeing_files_and_rotated(self): + """When output configuration is parsed when teeing files and rotated + log files are present.""" + tmpd = self.tmp_dir() + log1 = self.tmp_path("my.log", tmpd) + log1_rotated = self.tmp_path("my.log.1.gz", tmpd) + log2 = self.tmp_path("himom.log", tmpd) + log2_rotated = self.tmp_path("himom.log.1.gz", tmpd) + + util.write_file(log1_rotated, "hello") + util.write_file(log2_rotated, "hello") + + self.assertEqual( + [log2, log2_rotated, log1, log1_rotated], + sorted( + util.get_config_logfiles( + { + "def_log_file": str(log1), + "output": {"all": f"|tee -a {log2}"}, + } + ) + ), + ) + class TestMultiLog(helpers.FilesystemMockingTestCase): def _createConsole(self, root): @@ -2757,7 +2756,7 @@ # return the value portion of key=val decoded. return blob.split(b"=", 1)[1].decode(encoding, errors) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_binary_file") def test_non_utf8_in_environment(self, m_load_file): """env may have non utf-8 decodable content.""" content = self.null.join( @@ -2776,7 +2775,7 @@ ) self.assertEqual(1, m_load_file.call_count) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_binary_file") def test_encoding_none_returns_bytes(self, m_load_file): """encoding none returns bytes.""" lines = (self.bootflag, self.simple1, self.simple2, self.mixed) @@ -2789,7 +2788,7 @@ ) self.assertEqual(1, m_load_file.call_count) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_binary_file") def test_all_utf8_encoded(self, m_load_file): """common path where only utf-8 decodable content.""" content = self.null.join((self.simple1, self.simple2)) @@ -2799,7 +2798,7 @@ ) self.assertEqual(1, m_load_file.call_count) - @mock.patch(M_PATH + "load_file") + @mock.patch(M_PATH + "load_binary_file") def test_non_existing_file_returns_empty_dict(self, m_load_file): """as implemented, a non-existing pid returns empty dict. 
This is how it was originally implemented.""" @@ -2812,27 +2811,25 @@ """test get_proc_ppid""" @skipIf(not util.is_Linux(), "/proc/$pid/stat is not useful on not-Linux") - @mock.patch(M_PATH + "is_Linux") - def test_get_proc_ppid_linux(self, m_is_Linux): + def test_get_proc_ppid_linux(self): """get_proc_ppid returns correct parent pid value.""" - m_is_Linux.return_value = True my_pid = os.getpid() my_ppid = os.getppid() - self.assertEqual(my_ppid, util.get_proc_ppid(my_pid)) + self.assertEqual(my_ppid, Distro.get_proc_ppid(my_pid)) + + @skipIf(not util.is_Linux(), "/proc/$pid/stat is not useful on not-Linux") + def test_get_proc_pgrp_linux(self): + """get_proc_ppid returns correct parent pid value.""" + self.assertEqual(os.getpgid(0), Distro.get_proc_pgid(os.getpid())) @pytest.mark.allow_subp_for("ps") - @mock.patch(M_PATH + "is_Linux") - def test_get_proc_ppid_ps(self, m_is_Linux): + def test_get_proc_ppid_ps(self): """get_proc_ppid returns correct parent pid value.""" - m_is_Linux.return_value = False my_pid = os.getpid() my_ppid = os.getppid() - self.assertEqual(my_ppid, util.get_proc_ppid(my_pid)) - - @mock.patch(M_PATH + "is_Linux") - def test_get_proc_ppid_mocked(self, m_is_Linux): - m_is_Linux.return_value = True + self.assertEqual(my_ppid, Distro.get_proc_ppid(my_pid)) + def test_get_proc_ppid_mocked(self): for ppid, proc_data in ( ( 0, @@ -2867,11 +2864,20 @@ "0 0 0 0 20 0 1 0 4136 175616000 1394 18446744073709551615 1 1" "0 0 0 0 0 4096 0 0 0 0 17 8 0 0 0 0 0 0 0 0 0 0 0 0 0", ), + ( + 144855, + "167644 (python) R 144855 167644 144855 34819 167644 4194304 " + "12692 0 0 0 114 10 0 0 20 0 1 0 8929754 69824512 13959 " + "18446744073709551615 4321280 7154413 140733469268592 0 0 0 0 " + "16781312 1258 0 0 0 17 0 0 0 0 0 0 9719240 11022936 13484032 " + "140733469277329 140733469277436 140733469277436 " + "140733469282250 0", + ), ): with mock.patch( - "cloudinit.util.load_file", return_value=proc_data + "cloudinit.util.load_text_file", return_value=proc_data ): - assert ppid == util.get_proc_ppid("mocked") + assert ppid == Distro.get_proc_ppid(-999) class TestHuman2Bytes: @@ -3170,3 +3176,47 @@ ) def test_happy_path(self, in_data, expected): assert expected == util.maybe_b64decode(in_data) + + +class MockPath: + def __init__(self, target_file="/does/not/exist"): + self.target_file = target_file + + def get_cpath(self, *args): + assert args == ( + "hotplug.enabled", + ), f"Invalid get_cpath argument {args}" + return self.target_file + + +@pytest.mark.usefixtures("fake_filesystem") +class TestReadHotplugEnabledFile: + def test_file_not_found(self, caplog): + assert {"scopes": []} == util.read_hotplug_enabled_file(MockPath()) + assert "enabled because it is not decodable" not in caplog.text + + def test_json_decode_error(self, caplog, tmpdir): + target_file = ( + tmpdir.mkdir("var") + .mkdir("lib") + .mkdir("cloud") + .join("hotplug.enabled") + ) + target_file.write("asdfasdfa") + assert {"scopes": []} == util.read_hotplug_enabled_file( + MockPath(target_file.strpath) + ) + assert "not decodable" in caplog.text + + @pytest.mark.parametrize("content", ['{"scopes": ["network"]}']) + def test_file_present(self, content, caplog, tmpdir): + target_file = ( + tmpdir.mkdir("var") + .mkdir("lib") + .mkdir("cloud") + .join("hotplug.enabled") + ) + target_file.write(content) + assert {"scopes": ["network"]} == util.read_hotplug_enabled_file( + MockPath(target_file.strpath) + ) diff -Nru cloud-init-23.4.4/tests/unittests/util.py cloud-init-24.1.3/tests/unittests/util.py --- 
cloud-init-23.4.4/tests/unittests/util.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tests/unittests/util.py 2024-03-27 13:14:04.000000000 +0000 @@ -2,6 +2,7 @@ from unittest import mock from cloudinit import cloud, distros, helpers +from cloudinit.net.dhcp import IscDhclient from cloudinit.sources import DataSource, DataSourceHostname from cloudinit.sources.DataSourceNone import DataSourceNone @@ -32,7 +33,9 @@ myds.metadata.update(metadata) if paths: paths.datasource = myds - return cloud.Cloud(myds, paths, sys_cfg, mydist, None) + return cloud.Cloud( + myds, paths, sys_cfg, mydist, runners=helpers.Runners(paths) + ) def abstract_to_concrete(abclass): @@ -53,10 +56,6 @@ return True @property - def fallback_interface(self): - return None - - @property def cloud_name(self): return "testing" @@ -64,12 +63,22 @@ class MockDistro(distros.Distro): # MockDistro is here to test base Distro class implementations def __init__(self, name="testingdistro", cfg=None, paths=None): + self._client = None if not cfg: cfg = {} if not paths: paths = {} super(MockDistro, self).__init__(name, cfg, paths) + @property + def dhcp_client(self): + if not self._client: + with mock.patch( + "cloudinit.net.dhcp.subp.which", return_value=True + ): + self._client = IscDhclient() + return self._client + def install_packages(self, pkglist): pass @@ -80,15 +89,20 @@ def uses_systemd(): return True + @staticmethod + def get_proc_ppid(_): + return 1 + + @staticmethod + def get_proc_pgid(_): + return 99999 + def get_primary_arch(self): return "i386" def get_package_mirror_info(self, arch=None, data_source=None): pass - def apply_network(self, settings, bring_up=True): - return False - def generate_fallback_config(self): return {} diff -Nru cloud-init-23.4.4/tools/.github-cla-signers cloud-init-24.1.3/tools/.github-cla-signers --- cloud-init-23.4.4/tools/.github-cla-signers 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tools/.github-cla-signers 2024-03-27 13:14:04.000000000 +0000 @@ -6,6 +6,8 @@ ajmyyra akutz AlexBaranowski +AlexSv04047 +AliyevH Aman306 andgein andrew-lee-metaswitch @@ -13,6 +15,7 @@ andrewlukoshko ani-sinha antonyc +apollo13 aswinrajamannar bdrung beantaxi @@ -25,6 +28,7 @@ brianphaley CalvoM candlerb +CarlosNihelton catmsred cawamata cclauss @@ -38,6 +42,7 @@ cvstealth dankenigsberg dankm +dark2phoenix david-caro dbungert ddymko diff -Nru cloud-init-23.4.4/tools/Z99-cloud-locale-test.sh cloud-init-24.1.3/tools/Z99-cloud-locale-test.sh --- cloud-init-23.4.4/tools/Z99-cloud-locale-test.sh 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tools/Z99-cloud-locale-test.sh 2024-03-27 13:14:04.000000000 +0000 @@ -18,7 +18,7 @@ $_local w1 w2 w3 w4 remain # if shell is zsh, act like sh only for this function (-L). - # The behavior change will not permenently affect user's shell. + # The behavior change will not permanently affect user's shell. [ "${ZSH_NAME+zsh}" = "zsh" ] && emulate -L sh # locale is expected to output either: diff -Nru cloud-init-23.4.4/tools/build-on-openbsd cloud-init-24.1.3/tools/build-on-openbsd --- cloud-init-23.4.4/tools/build-on-openbsd 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tools/build-on-openbsd 2024-03-27 13:14:04.000000000 +0000 @@ -2,6 +2,12 @@ fail() { echo "FAILED:" "$@" 1>&2; exit 1; } +PYTHON=${PYTHON:-python3} +if ! command -v ${PYTHON} >/dev/null 2>&1; then + echo "Please install python first." 
+ exit 1 +fi + # Check dependencies: depschecked=/tmp/c-i.dependencieschecked pkgs=" @@ -16,12 +22,42 @@ py3-setuptools py3-yaml sudo-- + wget " -[ -f "$depschecked" ] || pkg_add "${pkgs}" || fail "install packages" -touch $depschecked +[ -f $depschecked ] || echo "Installing the following packages: $pkgs"; output=$(pkg_add -zI $pkgs 2>&1) + + +if echo "$output" | grep -q -e "Can't find" -e "Ambiguous"; then + echo "Failed to find or install one or more packages" + echo "Failed Package(s):" + echo "$output" + exit 1 +else + echo Successfully installed packages + touch $depschecked + + python3 setup.py build + python3 setup.py install -O1 --distro openbsd --skip-build --init-system sysvinit_openbsd + + echo "Installation completed." + RC_LOCAL="/etc/rc.local" + RC_LOCAL_CONTENT=" -python3 setup.py build -python3 setup.py install -O1 --distro openbsd --skip-build +/usr/local/lib/cloud-init/ds-identify -echo "Installation completed." +cloud-init init --local + +cloud-init init + +cloud-init modules --mode config + +cloud-init modules --mode final +" + if ! test -e $RC_LOCAL; then + echo "export PATH=$PATH:/usr/local/sbin:/usr/local/bin" >> $RC_LOCAL + echo "$RC_LOCAL_CONTENT" >> $RC_LOCAL + elif ! grep -Fq "cloud-init" $RC_LOCAL; then + echo "$RC_LOCAL_CONTENT" >> $RC_LOCAL + fi +fi diff -Nru cloud-init-23.4.4/tools/ccfg-merge-debug cloud-init-24.1.3/tools/ccfg-merge-debug --- cloud-init-23.4.4/tools/ccfg-merge-debug 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tools/ccfg-merge-debug 2024-03-27 13:14:04.000000000 +0000 @@ -60,7 +60,7 @@ # Walk the user data part_data = { 'handlers': c_handlers, - # Any new handlers that are encountered get writen here + # Any new handlers that are encountered get written here 'handlerdir': handler_dir, 'data': data, # The default frequency if handlers don't have one diff -Nru cloud-init-23.4.4/tools/cloud-init-hotplugd cloud-init-24.1.3/tools/cloud-init-hotplugd --- cloud-init-23.4.4/tools/cloud-init-hotplugd 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-24.1.3/tools/cloud-init-hotplugd 2024-03-27 13:14:04.000000000 +0000 @@ -0,0 +1,23 @@ +#!/bin/sh +# This file is part of cloud-init. See LICENSE file for license information. + +# This script is used on non-systemd systems. It is called by the +# cloud-init-hotplug init.d script. +# +# Creates a named pipe and then continually listens to this pipe. The pipe +# is written to by the hook-hotplug script (which is called by a udev rule +# upon a network device event). Anything received via the pipe is then +# passed on via the "cloud-init devel hotplug-hook handle" command. 
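+#
+# Illustration only (device names and paths below are hypothetical): the
+# hook-hotplug script writes a single line of arguments into the pipe, e.g.
+#   --subsystem=net handle --devpath=/devices/pci0000:00/.../net/eth1 --udevaction=add
+# and the read loop below turns that line into
+#   /usr/bin/cloud-init devel hotplug-hook --subsystem=net handle \
+#       --devpath=/devices/pci0000:00/.../net/eth1 --udevaction=add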
+ +PIPE="/run/cloud-init/hook-hotplug-cmd" + +mkfifo -m700 $PIPE + +while true; do + # shellcheck disable=SC2162 + read args < $PIPE + # shellcheck disable=SC2086 + exec /usr/bin/cloud-init devel hotplug-hook $args +done + +exit diff -Nru cloud-init-23.4.4/tools/ds-identify cloud-init-24.1.3/tools/ds-identify --- cloud-init-23.4.4/tools/ds-identify 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tools/ds-identify 2024-03-27 13:14:04.000000000 +0000 @@ -68,7 +68,6 @@ DI_DEBUG_LEVEL="${DEBUG_LEVEL:-1}" PATH_ROOT=${PATH_ROOT:-""} -PATH_RUN=${PATH_RUN:-"${PATH_ROOT}/run"} PATH_SYS_CLASS_DMI_ID=${PATH_SYS_CLASS_DMI_ID:-${PATH_ROOT}/sys/class/dmi/id} PATH_SYS_HYPERVISOR=${PATH_SYS_HYPERVISOR:-${PATH_ROOT}/sys/hypervisor} PATH_SYS_CLASS_BLOCK=${PATH_SYS_CLASS_BLOCK:-${PATH_ROOT}/sys/class/block} @@ -82,11 +81,17 @@ PATH_ETC_CLOUD="${PATH_ETC_CLOUD:-${PATH_ROOT}/etc/cloud}" PATH_ETC_CI_CFG="${PATH_ETC_CI_CFG:-${PATH_ETC_CLOUD}/cloud.cfg}" PATH_ETC_CI_CFG_D="${PATH_ETC_CI_CFG_D:-${PATH_ETC_CI_CFG}.d}" -PATH_RUN_CI="${PATH_RUN_CI:-${PATH_RUN}/cloud-init}" -PATH_RUN_CI_CFG=${PATH_RUN_CI_CFG:-${PATH_RUN_CI}/cloud.cfg} -PATH_RUN_DI_RESULT=${PATH_RUN_DI_RESULT:-${PATH_RUN_CI}/.ds-identify.result} -DI_LOG="${DI_LOG:-${PATH_RUN_CI}/ds-identify.log}" +# Declare global here, so they can be overwritten from the outside. +# if not overwritten from the outside, we'll populate them with system default +# paths, once we have determined the system we're running on. +# This is done in set_run_path(), which must run after read_uname_info(). +PATH_RUN="${PATH_RUN:-}" +PATH_RUN_CI="${PATH_RUN_CI:-}" +PATH_RUN_CI_CFG="${PATH_RUN_CI_CFG:-}" +PATH_RUN_DI_RESULT="${PATH_RUN_DI_RESULT:-}" + +DI_LOG="${DI_LOG:-}" _DI_LOGGED="" # set DI_MAIN='noop' in environment to source this file with no main called. @@ -110,11 +115,8 @@ DI_PID_1_PRODUCT_NAME="" DI_UNAME_KERNEL_NAME="" -DI_UNAME_KERNEL_RELEASE="" DI_UNAME_KERNEL_VERSION="" DI_UNAME_MACHINE="" -DI_UNAME_NODENAME="" -DI_UNAME_OPERATING_SYSTEM="" DI_UNAME_CMD_OUT="" DS_FOUND=0 @@ -127,7 +129,7 @@ DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ CloudSigma CloudStack DigitalOcean Vultr AliYun Ec2 GCE OpenNebula OpenStack \ OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud UpCloud VMware \ -LXD NWCS Akamai" +LXD NWCS Akamai WSL" DI_DSLIST="" DI_MODE="" DI_ON_FOUND="" @@ -157,6 +159,8 @@ shift [ "$lvl" -gt "${DI_DEBUG_LEVEL}" ] && return + [ "$DI_LOG" = "" ] && DI_LOG="stderr" + if [ "$_DI_LOGGED" != "$DI_LOG" ]; then # first time here, open file descriptor for append case "$DI_LOG" in @@ -213,6 +217,27 @@ _RET="$val" } +get_sysctl_field() { + local sys_field="$1" sysctl_field="" val="" + command -v sysctl >/dev/null 2>&1 || { + warn "No sysctl program. Cannot read $sys_field." + return 1 + } + case "$sys_field" in + chassis_vendor) sysctl_field='hw.vendor';; + chassis_serial) sysctl_field='hw.type';; + chassis_version) sysctl_field='hw.uuid';; + sys_vendor) sysctl_field='hw.vendor';; + product_name) sysctl_field='hw.product';; + product_serial) sysctl_field='hw.uuid';; + product_uuid) sysctl_field='hw.uuid';; + *) error "Unknown field $sys_field. Cannot call sysctl." 
+ return 1;; + esac + val=$(sysctl -nq "$sysctl_field" 2>/dev/null) || return 1 + _RET="$val" +} + dmi_decode() { local sys_field="$1" dmi_field="" val="" command -v dmidecode >/dev/null 2>&1 || { @@ -235,9 +260,12 @@ get_dmi_field() { _RET="$UNAVAILABLE" - if [ "$DI_UNAME_KERNEL_NAME" = "FreeBSD" ]; then + if [ "$DI_UNAME_KERNEL_NAME" = "FreeBSD" -o "$DI_UNAME_KERNEL_NAME" = "Dragonfly" ]; then get_kenv_field "$1" || _RET="$ERROR" return $? + elif [ "$DI_UNAME_KERNEL_NAME" = "OpenBSD" ]; then + get_sysctl_field "$1" || _RET="$ERROR" + return $? fi local path="${PATH_SYS_CLASS_DMI_ID}/$1" @@ -393,7 +421,7 @@ # - DI_ISO9660_DEVS # - DI_FS_UUIDS - if [ "$DI_UNAME_KERNEL_NAME" = "FreeBSD" ]; then + if [ "$DI_UNAME_KERNEL_NAME" = "FreeBSD" -o "$DI_UNAME_KERNEL_NAME" = "Dragonfly" ]; then read_fs_info_freebsd return $? else @@ -414,7 +442,19 @@ if [ $r -eq 0 ] || { [ $r -ne 0 ] && [ "$out" = "none" ]; }; then virt="$out" fi - elif [ "$DI_UNAME_KERNEL_NAME" = "FreeBSD" ]; then + elif command -v virt-what >/dev/null 2>&1; then + # Map virt-what's names to those systemd-detect-virt that + # don't match up. + out=$(virt-what 2>&1 | head -n 1) && { + case "$out" in + ibm_systemz-zvm) virt="zvm" ;; + hyperv) virt="microsoft" ;; + virtualbox) virt="oracle" ;; + xen-domU) virt="xen" ;; + *) virt="$out" + esac + } + elif [ "$DI_UNAME_KERNEL_NAME" = "FreeBSD" -o "$DI_UNAME_KERNEL_NAME" = "Dragonfly" ]; then # Map FreeBSD's vm_guest names to those systemd-detect-virt that # don't match up. See # https://github.com/freebsd/freebsd/blob/master/sys/kern/subr_param.c#L144-L160 @@ -524,33 +564,27 @@ # uname is tricky to parse as it outputs always in a given order # independent of option order. kernel-version is known to have spaces. # 1 -s kernel-name - # 2 -n nodename - # 3 -r kernel-release - # 4.. -v kernel-version(whitespace) - # N-2 -m machine - # N-1 -o operating-system + # 2.. -v kernel-version(whitespace) + # N-1 -m machine cached "${DI_UNAME_CMD_OUT}" && return local out="${1:-}" ret=0 buf="" if [ -z "$out" ]; then - out=$(uname -snrvmo) || { + out=$(uname -svm) || { ret=$? - error "failed reading uname with 'uname -snrvmo'" + error "failed reading uname with 'uname -svm'" return $ret } fi # shellcheck disable=2086 set -- $out DI_UNAME_KERNEL_NAME="$1" - DI_UNAME_NODENAME="$2" - DI_UNAME_KERNEL_RELEASE="$3" - shift 3 - while [ $# -gt 2 ]; do + shift + while [ $# -gt 1 ]; do buf="$buf $1" shift done DI_UNAME_KERNEL_VERSION="${buf# }" DI_UNAME_MACHINE="$1" - DI_UNAME_OPERATING_SYSTEM="$2" DI_UNAME_CMD_OUT="$out" return 0 } @@ -678,7 +712,7 @@ nocase_equal() { # nocase_equal(a, b) - # return 0 if case insenstive comparision a.lower() == b.lower() + # return 0 if case insensitive comparison a.lower() == b.lower() # different lengths [ "${#1}" = "${#2}" ] || return 1 # case sensitive equal @@ -725,11 +759,15 @@ [ -b "$fpath" ] || { STATE_FLOPPY_PROBED=1; return 1; } - modprobe --use-blacklist floppy >/dev/null 2>&1 || + # Use "-b" option as Busybox modprobe doesn't support long-option + modprobe -b floppy >/dev/null 2>&1 || { STATE_FLOPPY_PROBED=1; return 1; } - udevadm settle "--exit-if-exists=$fpath" || - { STATE_FLOPPY_PROBED=1; return 1; } + # Some Linux distros/non-Linux OSes may not have udev + if command -v udevadm; then + udevadm settle "--exit-if-exists=$fpath" || + { STATE_FLOPPY_PROBED=1; return 1; } + fi [ -b "$fpath" ] STATE_FLOPPY_PROBED=$? 
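[Editor's note] The read_uname_info() rework above trims the call to `uname -svm` because the kernel-version field (-v) may itself contain whitespace, so the output cannot be split naively: the first word is the kernel name, the last word is the machine, and everything in between is the version. A minimal standalone sketch of that positional-parameter parsing idea (illustrative only, not the upstream function; variable names are invented):

    #!/bin/sh
    # Parse `uname -svm`, where the middle (-v) field may contain spaces.
    out=$(uname -svm) || exit 1
    # shellcheck disable=SC2086
    set -- $out                # word-split into positional parameters
    kernel_name=$1             # first word: kernel name (-s)
    shift
    buf=""
    while [ $# -gt 1 ]; do     # everything but the last word: version (-v)
        buf="$buf $1"
        shift
    done
    kernel_version=${buf# }
    machine=$1                 # last word: machine (-m)
    echo "name=$kernel_name version=$kernel_version machine=$machine"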
@@ -1066,7 +1104,8 @@ fi local idstr="http://schemas.dmtf.org/ovf/environment/1" - grep --quiet --ignore-case "$idstr" "${PATH_ROOT}$dev" + # POSIX grep only supports short-options, long-options are GNU-specific + grep -q -i "$idstr" "${PATH_ROOT}$dev" } has_ovf_cdrom() { @@ -1128,7 +1167,7 @@ } dscheck_Bigstep() { - # bigstep is activated by presense of seed file 'url' + # bigstep is activated by presence of seed file 'url' [ -f "${PATH_VAR_LIB_CLOUD}/data/seed/bigstep/url" ] && return ${DS_FOUND} return ${DS_NOT_FOUND} @@ -1215,7 +1254,7 @@ local uuid="${DI_DMI_PRODUCT_UUID}" case "$uuid:$serial" in [Ee][Cc]2*:[Ee][Cc]2*) - # both start with ec2, now check for case insenstive equal + # both start with ec2, now check for case insensitive equal nocase_equal "$uuid" "$serial" && { _RET="AWS"; return 0; };; esac @@ -1547,6 +1586,88 @@ return "${DS_NOT_FOUND}" } +WSL_cloudinit_dir_in() { + _RET="" + local cmdexe="" cloudinitdir="" val="" + for m in "$@"; do + cmdexe="$m/Windows/System32/cmd.exe" + if command -v "$cmdexe" > /dev/null 2>&1; then + # Here WSL's proprietary `/init` is used to start the Windows cmd.exe + # to output the Windows user profile directory path, which is + # held by the environment variable %USERPROFILE%. + cloudinitdir=$(/init "$cmdexe" /c echo %USERPROFILE% 2>/dev/null) + if [ -n "$cloudinitdir" ]; then + # wslpath is a program supplied by WSL itself that translates Windows and Linux paths, + # respecting the mountpoints where the Windows drives are mounted. + # (in fact it's a symlink to /init). + val=$(wslpath -au "$cloudinitdir") && _RET="$val" + return $? + fi + fi + done + + return 1 +} + +WSL_instance_name() { + local val="" instance_name="" + instance_name=$(wslpath -am /) + val="${instance_name##*/}" + _RET="${val}" +} + +dscheck_WSL() { + local mountpoints="" cloudinitdir="" candidate="" instance_name="" + if [ "${DI_UNAME_KERNEL_NAME}" != "Linux" ]; then + return "${DS_NOT_FOUND}" + fi + + if [ "${DI_VIRT}" != "wsl" ]; then + return "${DS_NOT_FOUND}" + fi + + # The datasource needs to find the cloud-config files in the Windows host + # filesystem, which is exposed as 9p mount points, one per disk drive (partition). + # If none is found, the datasource cannot proceed. + # See https://youtu.be/lwhMThePdIo?t=2431&si=JKTHx39TyRgPbzkZ and + # https://learn.microsoft.com/en-us/windows/wsl/wsl-config#what-is-drvfs + # for more information. + mountpoints=$(grep '^[^[:space:]]* [^[:space:]]* 9p [^[:space:]]*aname=drvfs;.*' "${PATH_ROOT}/proc/mounts" | cut -f2 -d' ') + + if [ -z "$mountpoints" ]; then + debug 1 "WSL datasource requires access to Windows drives mount points" + return "${DS_NOT_FOUND}" + fi + + # We know we are under WSL and have access to the host filesystem, + # so let's find the .cloud-init directory + WSL_cloudinit_dir_in "$mountpoints" + cloudinitdir="${_RET}" + if [ -z "$cloudinitdir" ]; then + debug 1 "%USERPROFILE%/.cloud-init/ directory not found" + return "${DS_NOT_FOUND}" + fi + + # and the applicable userdata file. Notice the ordering in the for-loop + # must match our expected precedence, so the file we find is what the + # datasource must process. + WSL_instance_name + instance_name="${_RET}" + # shellcheck source=/dev/null + . 
"${PATH_ROOT}/etc/os-release" + for userdatafile in "${instance_name}.user-data" "${ID:-linux}-${VERSION_ID:-${VERSION_CODENAME}}".user-data "${ID:-linux}-all.user-data" "default.user-data"; do + candidate="$cloudinitdir/$userdatafile" + if [ -f "$candidate" ]; then + debug 1 "Found applicable user data file for this instance at: $candidate" + return ${DS_FOUND} + fi + done + + debug 1 "Didn't find any applicable user data file for instance named $instance_name in $cloudinitdir" + + return "${DS_NOT_FOUND}" +} + collect_info() { read_pid1_product_name read_config @@ -1561,6 +1682,7 @@ } print_info() { + read_uname_info collect_info _print_info } @@ -1570,8 +1692,7 @@ vars="DMI_PRODUCT_NAME DMI_SYS_VENDOR DMI_PRODUCT_SERIAL" vars="$vars DMI_PRODUCT_UUID PID_1_PRODUCT_NAME DMI_CHASSIS_ASSET_TAG" vars="$vars DMI_BOARD_NAME FS_LABELS ISO9660_DEVS KERNEL_CMDLINE VIRT" - vars="$vars UNAME_KERNEL_NAME UNAME_KERNEL_RELEASE UNAME_KERNEL_VERSION" - vars="$vars UNAME_MACHINE UNAME_NODENAME UNAME_OPERATING_SYSTEM" + vars="$vars UNAME_KERNEL_NAME UNAME_KERNEL_VERSION UNAME_MACHINE" vars="$vars DSNAME DSLIST" vars="$vars MODE ON_FOUND ON_MAYBE ON_NOTFOUND" for v in ${vars}; do @@ -1818,12 +1939,25 @@ return } +set_run_path() { + if [ "$DI_UNAME_KERNEL_NAME" != "Linux" ]; then + PATH_RUN=${PATH_RUN:-"${PATH_ROOT}/var/run"} + else + PATH_RUN=${PATH_RUN:-"${PATH_ROOT}/run"} + fi + + PATH_RUN_CI="${PATH_RUN_CI:-${PATH_RUN}/cloud-init}" + PATH_RUN_CI_CFG=${PATH_RUN_CI_CFG:-${PATH_RUN_CI}/cloud.cfg} + PATH_RUN_DI_RESULT=${PATH_RUN_DI_RESULT:-${PATH_RUN_CI}/.ds-identify.result} + + DI_LOG="${DI_LOG:-${PATH_RUN_CI}/ds-identify.log}" +} + _main() { local dscheck_fn="" ret_dis=1 ret_en=0 read_uptime debug 1 "[up ${_RET}s]" "ds-identify $*" - read_uname_info read_virt read_kernel_cmdline if is_disabled; then @@ -1865,7 +1999,11 @@ # if there is only a single entry in $DI_DSLIST if [ $# -eq 1 ] || [ $# -eq 2 -a "$2" = "None" ] ; then debug 1 "single entry in datasource_list ($DI_DSLIST) use that." - found "$@" + if [ $# -eq 1 ]; then + write_result "datasource_list: [ $1 ]" + else + found "$@" + fi return fi @@ -1946,11 +2084,14 @@ main() { local ret="" ensure_sane_path + read_uname_info + set_run_path + [ -d "$PATH_RUN_CI" ] || mkdir -p "$PATH_RUN_CI" if [ "${1:+$1}" != "--force" ] && [ -f "$PATH_RUN_CI_CFG" ] && [ -f "$PATH_RUN_DI_RESULT" ]; then if read ret < "$PATH_RUN_DI_RESULT"; then - if [ "$ret" = "0" ] || [ "$ret" = "1" ]; then + if [ "$ret" = "0" ] || [ "$ret" = "1" ] || [ "$ret" = "2" ]; then debug 2 "used cached result $ret. pass --force to re-run." return "$ret"; fi diff -Nru cloud-init-23.4.4/tools/hook-hotplug cloud-init-24.1.3/tools/hook-hotplug --- cloud-init-23.4.4/tools/hook-hotplug 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tools/hook-hotplug 2024-03-27 13:14:04.000000000 +0000 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/sh # This file is part of cloud-init. See LICENSE file for license information. 
# This script checks if cloud-init has hotplug hooked and if @@ -11,12 +11,12 @@ if is_finished; then # open cloud-init's hotplug-hook fifo rw exec 3<>/run/cloud-init/hook-hotplug-cmd - env_params=( - --subsystem="${SUBSYSTEM}" - handle - --devpath="${DEVPATH}" - --udevaction="${ACTION}" - ) + env_params=" \ + --subsystem=${SUBSYSTEM} \ + handle \ + --devpath=${DEVPATH} \ + --udevaction=${ACTION} \ + " # write params to cloud-init's hotplug-hook fifo - echo "${env_params[@]}" >&3 + echo "${env_params}" >&3 fi diff -Nru cloud-init-23.4.4/tools/make-tarball cloud-init-24.1.3/tools/make-tarball --- cloud-init-23.4.4/tools/make-tarball 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tools/make-tarball 2024-03-27 13:14:04.000000000 +0000 @@ -60,7 +60,7 @@ fi # when building an archiving from HEAD, ensure that there aren't any -# uncomitted changes in the working directory (because these would not +# uncommitted changes in the working directory (because these would not # end up in the archive). if [ "$rev" = HEAD ] && ! git diff-index --quiet HEAD --; then if [ -z "$SKIP_UNCOMITTED_CHANGES_CHECK" ]; then diff -Nru cloud-init-23.4.4/tools/migrate-lp-user-to-github cloud-init-24.1.3/tools/migrate-lp-user-to-github --- cloud-init-23.4.4/tools/migrate-lp-user-to-github 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tools/migrate-lp-user-to-github 2024-03-27 13:14:04.000000000 +0000 @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -"""Link your Launchpad user to github, proposing branches to LP and Github""" +"""Link your Launchpad user to GitHub, proposing branches to LP and GitHub""" from argparse import ArgumentParser from subprocess import Popen, PIPE @@ -175,7 +175,7 @@ def create_migration_branch( branch_name, upstream, lp_user, gh_user, commit_msg ): - """Create an LP to Github migration branch and add lp_user->gh_user.""" + """Create an LP to GitHub migration branch and add lp_user->gh_user.""" log( "Creating a migration branch: {} adding your users".format( MIGRATE_BRANCH_NAME diff -Nru cloud-init-23.4.4/tools/mock-meta.py cloud-init-24.1.3/tools/mock-meta.py --- cloud-init-23.4.4/tools/mock-meta.py 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tools/mock-meta.py 2024-03-27 13:14:04.000000000 +0000 @@ -179,8 +179,6 @@ # Nice helper to add in the 'running' users key (if they have one) key_pth = os.path.expanduser("~/.ssh/id_rsa.pub") - if not os.path.isfile(key_pth): - key_pth = os.path.expanduser("~/.ssh/id_dsa.pub") if os.path.isfile(key_pth): with open(key_pth, "rb") as fh: diff -Nru cloud-init-23.4.4/tools/read-dependencies cloud-init-24.1.3/tools/read-dependencies --- cloud-init-23.4.4/tools/read-dependencies 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tools/read-dependencies 2024-03-27 13:14:04.000000000 +0000 @@ -224,8 +224,8 @@ """Translate pip package names to distro-specific package names. @param pip_requires: List of versionless pip package names to translate. - @param renames: Dict containg special case renames from pip name to system - package name for the distro. + @param renames: Dict containing special case renames from pip name to + system package name for the distro. 
""" prefix = "python3-" standard_pkg_name = "{0}{1}" diff -Nru cloud-init-23.4.4/tools/uncloud-init cloud-init-24.1.3/tools/uncloud-init --- cloud-init-23.4.4/tools/uncloud-init 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tools/uncloud-init 2024-03-27 13:14:04.000000000 +0000 @@ -105,7 +105,7 @@ if [ "${ubuntu_pass}" = "R" -o "${ubuntu_pass}" = "random" ]; then ubuntu_pass=$(python -c 'import string, random; random.seed(); print "".join(random.sample(string.letters+string.digits, 8))') - log "settting ubuntu pass = ${ubuntu_pass}" + log "setting ubuntu pass = ${ubuntu_pass}" printf "\n===\nubuntu_pass = %s\n===\n" "${ubuntu_pass}" >/dev/ttyS0 fi diff -Nru cloud-init-23.4.4/tools/xkvm cloud-init-24.1.3/tools/xkvm --- cloud-init-23.4.4/tools/xkvm 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tools/xkvm 2024-03-27 13:14:04.000000000 +0000 @@ -126,7 +126,7 @@ qemu_supports_file_locking() { # hackily check if qemu has file.locking in -drive params (LP: #1716028) if [ -z "$_QEMU_SUPPORTS_FILE_LOCKING" ]; then - # The only way we could find to check presense of file.locking is + # The only way we could find to check presence of file.locking is # qmp (query-qmp-schema). Simply checking if the virtio-blk driver # supports 'share-rw' is expected to be equivalent and simpler. isdevopt virtio-blk share-rw && diff -Nru cloud-init-23.4.4/tox.ini cloud-init-24.1.3/tox.ini --- cloud-init-23.4.4/tox.ini 2024-02-27 15:17:52.000000000 +0000 +++ cloud-init-24.1.3/tox.ini 2024-03-27 13:14:04.000000000 +0000 @@ -44,7 +44,7 @@ [testenv:ruff] deps = ruff=={[format_deps]ruff} -commands = {envpython} -m ruff {posargs:cloudinit/ tests/ tools/ conftest.py setup.py} +commands = {envpython} -m ruff {posargs:cloudinit/ tests/ tools/ packages/bddeb packages/brpm conftest.py setup.py} [testenv:pylint] deps = @@ -213,6 +213,12 @@ {envpython} -m sphinx {posargs:-W doc/rtd doc/rtd_html} doc8 doc/rtd +[testenv:doc-spelling] +deps = + -r{toxinidir}/doc-requirements.txt +commands = + {envpython} -m sphinx -b spelling {posargs:-W doc/rtd doc/rtd_html} + # linkcheck shows false positives and has noisy output. # Despite these limitations, it is better than a manual search of the docs. # suggested workflow is: @@ -323,4 +329,9 @@ serial: tests that do not work in parallel, skipped with py3-fast unstable: skip this test because it is flakey user_data: the user data to be passed to the test instance - allow_dns_lookup: disable autochecking for host network configuration + allow_dns_lookup: disable autochecking for host network configuration + +[coverage:paths] +source = + cloudinit/ + /usr/lib/python3/dist-packages/cloudinit/