diff -Nru ubuntu-advantage-tools-27.14.4~18.04/apport/source_ubuntu-advantage-tools.py ubuntu-advantage-tools-28.1~18.04/apport/source_ubuntu-advantage-tools.py --- ubuntu-advantage-tools-27.14.4~18.04/apport/source_ubuntu-advantage-tools.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/apport/source_ubuntu-advantage-tools.py 2023-05-30 19:02:35.000000000 +0000 @@ -2,6 +2,7 @@ import tempfile from apport.hookutils import attach_file_if_exists +from uaclient import defaults from uaclient.actions import collect_logs from uaclient.config import UAConfig @@ -12,7 +13,7 @@ cfg = UAConfig() with tempfile.TemporaryDirectory() as output_dir: collect_logs(cfg, output_dir) - auto_include_log_files = [ + auto_include_log_files = { "cloud-id.txt", "cloud-id.txt-error", "ua-status.json", @@ -24,6 +25,9 @@ os.path.basename(cfg.timer_log_file), os.path.basename(cfg.daemon_log_file), os.path.basename(cfg.data_path("jobs-status")), - ] + os.path.basename(defaults.CONFIG_DEFAULTS["log_file"]), + os.path.basename(defaults.CONFIG_DEFAULTS["timer_log_file"]), + os.path.basename(defaults.CONFIG_DEFAULTS["daemon_log_file"]), + } for f in auto_include_log_files: attach_file_if_exists(report, os.path.join(output_dir, f), key=f) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/apt.conf.d/51ubuntu-advantage-esm ubuntu-advantage-tools-28.1~18.04/apt.conf.d/51ubuntu-advantage-esm --- ubuntu-advantage-tools-27.14.4~18.04/apt.conf.d/51ubuntu-advantage-esm 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/apt.conf.d/51ubuntu-advantage-esm 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -Unattended-Upgrade::Allowed-Origins { - "${distro_id}ESM:${distro_codename}-infra-security"; -}; -Unattended-Upgrade::Allowed-Origins { - "${distro_id}ESMApps:${distro_codename}-apps-security"; -}; diff -Nru ubuntu-advantage-tools-27.14.4~18.04/apt-hook/json-hook.cc ubuntu-advantage-tools-28.1~18.04/apt-hook/json-hook.cc --- ubuntu-advantage-tools-27.14.4~18.04/apt-hook/json-hook.cc 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/apt-hook/json-hook.cc 2023-05-30 19:02:35.000000000 +0000 @@ -218,13 +218,17 @@ return ret; } -bool is_xenial() { +enum ESMInfraSeries {NOT_ESM_INFRA, XENIAL, BIONIC}; + +ESMInfraSeries get_esm_infra_series() { std::ifstream os_release_file("/etc/os-release"); - bool ret = false; + ESMInfraSeries ret = NOT_ESM_INFRA; if (os_release_file.is_open()) { std::string os_release_str((std::istreambuf_iterator(os_release_file)), (std::istreambuf_iterator())); if (os_release_str.find("xenial") != os_release_str.npos) { - ret = true; + ret = XENIAL; + } else if (os_release_str.find("bionic") != os_release_str.npos) { + ret = BIONIC; } os_release_file.close(); } @@ -238,27 +242,39 @@ ESMContext get_esm_context() { CloudID cloud_id = get_cloud_id(); - bool is_x = is_xenial(); + ESMInfraSeries esm_infra_series = get_esm_infra_series(); ESMContext ret; ret.context = ""; ret.url = "https://ubuntu.com/pro"; - if (cloud_id != AZURE && is_x) { - ret.context = " for 16.04"; - ret.url = "https://ubuntu.com/16-04"; - } else if (cloud_id == AZURE && !is_x) { - ret.context = " on Azure"; - ret.url = "https://ubuntu.com/azure/pro"; - } else if (cloud_id == AZURE && is_x) { - ret.context = " for 16.04 on Azure"; - ret.url = "https://ubuntu.com/16-04/azure"; - } else if (cloud_id == AWS && !is_x) { - ret.context = " on AWS"; - ret.url = "https://ubuntu.com/aws/pro"; - } else if (cloud_id == GCE && !is_x) { - ret.context = " on GCP"; - ret.url = 
"https://ubuntu.com/gcp/pro"; + if (esm_infra_series == XENIAL) { + if (cloud_id == AZURE) { + ret.context = " for 16.04 on Azure"; + ret.url = "https://ubuntu.com/16-04/azure"; + } else { + ret.context = " for 16.04"; + ret.url = "https://ubuntu.com/16-04"; + } + } else if (esm_infra_series == BIONIC) { + if (cloud_id == AZURE) { + ret.context = " for 18.04 on Azure"; + ret.url = "https://ubuntu.com/18-04/azure"; + } else { + ret.context = " for 18.04"; + ret.url = "https://ubuntu.com/18-04"; + } + } else { + if (cloud_id == AZURE) { + ret.context = " on Azure"; + ret.url = "https://ubuntu.com/azure/pro"; + } else if (cloud_id == AWS) { + ret.context = " on AWS"; + ret.url = "https://ubuntu.com/aws/pro"; + } else if (cloud_id == GCE) { + ret.context = " on GCP"; + ret.url = "https://ubuntu.com/gcp/pro"; + } } return ret; diff -Nru ubuntu-advantage-tools-27.14.4~18.04/debian/changelog ubuntu-advantage-tools-28.1~18.04/debian/changelog --- ubuntu-advantage-tools-27.14.4~18.04/debian/changelog 2023-04-06 13:49:20.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/debian/changelog 2023-06-27 00:49:37.000000000 +0000 @@ -1,8 +1,72 @@ -ubuntu-advantage-tools (27.14.4~18.04) bionic; urgency=medium +ubuntu-advantage-tools (28.1~18.04) bionic; urgency=medium - * Backport new upstream release: (LP: #2011477) to bionic + * Backport new upstream release: (LP: #2017949) to bionic - -- Renan Rodrigo Thu, 06 Apr 2023 10:49:20 -0300 + -- Grant Orndorff Mon, 26 Jun 2023 20:49:37 -0400 + +ubuntu-advantage-tools (28.1) mantic; urgency=medium + + * New upstream release 28.1 (LP: #2017949) + - fips: ensure shim-signed is updated to the version in the fips repo + if necessary + - status: fix bug where an existing status cache could influence the + simulated status output + + -- Grant Orndorff Mon, 26 Jun 2023 15:05:11 -0400 + +ubuntu-advantage-tools (28) mantic; urgency=medium + + * d/ubuntu-advantage-tools.postinst: + - more specific regex for ua_config warning + * d/source/lintian-overrides + - adjust missing-build-dependency-for-dh-addon systemd override to work + for the different but related error message on jammy onwards + * New upstream release 28 (LP: #2017949) + - api: + + new endpoint: u.pro.status.is_attached.v1 + + new endpoint: u.pro.status.enabled_services.v1 + - apport: collect default log files if present for bug reports + - apt messaging: add bionic-specific urls + - auto-attach: + + check for new Azure UBUNTU_PRO license on-boot of non-pro instances + + exit 4 if attach succeeds but service enablement fails + - cli: + + avoid unnecessary network calls during autocomplete (GH: #2556) + + warn users to not rely on human-readable output in scripts + - config: no longer load uaclient.conf from current working directory + - fix: + + add support for --no-related flag + + separate target USN from related USNs + - general: + + logs to user cache directory when run as non-root + + fix bug where non-root commands failed with file permission error + accessing /tmp/ubuntu-advantage (GH: #2567) + + use system environment vars by default in sub processes (GH: #2527) + + fall back to /usr/lib/os-release for release info + + start logging to default log file until config is loaded + + remove small timeout from contract checking request + + avoid crashes when processing unicode text (LP: #2019729) + - livepatch: + + use uname.machine for kernel arch when checking support + (GH: #2517) + + display tailored warning messages for granular support statuses + - realtime-kernel: add support for intel-iotg variant 
+ - reboot-required: new criteria for "yes-kernel-livepatches-applied" + livepatch status must be either "applied" or "nothing-to-apply" and + livepatch support status must say "supported" + - security-status: + + always show available/installed counts for esm packages + + include hint to run apt-get update for up-to-date info (GH: #2443) + + improve visibility of installed and available updates (GH: #2442) + + change package info message hint to recommend apt-cache show + + avoids unnecessary network calls (LP: #2015286, GH: #2536) + - systemd: update service unit for reboot_cmds to not run if not attached + - status: + + add hint for pro status --all + + better message if no services are available (LP: #1994923) + - timer: only run timer when attached + + -- Grant Orndorff Thu, 27 Apr 2023 16:34:55 -0400 ubuntu-advantage-tools (27.14.4) lunar; urgency=medium diff -Nru ubuntu-advantage-tools-27.14.4~18.04/debian/source/lintian-overrides ubuntu-advantage-tools-28.1~18.04/debian/source/lintian-overrides --- ubuntu-advantage-tools-27.14.4~18.04/debian/source/lintian-overrides 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/debian/source/lintian-overrides 2023-06-02 19:38:24.000000000 +0000 @@ -1,6 +1,8 @@ # Lintian doesn't see dh-systemd alternative when building on xenial ubuntu-advantage-tools: missing-build-dependency-for-dh_-command dh_systemd_start => dh-systemd -ubuntu-advantage-tools: missing-build-dependency-for-dh-addon systemd => dh-systemd + +# Lintian can't handle the multiline debhelper/dh-systemd dependencies explained in debian/control +ubuntu-advantage-tools: missing-build-dependency-for-dh-addon *systemd* # Lintian doesn't like mentioning riscv64 for older go package ubuntu-advantage-tools: invalid-arch-string-in-source-relation riscv64 [build-depends: golang-1.10-go [!powerpc !riscv64]] diff -Nru ubuntu-advantage-tools-27.14.4~18.04/debian/ubuntu-advantage-tools.postinst ubuntu-advantage-tools-28.1~18.04/debian/ubuntu-advantage-tools.postinst --- ubuntu-advantage-tools-27.14.4~18.04/debian/ubuntu-advantage-tools.postinst 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/debian/ubuntu-advantage-tools.postinst 2023-05-30 19:02:35.000000000 +0000 @@ -452,7 +452,7 @@ migrate_user_config_post fi - if grep -q "ua_config:" /etc/ubuntu-advantage/uaclient.conf; then + if grep -q "^ua_config:" /etc/ubuntu-advantage/uaclient.conf; then echo "Warning: uaclient.conf contains old ua_config field." >&2 echo " Please do the following:" >&2 echo " 1. Run 'pro config set field=value' for each field/value pair" >&2 diff -Nru ubuntu-advantage-tools-27.14.4~18.04/dev-docs/explanations/systemd_units.md ubuntu-advantage-tools-28.1~18.04/dev-docs/explanations/systemd_units.md --- ubuntu-advantage-tools-27.14.4~18.04/dev-docs/explanations/systemd_units.md 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/dev-docs/explanations/systemd_units.md 2023-06-01 18:49:33.000000000 +0000 @@ -6,8 +6,8 @@ There are three methods by which a cloud instance may auto-attach to become Ubuntu Pro. 1. On boot auto-attach for known Pro cloud instances. -2. Upgrade-in-place for non-Pro instances that get modified via the Cloud platform to entitle them to become Ubuntu Pro (only on GCP for now) -3. Retry auto-attach in case of failures +2. Upgrade-in-place for non-Pro instances that get modified via the Cloud platform to entitle them to become Ubuntu Pro (only on Azure and GCP for now). +3. Retry auto-attach in case of failures. 
(1) is handled by a systemd unit (`ua-auto-attach.service`) delivered by a separate package called `ubuntu-advantage-pro`. This package is only installed on Ubuntu Pro Cloud images. In this way, an instance launched from an Ubuntu Pro Cloud image knows that it needs to auto-attach. @@ -22,8 +22,8 @@ is_pro{Is -pro installed?} auto_outcome{Success?} is_attached{Attached?} - should_run_daemon{on GCP? or retry flag set?} - is_gcp{GCP?} + should_run_daemon{on Azure? or GCP? or retry flag set?} + is_gcp{Azure or GCP?} is_retry{retry flag set?} is_gcp_pro{Pro license detected?} daemon_attach_outcome{Success?} @@ -33,11 +33,11 @@ auto_attach[/Try to Attach/] trigger_retry[/Create Retry Flag File/] trigger_retry2[/Create Retry Flag File/] - poll_gcp[/Poll for GCP Pro license/] + poll_gcp[/Poll for cloud Pro license/] daemon_attach[/Try to Attach/] daemon_attach2[/Try to Attach/] wait[/Wait a while/] - + %%%% systemd units auto(ua-auto-attach.service) daemon(ubuntu-advantage.service) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/dev-docs/howtoguides/build-docs.md ubuntu-advantage-tools-28.1~18.04/dev-docs/howtoguides/build-docs.md --- ubuntu-advantage-tools-27.14.4~18.04/dev-docs/howtoguides/build-docs.md 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/dev-docs/howtoguides/build-docs.md 2023-05-30 19:02:35.000000000 +0000 @@ -0,0 +1,11 @@ +# How to generate Ubuntu Pro Client user documentation + +To build the docs for Ubuntu Pro Client, you can use a dedicated `tox` command for it. +You can install `tox` on your machine by running the `make test` command. Once tox is +installed just run the command: + +```console +$ tox -e docs +``` + +The command will generate the html pages inside `docs/build` diff -Nru ubuntu-advantage-tools-27.14.4~18.04/dev-docs/howtoguides/building.md ubuntu-advantage-tools-28.1~18.04/dev-docs/howtoguides/building.md --- ubuntu-advantage-tools-27.14.4~18.04/dev-docs/howtoguides/building.md 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/dev-docs/howtoguides/building.md 2023-05-30 19:02:35.000000000 +0000 @@ -41,16 +41,3 @@ > # this script can be used to update all chroots > sudo PATTERN=\* sh /usr/share/doc/sbuild/examples/sbuild-debian-developer-setup-update-all > ``` - -## Setting up an lxc development container -```shell -lxc launch ubuntu-daily:xenial dev-x -c user.user-data="$(cat tools/ua-dev-cloud-config.yaml)" -lxc exec dev-x bash -``` - -## Setting up a kvm development environment with multipass -**Note:** There is a sample procedure documented in tools/multipass.md as well. -```shell -multipass launch daily:focal -n dev-f --cloud-init tools/ua-dev-cloud-config.yaml -multipass connect dev-f -``` diff -Nru ubuntu-advantage-tools-27.14.4~18.04/dev-docs/howtoguides/how_to_release_a_new_version_of_ua.md ubuntu-advantage-tools-28.1~18.04/dev-docs/howtoguides/how_to_release_a_new_version_of_ua.md --- ubuntu-advantage-tools-27.14.4~18.04/dev-docs/howtoguides/how_to_release_a_new_version_of_ua.md 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/dev-docs/howtoguides/how_to_release_a_new_version_of_ua.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,208 +0,0 @@ -# Ubuntu Pro Client releases - -## Background - -The release process for ubuntu-advantage-tools has three overarching steps/goals. - -1. Release to our team infrastructure. This includes GitHub and the `ua-client` PPAs. -2. Release to the latest Ubuntu devel release. -3. 
Release to the supported Ubuntu past releases via [SRU](https://wiki.ubuntu.com/StableReleaseUpdates) using the [ubuntu-advantage-tools specific SRU process](https://wiki.ubuntu.com/UbuntuAdvantageToolsUpdates). - -Generally speaking, these steps happen in order, but there is some overlap. Also we may backtrack if issues are found part way through the process. - -An average release should take somewhere between 10 and 14 calendar days if things go smoothly, starting at the decision to release and ending at the new version being available in all supported Ubuntu releases. Note that it is not 2 weeks of full time work. Most of the time is spent waiting for review or sitting in proposed. - -> **Warning** -> If the release contains any change listed in the [Early Review Sign-Off list](../references/early_review_signoff.md), make sure it was properly reviewed *before* starting the release process. Ideally they would be reviewed even before implementation, but if some feature is in the list and didn't get a proper review, now is the time to do so. - -## Prerequisites - -If this is your first time releasing ubuntu-advantage-tools, you'll need to do the following before getting started: - -* Add the team helper scripts to your PATH: [uss-tableflip](https://github.com/canonical/uss-tableflip). -* If you don't yet have a gpg key set up, follow the instructions - [here](https://help.launchpad.net/YourAccount/ImportingYourPGPKey) to create a key, - publish it to `hkp://keyserver.ubuntu.com`, and import it into Launchpad. -* Before you run `sbuild-it` for the first time, you'll need to set up a chroot for each Ubuntu release. - Run the following to set up chroots with dependencies pre-installed for each release: - ```bash - apt-get install sbuild-launchpad-chroot - bash ./tools/setup_sbuild.sh # This will give you usage information on how to call it with the correct parameters - ``` -* You must have Launchpad already properly configured in your system in order to upload packages to the PPAs. Follow [this guide](https://help.launchpad.net/Packaging/PPA/Uploading) to get set up. - -## I. Preliminary/staging release to team infrastructure -1. Create a release PR: - - a. Move the desired commits from our `main` branch onto the desired release branch. - - * This step is currently not well defined. We currently are using `release-27` for all `27.X` releases and have been cherry-picking/rebasing all commits from `main` into this branch for a release. - - b Create a new entry in the `debian/changelog` file: - - * You can do that by running `dch --newversion `. - * Remember to update the release from `UNRELEASED` to the ubuntu/devel release. Edit the version to look like: `27.2~21.10.1`, with the appropriate pro-client and ubuntu/devel version numbers. - * Populate `debian/changelog` with the commits you have cherry-picked. - * You can do that by running `git log .. | log2dch` - * This will generate a list of commits that could be included in the changelog. - * You don't need to include all of the commits generated. Remember that the changelog should - be read by the user to understand the new features/modifications in the package. If you - think a commit will not add that much to the user experience, you can drop it from the - changelog. - * To structure the changelog you can use the other entries as example. But we basically try to - keep this order: debian changes, new features/modifications, testing. Within each section, bullet points should be alphabetized. - - c. 
Create a PR on GitHub into the release branch. Ask in the ~UA channel on Mattermost for review. - - d. When reviewing the release PR, please use the following guidelines when reviewing the new changelog entry: - - * Is the version correctly updated? We must ensure that the new version in the changelog is - correct and it also targets the latest Ubuntu release at the moment. - * Is the entry useful for the user? The changelog entries should be user focused, meaning - that we should only add entries that we think users will care about (i.e. we don't need - entries when fixing a test, as this doesn't provide meaningful information to the user). - * Is this entry redundant? Sometimes we may have changes that affect separate modules of the - code. We should have an entry only for the module that was most affected by it. - * Is the changelog entry unique? We need to verify that the changelog entry is not already - reflected in an earlier version of the changelog. If it is, we need not only to remove but double - check the process we are using to cherry-pick the commits. - * Is this entry actually reflected in the code? Sometimes, we can have changelog entries - that are not reflected in the code anymore. This can happen during development when we are - still unsure about the behaviour of a feature or when we fix a bug that removes the code - that was added. We must verify each changelog entry that is added to be sure of their - presence in the product. - -2. After the release PR is merged, tag the head of the release branch with the version number, e.g., `27.1`. Push this tag to GitHub. - -3. Build the package for all Ubuntu releases and upload to `ppa:ua-client/staging`: - - a. Clone the repository into a clean directory and switch to the release branch. - * *WARNING* Build the package in a clean environment. The reason is that the package - will contain everything that is present in the folder. If you are storing credentials or - other sensible development information in your folder, they will be uploaded too when we send - the package to the ppa. A clean environment is the safest way to perform this. - - b. Edit the changelog - * List yourself as the author of this release. - * Edit the version number to look like: `27.2~20.04.1~rc1` (`~.~rc`) - * Edit the Ubuntu release name. Start with the ubuntu/devel release. - * `git add debian/changelog && git commit -m "throwaway"` - Do **not** push this commit! - - c. `build-package` - * This script will generate all the package artefacts in the parent directory as `../out`. - - d. `sbuild-it ../out/.dsc` - * If this succeeds move on. If this fails, debug and fix before continuing. - - e. Repeat 3.b through 3.d for all supported Ubuntu Releases - * PS: remember to also change the version number on the changelog. For example, suppose - the new version is `1.1~20.04.1~rc1`. If you want to test Bionic now, change it to - `1.1~18.04.1~rc1`. - - f. For each release, dput to the staging PPA: - * `dput ppa:ua-client/staging ../out/_source.changes` - * After each `dput` wait for the "Accepted" email from Launchpad before moving on. - -## II. Release to Ubuntu (devel and SRU) - -> **Note** -> `kinetic` is used throughout as a reference to the current devel release. This will change. - -1. Prepare SRU Launchpad bugs. - - a. We do this even before a successful merge into ubuntu/devel because the context added to these bugs is useful for the Server Team reviewer. - - b. 
Create a new bug on Launchpad for ubuntu-advantage-tools and use the format defined [here](https://wiki.ubuntu.com/UbuntuAdvantageToolsUpdates#SRU_Template) for the description. - * The title should be in the format `[SRU] ubuntu-advantage-tools (27.1 -> 27.2) Xenial, Bionic, Focal, Jammy`, substituting version numbers and release names as necessary. - * If any of the changes for the SRU is in the [Early Review Sign-off list](../references/early_review_signoff.md), include a pointer in the `[Discussion]` section to where the discussion/approval of that feature took place (if possible). - - c. For each Launchpad bug fixed by this release (which should all be referenced in our changelog), add the SRU template to the description and fill out each section. - * Leave the original description in the bug at the bottom under the header `[Original Description]`. - * For the testing steps, include steps to reproduce the bug. Then include instructions for adding `ppa:ua-client/staging`, and steps to verify the bug is no longer present. - -2. Set up the Merge Proposal (MP) for ubuntu/devel: - - a. `git-ubuntu clone ubuntu-advantage-tools; cd ubuntu-advantage-tools` - - b. `git remote add upstream git@github.com:canonical/ubuntu-advantage-client.git` - - c. `git fetch upstream` - - d. `git rebase --onto pkg/ubuntu/devel ` - * e.g. `git rebase --onto pkg/ubuntu/devel 27.0.2 27.1` - * You may need to resolve conflicts, but hopefully these will be minimal. - * You'll end up in a detached state. - - e. `git checkout -B upload--kinetic` - * This creates a new local branch name based on your detached branch. - - f. Make sure the changelog version contains the release version in the name (e.g., `27.1~22.10.1`) - - g. `git push upload--kinetic` - - h. On Launchpad, create a merge proposal for this version which targets `ubuntu/devel` - * For an example, see the [27.9 merge proposal](https://code.launchpad.net/~orndorffgrant/ubuntu/+source/ubuntu-advantage-tools/+git/ubuntu-advantage-tools/+merge/422906). - * Add 2 review slots for `canonical-server-reporter` and `canonical-server-core-reviewers`. - -4. Server Team Review and Pre-SRU Review - - a. Ask the assigned ubuntu-advantage-tools reviewer/sponsor from Server team for a review of your MPs. If you don't know who that is, ask in ~Server. Include a link to the ubuntu/devel MP and to the SRU bug. - - b. If they request changes, create a PR into the release branch on GitHub and ask Pro Client team for review. After that is merged, cherry-pick the commit into your `upload--` branch and push to launchpad. Then notify the Server Team member that you have addressed their requests. - * Some issues may just be filed for addressing in the future if they are not urgent or pertinent to this release. - * Unless the changes are very minor, or only testing related, you should upload a new release candidate version to `ppa:ua-client/staging` as described in I.3. - * After the release is finished, any commits that were merged directly into the release branch in this way should be brought back into `main` via a single PR. - - c. Once review is complete and approved, the Server Team member should **not** upload the version to the devel release. - * If they do, then any changes to the code after this point will require a bump in the patch version of the release. - - d. Now ask the SRU team for a pre-SRU review of the same MP. Mention that the exact same code will be released to all stable Ubuntu releases. - * Follow instructions in `II.4.b` if they request any changes. - - e. 
Once the SRU team member gives a pre-SRU approval, create the branches for each stable release. They should be named `upload--`. - * If you've followed the instructions precisely so far, you can just run `bash tools/create-lp-release-branches.sh`. - - f. Ask Server team member sponsor to upload to devel, and then the SRU proposed queue using the stable release branches you just created. - * Ask them to tag the PR with the appropriate `upload/` tag so git-ubuntu will import rich commit history. - * If they do not have upload rights to the proposed queue, ask in ~Server channel for a Ubuntu Server team member with upload rights for an upload review of the MP for the proposed queue. - - g. Check the [-proposed release queue](https://launchpad.net/ubuntu/xenial/+queue?queue_state=1&queue_text=ubuntu-advantage-tools) for presence of ubuntu-advantage-tools in unapproved state for each supported release. Note: libera chat #ubuntu-release IRC channel has a bot that reports queued uploads of any package in a message like "Unapproved: ubuntu-advantage-tools .. version". - - h. Tell the SRU team member who performed the pre-SRU review that the packages are in the -proposed release queue. They will need to actually approve the package to move into -proposed. - -5. -proposed verification and release to -updates - - a. As soon as the SRU vanguard approves the packages, a bot in #ubuntu-release will announce that ubuntu-advantage-tools is accepted into the applicable -proposed pockets, or the [Xenial -proposed release rejection queue](https://launchpad.net/ubuntu/xenial/+queue?queue_state=4&queue_text=ubuntu-advantage-tools) will contain a reason for rejections. Double check the SRU process bug for any actionable review feedback. - * Once accepted into `-proposed` by an SRU vanguard [ubuntu-advantage-tools shows up in the pending_sru page](https://people.canonical.com/~ubuntu-archive/pending-sru.html), check `rmadison ubuntu-advantage-tools | grep -proposed` to see if the upload exists in -proposed yet. - * Also actually check that the packages are accessible in a container by [enabling proposed](https://wiki.ubuntu.com/Testing/EnableProposed) and updating the package. - - b. With the package in proposed, perform the steps from `I.3` above but use a `~stableppaX` suffix instead of `~rcX` in the version name, and upload to `ppa:ua-client/stable` instead of staging. - - c. Perform the [Ubuntu-advantage-client SRU verification steps](https://wiki.ubuntu.com/UbuntuAdvantageToolsUpdates). This typically involves running all behave targets with `UACLIENT_BEHAVE_ENABLE_PROPOSED=1 UACLIENT_BEHAVE_CHECK_VERSION=` and saving the output. - * There may also be one-time test scripts added in the `sru/` directory for this release. - - d. After all tests have passed, tarball all of the output files and upload them to the SRU bug with a message that looks like this: - ``` - We have run the full ubuntu-advantage-tools integration test suite against the version in -proposed. The results are attached. All tests passed. - - You can verify the correct version was used by checking the output of the first test in each file, which prints the version number. - - I am marking the verification done for this SRU. - ``` - Change the tags on the bug from `verification-needed` to `verification-done` (including the verification tags for each Ubuntu release). - - e. For any other related Launchpad bugs that are fixed in this release, perform the verification steps necessary for those bugs and mark them `verification-done` as needed. 
This will likely involve following the test steps, but instead of adding the staging PPA, enabling -proposed. - - f. Once all SRU bugs are tagged as `verification*-done`, all SRU-bugs should be listed as green in [the pending_sru page](https://people.canonical.com/~ubuntu-archive/pending-sru.html). - - g. After the pending SRU page says that ubuntu-advantage-tools has been in proposed for 7 days, it is now time to ping the [current SRU vanguard](https://wiki.ubuntu.com/StableReleaseUpdates#Publishing) for acceptance of ubuntu-advantage-tools into -updates. - - h. Check `rmadison ubuntu-advantage-tools` for updated version in -updates. - * Also actually check that the packages are accessible in a container and updating the package. - -## III. Post-release updates - -1. Ensure the version tag is correct on GitHub. The `version` git tag should point to the commit that was released as that version to ubuntu -updates. If changes were made in response to feedback during the release process, the tag may have to be moved. -2. Bring in any changes that were made to the release branch into `main` via PR (e.g., changelog edits). -3. Move any scripts added in `sru/` to a new folder in `sru/_archive` for the release. -4. Tell CPC that there is a new version of `ubuntu-advantage-tools` in -updates for all series. diff -Nru ubuntu-advantage-tools-27.14.4~18.04/dev-docs/howtoguides/how_to_use_magic_attach_endpoints.md ubuntu-advantage-tools-28.1~18.04/dev-docs/howtoguides/how_to_use_magic_attach_endpoints.md --- ubuntu-advantage-tools-27.14.4~18.04/dev-docs/howtoguides/how_to_use_magic_attach_endpoints.md 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/dev-docs/howtoguides/how_to_use_magic_attach_endpoints.md 2023-06-01 18:49:33.000000000 +0000 @@ -28,7 +28,7 @@ "_schema_version": "v1", "data": { "meta": { - "environment_vars": []} + "environment_vars": [] }, "attributes": { "expires": "EXPIRE_DATE", diff -Nru ubuntu-advantage-tools-27.14.4~18.04/dev-docs/howtoguides/release_a_new_version.md ubuntu-advantage-tools-28.1~18.04/dev-docs/howtoguides/release_a_new_version.md --- ubuntu-advantage-tools-27.14.4~18.04/dev-docs/howtoguides/release_a_new_version.md 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/dev-docs/howtoguides/release_a_new_version.md 2023-06-01 18:49:33.000000000 +0000 @@ -0,0 +1,227 @@ +# Ubuntu Pro Client releases + +## Background + +The release process for ubuntu-advantage-tools has three overarching steps/goals. + +1. Release to our team infrastructure. This includes GitHub and the `ua-client` PPAs. +2. Release to the latest Ubuntu devel release. +3. Release to the supported Ubuntu past releases via [SRU](https://wiki.ubuntu.com/StableReleaseUpdates) using the [ubuntu-advantage-tools specific SRU process](https://wiki.ubuntu.com/UbuntuAdvantageToolsUpdates). + +Generally speaking, these steps happen in order, but there is some overlap. Also we may backtrack if issues are found part way through the process. + +An average release should take somewhere between 10 and 14 calendar days if things go smoothly, starting at the decision to release and ending at the new version being available in all supported Ubuntu releases. Note that it is not 2 weeks of full time work. Most of the time is spent waiting for review or sitting in proposed. 
+ +> **Warning** +> If the release contains any change listed in the [Early Review Sign-Off list](../references/early_review_signoff.md), make sure it was properly reviewed *before* starting the release process. Ideally they would be reviewed even before implementation, but if some feature is in the list and didn't get a proper review, now is the time to do so. + +## Prerequisites + +If this is your first time releasing ubuntu-advantage-tools, you'll need to do the following before getting started: + +* Add the team helper scripts to your PATH: [uss-tableflip](https://github.com/canonical/uss-tableflip). +* If you don't yet have a gpg key set up, follow the instructions + [here](https://help.launchpad.net/YourAccount/ImportingYourPGPKey) to create a key, + publish it to `hkp://keyserver.ubuntu.com`, and import it into Launchpad. +* Before you run `sbuild-it` for the first time, you'll need to set up a chroot for each Ubuntu release. + Run the following to set up chroots with dependencies pre-installed for each release: + ```bash + apt-get install sbuild-launchpad-chroot + bash ./tools/setup_sbuild.sh # This will give you usage information on how to call it with the correct parameters + ``` +* You must have Launchpad already properly configured in your system in order to upload packages to the PPAs. Follow [this guide](https://help.launchpad.net/Packaging/PPA/Uploading) to get set up. + +* In order to run the `ppa` command, install `ppa-dev-tools` from `bryce`'s PPA: + ```bash + sudo add-apt-repository ppa:bryce/ppa-dev-tools + sudo apt update + sudo apt install ppa-dev-tools + ``` + When running `ppa` for the first time, there will be another round of launchpad authorization to be performed. + +## I. Preliminary/staging release to team infrastructure +1. Create a release PR: + + a. Move the desired commits from our `main` branch onto the desired release branch. + + * This step is currently not well defined. We currently are using `release-27` for all `27.X` releases and have been cherry-picking/rebasing all commits from `main` into this branch for a release. + + b. Create a new entry in the `debian/changelog` file: + + * You can do that by running `dch --newversion `. + * Remember to update the release from `UNRELEASED` to the ubuntu/devel release. Edit the version to look like: `27.2`, with the appropriate pro-client version number. + * Populate `debian/changelog` with the commits you have cherry-picked. + * You can do that by running `git log .. | log2dch` + * This will generate a list of commits that could be included in the changelog. + * You don't need to include all of the commits generated. Remember that the changelog should + be read by the user to understand the new features/modifications in the package. If you + think a commit will not add that much to the user experience, you can drop it from the + changelog. + * To structure the changelog you can use the other entries as example. But we basically try to + keep this order: debian changes, new features/modifications, testing. Within each section, bullet points should be alphabetized. + + c. Create a PR on GitHub into the release branch. Ask in the ~UA channel on Mattermost for review. + + d. When reviewing the release PR, please use the following guidelines when reviewing the new changelog entry: + + * Is the version correctly updated? We must ensure that the new version in the changelog is + correct and it also targets the latest Ubuntu release at the moment. + * Is the entry useful for the user? 
The changelog entries should be user focused, meaning + that we should only add entries that we think users will care about (i.e. we don't need + entries when fixing a test, as this doesn't provide meaningful information to the user). + * Is this entry redundant? Sometimes we may have changes that affect separate modules of the + code. We should have an entry only for the module that was most affected by it. + * Is the changelog entry unique? We need to verify that the changelog entry is not already + reflected in an earlier version of the changelog. If it is, we need not only to remove but double + check the process we are using to cherry-pick the commits. + * Is this entry actually reflected in the code? Sometimes, we can have changelog entries + that are not reflected in the code anymore. This can happen during development when we are + still unsure about the behaviour of a feature or when we fix a bug that removes the code + that was added. We must verify each changelog entry that is added to be sure of their + presence in the product. + +2. After the release PR is merged, tag the head of the release branch with the version number, e.g., `27.1`. Push this tag to GitHub. + +3. Build the package for all Ubuntu releases and upload to `ppa:ua-client/staging`: + + a. Clone the repository into a clean directory and switch to the release branch. + * *WARNING* Build the package in a clean environment. The reason is that the package + will contain everything that is present in the folder. If you are storing credentials or + other sensible development information in your folder, they will be uploaded too when we send + the package to the ppa. A clean environment is the safest way to perform this. + + b. Edit the changelog + * List yourself as the author of this release. + * Edit the version number to look like: `27.2~rc1` (`~rc`) + * Edit the Ubuntu release name. Start with the ubuntu/devel release. + * `git add debian/changelog && git commit -m "throwaway"` - Do **not** push this commit! + + c. `build-package` + * This script will generate all the package artefacts in the parent directory as `../out`. + + d. `sbuild-it ../out/.dsc` + * If this succeeds move on. If this fails, debug and fix before continuing. + + e. Repeat 3.b through 3.d for all supported Ubuntu Releases + * The version for series other than devel should be in the form `~~rc` + This means you must add the release number in the changelog. For example, suppose + the devel version is `1.1~rc1`. If you want to build for jammy now, change it to + `1.1~22.04~rc1`. + + f. For each release, dput to the staging PPA: + * `dput ppa:ua-client/staging ../out/_source.changes` + * After each `dput` wait for the "Accepted" email from Launchpad before moving on. + +## II. Release to Ubuntu (devel and SRU) + +> **Note** +> `kinetic` is used throughout as a reference to the current devel release. This will change. + +1. Prepare SRU Launchpad bugs. + + a. We do this even before a successful merge into ubuntu/devel because the context added to these bugs is useful for the Server Team reviewer. + + b. Create a new bug on Launchpad for ubuntu-advantage-tools and use the format defined [here](https://wiki.ubuntu.com/UbuntuAdvantageToolsUpdates#SRU_Template) for the description. + * The title should be in the format `[SRU] ubuntu-advantage-tools (27.1 -> 27.2) Xenial, Bionic, Focal, Jammy`, substituting version numbers and release names as necessary. 
+ * If any of the changes for the SRU is in the [Early Review Sign-off list](../references/early_review_signoff.md), include a pointer in the `[Discussion]` section to where the discussion/approval of that feature took place (if possible). + + c. For each Launchpad bug fixed by this release (which should all be referenced in our changelog), add the SRU template to the description and fill out each section. + * Leave the original description in the bug at the bottom under the header `[Original Description]`. + * For the testing steps, include steps to reproduce the bug. Then include instructions for adding `ppa:ua-client/staging`, and steps to verify the bug is no longer present. + +2. Set up the Merge Proposal (MP) for ubuntu/devel: + + a. `git-ubuntu clone ubuntu-advantage-tools; cd ubuntu-advantage-tools` + + b. `git remote add upstream git@github.com:canonical/ubuntu-advantage-client.git` + + c. `git fetch upstream` + + d. `git rebase --onto pkg/ubuntu/devel ` + * e.g. `git rebase --onto pkg/ubuntu/devel 27.0.2 27.1` + * You may need to resolve conflicts, but hopefully these will be minimal. + * You'll end up in a detached state. + + e. `git checkout -B upload--kinetic` + * This creates a new local branch name based on your detached branch. + + f. `git push upload--kinetic` + + g. On Launchpad, create a merge proposal for this version which targets `ubuntu/devel` + * For an example, see the [27.14.1 merge proposal](https://code.launchpad.net/~renanrodrigo/ubuntu/+source/ubuntu-advantage-tools/+git/ubuntu-advantage-tools/+merge/439507). + * Add 2 review slots for `canonical-server-reporter` and `canonical-server-core-reviewers`. + + h. With the packages published to `ppa:ua-client/staging`, add links to the autopkgtest triggers to the Merge Proposal. The reviewer will have permission to trigger those tests. The links can be obtained by running `ppa tests -r -a ua-client/staging -L` + * Make sure to post links to all the architectures built for a given release. + * The riscv64 autopkgtests are not avaialble and don't need to be included. + * The `ppa test` command will have two variations of tests: the regular one, and one with `all-proposed=1`; only the regular test need to be there. + +3. Server Team Review and Pre-SRU Review + + a. Ask the assigned ubuntu-advantage-tools reviewer/sponsor from Server team for a review of your MPs. If you don't know who that is, ask in ~Server. Include a link to the ubuntu/devel MP and to the SRU bug. + + b. If they request changes, create a PR into the release branch on GitHub and ask Pro Client team for review. After that is merged, cherry-pick the commit into your `upload--` branch and push to launchpad. Then notify the Server Team member that you have addressed their requests. + * Some issues may just be filed for addressing in the future if they are not urgent or pertinent to this release. + * Unless the changes are very minor, or only testing related, you should upload a new release candidate version to `ppa:ua-client/staging` as described in I.3. + * After the release is finished, any commits that were merged directly into the release branch in this way should be brought back into `main` via a single PR. + + c. Once review is complete and approved, the Server Team member should **not** upload the version to the devel release. + * If they do, then any changes to the code after this point will require a bump in the patch version of the release. + + d. Now ask the SRU team for a pre-SRU review of the same MP. 
Mention that the exact same code will be released to all stable Ubuntu releases. + * Follow instructions in `II.4.b` if they request any changes. + + e. Once the SRU team member gives a pre-SRU approval, create the branches for each stable release. They should be named `upload--`. + * The versions for the stable releases must include `~` + * If you've followed the instructions precisely so far, you can just run `bash tools/create-lp-release-branches.sh`. + - When using the `create-lp-release-branches.sh` script, an important parameter is `SRU_BUG`: + - In the vast majority of cases, this should be set to the overall SRU bug written in step II.1.b. + - In the case where an existing SRU never got released, and a new patch version was uploaded on top of it to fix a new bug discovered during review, then the bug should still be the overall SRU bug. + - If the release is exclusively a bugfix release and the previous version has already been successfully released all the way through the SRU process, then the bug should instead be the specific bugfix number. + + f. Ask Server team member sponsor to upload to devel, and then the SRU proposed queue using the stable release branches you just created. + * Ask them to tag the PR with the appropriate `upload/` tag so git-ubuntu will import rich commit history. + * If they do not have upload rights to the proposed queue, ask in ~Server channel for a Ubuntu Server team member with upload rights for an upload review of the MP for the proposed queue. + + g. Check the [-proposed release queue](https://launchpad.net/ubuntu/xenial/+queue?queue_state=1&queue_text=ubuntu-advantage-tools) for presence of ubuntu-advantage-tools in unapproved state for each supported release. Note: libera chat #ubuntu-release IRC channel has a bot that reports queued uploads of any package in a message like "Unapproved: ubuntu-advantage-tools .. version". + + h. Tell the SRU team member who performed the pre-SRU review that the packages are in the -proposed release queue. They will need to actually approve the package to move into -proposed. + +4. -proposed verification and release to -updates + + a. As soon as the SRU vanguard approves the packages, a bot in #ubuntu-release will announce that ubuntu-advantage-tools is accepted into the applicable -proposed pockets, or the [Xenial -proposed release rejection queue](https://launchpad.net/ubuntu/xenial/+queue?queue_state=4&queue_text=ubuntu-advantage-tools) will contain a reason for rejections. Double check the SRU process bug for any actionable review feedback. + * Once accepted into `-proposed` by an SRU vanguard [ubuntu-advantage-tools shows up in the pending_sru page](https://people.canonical.com/~ubuntu-archive/pending-sru.html), check `rmadison ubuntu-advantage-tools | grep -proposed` to see if the upload exists in -proposed yet. + * Also actually check that the packages are accessible in a container by [enabling proposed](https://wiki.ubuntu.com/Testing/EnableProposed) and updating the package. + + b. With the package in proposed, perform the steps from `I.3` above but use a `~stableppaX` suffix instead of `~rcX` in the version name, and upload to `ppa:ua-client/stable` instead of staging. + + c. Perform the [Ubuntu-advantage-client SRU verification steps](https://wiki.ubuntu.com/UbuntuAdvantageToolsUpdates). This typically involves running all behave targets with `UACLIENT_BEHAVE_ENABLE_PROPOSED=1 UACLIENT_BEHAVE_CHECK_VERSION=` and saving the output. 
+ * There may also be one-time test scripts added in the `sru/` directory for this release. + + d. After all tests have passed, tarball all of the output files and upload them to the SRU bug with a message that looks like this: + + ``` + We have run the full ubuntu-advantage-tools integration test suite against the version in -proposed. The results are attached. All tests passed. + + You can verify the correct version was used by checking the output of the first test in each file, which prints the version number. + + I am marking the verification done for this SRU. + ``` + + Change the tags on the bug from `verification-needed` to `verification-done` (including the verification tags for each Ubuntu release). + + e. For any other related Launchpad bugs that are fixed in this release, perform the verification steps necessary for those bugs and mark them `verification-done` as needed. This will likely involve following the test steps, but instead of adding the staging PPA, enabling -proposed. + + f. Once all SRU bugs are tagged as `verification*-done`, all SRU-bugs should be listed as green in [the pending_sru page](https://people.canonical.com/~ubuntu-archive/pending-sru.html). + + g. After the pending SRU page says that ubuntu-advantage-tools has been in proposed for 7 days, it is now time to ping the [current SRU vanguard](https://wiki.ubuntu.com/StableReleaseUpdates#Publishing) for acceptance of ubuntu-advantage-tools into -updates. + + h. Check `rmadison ubuntu-advantage-tools` for updated version in -updates. + * Also actually check that the packages are accessible in a container and updating the package. + +## III. Post-release updates + +1. Ensure the version tag is correct on GitHub. The `version` git tag should point to the commit that was released as that version to ubuntu -updates. If changes were made in response to feedback during the release process, the tag may have to be moved. +2. Bring in any changes that were made to the release branch into `main` via PR (e.g., changelog edits). +3. Move any scripts added in `sru/` to a new folder in `sru/_archive` for the release. +4. Tell CPC that there is a new version of `ubuntu-advantage-tools` in -updates for all series. diff -Nru ubuntu-advantage-tools-27.14.4~18.04/dev-docs/howtoguides/testing.md ubuntu-advantage-tools-28.1~18.04/dev-docs/howtoguides/testing.md --- ubuntu-advantage-tools-27.14.4~18.04/dev-docs/howtoguides/testing.md 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/dev-docs/howtoguides/testing.md 2023-06-01 18:49:33.000000000 +0000 @@ -139,13 +139,13 @@ Based on some rough testing in July 2021, these are the situations when you should set UACLIENT_BEHAVE_SNAPSHOT_STRATEGY=1 -> At time of writing, starting a lxd.vm instance from a local snapshot takes -> longer than starting a fresh lxd.vm instance and installing ua. +> At time of writing, starting a lxd-vm instance from a local snapshot takes +> longer than starting a fresh lxd-vm instance and installing ua. | machine_type | condition | | ------------- | ------------------ | -| lxd.container | num_scenarios > 7 | -| lxd.vm | never | +| lxd-container | num_scenarios > 7 | +| lxd-vm | never | | gcp | num_scenarios > 5 | | azure | num_scenarios > 14 | | aws | num_scenarios > 11 | @@ -175,17 +175,6 @@ additional token obtained from https://ubuntu.com/pro needs to be set: - UACLIENT_BEHAVE_CONTRACT_TOKEN= -By default, the public AMIs for Ubuntu Pro testing used for each Ubuntu -release are defined in features/aws-ids.yaml. 
These ami-ids are determined by -running `./tools/refresh-aws-pro-ids`. - -Integration tests will read features/aws-ids.yaml to determine which default -AMI id to use for each supported Ubuntu release. - -To update `features/aws-ids.yaml`, run `./tools/refresh-aws-pro-ids` and put up -a pull request against this repo to updated that content from the ua-contracts -marketplace definitions. - * To manually run EC2 integration tests with a specific AMI Id provide the following environment variable to launch your specific AMI instead of building a daily ubuntu-advantage-tools image. diff -Nru ubuntu-advantage-tools-27.14.4~18.04/dev-docs/references/directory_layout.md ubuntu-advantage-tools-28.1~18.04/dev-docs/references/directory_layout.md --- ubuntu-advantage-tools-27.14.4~18.04/dev-docs/references/directory_layout.md 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/dev-docs/references/directory_layout.md 2023-06-01 18:49:33.000000000 +0000 @@ -9,12 +9,11 @@ | ./uaclient/ | collection of python modules which will be packaged into ubuntu-advantage-tools package to deliver the Ubuntu Pro Client CLI | | uaclient.entitlements | Service-specific \*Entitlement class definitions which perform enable, disable, status, and entitlement operations etc. All classes derive from base.py:UAEntitlement and many derive from repo.py:RepoEntitlement | | ./uaclient/cli.py | The entry-point for the command-line client -| ./uaclient/clouds/ | Cloud-platform detection logic used in Ubuntu Pro to determine if a given should be auto-attached to a contract | +| ./uaclient/clouds/ | Cloud-platform detection logic used in Ubuntu Pro to determine if a given instance should be auto-attached to a contract | | uaclient.contract | Module for interacting with the Contract Server API | | uaclient.messages | Module that contains the messages delivered by `pro` to the user | | uaclient.security | Module that hold the logic used to run `pro fix` commands | | ./apt-hook/ | the C++ apt-hook delivering MOTD and apt command notifications about Ubuntu Pro support services | -| ./apt-conf.d/ | apt config files delivered to /etc/apt/apt-conf.d to automatically allow unattended upgrades of ESM security-related components. If apt proxy settings are configured, an additional apt config file will be placed here to configure the apt proxy. | | /etc/ubuntu-advantage/uaclient.conf | Configuration file for the Ubuntu Pro Client.| | /var/lib/ubuntu-advantage/private | `root` read-only directory containing Contract API responses, machine-tokens and service credentials | | /var/lib/ubuntu-advantage/machine-token.json | `world` readable file containing redacted Contract API responses, machine-tokens and service credentials | @@ -23,6 +22,7 @@ ## Note We have two `machine-token.json` files, located at: + - /var/lib/ubuntu-advantage/private/machine-token.json - /var/lib/ubuntu-advantage/machine-token.json diff -Nru ubuntu-advantage-tools-27.14.4~18.04/dev-docs/references/enabling_a_service.md ubuntu-advantage-tools-28.1~18.04/dev-docs/references/enabling_a_service.md --- ubuntu-advantage-tools-27.14.4~18.04/dev-docs/references/enabling_a_service.md 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/dev-docs/references/enabling_a_service.md 2023-06-01 18:49:33.000000000 +0000 @@ -17,5 +17,5 @@ machine to describe whether a service is applicable for an environment and what configuration is required to properly enable that service. 
-Any interactions with the Contract server API are defined as UAContractClient +Any interactions with the Contract Server API are defined as UAContractClient class methods in [uaclient/contract.py](../../uaclient/contract.py). diff -Nru ubuntu-advantage-tools-27.14.4~18.04/dev-docs/references/terminology.md ubuntu-advantage-tools-28.1~18.04/dev-docs/references/terminology.md --- ubuntu-advantage-tools-27.14.4~18.04/dev-docs/references/terminology.md 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/dev-docs/references/terminology.md 2023-06-01 18:49:33.000000000 +0000 @@ -5,8 +5,8 @@ | Term | Meaning | | -------- | -------- | -| Ubuntu Pro Client | The python command line client represented in this ubuntu-advantage-client repository. It is installed on each Ubuntu machine and is the entry-point to enable any Ubuntu Pro commercial service on an Ubuntu machine. | -| Contract Server | The backend service exposing a REST API to which Ubuntu Pro Client authenticates in order to obtain contract and commercial service information and manage which support services are active on a machine.| +| Ubuntu Pro Client | The python command line client represented in this ubuntu-pro-client repository. It is installed on each Ubuntu machine and is the entry-point to enable any Ubuntu Pro commercial service on an Ubuntu machine | +| Contract Server | The backend service exposing a REST API to which Ubuntu Pro Client authenticates in order to obtain contract and commercial service information and manage which support services are active on a machine | | Entitlement/Service | An Ubuntu Pro commercial support service such as FIPS, ESM, Livepatch, CIS-Audit to which a contract may be entitled | | Affordance | Service-specific list of applicable architectures and Ubuntu series on which a service can run | | Directives | Service-specific configuration values which are applied to a service when enabling that service | diff -Nru ubuntu-advantage-tools-27.14.4~18.04/dev-docs/references/version_string_formatting.md ubuntu-advantage-tools-28.1~18.04/dev-docs/references/version_string_formatting.md --- ubuntu-advantage-tools-27.14.4~18.04/dev-docs/references/version_string_formatting.md 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/dev-docs/references/version_string_formatting.md 2023-05-30 19:02:35.000000000 +0000 @@ -5,10 +5,10 @@ | Build target | Version Format | | --------------------------------------------------------------------------------- | ------------------------------------------ | | [Daily PPA](https://code.launchpad.net/~canonical-server/+recipe/ua-client-daily) | `XX.YY-~g~ubuntu22.04.1` | -| Staging PPA | `XX.YY~22.04.1~rc1` | -| Stable PPA | `XX.YY~22.04.1~stableppa1` | -| Archive release | `XX.YY~22.04.1` | -| Archive bugfix release | `XX.YY.Z~22.04.1` | +| Staging PPA | `XX.YY~22.04~rc1` | +| Stable PPA | `XX.YY~22.04~stableppa1` | +| Archive release | `XX.YY~22.04` | +| Archive bugfix release | `XX.YY.Z~22.04` | ## Supported upgrade paths on same upstream version @@ -18,10 +18,10 @@ | Upgrade path | Version diff example | | ------------------------------- | ----------------------------------------------------------------------- | -| Staging to Next Staging rev | `31.4~22.04.1~rc1` ➜ `31.4~22.04.1~rc2` | -| Staging to Stable | `31.4~22.04.1~rc2` ➜ `31.4~22.04.1~stableppa1` | -| Stable to Next Stable rev | `31.4~22.04.1~stableppa1` ➜ `31.4~22.04.1~stableppa2` | -| Stable to Archive | `31.4~22.04.1~stableppa2` ➜ `31.4~22.04.1` | -| LTS Archive to Next 
LTS Archive | `31.4~22.04.1` ➜ `31.4~24.04.1` | -| Archive to Daily | `31.4~24.04.1` ➜ `31.4-1500~g75fa134~ubuntu24.04.1` | +| Staging to Next Staging rev | `31.4~22.04~rc1` ➜ `31.4~22.04~rc2` | +| Staging to Stable | `31.4~22.04~rc2` ➜ `31.4~22.04~stableppa1` | +| Stable to Next Stable rev | `31.4~22.04~stableppa1` ➜ `31.4~22.04~stableppa2` | +| Stable to Archive | `31.4~22.04~stableppa2` ➜ `31.4~22.04` | +| LTS Archive to Next LTS Archive | `31.4~22.04` ➜ `31.4~24.04` | +| Archive to Daily | `31.4~24.04` ➜ `31.4-1500~g75fa134~ubuntu24.04.1` | | Daily to Next Daily | `31.4-1500~g75fa134~ubuntu24.04.1` ➜ `31.4-1501~g3836375~ubuntu24.04.1` | diff -Nru ubuntu-advantage-tools-27.14.4~18.04/dev-docs/references/what_happens_during_attach.md ubuntu-advantage-tools-28.1~18.04/dev-docs/references/what_happens_during_attach.md --- ubuntu-advantage-tools-27.14.4~18.04/dev-docs/references/what_happens_during_attach.md 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/dev-docs/references/what_happens_during_attach.md 2023-06-01 18:49:33.000000000 +0000 @@ -1,7 +1,7 @@ ### What happens during attach After running the command `pro attach TOKEN`, Ubuntu Pro Client will perform the following steps: -* read the config from /etc/ubuntu-advantage/uaclient.conf to obtain +* Read the config from /etc/ubuntu-advantage/uaclient.conf to obtain the contract\_url (default: https://contracts.canonical.com) * POSTs to the Contract Server API @ /api/v1/context/machines/token providing the \ diff -Nru ubuntu-advantage-tools-27.14.4~18.04/dev-requirements.txt ubuntu-advantage-tools-28.1~18.04/dev-requirements.txt --- ubuntu-advantage-tools-27.14.4~18.04/dev-requirements.txt 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/dev-requirements.txt 2023-06-01 18:49:33.000000000 +0000 @@ -1,6 +1,6 @@ # The black, isort and shellcheck-py versions are also in .pre-commit-config.yaml; # make sure to update both together black==22.3.0 -isort==5.8.0 +isort==5.12.0 pre-commit shellcheck-py==0.8.0.4 diff -Nru ubuntu-advantage-tools-27.14.4~18.04/docs/conf.py ubuntu-advantage-tools-28.1~18.04/docs/conf.py --- ubuntu-advantage-tools-27.14.4~18.04/docs/conf.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/docs/conf.py 2023-06-01 18:49:33.000000000 +0000 @@ -38,6 +38,8 @@ templates_path = ["_templates"] +html_extra_path = ["googleaf254801a5285c31.html", "sitemap-index.xml"] + # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. 
@@ -100,7 +102,7 @@
html_css_files = [
    "css/logo.css",
    "css/github_issue_links.css",
-    "css/custom.css"
+    "css/custom.css",
]
html_js_files = [
    "js/github_issue_links.js",
diff -Nru ubuntu-advantage-tools-27.14.4~18.04/docs/explanations/apt_messages.md ubuntu-advantage-tools-28.1~18.04/docs/explanations/apt_messages.md
--- ubuntu-advantage-tools-27.14.4~18.04/docs/explanations/apt_messages.md 2023-04-05 15:14:00.000000000 +0000
+++ ubuntu-advantage-tools-28.1~18.04/docs/explanations/apt_messages.md 2023-05-30 19:02:35.000000000 +0000
@@ -25,7 +25,7 @@
## LTS series with esm-apps service disabled
-When you are running `apt upgraded` on a LTS release, like Focal, we advertise
+When you are running `apt upgrade` on an LTS release, like Focal, we advertise
the `esm-apps` service if packages could be upgraded by enabling the service:
```
diff -Nru ubuntu-advantage-tools-27.14.4~18.04/docs/explanations/cves_and_usns_explained.md ubuntu-advantage-tools-28.1~18.04/docs/explanations/cves_and_usns_explained.md
--- ubuntu-advantage-tools-27.14.4~18.04/docs/explanations/cves_and_usns_explained.md 1970-01-01 00:00:00.000000000 +0000
+++ ubuntu-advantage-tools-28.1~18.04/docs/explanations/cves_and_usns_explained.md 2023-05-30 19:02:35.000000000 +0000
@@ -0,0 +1,44 @@
+# CVEs and USNs explained
+
+## What is a CVE?
+
+Common Vulnerabilities and Exposures (CVEs) are a way to catalogue and track public security
+vulnerabilities in a given piece of software. Every CVE is identified through a unique identifier,
+for example [CVE-2023-0465](https://www.cve.org/CVERecord?id=CVE-2023-0465).
+
+CVEs are maintained by the [MITRE Corporation](https://cve.mitre.org/) and the goal of the project
+is to provide naming conventions for publicly known security issues while also maintaining a
+centralised repository for all of these security issues. This makes it easier for an organization to
+submit a new security flaw through the CVE convention while also analysing any other existing CVEs
+in the database.
+
+You can search for any existing CVE related to Ubuntu using
+[the Ubuntu CVE page](https://ubuntu.com/security/cves).
+
+## What is a USN?
+
+An Ubuntu Security Notice (USN) is the way that Canonical publicly catalogues and displays security
+vulnerabilities for Ubuntu packages. Usually, a USN is composed of one or more
+[CVEs](#what-is-a-cve) and it also contains update instructions to fix the issue, if a fix is
+already available.
+
+USNs follow a naming convention of the format: [USN-5963-1](https://ubuntu.com/security/notices/USN-5963-1).
+
+You can search for any existing USN using
+[the Ubuntu Security Notices page](https://ubuntu.com/security/notices).
+
+## What are related USNs?
+
+A USN is composed of different CVEs. If the same CVE appears on multiple USNs, we say that those USNs are related.
+In the following image, we can see a visual representation of that concept, where USN-789 and USN-321
+are related USNs because both are affected by CVE-2:
+
+![Related USN example](../images/usn-related.png)
+
+
+A real example can be seen in [USN-5573-1](https://ubuntu.com/security/notices/USN-5573-1).
+In the section **Related notices**, it shows that both **USN-5570-1**
+and **USN-5570-2** are related to **USN-5573-1**.
+
+This information is useful for users who want to tackle
+all related USNs at once, making sure that a CVE is fully fixed on their Ubuntu machine.
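+As a purely illustrative sketch (this is not code from the Ubuntu Pro Client, and the USN and CVE
+identifiers below are made up, loosely mirroring the image above), two notices are "related" exactly
+when their CVE sets overlap:
+
+```python
+# Hypothetical USN -> CVEs mapping; identifiers are invented for the example.
+usn_cves = {
+    "USN-123": {"CVE-1", "CVE-2"},
+    "USN-789": {"CVE-2", "CVE-3"},
+    "USN-321": {"CVE-2"},
+}
+
+
+def related_usns(usn: str) -> set:
+    """Return every other USN that shares at least one CVE with `usn`."""
+    return {
+        other
+        for other, cves in usn_cves.items()
+        if other != usn and cves & usn_cves[usn]
+    }
+
+
+print(sorted(related_usns("USN-123")))  # ['USN-321', 'USN-789']
+```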
diff -Nru ubuntu-advantage-tools-27.14.4~18.04/docs/explanations/how_to_interpret_output_of_unattended_upgrades.md ubuntu-advantage-tools-28.1~18.04/docs/explanations/how_to_interpret_output_of_unattended_upgrades.md
--- ubuntu-advantage-tools-27.14.4~18.04/docs/explanations/how_to_interpret_output_of_unattended_upgrades.md 1970-01-01 00:00:00.000000000 +0000
+++ ubuntu-advantage-tools-28.1~18.04/docs/explanations/how_to_interpret_output_of_unattended_upgrades.md 2023-05-30 19:02:35.000000000 +0000
@@ -0,0 +1,82 @@
+# How to interpret the output of unattended-upgrades
+
+In Pro Client version 27.14~, we introduced the `u.unattended_upgrades.status.v1` endpoint.
+This endpoint is designed to provide users with an overview of the configuration and setup for
+unattended-upgrades on the machine. The expected output follows this JSON example:
+
+```json
+{
+  "_schema_version": "v1",
+  "data": {
+    "attributes": {
+      "apt_periodic_job_enabled": true,
+      "package_lists_refresh_frequency_days": 1,
+      "systemd_apt_timer_enabled": true,
+      "unattended_upgrades_allowed_origins": [
+        "${distro_id}:${distro_codename}",
+        "${distro_id}:${distro_codename}-security",
+        "${distro_id}ESMApps:${distro_codename}-apps-security",
+        "${distro_id}ESM:${distro_codename}-infra-security"
+      ],
+      "unattended_upgrades_disabled_reason": null,
+      "unattended_upgrades_frequency_days": 1,
+      "unattended_upgrades_last_run": null,
+      "unattended_upgrades_running": true
+    },
+    "meta": {
+      "environment_vars": [],
+      "raw_config": {
+        "APT::Periodic::Enable": "1",
+        "APT::Periodic::Unattended-Upgrade": "1",
+        "APT::Periodic::Update-Package-Lists": "1",
+        "Unattended-Upgrade::Allowed-Origins": [
+          "${distro_id}:${distro_codename}",
+          "${distro_id}:${distro_codename}-security",
+          "${distro_id}ESMApps:${distro_codename}-apps-security",
+          "${distro_id}ESM:${distro_codename}-infra-security"
+        ]
+      }
+    },
+    "type": "UnattendedUpgradesStatus"
+  },
+  "errors": [],
+  "result": "success",
+  "version": "27.14~16.04.1",
+  "warnings": []
+}
+```
+
+As we can see from this output, we have a variable named `unattended_upgrades_running`. That variable
+indicates if unattended-upgrades is properly configured and running on the machine.
+The value of this field will only be `true` if *ALL* of the following prerequisites are also true:
+
+* *`apt_periodic_job_enabled` is true*: That variable indicates if the APT::Periodic::Enable configuration variable
+  is turned on. If it is turned off, unattended-upgrades will not automatically run on the machine.
+* *`package_lists_refresh_frequency_days` is non-zero*: That variable shows the value of APT::Periodic::Update-Package-Lists.
+  This configuration defines the daily frequency for updating package sources in the background. If it has a zero value, this step will never
+  happen and unattended-upgrades might not be able to install new versions of the packages.
+* *`systemd_apt_timer_enabled` is true*: This variable is true if both `apt-daily.timer` and `apt-daily-upgrade.timer` are running
+  on the machine. These timers are the ones that control when unattended-upgrades runs. The first job, `apt-daily.timer`, is responsible
+  for triggering the code that downloads the latest package information on the system. The second job, `apt-daily-upgrade.timer`, is
+  responsible for running unattended-upgrades to install the latest versions of the packages. If one of these jobs is disabled,
+  unattended-upgrades might not work as expected.
+* *`unattended_upgrades_allowed_origins` is not empty*: This variable defines the origins that + unattended-upgrades can use to install a package. If that list is empty, no packages can be + installed and unattended-upgrades will not work as expected. +* *`unattended_upgrades_frequency_days` is non-zero*: That variable shows the value of + APT::Periodic::Unattended-Upgrade. This configuration defines the daily frequency for running + unattended-upgrades in the background. Therefore, if it has a zero value, the command will never + run. + + +If any of those conditions are not met, the variable +*unattended_upgrades_disabled_reason* will contain an object explaining why unattended-upgrades is +not running. For example, if `package_lists_refresh_frequency_days` has a zero value, we will see +the following value for *unattended_upgrades_disabled_reason*: + +```json +{ + "msg": "APT::Periodic::Update-Package-Lists is turned off", + "code": "unattended-upgrades-cfg-value-turned-off" +} +``` diff -Nru ubuntu-advantage-tools-27.14.4~18.04/docs/explanations/how_to_interpret_the_security_status_command.md ubuntu-advantage-tools-28.1~18.04/docs/explanations/how_to_interpret_the_security_status_command.md --- ubuntu-advantage-tools-27.14.4~18.04/docs/explanations/how_to_interpret_the_security_status_command.md 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/docs/explanations/how_to_interpret_the_security_status_command.md 2023-06-01 18:49:33.000000000 +0000 @@ -1,7 +1,200 @@ # What does `security-status` do? -The `security-status` command is used to get an overview of the packages -installed on your machine. +The `security-status` command provides an overview of all the packages +installed on your machine, and the security coverage that applies to those +packages. + +The output of the `security-status` command varies, depending on the configuration of the machine you run it on. In this article, we'll take a look at the different outputs of `security-status` and the situations in which you might see them. + +## Command output + +If you run the `pro security-status` command, the first blocks of information +you see look like: + +``` +2871 packages installed: + 2337 packages from Ubuntu Main/Restricted repository + 504 packages from Ubuntu Universe/Multiverse repository + 8 packages from third parties + 22 packages no longer available for download + +To get more information about the packages, run + pro security-status --help +for a list of available options. +``` + +Those are counts for the `apt` packages installed in the system, sorted +between the packages in main, universe, third party packages, and packages +that are no longer available. You will also see a hint to run +`pro security-status --help` to get more information. + +### `apt update` hint + +To get accurate package information, the `apt` caches must be up to date. If +your cache was not updated recently, you may see a message in the output with +a hint to update. + +``` +The system apt cache may be outdated. Make sure to run + sudo apt-get update +to get the latest package information from apt. +``` + +### LTS coverage + +If `esm-infra` is disabled in your system, main/restricted packages will be +covered during the LTS period - this information is presented right after the +hints. A covered system will present this message: + +``` +This machine is receiving security patching for Ubuntu Main/Restricted +repository until . 
+``` + +On a system where the LTS period ended, you'll see: + +``` +This machine is NOT receiving security patches because the LTS period has ended +and esm-infra is not enabled. +``` + +### Ubuntu Pro coverage + +An Ubuntu Pro subscription provides more security coverage than a standard LTS. +The next blocks of information are related to Ubuntu Pro itself: + +``` +This machine is attached to an Ubuntu Pro subscription. + +Main/Restricted packages are receiving security updates from +Ubuntu Pro with 'esm-infra' enabled until 2032. + +Universe/Multiverse packages are receiving security updates from +Ubuntu Pro with 'esm-apps' enabled until 2032. You have received 21 security +updates. +``` + +This system is already attached to Pro! It is a Jammy machine, which has +installed some updates from `esm-apps`. Running the same command on a Xenial +system without Pro enabled, the output looks like: + +``` +This machine is NOT attached to an Ubuntu Pro subscription. + +Ubuntu Pro with 'esm-infra' enabled provides security updates for +Main/Restricted packages until 2026. There are 170 pending security updates. + +Ubuntu Pro with 'esm-apps' enabled provides security updates for +Universe/Multiverse packages until 2026. There is 1 pending security update. + +Try Ubuntu Pro with a free personal subscription on up to 5 machines. +Learn more at https://ubuntu.com/pro +``` + +There are lots of `esm-infra` updates for this machine, and even an `esm-apps` +update. The hint in the end of the output has a link to the main Pro website, +so the user can learn more about Pro and get their subscription. + +### Interim releases + +If you are running an interim release, the output is slightly different because +there are no Ubuntu Pro services available. You will still see the package +counts and support period though - your main/restricted packages are supported +for 9 months from the release date. + +``` +613 packages installed: + 601 packages from Ubuntu Main/Restricted repository + 12 packages from Ubuntu Universe/Multiverse repository + +To get more information about the packages, run + pro security-status --help +for a list of available options. + +Main/Restricted packages receive updates until 1/2024. + +Ubuntu Pro is not available for non-LTS releases. +``` + +### Optional flags for specific package sets + +Some flags can be passed to `security-status` to get information about coverage +of specific package sets. As an example, let's look at the output of +`pro security-status --esm-infra`: + +``` +442 packages installed: + 441 packages from Ubuntu Main/Restricted repository + +Main/Restricted packages are receiving security updates from +Ubuntu Pro with 'esm-infra' enabled until 2026. You have received 3 security +updates. There are 160 pending security updates. + +Run 'pro help esm-infra' to learn more + +Installed packages with an available esm-infra update: +( ... list of packages ... ) + +Installed packages with an esm-infra update applied: +( ... list of packages ... ) + +Further installed packages covered by esm-infra: +( ... list of packages ... ) + +For example, run: + apt-cache show tcpdump +to learn more about that package. 
+``` + +Besides the support information of main/restricted (which Ubuntu Pro with +`esm-infra` extends) there are lists of: +- packages which have some updated version available in esm-infra repositories +- packages which have an installed version from the esm-infra repositories +- packages which are covered by esm-infra + +You will see a similar output when running `pro security-status --esm-apps`, +but with information regarding universe/multiverse packages. + +You can also get a list of the third-party packages installed in the system: + +``` +$ pro security-status --thirdparty +2871 packages installed: + 8 packages from third parties + +Packages from third parties are not provided by the official Ubuntu +archive, for example packages from Personal Package Archives in Launchpad. + +Packages: +( ... list of packages ... ) + +For example, run: + apt-cache show +to learn more about that package. +``` + +And also a list of unavailable packages (which no longer have any installation +source): + +``` +$ pro security-status --unavailable +2871 packages installed: + 22 packages no longer available for download + +Packages that are not available for download may be left over from a +previous release of Ubuntu, may have been installed directly from a +.deb file, or are from a source which has been disabled. + +Packages: +( ... list of packages ... ) + + +For example, run: + apt-cache show +to learn more about that package. +``` + +## Machine-readable output If you run the `pro security-status --format yaml` command on your machine, you should expect to see an output that follows this structure: @@ -41,10 +234,10 @@ Patched: true ``` -Let's understand what each key means in the output of the `pro security-status` -command: +Let's understand what each key means in the output of the +`pro security-status --format yaml` command: -## `summary` +### `summary` This provides a summary of the system related to Ubuntu Pro and the different package sources in the system: @@ -102,7 +295,7 @@ * **`entitled_services`**: A list of services that are entitled on your Ubuntu Pro subscription. If unattached, this will always be an empty list. -## `packages` +### `packages` This provides a list of security updates for packages installed on the system. Every entry on the list will follow this structure: @@ -123,7 +316,7 @@ * **`download_size`**: The number of bytes that would be downloaded in order to install the update. -## `livepatch` +### `livepatch` This displays Livepatch-related information. Currently, the only information presented is **`fixed_cves`**. This represents a list of CVEs that were fixed diff -Nru ubuntu-advantage-tools-27.14.4~18.04/docs/explanations/motd_messages.md ubuntu-advantage-tools-28.1~18.04/docs/explanations/motd_messages.md --- ubuntu-advantage-tools-27.14.4~18.04/docs/explanations/motd_messages.md 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/docs/explanations/motd_messages.md 2023-05-30 19:02:35.000000000 +0000 @@ -2,21 +2,51 @@ When the Ubuntu Pro Client (`pro`) is installed on the system, it delivers custom messages on ["Message of the Day" (MOTD)](https://wiki.debian.org/motd). -Those messages are generated directly by two different sources. +Those messages are generated directly by three different sources. 
-## Python-scripted MOTD
+* MOTD about available updates
+* MOTD about important subscription conditions
+* MOTD about ESM being available
+
+## MOTD about available updates

The [update-notifier](https://wiki.ubuntu.com/UpdateNotifier) delivers a script
-called `apt_check.py`. With regards to Ubuntu Pro, this script is responsible
-for:
-
+via the `update-notifier-common` package called
+`/usr/lib/update-notifier/apt_check.py`.
+With regards to Ubuntu Pro, this script is responsible for:
+
* Informing the user about the status of one of the ESM services; `esm-apps` if
  the machine is an LTS series, or `esm-infra` if the series is in ESM mode.
* Showing the number of `esm-infra` or `esm-apps` packages that can be upgraded
  on the machine.

-For example, here is the output of the `apt_check.py` script on a LTS machine
-when both of those services are enabled:
+`update-notifier` has always added information about potential updates to
+MOTD to raise user awareness. With the advent of Ubuntu Pro, these messages are
+just more differentiated.
+
+Note that if you run `apt_check.py` directly it might give you rather
+unreadable output, as it is meant for programmatic use. You can add `--human-readable`
+to see the information as it would be presented in MOTD.
+
+### Machine is unattached
+
+On a machine that runs an Ubuntu release for which the `esm-apps` service
+is available, but that is not yet attached to an Ubuntu Pro subscription, there will
+be a message notifying the user that there may be more security updates
+available through ESM Apps.
+
+```
+Expanded Security Maintenance for Applications is not enabled.
+
+0 updates can be applied immediately.
+
+Enable ESM Apps to receive additional future security updates.
+See https://ubuntu.com/esm or run: sudo pro status
+```
+
+### Machine is fully attached
+
+In the opposite situation, if an LTS machine has the `esm-infra` and `esm-apps` services enabled, then users will see the following output in MOTD:

```
Expanded Security Maintenance for Applications is enabled.
@@ -28,8 +58,16 @@
To see these additional updates run: apt list --upgradable
```

-However, if we were running this on an ESM series, we would instead see
-`esm-infra` being advertised:
+### Machine is fully attached, on an older release
+
+Above you have seen examples of recent (as in "still in their first 5
+years of support") Ubuntu releases, where the hint is about ESM Apps
+extending the coverage to the universe repositories.
+
+However, if running on an Ubuntu release that is already past the initial
+5 years of support and has thereby entered Expanded Security Maintenance
+(["ESM"](https://ubuntu.com/security/esm)), we would instead see
+`esm-infra` (which provides coverage for another 5 years) being shown:

```
Expanded Security Maintenance Infrastructure is enabled.
@@ -41,17 +79,19 @@
To see these additional updates run: apt list --upgradable
```

+### Partial service enablement
+
Now let's consider a scenario where one of these services is not enabled.
For example, if `esm-apps` was disabled, the output will be:

```
Expanded Security Maintenance for Applications is not enabled.
-
+
6 updates can be applied immediately.
1 of these updates is a ESM Infra security update.
5 of these updates are standard security updates.
To see these additional updates run: apt list --upgradable - + 5 additional security updates can be applied with ESM Apps Learn more about enabling ESM Apps for Ubuntu 16.04 at https://ubuntu.com/16-04 @@ -62,13 +102,13 @@ information for `esm-infra` if the service was disabled and the series running on the machine is in ESM state. -## MOTD through Ubuntu Pro timer jobs +## MOTD about important subscription conditions -One of the timer jobs Ubuntu Pro uses can insert additional messages into MOTD. -These messages will be always delivered before or after the content created by -the Python script delivered by `update-notifier`. These additional messages are -generated when `pro` detects that certain conditions on the machine have been -met. They are: +One of the [timer jobs](https://canonical-ubuntu-pro-client.readthedocs-hosted.com/en/latest/explanations/what_are_the_timer_jobs.html) +Ubuntu Pro uses can insert additional messages into MOTD. +These messages will be always delivered next to the content created by +`update-notifier`. These additional messages are generated when `pro` +detects that certain conditions on the machine have been met. They are: ### Subscription expired @@ -104,8 +144,69 @@ Your grace period will expire in 9 days. ``` -### How are these messages updated and inserted into MOTD? +## MOTD about ESM being available + +When Ubuntu Pro became generally available, a temporary announcement was made +through MOTD. This was intended to raise awareness of Pro now being available +and free for personal use, and was shown on systems that could be covered +by `esm-apps`. +It looked like: + +``` + * Introducing Expanded Security Maintenance for Applications. + Receive updates to over 25,000 software packages with your + Ubuntu Pro subscription. Free for personal use. + + https://ubuntu.com/pro +``` + +Since this message was intended as a limited-time announcement to coincide +with the release of Ubuntu Pro into general availability, it was removed in +27.14. + +## How are these messages inserted into MOTD and how can I disable them? + +Just as there are different purposes to the messages outlined above, +there are different sources producing these MOTD elements that one +sees at login. + +Those messages are considered important to ensure user awareness about +the free additional security coverage provided by Ubuntu Pro and about +not-yet-applied potential updates in general. Therefore it is generally not +recommended to disable them. But still, you can selectively disable them +by removing the config files that add them, as outlined below. + +Removing those files is considered a conffile change to customize a program +and they will stay removed even on future upgrades or re-installations of the +related packages. + +If you realize that you actually need them back you need +to reinstall the related packages and tell apt/dpkg to offer you to restore +those files via: + +``` +sudo apt install --reinstall -o Dpkg::Options::="--force-confask" ubuntu-advantage-tools update-notifier-common +``` + +## Source: MOTD about available updates + +1. `update-notifier-common` has a hook `/etc/apt/apt.conf.d/99update-notifier` that runs after `apt update`. +2. That hook will update the information in `/var/lib/update-notifier/updates-available` matching the new package information that was just fetched by using `/usr/lib/update-notifier/apt-check --human-readable`. +3. 
At MOTD generation time, the script located at `/etc/update-motd.d/90-updates-available` checks if `/var/lib/update-notifier/updates-available` exists and if it does, inserts the message into the full MOTD. + +If you want to disable any message of update-notifier (not just related to Ubuntu Pro and ESM) about potentially available updates remove `/etc/update-motd.d/90-updates-available`. + +## Source: MOTD about important subscription conditions -1. The contract status is checked periodically in the background when the machine is attached to an Ubuntu Pro contract. -2. If one of the above messages applies to the contract that the machine is attached to, then the message is stored in `/var/lib/ubuntu-advantage/messages/motd-contract-status`. +1. The subscription status is checked periodically in the background when the machine is attached to an Ubuntu Pro subscription. +2. If one of the above conditions applies to the subscription that the machine is attached to (there are no messages generated by this for unattached machines), then the message is stored in `/var/lib/ubuntu-advantage/messages/motd-contract-status`. 3. At MOTD generation time, the script located at `/etc/update-motd.d/91-contract-ua-esm-status` checks if `/var/lib/ubuntu-advantage/messages/motd-contract-status` exists and if it does, inserts the message into the full MOTD. + +If you want to disable any message about important conditions of your attached subscription remove `/etc/update-motd.d/91-contract-ua-esm-status`. + +## Source: MOTD about ESM being available + +1. `pro` checks regularly if a system would have `esm-apps` available to it and if so places a message in `/var/lib/ubuntu-advantage/messages/motd-esm-announce`. +2. At MOTD generation time, the script located at `/etc/update-motd.d/88-esm-announce` checks if `/var/lib/ubuntu-advantage/messages/motd-esm-announce` exists and if it does, inserts the message into the full MOTD. + +If you want to disable the ESM announcement remove `/etc/update-motd.d/88-esm-announce` (or upgrade to 27.14 or later which will remove it for you). diff -Nru ubuntu-advantage-tools-27.14.4~18.04/docs/explanations/status_columns.md ubuntu-advantage-tools-28.1~18.04/docs/explanations/status_columns.md --- ubuntu-advantage-tools-27.14.4~18.04/docs/explanations/status_columns.md 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/docs/explanations/status_columns.md 2023-05-30 19:02:35.000000000 +0000 @@ -101,3 +101,105 @@ It's important to keep in mind that any feature defined like this will be listed, even if it is invalid or typed the wrong way. Those appear in `status` output for informational and debugging purposes. + +## Machine-readable output + +The `pro status` command supports a `--format` flag with options including `json` and `yaml`. These result in a machine-readable form of the information presented by the `pro status` command. + +```{note} +`pro status` should return the same results whether using `sudo` or not, but earlier versions did not always do this. We recommend using `sudo` whenever possible. 
+``` + +For example, running `sudo pro status --format=json` on an attached machine may give you something like this: +```javascript +{ + "_doc": "Content provided in json response is currently considered Experimental and may change", + "_schema_version": "0.1", + "account": { + "created_at": "2000-01-02T03:04:05+06:00", + "id": "account_id", + "name": "Test" + }, + "attached": true, + "config": { ...effectiveConfiguration }, + "config_path": "/etc/ubuntu-advantage/uaclient.conf", + "contract": { + "created_at": "2000-01-02T03:04:05+06:00", + "id": "contract_id", + "name": "contract_name", + "products": [ "uaa-essential" ], + "tech_support_level": "essential" + }, + "effective": null, + "environment_vars": [...proClientEnvironmentVariables], + "errors": [], + "execution_details": "No Ubuntu Pro operations are running", + "execution_status": "inactive", + "expires": "9999-12-31T00:00:00+00:00", + "features": {}, + "machine_id": "machine_id", + "notices": [], + "result": "success", + "services": [ + { + "available": "yes", + "blocked_by": [], + "description": "Expanded Security Maintenance for Applications", + "description_override": null, + "entitled": "yes", + "name": "esm-apps", + "status": "enabled", + "status_details": "Ubuntu Pro: ESM Apps is active", + "warning": null + }, + { + "available": "yes", + "blocked_by": [], + "description": "Expanded Security Maintenance for Infrastructure", + "description_override": null, + "entitled": "yes", + "name": "esm-infra", + "status": "enabled", + "status_details": "Ubuntu Pro: ESM Infra is active", + "warning": null + }, + { + "available": "yes", + "blocked_by": [], + "description": "Canonical Livepatch service", + "description_override": null, + "entitled": "yes", + "name": "livepatch", + "status": "enabled", + "status_details": "", + "warning": null + }, + ...otherServiceStatusObjects + ], + "simulated": false, + "version": "27.13.6~18.04.1", + "warnings": [] +} +``` + +Some particularly important attributes in the output include: +* `attached`: This boolean value indicates whether this machine is attached to an Ubuntu Pro account. This does not tell you if any particular service (e.g. `esm-infra`) is enabled. You must check the individual service item in the `services` list for that status (described below). +* `expires`: This is the date that the Ubuntu Pro subscription is valid until (in RFC3339 format). After this date has passed the machine should be treated as if not attached and no services are enabled. `attached` may still say `true` and services may still say they are `entitled` and `enabled`, but if the `expires` date has passed, you should assume the services are not functioning. +* `services`: This is a list of Ubuntu Pro services. Each item has its own attributes. Widely applicable services include those with `name` equal to `esm-infra`, `esm-apps`, and `livepatch`. Some important fields in each service object are: + * `name`: The name of the service. + * `entitled`: A boolean indicating whether the attached Ubuntu Pro account is allowed to enable this service. + * `status`: A string indicating the service's current status on the machine. Any value other than `enabled` should be treated as if the service is not enabled and not working properly on the machine. Possible values are: + * `enabled`: The service is enabled and working. + * `disabled`: The service can be enabled but is not currently. + * `n/a`: The service cannot be enabled on this machine. + * `warning`: The service is supposed to be enabled but something is wrong. 
Check the `warning` field in the service item for additional information.
+
+For example, if you want to programmatically find the status of esm-infra on a particular machine, you can use the following command:
+```shell
+sudo pro status --format=json | jq '.services[] | select(.name == "esm-infra").status'
+```
+That command will print one of the `status` values defined above.
+
+```{attention}
+In a future version of Ubuntu Pro Client, there will be an [API](../references/api.md) function to access this information. For now, though, `pro status --format=json` is the recommended machine-readable interface to this data.
+```
diff -Nru ubuntu-advantage-tools-27.14.4~18.04/docs/explanations.rst ubuntu-advantage-tools-28.1~18.04/docs/explanations.rst
--- ubuntu-advantage-tools-27.14.4~18.04/docs/explanations.rst 2023-04-05 15:14:00.000000000 +0000
+++ ubuntu-advantage-tools-28.1~18.04/docs/explanations.rst 2023-06-02 19:38:08.000000000 +0000
@@ -27,6 +27,7 @@
:maxdepth: 1
explanations/how_to_interpret_the_security_status_command.md
+ explanations/how_to_interpret_output_of_unattended_upgrades.md
explanations/status_columns.md
explanations/what_refresh_does.md
@@ -48,6 +49,7 @@
.. toctree::
:maxdepth: 1
+ explanations/cves_and_usns_explained.md
explanations/what_are_the_timer_jobs.md
explanations/what_is_the_daemon.md
explanations/why_trusty_is_no_longer_supported.md
diff -Nru ubuntu-advantage-tools-27.14.4~18.04/docs/googleaf254801a5285c31.html ubuntu-advantage-tools-28.1~18.04/docs/googleaf254801a5285c31.html
--- ubuntu-advantage-tools-27.14.4~18.04/docs/googleaf254801a5285c31.html 1970-01-01 00:00:00.000000000 +0000
+++ ubuntu-advantage-tools-28.1~18.04/docs/googleaf254801a5285c31.html 2023-05-30 19:02:35.000000000 +0000
@@ -0,0 +1 @@
+google-site-verification: googleaf254801a5285c31.html
\ No newline at end of file
diff -Nru ubuntu-advantage-tools-27.14.4~18.04/docs/howtoguides/enable_fips.md ubuntu-advantage-tools-28.1~18.04/docs/howtoguides/enable_fips.md
--- ubuntu-advantage-tools-27.14.4~18.04/docs/howtoguides/enable_fips.md 2023-04-05 15:14:00.000000000 +0000
+++ ubuntu-advantage-tools-28.1~18.04/docs/howtoguides/enable_fips.md 2023-05-30 19:02:35.000000000 +0000
@@ -36,5 +36,5 @@
```
Installing FIPS packages
FIPS enabled
-A reboot is required to complete installl
+A reboot is required to complete install.
```
diff -Nru ubuntu-advantage-tools-27.14.4~18.04/docs/howtoguides/get_rid_of_corrupt_lock.md ubuntu-advantage-tools-28.1~18.04/docs/howtoguides/get_rid_of_corrupt_lock.md
--- ubuntu-advantage-tools-27.14.4~18.04/docs/howtoguides/get_rid_of_corrupt_lock.md 2023-04-05 15:14:00.000000000 +0000
+++ ubuntu-advantage-tools-28.1~18.04/docs/howtoguides/get_rid_of_corrupt_lock.md 2023-06-02 19:38:08.000000000 +0000
@@ -2,7 +2,7 @@
Some pro commands (`attach`, `enable`, `detach` and `disable`) will potentially change the
internal state of your system. Since those commands can run in parallel, we have a lock file
-mechanism to guarantee that only one of these commands can run at the same time. The lock follow
+mechanism to guarantee that only one of these commands can run at the same time.
The lock follows this pattern: ``` diff -Nru ubuntu-advantage-tools-27.14.4~18.04/docs/howtoguides/get_token_and_attach.md ubuntu-advantage-tools-28.1~18.04/docs/howtoguides/get_token_and_attach.md --- ubuntu-advantage-tools-27.14.4~18.04/docs/howtoguides/get_token_and_attach.md 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/docs/howtoguides/get_token_and_attach.md 2023-05-30 19:02:35.000000000 +0000 @@ -1,10 +1,21 @@ # How to get an Ubuntu Pro token and attach to a subscription +## Get an Ubuntu Pro token + Retrieve your Ubuntu Pro token from the -[Ubuntu Pro portal](https://ubuntu.com/pro/). You will log in with your "Single +[Ubuntu Pro portal](https://ubuntu.com/pro/). Log in with your "Single Sign On" credentials, the same credentials you use for https://login.ubuntu.com. -Note that you can obtain a free personal token, which provides you with access -to several of the Ubuntu Pro services. + +Being logged in you can then go to the +[Ubuntu Pro Dashboard](https://ubuntu.com/pro/dashboard) that is associated to +your user. It will show you all subscriptions currently available to you and +for each the associated token. + +Note that even without buying anything you can always obtain a free personal +token that way, which provides you with access to several of the Ubuntu Pro +services. + +## Attach to a subscription Once that token is obtained, to attach your machine to a subscription, just run: @@ -35,3 +46,26 @@ Once the Ubuntu Pro Client is attached to your Ubuntu Pro account, you can use it to activate various services, including: access to ESM packages, Livepatch, FIPS, and CIS. Some features are specific to certain LTS releases. + +## Control of auto-enabled services + +Your subscription controls which services are available to you and which ones +you can manage via the [Ubuntu Pro Dashboard](https://ubuntu.com/pro/dashboard). + +Recommended services are auto-enabled by default when attaching a system. +You can choose which of the available services will be automatically +enabled or disabled when you attach by toggling them in the +[Ubuntu Pro Dashboard](https://ubuntu.com/pro/dashboard). +Available services can always be enabled or disabled on the command line +with `pro enable` and `pro disable` after attaching. + +![Toggling recommended services in the Pro Dashboard](pro-dashboard-service-toggles.png) + +If your subscription does not permit you to change the default +enabled services via the Dashboard, or if you want to keep the +defaults but do not want to auto-enable any services while attaching a particular +machine, you can pass the `--no-auto-enable` flag to `attach` using the following command: + +``` +$ sudo pro attach YOUR_TOKEN --no-auto-enable +``` diff -Nru ubuntu-advantage-tools-27.14.4~18.04/docs/howtoguides/how_to_not_fix_related_usns.md ubuntu-advantage-tools-28.1~18.04/docs/howtoguides/how_to_not_fix_related_usns.md --- ubuntu-advantage-tools-27.14.4~18.04/docs/howtoguides/how_to_not_fix_related_usns.md 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/docs/howtoguides/how_to_not_fix_related_usns.md 2023-05-30 19:02:35.000000000 +0000 @@ -0,0 +1,65 @@ +# How to not fix related USNs + +When running the `pro fix` command for a USN, by default we also try to fix +any related USNs as well. To better understand the concept of related USNs, +you can refer to our [related USNs guide](../explanations/cves_and_usns_explained.md). 
+To make this clear, let's take a look into the following example: + +``` +USN-5573-1: rsync vulnerability +Found CVEs: + - https://ubuntu.com/security/CVE-2022-37434 + +Fixing requested USN-5573-1 +1 affected source package is installed: rsync +(1/1) rsync: +A fix is available in Ubuntu standard updates. +{ apt update && apt install --only-upgrade -y rsync } + +✔ USN-5573-1 is resolved. + +Found related USNs: +- USN-5570-1 +- USN-5570-2 + +Fixing related USNs: +- USN-5570-1 +No affected source packages are installed. + +✔ USN-5570-1 does not affect your system. + +- USN-5570-2 +1 affected source package is installed: zlib +(1/1) zlib: +A fix is available in Ubuntu standard updates. +{ apt update && apt install --only-upgrade -y zlib1g } + +✔ USN-5570-2 is resolved. + +Summary: +✔ USN-5573-1 [requested] is resolved. +✔ USN-5570-1 [related] does not affect your system. +✔ USN-5570-2 [related] is resolved. +``` + +We can see here that the `pro fix` command fixed the requested **USN-5573-1** while also +handling both **USN-5570-1** and **USN-5570-2**, which are related to the requested USN. +If you don't want to fix any related USNs during the `fix` operation, just use the +`--no-related` flag. By running the command `pro fix USN-5573-1 --no-related` we would get +the following output instead: + +``` +USN-5573-1: rsync vulnerability +Found CVEs: + - https://ubuntu.com/security/CVE-2022-37434 + +Fixing requested USN-5573-1 +1 affected source package is installed: rsync +(1/1) rsync: +A fix is available in Ubuntu standard updates. +{ apt update && apt install --only-upgrade -y rsync } + +✔ USN-5573-1 is resolved. +``` + +Note that we have not analysed or tried to fix any related USNs Binary files /tmp/tmpxrouh4zc/T2SEFE1u_f/ubuntu-advantage-tools-27.14.4~18.04/docs/howtoguides/pro-dashboard-service-toggles.png and /tmp/tmpxrouh4zc/jJBju4acwB/ubuntu-advantage-tools-28.1~18.04/docs/howtoguides/pro-dashboard-service-toggles.png differ diff -Nru ubuntu-advantage-tools-27.14.4~18.04/docs/howtoguides.rst ubuntu-advantage-tools-28.1~18.04/docs/howtoguides.rst --- ubuntu-advantage-tools-27.14.4~18.04/docs/howtoguides.rst 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/docs/howtoguides.rst 2023-06-02 19:38:08.000000000 +0000 @@ -59,6 +59,7 @@ :maxdepth: 1 Run `fix` in "dry run" mode + Skip fixing related USNs ``refresh`` ----------- Binary files /tmp/tmpxrouh4zc/T2SEFE1u_f/ubuntu-advantage-tools-27.14.4~18.04/docs/images/usn-related.png and /tmp/tmpxrouh4zc/jJBju4acwB/ubuntu-advantage-tools-28.1~18.04/docs/images/usn-related.png differ diff -Nru ubuntu-advantage-tools-27.14.4~18.04/docs/index.rst ubuntu-advantage-tools-28.1~18.04/docs/index.rst --- ubuntu-advantage-tools-27.14.4~18.04/docs/index.rst 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/docs/index.rst 2023-06-02 19:38:08.000000000 +0000 @@ -63,9 +63,10 @@ - **Having trouble?** We would like to help! To get help on a specific page in this documentation, - simply click on the "Have a question?" link at the top of that page. This + simply click on the "Give feedback" link at the top of that page. This will open up an issue in GitHub where you can tell us more about the problem - you're having and we will do our best to resolve it for you. + you're having or suggestion you'd like to make, and we will do our best to + resolve it for you. - **Found a bug?** You can `Report bugs on Launchpad`_! 
diff -Nru ubuntu-advantage-tools-27.14.4~18.04/docs/README.md ubuntu-advantage-tools-28.1~18.04/docs/README.md --- ubuntu-advantage-tools-27.14.4~18.04/docs/README.md 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/docs/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ -# How to generate Ubuntu Pro Client user documentation - -To build the docs for Ubuntu Pro Client, you can use a dedicated `tox` command for it. -You can install `tox` on your machine by running the `make test` command. Once tox is -installed just run the command: - -```console -$ tox -e docs -``` - -The command will generate the html pages inside `docs/build` diff -Nru ubuntu-advantage-tools-27.14.4~18.04/docs/references/api.md ubuntu-advantage-tools-28.1~18.04/docs/references/api.md --- ubuntu-advantage-tools-27.14.4~18.04/docs/references/api.md 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/docs/references/api.md 2023-06-01 18:49:33.000000000 +0000 @@ -75,6 +75,28 @@ You could do something similar by catching certain errors when using the `pro api` subcommand, but there are more cases that could indicate an old version, and it generally isn't recommended. + +### Errors and Warnings fields + +When using the API through the CLI, we use two distinct fields to list issues to the users; *errors* +and *warnings*. Both of those fields will contain a list of JSON objects explaining unexpected +behavior during the execution of a command. For example, the *errors* field will be populated like +this if we have a connectivity issue when running a `pro api` command: + +```json +[ + { + "msg": "Failed to connect to authentication server", + "code": "connectivity-error", + "meta": {} + } +] +``` + +Finally, *warnings* follow the exact same structure as *errors*. The only difference is that +*warnings* means that the command was able to complete although unexpected scenarios happened +when executing the command. + ## Available endpoints The currently available endpoints are: - [u.pro.version.v1](#uproversionv1) @@ -88,7 +110,10 @@ - [u.pro.security.status.reboot_required.v1](#uprosecuritystatusreboot_requiredv1) - [u.pro.packages.summary.v1](#upropackagessummaryv1) - [u.pro.packages.updates.v1](#upropackagesupdatesv1) +- [u.pro.status.is_attached.v1](#uprostatusis_attachedv1) +- [u.pro.status.enabled_services.v1](#uprostatusenabled_servicesv1) - [u.security.package_manifest.v1](#usecuritypackage_manifestv1) +- [u.unattended_upgrades.status.v1](#uunattended_upgradesstatusv1) ## u.pro.version.v1 @@ -753,6 +778,86 @@ } ``` +## u.pro.status.is_attached.v1 + +Introduced in Ubuntu Pro Client Version: `28~` + +Shows if the machine is attached to a Pro subscription. + +### Args + +This endpoint takes no arguments. + +### Python API interaction + +#### Calling from Python code + +```python +from uaclient.api.u.pro.status.is_attached.v1 import is_attached + +result = is_attached() +``` + +#### Expected return object: + +`uaclient.api.u.pro.status.is_attached.v1.IsAttachedResult` + +|Field Name|Type|Description| +|-|-|-| +|`is_attached`|*bool*|If the machine is attached to a Pro subscription| + +### CLI interaction + +#### Calling from the CLI: + +```bash +pro api u.pro.status.is_attached.v1 +``` + +## u.pro.status.enabled_services.v1 + +Introduced in Ubuntu Pro Client Version: `28~` + +Shows the Pro services that are enabled in the machine. + +### Args + +This endpoint takes no arguments. 
+
+### Python API interaction
+
+#### Calling from Python code
+
+```python
+from uaclient.api.u.pro.status.enabled_services.v1 import enabled_services
+
+result = enabled_services()
+```
+
+#### Expected return object:
+
+`uaclient.api.u.pro.status.enabled_services.v1.EnabledServicesResult`
+
+|Field Name|Type|Description|
+|-|-|-|
+|`enabled_services`|*List[EnabledService]*|A list of EnabledService objects|
+
+`uaclient.api.u.pro.status.enabled_services.v1.EnabledService`
+
+|Field Name|Type|Description|
+|-|-|-|
+|`name` |*str* |Name of the service |
+|`variant_enabled`|*bool* |If a variant of the service is enabled |
+|`variant_name` |*Optional[str]* |Name of the variant, if a variant is enabled|
+
+### CLI interaction
+
+#### Calling from the CLI:
+
+```bash
+pro api u.pro.status.enabled_services.v1
+```
+
## u.security.package_manifest.v1

Introduced in Ubuntu Pro Client Version: `27.12~`
@@ -801,3 +906,105 @@
"package_manifest":"package1\t1.0\npackage2\t2.3\n"
}
```
+
+## u.unattended_upgrades.status.v1
+
+Introduced in Ubuntu Pro Client Version: `27.14~`
+
+Returns the status of unattended-upgrades. The focus of the endpoint
+is to verify if the application is running and how it is configured on
+the machine.
+
+```{important}
+For this endpoint, we deliver a unique key under `meta` called `raw_config`. This field contains
+all related unattended-upgrades configurations, unparsed. This means that this field will preserve
+both the original names and values of those configurations.
+```
+
+### Args
+
+This endpoint takes no arguments.
+
+### Python API interaction
+
+#### Calling from Python code
+
+```python
+from uaclient.api.u.unattended_upgrades.status.v1 import status
+
+result = status()
+```
+
+#### Expected return object:
+
+`uaclient.api.u.unattended_upgrades.status.v1.UnattendedUpgradesStatusResult`
+
+|Field Name|Type|Description|
+|-|-|-|
+|`systemd_apt_timer_enabled`|*bool*|Indicates if the apt-daily.timer and apt-daily-upgrade.timer jobs are enabled|
+|`apt_periodic_job_enabled`|*bool*|Indicates if the APT::Periodic::Enable configuration is turned on|
+|`package_lists_refresh_frequency_days`|*int*|The value of the APT::Periodic::Update-Package-Lists configuration|
+|`unattended_upgrades_frequency_days`|*int*|The value of the APT::Periodic::Unattended-Upgrade configuration|
+|`unattended_upgrades_allowed_origins`|*List[str]*|The value of the Unattended-Upgrade::Allowed-Origins configuration|
+|`unattended_upgrades_running`|*bool*|Indicates if unattended-upgrades is correctly configured and running|
+|`unattended_upgrades_disabled_reason`|*object*|Object that explains why unattended-upgrades is not running.
If the application is running, the object will be null|
+|`unattended_upgrades_last_run`|`datetime.datetime`|The last time unattended-upgrades ran|
+
+`uaclient.api.u.unattended_upgrades.status.v1.UnattendedUpgradesStatusDisabledReason`
+
+|Field Name|Type|Description|
+|-|-|-|
+|`msg`|*str*|The reason why unattended-upgrades is not running on the system|
+|`code`|*str*|The message code associated with the message|
+
+### Raised exceptions
+
+- `UnattendedUpgradesError`: Raised in case we cannot run a necessary command to show the status
+  of unattended-upgrades.
+
+### CLI interaction
+
+#### Calling from the CLI:
+
+```bash
+pro api u.unattended_upgrades.status.v1
+```
+
+#### Expected attributes in JSON structure
+
+```json
+{
+  "apt_periodic_job_enabled": true,
+  "package_lists_refresh_frequency_days": 1,
+  "systemd_apt_timer_enabled": true,
+  "unattended_upgrades_allowed_origins": [
+    "${distro_id}:${distro_codename}",
+    "${distro_id}:${distro_codename}-security",
+    "${distro_id}ESMApps:${distro_codename}-apps-security",
+    "${distro_id}ESM:${distro_codename}-infra-security"
+  ],
+  "unattended_upgrades_disabled_reason": null,
+  "unattended_upgrades_frequency_days": 1,
+  "unattended_upgrades_last_run": null,
+  "unattended_upgrades_running": true
+}
+```
+
+#### Possible attributes in JSON meta field
+```json
+{
+  "meta": {
+    "environment_vars": [],
+    "raw_config": {
+      "APT::Periodic::Enable": "1",
+      "APT::Periodic::Unattended-Upgrade": "1",
+      "APT::Periodic::Update-Package-Lists": "1",
+      "Unattended-Upgrade::Allowed-Origins": [
+        "${distro_id}:${distro_codename}",
+        "${distro_id}:${distro_codename}-security",
+        "${distro_id}ESMApps:${distro_codename}-apps-security",
+        "${distro_id}ESM:${distro_codename}-infra-security"
+      ]
+    }
+  }
+}
+```
diff -Nru ubuntu-advantage-tools-27.14.4~18.04/docs/references/network_requirements.md ubuntu-advantage-tools-28.1~18.04/docs/references/network_requirements.md
--- ubuntu-advantage-tools-27.14.4~18.04/docs/references/network_requirements.md 2023-04-05 15:14:00.000000000 +0000
+++ ubuntu-advantage-tools-28.1~18.04/docs/references/network_requirements.md 2023-06-01 18:49:33.000000000 +0000
@@ -1,35 +1,41 @@
# Ubuntu Pro Client network requirements

-Using the Ubuntu Pro Client to enable support services will rely on network
-access to:
-
-- Obtain updated service credentials
-- Add APT repositories to install `deb` packages
-- Install [`snap` packages](https://snapcraft.io/about) when Livepatch is
-  enabled.
+The Ubuntu Pro Client (`pro`) and Ubuntu Pro services need to make network requests to certain services to function correctly.

```{seealso}
-
-You can also refer to our [Proxy Configuration guide](/../howtoguides/configure_proxies.md)
+You can also refer to our [Proxy Configuration guide](../howtoguides/configure_proxies.md)
to learn how to inform Ubuntu Pro Client of HTTP(S)/APT proxies.
```

-## Network-limited
+## Authentication
+`pro` needs to authenticate with Canonical servers to provision credentials for access to the individual Ubuntu Pro services.
+
+Necessary endpoints:
+- `contracts.canonical.com:443`

-Ensure the managed system has access to the following port:urls if in a
-network-limited environment:
-* `443:https://contracts.canonical.com/`: HTTP PUTs, GETs and POSTs for Ubuntu
-  Pro Client interaction.
-* `443:https://esm.ubuntu.com/\*`: APT repository access for most services.
+## APT package based services
+Many services are delivered via authenticated APT repositories.
These include: +- `esm-infra` and `esm-apps` +- `fips` and `fips-updates` +- `cis` and `usg` +- `cc-eal` +- `ros` and `ros-updates` +- `realtime-kernel` -## Enable kernel Livepatch +Necessary endpoints: +- `esm.ubuntu.com:443` -Enabling kernel Livepatch requires additional network egress: +## Livepatch +`livepatch` requires a `snap`-packaged client, so `snap`-related endpoints are necessary. The Livepatch client itself also requires network access to download the patches from the Livepatch server. +```{seealso} +The [snap documentation page](https://snapcraft.io/docs/network-requirements) may have more up-to-date information on snap-related network requirements. +``` +Necessary endpoints for `snap`: +- `api.snapcraft.io:443` +- `dashboard.snapcraft.io:443` +- `login.ubuntu.com:443` +- `*.snapcraftcontent.com:443` -* `snap` endpoints required in order to install and run snaps as defined in - [snap forum network-requirements post](https://forum.snapcraft.io/t/network-requirements/5147) -* `443:api.snapcraft.io` -* `443:dashboard.snapcraft.io` -* `443:login.ubuntu.com` -* `443:\*.snapcraftcontent.com` - Download CDNs +Necessary endpoints for `livepatch`: +- `livepatch.canonical.com:443` diff -Nru ubuntu-advantage-tools-27.14.4~18.04/docs/sitemap-index.xml ubuntu-advantage-tools-28.1~18.04/docs/sitemap-index.xml --- ubuntu-advantage-tools-27.14.4~18.04/docs/sitemap-index.xml 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/docs/sitemap-index.xml 2023-05-30 19:02:35.000000000 +0000 @@ -0,0 +1,8 @@ + + + https://canonical-ubuntu-pro-client.readthedocs-hosted.com/en/latest/ + weekly + 1.0 + + + diff -Nru ubuntu-advantage-tools-27.14.4~18.04/docs/_static/js/github_issue_links.js ubuntu-advantage-tools-28.1~18.04/docs/_static/js/github_issue_links.js --- ubuntu-advantage-tools-27.14.4~18.04/docs/_static/js/github_issue_links.js 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/docs/_static/js/github_issue_links.js 2023-05-30 19:02:35.000000000 +0000 @@ -2,7 +2,7 @@ const link = document.createElement("a"); link.classList.add("muted-link"); link.classList.add("github-issue-link"); - link.text = "Have a question?"; + link.text = "Give feedback"; link.href = ( "https://github.com/canonical/ubuntu-pro-client/issues/new?" 
+ "title=docs%3A+TYPE+YOUR+QUESTION+HERE" diff -Nru ubuntu-advantage-tools-27.14.4~18.04/docs/tutorials/create_a_fips_updates_pro_cloud_image.md ubuntu-advantage-tools-28.1~18.04/docs/tutorials/create_a_fips_updates_pro_cloud_image.md --- ubuntu-advantage-tools-27.14.4~18.04/docs/tutorials/create_a_fips_updates_pro_cloud_image.md 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/docs/tutorials/create_a_fips_updates_pro_cloud_image.md 2023-05-30 19:02:35.000000000 +0000 @@ -1,8 +1,8 @@ -# Customised Cloud Ubuntu Pro images with FIPS updates +# How to customise a cloud Ubuntu Pro image with FIPS updates ## Launch an Ubuntu Pro instance on your cloud -See the following links for up to date information for each supported Cloud: +See the following links for up to date information for each supported cloud: * https://ubuntu.com/aws/pro * https://ubuntu.com/azure/pro @@ -10,20 +10,20 @@ ## Enable FIPS updates -First, we need to wait for the standard Ubuntu Pro services to be set up: +Wait for the standard Ubuntu Pro services to be set up: ```bash sudo pro status --wait ``` -We can then use [the `enable` command](../howtoguides/enable_fips.md) to set up +Use [the `enable` command](../howtoguides/enable_fips.md) to set up FIPS updates. ```bash sudo pro enable fips-updates --assume-yes ``` -Now, we need to reboot the instance: +Now, reboot the instance: ```bash sudo reboot @@ -49,12 +49,12 @@ * [Azure](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/capture-image-resource) * [GCP](https://cloud.google.com/compute/docs/machine-images/create-machine-images) -## Launch your custom image! +## Launch your custom image -Use your specific Cloud to launch a new instance from your custom image. +Use your specific cloud to launch a new instance from the custom image. ````{note} -For versions prior to 27.11, you will need to re-enable `fips-updates` on each +For versions of the Ubuntu Pro Client prior to 27.11, you will need to re-enable `fips-updates` on each instance launched from the custom image. This won't require a reboot and is only necessary to ensure the instance gets @@ -64,7 +64,7 @@ sudo pro enable fips-updates --assume-yes ``` -You can easily script this using [cloud-init user data](https://cloudinit.readthedocs.io/en/latest/topics/modules.html#runcmd) at launch time: +This can be scripted using [cloud-init user data](https://cloudinit.readthedocs.io/en/latest/topics/modules.html#runcmd) at launch time: ```yaml #cloud-config # Enable fips-updates after pro auto-attach and reboot after cloud-init completes diff -Nru ubuntu-advantage-tools-27.14.4~18.04/docs/tutorials/fix_scenarios.md ubuntu-advantage-tools-28.1~18.04/docs/tutorials/fix_scenarios.md --- ubuntu-advantage-tools-27.14.4~18.04/docs/tutorials/fix_scenarios.md 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/docs/tutorials/fix_scenarios.md 2023-05-30 19:02:35.000000000 +0000 @@ -81,7 +81,9 @@ ``` CVE-2020-15180: MariaDB vulnerabilities https://ubuntu.com/security/CVE-2020-15180 + No affected source packages are installed. + ✔ CVE-2020-15180 does not affect your system. ``` @@ -110,10 +112,12 @@ ``` CVE-2020-25686: Dnsmasq vulnerabilities https://ubuntu.com/security/CVE-2020-25686 + 1 affected package is installed: dnsmasq (1/1) dnsmasq: A fix is available in Ubuntu standard updates. { apt update && apt install --only-upgrade -y dnsmasq } + ✔ CVE-2020-25686 is resolved. 
```
@@ -137,10 +141,12 @@
```
CVE-2020-25686: Dnsmasq vulnerabilities
https://ubuntu.com/security/CVE-2020-25686
+
1 affected package is installed: dnsmasq
(1/1) dnsmasq:
A fix is available in Ubuntu standard updates.
The update is already installed.
+
✔ CVE-2020-25686 is resolved.
```

@@ -151,30 +157,34 @@
that we know has no fix available by running:

```console
-$ sudo apt install -y libawl-php
+$ sudo apt-get install -y expat=2.1.0-7 swish-e matanza ghostscript
```

Now, we can confirm that there is no fix by running the following command:

```console
-$ pro fix USN-4539-1
+$ pro fix CVE-2017-9233
```

You will see the following output:

```
-USN-4539-1: AWL vulnerability
-Found CVEs:
-https://ubuntu.com/security/CVE-2020-11728
-1 affected source package is installed: awl
-(1/1) awl:
-Sorry, no fix is available.
-1 package is still affected: awl
-✘ USN-4539-1 is not resolved.
+CVE-2017-9233: Coin3D vulnerability
+ - https://ubuntu.com/security/CVE-2017-9233
+
+3 affected source packages are installed: expat, matanza, swish-e
+(1/3, 2/3) matanza, swish-e:
+Ubuntu security engineers are investigating this issue.
+(3/3) expat:
+A fix is available in Ubuntu standard updates.
+{ apt update && apt install --only-upgrade -y expat }
+
+2 packages are still affected: matanza, swish-e
+✘ CVE-2017-9233 is not resolved.
```

-As you can see, we are informed by `pro fix` that there is no fix available. In
-the last line, we can also see that the USN is not resolved.
+As you can see, we are informed by `pro fix` that some packages do not have a fix available. In
+the last line, we can also see that the CVE is not resolved.

## CVE/USN that require an Ubuntu Pro subscription

@@ -193,6 +203,8 @@
Found CVEs:
https://ubuntu.com/security/CVE-2021-22946
https://ubuntu.com/security/CVE-2021-22947
+
+Fixing requested USN-5079-2
1 affected package is installed: curl
(1/1) curl:
A fix is available in Ubuntu Pro: ESM Infra.
@@ -222,6 +234,7 @@
Found CVEs:
https://ubuntu.com/security/CVE-2021-22946
https://ubuntu.com/security/CVE-2021-22947
+
1 affected package is installed: curl
(1/1) curl:
A fix is available in Ubuntu Pro: ESM Infra.
@@ -258,22 +271,40 @@
Technical support level: essential
{ apt update && apt install --only-upgrade -y curl libcurl3-gnutls }
✔ USN-5079-2 is resolved.
+
+Found related USNs:
+- USN-5079-1
+
+Fixing related USNs:
+- USN-5079-1
+No affected source packages are installed.
+
+✔ USN-5079-1 does not affect your system.
+
+Summary:
+✔ USN-5079-2 [requested] is resolved.
+✔ USN-5079-1 [related] does not affect your system.
```

-We can see that that the attach command was successful, which can be verified
+We can see that this command also fixed the related USN **USN-5079-1**.
+If you want to learn more about related USNs, refer to [our explanation guide](../explanations/cves_and_usns_explained.md#what-are-related-usns).
+
+Finally, we can see that the attach command was successful, which can be verified
by the status output we see when executing the command. Additionally, we can
observe that the USN is indeed fixed, which you can confirm by running the
`pro fix` command again:

```
-N-5079-2: curl vulnerabilities
+USN-5079-2: curl vulnerabilities
Found CVEs:
https://ubuntu.com/security/CVE-2021-22946
https://ubuntu.com/security/CVE-2021-22947
+
1 affected package is installed: curl
(1/1) curl:
A fix is available in Ubuntu Pro: ESM Infra.
The update is already installed.
+
✔ USN-5079-2 is resolved.
``` @@ -308,6 +339,7 @@ ``` CVE-2021-44731: snapd vulnerabilities https://ubuntu.com/security/CVE-2021-44731 + 1 affected package is installed: snapd (1/1) snapd: A fix is available in Ubuntu Pro: ESM Infra. @@ -321,6 +353,7 @@ Updating package lists Ubuntu Pro: ESM Infra enabled { apt update && apt install --only-upgrade -y ubuntu-core-launcher snapd } + ✔ CVE-2021-44731 is resolved. ``` @@ -342,13 +375,15 @@ Then you will see the following output: ``` -VE-2022-0778: OpenSSL vulnerability +CVE-2022-0778: OpenSSL vulnerability https://ubuntu.com/security/CVE-2022-0778 + 1 affected package is installed: openssl (1/1) openssl: A fix is available in Ubuntu Pro: ESM Infra. { apt update && apt install --only-upgrade -y libssl1.0.0 openssl } A reboot is required to complete fix operation. + ✘ CVE-2022-0778 is not resolved. ``` @@ -358,10 +393,12 @@ ``` CVE-2022-0778: OpenSSL vulnerability https://ubuntu.com/security/CVE-2022-0778 + 1 affected package is installed: openssl (1/1) openssl: A fix is available in Ubuntu Pro: ESM Infra. The update is already installed. + ✔ CVE-2022-0778 is resolved. ``` @@ -390,6 +427,7 @@ ``` CVE-2017-9233: Expat vulnerability https://ubuntu.com/security/CVE-2017-9233 + 3 affected packages are installed: expat, matanza, swish-e (1/3, 2/3) matanza, swish-e: Sorry, no fix is available. @@ -397,6 +435,7 @@ A fix is available in Ubuntu standard updates. { apt update && apt install --only-upgrade -y expat } 2 packages are still affected: matanza, swish-e + ✘ CVE-2017-9233 is not resolved. ``` diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/airgapped.feature ubuntu-advantage-tools-28.1~18.04/features/airgapped.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/airgapped.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/airgapped.feature 2023-06-01 18:49:33.000000000 +0000 @@ -2,7 +2,7 @@ Feature: Performing attach using ua-airgapped @series.jammy - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached enable Common Criteria service in an ubuntu lxd container Given a `` machine with ubuntu-advantage-tools installed # set up the apt mirror configuration diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/api_configure_retry_service.feature ubuntu-advantage-tools-28.1~18.04/features/api_configure_retry_service.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/api_configure_retry_service.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/api_configure_retry_service.feature 2023-06-01 18:49:33.000000000 +0000 @@ -1,7 +1,7 @@ Feature: api.u.pro.attach.auto.configure_retry_service @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: v1 successfully triggers retry service when run during startup Given a `` machine with ubuntu-advantage-tools installed When I change contract to staging with sudo diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/api.feature ubuntu-advantage-tools-28.1~18.04/features/api.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/api.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/api.feature 2023-06-01 18:49:33.000000000 +0000 @@ -1,7 +1,7 @@ Feature: Client behaviour for the API endpoints @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: API invalid endpoint or args Given a `` machine with ubuntu-advantage-tools 
installed When I verify that running `pro api invalid.endpoint` `with sudo` exits `1` @@ -25,7 +25,7 @@ | lunar | @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Basic endpoints Given a `` machine with ubuntu-advantage-tools installed When I run `pro api u.pro.version.v1` with sudo @@ -43,6 +43,16 @@ """ {"_schema_version": "v1", "data": {"attributes": {"should_auto_attach": false}, "meta": {"environment_vars": \[\]}, "type": "ShouldAutoAttach"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} """ + When I run `ua api u.pro.status.is_attached.v1` with sudo + Then stdout matches regexp: + """ + {"_schema_version": "v1", "data": {"attributes": {"is_attached": false}, "meta": {"environment_vars": \[\]}, "type": "IsAttached"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} + """ + When I run `ua api u.pro.status.enabled_services.v1` with sudo + Then stdout matches regexp: + """ + {"_schema_version": "v1", "data": {"attributes": {"enabled_services": \[\]}, "meta": {"environment_vars": \[\]}, "type": "EnabledServices"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} + """ Examples: ubuntu release | release | diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/api_full_auto_attach.feature ubuntu-advantage-tools-28.1~18.04/features/api_full_auto_attach.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/api_full_auto_attach.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/api_full_auto_attach.feature 2023-05-30 19:02:35.000000000 +0000 @@ -27,7 +27,7 @@ """ Then stdout matches regexp: """ - livepatch +yes +(disabled|n/a) +Canonical Livepatch service + livepatch +yes +(disabled|n/a) +(Canonical Livepatch service|Current kernel is not supported) """ Examples: | release | diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/api_magic_attach.feature ubuntu-advantage-tools-28.1~18.04/features/api_magic_attach.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/api_magic_attach.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/api_magic_attach.feature 2023-06-01 18:49:33.000000000 +0000 @@ -1,7 +1,7 @@ Feature: Magic Attach endpoints @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Call magic attach endpoints Given a `` machine with ubuntu-advantage-tools installed When I change contract to staging with sudo diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/api_packages.feature ubuntu-advantage-tools-28.1~18.04/features/api_packages.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/api_packages.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/api_packages.feature 2023-06-01 18:49:33.000000000 +0000 @@ -1,7 +1,7 @@ Feature: Package related API endpoints @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container @uses.config.contract_token Scenario Outline: Call packages API endpoints to see information in a Ubuntu machine Given a `` machine with ubuntu-advantage-tools installed diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/api_security.feature ubuntu-advantage-tools-28.1~18.04/features/api_security.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/api_security.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/api_security.feature 2023-06-01 
18:49:33.000000000 +0000 @@ -1,7 +1,7 @@ Feature: API security/security status tests @series.xenial - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm @uses.config.contract_token Scenario: Call Livepatched CVEs endpoint Given a `xenial` machine with ubuntu-advantage-tools installed @@ -17,7 +17,7 @@ """ @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container @uses.config.contract_token Scenario Outline: Call package manifest endpoint for machine Given a `` machine with ubuntu-advantage-tools installed diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/api_unattended_upgrades.feature ubuntu-advantage-tools-28.1~18.04/features/api_unattended_upgrades.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/api_unattended_upgrades.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/api_unattended_upgrades.feature 2023-06-01 18:49:33.000000000 +0000 @@ -1,7 +1,7 @@ Feature: api.u.unattended_upgrades.status.v1 @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: v1 unattended upgrades status Given a `` machine with ubuntu-advantage-tools installed When I run `pro api u.unattended_upgrades.status.v1` as non-root diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/apt_messages.feature ubuntu-advantage-tools-28.1~18.04/features/apt_messages.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/apt_messages.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/apt_messages.feature 2023-06-01 18:49:33.000000000 +0000 @@ -1,7 +1,7 @@ Feature: APT Messages @series.xenial - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: APT JSON Hook prints package counts correctly on xenial Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -99,7 +99,7 @@ | xenial | accountsservice=0.6.40-2ubuntu10 libaccountsservice0=0.6.40-2ubuntu10 | curl=7.47.0-1ubuntu2 libcurl3-gnutls=7.47.0-1ubuntu2 | hello=2.10-1 | @series.xenial - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: APT Hook advertises esm-infra on upgrade Given a `` machine with ubuntu-advantage-tools installed When I run `apt-get update` with sudo @@ -157,7 +157,7 @@ @series.bionic @series.focal @series.jammy - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: APT Hook advertises esm-apps on upgrade Given a `` machine with ubuntu-advantage-tools installed When I run `apt-get update` with sudo @@ -175,7 +175,7 @@ Calculating upgrade... Get more security updates through Ubuntu Pro with 'esm-apps' enabled: - Learn more about Ubuntu Pro at https://ubuntu.com/pro + 0 upgraded, 0 newly installed, 0 to remove and \d+ not upgraded. """ When I run `apt-get upgrade` with sudo @@ -211,13 +211,13 @@ 0 upgraded, 0 newly installed, 0 to remove and \d+ not upgraded\. 
""" Examples: ubuntu release - | release | package | - | bionic | ansible | - | focal | hello | - | jammy | hello | + | release | package | learn_more_msg | + | bionic | ansible | Learn more about Ubuntu Pro for 18.04 at https://ubuntu.com/18-04 | + | focal | hello | Learn more about Ubuntu Pro at https://ubuntu.com/pro | + | jammy | hello | Learn more about Ubuntu Pro at https://ubuntu.com/pro | @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: APT News Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -613,60 +613,35 @@ @series.xenial @series.bionic + @series.focal + @uses.config.machine_type.any @uses.config.machine_type.aws.generic - Scenario Outline: AWS URLs - Given a `` machine with ubuntu-advantage-tools installed - When I run `apt-get update` with sudo - When I run `apt-get install ansible -y` with sudo - When I run `apt-get update` with sudo - When I run `apt upgrade --dry-run` with sudo - Then stdout matches regexp: - """ - - """ - Examples: ubuntu release - | release | msg | - | xenial | Learn more about Ubuntu Pro for 16\.04 at https:\/\/ubuntu\.com\/16-04 | - | bionic | Learn more about Ubuntu Pro on AWS at https:\/\/ubuntu\.com\/aws\/pro | - - @series.xenial - @series.bionic @uses.config.machine_type.azure.generic - Scenario Outline: Azure URLs - Given a `` machine with ubuntu-advantage-tools installed - When I run `apt-get update` with sudo - When I run `apt-get install ansible -y` with sudo - When I run `apt-get update` with sudo - When I run `apt upgrade --dry-run` with sudo - Then stdout matches regexp: - """ - - """ - Examples: ubuntu release - | release | msg | - | xenial | Learn more about Ubuntu Pro for 16\.04 on Azure at https:\/\/ubuntu\.com\/16-04\/azure | - | bionic | Learn more about Ubuntu Pro on Azure at https:\/\/ubuntu\.com\/azure\/pro | - - @series.xenial - @series.bionic @uses.config.machine_type.gcp.generic - Scenario Outline: GCP URLs - Given a `` machine with ubuntu-advantage-tools installed + Scenario Outline: Cloud and series-specific URLs + Given a `` `` machine with ubuntu-advantage-tools installed When I run `apt-get update` with sudo When I run `apt-get install ansible -y` with sudo When I run `apt-get update` with sudo When I run `apt upgrade --dry-run` with sudo - Then stdout matches regexp: + Then stdout contains substring: """ """ - Examples: ubuntu release - | release | msg | - | xenial | Learn more about Ubuntu Pro for 16\.04 at https:\/\/ubuntu\.com\/16-04 | - | bionic | Learn more about Ubuntu Pro on GCP at https:\/\/ubuntu\.com\/gcp\/pro | + Examples: release-per-machine-type + | release | machine_type | msg | + | xenial | aws.generic | Learn more about Ubuntu Pro for 16.04 at https://ubuntu.com/16-04 | + | xenial | azure.generic | Learn more about Ubuntu Pro for 16.04 on Azure at https://ubuntu.com/16-04/azure | + | xenial | gcp.generic | Learn more about Ubuntu Pro for 16.04 at https://ubuntu.com/16-04 | + | bionic | aws.generic | Learn more about Ubuntu Pro for 18.04 at https://ubuntu.com/18-04 | + | bionic | azure.generic | Learn more about Ubuntu Pro for 18.04 on Azure at https://ubuntu.com/18-04/azure | + | bionic | gcp.generic | Learn more about Ubuntu Pro for 18.04 at https://ubuntu.com/18-04 | + | focal | aws.generic | Learn more about Ubuntu Pro on AWS at https://ubuntu.com/aws/pro | + | focal | azure.generic | Learn more about Ubuntu Pro on Azure at https://ubuntu.com/azure/pro | + | focal | gcp.generic | Learn 
more about Ubuntu Pro on GCP at https://ubuntu.com/gcp/pro | @series.kinetic - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: APT Hook do not advertises esm-apps on upgrade for interim releases Given a `` machine with ubuntu-advantage-tools installed When I run `apt-get update` with sudo diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/attached_commands.feature ubuntu-advantage-tools-28.1~18.04/features/attached_commands.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/attached_commands.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/attached_commands.feature 2023-06-01 18:49:33.000000000 +0000 @@ -2,7 +2,7 @@ Feature: Command behaviour when attached to an Ubuntu Pro subscription @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached refresh in a ubuntu machine Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -60,7 +60,7 @@ | lunar | @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached disable of an already disabled service in a ubuntu machine Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -86,7 +86,7 @@ | lunar | @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached disable with json format Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -144,7 +144,7 @@ @series.xenial @series.bionic @series.jammy - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached disable of a service in a ubuntu machine Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -183,7 +183,7 @@ | jammy | Try cc-eal, esm-apps, esm-infra, fips, fips-updates, livepatch, realtime-kernel,\nros, ros-updates, usg. | @series.focal - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario: Attached disable of a service in a ubuntu machine Given a `focal` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -218,86 +218,91 @@ @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached detach in an ubuntu machine Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo + And I run `pro api u.pro.status.enabled_services.v1` as non-root + Then stdout matches regexp: + """ + {"_schema_version": "v1", "data": {"attributes": {"enabled_services": \[{"name": "esm-apps", "variant_enabled": false, "variant_name": null}, {"name": "esm-infra", "variant_enabled": false, "variant_name": null}\]}, "meta": {"environment_vars": \[\]}, "type": "EnabledServices"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} + """ Then I verify that running `pro detach` `as non-root` exits `1` And stderr matches regexp: - """ - This command must be run as root \(try using sudo\). - """ + """ + This command must be run as root \(try using sudo\). + """ When I run `pro detach --assume-yes` with sudo Then I will see the following on stdout: - """ - Detach will disable the following services: - esm-apps - esm-infra - Updating package lists - Updating package lists - This machine is now detached. 
- """ + """ + Detach will disable the following services: + esm-apps + esm-infra + Updating package lists + Updating package lists + This machine is now detached. + """ When I run `pro status --all` as non-root Then stdout matches regexp: - """ - SERVICE +AVAILABLE DESCRIPTION - cc-eal + +Common Criteria EAL2 Provisioning Packages - """ + """ + SERVICE +AVAILABLE DESCRIPTION + cc-eal + +Common Criteria EAL2 Provisioning Packages + """ Then stdout matches regexp: - """ - esm-apps + +Expanded Security Maintenance for Applications - esm-infra +yes +Expanded Security Maintenance for Infrastructure - fips + +NIST-certified core packages - fips-updates + +NIST-certified core packages with priority security updates - livepatch +(yes|no) +(Canonical Livepatch service|Current kernel is not supported) - realtime-kernel + +Ubuntu kernel with PREEMPT_RT patches integrated - ros + +Security Updates for the Robot Operating System - ros-updates + +All Updates for the Robot Operating System - """ + """ + esm-apps + +Expanded Security Maintenance for Applications + esm-infra +yes +Expanded Security Maintenance for Infrastructure + fips + +NIST-certified core packages + fips-updates + +NIST-certified core packages with priority security updates + livepatch +(yes|no) +(Canonical Livepatch service|Current kernel is not supported) + realtime-kernel + +Ubuntu kernel with PREEMPT_RT patches integrated + ros + +Security Updates for the Robot Operating System + ros-updates + +All Updates for the Robot Operating System + """ Then stdout matches regexp: - """ - + +Security compliance and audit tools - """ + """ + + +Security compliance and audit tools + """ And stdout matches regexp: - """ - This machine is not attached to an Ubuntu Pro subscription. - """ + """ + This machine is not attached to an Ubuntu Pro subscription. 
+ """ And I verify that running `apt update` `with sudo` exits `0` When I attach `contract_token` with sudo Then I verify that running `pro enable foobar --format json` `as non-root` exits `1` And stdout is a json matching the `ua_operation` schema And I will see the following on stdout: - """ - {"_schema_version": "0.1", "errors": [{"message": "json formatted response requires --assume-yes flag.", "message_code": "json-format-require-assume-yes", "service": null, "type": "system"}], "failed_services": [], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} - """ + """ + {"_schema_version": "0.1", "errors": [{"message": "json formatted response requires --assume-yes flag.", "message_code": "json-format-require-assume-yes", "service": null, "type": "system"}], "failed_services": [], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} + """ Then I verify that running `pro enable foobar --format json` `with sudo` exits `1` And stdout is a json matching the `ua_operation` schema And I will see the following on stdout: - """ - {"_schema_version": "0.1", "errors": [{"message": "json formatted response requires --assume-yes flag.", "message_code": "json-format-require-assume-yes", "service": null, "type": "system"}], "failed_services": [], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} - """ + """ + {"_schema_version": "0.1", "errors": [{"message": "json formatted response requires --assume-yes flag.", "message_code": "json-format-require-assume-yes", "service": null, "type": "system"}], "failed_services": [], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} + """ Then I verify that running `pro detach --format json --assume-yes` `as non-root` exits `1` And stdout is a json matching the `ua_operation` schema And I will see the following on stdout: - """ - {"_schema_version": "0.1", "errors": [{"message": "This command must be run as root (try using sudo).", "message_code": "nonroot-user", "service": null, "type": "system"}], "failed_services": [], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} - """ + """ + {"_schema_version": "0.1", "errors": [{"message": "This command must be run as root (try using sudo).", "message_code": "nonroot-user", "service": null, "type": "system"}], "failed_services": [], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} + """ When I run `pro detach --format json --assume-yes` with sudo Then stdout is a json matching the `ua_operation` schema And I will see the following on stdout: - """ - {"_schema_version": "0.1", "errors": [], "failed_services": [], "needs_reboot": false, "processed_services": ["esm-apps", "esm-infra"], "result": "success", "warnings": []} - """ + """ + {"_schema_version": "0.1", "errors": [], "failed_services": [], "needs_reboot": false, "processed_services": ["esm-apps", "esm-infra"], "result": "success", "warnings": []} + """ Examples: ubuntu release | release | esm-apps | cc-eal | cis | fips | fips-update | ros | cis_or_usg | realtime-kernel | | xenial | yes | yes | yes | yes | yes | yes | cis | no | | bionic | yes | yes | yes | yes | yes | yes | cis | no | | focal | yes | no | yes | yes | yes | no | usg | no | - | jammy | yes | no | no | no | no | no | usg | yes | + | jammy | yes | no | yes | no | no | no | usg | yes | @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario 
Outline: Attached auto-attach in a ubuntu machine Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -323,7 +328,7 @@ | lunar | @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached show version in a ubuntu machine Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -346,7 +351,7 @@ | lunar | @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached status in a ubuntu machine with feature overrides Given a `` machine with ubuntu-advantage-tools installed When I create the file `/tmp/machine-token-overlay.json` with the following: @@ -416,7 +421,7 @@ @series.xenial @series.bionic - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached disable of different services in a ubuntu machine Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -463,7 +468,7 @@ | jammy | @series.focal - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario: Attached disable of different services in a ubuntu machine Given a `focal` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -504,7 +509,7 @@ """ @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Help command on an attached machine Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -611,7 +616,7 @@ @series.jammy @series.focal - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Help command on an attached machine Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -727,7 +732,7 @@ | jammy | @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Enable command with invalid repositories in user machine Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -753,7 +758,7 @@ | jammy | cloud-init-dev-ubuntu-daily-jammy | @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Run timer script on an attached machine Given a `` machine with ubuntu-advantage-tools installed When I run `systemctl stop ua-timer.timer` with sudo @@ -831,7 +836,7 @@ | lunar | @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Run timer script to valid machine activity endpoint Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -902,7 +907,7 @@ | jammy | @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Run timer script to valid machine activity endpoint Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/attached_enable.feature ubuntu-advantage-tools-28.1~18.04/features/attached_enable.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/attached_enable.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/attached_enable.feature 2023-06-01 18:49:33.000000000 +0000 @@ -4,7 +4,7 @@ @slow @series.xenial 
@series.bionic - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached enable Common Criteria service in an ubuntu lxd container Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -30,7 +30,7 @@ @series.xenial @series.bionic - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Enable cc-eal with --access-only Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -52,7 +52,7 @@ @series.jammy @series.kinetic @series.lunar - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached enable Common Criteria service in an ubuntu lxd container Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -75,7 +75,7 @@ | lunar | 23.04 | Lunar Lobster | @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Empty series affordance means no series, null means all series Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo and options `--no-auto-enable` @@ -126,7 +126,7 @@ | jammy | @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached enable of different services using json format Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -196,7 +196,7 @@ | jammy | cc-eal, esm-apps, esm-infra, fips, fips-updates, livepatch, realtime-kernel,\nros, ros-updates, usg. | @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached enable of a service in a ubuntu machine Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -251,7 +251,7 @@ | bionic | libkrad0 | https://esm.ubuntu.com/infra/ubuntu | @series.focal - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario: Attached enable of a service in a ubuntu machine Given a `focal` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -302,7 +302,7 @@ """ @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached enable of non-container services in a ubuntu lxd container Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -328,7 +328,7 @@ | lunar | @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached enable not entitled service in a ubuntu machine Given a `` machine with ubuntu-advantage-tools installed When I set the machine token overlay to the following yaml @@ -362,7 +362,7 @@ @series.xenial @series.bionic - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached enable of cis service in a ubuntu machine Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -446,7 +446,7 @@ | xenial | Canonical_Ubuntu_16.04_CIS_v1.1.0-harden.sh | @series.focal - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached enable of cis service in a ubuntu machine Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ 
-526,7 +526,7 @@ @series.bionic @series.xenial - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached enable of usg service in a ubuntu machine Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -547,7 +547,7 @@ | xenial | @series.focal - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached enable of usg service in a focal machine Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -608,7 +608,7 @@ @series.bionic @series.xenial - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm Scenario Outline: Attached disable of livepatch in a lxd vm Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -651,7 +651,7 @@ @series.xenial @series.bionic - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm Scenario Outline: Attach works when snapd cannot be installed Given a `` machine with ubuntu-advantage-tools installed When I run `apt-get remove -y snapd` with sudo @@ -690,7 +690,7 @@ @series.bionic @series.xenial - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm Scenario Outline: Attached enable livepatch Given a `` machine with ubuntu-advantage-tools installed When I verify that running `canonical-livepatch status` `with sudo` exits `1` @@ -722,7 +722,7 @@ @series.xenial - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm Scenario Outline: Attached enable livepatch Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -792,7 +792,7 @@ @slow @series.bionic - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm Scenario: Attached enable livepatch on a machine with fips active Given a `bionic` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -831,7 +831,7 @@ """ @series.bionic - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm Scenario: Attached enable fips on a machine with livepatch active Given a `bionic` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -863,7 +863,7 @@ @slow @series.xenial @series.bionic - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm Scenario Outline: Attached enable fips on a machine with livepatch active Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -905,7 +905,7 @@ @slow @series.xenial @series.bionic - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm Scenario Outline: Attached enable fips on a machine with fips-updates active Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -943,7 +943,7 @@ @series.xenial @series.bionic @uses.config.contract_token - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached enable ros on a machine Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -1158,7 +1158,7 @@ @series.xenial @uses.config.contract_token - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: APT auth file is edited correctly on enable Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -1188,7 +1188,7 @@ | xenial | @series.lts - 
@uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached enable esm-apps on a machine Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -1231,7 +1231,7 @@ | focal | ant | @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached enable with corrupt lock Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/attached_status.feature ubuntu-advantage-tools-28.1~18.04/features/attached_status.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/attached_status.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/attached_status.feature 2023-06-01 18:49:33.000000000 +0000 @@ -2,7 +2,7 @@ Feature: Attached status @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached status in a ubuntu machine - formatted Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -42,7 +42,7 @@ | lunar | @series.xenial - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Non-root status can see in-progress operations Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -83,7 +83,7 @@ @series.xenial @series.bionic - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached status in a ubuntu machine Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -101,6 +101,7 @@ ros +yes +disabled +Security Updates for the Robot Operating System ros-updates +yes +disabled +All Updates for the Robot Operating System + For a list of all Ubuntu Pro services, run 'pro status --all' Enable services with: pro enable """ When I verify root and non-root `pro status --all` calls have the same output @@ -128,7 +129,7 @@ | bionic | @series.focal - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached status in a ubuntu machine Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -143,6 +144,7 @@ fips-updates +yes +disabled +NIST-certified core packages with priority security updates usg +yes +disabled +Security compliance and audit tools + For a list of all Ubuntu Pro services, run 'pro status --all' Enable services with: pro enable """ When I verify root and non-root `pro status --all` calls have the same output @@ -169,7 +171,7 @@ | focal | @series.jammy - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached status in the latest LTS ubuntu machine Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -180,7 +182,9 @@ SERVICE +ENTITLED +STATUS +DESCRIPTION esm-apps +yes +enabled +Expanded Security Maintenance for Applications esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure + usg +yes +disabled +Security compliance and audit tools + For a list of all Ubuntu Pro services, run 'pro status --all' Enable services with: pro enable """ When I verify root and non-root `pro status --all` calls have the same output @@ -197,7 +201,7 @@ realtime-kernel +yes +n/a +Ubuntu kernel with PREEMPT_RT patches integrated 
ros +yes +n/a +Security Updates for the Robot Operating System ros-updates +yes +n/a +All Updates for the Robot Operating System - usg +yes +n/a +Security compliance and audit tools + usg +yes +disabled +Security compliance and audit tools Enable services with: pro enable """ diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/attach_invalidtoken.feature ubuntu-advantage-tools-28.1~18.04/features/attach_invalidtoken.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/attach_invalidtoken.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/attach_invalidtoken.feature 2023-06-01 18:49:33.000000000 +0000 @@ -2,7 +2,7 @@ Pro subscription using an invalid token @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attach command failure on invalid token Given a `` machine with ubuntu-advantage-tools installed When I verify that running `pro attach INVALID_TOKEN` `with sudo` exits `1` @@ -32,7 +32,7 @@ | lunar | @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container @uses.config.contract_token_staging_expired Scenario Outline: Attach command failure on expired token Given a `` machine with ubuntu-advantage-tools installed diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/attach_validtoken.feature ubuntu-advantage-tools-28.1~18.04/features/attach_validtoken.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/attach_validtoken.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/attach_validtoken.feature 2023-06-01 18:49:33.000000000 +0000 @@ -4,22 +4,35 @@ @series.kinetic @series.lunar - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached command in a non-lts ubuntu machine Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo - And I run `pro status --all` as non-root + And I run `pro status` as non-root Then stdout matches regexp: - """ - SERVICE +ENTITLED STATUS DESCRIPTION - cc-eal +yes +n/a +Common Criteria EAL2 Provisioning Packages - cis +yes +n/a +Security compliance and audit tools - esm-apps +yes +n/a +Expanded Security Maintenance for Applications - esm-infra +yes +n/a +Expanded Security Maintenance for Infrastructure - fips +yes +n/a +NIST-certified core packages - fips-updates +yes +n/a +NIST-certified core packages with priority security updates - livepatch +yes +n/a +Canonical Livepatch service - """ + """ + No Ubuntu Pro services are available to this system. 
+ """ + And stdout matches regexp: + """ + For a list of all Ubuntu Pro services, run 'pro status --all' + """ + When I run `pro status --all` as non-root + Then stdout matches regexp: + """ + SERVICE +ENTITLED STATUS DESCRIPTION + cc-eal +yes +n/a +Common Criteria EAL2 Provisioning Packages + cis +yes +n/a +Security compliance and audit tools + esm-apps +yes +n/a +Expanded Security Maintenance for Applications + esm-infra +yes +n/a +Expanded Security Maintenance for Infrastructure + fips +yes +n/a +NIST-certified core packages + fips-updates +yes +n/a +NIST-certified core packages with priority security updates + livepatch +yes +n/a +Canonical Livepatch service + """ + And stdout does not match regexp: + """ + For a list of all Ubuntu Pro services, run 'pro status --all' + """ Examples: ubuntu release | release | @@ -27,7 +40,7 @@ | lunar | @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attach command in a ubuntu lxd container Given a `` machine with ubuntu-advantage-tools installed When I run `apt-get update` with sudo, retrying exit [100] @@ -83,10 +96,10 @@ | xenial | libkrad0=1.13.2+dfsg-5 | disabled | cis | disabled | disabled | Canonical Livepatch service | | bionic | libkrad0=1.16-2build1 | disabled | cis | disabled | disabled | Canonical Livepatch service | | focal | hello=2.10-2ubuntu2 | n/a | usg | disabled | disabled | Canonical Livepatch service | - | jammy | hello=2.10-2ubuntu4 | n/a | usg | n/a | n/a | Available with the HWE kernel | + | jammy | hello=2.10-2ubuntu4 | n/a | usg | n/a | n/a | Canonical Livepatch service | @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attach command with attach config Given a `` machine with ubuntu-advantage-tools installed # simplest happy path @@ -306,7 +319,7 @@ | jammy | enabled | n/a | n/a | usg | n/a | @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attach command with json output Given a `` machine with ubuntu-advantage-tools installed When I verify that running attach `as non-root` with json response exits `1` @@ -334,7 +347,7 @@ | jammy | n/a | @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attach and Check for contract change in status checking Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -380,6 +393,7 @@ Examples: ubuntu release livepatch status | release | - | xenial | - | bionic | - | focal | + # removing until we add this feature back in a way that doesn't hammer the server + #| xenial | + #| bionic | + #| focal | diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/cloud_pro_clone.feature ubuntu-advantage-tools-28.1~18.04/features/cloud_pro_clone.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/cloud_pro_clone.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/cloud_pro_clone.feature 2023-06-01 18:49:33.000000000 +0000 @@ -13,11 +13,7 @@ log_file: /var/log/ubuntu-advantage.log """ When I run `pro auto-attach` with sudo - And I run `pro status --format yaml` with sudo - Then stdout matches regexp: - """ - attached: true - """ + Then the machine is attached When I run `apt install -y jq` with sudo When I save the `activityInfo.activityToken` value from the contract When I save the `activityInfo.activityID` value from the contract @@ -37,11 +33,7 @@ When I launch 
a `` machine named `clone` from the snapshot of `system-under-test` # The clone will run auto-attach on boot When I run `pro status --wait` `with sudo` on the `clone` machine - When I run `pro status --format yaml` `with sudo` on the `clone` machine - Then stdout matches regexp: - """ - attached: true - """ + Then the machine is attached When I run `python3 /usr/lib/ubuntu-advantage/timer.py` `with sudo` on the `clone` machine Then I verify that `activityInfo.activityToken` value has been updated on the contract on the `clone` machine Then I verify that `activityInfo.activityID` value has been updated on the contract on the `clone` machine diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/cloud.py ubuntu-advantage-tools-28.1~18.04/features/cloud.py --- ubuntu-advantage-tools-27.14.4~18.04/features/cloud.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/cloud.py 2023-06-01 18:49:33.000000000 +0000 @@ -15,8 +15,6 @@ :cloud_credentials_path: A string containing the path for the pycloudlib cloud credentials file - :machine_type: - A string representing the type of machine to launch (pro or generic) :region: The region to create the cloud resources on :param tag: @@ -30,7 +28,6 @@ def __init__( self, - machine_type: str, cloud_credentials_path: Optional[str], tag: Optional[str] = None, timestamp_suffix: bool = True, @@ -39,11 +36,11 @@ self.tag = tag else: self.tag = "uaclient-ci" - self.machine_type = machine_type self._api = None self.key_name = pycloudlib.util.get_timestamped_tag(self.tag) self.timestamp_suffix = timestamp_suffix self.cloud_credentials_path = cloud_credentials_path + self._ssh_key_managed = False @property def pycloudlib_cls(self): @@ -65,6 +62,7 @@ def _create_instance( self, series: str, + machine_type: str, instance_name: Optional[str] = None, image_name: Optional[str] = None, user_data: Optional[str] = None, @@ -77,6 +75,8 @@ The ubuntu release to be used when creating an instance. We will create an image based on this value if the used does not provide a image_name value + :machine_type: + string representing the type of machine to launch (pro or generic) :param instance_name: The name of the instance to be created :param image_name: @@ -115,6 +115,7 @@ def launch( self, series: str, + machine_type: str, instance_name: Optional[str] = None, image_name: Optional[str] = None, user_data: Optional[str] = None, @@ -127,6 +128,8 @@ The ubuntu release to be used when creating an instance. We will create an image based on this value if the used does not provide a image_name value + :machine_type: + string representing the type of machine to launch (pro or generic) :param instance_name: The name of the instance to be created :param image_name: @@ -143,6 +146,7 @@ """ inst = self._create_instance( series=series, + machine_type=machine_type, instance_name=instance_name, image_name=image_name, user_data=user_data, @@ -168,11 +172,15 @@ """ return instance.id - def locate_image_name(self, series: str) -> str: + def locate_image_name( + self, series: str, machine_type: str, daily: bool = True + ) -> str: """Locate and return the image name to use for vm provision. 
:param series: The ubuntu release to be used when locating the image name + :machine_type: + string representing the type of machine to launch (pro or generic) :returns: A image name to use when provisioning a virtual machine @@ -184,12 +192,19 @@ ) image_type = ImageType.GENERIC - if "pro.fips" in self.machine_type: + if "pro-fips" in machine_type: image_type = ImageType.PRO_FIPS - elif "pro" in self.machine_type: + elif "pro" in machine_type: image_type = ImageType.PRO - return self.api.daily_image(release=series, image_type=image_type) + if daily: + logging.debug("looking up daily image for {}".format(series)) + return self.api.daily_image(release=series, image_type=image_type) + else: + logging.debug("looking up released image for {}".format(series)) + return self.api.released_image( + release=series, image_type=image_type + ) def manage_ssh_key( self, @@ -202,6 +217,11 @@ Location of the private key path to use. If None, the location will be a default location. """ + if self._ssh_key_managed: + logging.debug("SSH key already set up") + return + + logging.debug("Setting up SSH key") if key_name: self.key_name = key_name cloud_name = self.name.lower().replace("_", "-") @@ -221,10 +241,17 @@ self.api.use_key( public_key_path=pub_key_path, private_key_path=priv_key_path ) + self._ssh_key_managed = True class EC2(Cloud): - """Class that represents the EC2 cloud provider.""" + """ + Class that represents the EC2 cloud provider. + + For AWS, we need to specify on the pycloudlib config file that + the AWS region must be us-east-2. The reason for that is because + our image ids were captured using that region. + """ name = "aws" @@ -270,6 +297,7 @@ def _create_instance( self, series: str, + machine_type: str, instance_name: Optional[str] = None, image_name: Optional[str] = None, user_data: Optional[str] = None, @@ -282,6 +310,8 @@ The ubuntu release to be used when creating an instance. We will create an image based on this value if the used does not provide a image_name value + :machine_type: + string representing the type of machine to launch (pro or generic) :param instance_name: The name of the instance to be created :param image_name: @@ -297,7 +327,16 @@ An AWS cloud provider instance """ if not image_name: - image_name = self.locate_image_name(series) + if series == "xenial" and "pro" not in machine_type: + logging.debug( + "defaulting to non-daily image for awsgeneric-16.04" + ) + daily = False + else: + daily = True + image_name = self.locate_image_name( + series, machine_type, daily=daily + ) logging.info( "--- Launching AWS image {}({})".format(image_name, series) @@ -316,7 +355,7 @@ class Azure(Cloud): """Class that represents the Azure cloud provider.""" - name = "Azure" + name = "azure" @property def pycloudlib_cls(self): @@ -376,6 +415,7 @@ def _create_instance( self, series: str, + machine_type: str, instance_name: Optional[str] = None, image_name: Optional[str] = None, user_data: Optional[str] = None, @@ -388,6 +428,8 @@ The ubuntu release to be used when creating an instance. 
We will create an image based on this value if the used does not provide a image_name value + :machine_type: + string representing the type of machine to launch (pro or generic) :param instance_name: The name of the instance to be created :param image_name: @@ -403,7 +445,7 @@ An Azure cloud provider instance """ if not image_name: - image_name = self.locate_image_name(series) + image_name = self.locate_image_name(series, machine_type) logging.info( "--- Launching Azure image {}({})".format(image_name, series) @@ -430,13 +472,11 @@ def __init__( self, - machine_type: str, cloud_credentials_path: Optional[str], tag: Optional[str] = None, timestamp_suffix: bool = True, ) -> None: super().__init__( - machine_type=machine_type, cloud_credentials_path=cloud_credentials_path, tag=tag, timestamp_suffix=timestamp_suffix, @@ -494,6 +534,7 @@ def _create_instance( self, series: str, + machine_type: str, instance_name: Optional[str] = None, image_name: Optional[str] = None, user_data: Optional[str] = None, @@ -506,6 +547,8 @@ The ubuntu release to be used when creating an instance. We will create an image based on this value if the used does not provide a image_name value + :machine_type: + string representing the type of machine to launch (pro or generic) :param instance_name: The name of the instance to be created :param image_name: @@ -521,7 +564,7 @@ An GCP cloud provider instance """ if not image_name: - image_name = self.locate_image_name(series) + image_name = self.locate_image_name(series, machine_type) logging.info( "--- Launching GCP image {}({})".format(image_name, series) @@ -536,6 +579,7 @@ def _create_instance( self, series: str, + machine_type: str, instance_name: Optional[str] = None, image_name: Optional[str] = None, user_data: Optional[str] = None, @@ -548,6 +592,8 @@ The ubuntu release to be used when creating an instance. We will create an image based on this value if the used does not provide a image_name value + :machine_type: + string representing the type of machine to launch (pro or generic) :param instance_name: The name of the instance to be created :param image_name: @@ -563,7 +609,7 @@ An AWS cloud provider instance """ if not image_name: - image_name = self.locate_image_name(series) + image_name = self.locate_image_name(series, machine_type) image_type = self.name.title().replace("-", " ") @@ -603,11 +649,15 @@ # instead of the instance id return instance.name - def locate_image_name(self, series: str) -> str: + def locate_image_name( + self, series: str, machine_type: str, daily: bool = True + ) -> str: """Locate and return the image name to use for vm provision. 
:param series: The ubuntu release to be used when locating the image name + :machine_type: + string representing the type of machine to launch (pro or generic) :returns: A image name to use when provisioning a virtual machine @@ -618,7 +668,13 @@ "Must provide either series or image_name to launch azure" ) - image_name = self.api.daily_image(release=series) + if daily: + logging.debug("looking up daily image for {}".format(series)) + image_name = self.api.daily_image(release=series) + else: + logging.debug("looking up released image for {}".format(series)) + image_name = self.api.released_image(release=series) + return image_name diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/collect_logs.feature ubuntu-advantage-tools-28.1~18.04/features/collect_logs.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/collect_logs.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/collect_logs.feature 2023-06-01 18:49:33.000000000 +0000 @@ -2,7 +2,7 @@ Feature: Command behaviour when attached to an Ubuntu Pro subscription @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Run collect-logs on an unattached machine Given a `` machine with ubuntu-advantage-tools installed When I run `python3 /usr/lib/ubuntu-advantage/timer.py` with sudo @@ -51,7 +51,7 @@ | lunar | @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Run collect-logs on an attached machine Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/config.feature ubuntu-advantage-tools-28.1~18.04/features/config.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/config.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/config.feature 2023-06-01 18:49:33.000000000 +0000 @@ -3,7 +3,7 @@ @series.xenial @series.jammy @series.kinetic - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: old ua_config in uaclient.conf is still supported Given a `` machine with ubuntu-advantage-tools installed When I run `pro config show` with sudo diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/daemon.feature ubuntu-advantage-tools-28.1~18.04/features/daemon.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/daemon.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/daemon.feature 2023-06-01 18:49:33.000000000 +0000 @@ -2,7 +2,7 @@ @series.all @uses.config.contract_token - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: cloud-id-shim service is not installed on anything other than xenial Given a `` machine with ubuntu-advantage-tools installed Then I verify that running `systemctl status ubuntu-advantage-cloud-id-shim.service` `with sudo` exits `4` @@ -20,7 +20,7 @@ @series.lts @uses.config.contract_token - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: cloud-id-shim should run in postinst and on boot Given a `` machine with ubuntu-advantage-tools installed # verify installing pro created the cloud-id file @@ -105,10 +105,10 @@ Active: active \(running\) """ # TODO find out what caused memory to go up, try to lower it again - Then on `xenial`, systemd status output says memory usage is less than `16` MB - Then on `bionic`, systemd status output 
says memory usage is less than `14` MB - Then on `focal`, systemd status output says memory usage is less than `12` MB - Then on `jammy`, systemd status output says memory usage is less than `13` MB + Then on `xenial`, systemd status output says memory usage is less than `17` MB + Then on `bionic`, systemd status output says memory usage is less than `15` MB + Then on `focal`, systemd status output says memory usage is less than `13` MB + Then on `jammy`, systemd status output says memory usage is less than `14` MB When I run `cat /var/log/ubuntu-advantage-daemon.log` with sudo Then stdout matches regexp: @@ -208,10 +208,79 @@ | focal | | jammy | + @series.lts + @uses.config.contract_token + @uses.config.machine_type.azure.generic + Scenario Outline: daemon should run when appropriate on azure generic lts + Given a `` machine with ubuntu-advantage-tools installed + # verify its enabled, but stops itself when not configured to poll + When I run `cat /var/log/ubuntu-advantage-daemon.log` with sudo + Then stdout matches regexp: + """ + daemon starting + """ + Then stdout matches regexp: + """ + Configured to not poll for pro license, shutting down + """ + Then stdout matches regexp: + """ + daemon ending + """ + When I run `systemctl is-enabled ubuntu-advantage.service` with sudo + Then stdout matches regexp: + """ + enabled + """ + Then I verify that running `systemctl is-failed ubuntu-advantage.service` `with sudo` exits `1` + Then stdout matches regexp: + """ + inactive + """ + + # verify it stays on when configured to do so + When I create the file `/var/lib/ubuntu-advantage/user-config.json` with the following: + """ + { "poll_for_pro_license": true } + """ + When I run `systemctl restart ubuntu-advantage.service` with sudo + # give it time to get past the initial request + When I wait `5` seconds + When I run `cat /var/log/ubuntu-advantage-daemon.log` with sudo + Then stdout matches regexp: + """ + daemon starting + """ + Then stdout matches regexp: + """ + Cancelling polling + """ + Then stdout matches regexp: + """ + daemon ending + """ + When I run `systemctl is-enabled ubuntu-advantage.service` with sudo + Then stdout matches regexp: + """ + enabled + """ + Then I verify that running `systemctl is-failed ubuntu-advantage.service` `with sudo` exits `1` + Then stdout matches regexp: + """ + inactive + """ + Examples: version + | release | + | xenial | + | bionic | + | focal | + | jammy | + @series.kinetic @uses.config.contract_token + @uses.config.machine_type.azure.generic @uses.config.machine_type.gcp.generic - Scenario Outline: daemon does not start on gcp generic non lts + Scenario Outline: daemon does not start on gcp,azure generic non lts Given a `` machine with ubuntu-advantage-tools installed When I wait `1` seconds When I run `cat /var/log/ubuntu-advantage-daemon.log` with sudo @@ -233,11 +302,10 @@ @series.all @uses.config.contract_token - @uses.config.machine_type.lxd.container - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-container + @uses.config.machine_type.lxd-vm @uses.config.machine_type.aws.generic - @uses.config.machine_type.azure.generic - Scenario Outline: daemon does not start when not on gcpgeneric + Scenario Outline: daemon does not start when not on gcpgeneric or azuregeneric Given a `` machine with ubuntu-advantage-tools installed Then I verify that running `systemctl status ubuntu-advantage.service` `with sudo` exits `3` Then stdout matches regexp: @@ -266,8 +334,7 @@ @series.lts @uses.config.machine_type.aws.pro - 
@uses.config.machine_type.azure.pro - Scenario Outline: daemon does not start when not on gcpgeneric + Scenario Outline: daemon does not start when not on gcpgeneric or azuregeneric Given a `` machine with ubuntu-advantage-tools installed When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: """ @@ -301,7 +368,8 @@ @series.lts @uses.config.machine_type.gcp.pro - Scenario Outline: daemon does not start when not on gcpgeneric + @uses.config.machine_type.azure.pro + Scenario Outline: daemon does not start when not on gcpgeneric or azuregeneric Given a `` machine with ubuntu-advantage-tools installed When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: """ diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/docker.feature ubuntu-advantage-tools-28.1~18.04/features/docker.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/docker.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/docker.feature 2023-06-01 18:49:33.000000000 +0000 @@ -4,7 +4,7 @@ @slow @docker @series.focal - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm Scenario Outline: Build docker images with pro services Given a `focal` machine with ubuntu-advantage-tools installed When I have the `` debs under test in `/home/ubuntu` @@ -77,4 +77,3 @@ | focal | xenial | [ esm-infra ] | curl | esm | | focal | bionic | [ fips ] | openssl | fips | | focal | focal | [ esm-apps ] | hello | esm | - diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/enable_fips_cloud.feature ubuntu-advantage-tools-28.1~18.04/features/enable_fips_cloud.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/enable_fips_cloud.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/enable_fips_cloud.feature 2023-05-30 19:02:35.000000000 +0000 @@ -217,7 +217,7 @@ And I verify that `strongswan-hmac` is installed from apt source `` When I run `apt-cache policy ubuntu-fips` as non-root Then stdout does not match regexp: - "" + """ .*Installed: \(none\) """ When I reboot the machine diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/enable_fips_container.feature ubuntu-advantage-tools-28.1~18.04/features/enable_fips_container.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/enable_fips_container.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/enable_fips_container.feature 2023-06-01 18:49:33.000000000 +0000 @@ -5,7 +5,7 @@ @series.xenial @series.bionic @series.focal - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attached enable of FIPS in an ubuntu lxd container Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -100,7 +100,7 @@ @series.xenial @series.bionic @series.focal - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Try to enable FIPS after FIPS Updates in a lxd container Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/enable_fips_vm.feature ubuntu-advantage-tools-28.1~18.04/features/enable_fips_vm.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/enable_fips_vm.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/enable_fips_vm.feature 2023-06-01 18:49:33.000000000 +0000 @@ -4,14 +4,14 @@ @slow @series.xenial @series.bionic - 
@uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm Scenario Outline: Attached enable of FIPS in an ubuntu lxd vm Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo When I run `pro status --format json` with sudo Then stdout contains substring """ - {"available": "yes", "blocked_by": [{"name": "livepatch", "reason": "Livepatch cannot be enabled while running the official FIPS certified kernel. If you would like a FIPS compliant kernel with additional bug fixes and security updates, you can use the FIPS Updates service with Livepatch.", "reason_code": "livepatch-invalidates-fips"}], "description": "NIST-certified core packages", "description_override": null, "entitled": "yes", "name": "fips", "status": "disabled", "status_details": "FIPS is not configured"} + {"available": "yes", "blocked_by": [{"name": "livepatch", "reason": "Livepatch cannot be enabled while running the official FIPS certified kernel. If you would like a FIPS compliant kernel with additional bug fixes and security updates, you can use the FIPS Updates service with Livepatch.", "reason_code": "livepatch-invalidates-fips"}], "description": "NIST-certified core packages", "description_override": null, "entitled": "yes", "name": "fips", "status": "disabled", "status_details": "FIPS is not configured", "warning": null} """ When I run `pro disable livepatch` with sudo And I run `DEBIAN_FRONTEND=noninteractive apt-get install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y openssh-client openssh-server strongswan` with sudo, retrying exit [100] @@ -48,15 +48,14 @@ When I run `pro status --format json --all` with sudo Then stdout contains substring: """ - {"available": "no", "blocked_by": [{"name": "fips", "reason": "Livepatch cannot be enabled while running the official FIPS certified kernel. If you would like a FIPS compliant kernel with additional bug fixes and security updates, you can use the FIPS Updates service with Livepatch.", "reason_code": "livepatch-invalidates-fips"}], "description": "Canonical Livepatch service", "description_override": null, "entitled": "yes", "name": "livepatch", "status": "n/a", "status_details": "Cannot enable Livepatch when FIPS is enabled."} + {"available": "no", "blocked_by": [{"name": "fips", "reason": "Livepatch cannot be enabled while running the official FIPS certified kernel. If you would like a FIPS compliant kernel with additional bug fixes and security updates, you can use the FIPS Updates service with Livepatch.", "reason_code": "livepatch-invalidates-fips"}], "description": "Canonical Livepatch service", "description_override": null, "entitled": "yes", "name": "livepatch", "status": "n/a", "status_details": "Cannot enable Livepatch when FIPS is enabled.", "warning": null} """ - When I reboot the machine And I run `uname -r` as non-root Then stdout matches regexp: - """ - fips - """ + """ + fips + """ When I run `cat /proc/sys/crypto/fips_enabled` with sudo Then I will see the following on stdout: """ @@ -64,24 +63,24 @@ """ When I run `pro status --all` with sudo Then stdout does not match regexp: - """ - FIPS support requires system reboot to complete configuration - """ + """ + FIPS support requires system reboot to complete configuration + """ When I run `pro disable ` `with sudo` and stdin `y` Then stdout matches regexp: - """ - This will disable the FIPS entitlement but the FIPS packages will remain installed. 
- """ + """ + This will disable the FIPS entitlement but the FIPS packages will remain installed. + """ And stdout matches regexp: - """ - Updating package lists - A reboot is required to complete disable operation - """ + """ + Updating package lists + A reboot is required to complete disable operation + """ When I run `pro status --all` with sudo Then stdout matches regexp: - """ - Disabling FIPS requires system reboot to complete operation - """ + """ + Disabling FIPS requires system reboot to complete operation + """ When I run `apt-cache policy ubuntu-fips` as non-root Then stdout matches regexp: """ @@ -103,13 +102,13 @@ """ When I run `pro status --all` with sudo Then stdout matches regexp: - """ - +yes disabled - """ + """ + +yes disabled + """ Then stdout does not match regexp: - """ - Disabling FIPS requires system reboot to complete operation - """ + """ + Disabling FIPS requires system reboot to complete operation + """ When I run `pro enable --assume-yes --format json --assume-yes` with sudo Then stdout is a json matching the `ua_operation` schema And I will see the following on stdout: @@ -125,9 +124,9 @@ """ When I run `pro status --all` with sudo Then stdout matches regexp: - """ - +yes disabled - """ + """ + +yes disabled + """ Examples: ubuntu release | release | fips-name | fips-service |fips-apt-source | @@ -137,7 +136,7 @@ @slow @series.xenial @series.bionic - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm Scenario Outline: Attached enable of FIPS-updates in an ubuntu lxd vm Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -171,7 +170,7 @@ When I run `pro status --all --format json` with sudo Then stdout contains substring: """ - {"available": "no", "blocked_by": [{"name": "fips-updates", "reason": "FIPS cannot be enabled if FIPS Updates has ever been enabled because FIPS Updates installs security patches that aren't officially certified.", "reason_code": "fips-updates-invalidates-fips"}], "description": "NIST-certified core packages", "description_override": null, "entitled": "yes", "name": "fips", "status": "n/a", "status_details": "Cannot enable FIPS when FIPS Updates is enabled."} + {"available": "no", "blocked_by": [{"name": "fips-updates", "reason": "FIPS cannot be enabled if FIPS Updates has ever been enabled because FIPS Updates installs security patches that aren't officially certified.", "reason_code": "fips-updates-invalidates-fips"}], "description": "NIST-certified core packages", "description_override": null, "entitled": "yes", "name": "fips", "status": "n/a", "status_details": "Cannot enable FIPS when FIPS Updates is enabled.", "warning": null} """ When I reboot the machine @@ -245,7 +244,7 @@ When I run `pro status --all --format json` with sudo Then stdout contains substring: """ - {"available": "no", "blocked_by": [{"name": "livepatch", "reason": "Livepatch cannot be enabled while running the official FIPS certified kernel. 
If you would like a FIPS compliant kernel with additional bug fixes and security updates, you can use the FIPS Updates service with Livepatch.", "reason_code": "livepatch-invalidates-fips"}, {"name": "fips-updates", "reason": "FIPS cannot be enabled if FIPS Updates has ever been enabled because FIPS Updates installs security patches that aren't officially certified.", "reason_code": "fips-updates-invalidates-fips"}], "description": "NIST-certified core packages", "description_override": null, "entitled": "yes", "name": "fips", "status": "n/a", "status_details": "Cannot enable FIPS when FIPS Updates is enabled."} + {"available": "no", "blocked_by": [{"name": "livepatch", "reason": "Livepatch cannot be enabled while running the official FIPS certified kernel. If you would like a FIPS compliant kernel with additional bug fixes and security updates, you can use the FIPS Updates service with Livepatch.", "reason_code": "livepatch-invalidates-fips"}, {"name": "fips-updates", "reason": "FIPS cannot be enabled if FIPS Updates has ever been enabled because FIPS Updates installs security patches that aren't officially certified.", "reason_code": "fips-updates-invalidates-fips"}], "description": "NIST-certified core packages", "description_override": null, "entitled": "yes", "name": "fips", "status": "n/a", "status_details": "Cannot enable FIPS when FIPS Updates is enabled.", "warning": null} """ When I run `pro disable --assume-yes` with sudo And I run `pro enable --assume-yes --format json --assume-yes` with sudo @@ -275,7 +274,7 @@ @slow @series.xenial @series.bionic - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm Scenario Outline: Attached enable FIPS-updates while livepatch is enabled Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -332,7 +331,7 @@ @slow @series.focal - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm Scenario Outline: Attached enable of FIPS in an ubuntu lxd vm Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -342,7 +341,6 @@ """ Updating package lists Installing packages - FIPS strongswan-hmac package could not be installed enabled A reboot is required to complete install """ @@ -376,6 +374,8 @@ When I reboot the machine Then I verify that `openssh-server` installed version matches regexp `fips` And I verify that `openssh-client` installed version matches regexp `fips` + And I verify that `strongswan` installed version matches regexp `fips` + And I verify that `strongswan-hmac` installed version matches regexp `fips` When I run `apt-mark unhold openssh-client openssh-server strongswan` with sudo Then I will see the following on stdout: """ @@ -395,7 +395,7 @@ @slow @series.focal - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm Scenario Outline: Attached enable of FIPS-updates in an ubuntu lxd vm Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -465,7 +465,7 @@ @slow @series.lts - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm Scenario Outline: Attached enable fips-updates on fips enabled vm Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -520,7 +520,7 @@ """ And stdout matches regexp: """ - livepatch +yes enabled + livepatch +yes (enabled|warning) """ When I run `uname -r` as non-root Then stdout matches regexp: @@ -542,7 +542,7 @@ @slow @series.xenial @series.bionic - 
@uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm Scenario Outline: FIPS enablement message when cloud init didn't run properly Given a `` machine with ubuntu-advantage-tools installed When I delete the file `/run/cloud-init/instance-data.json` @@ -566,7 +566,7 @@ @slow @series.focal - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm Scenario Outline: FIPS enablement message when cloud init didn't run properly Given a `` machine with ubuntu-advantage-tools installed When I delete the file `/run/cloud-init/instance-data.json` diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/environment.py ubuntu-advantage-tools-28.1~18.04/features/environment.py --- ubuntu-advantage-tools-27.14.4~18.04/features/environment.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/environment.py 2023-06-01 18:49:33.000000000 +0000 @@ -6,6 +6,7 @@ import re import string import sys +import tarfile from typing import Dict, List, Optional, Tuple, Union # noqa: F401 import pycloudlib # type: ignore # noqa: F401 @@ -46,7 +47,7 @@ This indicates whether the image created for this test run should be cleaned up when all tests are complete. :param machine_type: - The default machine_type to test: lxd.container, lxd.vm, azure.pro, + The default machine_type to test: lxd-container, lxd-vm, azure.pro, azure.generic, aws.pro or aws.generic :param private_key_file: Optional path to pre-existing private key file to use when connecting @@ -105,8 +106,6 @@ # This variable is used in .from_environ() but also to emit the "Config # options" stanza in __init__ all_options = boolean_options + str_options - cloud_api = None # type: pycloudlib.cloud.BaseCloud - cloud_manager = None # type: cloud.Cloud def __init__( self, @@ -116,15 +115,15 @@ destroy_instances: bool = True, ephemeral_instance: bool = False, snapshot_strategy: bool = False, - machine_type: str = "lxd.container", + machine_type: str = "lxd-container", private_key_file: Optional[str] = None, private_key_name: str = "uaclient-integration", reuse_image: Optional[str] = None, contract_token: Optional[str] = None, contract_token_staging: Optional[str] = None, contract_token_staging_expired: Optional[str] = None, - artifact_dir: Optional[str] = None, - install_from: InstallationSource = InstallationSource.DAILY, + artifact_dir: str = "artifacts", + install_from: InstallationSource = InstallationSource.LOCAL, custom_ppa: Optional[str] = None, debs_path: Optional[str] = None, userdata_file: Optional[str] = None, @@ -214,47 +213,39 @@ ) timed_job_tag += "-" + random_suffix - if "aws" in self.machine_type: - # For AWS, we need to specify on the pycloudlib config file that - # the AWS region must be us-east-2. The reason for that is because - # our image ids were captured using that region. 
- self.cloud_manager = cloud.EC2( - machine_type=self.machine_type, + self.clouds = { + "aws": cloud.EC2( cloud_credentials_path=self.cloud_credentials_path, tag=timed_job_tag, timestamp_suffix=False, - ) - self.cloud = "aws" - elif "azure" in self.machine_type: - self.cloud_manager = cloud.Azure( - machine_type=self.machine_type, + ), + "azure": cloud.Azure( cloud_credentials_path=self.cloud_credentials_path, tag=timed_job_tag, timestamp_suffix=False, - ) - self.cloud = "azure" - elif "gcp" in self.machine_type: - self.cloud_manager = cloud.GCP( - machine_type=self.machine_type, + ), + "gcp": cloud.GCP( cloud_credentials_path=self.cloud_credentials_path, tag=timed_job_tag, timestamp_suffix=False, - ) - self.cloud = "gcp" - elif "lxd.vm" in self.machine_type: - self.cloud_manager = cloud.LXDVirtualMachine( - machine_type=self.machine_type, + ), + "lxd-vm": cloud.LXDVirtualMachine( cloud_credentials_path=self.cloud_credentials_path, - ) - self.cloud = "lxd.vm" - else: - self.cloud_manager = cloud.LXDContainer( - machine_type=self.machine_type, + ), + "lxd-container": cloud.LXDContainer( cloud_credentials_path=self.cloud_credentials_path, - ) - self.cloud = "lxd" - - self.cloud_api = self.cloud_manager.api + ), + } + if "aws" in self.machine_type: + self.default_cloud = self.clouds["aws"] + elif "azure" in self.machine_type: + self.default_cloud = self.clouds["azure"] + elif "gcp" in self.machine_type: + self.default_cloud = self.clouds["gcp"] + elif "lxd-vm" in self.machine_type: + self.default_cloud = self.clouds["lxd-vm"] + else: + self.default_cloud = self.clouds["lxd-container"] # Finally, print the config options. This helps users debug the use of # config options, and means they'll be included in test logs in CI. @@ -299,6 +290,9 @@ bool_value = False kwargs[key] = bool_value + # userdata should override environment variables + kwargs.update(config.userdata) + if "install_from" in kwargs: kwargs["install_from"] = InstallationSource(kwargs["install_from"]) @@ -329,17 +323,16 @@ print(" - {} = {}".format(key, value)) context.series_image_name = {} context.series_reuse_image = "" - context.config = UAClientBehaveConfig.from_environ(context.config) - context.config.cloud_manager.manage_ssh_key() + context.pro_config = UAClientBehaveConfig.from_environ(context.config) context.snapshots = {} context.machines = {} - if context.config.reuse_image: + if context.pro_config.reuse_image: series = lxc_get_property( - context.config.reuse_image, property_name="series", image=True + context.pro_config.reuse_image, property_name="series", image=True ) machine_type = lxc_get_property( - context.config.reuse_image, + context.pro_config.reuse_image, property_name="machine_type", image=True, ) @@ -347,26 +340,26 @@ print("Found machine_type: {vm_type}".format(vm_type=machine_type)) if series is not None: context.series_reuse_image = series - context.series_image_name[series] = context.config.reuse_image + context.series_image_name[series] = context.pro_config.reuse_image else: print(" Could not check image series. It will not be used. 
") - context.config.reuse_image = None + context.pro_config.reuse_image = None def _should_skip_tags(context: Context, tags: List) -> str: """Return a reason if a feature or scenario should be skipped""" - machine_type = getattr(context.config, "machine_type", "") + machine_type = getattr(context.pro_config, "machine_type", "") machine_types = [] for tag in tags: parts = tag.split(".") - if parts[0] != "uses": - continue # Only process @uses.* tags for skipping: - val = context - for idx, attr in enumerate(parts[1:], 1): + if parts[0] != "uses" or parts[1] != "config": + continue # Only process @uses.config.* tags for skipping: + val = context.pro_config + for idx, attr in enumerate(parts[2:], 1): val = getattr(val, attr, None) if attr == "machine_type": - curr_machine_type = ".".join(parts[idx + 1 :]) + curr_machine_type = ".".join(parts[idx + 2 :]) machine_types.append(curr_machine_type) if curr_machine_type == machine_type: return "" @@ -397,9 +390,9 @@ scenario.skip(reason=reason) return - filter_series = context.config.filter_series + filter_series = context.pro_config.filter_series given_a_series_match = re.match( - "a `(.*)` machine with ubuntu-advantage-tools installed", + "a `([a-z]*)` machine with ubuntu-advantage-tools installed", scenario.steps[0].name, ) if filter_series and given_a_series_match: @@ -415,6 +408,38 @@ ) return + if hasattr(scenario, "_row") and scenario._row is not None: + row_release = scenario._row.get("release") + if ( + row_release + and len(filter_series) > 0 + and row_release not in filter_series + ): + scenario.skip( + reason=( + "Skipping scenario outline series `{series}`." + " Cmdline provided @series tags: {cmdline_series}".format( + series=row_release, cmdline_series=filter_series + ) + ) + ) + return + row_machine_type = scenario._row.get("machine_type") + if ( + row_machine_type + and context.pro_config.machine_type != "any" + and row_machine_type != context.pro_config.machine_type + ): + scenario.skip( + reason=( + "Skipping scenario outline machine_type `{}`." 
+ " Cmdline provided machine_type: {}".format( + row_machine_type, context.pro_config.machine_type + ) + ) + ) + return + # before_step doesn't execute early enough to modify the step # so we perform step text surgery here # Also, logging capture is not set up when before_scenario is called, @@ -431,61 +456,30 @@ ) -FAILURE_FILES = ( - "/etc/ubuntu-advantage/uaclient.log", - "/var/log/cloud-init.log", - "/var/log/ubuntu-advantage.log", - "/var/log/ubuntu-advantage-daemon.log", - "/var/log/ubuntu-advantage-timer.log", - "/var/lib/cloud/instance/user-data.txt", - "/var/lib/cloud/instance/vendor-data.txt", -) -FAILURE_CMDS = { - "ua-version": ["pro", "version"], - "cloud-init-analyze": ["cloud-init", "analyze", "show"], - "cloud-init.status": ["cloud-init", "status", "--long"], - "status.yaml": ["pro", "status", "--all", "--format=yaml"], - "journal.log": ["journalctl", "-b", "0"], - "systemd-analyze-blame": ["systemd-analyze", "blame"], - "systemctl-status": ["systemctl", "status"], - "systemctl-status-ua-auto-attach": [ - "systemctl", - "status", - "ua-auto-attach.service", - ], - "systemctl-status-ua-reboot-cmds": [ - "systemctl", - "status", - "ua-reboot-cmds.service", - ], - "systemctl-status-ubuntu-advantage": [ - "systemctl", - "status", - "ubuntu-advantage.service", - ], - "systemctl-status-apt-news": [ - "systemctl", - "status", - "apt-news.service", - ], -} - - def after_step(context, step): """Collect test artifacts in the event of failure.""" if step.status == "failed": - if context.config.artifact_dir: - artifacts_dir = context.config.artifact_dir - else: - artifacts_dir = "artifacts" - artifacts_dir = os.path.join( - artifacts_dir, + logging.warning("STEP FAILED. Collecting logs.") + inner_dir = os.path.join( + datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S"), "{}_{}".format(os.path.basename(step.filename), step.line), ) + new_artifacts_dir = os.path.join( + context.pro_config.artifact_dir, + inner_dir, + ) + if not os.path.exists(new_artifacts_dir): + os.makedirs(new_artifacts_dir) + + latest_link_dir = os.path.join( + context.pro_config.artifact_dir, "latest" + ) + if os.path.exists(latest_link_dir): + os.unlink(latest_link_dir) + os.symlink(inner_dir, latest_link_dir) + if hasattr(context, "process"): - if not os.path.exists(artifacts_dir): - os.makedirs(artifacts_dir) - artifact_file = os.path.join(artifacts_dir, "process.log") + artifact_file = os.path.join(new_artifacts_dir, "process.log") process = context.process with open(artifact_file, "w") as stream: stream.write( @@ -497,35 +491,28 @@ ) if hasattr(context, "machines") and SUT in context.machines: - if not os.path.exists(artifacts_dir): - os.makedirs(artifacts_dir) - for log_file in FAILURE_FILES: - artifact_file = os.path.join( - artifacts_dir, os.path.basename(log_file) + try: + context.machines[SUT].instance.execute( + ["pro", "collect-logs", "-o", "/tmp/logs.tar.gz"], + use_sudo=True, ) - logging.info( - "-- pull instance:{} {}".format(log_file, artifact_file) + context.machines[SUT].instance.execute( + ["chmod", "666", "/tmp/logs.tar.gz"], use_sudo=True ) - try: - result = context.machines[SUT].instance.execute( - ["cat", log_file], use_sudo=True - ) - content = result.stdout if result.ok else "" - except RuntimeError: - content = "" - with open(artifact_file, "w") as stream: - stream.write(content) - for artifact_file, cmd in FAILURE_CMDS.items(): - result = context.machines[SUT].instance.execute( - cmd, use_sudo=True + dest = os.path.join(new_artifacts_dir, "logs.tar.gz") + 
context.machines[SUT].instance.pull_file( + "/tmp/logs.tar.gz", dest ) - artifact_file = os.path.join(artifacts_dir, artifact_file) - with open(artifact_file, "w") as stream: - stream.write(result.stdout) + with tarfile.open(dest) as logs_tarfile: + logs_tarfile.extractall(new_artifacts_dir) + logging.warning("Done collecting logs.") + except Exception as e: + logging.error(str(e)) + logging.warning("Failed to collect logs") def after_all(context): - if context.config.image_clean: + if context.pro_config.image_clean: for key, image in context.series_image_name.items(): if key == context.series_reuse_image: logging.info( @@ -533,11 +520,11 @@ context.series_image_name[key], ) else: - context.config.cloud_api.delete_image(image) + context.pro_config.default_cloud.api.delete_image(image) - if context.config.destroy_instances: + if context.pro_config.destroy_instances: try: - key_pair = context.config.cloud_manager.api.key_pair + key_pair = context.pro_config.default_cloud.api.key_pair os.remove(key_pair.private_key_path) os.remove(key_pair.public_key_path) except Exception as e: @@ -547,7 +534,7 @@ if "builder" in context.snapshots: try: - context.config.cloud_manager.api.delete_image( + context.pro_config.default_cloud.api.delete_image( context.snapshots["builder"] ) except RuntimeError as e: diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/fix.feature ubuntu-advantage-tools-28.1~18.04/features/fix.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/fix.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/fix.feature 2023-06-01 18:49:33.000000000 +0000 @@ -1,7 +1,7 @@ Feature: Ua fix command behaviour @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Useful SSL failure message when there aren't any ca-certs Given a `` machine with ubuntu-advantage-tools installed When I run `apt-get update` with sudo @@ -33,70 +33,164 @@ | lunar | @series.focal - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Fix command on an unattached machine Given a `` machine with ubuntu-advantage-tools installed When I run `apt-get update` with sudo When I verify that running `pro fix CVE-1800-123456` `as non-root` exits `1` Then I will see the following on stderr: - """ - Error: CVE-1800-123456 not found. - """ + """ + Error: CVE-1800-123456 not found. + """ When I verify that running `pro fix USN-12345-12` `as non-root` exits `1` Then I will see the following on stderr: - """ - Error: USN-12345-12 not found. - """ + """ + Error: USN-12345-12 not found. + """ When I verify that running `pro fix CVE-12345678-12` `as non-root` exits `1` Then I will see the following on stderr: - """ - Error: issue "CVE-12345678-12" is not recognized. - Usage: "pro fix CVE-yyyy-nnnn" or "pro fix USN-nnnn" - """ + """ + Error: issue "CVE-12345678-12" is not recognized. + Usage: "pro fix CVE-yyyy-nnnn" or "pro fix USN-nnnn" + """ When I verify that running `pro fix USN-12345678-12` `as non-root` exits `1` Then I will see the following on stderr: - """ - Error: issue "USN-12345678-12" is not recognized. - Usage: "pro fix CVE-yyyy-nnnn" or "pro fix USN-nnnn" - """ + """ + Error: issue "USN-12345678-12" is not recognized. 
+ Usage: "pro fix CVE-yyyy-nnnn" or "pro fix USN-nnnn" + """ When I run `apt install -y libawl-php=0.60-1 --allow-downgrades` with sudo And I run `pro fix USN-4539-1` with sudo Then stdout matches regexp: - """ - USN-4539-1: AWL vulnerability - Found CVEs: - - https://ubuntu.com/security/CVE-2020-11728 - - 1 affected source package is installed: awl - \(1/1\) awl: - A fix is available in Ubuntu standard updates. - .*\{ apt update && apt install --only-upgrade -y libawl-php \}.* + """ + USN-4539-1: AWL vulnerability + Found CVEs: + - https://ubuntu.com/security/CVE-2020-11728 - .*✔.* USN-4539-1 is resolved. - """ + Fixing requested USN-4539-1 + 1 affected source package is installed: awl + \(1/1\) awl: + A fix is available in Ubuntu standard updates. + .*\{ apt update && apt install --only-upgrade -y libawl-php \}.* + + .*✔.* USN-4539-1 is resolved. + """ When I run `pro fix CVE-2020-28196` as non-root Then stdout matches regexp: - """ - CVE-2020-28196: Kerberos vulnerability - - https://ubuntu.com/security/CVE-2020-28196 + """ + CVE-2020-28196: Kerberos vulnerability + - https://ubuntu.com/security/CVE-2020-28196 - 1 affected source package is installed: krb5 - \(1/1\) krb5: - A fix is available in Ubuntu standard updates. - The update is already installed. + 1 affected source package is installed: krb5 + \(1/1\) krb5: + A fix is available in Ubuntu standard updates. + The update is already installed. - .*✔.* CVE-2020-28196 is resolved. - """ + .*✔.* CVE-2020-28196 is resolved. + """ When I run `pro fix CVE-2022-24959` as non-root Then stdout matches regexp: - """ - CVE-2022-24959: Linux kernel vulnerabilities - - https://ubuntu.com/security/CVE-2022-24959 + """ + CVE-2022-24959: Linux kernel vulnerabilities + - https://ubuntu.com/security/CVE-2022-24959 - No affected source packages are installed. + No affected source packages are installed. - .*✔.* CVE-2022-24959 does not affect your system. - """ + .*✔.* CVE-2022-24959 does not affect your system. + """ + When I run `apt install -y rsync=3.1.3-8 --allow-downgrades` with sudo + And I run `apt install -y zlib1g=1:1.2.11.dfsg-2ubuntu1 --allow-downgrades` with sudo + And I run `pro fix USN-5573-1` with sudo + Then stdout matches regexp: + """ + USN-5573-1: rsync vulnerability + Found CVEs: + - https://ubuntu.com/security/CVE-2022-37434 + + Fixing requested USN-5573-1 + 1 affected source package is installed: rsync + \(1/1\) rsync: + A fix is available in Ubuntu standard updates. + .*\{ apt update && apt install --only-upgrade -y rsync \}.* + + .*✔.* USN-5573-1 is resolved. + + Found related USNs: + - USN-5570-1 + - USN-5570-2 + + Fixing related USNs: + - USN-5570-1 + No affected source packages are installed. + + .*✔.* USN-5570-1 does not affect your system. + + - USN-5570-2 + 1 affected source package is installed: zlib + \(1/1\) zlib: + A fix is available in Ubuntu standard updates. + .*\{ apt update && apt install --only-upgrade -y zlib1g \}.* + + .*✔.* USN-5570-2 is resolved. + + Summary: + .*✔.* USN-5573-1 \[requested\] is resolved. + .*✔.* USN-5570-1 \[related\] does not affect your system. + .*✔.* USN-5570-2 \[related\] is resolved. + """ + When I run `pro fix USN-5573-1` with sudo + Then stdout matches regexp: + """ + USN-5573-1: rsync vulnerability + Found CVEs: + - https://ubuntu.com/security/CVE-2022-37434 + + Fixing requested USN-5573-1 + 1 affected source package is installed: rsync + \(1/1\) rsync: + A fix is available in Ubuntu standard updates. + The update is already installed. + + .*✔.* USN-5573-1 is resolved. 
+ + Found related USNs: + - USN-5570-1 + - USN-5570-2 + + Fixing related USNs: + - USN-5570-1 + No affected source packages are installed. + + .*✔.* USN-5570-1 does not affect your system. + + - USN-5570-2 + 1 affected source package is installed: zlib + \(1/1\) zlib: + A fix is available in Ubuntu standard updates. + The update is already installed. + + .*✔.* USN-5570-2 is resolved. + + Summary: + .*✔.* USN-5573-1 \[requested\] is resolved. + .*✔.* USN-5570-1 \[related\] does not affect your system. + .*✔.* USN-5570-2 \[related\] is resolved. + """ + When I run `pro fix USN-5573-1 --no-related` with sudo + Then stdout matches regexp: + """ + USN-5573-1: rsync vulnerability + Found CVEs: + - https://ubuntu.com/security/CVE-2022-37434 + + Fixing requested USN-5573-1 + 1 affected source package is installed: rsync + \(1/1\) rsync: + A fix is available in Ubuntu standard updates. + The update is already installed. + + .*✔.* USN-5573-1 is resolved. + """ Examples: ubuntu release details | release | @@ -104,25 +198,33 @@ @series.xenial @uses.config.contract_token - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Fix command on an unattached machine Given a `` machine with ubuntu-advantage-tools installed + When I verify that running `pro fix CVE-1800-123456` `as non-root` exits `1` + Then I will see the following on stderr: + """ + Error: CVE-1800-123456 not found. + """ + When I verify that running `pro fix USN-12345-12` `as non-root` exits `1` + Then I will see the following on stderr: + """ + Error: USN-12345-12 not found. + """ When I run `apt-get update` with sudo When I run `apt install -y libawl-php` with sudo And I reboot the machine - And I verify that running `pro fix USN-4539-1` `as non-root` exits `1` + And I run `pro fix USN-4539-1` as non-root Then stdout matches regexp: """ USN-4539-1: AWL vulnerability Found CVEs: - https://ubuntu.com/security/CVE-2020-11728 - 1 affected source package is installed: awl - \(1/1\) awl: - Ubuntu security engineers are investigating this issue. + Fixing requested USN-4539-1 + No affected source packages are installed. - 1 package is still affected: awl - .*✘.* USN-4539-1 is not resolved. + .*✔.* USN-4539-1 does not affect your system. """ When I run `pro fix CVE-2020-15180` as non-root Then stdout matches regexp: @@ -192,6 +294,7 @@ - https://ubuntu.com/security/CVE-2021-22946 - https://ubuntu.com/security/CVE-2021-22947 + Fixing requested USN-5079-2 1 affected source package is installed: curl \(1/1\) curl: A fix is available in Ubuntu Pro: ESM Infra. @@ -207,6 +310,19 @@ .*\{ apt update && apt install --only-upgrade -y curl libcurl3-gnutls \}.* .*✔.* USN-5079-2 is resolved. + + Found related USNs: + - USN-5079-1 + + Fixing related USNs: + - USN-5079-1 + No affected source packages are installed. + + .*✔.* USN-5079-1 does not affect your system. + + Summary: + .*✔.* USN-5079-2 \[requested\] is resolved. + .*✔.* USN-5079-1 \[related\] does not affect your system. """ When I fix `USN-5079-2` by attaching to a subscription with `contract_token_staging_expired` Then stdout matches regexp @@ -216,6 +332,7 @@ - https://ubuntu.com/security/CVE-2021-22946 - https://ubuntu.com/security/CVE-2021-22947 + Fixing requested USN-5079-2 1 affected source package is installed: curl \(1/1\) curl: A fix is available in Ubuntu Pro: ESM Infra. 
@@ -240,6 +357,7 @@ - https://ubuntu.com/security/CVE-2021-22946 - https://ubuntu.com/security/CVE-2021-22947 + Fixing requested USN-5079-2 1 affected source package is installed: curl \(1/1\) curl: A fix is available in Ubuntu Pro: ESM Infra. @@ -259,6 +377,19 @@ .*\{ apt update && apt install --only-upgrade -y curl libcurl3-gnutls \}.* .*✔.* USN-5079-2 is resolved. + + Found related USNs: + - USN-5079-1 + + Fixing related USNs: + - USN-5079-1 + No affected source packages are installed. + + .*✔.* USN-5079-1 does not affect your system. + + Summary: + .*✔.* USN-5079-2 \[requested\] is resolved. + .*✔.* USN-5079-1 \[related\] does not affect your system. """ When I verify that running `pro fix USN-5051-2` `with sudo` exits `2` Then stdout matches regexp: @@ -267,6 +398,7 @@ Found CVEs: - https://ubuntu.com/security/CVE-2021-3712 + Fixing requested USN-5051-2 1 affected source package is installed: openssl \(1/1\) openssl: A fix is available in Ubuntu Pro: ESM Infra. @@ -276,6 +408,8 @@ .*✘.* USN-5051-2 is not resolved. """ When I run `pro disable esm-infra` with sudo + # Allow esm-cache to be populated + And I run `sleep 5` as non-root And I run `apt-get install gzip -y` with sudo And I run `pro fix USN-5378-4 --dry-run` as non-root Then stdout matches regexp: @@ -286,17 +420,53 @@ Found CVEs: - https://ubuntu.com/security/CVE-2022-1271 - 2 affected source packages are installed: gzip, xz-utils - \(1/2, 2/2\) gzip, xz-utils: + Fixing requested USN-5378-4 + 1 affected source package is installed: gzip + \(1/1\) gzip: A fix is available in Ubuntu Pro: ESM Infra. .*Ubuntu Pro service: esm-infra is not enabled. To proceed with the fix, a prompt would ask permission to automatically enable this service. \{ pro enable esm-infra \}.* - .*\{ apt update && apt install --only-upgrade -y gzip liblzma5 xz-utils \}.* + .*\{ apt update && apt install --only-upgrade -y gzip \}.* .*✔.* USN-5378-4 is resolved. + + Found related USNs: + - USN-5378-1 + - USN-5378-2 + - USN-5378-3 + + Fixing related USNs: + - USN-5378-1 + No affected source packages are installed. + + .*✔.* USN-5378-1 does not affect your system. + + - USN-5378-2 + No affected source packages are installed. + + .*✔.* USN-5378-2 does not affect your system. + + - USN-5378-3 + 1 affected source package is installed: xz-utils + \(1/1\) xz-utils: + A fix is available in Ubuntu Pro: ESM Infra. + + .*Ubuntu Pro service: esm-infra is not enabled. + To proceed with the fix, a prompt would ask permission to automatically enable + this service. + \{ pro enable esm-infra \}.* + .*\{ apt update && apt install --only-upgrade -y liblzma5 xz-utils \}.* + + .*✔.* USN-5378-3 is resolved. + + Summary: + .*✔.* USN-5378-4 \[requested\] is resolved. + .*✔.* USN-5378-1 \[related\] does not affect your system. + .*✔.* USN-5378-2 \[related\] does not affect your system. + .*✔.* USN-5378-3 \[related\] is resolved. """ When I run `pro fix USN-5378-4` `with sudo` and stdin `E` Then stdout matches regexp: @@ -305,8 +475,9 @@ Found CVEs: - https://ubuntu.com/security/CVE-2022-1271 - 2 affected source packages are installed: gzip, xz-utils - \(1/2, 2/2\) gzip, xz-utils: + Fixing requested USN-5378-4 + 1 affected source package is installed: gzip + \(1/1\) gzip: A fix is available in Ubuntu Pro: ESM Infra. The update is not installed because this system does not have esm-infra enabled. 
@@ -316,9 +487,59 @@ One moment, checking your subscription first Updating package lists Ubuntu Pro: ESM Infra enabled - .*\{ apt update && apt install --only-upgrade -y gzip liblzma5 xz-utils \}.* + .*\{ apt update && apt install --only-upgrade -y gzip \}.* .*✔.* USN-5378-4 is resolved. + + Found related USNs: + - USN-5378-1 + - USN-5378-2 + - USN-5378-3 + + Fixing related USNs: + - USN-5378-1 + No affected source packages are installed. + + .*✔.* USN-5378-1 does not affect your system. + + - USN-5378-2 + No affected source packages are installed. + + .*✔.* USN-5378-2 does not affect your system. + + - USN-5378-3 + 1 affected source package is installed: xz-utils + \(1/1\) xz-utils: + A fix is available in Ubuntu Pro: ESM Infra. + .*\{ apt update && apt install --only-upgrade -y liblzma5 xz-utils \}.* + + .*✔.* USN-5378-3 is resolved. + + Summary: + .*✔.* USN-5378-4 \[requested\] is resolved. + .*✔.* USN-5378-1 \[related\] does not affect your system. + .*✔.* USN-5378-2 \[related\] does not affect your system. + .*✔.* USN-5378-3 \[related\] is resolved. + """ + When I run `pro detach --assume-yes` with sudo + And I run `sed -i "/xenial-updates/d" /etc/apt/sources.list` with sudo + And I run `sed -i "/xenial-security/d" /etc/apt/sources.list` with sudo + And I run `apt-get update` with sudo + And I run `apt-get install squid -y` with sudo + And I verify that running `pro fix CVE-2020-25097` `as non-root` exits `1` + Then stdout matches regexp: + """ + CVE-2020-25097: Squid vulnerabilities + - https://ubuntu.com/security/CVE-2020-25097 + + 1 affected source package is installed: squid3 + \(1/1\) squid3: + A fix is available in Ubuntu standard updates. + - Cannot install package squid version 3.5.12-1ubuntu7.16 + - Cannot install package squid-common version 3.5.12-1ubuntu7.16 + + 1 package is still affected: squid3 + .*✘.* CVE-2020-25097 is not resolved """ Examples: ubuntu release details @@ -326,7 +547,7 @@ | xenial | @series.bionic - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario: Fix command on an unattached machine Given a `bionic` machine with ubuntu-advantage-tools installed When I run `apt-get update` with sudo @@ -353,7 +574,7 @@ Usage: "pro fix CVE-yyyy-nnnn" or "pro fix USN-nnnn" """ When I run `apt install -y libawl-php` with sudo - And I verify that running `pro fix USN-4539-1 --dry-run` `as non-root` exits `1` + And I run `pro fix USN-4539-1 --dry-run` as non-root Then stdout matches regexp: """ .*WARNING: The option --dry-run is being used. @@ -362,26 +583,22 @@ Found CVEs: - https://ubuntu.com/security/CVE-2020-11728 - 1 affected source package is installed: awl - \(1/1\) awl: - Ubuntu security engineers are investigating this issue. + Fixing requested USN-4539-1 + No affected source packages are installed. - 1 package is still affected: awl - .*✘.* USN-4539-1 is not resolved. + .*✔.* USN-4539-1 does not affect your system. """ - When I verify that running `pro fix USN-4539-1` `as non-root` exits `1` + When I run `pro fix USN-4539-1` as non-root Then stdout matches regexp: """ USN-4539-1: AWL vulnerability Found CVEs: - https://ubuntu.com/security/CVE-2020-11728 - 1 affected source package is installed: awl - \(1/1\) awl: - Ubuntu security engineers are investigating this issue. + Fixing requested USN-4539-1 + No affected source packages are installed. - 1 package is still affected: awl - .*✘.* USN-4539-1 is not resolved. + .*✔.* USN-4539-1 does not affect your system. 
""" When I run `pro fix CVE-2020-28196` as non-root Then stdout matches regexp: @@ -462,6 +679,7 @@ Found Launchpad bugs: - https://launchpad.net/bugs/1834494 + Fixing requested USN-4038-3 1 affected source package is installed: bzip2 \(1/1\) bzip2: A fix is available in Ubuntu standard updates. @@ -471,7 +689,7 @@ """ @series.bionic - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario: Fix command on a machine without security/updates source lists Given a `bionic` machine with ubuntu-advantage-tools installed When I run `sed -i "/bionic-updates/d" /etc/apt/sources.list` with sudo diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/i8n.feature ubuntu-advantage-tools-28.1~18.04/features/i8n.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/i8n.feature 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/i8n.feature 2023-06-01 18:49:33.000000000 +0000 @@ -0,0 +1,128 @@ +Feature: Pro supports multiple languages + + @series.all + @uses.config.machine_type.lxd-container + @uses.config.contract_token + Scenario Outline: Pro client's commands run successfully + Given a `` machine with ubuntu-advantage-tools installed + ## Change the locale + When I run `apt install language-pack-fr -y` with sudo + And I run `update-locale LANG=fr_FR.UTF-8` with sudo + And I reboot the machine + And I run `cat /etc/default/locale` as non-root + Then stdout matches regexp: + """ + LANG=fr_FR.UTF-8 + """ + #Attach invalid token + When I verify that running `pro attach INVALID_TOKEN` `with sudo` exits `1` + Then stderr matches regexp: + """ + Invalid token. See https://ubuntu.com/pro + """ + When I run `lscpu` as non-root + Then stdout does not match regexp: + """ + Architecture: + """ + When I run `apt update` with sudo + Then stdout does not match regexp: + """ + Hit + """ + When I verify that running `pro attach INVALID_TOKEN` `as non-root` exits `1` + Then I will see the following on stderr: + """ + This command must be run as root (try using sudo). + """ + When I verify that running `pro attach invalid-token --format json` `with sudo` exits `1` + Then stdout is a json matching the `ua_operation` schema + And I will see the following on stdout: + """ + {"_schema_version": "0.1", "errors": [{"message": "Invalid token. See https://ubuntu.com/pro", "message_code": "attach-invalid-token", "service": null, "type": "system"}], "failed_services": [], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} + """ + When I attach `contract_token` with sudo + # Refresh command + When I run `pro refresh` with sudo + Then I will see the following on stdout: + """ + Successfully processed your pro configuration. + Successfully refreshed your subscription. + Successfully updated Ubuntu Pro related APT and MOTD messages. + """ + # auto-attach command + When I verify that running `pro auto-attach` `with sudo` exits `2` + Then stderr matches regexp: + """ + This machine is already attached to 'UA Client Test' + To use a different subscription first run: sudo pro detach. 
+ """ + # status command + When I run `pro status --format json` as non-root + Then stdout is a json matching the `ua_status` schema + When I run `pro status --format yaml` as non-root + Then stdout is a yaml matching the `ua_status` schema + When I create the file `/tmp/machine-token-overlay.json` with the following: + """ + { + "machineTokenInfo": { + "contractInfo": { + "effectiveTo": null + } + } + } + """ + And I append the following on uaclient config: + """ + features: + machine_token_overlay: "/tmp/machine-token-overlay.json" + """ + And I run `pro status` with sudo + Then stdout contains substring: + """ + Valid until: Unknown/Expired + """ + # api command invalid endpoint + When I verify that running `pro api invalid.endpoint` `with sudo` exits `1` + Then stdout matches regexp: + """ + {\"_schema_version\": \"v1\", \"data\": {\"meta\": {\"environment_vars\": \[]}}, \"errors\": \[{\"code\": \"api\-invalid\-endpoint", \"meta\": {}, \"title\": \"'invalid\.endpoint' is not a valid endpoint\"}], \"result\": \"failure\", \"version\": \".*\", \"warnings\": \[]} + """ + When I verify that running `pro api u.pro.version.v1 --args extra=arg` `with sudo` exits `1` + Then stdout matches regexp: + """ + {\"_schema_version\": \"v1\", \"data\": {\"meta\": {\"environment_vars\": \[]}}, \"errors\": \[{\"code\": \"api\-no\-argument\-for\-endpoint\", \"meta\": {}, \"title\": \"u\.pro\.version\.v1 accepts no arguments\"}], \"result\": \"failure\", \"version\": \".*\", \"warnings\": \[]} + """ + # api command valid endpoint + When I run `pro api u.pro.version.v1` with sudo + Then stdout matches regexp: + """ + {\"_schema_version\": \"v1\", \"data\": {\"attributes\": {\"installed_version\": \".*\"}, \"meta\": {\"environment_vars\": \[]}, \"type\": \"Version\"}, \"errors\": \[], \"result\": \"success\", \"version\": \".*\", \"warnings\": \[]} + """ + When I run `UA_LOG_FILE=/tmp/some_file OTHER_ENVVAR=not_there pro api u.pro.version.v1` with sudo + Then stdout matches regexp: + """ + {\"_schema_version\": \"v1\", \"data\": {\"attributes\": {\"installed_version\": \".*\"}, \"meta\": {\"environment_vars\": \[{\"name\": \"UA_LOG_FILE\", \"value\": \"\/tmp\/some_file\"}]}, \"type\": \"Version\"}, \"errors\": \[], \"result\": \"success\", \"version\": \".*\", \"warnings\": \[]} + """ + When I run `ua api u.pro.attach.auto.should_auto_attach.v1` with sudo + Then stdout matches regexp: + """ + {"_schema_version": "v1", "data": {"attributes": {"should_auto_attach": false}, "meta": {"environment_vars": \[\]}, "type": "ShouldAutoAttach"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} + """ + # version + When I run `pro version` as non-root + Then I will see the uaclient version on stdout + When I run `pro version` with sudo + Then I will see the uaclient version on stdout + When I run `pro --version` as non-root + Then I will see the uaclient version on stdout + When I run `pro --version` with sudo + Then I will see the uaclient version on stdout + Examples: ubuntu release + | release | + | bionic | + | focal | + | xenial | + | jammy | + | kinetic | + | lunar | diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/install_uninstall.feature ubuntu-advantage-tools-28.1~18.04/features/install_uninstall.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/install_uninstall.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/install_uninstall.feature 2023-06-01 18:49:33.000000000 +0000 @@ -1,7 +1,7 @@ Feature: Pro Install and Uninstall related 
tests @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Do not fail on postinst when cloud-id returns error Given a `` machine with ubuntu-advantage-tools installed When I delete the file `/run/cloud-init/instance-data.json` @@ -18,7 +18,7 @@ @series.lts @uses.config.contract_token - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Purge package after attaching it to a machine Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -50,7 +50,7 @@ @slow @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Do not fail during postinst with nonstandard python setup Given a `` machine with ubuntu-advantage-tools installed # Works when in a python virtualenv diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/livepatch.feature ubuntu-advantage-tools-28.1~18.04/features/livepatch.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/livepatch.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/livepatch.feature 2023-06-01 18:49:33.000000000 +0000 @@ -2,18 +2,48 @@ Feature: Livepatch @series.focal - @uses.config.machine_type.lxd.vm - Scenario Outline: Attached livepatch status shows warning when on unsupported kernel - Given a `` machine with ubuntu-advantage-tools installed + @uses.config.machine_type.any + @uses.config.machine_type.lxd-vm + Scenario Outline: Unattached livepatch status shows warning when on unsupported kernel + Given a `` `` machine with ubuntu-advantage-tools installed + When I change config key `livepatch_url` to use value `` + Then I verify that no files exist matching `/home/ubuntu/.cache/ubuntu-pro/livepatch-kernel-support-cache.json` + When I run `pro status` as non-root + Then I verify that files exist matching `/home/ubuntu/.cache/ubuntu-pro/livepatch-kernel-support-cache.json` + Then I verify that no files exist matching `/run/ubuntu-advantage/livepatch-kernel-support-cache.json` When I run `pro status` with sudo Then stdout matches regexp: """ livepatch +yes +Current kernel is not supported """ + Then stdout contains substring: + """ + Supported livepatch kernels are listed here: https://ubuntu.com/security/livepatch/docs/kernels + """ + Then I verify that files exist matching `/run/ubuntu-advantage/livepatch-kernel-support-cache.json` + When I run `apt-get install linux-generic -y` with sudo + When I run `DEBIAN_FRONTEND=noninteractive apt-get remove linux-image*-kvm -y` with sudo + When I run `update-grub` with sudo + When I reboot the machine + When I run `pro status` with sudo Then stdout matches regexp: """ + livepatch +yes +Canonical Livepatch service + """ + Then stdout does not contain substring: + """ Supported livepatch kernels are listed here: https://ubuntu.com/security/livepatch/docs/kernels """ + Examples: ubuntu release + | release | machine_type | livepatch_url | + | focal | lxd-vm | https://livepatch.canonical.com | + | focal | lxd-vm | https://livepatch.staging.canonical.com | + + @series.focal + @uses.config.machine_type.any + @uses.config.machine_type.lxd-vm + Scenario Outline: Attached livepatch status shows warning when on unsupported kernel + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo When I run `pro status` with sudo Then stdout matches regexp: @@ -23,7 +53,7 @@ Then stdout matches regexp: """ NOTICES - The current kernel 
\(5.4.0-(\d+)-kvm, amd64\) is not supported by livepatch. + The current kernel \(5.4.0-(\d+)-kvm, x86_64\) is not supported by livepatch. Supported kernels are listed here: https://ubuntu.com/security/livepatch/docs/kernels Either switch to a supported kernel or `pro disable livepatch` to dismiss this warning. @@ -37,7 +67,7 @@ Then stdout does not match regexp: """ NOTICES - The current kernel \(5.4.0-(\d+)-kvm, amd64\) is not supported by livepatch. + The current kernel \(5.4.0-(\d+)-kvm, x86_64\) is not supported by livepatch. Supported kernels are listed here: https://ubuntu.com/security/livepatch/docs/kernels Either switch to a supported kernel or `pro disable livepatch` to dismiss this warning. @@ -57,25 +87,66 @@ """ livepatch +yes +enabled +Canonical Livepatch service """ - When I run `pro detach --assume-yes` with sudo + Examples: ubuntu release + | release | machine_type | + | focal | lxd-vm | + + @series.focal + @uses.config.machine_type.any + @uses.config.machine_type.gcp.generic + Scenario Outline: Attached livepatch status shows upgrade required when on an old kernel + Given a `` `` machine with ubuntu-advantage-tools installed + When I attach `contract_token_staging` with sudo + When I run `apt-get install linux-headers- linux-image- -y` with sudo + When I run `DEBIAN_FRONTEND=noninteractive apt-get remove linux-image*-gcp -y` with sudo + When I run `update-grub` with sudo + When I reboot the machine + When I run `uname -r` with sudo + Then stdout contains substring: + """ + + """ When I run `pro status` with sudo Then stdout matches regexp: """ - livepatch +yes +Canonical Livepatch service + livepatch +yes +warning +Canonical Livepatch service """ - Then stdout does not match regexp: + Then stdout contains substring: """ - Supported livepatch kernels are listed here: https://ubuntu.com/security/livepatch/docs/kernels + NOTICES + The running kernel has reached the end of its active livepatch window. + Please upgrade the kernel with apt and reboot for continued livepatch support. + + """ + When I run `apt-get install linux-headers-generic linux-image-generic -y` with sudo + When I reboot the machine + When I run `uname -r` with sudo + Then stdout does not contain substring: + """ + + """ + When I run `pro status` with sudo + Then stdout matches regexp: + """ + livepatch +yes +enabled +Canonical Livepatch service + """ + Then stdout does not contain substring: + """ + NOTICES + The running kernel has reached the end of its active livepatch window. + Please upgrade the kernel with apt and reboot for continued livepatch support. 
+ """ Examples: ubuntu release - | release | - | focal | + | release | machine_type | old_kernel_version | + | focal | gcp.generic | 5.4.0-28-generic | @series.kinetic @series.lunar - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.any + @uses.config.machine_type.lxd-vm Scenario Outline: Livepatch is not enabled by default and can't be enabled on interim releases - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I run `pro status --all` with sudo Then stdout matches regexp: """ @@ -98,6 +169,6 @@ livepatch +yes +n/a +Canonical Livepatch service """ Examples: ubuntu release - | release | pretty_name | - | kinetic | 22.10 (Kinetic Kudu) | - | lunar | 23.04 (Lunar Lobster) | + | release | machine_type | pretty_name | + | kinetic | lxd-vm | 22.10 (Kinetic Kudu) | + | lunar | lxd-vm | 23.04 (Lunar Lobster) | diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/logs.feature ubuntu-advantage-tools-28.1~18.04/features/logs.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/logs.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/logs.feature 2023-06-01 18:49:33.000000000 +0000 @@ -1,7 +1,7 @@ Feature: Logs in Json Array Formatter @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: The log file can be successfully parsed as json array Given a `` machine with ubuntu-advantage-tools installed When I run `apt update` with sudo @@ -22,6 +22,65 @@ """ Examples: ubuntu release | release | + | xenial | + | bionic | + | focal | + | kinetic | + | jammy | + | lunar | + + @series.all + @uses.config.machine_type.lxd.container + Scenario Outline: Non-root user and root user log files are different + Given a `` machine with ubuntu-advantage-tools installed + # Confirm user log file does not exist + When I verify `/var/log/ubuntu-advantage.log` is empty + Then I verify that no files exist matching `/home/ubuntu/.cache/ubuntu-pro/ubuntu-pro.log` + When I verify that running `pro status` `as non-root` exits `0` + Then I verify that files exist matching `/home/ubuntu/.cache/ubuntu-pro/ubuntu-pro.log` + When I verify `/var/log/ubuntu-advantage.log` is empty + And I run `cat /home/ubuntu/.cache/ubuntu-pro/ubuntu-pro.log` as non-root + Then stdout contains substring + """ + Executed with sys.argv: ['/usr/bin/pro', 'status'] + """ + When I run `truncate -s 0 /home/ubuntu/.cache/ubuntu-pro/ubuntu-pro.log` with sudo + And I attach `contract_token` with sudo + And I verify `/home/ubuntu/.cache/ubuntu-pro/ubuntu-pro.log` is empty + And I run `cat /var/log/ubuntu-advantage.log` as non-root + Then stdout contains substring + """ + Executed with sys.argv: ['/usr/bin/pro', 'attach' + """ + Examples: ubuntu release + | release | + | xenial | + | bionic | + | focal | + | kinetic | + | jammy | + | lunar | + + @series.all + @uses.config.machine_type.lxd.container + Scenario Outline: Non-root user log files included in collect logs + Given a `` machine with ubuntu-advantage-tools installed + When i verify that running `pro status` `with sudo` exits `0` + And I verify that running `pro collect-logs` `with sudo` exits `0` + And I run `tar -tf ua_logs.tar.gz` as non-root + Then stdout does not contain substring + """ + user0.log + """ + When i verify that running `pro status` `as non-root` exits `0` + And I verify that running `pro collect-logs` `with sudo` exits `0` + And I run `tar -tf ua_logs.tar.gz` as non-root + Then stdout contains 
substring + """ + user0.log + """ + Examples: ubuntu release + | release | | xenial | | bionic | | focal | diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/magic_attach.feature ubuntu-advantage-tools-28.1~18.04/features/magic_attach.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/magic_attach.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/magic_attach.feature 2023-06-01 18:49:33.000000000 +0000 @@ -1,7 +1,7 @@ Feature: Magic attach flow related tests @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attach using the magic attach flow Given a `` machine with ubuntu-advantage-tools installed When I change contract to staging with sudo @@ -47,11 +47,7 @@ Attaching the machine... """ - When I run `pro status --format yaml` with sudo - Then stdout matches regexp: - """ - attached: true - """ + And the machine is attached Examples: ubuntu release | release | diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/motd_messages.feature ubuntu-advantage-tools-28.1~18.04/features/motd_messages.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/motd_messages.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/motd_messages.feature 2023-06-01 18:49:33.000000000 +0000 @@ -2,7 +2,7 @@ @series.xenial @series.bionic - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Contract update prevents contract expiration messages Given a `` machine with ubuntu-advantage-tools installed When I run `apt-get update` with sudo @@ -55,7 +55,7 @@ @series.xenial @series.bionic - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Contract Expiration Messages Given a `` machine with ubuntu-advantage-tools installed When I run `apt-get update` with sudo diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/proxy_config.feature ubuntu-advantage-tools-28.1~18.04/features/proxy_config.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/proxy_config.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/proxy_config.feature 2023-06-01 18:49:33.000000000 +0000 @@ -3,7 +3,7 @@ @slow @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attach command when proxy is configured for uaclient Given a `` machine with ubuntu-advantage-tools installed Given a `focal` machine named `proxy` @@ -42,11 +42,7 @@ .*CONNECT contracts.canonical.com.* """ When I run `pro status` with sudo - # Just to verify that the machine is attached - Then stdout matches regexp: - """ - esm-infra +yes +disabled +Expanded Security Maintenance for Infrastructure - """ + Then the machine is attached When I run `truncate -s 0 /var/log/squid/access.log` `with sudo` on the `proxy` machine And I verify `/var/log/squid/access.log` is empty on `proxy` machine And I run `pro config set ua_apt_http_proxy=http://$behave_var{machine-ip proxy}:3128` with sudo @@ -163,7 +159,7 @@ @slow @series.xenial @series.bionic - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm Scenario Outline: Attach command when proxy is configured Given a `` machine with ubuntu-advantage-tools installed Given a `focal` machine named `proxy` @@ -259,7 +255,7 @@ @slow @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attach command when authenticated 
proxy is configured for uaclient Given a `` machine with ubuntu-advantage-tools installed Given a `focal` machine named `proxy` @@ -356,7 +352,7 @@ @slow @series.xenial @series.bionic - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm Scenario Outline: Attach command when authenticated proxy is configured Given a `` machine with ubuntu-advantage-tools installed Given a `focal` machine named `proxy` @@ -407,7 +403,7 @@ @slow @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attach command when proxy is configured manually via conf file for uaclient Given a `` machine with ubuntu-advantage-tools installed Given a `focal` machine named `proxy` @@ -433,11 +429,7 @@ .*CONNECT contracts.canonical.com.* """ When I run `pro status` with sudo - # Just to verify that the machine is attached - Then stdout matches regexp: - """ - esm-infra +yes +disabled +Expanded Security Maintenance for Infrastructure - """ + Then the machine is attached When I run `truncate -s 0 /var/log/squid/access.log` `with sudo` on the `proxy` machine When I create the file `/var/lib/ubuntu-advantage/user-config.json` with the following: """ @@ -552,7 +544,7 @@ @slow @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attach command when authenticated proxy is configured manually for uaclient Given a `` machine with ubuntu-advantage-tools installed Given a `focal` machine named `proxy` @@ -582,7 +574,7 @@ When I create the file `/var/lib/ubuntu-advantage/user-config.json` with the following: """ { - "ua_apt_http_proxy": "http://someuser:somepassword@$behave_var{machine-ip proxy}:3128" + "ua_apt_http_proxy": "http://someuser:somepassword@$behave_var{machine-ip proxy}:3128", "ua_apt_https_proxy": "http://someuser:somepassword@$behave_var{machine-ip proxy}:3128" } """ @@ -627,7 +619,7 @@ @slow @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attach command when proxy is configured globally Given a `` machine with ubuntu-advantage-tools installed Given a `focal` machine named `proxy` @@ -671,11 +663,7 @@ .*CONNECT contracts.canonical.com.* """ When I run `pro status` with sudo - # Just to verify that the machine is attached - Then stdout matches regexp: - """ - esm-infra +yes +disabled +Expanded Security Maintenance for Infrastructure - """ + Then the machine is attached When I run `truncate -s 0 /var/log/squid/access.log` `with sudo` on the `proxy` machine And I verify `/var/log/squid/access.log` is empty on `proxy` machine And I run `pro config set global_apt_http_proxy=http://$behave_var{machine-ip proxy}:3128` with sudo @@ -792,7 +780,7 @@ @slow @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Attach command when authenticated proxy is configured globally Given a `` machine with ubuntu-advantage-tools installed Given a `focal` machine named `proxy` @@ -892,7 +880,7 @@ @slow @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Get warning when configuring global or uaclient proxy Given a `` machine with ubuntu-advantage-tools installed Given a `focal` machine named `proxy` @@ -1045,7 +1033,7 @@ @slow @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: apt_http(s)_proxy still works Given a `` machine with ubuntu-advantage-tools installed 
Given a `focal` machine named `proxy` @@ -1058,11 +1046,7 @@ And I run `truncate -s 0 /var/log/squid/access.log` `with sudo` on the `proxy` machine And I verify `/var/log/squid/access.log` is empty on `proxy` machine And I attach `contract_token` with sudo - When I run `pro status --format yaml` with sudo - Then stdout matches regexp: - """ - attached: true - """ + Then the machine is attached When I run `truncate -s 0 /var/log/squid/access.log` `with sudo` on the `proxy` machine And I verify `/var/log/squid/access.log` is empty on `proxy` machine And I run `pro config set apt_http_proxy=http://$behave_var{machine-ip proxy}:3128` with sudo @@ -1133,7 +1117,7 @@ When I create the file `/var/lib/ubuntu-advantage/user-config.json` with the following: """ { - "apt_http_proxy": "http://$behave_var{machine-ip proxy}:3128" + "apt_http_proxy": "http://$behave_var{machine-ip proxy}:3128", "apt_https_proxy": "http://$behave_var{machine-ip proxy}:3128" } """ @@ -1185,7 +1169,7 @@ @slow @series.jammy - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm Scenario: Enable realtime kernel through proxy on a machine with no internet Given a `jammy` machine with ubuntu-advantage-tools installed When I disable any internet connection on the machine @@ -1206,7 +1190,8 @@ esm-apps +yes +enabled +Expanded Security Maintenance for Applications esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure """ - When I run `pro enable realtime-kernel --beta` `with sudo` and stdin `y` + When I run `pro disable livepatch --assume-yes` with sudo + When I run `pro enable realtime-kernel` `with sudo` and stdin `y` Then stdout matches regexp: """ Installing Real-time kernel packages diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/realtime_kernel.feature ubuntu-advantage-tools-28.1~18.04/features/realtime_kernel.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/realtime_kernel.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/realtime_kernel.feature 2023-06-01 18:49:33.000000000 +0000 @@ -2,7 +2,7 @@ Feature: Enable command behaviour when attached to an Ubuntu Pro subscription @series.jammy - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Enable Real-time kernel service in a container Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo and options `--no-auto-enable` @@ -22,7 +22,7 @@ | jammy | @series.lts - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm Scenario Outline: Enable Real-time kernel service on unsupported release Given a `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo and options `--no-auto-enable` @@ -44,82 +44,302 @@ | focal | 20.04 LTS | Focal Fossa | @series.jammy - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm Scenario Outline: Enable Real-time kernel service Given a `` machine with ubuntu-advantage-tools installed - When I attach `contract_token` with sudo and options `--no-auto-enable` + When I attach `contract_token_staging` with sudo and options `--no-auto-enable` Then I verify that running `pro enable realtime-kernel` `as non-root` exits `1` And I will see the following on stderr: - """ - This command must be run as root (try using sudo). - """ + """ + This command must be run as root (try using sudo). 
+ """ When I run `pro enable realtime-kernel` `with sudo` and stdin `y` Then stdout matches regexp: - """ - One moment, checking your subscription first - The Real-time kernel is an Ubuntu kernel with PREEMPT_RT patches integrated. + """ + One moment, checking your subscription first + The Real-time kernel is an Ubuntu kernel with PREEMPT_RT patches integrated. - .*This will change your kernel. To revert to your original kernel, you will need - to make the change manually..* + .*This will change your kernel. To revert to your original kernel, you will need + to make the change manually..* - Do you want to continue\? \[ default = Yes \]: \(Y/n\) Updating package lists - Installing Real-time kernel packages - Real-time kernel enabled - A reboot is required to complete install. - """ + Do you want to continue\? \[ default = Yes \]: \(Y/n\) Updating package lists + Installing Real-time kernel packages + Real-time kernel enabled + A reboot is required to complete install. + """ When I run `apt-cache policy ubuntu-realtime` as non-root Then stdout does not match regexp: - """ - .*Installed: \(none\) - """ + """ + .*Installed: \(none\) + """ And stdout matches regexp: - """ - \s* 500 https://esm.ubuntu.com/realtime/ubuntu /main amd64 Packages - """ + """ + \s* 500 https://esm.staging.ubuntu.com/realtime/ubuntu /main amd64 Packages + """ + When I run `pro api u.pro.status.enabled_services.v1` as non-root + Then stdout matches regexp: + """ + {"_schema_version": "v1", "data": {"attributes": {"enabled_services": \[{"name": "realtime-kernel", "variant_enabled": true, "variant_name": "generic"}\]}, "meta": {"environment_vars": \[\]}, "type": "EnabledServices"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} + """ When I verify that running `pro enable realtime-kernel` `with sudo` exits `1` Then stdout matches regexp - """ - One moment, checking your subscription first - Real-time kernel is already enabled. - See: sudo pro status - """ + """ + One moment, checking your subscription first + Real-time kernel is already enabled. + See: sudo pro status + """ When I reboot the machine When I run `uname -r` as non-root Then stdout matches regexp: - """ - realtime - """ + """ + realtime + """ When I run `pro disable realtime-kernel` `with sudo` and stdin `y` Then stdout matches regexp: - """ - This will remove the boot order preference for the Real-time kernel and - disable updates to the Real-time kernel. + """ + This will remove the boot order preference for the Real-time kernel and + disable updates to the Real-time kernel. - This will NOT fully remove the kernel from your system. + This will NOT fully remove the kernel from your system. - After this operation is complete you must: - - Ensure a different kernel is installed and configured to boot - - Reboot into that kernel - - Fully remove the realtime kernel packages from your system - - This might look something like `apt remove linux\*realtime`, - but you must ensure this is correct before running it. - """ + After this operation is complete you must: + - Ensure a different kernel is installed and configured to boot + - Reboot into that kernel + - Fully remove the realtime kernel packages from your system + - This might look something like `apt remove linux\*realtime`, + but you must ensure this is correct before running it. 
+ """ When I run `apt-cache policy ubuntu-realtime` as non-root Then stdout contains substring - """ - Installed: (none) - """ + """ + Installed: (none) + """ + When I verify that running `pro enable realtime-kernel --access-only --variant nvidia-tegra` `with sudo` exits `1` + Then I will see the following on stderr: + """ + Error: Cannot use --access-only together with --variant. + """ + + # Test one variant + # We need to disable this job before adding the overlay, because we might + # write the machine token to disk with the override content + When I run `pro config set update_messaging_timer=0` with sudo + And I run `pro enable realtime-kernel --assume-yes` with sudo + And I run `pro status --all` as non-root + Then stdout matches regexp: + """ + realtime-kernel yes +enabled +Ubuntu kernel with PREEMPT_RT patches integrated + ├ generic yes +enabled +Generic version of the RT kernel \(default\) + └ intel-iotg yes +disabled +RT kernel optimized for Intel IOTG platform + """ + When I run `pro api u.pro.status.enabled_services.v1` as non-root + Then stdout matches regexp: + """ + {"_schema_version": "v1", "data": {"attributes": {"enabled_services": \[{"name": "realtime-kernel", "variant_enabled": true, "variant_name": "generic"}\]}, "meta": {"environment_vars": \[\]}, "type": "EnabledServices"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} + """ + When I run `pro enable realtime-kernel --variant intel-iotg` `with sudo` and stdin `y\ny\n` + Then stdout contains substring: + """ + Real-time Intel IOTG Kernel cannot be enabled with Real-time kernel. + Disable Real-time kernel and proceed to enable Real-time Intel IOTG Kernel? (y/N) + """ + When I run `apt-cache policy ubuntu-intel-iot-realtime` as non-root + Then stdout does not match regexp: + """ + Installed: \(none\) + """ + When I run `pro status --all` as non-root + Then stdout matches regexp: + """ + realtime-kernel yes +enabled +Ubuntu kernel with PREEMPT_RT patches integrated + ├ generic yes +disabled +Generic version of the RT kernel \(default\) + └ intel-iotg yes +enabled +RT kernel optimized for Intel IOTG platform + """ + When I run `pro api u.pro.status.enabled_services.v1` as non-root + Then stdout matches regexp: + """ + {"_schema_version": "v1", "data": {"attributes": {"enabled_services": \[{"name": "realtime-kernel", "variant_enabled": true, "variant_name": "intel-iotg"}\]}, "meta": {"environment_vars": \[\]}, "type": "EnabledServices"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} + """ + When I reboot the machine + And I run `uname -r` as non-root + Then stdout matches regexp: + """ + intel + """ + When I run `pro enable realtime-kernel --variant generic` `with sudo` and stdin `y\ny\n` + And I run `pro status --all` as non-root + Then stdout matches regexp: + """ + realtime-kernel yes +enabled +Ubuntu kernel with PREEMPT_RT patches integrated + ├ generic yes +enabled +Generic version of the RT kernel \(default\) + └ intel-iotg yes +disabled +RT kernel optimized for Intel IOTG platform + """ + When I run `pro enable realtime-kernel --variant intel-iotg` `with sudo` and stdin `y\ny\n` + And I run `pro status --all` as non-root + Then stdout matches regexp: + """ + realtime-kernel yes +enabled +Ubuntu kernel with PREEMPT_RT patches integrated + ├ generic yes +disabled +Generic version of the RT kernel \(default\) + └ intel-iotg yes +enabled +RT kernel optimized for Intel IOTG platform + """ + When I verify that running `pro enable realtime-kernel` `with sudo` exits `1` + 
Then stdout contains substring: + """ + Real-time kernel is already enabled. + """ + When I run `pro disable realtime-kernel --assume-yes` with sudo + When I run `apt-cache policy ubuntu-intel-iot-realtime` as non-root + Then stdout contains substring: + """ + Installed: (none) + """ + + # Test multiple variants + When I set the machine token overlay to the following yaml + """ + machineTokenInfo: + contractInfo: + resourceEntitlements: + - type: realtime-kernel + overrides: + - directives: + additionalPackages: + - nvidia-prime + selector: + variant: nvidia-tegra + """ + When I run `pro enable realtime-kernel --variant nvidia-tegra` `with sudo` and stdin `y` + Then stdout matches regexp: + """ + One moment, checking your subscription first + The Real-time kernel is an Ubuntu kernel with PREEMPT_RT patches integrated. + + .*This will change your kernel. To revert to your original kernel, you will need + to make the change manually..* + + Do you want to continue\? \[ default = Yes \]: \(Y/n\) Updating package lists + Installing Real-time NVIDIA Tegra Kernel packages + Real-time NVIDIA Tegra Kernel enabled + """ + When I run `pro status` as non-root + Then stdout matches regexp: + """ + realtime-kernel\* yes +enabled +Ubuntu kernel with PREEMPT_RT patches integrated + usg +yes +disabled +Security compliance and audit tools + + \* Service has variants + """ + Then stdout contains substring: + """ + For a list of all Ubuntu Pro services and variants, run 'pro status --all' + """ + When I run `pro status --all` as non-root + Then stdout matches regexp: + """ + realtime-kernel yes +enabled +Ubuntu kernel with PREEMPT_RT patches integrated + ├ generic yes +disabled +Generic version of the RT kernel \(default\) + ├ intel-iotg yes +disabled +RT kernel optimized for Intel IOTG platform + └ nvidia-tegra yes +enabled +RT kernel optimized for NVIDIA Tegra platform + """ + When I verify that running `pro enable realtime-kernel --variant intel-iotg` `with sudo` and stdin `N` exits `1` + Then stdout matches regexp: + """ + Real-time Intel IOTG Kernel cannot be enabled with Real-time NVIDIA Tegra Kernel. + Disable Real-time NVIDIA Tegra Kernel and proceed to enable Real-time Intel IOTG Kernel\? \(y/N\) + """ + And stdout matches regexp: + """ + Cannot enable Real-time Intel IOTG Kernel when Real-time NVIDIA Tegra Kernel is enabled. + """ + When I run `pro help realtime-kernel` as non-root + Then I will see the following on stdout: + """ + Name: + realtime-kernel + + Entitled: + yes + + Status: + enabled + + Help: + The Real-time kernel is an Ubuntu kernel with PREEMPT_RT patches integrated. + It services latency-dependent use cases by providing deterministic response times. + The Real-time kernel meets stringent preemption specifications and is suitable for + telco applications and dedicated devices in industrial automation and robotics. + The Real-time kernel is currently incompatible with FIPS and Livepatch. + + Variants: + + * generic: Generic version of the RT kernel (default) + * intel-iotg: RT kernel optimized for Intel IOTG platform + * nvidia-tegra: RT kernel optimized for NVIDIA Tegra platform + """ + When I run `pro disable realtime-kernel` `with sudo` and stdin `y` + Then stdout matches regexp: + """ + This will remove the boot order preference for the Real-time kernel and + disable updates to the Real-time kernel. + + This will NOT fully remove the kernel from your system. 
+ + After this operation is complete you must: + - Ensure a different kernel is installed and configured to boot + - Reboot into that kernel + - Fully remove the realtime kernel packages from your system + - This might look something like `apt remove linux\*realtime`, + but you must ensure this is correct before running it. + """ + When I run `pro status` as non-root + Then stdout matches regexp: + """ + realtime-kernel\* +yes +disabled +Ubuntu kernel with PREEMPT_RT patches integrated + """ + When I run `pro status --all` as non-root + Then stdout matches regexp: + """ + realtime-kernel yes +disabled +Ubuntu kernel with PREEMPT_RT patches integrated + ├ generic yes +disabled +Generic version of the RT kernel \(default\) + ├ intel-iotg yes +disabled +RT kernel optimized for Intel IOTG platform + └ nvidia-tegra yes +disabled +RT kernel optimized for NVIDIA Tegra platform + """ + When I verify that running `pro enable realtime-kernel --variant nonexistent` `with sudo` exits `1` + Then I will see the following on stdout: + """ + One moment, checking your subscription first + could not find entitlement named "nonexistent" + """ + When I run `pro detach --assume-yes` with sudo + And I run `pro status` as non-root + Then stdout matches regexp: + """ + realtime-kernel +yes +Ubuntu kernel with PREEMPT_RT patches integrated + """ + When I run `pro status --all` as non-root + Then stdout matches regexp: + """ + realtime-kernel +yes +Ubuntu kernel with PREEMPT_RT patches integrated + """ + And stdout does not match regexp: + """ + nvidia-tegra + """ + And stdout does not match regexp: + """ + intel-iotg + """ Examples: ubuntu release | release | | jammy | @series.jammy - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm Scenario Outline: Enable Real-time kernel service access-only Given a `<release>` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo and options `--no-auto-enable` - When I run `pro enable realtime-kernel --beta --access-only` with sudo + When I run `pro enable realtime-kernel --access-only` with sudo Then stdout matches regexp: """ One moment, checking your subscription first diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/reboot_cmds.feature ubuntu-advantage-tools-28.1~18.04/features/reboot_cmds.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/reboot_cmds.feature 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/reboot_cmds.feature 2023-06-01 18:49:33.000000000 +0000 @@ -0,0 +1,48 @@ +@uses.config.contract_token +Feature: Reboot Commands + + @series.focal + @uses.config.machine_type.lxd-container + Scenario Outline: reboot-cmds removes fips package holds and updates packages + Given a `<release>` machine with ubuntu-advantage-tools installed + When I attach `contract_token` with sudo + When I run `apt install -y strongswan` with sudo + When I run `pro enable fips --assume-yes` with sudo + When I reboot the machine + When I run `pro status` with sudo + Then stdout matches regexp: + """ + fips +yes +enabled + """ + When I run `apt install -y --allow-downgrades strongswan=<old_version>` with sudo + When I run `apt-mark hold strongswan` with sudo + When I run `dpkg-reconfigure ubuntu-advantage-tools` with sudo + When I run `pro status` with sudo + Then stdout matches regexp: + """ + NOTICES + Reboot to FIPS kernel required + """ + When I reboot the machine + And I verify that running `systemctl status ua-reboot-cmds.service` `as non-root` exits `0,3` + Then stdout matches regexp: + """ + .*status=0\/SUCCESS.* + """ 
+ When I run `pro status` with sudo + Then stdout does not match regexp: + """ + NOTICES + """ + When I run `apt-mark showholds` with sudo + Then I will see the following on stdout: + """ + """ + When I run `apt policy strongswan` with sudo + Then stdout contains substring: + """ + *** <new_version> 1001 + """ + Examples: ubuntu release + | release | old_version | new_version | + | focal | 5.8.2-1ubuntu3.5 | 5.8.2-1ubuntu3.fips.3.1.2 | diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/retry_auto_attach.feature ubuntu-advantage-tools-28.1~18.04/features/retry_auto_attach.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/retry_auto_attach.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/retry_auto_attach.feature 2023-06-01 18:49:33.000000000 +0000 @@ -190,7 +190,8 @@ """ Active: inactive (dead) """ - When I run `run-parts /etc/update-motd.d/` with sudo + # Workaround for livepatch issue LP #2015585 + Then I verify that running `run-parts /etc/update-motd.d/` `with sudo` exits `0,1` Then stdout does not match regexp: """ Failed to automatically attach to Ubuntu Pro services @@ -335,17 +336,15 @@ features: {} """ When I wait `60` seconds - When I run `ua status --wait --format yaml` with sudo - Then stdout contains substring - """ - attached: true - """ + And I run `pro status --wait` with sudo + Then the machine is attached When I verify that running `systemctl status ubuntu-advantage.service` `as non-root` exits `3` Then stdout contains substring """ Active: inactive (dead) """ - When I run `run-parts /etc/update-motd.d/` with sudo + # Workaround for livepatch issue LP #2015585 + Then I verify that running `run-parts /etc/update-motd.d/` `with sudo` exits `0,1` Then stdout does not match regexp: """ Failed to automatically attach to Ubuntu Pro services diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/security_status.feature ubuntu-advantage-tools-28.1~18.04/features/security_status.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/security_status.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/security_status.feature 2023-06-01 18:49:33.000000000 +0000 @@ -3,7 +3,7 @@ @series.xenial @series.bionic - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Run security status with JSON/YAML format Given a `<release>` machine with ubuntu-advantage-tools installed When I run `apt-get update` with sudo @@ -92,7 +92,7 @@ | bionic | ansible | esm-apps | @series.xenial - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm Scenario: Check for livepatch CVEs in security-status on an Ubuntu machine Given a `xenial` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -111,7 +111,7 @@ """ @series.xenial - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario: Run security status in an Ubuntu machine Given a `xenial` machine with ubuntu-advantage-tools installed When I install third-party / unknown packages in the machine @@ -136,10 +136,10 @@ This machine is NOT attached to an Ubuntu Pro subscription. Ubuntu Pro with 'esm-infra' enabled provides security updates for - Main/Restricted packages until 2026 and has \d+ pending security update[s]?\. + Main/Restricted packages until 2026\. There (is|are) \d+ pending security update[s]?\. Ubuntu Pro with 'esm-apps' enabled provides security updates for - Universe/Multiverse packages until 2026 and has \d+ pending security update[s]?\.
+ Universe/Multiverse packages until 2026\. There (is|are) \d+ pending security update[s]?\. Try Ubuntu Pro with a free personal subscription on up to 5 machines. Learn more at https://ubuntu.com/pro @@ -155,17 +155,18 @@ and esm-infra is not enabled. Ubuntu Pro with 'esm-infra' enabled provides security updates for - Main/Restricted packages until 2026 and has \d+ pending security update[s]?\. + Main/Restricted packages until 2026\. There (is|are) \d+ pending security update[s]?\. Run 'pro help esm-infra' to learn more - Package names in .*bold.* currently have an available update - with 'esm-infra' enabled - Packages: + Installed packages with an available esm-infra update: + (.|\n)+ + + Further installed packages covered by esm-infra: (.|\n)+ For example, run: - apt-cache policy .+ + apt-cache show .+ to learn more about that package\. """ When I verify root and non-root `pro security-status --esm-apps` calls have the same output @@ -176,17 +177,18 @@ +\d+ package[s]? from Ubuntu Universe/Multiverse repository Ubuntu Pro with 'esm-apps' enabled provides security updates for - Universe/Multiverse packages until 2026 and has \d+ pending security update[s]?\. + Universe/Multiverse packages until 2026\. There (is|are) \d+ pending security update[s]?\. Run 'pro help esm-apps' to learn more - Package names in .*bold.* currently have an available update - with 'esm-apps' enabled - Packages: + Installed packages with an available esm-apps update: + (.|\n)+ + + Further installed packages covered by esm-apps: (.|\n)+ For example, run: - apt-cache policy .+ + apt-cache show .+ to learn more about that package\. """ When I attach `contract_token` with sudo @@ -207,10 +209,10 @@ This machine is attached to an Ubuntu Pro subscription. Main/Restricted packages are receiving security updates from - Ubuntu Pro with 'esm-infra' enabled until 2026\. + Ubuntu Pro with 'esm-infra' enabled until 2026\. There (is|are) \d+ pending security update[s]?\. Universe/Multiverse packages are receiving security updates from - Ubuntu Pro with 'esm-apps' enabled until 2026\. + Ubuntu Pro with 'esm-apps' enabled until 2026\. There (is|are) \d+ pending security update[s]?\. """ When I verify root and non-root `pro security-status --esm-infra` calls have the same output And I run `pro security-status --esm-infra` as non-root @@ -220,17 +222,18 @@ +\d+ packages from Ubuntu Main/Restricted repository Main/Restricted packages are receiving security updates from - Ubuntu Pro with 'esm-infra' enabled until 2026\. + Ubuntu Pro with 'esm-infra' enabled until 2026\. There (is|are) \d+ pending security update[s]?\. Run 'pro help esm-infra' to learn more - Package names in .*bold.* currently have an available update - with 'esm-infra' enabled - Packages: + Installed packages with an available esm-infra update: + (.|\n)+ + + Further installed packages covered by esm-infra: (.|\n)+ For example, run: - apt-cache policy .+ + apt-cache show .+ to learn more about that package\. """ When I verify root and non-root `pro security-status --esm-apps` calls have the same output @@ -241,17 +244,18 @@ +\d+ package[s]? from Ubuntu Universe/Multiverse repository Universe/Multiverse packages are receiving security updates from - Ubuntu Pro with 'esm-apps' enabled until 2026\. + Ubuntu Pro with 'esm-apps' enabled until 2026\. There (is|are) \d+ pending security update[s]?\. 
Run 'pro help esm-apps' to learn more - Package names in .*bold.* currently have an available update - with 'esm-apps' enabled - Packages: + Installed packages with an available esm-apps update: + (.|\n)+ + + Further installed packages covered by esm-apps: (.|\n)+ For example, run: - apt-cache policy .+ + apt-cache show .+ to learn more about that package\. """ When I run `apt upgrade -y` with sudo @@ -300,10 +304,12 @@ Ubuntu Pro with 'esm-infra' enabled provides security updates for Main/Restricted packages until 2026. + Enable esm-infra with: pro enable esm-infra Ubuntu Pro with 'esm-apps' enabled provides security updates for Universe/Multiverse packages until 2026. + Enable esm-apps with: pro enable esm-apps """ When I verify root and non-root `pro security-status --thirdparty` calls have the same output @@ -320,7 +326,7 @@ (.|\n)+ For example, run: - apt-cache policy .+ + apt-cache show .+ to learn more about that package\. """ When I verify root and non-root `pro security-status --unavailable` calls have the same output @@ -338,7 +344,7 @@ (.|\n)+ For example, run: - apt-cache policy .+ + apt-cache show .+ to learn more about that package\. """ When I verify root and non-root `pro security-status --esm-infra` calls have the same output @@ -356,14 +362,8 @@ Run 'pro help esm-infra' to learn more - Package names in .*bold.* currently have an available update - with 'esm-infra' enabled - Packages: + Installed packages covered by esm-infra: (.|\n)+ - - For example, run: - apt-cache policy .+ - to learn more about that package\. """ When I verify root and non-root `pro security-status --esm-apps` calls have the same output And I run `pro security-status --esm-apps` as non-root @@ -377,14 +377,8 @@ Run 'pro help esm-apps' to learn more - Package names in .*bold.* currently have an available update - with 'esm-apps' enabled - Packages: + Installed packages covered by esm-apps: (.|\n)+ - - For example, run: - apt-cache policy .+ - to learn more about that package\. """ When I verify that running `pro security-status --thirdparty --unavailable` `as non-root` exits `2` Then I will see the following on stderr @@ -393,9 +387,73 @@ [--thirdparty | --unavailable | --esm-infra | --esm-apps] argument --unavailable: not allowed with argument --thirdparty """ + When I run `rm /var/lib/apt/periodic/update-success-stamp` with sudo + And I run `pro security-status` as non-root + Then stdout matches regexp: + """ + \d+ packages installed: + +\d+ package[s]? from Ubuntu Main/Restricted repository + +\d+ package[s]? from Ubuntu Universe/Multiverse repository + +\d+ package[s]? from a third party + +\d+ package[s]? no longer available for download + + To get more information about the packages, run + pro security-status --help + for a list of available options\. + + The system apt cache may be outdated\. Make sure to run + sudo apt-get update + to get the latest package information from apt\. + + This machine is NOT receiving security patches because the LTS period has ended + and esm-infra is not enabled. + This machine is attached to an Ubuntu Pro subscription. + + Ubuntu Pro with 'esm-infra' enabled provides security updates for + Main/Restricted packages until 2026. + + Enable esm-infra with: pro enable esm-infra + + Ubuntu Pro with 'esm-apps' enabled provides security updates for + Universe/Multiverse packages until 2026. 
+ + Enable esm-apps with: pro enable esm-apps + """ + When I run `touch -d '-2 days' /var/lib/apt/periodic/update-success-stamp` with sudo + And I run `pro security-status` as non-root + Then stdout matches regexp: + """ + \d+ packages installed: + +\d+ package[s]? from Ubuntu Main/Restricted repository + +\d+ package[s]? from Ubuntu Universe/Multiverse repository + +\d+ package[s]? from a third party + +\d+ package[s]? no longer available for download + + To get more information about the packages, run + pro security-status --help + for a list of available options\. + + The system apt information was updated 2 day\(s\) ago\. Make sure to run + sudo apt-get update + to get the latest package information from apt\. + + This machine is NOT receiving security patches because the LTS period has ended + and esm-infra is not enabled. + This machine is attached to an Ubuntu Pro subscription. + + Ubuntu Pro with 'esm-infra' enabled provides security updates for + Main/Restricted packages until 2026. + + Enable esm-infra with: pro enable esm-infra + + Ubuntu Pro with 'esm-apps' enabled provides security updates for + Universe/Multiverse packages until 2026. + + Enable esm-apps with: pro enable esm-apps + """ @series.focal - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario: Run security status in an Ubuntu machine Given a `focal` machine with ubuntu-advantage-tools installed When I install third-party / unknown packages in the machine @@ -423,7 +481,7 @@ Main/Restricted packages until 2030. Ubuntu Pro with 'esm-apps' enabled provides security updates for - Universe/Multiverse packages until 2030 and has \d+ pending security update[s]?\. + Universe/Multiverse packages until 2030\. There (is|are) \d+ pending security update[s]?\. Try Ubuntu Pro with a free personal subscription on up to 5 machines. Learn more at https://ubuntu.com/pro @@ -451,17 +509,18 @@ +\d+ package[s]? from Ubuntu Universe/Multiverse repository Ubuntu Pro with 'esm-apps' enabled provides security updates for - Universe/Multiverse packages until 2030 and has \d+ pending security update[s]?\. + Universe/Multiverse packages until 2030\. There (is|are) \d+ pending security update[s]?\. Run 'pro help esm-apps' to learn more - Package names in .*bold.* currently have an available update - with 'esm-apps' enabled - Packages: + Installed packages with an available esm-apps update: + (.|\n)+ + + Further installed packages covered by esm-apps: (.|\n)+ For example, run: - apt-cache policy .+ + apt-cache show .+ to learn more about that package\. """ When I attach `contract_token` with sudo @@ -485,7 +544,7 @@ Ubuntu Pro with 'esm-infra' enabled until 2030. Universe/Multiverse packages are receiving security updates from - Ubuntu Pro with 'esm-apps' enabled until 2030\. + Ubuntu Pro with 'esm-apps' enabled until 2030\. There (is|are) \d+ pending security update[s]?\. """ When I verify root and non-root `pro security-status --esm-infra` calls have the same output And I run `pro security-status --esm-infra` as non-root @@ -507,17 +566,18 @@ +\d+ package[s]? from Ubuntu Universe/Multiverse repository Universe/Multiverse packages are receiving security updates from - Ubuntu Pro with 'esm-apps' enabled until 2030\. + Ubuntu Pro with 'esm-apps' enabled until 2030\. There (is|are) \d+ pending security update[s]?\. 
Run 'pro help esm-apps' to learn more - Package names in .*bold.* currently have an available update - with 'esm-apps' enabled - Packages: + Installed packages with an available esm-apps update: + (.|\n)+ + + Further installed packages covered by esm-apps: (.|\n)+ For example, run: - apt-cache policy .+ + apt-cache show .+ to learn more about that package\. """ When I run `apt upgrade -y` with sudo @@ -565,10 +625,12 @@ Ubuntu Pro with 'esm-infra' enabled provides security updates for Main/Restricted packages until 2030. + Enable esm-infra with: pro enable esm-infra Ubuntu Pro with 'esm-apps' enabled provides security updates for Universe/Multiverse packages until 2030. + Enable esm-apps with: pro enable esm-apps """ When I verify root and non-root `pro security-status --thirdparty` calls have the same output @@ -585,7 +647,7 @@ (.|\n)+ For example, run: - apt-cache policy .+ + apt-cache show .+ to learn more about that package\. """ When I verify root and non-root `pro security-status --unavailable` calls have the same output @@ -603,7 +665,7 @@ (.|\n)+ For example, run: - apt-cache policy .+ + apt-cache show .+ to learn more about that package\. """ When I verify root and non-root `pro security-status --esm-infra` calls have the same output @@ -633,14 +695,8 @@ Run 'pro help esm-apps' to learn more - Package names in .*bold.* currently have an available update - with 'esm-apps' enabled - Packages: + Installed packages covered by esm-apps: (.|\n)+ - - For example, run: - apt-cache policy .+ - to learn more about that package\. """ When I verify that running `pro security-status --thirdparty --unavailable` `as non-root` exits `2` Then I will see the following on stderr @@ -649,9 +705,73 @@ [--thirdparty | --unavailable | --esm-infra | --esm-apps] argument --unavailable: not allowed with argument --thirdparty """ + When I run `rm /var/lib/apt/periodic/update-success-stamp` with sudo + And I run `pro security-status` as non-root + Then stdout matches regexp: + """ + \d+ packages installed: + +\d+ package[s]? from Ubuntu Main/Restricted repository + +\d+ package[s]? from Ubuntu Universe/Multiverse repository + +\d+ package[s]? from a third party + +\d+ package[s]? no longer available for download + + To get more information about the packages, run + pro security-status --help + for a list of available options\. + + The system apt cache may be outdated\. Make sure to run + sudo apt-get update + to get the latest package information from apt\. + + This machine is receiving security patching for Ubuntu Main/Restricted + repository until 2025. + This machine is attached to an Ubuntu Pro subscription. + + Ubuntu Pro with 'esm-infra' enabled provides security updates for + Main/Restricted packages until 2030. + + Enable esm-infra with: pro enable esm-infra + + Ubuntu Pro with 'esm-apps' enabled provides security updates for + Universe/Multiverse packages until 2030. + + Enable esm-apps with: pro enable esm-apps + """ + When I run `touch -d '-2 days' /var/lib/apt/periodic/update-success-stamp` with sudo + And I run `pro security-status` as non-root + Then stdout matches regexp: + """ + \d+ packages installed: + +\d+ package[s]? from Ubuntu Main/Restricted repository + +\d+ package[s]? from Ubuntu Universe/Multiverse repository + +\d+ package[s]? from a third party + +\d+ package[s]? no longer available for download + + To get more information about the packages, run + pro security-status --help + for a list of available options\. + + The system apt information was updated 2 day\(s\) ago\. 
Make sure to run + sudo apt-get update + to get the latest package information from apt\. + + This machine is receiving security patching for Ubuntu Main/Restricted + repository until 2025. + This machine is attached to an Ubuntu Pro subscription. + + Ubuntu Pro with 'esm-infra' enabled provides security updates for + Main/Restricted packages until 2030. + + Enable esm-infra with: pro enable esm-infra + + Ubuntu Pro with 'esm-apps' enabled provides security updates for + Universe/Multiverse packages until 2030. + + Enable esm-apps with: pro enable esm-apps + """ @series.kinetic - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario: Run security status in an Ubuntu machine Given a `kinetic` machine with ubuntu-advantage-tools installed When I install third-party / unknown packages in the machine @@ -695,3 +815,47 @@ Ubuntu Pro is not available for non-LTS releases\. """ + When I run `rm /var/lib/apt/periodic/update-success-stamp` with sudo + And I run `pro security-status` as non-root + Then stdout matches regexp: + """ + \d+ packages installed: + +\d+ packages from Ubuntu Main/Restricted repository + +\d+ package[s]? from Ubuntu Universe/Multiverse repository + +\d+ package[s]? from a third party + +\d+ package[s]? no longer available for download + + To get more information about the packages, run + pro security-status --help + for a list of available options\. + + The system apt cache may be outdated\. Make sure to run + sudo apt-get update + to get the latest package information from apt\. + + Main/Restricted packages receive updates until 7/2023\. + + Ubuntu Pro is not available for non-LTS releases\. + """ + When I run `touch -d '-2 days' /var/lib/apt/periodic/update-success-stamp` with sudo + And I run `pro security-status` as non-root + Then stdout matches regexp: + """ + \d+ packages installed: + +\d+ packages from Ubuntu Main/Restricted repository + +\d+ package[s]? from Ubuntu Universe/Multiverse repository + +\d+ package[s]? from a third party + +\d+ package[s]? no longer available for download + + To get more information about the packages, run + pro security-status --help + for a list of available options\. + + The system apt information was updated 2 day\(s\) ago\. Make sure to run + sudo apt-get update + to get the latest package information from apt\. + + Main/Restricted packages receive updates until 7/2023\. + + Ubuntu Pro is not available for non-LTS releases\. 
+ """ diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/steps/airgap.py ubuntu-advantage-tools-28.1~18.04/features/steps/airgap.py --- ubuntu-advantage-tools-27.14.4~18.04/features/steps/airgap.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/steps/airgap.py 2023-05-30 19:02:35.000000000 +0000 @@ -10,7 +10,7 @@ @when("I download the service credentials on the `{machine_name}` machine") def download_service_credentials(context, machine_name): - token = context.config.contract_token + token = context.pro_config.contract_token when_i_run_command( context, "get-resource-tokens {}".format(token), @@ -112,7 +112,7 @@ "I create the contract config overrides file for `{service_list}` on the `{machine_name}` machine" # noqa ) def create_contract_overrides(context, service_list, machine_name): - token = context.config.contract_token + token = context.pro_config.contract_token config_override = {token: {}} # type: Dict[str, Any] for service in service_list.split(","): diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/steps/attach.py ubuntu-advantage-tools-28.1~18.04/features/steps/attach.py --- ubuntu-advantage-tools-27.14.4~18.04/features/steps/attach.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/steps/attach.py 2023-06-01 18:49:33.000000000 +0000 @@ -1,4 +1,6 @@ -from behave import when +import json + +from behave import then, when from features.steps.contract import change_contract_endpoint_to_staging from features.steps.shell import ( @@ -23,7 +25,7 @@ def when_i_attach_staging_token( context, token_type, user_spec, verify_return=True, options="" ): - token = getattr(context.config, token_type) + token = getattr(context.pro_config, token_type) if ( token_type == "contract_token_staging" or token_type == "contract_token_staging_expired" @@ -47,7 +49,9 @@ "I verify that running attach `{spec}` with json response exits `{exit_codes}`" # noqa ) def when_i_verify_attach_with_json_response(context, spec, exit_codes): - cmd = "pro attach {} --format json".format(context.config.contract_token) + cmd = "pro attach {} --format json".format( + context.pro_config.contract_token + ) then_i_verify_that_running_cmd_with_spec_exits_with_codes( context=context, cmd_name=cmd, spec=spec, exit_codes=exit_codes ) @@ -59,8 +63,20 @@ def when_i_verify_attach_expired_token_with_json_response(context, spec): change_contract_endpoint_to_staging(context, user_spec="with sudo") cmd = "pro attach {} --format json".format( - context.config.contract_token_staging_expired + context.pro_config.contract_token_staging_expired ) then_i_verify_that_running_cmd_with_spec_exits_with_codes( context=context, cmd_name=cmd, spec=spec, exit_codes=ERROR_CODE ) + + +@then("the machine is attached") +def then_the_machine_is_attached(context): + when_i_run_command( + context, + command="pro api u.pro.status.is_attached.v1", + user_spec="as non-root", + ) + + is_attached_resp = json.loads(context.process.stdout.strip()) + assert is_attached_resp["data"]["attributes"]["is_attached"] diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/steps/files.py ubuntu-advantage-tools-28.1~18.04/features/steps/files.py --- ubuntu-advantage-tools-27.14.4~18.04/features/steps/files.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/steps/files.py 2023-05-30 19:02:35.000000000 +0000 @@ -36,8 +36,11 @@ ) +@when("I verify `{file_name}` is empty") @when("I verify `{file_name}` is empty on `{machine_name}` machine") -def 
when_i_verify_file_is_empty_on_machine(context, file_name, machine_name): +def when_i_verify_file_is_empty_on_machine( + context, file_name, machine_name=SUT +): command = 'sh -c "cat {} | wc -l"'.format(file_name) when_i_run_command( context, command, user_spec="with sudo", machine_name=machine_name @@ -97,7 +100,7 @@ def when_i_replace_string_in_file_with_token( context, original, filename, token_name ): - token = getattr(context.config, token_name) + token = getattr(context.pro_config, token_name) when_i_replace_string_in_file(context, original, filename, token) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/steps/fix.py ubuntu-advantage-tools-28.1~18.04/features/steps/fix.py --- ubuntu-advantage-tools-27.14.4~18.04/features/steps/fix.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/steps/fix.py 2023-05-30 19:02:35.000000000 +0000 @@ -9,7 +9,7 @@ @when("I fix `{issue}` by attaching to a subscription with `{token_type}`") def when_i_fix_a_issue_by_attaching(context, issue, token_type): - token = getattr(context.config, token_type) + token = getattr(context.pro_config, token_type) if ( token_type == "contract_token_staging" @@ -40,7 +40,7 @@ @when("I fix `{issue}` by updating expired token") def when_i_fix_a_issue_by_updating_expired_token(context, issue): - token = getattr(context.config, "contract_token") + token = getattr(context.pro_config, "contract_token") when_i_run_command( context=context, command="pro fix {}".format(issue), diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/steps/machines.py ubuntu-advantage-tools-28.1~18.04/features/steps/machines.py --- ubuntu-advantage-tools-27.14.4~18.04/features/steps/machines.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/steps/machines.py 2023-06-01 18:49:33.000000000 +0000 @@ -25,12 +25,19 @@ def given_a_machine( context, series, + machine_type=None, machine_name=SUT, snapshot_name=None, user_data=None, ports=None, cleanup=True, ): + if machine_type is None: + machine_type = context.pro_config.machine_type + + cloud = machine_type.split(".")[0] + context.pro_config.clouds[cloud].manage_ssh_key() + time_suffix = datetime.datetime.now().strftime("%m%d-%H%M%S%f") instance_name = "upro-behave-{series}-{machine_name}-{time_suffix}".format( series=series, @@ -40,7 +47,7 @@ inbound_ports = ports.split(",") if ports is not None else None - is_pro = "pro" in context.config.machine_type + is_pro = "pro" in machine_type pro_user_data = ( "bootcmd:\n" """ - "cloud-init-per once disable-auto-attach printf '\\nfeatures: {disable_auto_attach: true}\\n' >> /etc/ubuntu-advantage/uaclient.conf"\n""" # noqa: E501 @@ -53,10 +60,11 @@ if user_data is not None: user_data_to_use += user_data - instance = context.config.cloud_manager.launch( + instance = context.pro_config.clouds[cloud].launch( series=series, + machine_type=machine_type, instance_name=instance_name, - ephemeral=context.config.ephemeral_instance, + ephemeral=context.pro_config.ephemeral_instance, image_name=context.snapshots.get(snapshot_name, None), inbound_ports=inbound_ports, user_data=user_data_to_use, @@ -69,7 +77,7 @@ if cleanup: def cleanup_instance(): - if not context.config.destroy_instances: + if not context.pro_config.destroy_instances: logging.info( "--- Leaving instance running: {}".format( context.machines[machine_name].instance.name @@ -90,9 +98,15 @@ @when("I take a snapshot of the machine") -def when_i_take_a_snapshot(context, machine_name=SUT, cleanup=True): +def when_i_take_a_snapshot( + 
context, machine_type=None, machine_name=SUT, cleanup=True +): + if machine_type is None: + machine_type = context.pro_config.machine_type + + cloud = machine_type.split(".")[0] inst = context.machines[machine_name].instance - snapshot = context.config.cloud_manager.api.snapshot(inst) + snapshot = context.pro_config.clouds[cloud].api.snapshot(inst) context.snapshots[machine_name] = snapshot @@ -100,7 +114,7 @@ def cleanup_snapshot() -> None: try: - context.config.cloud_manager.api.delete_image( + context.pro_config.clouds[cloud].api.delete_image( context.snapshots[machine_name] ) except RuntimeError as e: @@ -114,26 +128,36 @@ @given("a `{series}` machine with ubuntu-advantage-tools installed") -def given_a_sut_machine(context, series): - if context.config.install_from == InstallationSource.LOCAL: +@given( + "a `{series}` `{machine_type}` machine with ubuntu-advantage-tools installed" # noqa: E501 +) +def given_a_sut_machine(context, series, machine_type=None): + if context.pro_config.install_from == InstallationSource.LOCAL: # build right away, this will cache the built debs for later use # building early means we catch build errors before investing in # launching instances build_debs(series) - if context.config.snapshot_strategy: + if context.pro_config.snapshot_strategy: if "builder" not in context.snapshots: given_a_machine( - context, series, machine_name="builder", cleanup=False + context, + series, + machine_type=machine_type, + machine_name="builder", + cleanup=False, ) when_i_install_uat(context, machine_name="builder") when_i_take_a_snapshot( - context, machine_name="builder", cleanup=False + context, + machine_type=machine_type, + machine_name="builder", + cleanup=False, ) context.machines["builder"].instance.delete(wait=False) given_a_machine(context, series, snapshot_name="builder") else: - given_a_machine(context, series) + given_a_machine(context, series, machine_type=machine_type) when_i_install_uat(context) logging.info( diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/steps/output.py ubuntu-advantage-tools-28.1~18.04/features/steps/output.py --- ubuntu-advantage-tools-27.14.4~18.04/features/steps/output.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/steps/output.py 2023-05-30 19:02:35.000000000 +0000 @@ -65,6 +65,13 @@ assert_that(content, contains_string(text)) +@then("{stream} does not contain substring") +def then_stream_not_contains_substring(context, stream): + content = getattr(context.process, stream).strip() + text = process_template_vars(context, context.text) + assert_that(content, not_(contains_string(text))) + + @then("I will see the following on stderr") def then_i_will_see_on_stderr(context): text = process_template_vars(context, context.text) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/steps/packages.py ubuntu-advantage-tools-28.1~18.04/features/steps/packages.py --- ubuntu-advantage-tools-27.14.4~18.04/features/steps/packages.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/steps/packages.py 2023-05-30 19:02:35.000000000 +0000 @@ -3,16 +3,9 @@ from behave import then, when from hamcrest import assert_that, contains_string, matches_regexp -from features.steps.files import when_i_create_file_with_content from features.steps.shell import when_i_run_command from features.util import SUT -APT_REPO_ENTRY = ( - "deb [trusted=yes] " - "http://proclientdummyrepo.s3-website.us-east-2.amazonaws.com/" - "actual_ppa/ unstable main" -) - @when("I apt install `{package_name}`") 
@when("I apt install `{package_name}` on the `{machine_name}` machine") @@ -108,27 +101,33 @@ @when("I install third-party / unknown packages in the machine") def when_i_install_packages(context): # Dummy packages are installed to serve as third-party and unknown - # packages for the tests. They live happy and joyful lives in a s3 bucket. + # packages for the tests. Those packages are in Launchpad PPAs + # owned by the uaclient team. # Many APT updates just to make sure we are up to date - # Unknown package - installed directly, no external reference + # Unknown package - we remove the PPA afterwards so there is no + # external references for the deb. when_i_run_command( context, - ( - "curl -L " - "http://proclientdummyrepo.s3-website.us-east-2.amazonaws.com/" - "unknown_deb/pro-dummy-unknown_1.2.3_all.deb " - "-o /tmp/unknown.deb" - ), + "add-apt-repository -y ppa:ua-client/pro-client-ci-test-unknown", "with sudo", ) + when_i_run_command(context, "apt-get update", "with sudo") when_i_run_command( - context, "apt-get install -y /tmp/unknown.deb", "with sudo" + context, "apt-get install -y pro-dummy-unknown", "with sudo" + ) + # Why no remove-apt-repository? + when_i_run_command( + context, + "add-apt-repository -y -r ppa:ua-client/pro-client-ci-test-unknown", + "with sudo", ) # PPA to install the third-party package - when_i_create_file_with_content( - context, "/etc/apt/sources.list.d/prodummy.list", text=APT_REPO_ENTRY + when_i_run_command( + context, + "add-apt-repository -y ppa:ua-client/pro-client-ci-test-thirdparty", + "with sudo", ) when_i_run_command(context, "apt-get update", "with sudo") when_i_run_command( diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/steps/shell.py ubuntu-advantage-tools-28.1~18.04/features/steps/shell.py --- ubuntu-advantage-tools-27.14.4~18.04/features/steps/shell.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/steps/shell.py 2023-05-30 19:02:35.000000000 +0000 @@ -43,10 +43,12 @@ machine_name=SUT, ): command = process_template_vars(context, command) - prefix = get_command_prefix_for_user_spec(user_spec) - full_cmd = prefix + shlex.split(command) + + if stdin is not None: + stdin = stdin.replace("\\n", "\n") + result = context.machines[machine_name].instance.execute( full_cmd, stdin=stdin ) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/steps/status.py ubuntu-advantage-tools-28.1~18.04/features/steps/status.py --- ubuntu-advantage-tools-27.14.4~18.04/features/steps/status.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/steps/status.py 2023-05-30 19:02:35.000000000 +0000 @@ -5,7 +5,7 @@ @when("I do a preflight check for `{contract_token}` {user_spec}") def when_i_preflight(context, contract_token, user_spec, verify_return=True): - token = getattr(context.config, contract_token, "invalid_token") + token = getattr(context.pro_config, contract_token, "invalid_token") command = "pro status --simulate-with-token {}".format(token) if user_spec == "with the all flag": command += " --all" diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/steps/ubuntu_advantage_tools.py ubuntu-advantage-tools-28.1~18.04/features/steps/ubuntu_advantage_tools.py --- ubuntu-advantage-tools-27.14.4~18.04/features/steps/ubuntu_advantage_tools.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/steps/ubuntu_advantage_tools.py 2023-05-30 19:02:35.000000000 +0000 @@ -14,8 +14,8 @@ def when_i_install_uat(context, machine_name=SUT): instance = 
context.machines[machine_name].instance series = context.machines[machine_name].series - is_pro = "pro" in context.config.machine_type - if context.config.install_from is InstallationSource.ARCHIVE: + is_pro = "pro" in context.pro_config.machine_type + if context.pro_config.install_from is InstallationSource.ARCHIVE: instance.execute("sudo apt update") when_i_apt_install( context, "ubuntu-advantage-tools", machine_name=machine_name @@ -24,8 +24,8 @@ when_i_apt_install( context, "ubuntu-advantage-pro", machine_name=machine_name ) - elif context.config.install_from is InstallationSource.PREBUILT: - debs_path = context.config.debs_path + elif context.pro_config.install_from is InstallationSource.PREBUILT: + debs_path = context.pro_config.debs_path deb_paths = [ os.path.join(debs_path, deb_file) for deb_file in os.listdir(debs_path) @@ -39,7 +39,7 @@ context, "/tmp/behave_ua.deb", machine_name=machine_name ) instance.execute("sudo rm /tmp/behave_ua.deb") - elif context.config.install_from is InstallationSource.LOCAL: + elif context.pro_config.install_from is InstallationSource.LOCAL: ua_deb_path, pro_deb_path = build_debs(series) instance.push_file(ua_deb_path, "/tmp/behave_ua.deb") when_i_apt_install( @@ -52,7 +52,7 @@ context, "/tmp/behave_ua.deb", machine_name=machine_name ) instance.execute("sudo rm /tmp/behave_ua.deb") - elif context.config.install_from is InstallationSource.DAILY: + elif context.pro_config.install_from is InstallationSource.DAILY: instance.execute("sudo add-apt-repository ppa:ua-client/daily") instance.execute("sudo apt update") when_i_apt_install( @@ -62,7 +62,7 @@ when_i_apt_install( context, "ubuntu-advantage-pro", machine_name=machine_name ) - elif context.config.install_from is InstallationSource.STAGING: + elif context.pro_config.install_from is InstallationSource.STAGING: instance.execute("sudo add-apt-repository ppa:ua-client/staging") instance.execute("sudo apt update") when_i_apt_install( @@ -72,7 +72,7 @@ when_i_apt_install( context, "ubuntu-advantage-pro", machine_name=machine_name ) - elif context.config.install_from is InstallationSource.STABLE: + elif context.pro_config.install_from is InstallationSource.STABLE: instance.execute("sudo add-apt-repository ppa:ua-client/stable") instance.execute("sudo apt update") when_i_apt_install( @@ -82,7 +82,7 @@ when_i_apt_install( context, "ubuntu-advantage-pro", machine_name=machine_name ) - elif context.config.install_from is InstallationSource.PROPOSED: + elif context.pro_config.install_from is InstallationSource.PROPOSED: context.text = "deb http://archive.ubuntu.com/ubuntu/ {series}-proposed main\n".format( # noqa: E501 series=series ) @@ -127,9 +127,9 @@ when_i_apt_install( context, "ubuntu-advantage-pro", machine_name=machine_name ) - elif context.config.install_from is InstallationSource.CUSTOM: + elif context.pro_config.install_from is InstallationSource.CUSTOM: instance.execute( - "sudo add-apt-repository {}".format(context.config.custom_ppa) + "sudo add-apt-repository {}".format(context.pro_config.custom_ppa) ) instance.execute("sudo apt update") when_i_apt_install( @@ -143,7 +143,7 @@ @when("I have the `{series}` debs under test in `{dest}`") def when_i_have_the_debs_under_test(context, series, dest): - if context.config.install_from is InstallationSource.LOCAL: + if context.pro_config.install_from is InstallationSource.LOCAL: deb_paths = build_debs(series) for deb_path in deb_paths: @@ -151,17 +151,17 @@ dest_path = "{}/ubuntu-advantage-{}.deb".format(dest, tools_or_pro) 
context.machines[SUT].instance.push_file(deb_path, dest_path) else: - if context.config.install_from is InstallationSource.PROPOSED: + if context.pro_config.install_from is InstallationSource.PROPOSED: ppa_opts = "" else: - if context.config.install_from is InstallationSource.DAILY: + if context.pro_config.install_from is InstallationSource.DAILY: ppa = "ppa:ua-client/daily" - elif context.config.install_from is InstallationSource.STAGING: + elif context.pro_config.install_from is InstallationSource.STAGING: ppa = "ppa:ua-client/staging" - elif context.config.install_from is InstallationSource.STABLE: + elif context.pro_config.install_from is InstallationSource.STABLE: ppa = "ppa:ua-client/stable" - elif context.config.install_from is InstallationSource.CUSTOM: - ppa = context.config.custom_ppa + elif context.pro_config.install_from is InstallationSource.CUSTOM: + ppa = context.pro_config.custom_ppa if not ppa.startswith("ppa"): # assumes format "http://domain.name/user/ppa/ubuntu" match = re.match(r"https?://[\w.]+/([^/]+/[^/]+)", ppa) @@ -197,7 +197,7 @@ "I prepare the local PPAs to upgrade from `{release}` to `{next_release}`" ) def when_i_create_local_ppas(context, release, next_release): - if context.config.install_from is not InstallationSource.LOCAL: + if context.pro_config.install_from is not InstallationSource.LOCAL: return from features.steps.machines import given_a_machine @@ -253,13 +253,14 @@ @when("I install ubuntu-advantage-pro") -def when_i_install_pro(context): - if context.config.install_from is InstallationSource.LOCAL: - deb_paths = build_debs(context.machines[SUT].instance.series) +def when_i_install_pro(context, machine_name=SUT): + if context.pro_config.install_from is InstallationSource.LOCAL: + series = context.machines[machine_name].series + deb_paths = build_debs(series) for deb_path in deb_paths: if "pro" in deb_path: - context.instances["uaclient"].push_file( + context.machines[machine_name].instance.push_file( deb_path, "/tmp/pro.deb" ) when_i_run_command( diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/timer.feature ubuntu-advantage-tools-28.1~18.04/features/timer.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/timer.feature 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/timer.feature 2023-05-30 19:02:35.000000000 +0000 @@ -0,0 +1,20 @@ +@uses.config.contract_token +Feature: Timer for regular background jobs while attached + + @series.xenial + @series.jammy + @series.lunar + @uses.config.machine_type.lxd.container + Scenario Outline: Timer is stopped when detached, started when attached + Given a `` machine with ubuntu-advantage-tools installed + Then I verify the `ua-timer` systemd timer is disabled + When I attach `contract_token` with sudo + # 6 hour timer with 1 hour randomized delay -> potentially 7 hours + Then I verify the `ua-timer` systemd timer is scheduled to run within `420` minutes + When I run `pro detach --assume-yes` with sudo + Then I verify the `ua-timer` systemd timer is disabled + Examples: ubuntu release + | release | + | xenial | + | jammy | + | lunar | diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/ubuntu_pro.feature ubuntu-advantage-tools-28.1~18.04/features/ubuntu_pro.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/ubuntu_pro.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/ubuntu_pro.feature 2023-05-30 19:02:35.000000000 +0000 @@ -180,7 +180,7 @@ esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure 
fips +yes + +NIST-certified core packages fips-updates +yes + +NIST-certified core packages with priority security updates - livepatch +yes + +Canonical Livepatch service + livepatch +yes + + """ Then stdout matches regexp: """ @@ -212,10 +212,10 @@ .*CONNECT metadata.* """ Examples: ubuntu release - | release | fips-s | cc-eal-s | cis-s | livepatch-s | cis_or_usg | - | xenial | n/a | disabled | disabled | n/a | cis | - | bionic | disabled | disabled | disabled | enabled | cis | - | focal | disabled | n/a | disabled | enabled | usg | + | release | fips-s | cc-eal-s | cis-s | livepatch-s | lp-desc | cis_or_usg | + | xenial | n/a | disabled | disabled | warning | Current kernel is not supported | cis | + | bionic | disabled | disabled | disabled | enabled | Canonical Livepatch service | cis | + | focal | disabled | n/a | disabled | enabled | Canonical Livepatch service | usg | @series.lts @uses.config.machine_type.aws.pro @@ -490,7 +490,7 @@ esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure fips +yes + +NIST-certified core packages fips-updates +yes + +NIST-certified core packages with priority security updates - livepatch +yes + +Canonical Livepatch service + livepatch +yes + + """ Then stdout matches regexp: """ @@ -508,7 +508,7 @@ esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure fips +yes + +NIST-certified core packages fips-updates +yes + +NIST-certified core packages with priority security updates - livepatch +yes + +Canonical Livepatch service + livepatch +yes + + """ Then stdout matches regexp: """ @@ -584,11 +584,11 @@ """ Examples: ubuntu release - | release | fips-s | cc-eal-s | cis-s | infra-pkg | apps-pkg | livepatch | cis_or_usg | - | xenial | n/a | disabled | disabled | libkrad0 | jq | n/a | cis | - | bionic | disabled | disabled | disabled | libkrad0 | bundler | enabled | cis | - | focal | disabled | n/a | disabled | hello | ant | enabled | usg | - | jammy | n/a | n/a | n/a | hello | hello | enabled | usg | + | release | fips-s | cc-eal-s | cis-s | infra-pkg | apps-pkg | livepatch | lp-desc | cis_or_usg | + | xenial | n/a | disabled | disabled | libkrad0 | jq | warning | Current kernel is not supported | cis | + | bionic | disabled | disabled | disabled | libkrad0 | bundler | enabled | Canonical Livepatch service | cis | + | focal | disabled | n/a | disabled | hello | ant | enabled | Canonical Livepatch service | usg | + | jammy | n/a | n/a | n/a | hello | hello | enabled | Canonical Livepatch service | usg | @series.lts @uses.config.machine_type.gcp.pro diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/ubuntu_pro_fips.feature ubuntu-advantage-tools-28.1~18.04/features/ubuntu_pro_fips.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/ubuntu_pro_fips.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/ubuntu_pro_fips.feature 2023-06-01 18:49:33.000000000 +0000 @@ -1,7 +1,7 @@ Feature: Command behaviour when auto-attached in an ubuntu PRO fips image @series.lts - @uses.config.machine_type.azure.pro.fips + @uses.config.machine_type.azure.pro-fips Scenario Outline: Check fips is enabled correctly on Ubuntu pro fips Azure machine Given a `` machine with ubuntu-advantage-tools installed When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: @@ -153,7 +153,7 @@ | focal | hello | 389-ds | https://esm.ubuntu.com/fips/ubuntu focal/main | azure-fips | @series.focal - @uses.config.machine_type.azure.pro.fips + @uses.config.machine_type.azure.pro-fips Scenario 
Outline: Check fips packages are correctly installed on Azure Focal machine Given a `` machine with ubuntu-advantage-tools installed When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: @@ -186,7 +186,7 @@ @series.xenial @series.bionic - @uses.config.machine_type.azure.pro.fips + @uses.config.machine_type.azure.pro-fips Scenario Outline: Check fips packages are correctly installed on Azure Bionic & Xenial machines Given a `` machine with ubuntu-advantage-tools installed When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: @@ -223,7 +223,7 @@ | bionic | https://esm.ubuntu.com/fips/ubuntu bionic/main | @series.lts - @uses.config.machine_type.aws.pro.fips + @uses.config.machine_type.aws.pro-fips Scenario Outline: Check fips is enabled correctly on Ubuntu pro fips AWS machine Given a `` machine with ubuntu-advantage-tools installed When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: @@ -373,7 +373,7 @@ | focal | hello | 389-ds | https://esm.ubuntu.com/fips/ubuntu focal/main | aws-fips | @series.focal - @uses.config.machine_type.aws.pro.fips + @uses.config.machine_type.aws.pro-fips Scenario Outline: Check fips packages are correctly installed on AWS Focal machine Given a `` machine with ubuntu-advantage-tools installed When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: @@ -406,7 +406,7 @@ @series.xenial @series.bionic - @uses.config.machine_type.aws.pro.fips + @uses.config.machine_type.aws.pro-fips Scenario Outline: Check fips packages are correctly installed on AWS Bionic & Xenial machines Given a `` machine with ubuntu-advantage-tools installed When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: @@ -443,9 +443,9 @@ | bionic | https://esm.ubuntu.com/fips/ubuntu bionic/main | @series.focal - @uses.config.machine_type.azure.pro.fips - @uses.config.machine_type.aws.pro.fips - @uses.config.machine_type.gcp.pro.fips + @uses.config.machine_type.azure.pro-fips + @uses.config.machine_type.aws.pro-fips + @uses.config.machine_type.gcp.pro-fips Scenario Outline: Check fips-updates can be enabled in a focal PRO FIPS machine Given a `` machine with ubuntu-advantage-tools installed When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: @@ -496,7 +496,7 @@ @series.focal @series.bionic - @uses.config.machine_type.gcp.pro.fips + @uses.config.machine_type.gcp.pro-fips Scenario Outline: Check fips is enabled correctly on Ubuntu pro fips GCP machine Given a `` machine with ubuntu-advantage-tools installed When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: @@ -645,7 +645,7 @@ | focal | hello | 389-ds | https://esm.ubuntu.com/fips/ubuntu focal/main | gcp-fips | @series.focal - @uses.config.machine_type.gcp.pro.fips + @uses.config.machine_type.gcp.pro-fips Scenario Outline: Check fips packages are correctly installed on GCP Pro Focal machine Given a `` machine with ubuntu-advantage-tools installed When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: @@ -677,7 +677,7 @@ | focal | https://esm.ubuntu.com/fips/ubuntu focal/main | @series.bionic - @uses.config.machine_type.gcp.pro.fips + @uses.config.machine_type.gcp.pro-fips Scenario Outline: Check fips packages are correctly installed on GCP Pro Bionic machines Given a `` machine with ubuntu-advantage-tools installed When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: diff -Nru 
ubuntu-advantage-tools-27.14.4~18.04/features/ubuntu_upgrade.feature ubuntu-advantage-tools-28.1~18.04/features/ubuntu_upgrade.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/ubuntu_upgrade.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/ubuntu_upgrade.feature 2023-06-01 18:49:33.000000000 +0000 @@ -3,7 +3,7 @@ @slow @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container @upgrade Scenario Outline: Attached upgrade Given a `` machine with ubuntu-advantage-tools installed @@ -58,7 +58,7 @@ @slow @series.xenial - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-vm @upgrade Scenario Outline: Attached FIPS upgrade across LTS releases Given a `` machine with ubuntu-advantage-tools installed diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/ubuntu_upgrade_unattached.feature ubuntu-advantage-tools-28.1~18.04/features/ubuntu_upgrade_unattached.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/ubuntu_upgrade_unattached.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/ubuntu_upgrade_unattached.feature 2023-06-01 18:49:33.000000000 +0000 @@ -3,7 +3,7 @@ @slow @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container @upgrade Scenario Outline: Unattached upgrade Given a `` machine with ubuntu-advantage-tools installed diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/unattached_commands.feature ubuntu-advantage-tools-28.1~18.04/features/unattached_commands.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/unattached_commands.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/unattached_commands.feature 2023-06-01 18:49:33.000000000 +0000 @@ -1,7 +1,7 @@ Feature: Command behaviour when unattached @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Unattached auto-attach does nothing in a ubuntu machine Given a `` machine with ubuntu-advantage-tools installed # Validate systemd unit/timer syntax @@ -32,7 +32,7 @@ | lunar | @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Unattached commands that requires enabled user in a ubuntu machine Given a `` machine with ubuntu-advantage-tools installed When I verify that running `pro ` `as non-root` exits `1` @@ -63,7 +63,7 @@ | lunar | refresh | @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Help command on an unattached machine Given a `` machine with ubuntu-advantage-tools installed When I run `pro help esm-infra` as non-root @@ -110,7 +110,7 @@ | lunar | no | @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Unattached enable/disable fails in a ubuntu machine Given a `` machine with ubuntu-advantage-tools installed When I verify that running `pro esm-infra` `as non-root` exits `1` @@ -185,15 +185,19 @@ | lunar | disable | @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Check for newer versions of the client in an ubuntu machine Given a `` machine with ubuntu-advantage-tools installed # Make sure we have a fresh, just rebooted, environment When I reboot the machine Then I verify that no files exist matching `/run/ubuntu-advantage/candidate-version` When I run `pro status` with sudo - Then I 
will see the following on stderr + Then stderr does not match regexp: """ + .*\[info\].* A new version is available: 99.9.9 + Please run: + sudo apt-get install ubuntu-advantage-tools + to get the latest version with new features and bug fixes. """ And I verify that files exist matching `/run/ubuntu-advantage/candidate-version` # We forge a candidate to see results @@ -211,8 +215,12 @@ to get the latest version with new features and bug fixes. """ When I run `pro status --format json` as non-root - Then I will see the following on stderr + Then stderr does not match regexp: """ + .*\[info\].* A new version is available: 99.9.9 + Please run: + sudo apt-get install ubuntu-advantage-tools + to get the latest version with new features and bug fixes. """ When I run `pro config show` as non-root Then stderr matches regexp: @@ -233,14 +241,22 @@ \"code\": \"new-version-available\" """ When I run `pro api u.pro.version.v1` as non-root - Then I will see the following on stderr + Then stderr does not match regexp: """ + .*\[info\].* A new version is available: 99.9.9 + Please run: + sudo apt-get install ubuntu-advantage-tools + to get the latest version with new features and bug fixes. """ When I run `apt-get update` with sudo # apt-get update will bring a new candidate, which is the current installed version And I run `pro status` as non-root - Then I will see the following on stderr + Then stderr does not match regexp: """ + .*\[info\].* A new version is available: 99.9.9 + Please run: + sudo apt-get install ubuntu-advantage-tools + to get the latest version with new features and bug fixes. """ Examples: ubuntu release @@ -252,8 +268,9 @@ | kinetic | | lunar | - @series.all - @uses.config.machine_type.lxd.container + @series.xenial + @series.bionic + @uses.config.machine_type.lxd-container # Side effect: this verifies that `ua` still works as a command Scenario Outline: Verify autocomplete options Given a `` machine with ubuntu-advantage-tools installed @@ -309,13 +326,71 @@ | release | # | xenial | Can't rely on Xenial because of bash sorting things weirdly | bionic | + + @series.focal + @series.jammy + @series.lunar + @uses.config.machine_type.lxd-container + # Side effect: this verifies that `ua` still works as a command + Scenario Outline: Verify autocomplete options + Given a `` machine with ubuntu-advantage-tools installed + When I prepare the autocomplete test + And I press tab twice to autocomplete the `ua` command + Then stdout matches regexp: + """ + --debug +auto-attach +enable +status\r + --help +collect-logs +fix +system\r + --version +config +help +version\r + api +detach +refresh +\r + attach +disable +security-status + """ + When I press tab twice to autocomplete the `pro` command + Then stdout matches regexp: + """ + --debug +auto-attach +enable +status\r + --help +collect-logs +fix +system\r + --version +config +help +version\r + api +detach +refresh +\r + attach +disable +security-status + """ + When I press tab twice to autocomplete the `ua enable` command + Then stdout matches regexp: + """ + cc-eal +fips +realtime-kernel +usg\r + esm-apps +fips-updates +ros +\r + esm-infra +livepatch +ros-updates +\r + """ + When I press tab twice to autocomplete the `pro enable` command + Then stdout matches regexp: + """ + cc-eal +fips +realtime-kernel +usg\r + esm-apps +fips-updates +ros +\r + esm-infra +livepatch +ros-updates +\r + """ + When I press tab twice to autocomplete the `ua disable` command + Then stdout matches regexp: + """ + cc-eal +fips +realtime-kernel +usg\r + esm-apps 
+fips-updates +ros +\r + esm-infra +livepatch +ros-updates +\r + """ + When I press tab twice to autocomplete the `pro disable` command + Then stdout matches regexp: + """ + cc-eal +fips +realtime-kernel +usg\r + esm-apps +fips-updates +ros +\r + esm-infra +livepatch +ros-updates +\r + """ + + Examples: ubuntu release + | release | | focal | | jammy | # | kinetic | There is a very weird error on Kinetic, irrelevant to this test | lunar | @series.lts - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: esm cache failures don't generate errors Given a `` machine with ubuntu-advantage-tools installed When I disable access to esm.ubuntu.com @@ -353,7 +428,7 @@ @series.jammy @series.kinetic @series.lunar - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container # Services fail, degraded systemctl, but no crashes. Scenario Outline: services fail gracefully when yaml is broken/absent Given a `` machine with ubuntu-advantage-tools installed @@ -424,3 +499,98 @@ | kinetic | python3.10 | | # Lunar has a BIG error message explaining why this is a clear user error... | lunar | python3.11 | --break-system-packages | + + + @series.all + @uses.config.machine_type.lxd-container + Scenario Outline: Warn users not to redirect/pipe human readable output + Given a `` machine with ubuntu-advantage-tools installed + When I run shell command `pro version | cat` as non-root + Then I will see the following on stderr + """ + """ + When I run shell command `pro version > version_out` as non-root + Then I will see the following on stderr + """ + """ + When I run shell command `pro status | cat` as non-root + Then I will see the following on stderr + """ + WARNING: this output is intended to be human readable, and subject to change. + In scripts, prefer using machine readable data from the `pro api` command, + or use `pro status --format json`. + """ + When I run shell command `pro status | cat` with sudo + Then I will see the following on stderr + """ + WARNING: this output is intended to be human readable, and subject to change. + In scripts, prefer using machine readable data from the `pro api` command, + or use `pro status --format json`. + """ + When I run shell command `pro status > status_out` as non-root + Then I will see the following on stderr + """ + WARNING: this output is intended to be human readable, and subject to change. + In scripts, prefer using machine readable data from the `pro api` command, + or use `pro status --format json`. + """ + When I run shell command `pro status > status_out` with sudo + Then I will see the following on stderr + """ + WARNING: this output is intended to be human readable, and subject to change. + In scripts, prefer using machine readable data from the `pro api` command, + or use `pro status --format json`. + """ + When I run shell command `pro status --format tabular | cat` as non-root + Then I will see the following on stderr + """ + WARNING: this output is intended to be human readable, and subject to change. + In scripts, prefer using machine readable data from the `pro api` command, + or use `pro status --format json`. + """ + When I run shell command `pro status --format tabular > status_out` as non-root + Then I will see the following on stderr + """ + WARNING: this output is intended to be human readable, and subject to change. + In scripts, prefer using machine readable data from the `pro api` command, + or use `pro status --format json`. 
+ """ + When I run shell command `pro status --format json | cat` as non-root + Then I will see the following on stderr + """ + """ + When I run shell command `pro status --format json > status_out` as non-root + Then I will see the following on stderr + """ + """ + When I run shell command `pro security-status | cat` as non-root + Then I will see the following on stderr + """ + WARNING: this output is intended to be human readable, and subject to change. + In scripts, prefer using machine readable data from the `pro api` command, + or use `pro security-status --format json`. + """ + When I run shell command `pro security-status > status_out` as non-root + Then I will see the following on stderr + """ + WARNING: this output is intended to be human readable, and subject to change. + In scripts, prefer using machine readable data from the `pro api` command, + or use `pro security-status --format json`. + """ + When I run shell command `pro security-status --format json | cat` as non-root + Then I will see the following on stderr + """ + """ + When I run shell command `pro security-status --format json > status_out` as non-root + Then I will see the following on stderr + """ + """ + + Examples: ubuntu release + | release | + | xenial | + | bionic | + | focal | + | jammy | + | kinetic | + | lunar | diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/unattached_status.feature ubuntu-advantage-tools-28.1~18.04/features/unattached_status.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/unattached_status.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/unattached_status.feature 2023-06-01 18:49:33.000000000 +0000 @@ -1,7 +1,7 @@ Feature: Unattached status @series.all - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Unattached status in a ubuntu machine - formatted Given a `` machine with ubuntu-advantage-tools installed When I run `pro status --format json` as non-root @@ -43,7 +43,7 @@ @series.xenial @series.bionic - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Unattached status in a ubuntu machine Given a `` machine with ubuntu-advantage-tools installed When I verify root and non-root `pro status` calls have the same output @@ -61,6 +61,8 @@ ros +yes +Security Updates for the Robot Operating System ros-updates +yes +All Updates for the Robot Operating System + For a list of all Ubuntu Pro services, run 'pro status --all' + This machine is not attached to an Ubuntu Pro subscription. See https://ubuntu.com/pro """ @@ -106,6 +108,8 @@ FEATURES allow_beta: True + For a list of all Ubuntu Pro services, run 'pro status --all' + This machine is not attached to an Ubuntu Pro subscription. See https://ubuntu.com/pro """ @@ -116,7 +120,7 @@ | bionic | @series.focal - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Unattached status in a ubuntu machine Given a `` machine with ubuntu-advantage-tools installed When I verify root and non-root `pro status` calls have the same output @@ -131,6 +135,8 @@ livepatch +yes +Canonical Livepatch service usg +yes +Security compliance and audit tools + For a list of all Ubuntu Pro services, run 'pro status --all' + This machine is not attached to an Ubuntu Pro subscription. 
See https://ubuntu.com/pro """ @@ -173,6 +179,8 @@ FEATURES allow_beta: True + For a list of all Ubuntu Pro services, run 'pro status --all' + This machine is not attached to an Ubuntu Pro subscription. See https://ubuntu.com/pro """ @@ -182,7 +190,7 @@ | focal | @series.jammy - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container Scenario Outline: Unattached status in a ubuntu machine Given a `` machine with ubuntu-advantage-tools installed When I verify root and non-root `pro status` calls have the same output @@ -194,6 +202,9 @@ esm-infra +yes +Expanded Security Maintenance for Infrastructure livepatch +yes +Canonical Livepatch service realtime-kernel +yes +Ubuntu kernel with PREEMPT_RT patches integrated + usg +yes +Security compliance and audit tools + + For a list of all Ubuntu Pro services, run 'pro status --all' This machine is not attached to an Ubuntu Pro subscription. See https://ubuntu.com/pro @@ -212,7 +223,7 @@ realtime-kernel +yes +Ubuntu kernel with PREEMPT_RT patches integrated ros +no +Security Updates for the Robot Operating System ros-updates +no +All Updates for the Robot Operating System - usg +no +Security compliance and audit tools + usg +yes +Security compliance and audit tools This machine is not attached to an Ubuntu Pro subscription. See https://ubuntu.com/pro @@ -231,10 +242,13 @@ esm-infra +yes +Expanded Security Maintenance for Infrastructure livepatch +yes +Canonical Livepatch service realtime-kernel +yes +Ubuntu kernel with PREEMPT_RT patches integrated + usg +yes +Security compliance and audit tools FEATURES allow_beta: True + For a list of all Ubuntu Pro services, run 'pro status --all' + This machine is not attached to an Ubuntu Pro subscription. See https://ubuntu.com/pro """ @@ -245,7 +259,7 @@ @series.xenial @series.bionic - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container @uses.config.contract_token Scenario Outline: Simulate status in a ubuntu machine Given a `` machine with ubuntu-advantage-tools installed @@ -306,7 +320,7 @@ | bionic | @series.focal - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container @uses.config.contract_token Scenario Outline: Simulate status in a ubuntu machine Given a `` machine with ubuntu-advantage-tools installed @@ -366,7 +380,7 @@ | focal | @series.jammy - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container @uses.config.contract_token Scenario Outline: Simulate status in a ubuntu machine Given a `` machine with ubuntu-advantage-tools installed @@ -377,6 +391,8 @@ esm-apps +yes +yes +yes +Expanded Security Maintenance for Applications esm-infra +yes +yes +yes +Expanded Security Maintenance for Infrastructure livepatch +yes +yes +yes +Canonical Livepatch service + realtime-kernel +yes +yes +no +Ubuntu kernel with PREEMPT_RT patches integrated + usg +yes +yes +no +Security compliance and audit tools """ When I do a preflight check for `contract_token` with the all flag Then stdout matches regexp: @@ -391,7 +407,7 @@ realtime-kernel +yes +yes +no +Ubuntu kernel with PREEMPT_RT patches integrated ros +no +yes +no +Security Updates for the Robot Operating System ros-updates +no +yes +no +All Updates for the Robot Operating System - usg +no +yes +no +Security compliance and audit tools + usg +yes +yes +no +Security compliance and audit tools """ When I do a preflight check for `contract_token` formatted as json Then stdout is a json matching the `ua_status` schema @@ -425,7 +441,7 @@ @series.xenial 
@series.bionic - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container @uses.config.contract_token_staging_expired Scenario Outline: Simulate status with expired token in a ubuntu machine Given a `` machine with ubuntu-advantage-tools installed @@ -471,7 +487,7 @@ | bionic | @series.focal - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container @uses.config.contract_token_staging_expired Scenario Outline: Simulate status with expired token in a ubuntu machine Given a `` machine with ubuntu-advantage-tools installed @@ -513,7 +529,7 @@ | focal | @series.jammy - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container @uses.config.contract_token_staging_expired Scenario Outline: Simulate status with expired token in a ubuntu machine Given a `` machine with ubuntu-advantage-tools installed diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/util.py ubuntu-advantage-tools-28.1~18.04/features/util.py --- ubuntu-advantage-tools-27.14.4~18.04/features/util.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/util.py 2023-06-01 18:49:33.000000000 +0000 @@ -246,9 +246,14 @@ if proc.returncode == 0: sbuild_cmd += ["--chroot", ua_chroot] + # disable unit-test during sbuild + env = os.environ.copy() + env["DEB_BUILD_OPTIONS"] = env.get("DEB_BUILD_OPTIONS", "") + " nocheck" + logging.info('--- Running "{}"'.format(" ".join(sbuild_cmd))) subprocess.run( sbuild_cmd, + env=env, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True, @@ -293,11 +298,11 @@ args = match.group(1).split() function_name = args[0] if function_name == "version": - if context.config.check_version: + if context.pro_config.check_version: processed_template = _replace_and_log( processed_template, match.group(0), - context.config.check_version, + context.pro_config.check_version, logger_fn, ) elif function_name == "machine-ip": @@ -312,7 +317,7 @@ processed_template = _replace_and_log( processed_template, match.group(0), - context.config.cloud, + context.pro_config.default_cloud.name, logger_fn, ) elif function_name == "today": @@ -331,7 +336,7 @@ processed_template = _replace_and_log( processed_template, match.group(0), - context.config.contract_token_staging, + context.pro_config.contract_token_staging, logger_fn, ) elif function_name == "stored_var": diff -Nru ubuntu-advantage-tools-27.14.4~18.04/features/_version.feature ubuntu-advantage-tools-28.1~18.04/features/_version.feature --- ubuntu-advantage-tools-27.14.4~18.04/features/_version.feature 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/features/_version.feature 2023-06-01 18:49:33.000000000 +0000 @@ -2,17 +2,17 @@ @series.all @uses.config.check_version - @uses.config.machine_type.lxd.container - @uses.config.machine_type.lxd.vm + @uses.config.machine_type.lxd-container + @uses.config.machine_type.lxd-vm @uses.config.machine_type.aws.generic @uses.config.machine_type.aws.pro - @uses.config.machine_type.aws.pro.fips + @uses.config.machine_type.aws.pro-fips @uses.config.machine_type.azure.generic @uses.config.machine_type.azure.pro - @uses.config.machine_type.azure.pro.fips + @uses.config.machine_type.azure.pro-fips @uses.config.machine_type.gcp.generic @uses.config.machine_type.gcp.pro - @uses.config.machine_type.gcp.pro.fips + @uses.config.machine_type.gcp.pro-fips Scenario Outline: Check pro version Given a `` machine with ubuntu-advantage-tools installed When I run `dpkg-query --showformat='${Version}' --show 
ubuntu-advantage-tools` with sudo @@ -44,7 +44,7 @@ @series.all @uses.config.check_version - @uses.config.machine_type.lxd.container + @uses.config.machine_type.lxd-container @upgrade Scenario Outline: Check pro version Given a `` machine with ubuntu-advantage-tools installed diff -Nru ubuntu-advantage-tools-27.14.4~18.04/.github/actions/bug-refs/action.yml ubuntu-advantage-tools-28.1~18.04/.github/actions/bug-refs/action.yml --- ubuntu-advantage-tools-27.14.4~18.04/.github/actions/bug-refs/action.yml 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/.github/actions/bug-refs/action.yml 2023-05-30 19:02:35.000000000 +0000 @@ -0,0 +1,9 @@ +name: 'Require Bug References' +description: 'Block PRs on missing bug references' +inputs: + repo-token: + description: 'Token for the repository. Can be passed in using {{ secrets.GITHUB_TOKEN }}' + required: true +runs: + using: 'node16' + main: 'index.js' diff -Nru ubuntu-advantage-tools-27.14.4~18.04/.github/actions/bug-refs/index.js ubuntu-advantage-tools-28.1~18.04/.github/actions/bug-refs/index.js --- ubuntu-advantage-tools-27.14.4~18.04/.github/actions/bug-refs/index.js 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/.github/actions/bug-refs/index.js 2023-06-01 18:49:33.000000000 +0000 @@ -0,0 +1,107 @@ +const core = require('@actions/core'); +const github = require('@actions/github'); + +const commentHeader = ""; + +function createCommentBody(commits, title) { + let newComment = ""; + newComment += commentHeader; + newComment += "\n"; + + newComment += "Jira: "; + const jiraMatches = title.toLocaleUpperCase().match(/SC-\d+/g); + if (jiraMatches === null || jiraMatches.length === 0) { + newComment += "This PR is not related to a Jira item. (The PR title does not include a SC-#### reference)\n"; + } else { + const jiraID = jiraMatches[0]; + newComment += `[${jiraID}](https://warthogs.atlassian.net/browse/${jiraID})\n`; + } + newComment += "\n"; + + let lpBugs = []; + let ghIssues = []; + commits.forEach(commit => { + const message = commit.commit.message.toLocaleUpperCase(); + lpBugs = lpBugs.concat(Array.from(message.matchAll(/LP: #(\d+)/g)).map(m => m[1])); + ghIssues = ghIssues.concat(Array.from(message.matchAll(/FIXES: #(\d+)/g)).map(m => m[1])); + ghIssues = ghIssues.concat(Array.from(message.matchAll(/CLOSES: #(\d+)/g)).map(m => m[1])); + }); + + newComment += "GitHub Issues:"; + if (ghIssues.length === 0) { + newComment += " No GitHub issues are fixed by this PR. (No commits have Fixes: #### references)\n"; + } else { + newComment += "\n"; + ghIssues.forEach(issue => { + newComment += `- Fixes: #${issue}\n`; + }); + } + newComment += "\n"; + + newComment += "Launchpad Bugs:"; + if (lpBugs.length === 0) { + newComment += " No Launchpad bugs are fixed by this PR. (No commits have LP: #### references)\n"; + } else { + newComment += "\n"; + lpBugs.forEach(bug => { + newComment += `- LP: [#${bug}](https://bugs.launchpad.net/ubuntu/+source/ubuntu-advantage-tools/+bug/${bug})\n`; + }); + } + newComment += "\n"; + + newComment += "👍 this comment to confirm that this is correct."; + + return newComment; +} + +async function run() { + const context = github.context; + if (context.eventName !== "pull_request") { + console.log( + 'The event that triggered this action was not a pull request, skipping.' 
+ ); + return; + } + + const client = github.getOctokit( + core.getInput('repo-token', {required: true}) + ); + const commits = await client.rest.pulls.listCommits({ + owner: context.issue.owner, + repo: context.issue.repo, + pull_number: context.issue.number, + }); + const comments = await client.rest.issues.listComments({ + owner: context.issue.owner, + repo: context.issue.repo, + issue_number: context.issue.number, + }); + const theComment = comments.data.find(c => c.body.includes(commentHeader)); + if (theComment) { + // comment already exists, update it appropriately + const existingBody = theComment.body; + const newBody = createCommentBody(commits.data, context.payload.pull_request.title); + if (existingBody !== newBody) { + client.rest.issues.updateComment({ + owner: context.issue.owner, + repo: context.issue.repo, + comment_id: theComment.id, + body: newBody, + }); + } + } else { + // first run, comment doesn't exist yet + const newBody = createCommentBody(commits.data, context.payload.pull_request.title); + client.rest.issues.createComment({ + owner: context.issue.owner, + repo: context.issue.repo, + issue_number: context.issue.number, + body: newBody, + }); + } +} + +run().catch(error => { + console.error(error); + core.setFailed(error.message); +}) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/.github/actions/bug-refs/package.json ubuntu-advantage-tools-28.1~18.04/.github/actions/bug-refs/package.json --- ubuntu-advantage-tools-27.14.4~18.04/.github/actions/bug-refs/package.json 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/.github/actions/bug-refs/package.json 2023-05-30 19:02:35.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "name": "bug-refs", + "version": "1.0.0", + "description": "Block PRs on missing bug references", + "main": "index.js", + "dependencies": { + "@actions/core": "^1.10.0", + "@actions/github": "^5.1.1" + } +} diff -Nru ubuntu-advantage-tools-27.14.4~18.04/.github/actions/bug-refs/package-lock.json ubuntu-advantage-tools-28.1~18.04/.github/actions/bug-refs/package-lock.json --- ubuntu-advantage-tools-27.14.4~18.04/.github/actions/bug-refs/package-lock.json 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/.github/actions/bug-refs/package-lock.json 2023-05-30 19:02:35.000000000 +0000 @@ -0,0 +1,430 @@ +{ + "name": "bug-refs", + "version": "1.0.0", + "lockfileVersion": 2, + "requires": true, + "packages": { + "": { + "name": "bug-refs", + "version": "1.0.0", + "dependencies": { + "@actions/core": "^1.10.0", + "@actions/github": "^5.1.1" + } + }, + "node_modules/@actions/core": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@actions/core/-/core-1.10.0.tgz", + "integrity": "sha512-2aZDDa3zrrZbP5ZYg159sNoLRb61nQ7awl5pSvIq5Qpj81vwDzdMRKzkWJGJuwVvWpvZKx7vspJALyvaaIQyug==", + "dependencies": { + "@actions/http-client": "^2.0.1", + "uuid": "^8.3.2" + } + }, + "node_modules/@actions/github": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/@actions/github/-/github-5.1.1.tgz", + "integrity": "sha512-Nk59rMDoJaV+mHCOJPXuvB1zIbomlKS0dmSIqPGxd0enAXBnOfn4VWF+CGtRCwXZG9Epa54tZA7VIRlJDS8A6g==", + "dependencies": { + "@actions/http-client": "^2.0.1", + "@octokit/core": "^3.6.0", + "@octokit/plugin-paginate-rest": "^2.17.0", + "@octokit/plugin-rest-endpoint-methods": "^5.13.0" + } + }, + "node_modules/@actions/http-client": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@actions/http-client/-/http-client-2.1.0.tgz", + "integrity": 
"sha512-BonhODnXr3amchh4qkmjPMUO8mFi/zLaaCeCAJZqch8iQqyDnVIkySjB38VHAC8IJ+bnlgfOqlhpyCUZHlQsqw==", + "dependencies": { + "tunnel": "^0.0.6" + } + }, + "node_modules/@octokit/auth-token": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@octokit/auth-token/-/auth-token-2.5.0.tgz", + "integrity": "sha512-r5FVUJCOLl19AxiuZD2VRZ/ORjp/4IN98Of6YJoJOkY75CIBuYfmiNHGrDwXr+aLGG55igl9QrxX3hbiXlLb+g==", + "dependencies": { + "@octokit/types": "^6.0.3" + } + }, + "node_modules/@octokit/core": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/@octokit/core/-/core-3.6.0.tgz", + "integrity": "sha512-7RKRKuA4xTjMhY+eG3jthb3hlZCsOwg3rztWh75Xc+ShDWOfDDATWbeZpAHBNRpm4Tv9WgBMOy1zEJYXG6NJ7Q==", + "dependencies": { + "@octokit/auth-token": "^2.4.4", + "@octokit/graphql": "^4.5.8", + "@octokit/request": "^5.6.3", + "@octokit/request-error": "^2.0.5", + "@octokit/types": "^6.0.3", + "before-after-hook": "^2.2.0", + "universal-user-agent": "^6.0.0" + } + }, + "node_modules/@octokit/endpoint": { + "version": "6.0.12", + "resolved": "https://registry.npmjs.org/@octokit/endpoint/-/endpoint-6.0.12.tgz", + "integrity": "sha512-lF3puPwkQWGfkMClXb4k/eUT/nZKQfxinRWJrdZaJO85Dqwo/G0yOC434Jr2ojwafWJMYqFGFa5ms4jJUgujdA==", + "dependencies": { + "@octokit/types": "^6.0.3", + "is-plain-object": "^5.0.0", + "universal-user-agent": "^6.0.0" + } + }, + "node_modules/@octokit/graphql": { + "version": "4.8.0", + "resolved": "https://registry.npmjs.org/@octokit/graphql/-/graphql-4.8.0.tgz", + "integrity": "sha512-0gv+qLSBLKF0z8TKaSKTsS39scVKF9dbMxJpj3U0vC7wjNWFuIpL/z76Qe2fiuCbDRcJSavkXsVtMS6/dtQQsg==", + "dependencies": { + "@octokit/request": "^5.6.0", + "@octokit/types": "^6.0.3", + "universal-user-agent": "^6.0.0" + } + }, + "node_modules/@octokit/openapi-types": { + "version": "12.11.0", + "resolved": "https://registry.npmjs.org/@octokit/openapi-types/-/openapi-types-12.11.0.tgz", + "integrity": "sha512-VsXyi8peyRq9PqIz/tpqiL2w3w80OgVMwBHltTml3LmVvXiphgeqmY9mvBw9Wu7e0QWk/fqD37ux8yP5uVekyQ==" + }, + "node_modules/@octokit/plugin-paginate-rest": { + "version": "2.21.3", + "resolved": "https://registry.npmjs.org/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-2.21.3.tgz", + "integrity": "sha512-aCZTEf0y2h3OLbrgKkrfFdjRL6eSOo8komneVQJnYecAxIej7Bafor2xhuDJOIFau4pk0i/P28/XgtbyPF0ZHw==", + "dependencies": { + "@octokit/types": "^6.40.0" + }, + "peerDependencies": { + "@octokit/core": ">=2" + } + }, + "node_modules/@octokit/plugin-rest-endpoint-methods": { + "version": "5.16.2", + "resolved": "https://registry.npmjs.org/@octokit/plugin-rest-endpoint-methods/-/plugin-rest-endpoint-methods-5.16.2.tgz", + "integrity": "sha512-8QFz29Fg5jDuTPXVtey05BLm7OB+M8fnvE64RNegzX7U+5NUXcOcnpTIK0YfSHBg8gYd0oxIq3IZTe9SfPZiRw==", + "dependencies": { + "@octokit/types": "^6.39.0", + "deprecation": "^2.3.1" + }, + "peerDependencies": { + "@octokit/core": ">=3" + } + }, + "node_modules/@octokit/request": { + "version": "5.6.3", + "resolved": "https://registry.npmjs.org/@octokit/request/-/request-5.6.3.tgz", + "integrity": "sha512-bFJl0I1KVc9jYTe9tdGGpAMPy32dLBXXo1dS/YwSCTL/2nd9XeHsY616RE3HPXDVk+a+dBuzyz5YdlXwcDTr2A==", + "dependencies": { + "@octokit/endpoint": "^6.0.1", + "@octokit/request-error": "^2.1.0", + "@octokit/types": "^6.16.1", + "is-plain-object": "^5.0.0", + "node-fetch": "^2.6.7", + "universal-user-agent": "^6.0.0" + } + }, + "node_modules/@octokit/request-error": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@octokit/request-error/-/request-error-2.1.0.tgz", + "integrity": 
"sha512-1VIvgXxs9WHSjicsRwq8PlR2LR2x6DwsJAaFgzdi0JfJoGSO8mYI/cHJQ+9FbN21aa+DrgNLnwObmyeSC8Rmpg==", + "dependencies": { + "@octokit/types": "^6.0.3", + "deprecation": "^2.0.0", + "once": "^1.4.0" + } + }, + "node_modules/@octokit/types": { + "version": "6.41.0", + "resolved": "https://registry.npmjs.org/@octokit/types/-/types-6.41.0.tgz", + "integrity": "sha512-eJ2jbzjdijiL3B4PrSQaSjuF2sPEQPVCPzBvTHJD9Nz+9dw2SGH4K4xeQJ77YfTq5bRQ+bD8wT11JbeDPmxmGg==", + "dependencies": { + "@octokit/openapi-types": "^12.11.0" + } + }, + "node_modules/before-after-hook": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-2.2.3.tgz", + "integrity": "sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ==" + }, + "node_modules/deprecation": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/deprecation/-/deprecation-2.3.1.tgz", + "integrity": "sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ==" + }, + "node_modules/is-plain-object": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz", + "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/node-fetch": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.9.tgz", + "integrity": "sha512-DJm/CJkZkRjKKj4Zi4BsKVZh3ValV5IR5s7LVZnW+6YMh0W1BfNA8XSs6DLMGYlId5F3KnA70uu2qepcR08Qqg==", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" + }, + "node_modules/tunnel": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/tunnel/-/tunnel-0.0.6.tgz", + "integrity": "sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg==", + "engines": { + "node": ">=0.6.11 <=0.7.0 || >=0.7.3" + } + }, + "node_modules/universal-user-agent": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-6.0.0.tgz", + "integrity": "sha512-isyNax3wXoKaulPDZWHQqbmIx1k2tb9fb3GGDBRxCscfYV2Ch7WxPArBsFEG8s/safwXTT7H4QGhaIkTp9447w==" + }, + "node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": 
"https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" + } + }, + "dependencies": { + "@actions/core": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@actions/core/-/core-1.10.0.tgz", + "integrity": "sha512-2aZDDa3zrrZbP5ZYg159sNoLRb61nQ7awl5pSvIq5Qpj81vwDzdMRKzkWJGJuwVvWpvZKx7vspJALyvaaIQyug==", + "requires": { + "@actions/http-client": "^2.0.1", + "uuid": "^8.3.2" + } + }, + "@actions/github": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/@actions/github/-/github-5.1.1.tgz", + "integrity": "sha512-Nk59rMDoJaV+mHCOJPXuvB1zIbomlKS0dmSIqPGxd0enAXBnOfn4VWF+CGtRCwXZG9Epa54tZA7VIRlJDS8A6g==", + "requires": { + "@actions/http-client": "^2.0.1", + "@octokit/core": "^3.6.0", + "@octokit/plugin-paginate-rest": "^2.17.0", + "@octokit/plugin-rest-endpoint-methods": "^5.13.0" + } + }, + "@actions/http-client": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@actions/http-client/-/http-client-2.1.0.tgz", + "integrity": "sha512-BonhODnXr3amchh4qkmjPMUO8mFi/zLaaCeCAJZqch8iQqyDnVIkySjB38VHAC8IJ+bnlgfOqlhpyCUZHlQsqw==", + "requires": { + "tunnel": "^0.0.6" + } + }, + "@octokit/auth-token": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@octokit/auth-token/-/auth-token-2.5.0.tgz", + "integrity": "sha512-r5FVUJCOLl19AxiuZD2VRZ/ORjp/4IN98Of6YJoJOkY75CIBuYfmiNHGrDwXr+aLGG55igl9QrxX3hbiXlLb+g==", + "requires": { + "@octokit/types": "^6.0.3" + } + }, + "@octokit/core": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/@octokit/core/-/core-3.6.0.tgz", + "integrity": "sha512-7RKRKuA4xTjMhY+eG3jthb3hlZCsOwg3rztWh75Xc+ShDWOfDDATWbeZpAHBNRpm4Tv9WgBMOy1zEJYXG6NJ7Q==", + "requires": { + "@octokit/auth-token": "^2.4.4", + "@octokit/graphql": "^4.5.8", + "@octokit/request": "^5.6.3", + "@octokit/request-error": "^2.0.5", + "@octokit/types": "^6.0.3", + "before-after-hook": "^2.2.0", + "universal-user-agent": "^6.0.0" + } + }, + "@octokit/endpoint": { + "version": "6.0.12", + "resolved": "https://registry.npmjs.org/@octokit/endpoint/-/endpoint-6.0.12.tgz", + "integrity": "sha512-lF3puPwkQWGfkMClXb4k/eUT/nZKQfxinRWJrdZaJO85Dqwo/G0yOC434Jr2ojwafWJMYqFGFa5ms4jJUgujdA==", + "requires": { + "@octokit/types": "^6.0.3", + "is-plain-object": "^5.0.0", + "universal-user-agent": "^6.0.0" + } + }, + "@octokit/graphql": { + "version": "4.8.0", + "resolved": "https://registry.npmjs.org/@octokit/graphql/-/graphql-4.8.0.tgz", + "integrity": "sha512-0gv+qLSBLKF0z8TKaSKTsS39scVKF9dbMxJpj3U0vC7wjNWFuIpL/z76Qe2fiuCbDRcJSavkXsVtMS6/dtQQsg==", + "requires": { + "@octokit/request": "^5.6.0", + "@octokit/types": "^6.0.3", + "universal-user-agent": "^6.0.0" + } + }, + "@octokit/openapi-types": { + "version": "12.11.0", + "resolved": "https://registry.npmjs.org/@octokit/openapi-types/-/openapi-types-12.11.0.tgz", + "integrity": "sha512-VsXyi8peyRq9PqIz/tpqiL2w3w80OgVMwBHltTml3LmVvXiphgeqmY9mvBw9Wu7e0QWk/fqD37ux8yP5uVekyQ==" + }, + "@octokit/plugin-paginate-rest": { + "version": "2.21.3", + "resolved": "https://registry.npmjs.org/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-2.21.3.tgz", + "integrity": 
"sha512-aCZTEf0y2h3OLbrgKkrfFdjRL6eSOo8komneVQJnYecAxIej7Bafor2xhuDJOIFau4pk0i/P28/XgtbyPF0ZHw==", + "requires": { + "@octokit/types": "^6.40.0" + } + }, + "@octokit/plugin-rest-endpoint-methods": { + "version": "5.16.2", + "resolved": "https://registry.npmjs.org/@octokit/plugin-rest-endpoint-methods/-/plugin-rest-endpoint-methods-5.16.2.tgz", + "integrity": "sha512-8QFz29Fg5jDuTPXVtey05BLm7OB+M8fnvE64RNegzX7U+5NUXcOcnpTIK0YfSHBg8gYd0oxIq3IZTe9SfPZiRw==", + "requires": { + "@octokit/types": "^6.39.0", + "deprecation": "^2.3.1" + } + }, + "@octokit/request": { + "version": "5.6.3", + "resolved": "https://registry.npmjs.org/@octokit/request/-/request-5.6.3.tgz", + "integrity": "sha512-bFJl0I1KVc9jYTe9tdGGpAMPy32dLBXXo1dS/YwSCTL/2nd9XeHsY616RE3HPXDVk+a+dBuzyz5YdlXwcDTr2A==", + "requires": { + "@octokit/endpoint": "^6.0.1", + "@octokit/request-error": "^2.1.0", + "@octokit/types": "^6.16.1", + "is-plain-object": "^5.0.0", + "node-fetch": "^2.6.7", + "universal-user-agent": "^6.0.0" + } + }, + "@octokit/request-error": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@octokit/request-error/-/request-error-2.1.0.tgz", + "integrity": "sha512-1VIvgXxs9WHSjicsRwq8PlR2LR2x6DwsJAaFgzdi0JfJoGSO8mYI/cHJQ+9FbN21aa+DrgNLnwObmyeSC8Rmpg==", + "requires": { + "@octokit/types": "^6.0.3", + "deprecation": "^2.0.0", + "once": "^1.4.0" + } + }, + "@octokit/types": { + "version": "6.41.0", + "resolved": "https://registry.npmjs.org/@octokit/types/-/types-6.41.0.tgz", + "integrity": "sha512-eJ2jbzjdijiL3B4PrSQaSjuF2sPEQPVCPzBvTHJD9Nz+9dw2SGH4K4xeQJ77YfTq5bRQ+bD8wT11JbeDPmxmGg==", + "requires": { + "@octokit/openapi-types": "^12.11.0" + } + }, + "before-after-hook": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-2.2.3.tgz", + "integrity": "sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ==" + }, + "deprecation": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/deprecation/-/deprecation-2.3.1.tgz", + "integrity": "sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ==" + }, + "is-plain-object": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz", + "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==" + }, + "node-fetch": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.9.tgz", + "integrity": "sha512-DJm/CJkZkRjKKj4Zi4BsKVZh3ValV5IR5s7LVZnW+6YMh0W1BfNA8XSs6DLMGYlId5F3KnA70uu2qepcR08Qqg==", + "requires": { + "whatwg-url": "^5.0.0" + } + }, + "once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "requires": { + "wrappy": "1" + } + }, + "tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" + }, + "tunnel": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/tunnel/-/tunnel-0.0.6.tgz", + "integrity": "sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg==" + }, + "universal-user-agent": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-6.0.0.tgz", + "integrity": 
"sha512-isyNax3wXoKaulPDZWHQqbmIx1k2tb9fb3GGDBRxCscfYV2Ch7WxPArBsFEG8s/safwXTT7H4QGhaIkTp9447w==" + }, + "uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==" + }, + "webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" + }, + "whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "requires": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" + } + } +} diff -Nru ubuntu-advantage-tools-27.14.4~18.04/.github/PULL_REQUEST_TEMPLATE.md ubuntu-advantage-tools-28.1~18.04/.github/PULL_REQUEST_TEMPLATE.md --- ubuntu-advantage-tools-27.14.4~18.04/.github/PULL_REQUEST_TEMPLATE.md 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/.github/PULL_REQUEST_TEMPLATE.md 2023-05-30 19:02:35.000000000 +0000 @@ -1,5 +1,13 @@ -## Proposed Commit Message - +## Why is this needed? + +This PR solves all of our problems because... + + + ## Test Steps + + ## Checklist diff -Nru ubuntu-advantage-tools-27.14.4~18.04/.github/workflows/ci-base.yaml ubuntu-advantage-tools-28.1~18.04/.github/workflows/ci-base.yaml --- ubuntu-advantage-tools-27.14.4~18.04/.github/workflows/ci-base.yaml 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/.github/workflows/ci-base.yaml 2023-05-30 19:02:35.000000000 +0000 @@ -29,6 +29,8 @@ run: tox -e mypy - name: Version Consistency run: python3 ./tools/check-versions-are-consistent.py + - name: Docs + run: tox -e docs unit-tests: name: Unit Tests runs-on: ubuntu-22.04 diff -Nru ubuntu-advantage-tools-27.14.4~18.04/.github/workflows/ci-integration.yaml ubuntu-advantage-tools-28.1~18.04/.github/workflows/ci-integration.yaml --- ubuntu-advantage-tools-27.14.4~18.04/.github/workflows/ci-integration.yaml 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/.github/workflows/ci-integration.yaml 2023-05-30 19:02:35.000000000 +0000 @@ -92,6 +92,9 @@ # in a way that is incompatible with lxd. 
# https://linuxcontainers.org/lxd/docs/master/howto/network_bridge_firewalld/#prevent-issues-with-lxd-and-docker sudo iptables -I DOCKER-USER -j ACCEPT + - name: Refresh LXD + if: matrix.platform == 'lxd' || matrix.platform == 'vm' + run: sudo snap refresh --channel latest/stable lxd - name: Initialize LXD if: matrix.platform == 'lxd' || matrix.platform == 'vm' run: sudo lxd init --auto diff -Nru ubuntu-advantage-tools-27.14.4~18.04/.github/workflows/custom_pr_checks.yaml ubuntu-advantage-tools-28.1~18.04/.github/workflows/custom_pr_checks.yaml --- ubuntu-advantage-tools-27.14.4~18.04/.github/workflows/custom_pr_checks.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/.github/workflows/custom_pr_checks.yaml 2023-05-30 19:02:35.000000000 +0000 @@ -0,0 +1,27 @@ +--- + +name: Custom PR Checks + +on: + pull_request: + types: + - opened + - synchronize + - reopened + - edited + branches: + - main + +jobs: + bug-refs: + runs-on: ubuntu-latest + steps: + - name: Git checkout + uses: actions/checkout@v3 + - name: Install dependencies + run: cd ./.github/actions/bug-refs && npm install + - name: Check for bug references + uses: ./.github/actions/bug-refs + id: bug-refs + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} diff -Nru ubuntu-advantage-tools-27.14.4~18.04/integration-requirements.txt ubuntu-advantage-tools-28.1~18.04/integration-requirements.txt --- ubuntu-advantage-tools-27.14.4~18.04/integration-requirements.txt 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/integration-requirements.txt 2023-05-30 19:02:35.000000000 +0000 @@ -2,11 +2,8 @@ behave jsonschema PyHamcrest -pycloudlib @ git+https://github.com/canonical/pycloudlib.git@e7c4a42eb98a914b084b253c3f96c960de42e8fa +pycloudlib==1!2.1.3 toml==0.10 - -# Simplestreams is not found on PyPi so pull from repo directly -git+https://git.launchpad.net/simplestreams@21c5bba2a5413c51e6b9131fc450e96f6b46090d ipdb diff -Nru ubuntu-advantage-tools-27.14.4~18.04/lib/apt_news.py ubuntu-advantage-tools-28.1~18.04/lib/apt_news.py --- ubuntu-advantage-tools-27.14.4~18.04/lib/apt_news.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/lib/apt_news.py 2023-05-30 19:02:35.000000000 +0000 @@ -3,7 +3,7 @@ import logging from datetime import datetime, timedelta, timezone -from uaclient import apt +from uaclient import apt, defaults from uaclient.apt_news import update_apt_news from uaclient.config import UAConfig from uaclient.daemon import setup_logging @@ -22,6 +22,12 @@ if __name__ == "__main__": + setup_logging( + logging.INFO, + logging.DEBUG, + defaults.CONFIG_DEFAULTS["log_file"], + logger=logging.getLogger(), + ) cfg = UAConfig() setup_logging( logging.INFO, diff -Nru ubuntu-advantage-tools-27.14.4~18.04/lib/auto_attach.py ubuntu-advantage-tools-28.1~18.04/lib/auto_attach.py --- ubuntu-advantage-tools-27.14.4~18.04/lib/auto_attach.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/lib/auto_attach.py 2023-05-30 19:02:35.000000000 +0000 @@ -13,7 +13,7 @@ import logging import sys -from uaclient import messages, system +from uaclient import defaults, messages, system from uaclient.api.exceptions import ( AlreadyAttachedError, AutoAttachDisabledError, @@ -103,6 +103,12 @@ if __name__ == "__main__": + setup_logging( + logging.INFO, + logging.DEBUG, + defaults.CONFIG_DEFAULTS["log_file"], + logger=logging.getLogger(), + ) cfg = UAConfig() setup_logging( logging.INFO, diff -Nru ubuntu-advantage-tools-27.14.4~18.04/lib/daemon.py 
ubuntu-advantage-tools-28.1~18.04/lib/daemon.py --- ubuntu-advantage-tools-27.14.4~18.04/lib/daemon.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/lib/daemon.py 2023-06-01 18:49:33.000000000 +0000 @@ -4,6 +4,7 @@ from systemd.daemon import notify # type: ignore +from uaclient import defaults from uaclient.config import UAConfig from uaclient.daemon import ( poll_for_pro_license, @@ -15,6 +16,12 @@ def main() -> int: + setup_logging( + logging.INFO, + logging.DEBUG, + defaults.CONFIG_DEFAULTS["daemon_log_file"], + logger=LOG, + ) cfg = UAConfig() setup_logging( logging.INFO, logging.DEBUG, log_file=cfg.daemon_log_file, logger=LOG @@ -35,7 +42,11 @@ notify("READY=1") - if os.path.exists("/run/cloud-init/cloud-id-gce") and not os.path.exists( + is_correct_cloud = any( + os.path.exists("/run/cloud-init/cloud-id-{}".format(cloud)) + for cloud in ("gce", "azure") + ) + if is_correct_cloud and not os.path.exists( retry_auto_attach.FLAG_FILE_PATH ): LOG.info("mode: poll for pro license") diff -Nru ubuntu-advantage-tools-27.14.4~18.04/lib/esm_cache.py ubuntu-advantage-tools-28.1~18.04/lib/esm_cache.py --- ubuntu-advantage-tools-27.14.4~18.04/lib/esm_cache.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/lib/esm_cache.py 2023-05-30 19:02:35.000000000 +0000 @@ -2,6 +2,7 @@ import logging +from uaclient import defaults from uaclient.apt import update_esm_caches from uaclient.config import UAConfig from uaclient.daemon import setup_logging @@ -18,6 +19,12 @@ if __name__ == "__main__": + setup_logging( + logging.INFO, + logging.DEBUG, + defaults.CONFIG_DEFAULTS["log_file"], + logger=logging.getLogger(), + ) cfg = UAConfig() setup_logging( logging.INFO, diff -Nru ubuntu-advantage-tools-27.14.4~18.04/lib/reboot_cmds.py ubuntu-advantage-tools-28.1~18.04/lib/reboot_cmds.py --- ubuntu-advantage-tools-27.14.4~18.04/lib/reboot_cmds.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/lib/reboot_cmds.py 2023-06-01 18:49:33.000000000 +0000 @@ -15,40 +15,21 @@ """ import logging -import os import sys -from uaclient import config, contract, exceptions, lock, messages +from uaclient import ( + config, + contract, + defaults, + exceptions, + lock, + messages, + upgrade_lts_contract, +) +from uaclient.api.u.pro.status.is_attached.v1 import _is_attached from uaclient.cli import setup_logging from uaclient.entitlements.fips import FIPSEntitlement -from uaclient.files import notices -from uaclient.files.notices import Notice -from uaclient.system import subp - -# Retry sleep backoff algorithm if lock is held. -# Lock may be held by auto-attach on systems with ubuntu-advantage-pro. 
-SLEEP_ON_LOCK_HELD = 1 -MAX_RETRIES_ON_LOCK_HELD = 7 - - -def run_command(cmd, cfg: config.UAConfig): - try: - out, _ = subp(cmd.split(), capture=True) - logging.debug("Successfully executed cmd: {}".format(cmd)) - except exceptions.ProcessExecutionError as exec_error: - msg = ( - "Failed running cmd: {}\n" - "Return code: {}\n" - "Stderr: {}\n" - "Stdout: {}".format( - cmd, exec_error.exit_code, exec_error.stderr, exec_error.stdout - ) - ) - - cfg.delete_cache_key("marker-reboot-cmds") - - logging.warning(msg) - sys.exit(1) +from uaclient.files import notices, state_files def fix_pro_pkg_holds(cfg: config.UAConfig): @@ -57,101 +38,88 @@ return for service in status_cache.get("services", []): if service.get("name") == "fips": - service_status = service.get("status") - if service_status == "enabled": - ent_cls = FIPSEntitlement - logging.debug( - "Attempting to remove Ubuntu Pro FIPS package holds" - ) - entitlement = ent_cls(cfg) - try: - entitlement.setup_apt_config() # Removes package holds - logging.debug( - "Successfully removed Ubuntu Pro FIPS package holds" - ) - except Exception as e: - logging.exception(e) - logging.warning( - "Could not remove Ubuntu PRO FIPS package holds" - ) - try: - entitlement.install_packages(cleanup_on_failure=False) - except exceptions.UserFacingError as e: - logging.error(e.msg) - logging.warning( - "Failed to install packages at boot: {}".format( - ", ".join(entitlement.packages) - ) - ) - sys.exit(1) + if service.get("status") == "enabled": + # fips was enabled, fix the holds + break + else: + # fips was not enabled, don't do anything + return + + logging.debug("Attempting to remove Ubuntu Pro FIPS package holds") + fips = FIPSEntitlement(cfg) + try: + fips.setup_apt_config() # Removes package holds + logging.debug("Successfully removed Ubuntu Pro FIPS package holds") + except Exception as e: + logging.error(e) + logging.warning("Could not remove Ubuntu Pro FIPS package holds") + + try: + fips.install_packages(cleanup_on_failure=False) + except exceptions.UserFacingError: + logging.warning( + "Failed to install packages at boot: {}".format( + ", ".join(fips.packages) + ) + ) + raise def refresh_contract(cfg: config.UAConfig): try: contract.request_updated_contract(cfg) - except exceptions.UrlError as exc: - logging.exception(exc) + except exceptions.UrlError: logging.warning(messages.REFRESH_CONTRACT_FAILURE) - sys.exit(1) - + raise -def process_remaining_deltas(cfg: config.UAConfig): - cmd = "/usr/bin/python3 /usr/lib/ubuntu-advantage/upgrade_lts_contract.py" - run_command(cmd=cmd, cfg=cfg) +def main(cfg: config.UAConfig) -> int: + if not state_files.reboot_cmd_marker_file.is_present: + logging.debug("Skipping reboot_cmds. Marker file not present") + notices.remove(notices.Notice.REBOOT_SCRIPT_FAILED) + return 0 -def process_reboot_operations(cfg: config.UAConfig): - - reboot_cmd_marker_file = cfg.data_path("marker-reboot-cmds") - - if not cfg.is_attached: + if not _is_attached(cfg).is_attached: logging.debug("Skipping reboot_cmds. 
Machine is unattached") + state_files.reboot_cmd_marker_file.delete() + notices.remove(notices.Notice.REBOOT_SCRIPT_FAILED) + return 0 - if os.path.exists(reboot_cmd_marker_file): - cfg.delete_cache_key("marker-reboot-cmds") - - return - - if os.path.exists(reboot_cmd_marker_file): - logging.debug("Running process contract deltas on reboot ...") - - try: + logging.debug("Running reboot commands...") + try: + with lock.SpinLock(cfg=cfg, lock_holder="pro-reboot-cmds"): fix_pro_pkg_holds(cfg) refresh_contract(cfg) - process_remaining_deltas(cfg) + upgrade_lts_contract.process_contract_delta_after_apt_lock(cfg) + # cleanup state after a succesful run + state_files.reboot_cmd_marker_file.delete() + notices.remove(notices.Notice.REBOOT_SCRIPT_FAILED) - cfg.delete_cache_key("marker-reboot-cmds") - notices.remove(Notice.REBOOT_SCRIPT_FAILED) - logging.debug("Successfully ran all commands on reboot.") - except Exception as e: - msg = "Failed running commands on reboot." - msg += str(e) - logging.error(msg) - notices.add( - Notice.REBOOT_SCRIPT_FAILED, - ) - - -def main(cfg: config.UAConfig): - """Retry running process_reboot_operations on LockHeldError - - :raises: LockHeldError when lock still held by auto-attach after retries. - UserFacingError for all other errors - """ - try: - with lock.SpinLock( - cfg=cfg, - lock_holder="ua-reboot-cmds", - sleep_time=SLEEP_ON_LOCK_HELD, - max_retries=MAX_RETRIES_ON_LOCK_HELD, - ): - process_reboot_operations(cfg=cfg) except exceptions.LockHeldError as e: logging.warning("Lock not released. %s", str(e.msg)) - sys.exit(1) + notices.add(notices.Notice.REBOOT_SCRIPT_FAILED) + return 1 + except exceptions.UserFacingError as e: + logging.error( + "Error while running commands on reboot: %s, %s", e.msg_code, e.msg + ) + notices.add(notices.Notice.REBOOT_SCRIPT_FAILED) + return 1 + except Exception as e: + logging.error("Failed running commands on reboot. 
Error: %s", str(e)) + notices.add(notices.Notice.REBOOT_SCRIPT_FAILED) + return 1 + + logging.debug("Successfully ran all commands on reboot.") + return 0 if __name__ == "__main__": + setup_logging( + logging.INFO, + logging.DEBUG, + defaults.CONFIG_DEFAULTS["log_file"], + ) cfg = config.UAConfig() setup_logging(logging.INFO, logging.DEBUG, log_file=cfg.log_file) - main(cfg=cfg) + sys.exit(main(cfg=cfg)) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/lib/timer.py ubuntu-advantage-tools-28.1~18.04/lib/timer.py --- ubuntu-advantage-tools-27.14.4~18.04/lib/timer.py 2023-04-06 13:49:20.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/lib/timer.py 2023-05-30 19:02:35.000000000 +0000 @@ -6,6 +6,7 @@ from datetime import datetime, timedelta, timezone from typing import Callable, Optional +from uaclient import defaults from uaclient.cli import setup_logging from uaclient.config import UAConfig from uaclient.exceptions import InvalidFileFormatError @@ -14,9 +15,9 @@ TimerJobState, timer_jobs_state_file, ) -from uaclient.jobs.metering import metering_enabled_resources -from uaclient.jobs.update_contract_info import update_contract_info -from uaclient.jobs.update_messaging import update_motd_messages +from uaclient.timer.metering import metering_enabled_resources +from uaclient.timer.update_contract_info import update_contract_info +from uaclient.timer.update_messaging import update_motd_messages LOG = logging.getLogger(__name__) UPDATE_MESSAGING_INTERVAL = 21600 # 6 hours @@ -178,6 +179,12 @@ if __name__ == "__main__": + setup_logging( + logging.CRITICAL, + logging.DEBUG, + defaults.CONFIG_DEFAULTS["timer_log_file"], + logger=LOG, + ) cfg = UAConfig() current_time = datetime.now(timezone.utc) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/lib/upgrade_lts_contract.py ubuntu-advantage-tools-28.1~18.04/lib/upgrade_lts_contract.py --- ubuntu-advantage-tools-27.14.4~18.04/lib/upgrade_lts_contract.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/lib/upgrade_lts_contract.py 2023-05-30 19:02:35.000000000 +0000 @@ -1,126 +1,18 @@ #!/usr/bin/env python3 """ -This script should be used after running do-release-upgrade in a machine. -It will detect any contract deltas between the release before -do-release-upgrade and the current release. If we find any differences in -the uaclient contract between those releases, we will apply that difference -in the upgraded release. - -For example, suppose we are on Trusty and we are upgrading to Xenial. We found -that the apt url for esm services on trusty: - -https://esm.ubuntu.com/ubuntu - -While on Xenial, the apt url is: - -https://esm.ubuntu.com/infra/ubuntu - -This script will detect differences like that and update the Xenial system -to reflect them. +This script is called after running do-release-upgrade in a machine. +See uaclient/upgrade_lts_contract.py for more details. """ import logging -import sys -import time +from uaclient import upgrade_lts_contract from uaclient.cli import setup_logging from uaclient.config import UAConfig -from uaclient.contract import process_entitlements_delta -from uaclient.defaults import ESM_APT_ROOTDIR -from uaclient.system import ensure_folder_absent, parse_os_release, subp - -version_to_codename = { - "14.04": "trusty", - "16.04": "xenial", - "18.04": "bionic", - "20.04": "focal", - "22.04": "jammy", - "22.10": "kinetic", -} - -# We consider the past release for LTSs to be the last LTS, -# because we don't have any services available on non-LTS. 
-# This makes it safer for us to try to process contract deltas. -# For example, we had "jammy": "focal" even when Impish was -# still supported. -current_codename_to_past_codename = { - "xenial": "trusty", - "bionic": "xenial", - "focal": "bionic", - "jammy": "focal", - "kinetic": "jammy", -} - - -def process_contract_delta_after_apt_lock() -> None: - logging.debug("Check whether to upgrade-lts-contract") - cfg = UAConfig() - if not cfg.is_attached: - logging.debug("Skipping upgrade-lts-contract. Machine is unattached") - return - out, _err = subp(["lsof", "/var/lib/apt/lists/lock"], rcs=[0, 1]) - msg = "Starting upgrade-lts-contract." - if out: - msg += " Retrying every 10 seconds waiting on released apt lock" - print(msg) - logging.debug(msg) - - current_version = parse_os_release()["VERSION_ID"] - current_release = version_to_codename.get(current_version) - - if current_release is None: - msg = "Unable to get release codename for version: {}".format( - current_version - ) - print(msg) - logging.warning(msg) - sys.exit(1) - - past_release = current_codename_to_past_codename.get(current_release) - if past_release is None: - msg = "Could not find past release for: {}".format(current_release) - print(msg) - logging.warning(msg) - sys.exit(1) - - past_entitlements = UAConfig( - series=past_release, - ).machine_token_file.entitlements - new_entitlements = UAConfig( - series=current_release, - ).machine_token_file.entitlements - - retry_count = 0 - while out: - # Loop until apt hold is released at the end of `do-release-upgrade` - time.sleep(10) - out, _err = subp(["lsof", "/var/lib/apt/lists/lock"], rcs=[0, 1]) - retry_count += 1 - - msg = "upgrade-lts-contract processing contract deltas: {} -> {}".format( - past_release, current_release - ) - print(msg) - logging.debug(msg) - - process_entitlements_delta( - cfg=cfg, - past_entitlements=past_entitlements, - new_entitlements=new_entitlements, - allow_enable=True, - series_overrides=False, - ) - msg = "upgrade-lts-contract succeeded after {} retries".format(retry_count) - print(msg) - logging.debug(msg) - - -def remove_private_esm_apt_cache(): - ensure_folder_absent(ESM_APT_ROOTDIR) - if __name__ == "__main__": setup_logging(logging.INFO, logging.DEBUG) - process_contract_delta_after_apt_lock() - remove_private_esm_apt_cache() + cfg = UAConfig() + upgrade_lts_contract.process_contract_delta_after_apt_lock(cfg) + upgrade_lts_contract.remove_private_esm_apt_cache() diff -Nru ubuntu-advantage-tools-27.14.4~18.04/mypy.ini ubuntu-advantage-tools-28.1~18.04/mypy.ini --- ubuntu-advantage-tools-27.14.4~18.04/mypy.ini 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/mypy.ini 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -[mypy] - -[mypy-uaclient.conftest] -ignore_errors = True - -[mypy-*.tests.*] -ignore_errors = True - -[mypy-uaclient.testing.*] -ignore_errors = True - -[mypy-apt_pkg] -ignore_missing_imports = True - -[mypy-behave.*] -ignore_missing_imports = True - -[mypy-hamcrest] -ignore_missing_imports = True diff -Nru ubuntu-advantage-tools-27.14.4~18.04/.pre-commit-config.yaml ubuntu-advantage-tools-28.1~18.04/.pre-commit-config.yaml --- ubuntu-advantage-tools-27.14.4~18.04/.pre-commit-config.yaml 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/.pre-commit-config.yaml 2023-06-01 18:49:33.000000000 +0000 @@ -4,7 +4,7 @@ hooks: - id: black - repo: https://github.com/pycqa/isort - rev: 5.8.0 # Also stored in dev-requirements.txt; update both together! 
+ rev: 5.12.0 # Also stored in dev-requirements.txt; update both together! hooks: - id: isort - repo: https://github.com/shellcheck-py/shellcheck-py diff -Nru ubuntu-advantage-tools-27.14.4~18.04/pyproject.toml ubuntu-advantage-tools-28.1~18.04/pyproject.toml --- ubuntu-advantage-tools-27.14.4~18.04/pyproject.toml 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/pyproject.toml 2023-05-30 19:02:35.000000000 +0000 @@ -5,3 +5,23 @@ [tool.isort] profile = "black" line_length = 79 + +[tool.mypy] +check_untyped_defs = "true" +explicit_package_bases = "true" + +[[tool.mypy.overrides]] +module = [ + "*.tests.*", + "uaclient.conftest", + "uaclient.testing.*", +] +ignore_errors = "true" + +[[tool.mypy.overrides]] +module = [ + "apt_pkg", + "behave.*", + "hamcrest", +] +ignore_missing_imports = "true" diff -Nru ubuntu-advantage-tools-27.14.4~18.04/sru/release-27.14/test-migrate-user-config.sh ubuntu-advantage-tools-28.1~18.04/sru/release-27.14/test-migrate-user-config.sh --- ubuntu-advantage-tools-27.14.4~18.04/sru/release-27.14/test-migrate-user-config.sh 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/sru/release-27.14/test-migrate-user-config.sh 2023-05-30 19:02:35.000000000 +0000 @@ -1,10 +1,6 @@ #!/bin/bash set -e -# TODO remove TESTING sections for -proposed verification -# TESTING: -local_deb=$1 - function install_old_version { name=$1 series=$2 @@ -25,17 +21,6 @@ name=$1 verify=$2 - # TESTING: - echo -e "\n-------------------------------------------" - echo "** upgrading to 27.14 from local - VERIFY $verify" - echo "-------------------------------------------" - lxc file push $local_deb $name/tmp/uanew.deb - lxc exec $name -- dpkg -i /tmp/uanew.deb - lxc exec $name -- apt-cache policy ubuntu-advantage-tools - echo "-------------------------------------------" - return - # END TESTING - echo -e "\n-------------------------------------------" echo "** upgrading to 27.14 from proposed - VERIFY $verify" echo "-------------------------------------------" @@ -127,7 +112,7 @@ upgrade_to_proposed $name "NO CONFFILE PROMPT" echo -e "\n-------------------------------------------" - echo "** Backup file is gone after successful migration" + echo "** Backup file is kept after successful migration" echo "-------------------------------------------" lxc exec $name -- ls -la /etc/ubuntu-advantage/ echo "-------------------------------------------" @@ -198,7 +183,7 @@ upgrade_to_proposed $name "NO CONFFILE PROMPT" echo -e "\n-------------------------------------------" - echo "** Backup file is gone after successful migration" + echo "** Backup file is kept after successful migration" echo "-------------------------------------------" lxc exec $name -- ls -la /etc/ubuntu-advantage/ echo "-------------------------------------------" @@ -275,14 +260,47 @@ echo "###########################################" } -# xenial -test_normal_upgrade xenial 27.11.3~16.04.1 -test_normal_upgrade xenial 27.13.1~16.04.1 -test_apt_news_false_upgrade xenial 27.11.3~16.04.1 -test_apt_news_false_upgrade xenial 27.13.1~16.04.1 -test_uaclient_conf_changes_upgrade xenial 27.11.3~16.04.1 -test_uaclient_conf_changes_upgrade xenial 27.13.1~16.04.1 -test_migration_failure xenial 27.11.3~16.04.1 -test_migration_failure xenial 27.13.1~16.04.1 - -# TODO: repeat for each release +test_normal_upgrade xenial 27.12~16.04.1 +test_normal_upgrade xenial 27.13.6~16.04.1 +test_apt_news_false_upgrade xenial 27.12~16.04.1 +test_apt_news_false_upgrade xenial 27.13.6~16.04.1 
+test_uaclient_conf_changes_upgrade xenial 27.12~16.04.1 +test_uaclient_conf_changes_upgrade xenial 27.13.6~16.04.1 +test_migration_failure xenial 27.12~16.04.1 +test_migration_failure xenial 27.13.6~16.04.1 + +test_normal_upgrade bionic 27.12~18.04.1 +test_normal_upgrade bionic 27.13.6~18.04.1 +test_apt_news_false_upgrade bionic 27.12~18.04.1 +test_apt_news_false_upgrade bionic 27.13.6~18.04.1 +test_uaclient_conf_changes_upgrade bionic 27.12~18.04.1 +test_uaclient_conf_changes_upgrade bionic 27.13.6~18.04.1 +test_migration_failure bionic 27.12~18.04.1 +test_migration_failure bionic 27.13.6~18.04.1 + +test_normal_upgrade focal 27.12~20.04.1 +test_normal_upgrade focal 27.13.6~20.04.1 +test_apt_news_false_upgrade focal 27.12~20.04.1 +test_apt_news_false_upgrade focal 27.13.6~20.04.1 +test_uaclient_conf_changes_upgrade focal 27.12~20.04.1 +test_uaclient_conf_changes_upgrade focal 27.13.6~20.04.1 +test_migration_failure focal 27.12~20.04.1 +test_migration_failure focal 27.13.6~20.04.1 + +test_normal_upgrade jammy 27.12~22.04.1 +test_normal_upgrade jammy 27.13.6~22.04.1 +test_apt_news_false_upgrade jammy 27.12~22.04.1 +test_apt_news_false_upgrade jammy 27.13.6~22.04.1 +test_uaclient_conf_changes_upgrade jammy 27.12~22.04.1 +test_uaclient_conf_changes_upgrade jammy 27.13.6~22.04.1 +test_migration_failure jammy 27.12~22.04.1 +test_migration_failure jammy 27.13.6~22.04.1 + +test_normal_upgrade kinetic 27.12~22.10.1 +test_normal_upgrade kinetic 27.13.6~22.10.1 +test_apt_news_false_upgrade kinetic 27.12~22.10.1 +test_apt_news_false_upgrade kinetic 27.13.6~22.10.1 +test_uaclient_conf_changes_upgrade kinetic 27.12~22.10.1 +test_uaclient_conf_changes_upgrade kinetic 27.13.6~22.10.1 +test_migration_failure kinetic 27.12~22.10.1 +test_migration_failure kinetic 27.13.6~22.10.1 diff -Nru ubuntu-advantage-tools-27.14.4~18.04/systemd/ua-auto-attach.service ubuntu-advantage-tools-28.1~18.04/systemd/ua-auto-attach.service --- ubuntu-advantage-tools-27.14.4~18.04/systemd/ua-auto-attach.service 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/systemd/ua-auto-attach.service 2023-06-27 00:49:37.000000000 +0000 @@ -1,5 +1,15 @@ +# This unit is delivered by the ubuntu-advantage-pro package and should +# only be installed on Ubuntu Pro images for use in public clouds that +# support them (AWS, Azure, GCP). +# On boot, if the instance has not already successfully attached to Ubuntu +# Pro services, it will attempt to "auto attach" by querying the cloud's +# attested metadata and sending it to https://contracts.canonical.com. +# If Canonical servers successfully verify that the metadata says this +# instance is entitled to Ubuntu Pro, then it allows the attachment +# process to continue and Ubuntu Pro services get enabled. + [Unit] -Description=Ubuntu Advantage auto attach +Description=Ubuntu Pro auto attach Before=cloud-config.service After=cloud-config.target diff -Nru ubuntu-advantage-tools-27.14.4~18.04/systemd/ua-reboot-cmds.service ubuntu-advantage-tools-28.1~18.04/systemd/ua-reboot-cmds.service --- ubuntu-advantage-tools-27.14.4~18.04/systemd/ua-reboot-cmds.service 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/systemd/ua-reboot-cmds.service 2023-06-27 00:49:37.000000000 +0000 @@ -1,7 +1,14 @@ +# On machines that are currently attached to Ubuntu Pro services, sometimes an action +# is required immediately after the next reboot. +# In those situations, a marker file is created that activates this service on the next boot. 
+# Circumstances that could cause this include: +# - Upgrading from one LTS to the next LTS: to account for service availability changes between releases +# - Pro FIPS images with outstanding apt hold on FIPS packages: to clear the holds + [Unit] -Description=Ubuntu Advantage reboot cmds +Description=Ubuntu Pro reboot cmds ConditionPathExists=/var/lib/ubuntu-advantage/marker-reboot-cmds-required -Wants=ua-auto-attach.service +ConditionPathExists=/var/lib/ubuntu-advantage/private/machine-token.json After=ua-auto-attach.service [Service] diff -Nru ubuntu-advantage-tools-27.14.4~18.04/systemd/ua-timer.service ubuntu-advantage-tools-28.1~18.04/systemd/ua-timer.service --- ubuntu-advantage-tools-27.14.4~18.04/systemd/ua-timer.service 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/systemd/ua-timer.service 2023-06-27 00:49:37.000000000 +0000 @@ -1,5 +1,12 @@ +# On machines that are currently attached to Ubuntu Pro services, some tasks need to run +# periodically in the background to maintain the state of the Ubuntu Pro services. +# These include: +# - Periodically ping https://contracts.canonical.com for metering and to check the contract expiration +# - If this contract is about to expire, add notification messages to MOTD +# Triggered by ua-timer.timer + [Unit] -Description=Ubuntu Advantage Timer for running repeated jobs +Description=Ubuntu Pro Timer for running repeated jobs After=network.target network-online.target systemd-networkd.service ua-auto-attach.service [Service] diff -Nru ubuntu-advantage-tools-27.14.4~18.04/systemd/ua-timer.timer ubuntu-advantage-tools-28.1~18.04/systemd/ua-timer.timer --- ubuntu-advantage-tools-27.14.4~18.04/systemd/ua-timer.timer 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/systemd/ua-timer.timer 2023-06-27 00:49:37.000000000 +0000 @@ -1,5 +1,9 @@ +# See ua-timer.service for a description of why this exists. + [Unit] -Description=Ubuntu Advantage Timer for running repeated jobs +Description=Ubuntu Pro Timer for running repeated jobs +# Only run if attached +ConditionPathExists=/var/lib/ubuntu-advantage/private/machine-token.json [Timer] OnUnitActiveSec=6h diff -Nru ubuntu-advantage-tools-27.14.4~18.04/systemd/ubuntu-advantage.service ubuntu-advantage-tools-28.1~18.04/systemd/ubuntu-advantage.service --- ubuntu-advantage-tools-27.14.4~18.04/systemd/ubuntu-advantage.service 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/systemd/ubuntu-advantage.service 2023-06-27 00:49:37.000000000 +0000 @@ -1,5 +1,5 @@ -# This service runs on GCP to enable auto-attaching to Ubuntu Advantage -# services when an Ubuntu Pro license is added to a GCP machine. +# This service runs on GCP and Azure to enable auto-attaching to Ubuntu Pro +# services when an Ubuntu Pro license is added to a machine. # It also serves as the retry service if an auto-attach fails and will # retry for up to one month after the failed attempt. # If you are uninterested in Ubuntu Pro services, then you can safely @@ -16,10 +16,11 @@ ConditionPathExists=!/var/lib/ubuntu-advantage/private/machine-token.json # This service has two modes: -# 1. GCP detect pro mode - only on GCP +# 1. Detect possible in-place upgrade to pro - on GCP and Azure # 2. auto-attach retry mode - only if ua-auto-attach.service fails # The following conditions correspond to those two modes. 
ConditionPathExists=|/run/cloud-init/cloud-id-gce +ConditionPathExists=|/run/cloud-init/cloud-id-azure ConditionPathExists=|/run/ubuntu-advantage/flags/auto-attach-failed [Service] diff -Nru ubuntu-advantage-tools-27.14.4~18.04/tools/constraints-bionic.txt ubuntu-advantage-tools-28.1~18.04/tools/constraints-bionic.txt --- ubuntu-advantage-tools-27.14.4~18.04/tools/constraints-bionic.txt 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/tools/constraints-bionic.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -attrs==17.4 -flake8==3.5.0 -py==1.5.2 -pycodestyle==2.3.1 -pyflakes==1.6.0 -pytest==3.3.2 -pytest-cov==2.5.1 -pyyaml==3.12 diff -Nru ubuntu-advantage-tools-27.14.4~18.04/tools/constraints-focal.txt ubuntu-advantage-tools-28.1~18.04/tools/constraints-focal.txt --- ubuntu-advantage-tools-27.14.4~18.04/tools/constraints-focal.txt 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/tools/constraints-focal.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -attrs==19.3 -flake8==3.7.9 -py==1.8.1 -pycodestyle==2.5.0 -pyflakes==2.1.1 -pytest==4.6.9 -pytest-cov==2.8.1 -pyyaml==5.3.1 \ No newline at end of file diff -Nru ubuntu-advantage-tools-27.14.4~18.04/tools/constraints-mypy.txt ubuntu-advantage-tools-28.1~18.04/tools/constraints-mypy.txt --- ubuntu-advantage-tools-27.14.4~18.04/tools/constraints-mypy.txt 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/tools/constraints-mypy.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -mypy -pyparsing==3.0.7 -pytest==6.1.2 -importlib-metadata==3.3.0 -packaging==20.9 diff -Nru ubuntu-advantage-tools-27.14.4~18.04/tools/constraints-xenial.txt ubuntu-advantage-tools-28.1~18.04/tools/constraints-xenial.txt --- ubuntu-advantage-tools-27.14.4~18.04/tools/constraints-xenial.txt 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/tools/constraints-xenial.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -attrs==15.2 -flake8==2.5.4 -pep8==1.7.0 -py==1.4.31 -# Xenial ships pyflakes 1.1.0, but there is a dependency mismatch between the -# deb and the pip versions of flake8==2.5.4, which requires pyflakes. 
-pyflakes==1.0.0 -pytest==2.8.7 -pytest-cov==2.2.1 -pyyaml==3.11 diff -Nru ubuntu-advantage-tools-27.14.4~18.04/tools/create-lp-release-branches.sh ubuntu-advantage-tools-28.1~18.04/tools/create-lp-release-branches.sh --- ubuntu-advantage-tools-27.14.4~18.04/tools/create-lp-release-branches.sh 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/tools/create-lp-release-branches.sh 2023-05-30 19:02:35.000000000 +0000 @@ -50,11 +50,11 @@ fi case "${release}" in - xenial) version=${UA_VERSION}~16.04.1;; - bionic) version=${UA_VERSION}~18.04.1;; - focal) version=${UA_VERSION}~20.04.1;; - jammy) version=${UA_VERSION}~22.04.1;; - kinetic) version=${UA_VERSION}~22.10.1;; + xenial) version=${UA_VERSION}~16.04;; + bionic) version=${UA_VERSION}~18.04;; + focal) version=${UA_VERSION}~20.04;; + jammy) version=${UA_VERSION}~22.04;; + kinetic) version=${UA_VERSION}~22.10;; esac dch_cmd=(dch -m -v "${version}" -D "${release}" -b "Backport new upstream release: (LP: #${SRU_BUG}) to $release") if [ -z "$DO_IT" ]; then diff -Nru ubuntu-advantage-tools-27.14.4~18.04/tools/install-staging-keyrings.sh ubuntu-advantage-tools-28.1~18.04/tools/install-staging-keyrings.sh --- ubuntu-advantage-tools-27.14.4~18.04/tools/install-staging-keyrings.sh 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/tools/install-staging-keyrings.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,44 +0,0 @@ -#!/bin/sh -eu -# Pull down the keys used in staging, and install them in the current machine -# -# The intended use here is to push this script on to a system and then run it -# in situ. As this should never be used in production, to simplify -# implementation this script puts all relevant keys in to a single keyring and -# install that keyring multiple times. -# -# !! WARNING !! -# This will install insecure keys in to the machine on which you run it, -# overwriting known-good keys. DO NOT RUN IT ON YOUR LAPTOP! - -KEY_IDS="B220D065" -TARGET_PATHS="ubuntu-cc-keyring.gpg ubuntu-esm-v2-keyring.gpg ubuntu-fips-keyring.gpg ubuntu-fips-updates-keyring.gpg ubuntu-securitybenchmarks-keyring.gpg" - -# Create a temporary directory for keyring generation -TMPDIR="$(mktemp -d)" -echo "Working in $TMPDIR..." -cleanup () { - echo "Cleaning up $TMPDIR..." - rm -rf "$TMPDIR" - echo "Removed $TMPDIR." -} -trap cleanup EXIT - -KEYRING_FILE="$TMPDIR/keyring.gpg" - -for KEY_ID in $KEY_IDS; do - gpg \ - --homedir "$TMPDIR" \ - --keyring "$KEYRING_FILE" \ - --no-default-keyring \ - --keyserver keyserver.ubuntu.com \ - --recv-keys "$KEY_ID" -done - -for TARGET_PATH in $TARGET_PATHS; do - FULL_TARGET_PATH="/usr/share/keyrings/$TARGET_PATH" - if [ -w "$FULL_TARGET_PATH" ]; then - cp "$KEYRING_FILE" "$FULL_TARGET_PATH" - else - echo "!!! Not copying to unwriteable path: $FULL_TARGET_PATH" - fi -done diff -Nru ubuntu-advantage-tools-27.14.4~18.04/tools/make-release ubuntu-advantage-tools-28.1~18.04/tools/make-release --- ubuntu-advantage-tools-27.14.4~18.04/tools/make-release 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/tools/make-release 1970-01-01 00:00:00.000000000 +0000 @@ -1,120 +0,0 @@ -#!/bin/bash -set -ex -# -# Create a native package release of ubuntu-advantage-tools for a given series -# This assumes latest debian/changelog entry version is (MM.NN) UNRELEASED. -# -# Release this package version to devel release with the format: YY.N where -# YY is the 2-digit year and N is a counter of public releases in that year. 
-# -# When releasing to stable series, an ~ubuntu1~XX.YY.1 suffix will be added -# where XX.YY is the release version 18.04, 16.04 etc. - -# This scipt temporarily sets the appropriate version and series in the most -# recent debian/changelog entry, runs build-package, and prints the steps -# necessary to queue an upload for review. - -# Release version schemes are described in RELEASES.md - -DEVEL_SERIES=$(distro-info --devel) - -Usage() { - cat < /dev/null 2>&1 -if [ $? -ne 0 ]; then - echo "Missing build-package script." - echo "Install with 'git clone git@github.com:CanonicalLtd/uss-tableflip.git'" - echo "Modify PATH to include uss-tableflip/scripts" - exit 1 -fi - -CWD=$PWD - -short_opts="hp:s:" -long_opts="help,ppa:,series:" -getopt_out=$(getopt --name "${0##*/}" \ - --options "${short_opts}" --long "${long_opts}" -- "$@") && - eval set -- "${getopt_out}" || { Usage 1>&2; exit 1; } - -PPA_URL="" -SERIES=$DEVEL_SERIES -while [ $# -ne 0 ]; do - cur=$1; next=$2 - case "$cur" in - -h|--help) Usage; exit 0;; - -p|--ppa) PPA_URL=$next; shift;; - -s|--series) SERIES=$next; shift;; - --) shift; break;; - esac - shift; -done - -if [ -z "$PPA_URL" ]; then - echo -e "\nMissing --ppa\n" - Usage - exit 1 -fi - - -cd /tmp -[ -e ubuntu-advantage-client ] && rm -rf ubuntu-advantage-client -git clone git@github.com:canonical/ubuntu-advantage-client.git -cd ubuntu-advantage-client -CHANGELOG_VERSION=$(dpkg-parsechangelog -S Version) -CHANGELOG_MAJOR=${CHANGELOG_VERSION%.*} -CHANGELOG_MINOR=${CHANGELOG_VERSION#*.} -RELEASE_NUMBER=$(distro-info --series ${SERIES} -r) - -YEAR=$(date +%y) -if [ $YEAR == $CHANGELOG_MAJOR ]; then - # increment CHANGELOG_MINOR for this year - NEW_VERSION=$YEAR.$(($CHANGELOG_MINOR + 1)) -else - # First release of the new year - NEW_VERSION=$YEAR.1 -fi - -if [ "${SERIES}" != "${DEVEL_SERIES}" ]; then - # Only append ~XX.YY.1 to stable releases - CHANGELOG_VERSION=${CHANGELOG_VERSION}~${RELEASE_NUMBER/ LTS/}.1 -fi - -sed -i "s/ubuntu-advantage-tools (${CHANGELOG_VERSION}) [[:alpha:]]\+;/ubuntu-advantage-tools (${CHANGELOG_VERSION}) ${SERIES};/" debian/changelog -cp $CWD/tools/make-tarball tools/ -git add tools -git commit -am "update changelog for release to ${SERIES}" -build-package --verbose -git reset HEAD~1 -cd $CWD - -TAG_EXISTS=$(git tag --list ${CHANGELOG_VERSION}) -if [ -z "${TAG_EXISTS}" ]; then - sed -i "s/ubuntu-advantage-tools (${CHANGELOG_VERSION}) [[:alpha:]]\+;/ubuntu-advantage-tools (${CHANGELOG_VERSION}) ${DEVEL_SERIES};/" debian/changelog - git commit -am "update changelog for release to ${DEVEL_SERIES}" - git tag -a ${CHANGELOG_VERSION} -fi -git checkout -b release/dev-${NEW_VERSION} -sed -i "s/${CHANGELOG_VERSION}/${NEW_VERSION}/" uaclient/version.py -git commit -am "open $NEW_VERSION for development" -dch -v ${NEW_VERSION} -m "open $NEW_VERSION for development" -git commit -am "update changelog" - -cat << EOF ----- To release ${CHANGELOG_VERSION} to ${SERIES} ---- -dput $PPA_URL /tmp/out/ubuntu-advantage-tools_${CHANGELOG_VERSION}_source.changes -# Push annotated tag upstream to change daily build versions -git push upstream ${CHANGELOG_VERSION} -# Open ${NEW_VERSION} version for development by pushing a PR up for review -git push release/dev-${NEW_VERSION} -EOF diff -Nru ubuntu-advantage-tools-27.14.4~18.04/tools/make-tarball ubuntu-advantage-tools-28.1~18.04/tools/make-tarball --- ubuntu-advantage-tools-27.14.4~18.04/tools/make-tarball 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/tools/make-tarball 1970-01-01 
00:00:00.000000000 +0000 @@ -1,81 +0,0 @@ -#!/bin/sh -set -e - -TEMP_D="" -cleanup() { - [ -z "$TEMP_D" ] || rm -Rf "${TEMP_D}" -} -trap cleanup EXIT - -Usage() { - cat <.orig.tar.gz - --long Use git describe --long for versioning -EOF -} - -short_opts="ho:v" -long_opts="help,output:,version:,orig-tarball,long" -getopt_out=$(getopt --name "${0##*/}" \ - --options "${short_opts}" --long "${long_opts}" -- "$@") && - eval set -- "${getopt_out}" || { Usage 1>&2; exit 1; } - -long_opt="" -orig_opt="" -output="" -version="" -while [ $# -ne 0 ]; do - cur=$1; next=$2 - case "$cur" in - -h|--help) Usage; exit 0;; - -o|--output) output=$next; shift;; - --version) version=$next; shift;; - --long) long_opt="--long";; - --orig-tarball) orig_opt=".orig";; - --) shift; break;; - esac - shift; -done - -rev=${1:-HEAD} -if [ -z "$version" ]; then - version=$(git describe --abbrev=8 "--match=[0-9]*" ${long_opt} $rev) -elif [ ! -z "$long_opt" ]; then - echo "WARNING: --long has no effect when --version is passed" >&2 - exit 1 -fi - -archive_base="ubuntu-advantage-tools-$version" -if [ -z "$output" ]; then - if [ ! -z "$orig_opt" ]; then - archive_base="ubuntu-advantage-tools_$version" - fi - output="${archive_base}${orig_opt}.tar.gz" -fi - -# when building an archive from HEAD, ensure that there aren't any -# uncomitted changes in the working directory (because these would not -# end up in the archive). -if [ "$rev" = HEAD ] && ! git diff-index --quiet HEAD --; then - if [ -z "$SKIP_UNCOMITTED_CHANGES_CHECK" ]; then - echo "ERROR: There are uncommitted changes in your working directory." >&2 - exit 1 - else - echo "WARNING: There are uncommitted changes in your working directory." >&2 - echo " These changes will not be included in the archive." >&2 - fi -fi - -TEMP_D=$(mktemp -d) -tar=${output##*/} -tar="$TEMP_D/${tar%.gz}" -git archive --format=tar --prefix="$archive_base/" "$rev" > "$tar" -gzip -9 -c "$tar" > "$output" -echo "$output" diff -Nru ubuntu-advantage-tools-27.14.4~18.04/tools/multipass.md ubuntu-advantage-tools-28.1~18.04/tools/multipass.md --- ubuntu-advantage-tools-27.14.4~18.04/tools/multipass.md 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/tools/multipass.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,64 +0,0 @@ -# Using multipass For Development - -This is a document outlining how one developer has configured their -system to enable the easy use of multipass for UA client development. -It is intended to be used as a reference point for other developers to -build their own workflows, rather than as a single, mandated workflow. 
- -## Create cloud-config for instance configuration - -This is the cloud-config that I use (stored in -`~/rc/multipass-cloud-config`): - -```yaml -#cloud-config -apt: - http_proxy: http://10.76.88.1:3142 - https_proxy: "DIRECT" -apt_http_proxy: http://10.76.88.1:3142 # support trusty -apt_https_proxy: "DIRECT" -packages: - - devscripts - - equivs - - git - - libpython3-dev - - libffi-dev - - sshfs # for `multipass mount`; save time by installing it ourselves - - virtualenvwrapper -runcmd: - # The expectation is that we will mount our local development repo in to the - # VM, but to install the build-deps at launch time we clone the public repo - # temporarily - - 'git clone https://github.com/CanonicalLtd/ubuntu-advantage-client /var/tmp/uac' - - 'make -f /var/tmp/uac/Makefile deps' - - 'rm -rf /var/tmp/uac' - - 'echo alias pytest=py.test-3 >> /home/multipass/.bashrc' -ssh_import_id: - - daniel-thewatkins -``` - -## Create an alias to launch multipass instances - -Adding these lines to my `.aliases` file (which is source'd by my -`.zshrc`) means I can launch a multipass VM ready for development with -a single command: - -```sh -alias mpl="multipass launch --cloud-init ~/rc/multipass-cloud-config" - -uamultipass() { - SERIES="$1" - if [ -z "$SERIES" ]; then - echo "needs argument" - return 1 - fi - name="$SERIES-$(date +%y%m%d-%H%M)" - mpl -n "$name" $SERIES - - while ! multipass exec "$name" -- test -e /run/cloud-init/result.json; do - sleep 5 - done - - multipass mount /home/daniel/dev/ubuntu-advantage-client $name:/home/multipass/ubuntu-advantage-client -} -``` diff -Nru ubuntu-advantage-tools-27.14.4~18.04/tools/README.md ubuntu-advantage-tools-28.1~18.04/tools/README.md --- ubuntu-advantage-tools-27.14.4~18.04/tools/README.md 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/tools/README.md 2023-05-30 19:02:35.000000000 +0000 @@ -5,5 +5,17 @@ ## Files -- constraints-bionic: Bionic versions of packages used by Tox -- constraints-xenial: Xenial versions of packages used by Tox +- build.py: Build the package from the current directory +- build.sh: Shell entrypoint for the build script +- create-lp-release-branches.sh: Generate the stable release branches on Launchpad, after the devel branch is ready +- check-versions-are-consistent.py: Helper script to verify changelog and package version matches. +- README.md: This file. +- refresh-keyrings.sh: Refresh the keyring files for services, stored in the repo +- run-integration-tests.py: Python entrypoint for the `tox -e behave` commands. +- setup_pyenv.sh: Downloads and compiles all Python versions we should support, and configures `pyenv` to use those. +- setup_sbuild.sh: Downloads and prepares chroots used in the build (and test) process. 
+- test-in-lxd.sh: Build the package and then install it on an LXD instance for testing +- test-in-multipass.sh: Build the package and then install it on a multipass instance for testing +- ua.bash: Bash completion script for `ua` | `pro` +- ua-test-credentials.example.yaml: Template for inserting your own test +credentials - make a copy and/or remove `'.example'` to use it in `run-integration-tests.py` diff -Nru ubuntu-advantage-tools-27.14.4~18.04/tools/run-integration-tests.py ubuntu-advantage-tools-28.1~18.04/tools/run-integration-tests.py --- ubuntu-advantage-tools-27.14.4~18.04/tools/run-integration-tests.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/tools/run-integration-tests.py 2023-06-01 18:49:33.000000000 +0000 @@ -57,28 +57,35 @@ series_version = SERIES_TO_VERSION[s] env = os.environ.copy() - # Add check_version variable - if check_version: - env["UACLIENT_BEHAVE_CHECK_VERSION"] = "{}~{}.1".format( - check_version, series_version - ) - - # choose the appropriate installation source for the deb - env["UACLIENT_BEHAVE_INSTALL_FROM"] = install_from - # Inject tokens from credentials for t in token: envvar = TOKEN_TO_ENVVAR[t] env[envvar] = credentials["token"].get(envvar) # Tox command itself - command = "tox -e behave-{}-{}".format( - p, series_version - ).split() + command = [ + "tox", + "-e", + "behave-{}-{}".format(p, series_version), + "--", + "-D", + "install_from={}".format(install_from), + ] + + if check_version: + command.extend( + [ + "-D", + "check_version={}~{}".format( + check_version, + series_version, + ), + ] + ) # Wip if wip: - command.extend(["--", "--tags=wip", "--stop"]) + command.extend(["--tags=wip", "--stop"]) commands.append((command, env)) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/tools/tox-lxd-runner ubuntu-advantage-tools-28.1~18.04/tools/tox-lxd-runner --- ubuntu-advantage-tools-27.14.4~18.04/tools/tox-lxd-runner 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/tools/tox-lxd-runner 1970-01-01 00:00:00.000000000 +0000 @@ -1,89 +0,0 @@ -#!/bin/bash - -set -euf -o pipefail - -uapath=$PWD -uadir=$(basename "$uapath") -container="ua-tox-lxd-runner-$(cut -d- -f1 < /proc/sys/kernel/random/uuid)" - -function cleanup { - # Safe to run even if the container/vm does not exist. - if lxc info "$container" &> /dev/null; then - echo "[CLEANING UP CONTAINER '$container']" - lxc delete "$container" --force - fi -} - -if (($# != 2)); then - echo "Usage: $0 " - echo "Example: $0 xenial 'tox -e flake8-xenial'" - exit 1 -fi - -release=$1 -cmd=$2 - -# Prevent carrying over .tox to a possibly very different environment. -if [[ -e .tox ]]; then - echo "Please remove .tox before running this script." - exit 1 -fi - -trap cleanup EXIT - -cleanup - -echo "[STARTING LXD CONTAINER '$container']" - -lxc launch "ubuntu:$release" "$container" --ephemeral - -echo "[WAITING FOR CONTAINER]" - -# Adapted from pycloudlib (instance.py). -# shellcheck disable=SC2016 -until lxc exec "$container" -- sh -c 'test "$(runlevel | cut -d" " -f2)" -ge 2 -a -f /run/cloud-init/result.json' > /dev/null 2>&1; do - printf . 
- sleep 2 -done -echo - -echo "[CONFIGURING CONTAINER]" - -# Install system packages -lxc exec "$container" -- apt-get --quiet --yes update -lxc exec "$container" --env DEBIAN_FRONTEND=noninteractive -- apt-get --quiet --yes install git - -python_minor=$(lxc exec "$container" -- python3 -c 'import sys; print(sys.version_info[1])') -if ((python_minor > 5)); then - get_pip_url="https://bootstrap.pypa.io/get-pip.py" -elif ((python_minor == 5)); then - get_pip_url="https://bootstrap.pypa.io/pip/3.5/get-pip.py" -elif ((python_minor == 4)); then - get_pip_url="https://bootstrap.pypa.io/pip/3.4/get-pip.py" -else - echo "Unsupported Python version (3.$python_minor)" - exit 1 -fi - -# Bootstrap pip -lxc exec "$container" -- sh -c "curl -Ss '$get_pip_url' | python3" - -# Install tox -lxc exec "$container" -- pip --quiet install tox -lxc exec "$container" -- tox --version - -echo "[COPYING SOURCE TREE TO CONTAINER]" - -c_home=$(lxc exec "$container" -- pwd) -c_uapath="$c_home/$uadir" -lxc file push --recursive --create-dirs "$uapath" "$container/$(dirname "$c_uapath")" --quiet - -echo "[RUNNING TESTS]" - -testrun_rc=1 -lxc exec "$container" --cwd "$c_uapath" -- sh -c "$cmd" && testrun_rc=0 || testrun_rc=$? -((testrun_rc == 0)) && echo "[SUCCESS]" || echo "[FAILURE (RC=$testrun_rc)]" - -cleanup - -exit $testrun_rc diff -Nru ubuntu-advantage-tools-27.14.4~18.04/tools/ua.bash ubuntu-advantage-tools-28.1~18.04/tools/ua.bash --- ubuntu-advantage-tools-27.14.4~18.04/tools/ua.bash 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/tools/ua.bash 2023-06-01 18:49:33.000000000 +0000 @@ -1,25 +1,35 @@ # bash completion for ubuntu-advantage-tools +. /etc/os-release # For VERSION_ID + _ua_complete() { local cur_word prev_word services subcmds base_params cur_word="${COMP_WORDS[COMP_CWORD]}" prev_word="${COMP_WORDS[COMP_CWORD-1]}" - services=$(python3 -c "from uaclient.entitlements import valid_services; from uaclient.config import UAConfig; print(*valid_services(cfg=UAConfig()), sep=' ') -") - subcmds=$(pro --help | awk '/^\s*$|^\s{5,}|Available|Use/ {next;} /Flags:/{check=1;next}/Use ubuntu-avantage/{check=0}check{ if ( $1 ~ /,/ ) { print $2} else print $1}') + if [ "$VERSION_ID" = "16.04" ] || [ "$VERSION_ID" == "18.04" ]; then + services="cc-eal cis esm-apps esm-infra fips fips-updates livepatch realtime-kernel ros ros-updates" + else + services="cc-eal esm-apps esm-infra fips fips-updates livepatch realtime-kernel ros ros-updates usg" + fi + + subcmds="--debug --help --version api attach auto-attach collect-logs config detach disable enable fix help refresh security-status status system version" base_params="" + case ${COMP_CWORD} in 1) + # shellcheck disable=SC2207 COMPREPLY=($(compgen -W "$base_params $subcmds" -- $cur_word)) ;; 2) case ${prev_word} in disable) + # shellcheck disable=SC2207 COMPREPLY=($(compgen -W "$services" -- $cur_word)) ;; enable) + # shellcheck disable=SC2207 COMPREPLY=($(compgen -W "$services" -- $cur_word)) ;; esac diff -Nru ubuntu-advantage-tools-27.14.4~18.04/tools/ua-dev-cloud-config.yaml ubuntu-advantage-tools-28.1~18.04/tools/ua-dev-cloud-config.yaml --- ubuntu-advantage-tools-27.14.4~18.04/tools/ua-dev-cloud-config.yaml 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/tools/ua-dev-cloud-config.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -#cloud-config -# Setup an ubuntu-advantage-tools development environment with cloud-init -packages: - - git - - make -runcmd: - - git clone 
https://github.com/canonical/ubuntu-advantage-client.git /var/tmp/uac - - cd /var/tmp/uac/ - - make deps - - dpkg-buildpackage -us -uc - - apt-get remove ubuntu-advantage-tools --assume-yes - - dpkg -i /var/tmp/ubuntu-advantage-*deb diff -Nru ubuntu-advantage-tools-27.14.4~18.04/tox.ini ubuntu-advantage-tools-28.1~18.04/tox.ini --- ubuntu-advantage-tools-27.14.4~18.04/tox.ini 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/tox.ini 2023-06-01 18:49:33.000000000 +0000 @@ -19,16 +19,23 @@ [testenv:py38-test] setuptools_version = 45.2.0 +[testenv:py310-flake8] +setuptools_version = 59.6.0 + +[testenv:py310-test] +setuptools_version = 59.6.0 + [testenv] allowlist_externals=/usr/bin/bash deps = -rrequirements.txt -rtest-requirements.txt - py35: -ctools/constraints-xenial.txt - py36: -ctools/constraints-bionic.txt - py38: -ctools/constraints-focal.txt + py35: -cuaclient/tests/constraints/constraints-xenial.txt + py36: -cuaclient/tests/constraints/constraints-bionic.txt + py38: -cuaclient/tests/constraints/constraints-focal.txt + py310: -cuaclient/tests/constraints/constraints-jammy.txt mypy: -rtypes-requirements.txt - mypy: -ctools/constraints-mypy.txt + mypy: -cuaclient/tests/constraints/constraints-mypy.txt black: -rdev-requirements.txt isort: -rdev-requirements.txt behave: -rintegration-requirements.txt @@ -43,40 +50,41 @@ setenv = awsgeneric: UACLIENT_BEHAVE_MACHINE_TYPE = aws.generic awspro: UACLIENT_BEHAVE_MACHINE_TYPE = aws.pro - awspro-fips: UACLIENT_BEHAVE_MACHINE_TYPE = aws.pro.fips + awspro-fips: UACLIENT_BEHAVE_MACHINE_TYPE = aws.pro-fips azuregeneric: UACLIENT_BEHAVE_MACHINE_TYPE = azure.generic azurepro: UACLIENT_BEHAVE_MACHINE_TYPE = azure.pro - azurepro-fips: UACLIENT_BEHAVE_MACHINE_TYPE = azure.pro.fips + azurepro-fips: UACLIENT_BEHAVE_MACHINE_TYPE = azure.pro-fips gcpgeneric: UACLIENT_BEHAVE_MACHINE_TYPE = gcp.generic gcppro: UACLIENT_BEHAVE_MACHINE_TYPE = gcp.pro - gcppro-fips: UACLIENT_BEHAVE_MACHINE_TYPE = gcp.pro.fips - vm: UACLIENT_BEHAVE_MACHINE_TYPE = lxd.vm - docker: UACLIENT_BEHAVE_MACHINE_TYPE = lxd.vm + gcppro-fips: UACLIENT_BEHAVE_MACHINE_TYPE = gcp.pro-fips + vm: UACLIENT_BEHAVE_MACHINE_TYPE = lxd-vm + docker: UACLIENT_BEHAVE_MACHINE_TYPE = lxd-vm commands = test: py.test --junitxml=pytest_results.xml {posargs:--cov uaclient uaclient} flake8: flake8 uaclient lib setup.py features - mypy: mypy --explicit-package-bases --check-untyped-defs --python-version 3.6 uaclient/ features/ lib/ - mypy: mypy --explicit-package-bases --check-untyped-defs --python-version 3.8 uaclient/ features/ lib/ - mypy: mypy --explicit-package-bases --check-untyped-defs --python-version 3.10 uaclient/ features/ lib/ + mypy: mypy --python-version 3.6 uaclient/ features/ lib/ + mypy: mypy --python-version 3.8 uaclient/ features/ lib/ + mypy: mypy --python-version 3.10 uaclient/ features/ lib/ black: black --check --diff uaclient/ features/ lib/ setup.py isort: isort --check --diff uaclient/ features/ lib/ setup.py - shellcheck: bash -O extglob -O nullglob -c "shellcheck -S warning tools/{*.sh,make-release,make-tarball} debian/*.{config,postinst,postrm,prerm} lib/*.sh sru/*.sh update-motd.d/*" - docs: sphinx-build -M html docs/ docs/build/ + shellcheck: bash -O extglob -O nullglob -c "shellcheck -S warning tools/*.sh debian/*.{config,postinst,postrm,prerm} lib/*.sh sru/*.sh update-motd.d/*" + docs: sphinx-build -W -b html docs/ docs/build/ docs-dev: sphinx-autobuild docs/ docs/build/html + behave-any: behave -v -D machine_type=any {posargs} - behave-lxd-16.04: behave -v 
{posargs} --tags="uses.config.machine_type.lxd.container" --tags="series.xenial,series.lts,series.all" --tags="~upgrade" - behave-lxd-18.04: behave -v {posargs} --tags="uses.config.machine_type.lxd.container" --tags="series.bionic,series.lts,series.all" --tags="~upgrade" - behave-lxd-20.04: behave -v {posargs} --tags="uses.config.machine_type.lxd.container" --tags="series.focal,series.lts,series.all" --tags="~upgrade" - behave-lxd-22.04: behave -v {posargs} --tags="uses.config.machine_type.lxd.container" --tags="series.jammy,series.lts,series.all" --tags="~upgrade" - behave-lxd-22.10: behave -v {posargs} --tags="uses.config.machine_type.lxd.container" --tags="series.kinetic,series.all" --tags="~upgrade" - behave-lxd-23.04: behave -v {posargs} --tags="uses.config.machine_type.lxd.container" --tags="series.lunar,series.all" --tags="~upgrade" - - behave-vm-16.04: behave -v {posargs} --tags="uses.config.machine_type.lxd.vm" --tags="series.xenial,series.all,series.lts" --tags="~upgrade" - behave-vm-18.04: behave -v {posargs} --tags="uses.config.machine_type.lxd.vm" --tags="series.bionic,series.all,series.lts" --tags="~upgrade" - behave-vm-20.04: behave -v {posargs} --tags="uses.config.machine_type.lxd.vm" --tags="series.focal,series.all,series.lts" --tags="~upgrade" --tags="~docker" - behave-vm-22.04: behave -v {posargs} --tags="uses.config.machine_type.lxd.vm" --tags="series.jammy,series.all,series.lts" --tags="~upgrade" - behave-vm-22.10: behave -v {posargs} --tags="uses.config.machine_type.lxd.vm" --tags="series.kinetic,series.all" --tags="~upgrade" - behave-vm-23.04: behave -v {posargs} --tags="uses.config.machine_type.lxd.vm" --tags="series.lunar,series.all" --tags="~upgrade" + behave-lxd-16.04: behave -v {posargs} --tags="uses.config.machine_type.lxd-container" --tags="series.xenial,series.lts,series.all" --tags="~upgrade" + behave-lxd-18.04: behave -v {posargs} --tags="uses.config.machine_type.lxd-container" --tags="series.bionic,series.lts,series.all" --tags="~upgrade" + behave-lxd-20.04: behave -v {posargs} --tags="uses.config.machine_type.lxd-container" --tags="series.focal,series.lts,series.all" --tags="~upgrade" + behave-lxd-22.04: behave -v {posargs} --tags="uses.config.machine_type.lxd-container" --tags="series.jammy,series.lts,series.all" --tags="~upgrade" + behave-lxd-22.10: behave -v {posargs} --tags="uses.config.machine_type.lxd-container" --tags="series.kinetic,series.all" --tags="~upgrade" + behave-lxd-23.04: behave -v {posargs} --tags="uses.config.machine_type.lxd-container" --tags="series.lunar,series.all" --tags="~upgrade" + + behave-vm-16.04: behave -v {posargs} --tags="uses.config.machine_type.lxd-vm" --tags="series.xenial,series.all,series.lts" --tags="~upgrade" + behave-vm-18.04: behave -v {posargs} --tags="uses.config.machine_type.lxd-vm" --tags="series.bionic,series.all,series.lts" --tags="~upgrade" + behave-vm-20.04: behave -v {posargs} --tags="uses.config.machine_type.lxd-vm" --tags="series.focal,series.all,series.lts" --tags="~upgrade" --tags="~docker" + behave-vm-22.04: behave -v {posargs} --tags="uses.config.machine_type.lxd-vm" --tags="series.jammy,series.all,series.lts" --tags="~upgrade" + behave-vm-22.10: behave -v {posargs} --tags="uses.config.machine_type.lxd-vm" --tags="series.kinetic,series.all" --tags="~upgrade" + behave-vm-23.04: behave -v {posargs} --tags="uses.config.machine_type.lxd-vm" --tags="series.lunar,series.all" --tags="~upgrade" behave-upgrade-16.04: behave -v {posargs} --tags="upgrade" --tags="series.xenial,series.all" behave-upgrade-18.04: 
behave -v {posargs} --tags="upgrade" --tags="series.bionic,series.all" @@ -84,7 +92,7 @@ behave-upgrade-22.04: behave -v {posargs} --tags="upgrade" --tags="series.jammy,series.all" behave-upgrade-22.10: behave -v {posargs} --tags="upgrade" --tags="series.kinetic,series.all" - behave-docker-20.04: behave -v {posargs} --tags="uses.config.machine_type.lxd.vm" --tags="series.focal" features/docker.feature + behave-docker-20.04: behave -v {posargs} --tags="uses.config.machine_type.lxd-vm" --tags="series.focal" features/docker.feature behave-awsgeneric-16.04: behave -v {posargs} --tags="uses.config.machine_type.aws.generic" --tags="series.xenial,series.lts,series.all" --tags="~upgrade" behave-awsgeneric-18.04: behave -v {posargs} --tags="uses.config.machine_type.aws.generic" --tags="series.bionic,series.lts,series.all" --tags="~upgrade" @@ -96,23 +104,24 @@ behave-awspro-20.04: behave -v {posargs} --tags="uses.config.machine_type.aws.pro" --tags="series.focal,series.lts,series.all" behave-awspro-22.04: behave -v {posargs} --tags="uses.config.machine_type.aws.pro" --tags="series.jammy,series.lts,series.all" - behave-awspro-fips-16.04: behave -v {posargs} --tags="uses.config.machine_type.aws.pro.fips" --tags="series.xenial,series.lts,series.all" - behave-awspro-fips-18.04: behave -v {posargs} --tags="uses.config.machine_type.aws.pro.fips" --tags="series.bionic,series.lts,series.all" - behave-awspro-fips-20.04: behave -v {posargs} --tags="uses.config.machine_type.aws.pro.fips" --tags="series.focal,series.lts,series.all" + behave-awspro-fips-16.04: behave -v {posargs} --tags="uses.config.machine_type.aws.pro-fips" --tags="series.xenial,series.lts,series.all" + behave-awspro-fips-18.04: behave -v {posargs} --tags="uses.config.machine_type.aws.pro-fips" --tags="series.bionic,series.lts,series.all" + behave-awspro-fips-20.04: behave -v {posargs} --tags="uses.config.machine_type.aws.pro-fips" --tags="series.focal,series.lts,series.all" behave-azuregeneric-16.04: behave -v {posargs} --tags="uses.config.machine_type.azure.generic" --tags="series.xenial,series.lts,series.all" --tags="~upgrade" behave-azuregeneric-18.04: behave -v {posargs} --tags="uses.config.machine_type.azure.generic" --tags="series.bionic,series.lts,series.all" --tags="~upgrade" behave-azuregeneric-20.04: behave -v {posargs} --tags="uses.config.machine_type.azure.generic" --tags="series.focal,series.lts,series.all" --tags="~upgrade" behave-azuregeneric-22.04: behave -v {posargs} --tags="uses.config.machine_type.azure.generic" --tags="series.jammy,series.lts,series.all" --tags="~upgrade" + behave-azuregeneric-22.10: behave -v {posargs} --tags="uses.config.machine_type.azure.generic" --tags="series.kinetic,series.all" --tags="~upgrade" behave-azurepro-16.04: behave -v {posargs} --tags="uses.config.machine_type.azure.pro" --tags="series.xenial,series.lts,series.all" behave-azurepro-18.04: behave -v {posargs} --tags="uses.config.machine_type.azure.pro" --tags="series.bionic,series.lts,series.all" behave-azurepro-20.04: behave -v {posargs} --tags="uses.config.machine_type.azure.pro" --tags="series.focal,series.lts,series.all" behave-azurepro-22.04: behave -v {posargs} --tags="uses.config.machine_type.azure.pro" --tags="series.jammy,series.lts,series.all" - behave-azurepro-fips-16.04: behave -v {posargs} --tags="uses.config.machine_type.azure.pro.fips" --tags="series.xenial,series.lts,series.all" - behave-azurepro-fips-18.04: behave -v {posargs} --tags="uses.config.machine_type.azure.pro.fips" --tags="series.bionic,series.lts,series.all" - 
behave-azurepro-fips-20.04: behave -v {posargs} --tags="uses.config.machine_type.azure.pro.fips" --tags="series.focal,series.lts,series.all" + behave-azurepro-fips-16.04: behave -v {posargs} --tags="uses.config.machine_type.azure.pro-fips" --tags="series.xenial,series.lts,series.all" + behave-azurepro-fips-18.04: behave -v {posargs} --tags="uses.config.machine_type.azure.pro-fips" --tags="series.bionic,series.lts,series.all" + behave-azurepro-fips-20.04: behave -v {posargs} --tags="uses.config.machine_type.azure.pro-fips" --tags="series.focal,series.lts,series.all" behave-gcpgeneric-16.04: behave -v {posargs} --tags="uses.config.machine_type.gcp.generic" --tags="series.xenial,series.lts,series.all" --tags="~upgrade" behave-gcpgeneric-18.04: behave -v {posargs} --tags="uses.config.machine_type.gcp.generic" --tags="series.bionic,series.lts,series.all" --tags="~upgrade" @@ -125,8 +134,8 @@ behave-gcppro-20.04: behave -v {posargs} --tags="uses.config.machine_type.gcp.pro" --tags="series.focal,series.lts,series.all" --tags="~upgrade" behave-gcppro-22.04: behave -v {posargs} --tags="uses.config.machine_type.gcp.pro" --tags="series.jammy,series.lts,series.all" --tags="~upgrade" - behave-gcppro-fips-18.04: behave -v {posargs} --tags="uses.config.machine_type.gcp.pro.fips" --tags="series.bionic,series.lts,series.all" --tags="~upgrade" - behave-gcppro-fips-20.04: behave -v {posargs} --tags="uses.config.machine_type.gcp.pro.fips" --tags="series.focal,series.lts,series.all" --tags="~upgrade" + behave-gcppro-fips-18.04: behave -v {posargs} --tags="uses.config.machine_type.gcp.pro-fips" --tags="series.bionic,series.lts,series.all" --tags="~upgrade" + behave-gcppro-fips-20.04: behave -v {posargs} --tags="uses.config.machine_type.gcp.pro-fips" --tags="series.focal,series.lts,series.all" --tags="~upgrade" [flake8] # E251: Older versions of flake8 et al don't permit the diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/actions.py ubuntu-advantage-tools-28.1~18.04/uaclient/actions.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/actions.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/actions.py 2023-06-01 18:49:33.000000000 +0000 @@ -10,10 +10,11 @@ entitlements, exceptions, livepatch, - messages, ) +from uaclient import log as pro_log +from uaclient import messages from uaclient import status as ua_status -from uaclient import system, util +from uaclient import system, timer, util from uaclient.clouds import AutoAttachCloudInstance # noqa: F401 from uaclient.clouds import identity from uaclient.defaults import ( @@ -35,6 +36,8 @@ "ubuntu-advantage.service", ) +USER_LOG_COLLECTED_LIMIT = 10 + def attach_with_token( cfg: config.UAConfig, token: str, allow_enable: bool @@ -46,7 +49,7 @@ :raise ContractAPIError: On unexpected errors when talking to the contract server. """ - from uaclient.jobs.update_messaging import update_motd_messages + from uaclient.timer.update_messaging import update_motd_messages try: contract.request_updated_contract( @@ -61,6 +64,7 @@ # Persist updated status in the event of partial attach ua_status.status(cfg=cfg) update_motd_messages(cfg) + # raise this exception in case we cannot enable all services raise exc current_iid = identity.get_instance_id() @@ -68,6 +72,7 @@ cfg.write_cache("instance-id", current_iid) update_motd_messages(cfg) + timer.start() def auto_attach( @@ -84,7 +89,7 @@ auto-attach support. 
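As a usage sketch, not part of the packaged patch: attach_with_token(), whose signature appears in the actions.py hunk above, now starts the timer jobs after a successful attach and still re-raises when not every default service could be enabled. The token below is a placeholder, not a real contract token.

    # Illustrative call of uaclient.actions.attach_with_token as changed above;
    # the token string is a placeholder.
    from uaclient import actions, config

    cfg = config.UAConfig()
    try:
        actions.attach_with_token(cfg, token="<contract-token>", allow_enable=True)
    except Exception:
        # The machine may already be attached at this point; the exception is
        # re-raised on purpose when service enablement fails (see the hunk above).
        raise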
""" contract_client = contract.UAContractClient(cfg) - tokenResponse = contract_client.request_auto_attach_contract_token( + tokenResponse = contract_client.get_contract_token_for_cloud_instance( instance=cloud ) @@ -99,7 +104,8 @@ *, assume_yes: bool = False, allow_beta: bool = False, - access_only: bool = False + access_only: bool = False, + variant: str = "" ): """ Constructs an entitlement based on the name provided. Passes kwargs onto @@ -107,7 +113,9 @@ :raise EntitlementNotFoundError: If no entitlement with the given name is found, then raises this error. """ - ent_cls = entitlements.entitlement_factory(cfg=cfg, name=name) + ent_cls = entitlements.entitlement_factory( + cfg=cfg, name=name, variant=variant + ) entitlement = ent_cls( cfg, assume_yes=assume_yes, @@ -209,6 +217,23 @@ ) state_files = _get_state_files(cfg) + user_log_files = ( + pro_log.get_all_user_log_files()[:USER_LOG_COLLECTED_LIMIT] + if util.we_are_currently_root() + else [pro_log.get_user_log_file()] + ) + # save log file in compressed file + for log_file_idx, log_file in enumerate(user_log_files): + try: + content = util.redact_sensitive_logs(system.load_file(log_file)) + system.write_file( + os.path.join(output_dir, "user{}.log".format(log_file_idx)), + content, + ) + except Exception as e: + logging.warning( + "Failed to collect user log file: %s\n%s", log_file, str(e) + ) # also get default logrotated log files for f in state_files + glob.glob(DEFAULT_LOG_PREFIX + "*"): @@ -225,6 +250,7 @@ if util.we_are_currently_root(): # if root, overwrite the original with redacted content system.write_file(f, content) + system.write_file( os.path.join(output_dir, os.path.basename(f)), content ) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/api/api.py ubuntu-advantage-tools-28.1~18.04/uaclient/api/api.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/api/api.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/api/api.py 2023-06-01 18:49:33.000000000 +0000 @@ -26,6 +26,8 @@ "u.pro.packages.updates.v1", "u.pro.security.status.livepatch_cves.v1", "u.pro.security.status.reboot_required.v1", + "u.pro.status.enabled_services.v1", + "u.pro.status.is_attached.v1", "u.pro.version.v1", "u.security.package_manifest.v1", "u.unattended_upgrades.status.v1", diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/api/exceptions.py ubuntu-advantage-tools-28.1~18.04/uaclient/api/exceptions.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/api/exceptions.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/api/exceptions.py 2023-06-01 18:49:33.000000000 +0000 @@ -1,5 +1,3 @@ -from typing import List, Tuple - from uaclient import messages from uaclient.api.errors import APIError from uaclient.exceptions import ( @@ -7,6 +5,7 @@ ConnectivityError, ContractAPIError, EntitlementNotFoundError, + EntitlementsNotEnabledError, InvalidProImage, LockHeldError, NonAutoAttachImageError, @@ -24,24 +23,10 @@ "NonAutoAttachImageError", "UrlError", "UserFacingError", + "EntitlementsNotEnabledError", ] -class EntitlementsNotEnabledError(UserFacingError): - def __init__( - self, failed_services: List[Tuple[str, messages.NamedMessage]] - ): - info_dicts = [ - {"name": f[0], "code": f[1].name, "title": f[1].msg} - for f in failed_services - ] - super().__init__( - messages.ENTITLEMENTS_NOT_ENABLED_ERROR.msg, - messages.ENTITLEMENTS_NOT_ENABLED_ERROR.name, - additional_info={"services": info_dicts}, - ) - - class AutoAttachDisabledError(UserFacingError): def __init__(self): 
super().__init__( diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/api/tests/test_api_u_pro_status_enabled_services_v1.py ubuntu-advantage-tools-28.1~18.04/uaclient/api/tests/test_api_u_pro_status_enabled_services_v1.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/api/tests/test_api_u_pro_status_enabled_services_v1.py 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/api/tests/test_api_u_pro_status_enabled_services_v1.py 2023-06-01 18:49:33.000000000 +0000 @@ -0,0 +1,73 @@ +import mock + +from uaclient import entitlements +from uaclient.api.u.pro.status.enabled_services.v1 import ( + EnabledService, + _enabled_services, +) +from uaclient.entitlements.entitlement_status import UserFacingStatus + + +class TestEnabledServicesV1: + @mock.patch("uaclient.api.u.pro.status.enabled_services.v1._is_attached") + def test_enabled_services(self, m_is_attached): + m_is_attached.return_value = mock.MagicMock(is_attached=True) + + m_cls_1 = mock.MagicMock() + m_inst_1 = mock.MagicMock(variants={}) + type(m_inst_1).name = mock.PropertyMock(return_value="ent1") + m_inst_1.user_facing_status.return_value = ( + UserFacingStatus.ACTIVE, + "", + ) + m_cls_1.return_value = m_inst_1 + + m_variant_cls = mock.MagicMock() + m_variant_inst = mock.MagicMock(variant_name="variant") + m_variant_inst.user_facing_status.return_value = ( + UserFacingStatus.ACTIVE, + "", + ) + m_variant_cls.return_value = m_variant_inst + + m_cls_2 = mock.MagicMock() + m_inst_2 = mock.MagicMock(variants={"variant": m_variant_cls}) + type(m_inst_2).name = mock.PropertyMock(return_value="ent2") + m_inst_2.user_facing_status.return_value = ( + UserFacingStatus.ACTIVE, + "", + ) + m_cls_2.return_value = m_inst_2 + + m_cls_3 = mock.MagicMock() + m_inst_3 = mock.MagicMock() + type(m_inst_3).name = mock.PropertyMock(return_value="ent3") + m_inst_3.user_facing_status.return_value = ( + UserFacingStatus.INACTIVE, + "", + ) + + ents = [m_cls_1, m_cls_2, m_cls_3] + expected_enabled_services = [ + EnabledService(name="ent1"), + EnabledService( + name="ent2", + variant_enabled=True, + variant_name="variant", + ), + ] + + with mock.patch.object(entitlements, "ENTITLEMENT_CLASSES", ents): + actual_enabled_services = _enabled_services( + cfg=mock.MagicMock() + ).enabled_services + + assert 1 == m_is_attached.call_count + assert expected_enabled_services == actual_enabled_services + + @mock.patch("uaclient.api.u.pro.status.enabled_services.v1._is_attached") + def test_enabled_services_when_unattached(self, m_is_attached): + m_is_attached.return_value = mock.MagicMock(is_attached=False) + + assert [] == _enabled_services(cfg=mock.MagicMock()).enabled_services + assert 1 == m_is_attached.call_count diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/api/u/pro/attach/auto/full_auto_attach/v1.py ubuntu-advantage-tools-28.1~18.04/uaclient/api/u/pro/attach/auto/full_auto_attach/v1.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/api/u/pro/attach/auto/full_auto_attach/v1.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/api/u/pro/attach/auto/full_auto_attach/v1.py 2023-06-01 18:49:33.000000000 +0000 @@ -4,6 +4,7 @@ from uaclient.api import exceptions from uaclient.api.api import APIEndpoint from uaclient.api.data_types import AdditionalInfo +from uaclient.api.u.pro.status.is_attached.v1 import _is_attached from uaclient.config import UAConfig from uaclient.data_types import DataObject, Field, StringDataValue, data_list from uaclient.entitlements import 
order_entitlements_for_enabling @@ -91,7 +92,7 @@ ) -> FullAutoAttachResult: event.set_event_mode(mode) - if cfg.is_attached: + if _is_attached(cfg).is_attached: raise exceptions.AlreadyAttachedError( cfg.machine_token_file.account.get("name", "") ) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/api/u/pro/status/enabled_services/v1.py ubuntu-advantage-tools-28.1~18.04/uaclient/api/u/pro/status/enabled_services/v1.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/api/u/pro/status/enabled_services/v1.py 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/api/u/pro/status/enabled_services/v1.py 2023-06-01 18:49:33.000000000 +0000 @@ -0,0 +1,83 @@ +from typing import List, Optional + +from uaclient.api.api import APIEndpoint +from uaclient.api.data_types import AdditionalInfo +from uaclient.api.u.pro.status.is_attached.v1 import _is_attached +from uaclient.config import UAConfig +from uaclient.data_types import ( + BoolDataValue, + DataObject, + Field, + StringDataValue, + data_list, +) +from uaclient.entitlements.entitlement_status import UserFacingStatus + + +class EnabledService(DataObject): + fields = [ + Field("name", StringDataValue), + Field("variant_enabled", BoolDataValue), + Field("variant_name", StringDataValue, False), + ] + + def __init__( + self, + *, + name: str, + variant_enabled: bool = False, + variant_name: Optional[str] = None + ): + self.name = name + self.variant_enabled = variant_enabled + self.variant_name = variant_name + + +class EnabledServicesResult(DataObject, AdditionalInfo): + fields = [ + Field("enabled_services", data_list(EnabledService)), + ] + + def __init__(self, *, enabled_services: List[EnabledService]): + self.enabled_services = enabled_services + + +def enabled_services() -> EnabledServicesResult: + return _enabled_services(UAConfig()) + + +def _enabled_services(cfg: UAConfig) -> EnabledServicesResult: + from uaclient.entitlements import ENTITLEMENT_CLASSES + + if not _is_attached(cfg).is_attached: + return EnabledServicesResult(enabled_services=[]) + + enabled_services = [] # type: List[EnabledService] + for ent_cls in ENTITLEMENT_CLASSES: + ent = ent_cls(cfg) + if ent.user_facing_status()[0] == UserFacingStatus.ACTIVE: + enabled_service = EnabledService(name=ent.name) + for _, variant_cls in ent.variants.items(): + variant = variant_cls(cfg) + + if variant.user_facing_status()[0] == UserFacingStatus.ACTIVE: + enabled_service = EnabledService( + name=ent.name, + variant_enabled=True, + variant_name=variant.variant_name, + ) + break + + enabled_services.append(enabled_service) + + return EnabledServicesResult( + enabled_services=sorted(enabled_services, key=lambda x: x.name) + ) + + +endpoint = APIEndpoint( + version="v1", + name="EnabledServices", + fn=_enabled_services, + options_cls=None, +) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/api/u/pro/status/is_attached/v1.py ubuntu-advantage-tools-28.1~18.04/uaclient/api/u/pro/status/is_attached/v1.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/api/u/pro/status/is_attached/v1.py 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/api/u/pro/status/is_attached/v1.py 2023-06-01 18:49:33.000000000 +0000 @@ -0,0 +1,29 @@ +from uaclient.api.api import APIEndpoint +from uaclient.api.data_types import AdditionalInfo +from uaclient.config import UAConfig +from uaclient.data_types import BoolDataValue, DataObject, Field + + +class IsAttachedResult(DataObject, AdditionalInfo): + fields = [ + Field("is_attached", 
BoolDataValue), + ] + + def __init__(self, *, is_attached: bool): + self.is_attached = is_attached + + +def is_attached() -> IsAttachedResult: + return _is_attached(UAConfig()) + + +def _is_attached(cfg: UAConfig) -> IsAttachedResult: + return IsAttachedResult(is_attached=bool(cfg.machine_token)) + + +endpoint = APIEndpoint( + version="v1", + name="IsAttached", + fn=_is_attached, + options_cls=None, +) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/apt_news.py ubuntu-advantage-tools-28.1~18.04/uaclient/apt_news.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/apt_news.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/apt_news.py 2023-06-01 18:49:33.000000000 +0000 @@ -8,6 +8,7 @@ import apt_pkg from uaclient import defaults, messages, system, util +from uaclient.api.u.pro.status.is_attached.v1 import _is_attached from uaclient.clouds.identity import get_cloud_type from uaclient.config import UAConfig from uaclient.data_types import ( @@ -19,7 +20,7 @@ data_list, ) from uaclient.files import state_files -from uaclient.jobs.update_messaging import ( +from uaclient.timer.update_messaging import ( ContractExpiryStatus, get_contract_expiry_status, ) @@ -81,7 +82,7 @@ return True if selectors.codenames is not None: - if system.get_platform_info()["series"] not in selectors.codenames: + if system.get_release_info().series not in selectors.codenames: return False if selectors.clouds is not None: @@ -92,7 +93,7 @@ return False if selectors.pro is not None: - if selectors.pro != cfg.is_attached: + if selectors.pro != _is_attached(cfg).is_attached: return False return True diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/apt.py ubuntu-advantage-tools-28.1~18.04/uaclient/apt.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/apt.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/apt.py 2023-06-27 00:49:37.000000000 +0000 @@ -169,21 +169,24 @@ def run_apt_command( cmd: List[str], error_msg: Optional[str] = None, - env: Optional[Dict[str, str]] = {}, + override_env_vars: Optional[Dict[str, str]] = None, ) -> str: """Run an apt command, retrying upon failure APT_RETRIES times. :param cmd: List containing the apt command to run, passed to subp. :param error_msg: The string to raise as UserFacingError when all retries are exhausted in failure. - :param env: Optional dictionary of environment variables to pass to subp. + :param override_env_vars: Passed directly as subp's override_env_vars arg :return: stdout from successful run of the apt command. :raise UserFacingError: on issues running apt-cache policy. 
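As a usage sketch, not part of the packaged patch: the two status endpoints added above expose plain Python entry points. The module paths and result fields below are taken from the new v1.py files; everything else is illustrative.

    # Query attachment state and enabled services via the new API modules.
    from uaclient.api.u.pro.status.is_attached.v1 import is_attached
    from uaclient.api.u.pro.status.enabled_services.v1 import enabled_services

    if is_attached().is_attached:
        for svc in enabled_services().enabled_services:
            # variant_name is only populated when variant_enabled is True
            print(svc.name, svc.variant_enabled, svc.variant_name)

Because both names are added to the endpoint list in uaclient/api/api.py above, they should also be reachable through the pro api subcommand.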
""" try: out, _err = system.subp( - cmd, capture=True, retry_sleeps=APT_RETRIES, env=env + cmd, + capture=True, + retry_sleeps=APT_RETRIES, + override_env_vars=override_env_vars, ) except exceptions.ProcessExecutionError as e: if "Could not get lock /var/lib/dpkg/lock" in str(e.stderr): @@ -208,10 +211,12 @@ @lru_cache(maxsize=None) def get_apt_cache_policy( error_msg: Optional[str] = None, - env: Optional[Dict[str, str]] = {}, + override_env_vars: Optional[Dict[str, str]] = None, ) -> str: return run_apt_command( - cmd=["apt-cache", "policy"], error_msg=error_msg, env=env + cmd=["apt-cache", "policy"], + error_msg=error_msg, + override_env_vars=override_env_vars, ) @@ -264,7 +269,9 @@ return cache -def get_pkg_candidate_version(pkg: str) -> Optional[str]: +def get_pkg_candidate_version( + pkg: str, check_esm_cache: bool = False +) -> Optional[str]: with PreserveAptCfg(get_apt_cache) as cache: try: package = cache[pkg] @@ -278,6 +285,8 @@ if not pkg_candidate: return None + elif not check_esm_cache: + return pkg_candidate with PreserveAptCfg(get_esm_cache) as esm_cache: if esm_cache: @@ -303,16 +312,22 @@ def get_apt_cache_policy_for_package( package: str, error_msg: Optional[str] = None, - env: Optional[Dict[str, str]] = {}, + override_env_vars: Optional[Dict[str, str]] = None, ) -> str: return run_apt_command( - cmd=["apt-cache", "policy", package], error_msg=error_msg, env=env + cmd=["apt-cache", "policy", package], + error_msg=error_msg, + override_env_vars=override_env_vars, ) -def run_apt_update_command(env: Optional[Dict[str, str]] = {}) -> str: +def run_apt_update_command( + override_env_vars: Optional[Dict[str, str]] = None +) -> str: try: - out = run_apt_command(cmd=["apt-get", "update"], env=env) + out = run_apt_command( + cmd=["apt-get", "update"], override_env_vars=override_env_vars + ) except exceptions.APTProcessConflictError: raise exceptions.APTUpdateProcessConflictError() except exceptions.APTInvalidRepoError as e: @@ -335,7 +350,7 @@ packages: List[str], apt_options: Optional[List[str]] = None, error_msg: Optional[str] = None, - env: Optional[Dict[str, str]] = {}, + override_env_vars: Optional[Dict[str, str]] = None, ) -> str: if apt_options is None: apt_options = [] @@ -346,7 +361,7 @@ + apt_options + packages, error_msg=error_msg, - env=env, + override_env_vars=override_env_vars, ) except exceptions.APTProcessConflictError: raise exceptions.APTInstallProcessConflictError(header_msg=error_msg) @@ -375,7 +390,7 @@ except ValueError: # Then we have a bearer token username = "bearer" password = credentials - series = system.get_platform_info()["series"] + series = system.get_release_info().series if repo_url.endswith("/"): repo_url = repo_url[:-1] assert_valid_apt_credentials(repo_url, username, password) @@ -755,7 +770,7 @@ ] + list(package_names), error_message, - env={"DEBIAN_FRONTEND": "noninteractive"}, + override_env_vars={"DEBIAN_FRONTEND": "noninteractive"}, ) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/cli.py ubuntu-advantage-tools-28.1~18.04/uaclient/cli.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/cli.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/cli.py 2023-06-01 18:49:33.000000000 +0000 @@ -29,7 +29,7 @@ from uaclient import log as pro_log from uaclient import messages, security, security_status from uaclient import status as ua_status -from uaclient import util, version +from uaclient import timer, util, version from uaclient.api.api import call_api from 
uaclient.api.u.pro.attach.auto.full_auto_attach.v1 import ( FullAutoAttachOptions, @@ -47,6 +47,7 @@ from uaclient.api.u.pro.security.status.reboot_required.v1 import ( _reboot_required, ) +from uaclient.api.u.pro.status.is_attached.v1 import _is_attached from uaclient.apt import AptProxyScope, setup_apt_proxy from uaclient.data_types import AttachActionsConfigFile, IncorrectTypeError from uaclient.defaults import PRINT_WRAP_WIDTH @@ -63,8 +64,8 @@ ) from uaclient.files import notices, state_files from uaclient.files.notices import Notice -from uaclient.jobs.update_messaging import refresh_motd, update_motd_messages from uaclient.log import JsonArrayFormatter +from uaclient.timer.update_messaging import refresh_motd, update_motd_messages from uaclient.yaml import safe_dump, safe_load NAME = "pro" @@ -148,6 +149,9 @@ self.description = "\n".join( [self.base_desc] + service_descriptions ) + + self.description += "\n\n" + messages.PRO_HELP_SERVICE_INFO.msg + super().print_help(file=file) @staticmethod @@ -245,7 +249,7 @@ def wrapper(f): @wraps(f) def new_f(args, cfg, **kwargs): - if not cfg.is_attached: + if not _is_attached(cfg).is_attached: if msg_function: command = getattr(args, "command", "") service_names = getattr(args, "service", "") @@ -268,7 +272,7 @@ @wraps(f) def new_f(args, cfg): - if cfg.is_attached: + if _is_attached(cfg).is_attached: raise exceptions.AlreadyAttachedError( cfg.machine_token_file.account.get("name", "") ) @@ -486,6 +490,14 @@ " command." ), ) + parser.add_argument( + "--no-related", + action="store_true", + help=( + "If used, when fixing a USN, the command will not try to" + " also fix related USNs to the target USN." + ), + ) return parser @@ -624,8 +636,9 @@ cfg=cfg, issue_id=args.security_issue, dry_run=args.dry_run, + no_related=args.no_related, ) - return fix_status.value + return fix_status.exit_code def detach_parser(parser): @@ -733,6 +746,11 @@ default="cli", help=("output enable in the specified format (default: cli)"), ) + parser.add_argument( + "--variant", + action="store", + help=("The name of the variant to use when enabling the service"), + ) return parser @@ -920,6 +938,12 @@ @return: True on success, False otherwise """ + # Make sure we have the correct variant of the service + # This can affect what packages get uninstalled + variant = entitlement.enabled_variant + if variant is not None: + entitlement = variant + ret, reason = entitlement.disable() if not ret: @@ -1251,6 +1275,14 @@ @return: 0 on success, 1 otherwise """ + variant = getattr(args, "variant", "") + access_only = args.access_only + + if variant and access_only: + raise exceptions.InvalidOptionCombination( + option1="--access-only", option2="--variant" + ) + event.info(messages.REFRESH_CONTRACT_ENABLE) try: contract.request_updated_contract(cfg) @@ -1271,7 +1303,8 @@ ent_name, assume_yes=args.assume_yes, allow_beta=args.beta, - access_only=args.access_only, + access_only=access_only, + variant=variant, ) ua_status.status(cfg=cfg) # Update the status cache @@ -1327,6 +1360,7 @@ ret = _detach(cfg, assume_yes=args.assume_yes) if ret == 0: daemon.start() + timer.stop() event.process_events() return ret @@ -1688,7 +1722,7 @@ event.info("") event.set_output_content(status) - output = ua_status.format_tabular(status) + output = ua_status.format_tabular(status, show_all=show_all) event.info(util.handle_unicode_characters(output)) event.process_events() return ret @@ -1831,11 +1865,35 @@ logging.warning(NEW_VERSION_NOTICE.format(version=new_version)) +def 
_warn_about_output_redirection(cmd_args) -> None: + """Warn users that the user readable output may change.""" + if ( + cmd_args.command in ("status", "security-status") + and not sys.stdout.isatty() + ): + if hasattr(cmd_args, "format") and cmd_args.format in ("json", "yaml"): + return + logging.warning( + messages.WARNING_HUMAN_READABLE_OUTPUT.format( + command=cmd_args.command + ) + ) + + def setup_logging(console_level, log_level, log_file=None, logger=None): - """Setup console logging and debug logging to log_file""" + """Setup console logging and debug logging to log_file + + If run as non_root and cfg.log_file is provided, it is replaced + with another non-root log file. + """ if log_file is None: cfg = config.UAConfig() log_file = cfg.log_file + # if we are running as non-root, change log file + if not util.we_are_currently_root(): + log_file = pro_log.get_user_log_file() + if isinstance(log_level, str): + log_level = log_level.upper() console_formatter = util.LogFormatter() if logger is None: # Then we configure the root logger @@ -1853,20 +1911,18 @@ console_handler.set_name("ua-console") # Used to disable console logging logger.addHandler(console_handler) + log_file_path = pathlib.Path(log_file) + + if not log_file_path.exists(): + log_file_path.parent.mkdir(parents=True, exist_ok=True) + log_file_path.touch(mode=0o640) # Setup file logging - if util.we_are_currently_root(): - # Setup readable-by-root-only debug file logging if running as root - log_file_path = pathlib.Path(log_file) - - if not log_file_path.exists(): - log_file_path.touch() - log_file_path.chmod(0o644) - - file_handler = logging.FileHandler(log_file) - file_handler.setFormatter(JsonArrayFormatter()) - file_handler.setLevel(log_level) - file_handler.set_name("ua-file") - logger.addHandler(file_handler) + + file_handler = logging.FileHandler(log_file) + file_handler.setFormatter(JsonArrayFormatter()) + file_handler.setLevel(log_level) + file_handler.set_name("ua-file") + logger.addHandler(file_handler) def set_event_mode(cmd_args): @@ -1958,6 +2014,11 @@ @main_error_handler def main(sys_argv=None): + setup_logging( + logging.INFO, + defaults.CONFIG_DEFAULTS["log_level"], + defaults.CONFIG_DEFAULTS["log_file"], + ) if not sys_argv: sys_argv = sys.argv cfg = config.UAConfig() @@ -1991,6 +2052,9 @@ logging.debug( "Executed with environment variables: %r" % pro_environment ) + + _warn_about_output_redirection(args) + return_value = args.action(args, cfg=cfg) _warn_about_new_version(args) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/clouds/azure.py ubuntu-advantage-tools-28.1~18.04/uaclient/clouds/azure.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/clouds/azure.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/clouds/azure.py 2023-06-01 18:49:33.000000000 +0000 @@ -1,3 +1,4 @@ +import logging import os from typing import Any, Dict from urllib.error import HTTPError @@ -5,6 +6,8 @@ from uaclient import exceptions, system, util from uaclient.clouds import AutoAttachCloudInstance +LOG = logging.getLogger("pro.clouds.azure") + IMDS_BASE_URL = "http://169.254.169.254/metadata/" API_VERSION = "2020-09-01" # Needed to get subscription ID in attested data @@ -16,10 +19,10 @@ DMI_CHASSIS_ASSET_TAG = "/sys/class/dmi/id/chassis_asset_tag" AZURE_OVF_ENV_FILE = "/var/lib/cloud/seed/azure/ovf-env.xml" AZURE_CHASSIS_ASSET_TAG = "7783-7084-3265-9085-8269-3286-77" +AZURE_PRO_LICENSE_TYPE = "UBUNTU_PRO" class UAAutoAttachAzureInstance(AutoAttachCloudInstance): - # mypy does not 
handle @property around inner decorators # https://github.com/python/mypy/issues/1362 @property # type: ignore @@ -50,8 +53,18 @@ return os.path.exists(AZURE_OVF_ENV_FILE) def should_poll_for_pro_license(self) -> bool: - """Unsupported""" - return False + # Azure will make sure it is on all supported versions + return True def is_pro_license_present(self, *, wait_for_change: bool) -> bool: - raise exceptions.InPlaceUpgradeNotSupportedError() + if wait_for_change: + raise exceptions.CancelProLicensePolling() + + url = IMDS_URLS.get("compute", "") + try: + data, headers = util.readurl(url, headers={"Metadata": "true"}) + except (HTTPError, OSError) as e: + LOG.error(e) + raise exceptions.CancelProLicensePolling() + + return data.get("licenseType") == AZURE_PRO_LICENSE_TYPE diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/clouds/gcp.py ubuntu-advantage-tools-28.1~18.04/uaclient/clouds/gcp.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/clouds/gcp.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/clouds/gcp.py 2023-05-30 19:02:35.000000000 +0000 @@ -105,7 +105,7 @@ ) def should_poll_for_pro_license(self) -> bool: - series = system.get_platform_info()["series"] + series = system.get_release_info().series if series not in GCP_LICENSES: LOG.info("This series isn't supported for GCP auto-attach.") return False @@ -132,5 +132,5 @@ license_ids = [license["id"] for license in licenses] self.etag = headers.get("ETag", None) - series = system.get_platform_info()["series"] + series = system.get_release_info().series return GCP_LICENSES.get(series) in license_ids diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/clouds/tests/test_azure.py ubuntu-advantage-tools-28.1~18.04/uaclient/clouds/tests/test_azure.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/clouds/tests/test_azure.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/clouds/tests/test_azure.py 2023-06-01 18:49:33.000000000 +0000 @@ -5,8 +5,8 @@ import mock import pytest +from uaclient import exceptions from uaclient.clouds.azure import IMDS_BASE_URL, UAAutoAttachAzureInstance -from uaclient.exceptions import InPlaceUpgradeNotSupportedError M_PATH = "uaclient.clouds.azure." 
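For context, a minimal standalone sketch of the licenseType probe that is_pro_license_present() now performs against the Azure instance metadata service. The client builds its URL from IMDS_BASE_URL and IMDS_URLS, which are not fully shown in this hunk, so the URL below is an assumption based on the public Azure IMDS compute endpoint.

    # Rough standalone equivalent of the new Azure licenseType check; the URL is
    # assumed from public Azure IMDS documentation, not taken from this diff.
    import json
    from urllib.request import Request, urlopen

    IMDS_COMPUTE_URL = (
        "http://169.254.169.254/metadata/instance/compute?api-version=2020-09-01"
    )

    def has_ubuntu_pro_license() -> bool:
        req = Request(IMDS_COMPUTE_URL, headers={"Metadata": "true"})
        with urlopen(req, timeout=2) as resp:
            compute = json.loads(resp.read().decode("utf-8"))
        # AZURE_PRO_LICENSE_TYPE = "UBUNTU_PRO" in the hunk above
        return compute.get("licenseType") == "UBUNTU_PRO"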
@@ -124,13 +124,32 @@ instance = UAAutoAttachAzureInstance() assert viable is instance.is_viable - def test_unsupported_should_poll_for_pro_license(self): - """Unsupported""" + def test_should_poll_for_license(self): instance = UAAutoAttachAzureInstance() - assert not instance.should_poll_for_pro_license() + result = instance.should_poll_for_pro_license() + assert result - def test_unsupported_is_pro_license_present(self): - """Unsupported""" + @pytest.mark.parametrize( + "metadata_response, expected_result", + ( + (({}, {}), False), + (({"licenseType": None}, {}), False), + (({"licenseType": ""}, {}), False), + (({"licenseType": "RHEL_BYOS"}, {}), False), + (({"licenseType": "SLES_BYOS"}, {}), False), + (({"licenseType": "UBUNTU_PRO"}, {}), True), + ), + ) + @mock.patch(M_PATH + "util.readurl") + def test_is_licence_present( + self, m_readurl, metadata_response, expected_result + ): + instance = UAAutoAttachAzureInstance() + m_readurl.return_value = metadata_response + result = instance.is_pro_license_present(wait_for_change=False) + assert expected_result == result + + def test_is_licence_present_wait_for_change_raises_exception(self): instance = UAAutoAttachAzureInstance() - with pytest.raises(InPlaceUpgradeNotSupportedError): - instance.is_pro_license_present(wait_for_change=False) + with pytest.raises(exceptions.CancelProLicensePolling): + instance.is_pro_license_present(wait_for_change=True) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/clouds/tests/test_gcp.py ubuntu-advantage-tools-28.1~18.04/uaclient/clouds/tests/test_gcp.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/clouds/tests/test_gcp.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/clouds/tests/test_gcp.py 2023-05-30 19:02:35.000000000 +0000 @@ -5,6 +5,7 @@ import mock import pytest +from uaclient import system from uaclient.clouds.gcp import ( LAST_ETAG, LICENSES_URL, @@ -123,7 +124,12 @@ None, False, ([], {}), - {"series": "xenial"}, + system.ReleaseInfo( + distribution="", + release="", + series="xenial", + pretty_version="", + ), None, False, [ @@ -136,7 +142,12 @@ None, False, ([{"id": "8045211386737108299"}], {}), - {"series": "xenial"}, + system.ReleaseInfo( + distribution="", + release="", + series="xenial", + pretty_version="", + ), None, True, [ @@ -149,7 +160,12 @@ None, False, ([{"id": "8045211386737108299"}], {}), - {"series": "bionic"}, + system.ReleaseInfo( + distribution="", + release="", + series="bionic", + pretty_version="", + ), None, False, [ @@ -162,7 +178,12 @@ None, False, ([{"id": "6022427724719891830"}], {}), - {"series": "bionic"}, + system.ReleaseInfo( + distribution="", + release="", + series="bionic", + pretty_version="", + ), None, True, [ @@ -175,7 +196,12 @@ None, False, ([{"id": "599959289349842382"}], {}), - {"series": "focal"}, + system.ReleaseInfo( + distribution="", + release="", + series="focal", + pretty_version="", + ), None, True, [ @@ -188,7 +214,12 @@ None, False, ([{"id": "8045211386737108299"}], {"ETag": "test-etag"}), - {"series": "xenial"}, + system.ReleaseInfo( + distribution="", + release="", + series="xenial", + pretty_version="", + ), "test-etag", True, [ @@ -201,7 +232,12 @@ None, False, ([{"id": "wrong"}], {"ETag": "test-etag"}), - {"series": "xenial"}, + system.ReleaseInfo( + distribution="", + release="", + series="xenial", + pretty_version="", + ), "test-etag", False, [ @@ -214,7 +250,12 @@ None, True, ([{"id": "8045211386737108299"}], {"ETag": "test-etag"}), - {"series": "xenial"}, + system.ReleaseInfo( + 
distribution="", + release="", + series="xenial", + pretty_version="", + ), "test-etag", True, [ @@ -228,7 +269,12 @@ "existing-etag", True, ([{"id": "8045211386737108299"}], {"ETag": "test-etag"}), - {"series": "xenial"}, + system.ReleaseInfo( + distribution="", + release="", + series="xenial", + pretty_version="", + ), "test-etag", True, [ @@ -242,12 +288,12 @@ ), ), ) - @mock.patch(M_PATH + "system.get_platform_info") + @mock.patch(M_PATH + "system.get_release_info") @mock.patch(M_PATH + "util.readurl") def test_is_license_present( self, m_readurl, - m_get_platform_info, + m_get_release_info, existing_etag, wait_for_change, metadata_response, @@ -259,7 +305,7 @@ instance = UAAutoAttachGCPInstance() instance.etag = existing_etag m_readurl.return_value = metadata_response - m_get_platform_info.return_value = platform_info + m_get_release_info.return_value = platform_info result = instance.is_pro_license_present( wait_for_change=wait_for_change @@ -273,18 +319,58 @@ @pytest.mark.parametrize( "platform_info, expected_result", ( - ({"series": "xenial"}, True), - ({"series": "bionic"}, True), - ({"series": "focal"}, True), - ({"series": "non_lts"}, False), - ({"series": "jammy"}, True), + ( + system.ReleaseInfo( + distribution="", + release="", + series="xenial", + pretty_version="", + ), + True, + ), + ( + system.ReleaseInfo( + distribution="", + release="", + series="bionic", + pretty_version="", + ), + True, + ), + ( + system.ReleaseInfo( + distribution="", + release="", + series="focal", + pretty_version="", + ), + True, + ), + ( + system.ReleaseInfo( + distribution="", + release="", + series="non_lts", + pretty_version="", + ), + False, + ), + ( + system.ReleaseInfo( + distribution="", + release="", + series="jammy", + pretty_version="", + ), + True, + ), ), ) - @mock.patch(M_PATH + "system.get_platform_info") + @mock.patch(M_PATH + "system.get_release_info") def test_should_poll_for_license( - self, m_get_platform_info, platform_info, expected_result + self, m_get_release_info, platform_info, expected_result ): - m_get_platform_info.return_value = platform_info + m_get_release_info.return_value = platform_info instance = UAAutoAttachGCPInstance() result = instance.should_poll_for_pro_license() assert expected_result == result diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/clouds/tests/test_identity.py ubuntu-advantage-tools-28.1~18.04/uaclient/clouds/tests/test_identity.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/clouds/tests/test_identity.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/clouds/tests/test_identity.py 2023-05-30 19:02:35.000000000 +0000 @@ -71,11 +71,12 @@ ), ), ) + @mock.patch("os.path.exists", return_value=True) @mock.patch("uaclient.system.load_file") - @mock.patch(M_PATH + "system.which", return_value="/usr/bin/cloud-id") - @mock.patch(M_PATH + "system.subp", return_value=("test", "")) + @mock.patch("uaclient.system.which", return_value="/usr/bin/cloud-id") + @mock.patch("uaclient.system.subp", return_value=("test", "")) def test_cloud_type_when_using_settings_override( - self, m_subp, m_which, m_load_file, settings_overrides + self, m_subp, m_which, m_load_file, _m_path_exists, settings_overrides ): if "azure" in settings_overrides: expected_value = "azure" diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/config.py ubuntu-advantage-tools-28.1~18.04/uaclient/config.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/config.py 2023-04-05 15:14:00.000000000 +0000 +++ 
ubuntu-advantage-tools-28.1~18.04/uaclient/config.py 2023-06-01 18:49:33.000000000 +0000 @@ -71,9 +71,8 @@ ) # A data path is a filename, an attribute ("private") indicating whether it -# should only be readable by root, and an attribute ("permanent") indicating -# whether it should stick around even when detached. -DataPath = namedtuple("DataPath", ("filename", "private", "permanent")) +# should only be readable by root. +DataPath = namedtuple("DataPath", ("filename", "private")) event = event_logger.get_event_logger() @@ -89,13 +88,10 @@ class UAConfig: data_paths = { - "instance-id": DataPath("instance-id", True, False), - "machine-access-cis": DataPath("machine-access-cis.json", True, False), - "lock": DataPath("lock", False, False), - "status-cache": DataPath("status.json", False, False), - "marker-reboot-cmds": DataPath( - "marker-reboot-cmds-required", False, False - ), + "instance-id": DataPath("instance-id", True), + "machine-access-cis": DataPath("machine-access-cis.json", True), + "lock": DataPath("lock", False), + "status-cache": DataPath("status.json", False), } # type: Dict[str, DataPath] ua_scoped_proxy_options = ("ua_apt_http_proxy", "ua_apt_https_proxy") @@ -396,11 +392,6 @@ ) @property - def is_attached(self): - """Report whether this machine configuration is attached to UA.""" - return bool(self.machine_token) # machine_token is removed on detach - - @property def features(self): """Return a dictionary of any features provided in uaclient.conf.""" features = self.cfg.get("features") @@ -438,14 +429,6 @@ cache_path = self.data_path(key) return os.path.exists(cache_path) - def _perform_delete(self, cache_path: str) -> None: - """Delete the given cache_path if it exists. - - (This is a separate method to allow easier disabling of deletion during - tests.) - """ - system.ensure_file_absent(cache_path) - def delete_cache_key(self, key: str) -> None: """Remove specific cache file.""" if not key: @@ -457,17 +440,14 @@ elif key == "lock": notices.remove(Notice.OPERATION_IN_PROGRESS) cache_path = self.data_path(key) - self._perform_delete(cache_path) + system.ensure_file_absent(cache_path) - def delete_cache(self, delete_permanent: bool = False): + def delete_cache(self): """ Remove configuration cached response files class attributes. - - :param delete_permanent: even delete the "permanent" files """ for path_key in self.data_paths.keys(): - if delete_permanent or not self.data_paths[path_key].permanent: - self.delete_cache_key(path_key) + self.delete_cache_key(path_key) def read_cache(self, key: str, silent: bool = False) -> Optional[Any]: cache_path = self.data_path(key) @@ -640,12 +620,6 @@ if config_file: return config_file - local_cfg = os.path.join( - os.getcwd(), os.path.basename(DEFAULT_CONFIG_FILE) - ) - if os.path.exists(local_cfg): - return local_cfg - return DEFAULT_CONFIG_FILE diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/conftest.py ubuntu-advantage-tools-28.1~18.04/uaclient/conftest.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/conftest.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/conftest.py 2023-05-30 19:02:35.000000000 +0000 @@ -85,7 +85,13 @@ (It returns a function so that the requester can decide when to examine the logs; if it returned caplog.text directly, that would always be empty.) 
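Since the is_attached property is removed from UAConfig in this hunk, callers follow the pattern used throughout this diff and read attachment state through the API helper instead. A short before/after sketch, with the old property shown only as a comment:

    # Before (removed above): attachment state was read straight off the config:
    #     if cfg.is_attached: ...
    # After: the internal helper wraps the same machine_token check.
    from uaclient.api.u.pro.status.is_attached.v1 import _is_attached
    from uaclient.config import UAConfig

    cfg = UAConfig()
    if _is_attached(cfg).is_attached:
        print("machine is attached")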
""" - log_level = getattr(request, "param", logging.INFO) + cap_params = getattr(request, "param", logging.INFO) + log_filter = None + if isinstance(cap_params, tuple): + log_level = cap_params[0] + log_filter = cap_params[1] + else: + log_level = cap_params try: try: caplog = request.getfixturevalue("caplog") @@ -94,6 +100,8 @@ # deprecated in favour of getfixturevalue caplog = request.getfuncargvalue("caplog") caplog.set_level(log_level) + if log_filter: + caplog.handler.addFilter(log_filter()) def _func(): return caplog.text @@ -108,6 +116,8 @@ "%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s" ) ) + if log_filter: + handler.addFilter(log_filter()) root.addHandler(handler) def _func(): @@ -144,6 +154,10 @@ ) -> None: if not cfg_overrides.get("data_dir"): cfg_overrides.update({"data_dir": tmpdir.strpath}) + if not cfg_overrides.get("log_file"): + cfg_overrides.update( + {"log_file": tmpdir.join("log_file.log").strpath} + ) super().__init__( cfg_overrides, user_config=UserConfigData(), diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/contract_data_types.py ubuntu-advantage-tools-28.1~18.04/uaclient/contract_data_types.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/contract_data_types.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/contract_data_types.py 2023-06-01 18:49:33.000000000 +0000 @@ -91,6 +91,18 @@ self.externalAccountIDs = externalAccountIDs +class PlatformChecks(DataObject): + fields = [ + Field("cpu_vendor_ids", data_list(StringDataValue), False), + ] + + def __init__( + self, + cpu_vendor_ids: Optional[List[str]], + ): + self.cpu_vendor_ids = cpu_vendor_ids + + class Affordances(DataObject): fields = [ Field("architectures", data_list(StringDataValue), False), @@ -100,6 +112,7 @@ Field("minKernelVersion", StringDataValue, False), Field("tier", StringDataValue, False), Field("supportLevel", StringDataValue, False), + Field("platformChecks", PlatformChecks, False), ] def __init__( @@ -111,6 +124,7 @@ minKernelVersion: Optional[str], tier: Optional[str], supportLevel: Optional[str], + platformChecks: Optional[PlatformChecks], ): self.architectures = architectures self.presentedAs = presentedAs @@ -119,6 +133,7 @@ self.minKernelVersion = minKernelVersion self.tier = tier self.supportLevel = supportLevel + self.platformChecks = platformChecks class Obligations(DataObject): @@ -176,15 +191,18 @@ fields = [ Field("series", StringDataValue, False), Field("cloud", StringDataValue, False), + Field("variant", StringDataValue, False), ] def __init__( self, series: Optional[str], cloud: Optional[str], + variant: Optional[str], ): self.series = series self.cloud = cloud + self.variant = variant class Override(DataObject): diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/contract.py ubuntu-advantage-tools-28.1~18.04/uaclient/contract.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/contract.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/contract.py 2023-06-01 18:49:33.000000000 +0000 @@ -1,3 +1,4 @@ +import copy import logging import socket from typing import Any, Dict, List, Optional, Tuple @@ -11,25 +12,39 @@ system, util, ) +from uaclient.api.u.pro.status.enabled_services.v1 import _enabled_services from uaclient.config import UAConfig from uaclient.defaults import ATTACH_FAIL_DATE_FORMAT -from uaclient.entitlements.entitlement_status import UserFacingStatus -API_V1_CONTEXT_MACHINE_TOKEN = "/v1/context/machines/token" -API_V1_TMPL_CONTEXT_MACHINE_TOKEN_RESOURCE = ( +# Here we 
describe every endpoint from the ua-contracts +# service that is used by this client implementation. +API_V1_ADD_CONTRACT_MACHINE = "/v1/context/machines/token" +API_V1_GET_CONTRACT_MACHINE = ( "/v1/contracts/{contract}/context/machines/{machine}" ) -API_V1_RESOURCES = "/v1/resources" -API_V1_TMPL_RESOURCE_MACHINE_ACCESS = ( +API_V1_UPDATE_CONTRACT_MACHINE = ( + "/v1/contracts/{contract}/context/machines/{machine}" +) +API_V1_AVAILABLE_RESOURCES = "/v1/resources" +API_V1_GET_RESOURCE_MACHINE_ACCESS = ( "/v1/resources/{resource}/context/machines/{machine}" ) -API_V1_AUTO_ATTACH_CLOUD_TOKEN = "/v1/clouds/{cloud_type}/token" -API_V1_MACHINE_ACTIVITY = "/v1/contracts/{contract}/machine-activity/{machine}" -API_V1_CONTRACT_INFORMATION = "/v1/contract" - -API_V1_MAGIC_ATTACH = "/v1/magic-attach" +API_V1_GET_CONTRACT_TOKEN_FOR_CLOUD_INSTANCE = "/v1/clouds/{cloud_type}/token" +API_V1_UPDATE_ACTIVITY_TOKEN = ( + "/v1/contracts/{contract}/machine-activity/{machine}" +) +API_V1_GET_CONTRACT_USING_TOKEN = "/v1/contract" -OVERRIDE_SELECTOR_WEIGHTS = {"series_overrides": 1, "series": 2, "cloud": 3} +API_V1_GET_MAGIC_ATTACH_TOKEN_INFO = "/v1/magic-attach" +API_V1_NEW_MAGIC_ATTACH = "/v1/magic-attach" +API_V1_REVOKE_MAGIC_ATTACH = "/v1/magic-attach" + +OVERRIDE_SELECTOR_WEIGHTS = { + "series_overrides": 1, + "series": 2, + "cloud": 3, + "variant": 4, +} event = event_logger.get_event_logger() @@ -40,7 +55,7 @@ api_error_cls = exceptions.ContractAPIError @util.retry(socket.timeout, retry_sleeps=[1, 2, 2]) - def request_contract_machine_attach(self, contract_token, machine_id=None): + def add_contract_machine(self, contract_token, machine_id=None): """Requests machine attach to the provided machine_id. @param contract_token: Token string providing authentication to @@ -54,7 +69,7 @@ headers.update({"Authorization": "Bearer {}".format(contract_token)}) data = self._get_platform_data(machine_id) machine_token, _headers = self.request_url( - API_V1_CONTEXT_MACHINE_TOKEN, data=data, headers=headers + API_V1_ADD_CONTRACT_MACHINE, data=data, headers=headers ) self.cfg.machine_token_file.write(machine_token) @@ -66,25 +81,24 @@ return machine_token - def request_resources(self) -> Dict[str, Any]: + def available_resources(self) -> Dict[str, Any]: """Requests list of entitlements available to this machine type.""" resource_response, headers = self.request_url( - API_V1_RESOURCES, query_params=self._get_platform_basic_info() + API_V1_AVAILABLE_RESOURCES, + query_params=self._get_platform_basic_info(), ) return resource_response - def request_contract_information( - self, contract_token: str - ) -> Dict[str, Any]: + def get_contract_using_token(self, contract_token: str) -> Dict[str, Any]: headers = self.headers() headers.update({"Authorization": "Bearer {}".format(contract_token)}) response_data, _response_headers = self.request_url( - API_V1_CONTRACT_INFORMATION, headers=headers + API_V1_GET_CONTRACT_USING_TOKEN, headers=headers ) return response_data @util.retry(socket.timeout, retry_sleeps=[1, 2, 2]) - def request_auto_attach_contract_token( + def get_contract_token_for_cloud_instance( self, *, instance: clouds.AutoAttachCloudInstance ): """Requests contract token for auto-attach images for Pro clouds. 
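The renames above leave the HTTP paths untouched and only change the Python-facing names (request_auto_attach_contract_token becomes get_contract_token_for_cloud_instance, request_resources becomes available_resources, and so on). A hedged sketch of the auto-attach call site, mirroring uaclient/actions.py earlier in this diff; how the cloud instance object is obtained is an assumption, not shown in this hunk.

    # Sketch only: fetch a contract token for a Pro cloud instance with the
    # renamed client method; the cloud_instance_factory() call shape is assumed.
    from uaclient import contract
    from uaclient.clouds.identity import cloud_instance_factory
    from uaclient.config import UAConfig

    cfg = UAConfig()
    cloud = cloud_instance_factory()  # raises when not on a recognised cloud
    client = contract.UAContractClient(cfg)
    token_response = client.get_contract_token_for_cloud_instance(instance=cloud)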
@@ -95,7 +109,7 @@ """ try: response, _headers = self.request_url( - API_V1_AUTO_ATTACH_CLOUD_TOKEN.format( + API_V1_GET_CONTRACT_TOKEN_FOR_CLOUD_INSTANCE.format( cloud_type=instance.cloud_type ), data=instance.identity_doc, @@ -110,7 +124,7 @@ self.cfg.write_cache("contract-token", response) return response - def request_resource_machine_access( + def get_resource_machine_access( self, machine_token: str, resource: str, @@ -130,7 +144,7 @@ machine_id = system.get_machine_id(self.cfg) headers = self.headers() headers.update({"Authorization": "Bearer {}".format(machine_token)}) - url = API_V1_TMPL_RESOURCE_MACHINE_ACCESS.format( + url = API_V1_GET_RESOURCE_MACHINE_ACCESS.format( resource=resource, machine=machine_id ) resource_access, headers = self.request_url(url, headers=headers) @@ -141,20 +155,20 @@ ) return resource_access - def request_machine_token_update( + def update_contract_machine( self, machine_token: str, contract_id: str, machine_id: Optional[str] = None, ) -> Dict: """Update existing machine-token for an attached machine.""" - return self._request_machine_token_update( + return self._update_contract_machine( machine_token=machine_token, contract_id=contract_id, machine_id=machine_id, ) - def report_machine_activity(self): + def update_activity_token(self): """Report current activity token and enabled services. This will report to the contracts backend all the current @@ -165,7 +179,7 @@ machine_id = system.get_machine_id(self.cfg) request_data = self._get_activity_info(machine_id) - url = API_V1_MACHINE_ACTIVITY.format( + url = API_V1_UPDATE_ACTIVITY_TOKEN.format( contract=contract_id, machine=machine_id ) headers = self.headers() @@ -200,7 +214,7 @@ try: response, _ = self.request_url( - API_V1_MAGIC_ATTACH, headers=headers + API_V1_GET_MAGIC_ATTACH_TOKEN_INFO, headers=headers ) except exceptions.ContractAPIError as e: if hasattr(e, "code"): @@ -221,7 +235,7 @@ try: response, _ = self.request_url( - API_V1_MAGIC_ATTACH, + API_V1_NEW_MAGIC_ATTACH, headers=headers, method="POST", ) @@ -242,7 +256,7 @@ try: self.request_url( - API_V1_MAGIC_ATTACH, + API_V1_REVOKE_MAGIC_ATTACH, headers=headers, method="DELETE", ) @@ -259,7 +273,7 @@ logging.exception(str(e)) raise exceptions.ConnectivityError() - def get_updated_contract_info( + def get_contract_machine( self, machine_token: str, contract_id: str, @@ -279,7 +293,7 @@ ) headers = self.headers() headers.update({"Authorization": "Bearer {}".format(machine_token)}) - url = API_V1_TMPL_CONTEXT_MACHINE_TOKEN_RESOURCE.format( + url = API_V1_GET_CONTRACT_MACHINE.format( contract=contract_id, machine=machine_id, ) @@ -288,13 +302,12 @@ method="GET", headers=headers, query_params=self._get_platform_basic_info(), - timeout=2, ) if headers.get("expires"): response["expires"] = headers["expires"] return response - def _request_machine_token_update( + def _update_contract_machine( self, machine_token: str, contract_id: str, @@ -314,7 +327,7 @@ headers.update({"Authorization": "Bearer {}".format(machine_token)}) data = self._get_platform_data(machine_id) data["activityInfo"] = self._get_activity_info() - url = API_V1_TMPL_CONTEXT_MACHINE_TOKEN_RESOURCE.format( + url = API_V1_UPDATE_CONTRACT_MACHINE.format( contract=contract_id, machine=data["machineId"] ) response, headers = self.request_url( @@ -342,46 +355,43 @@ """Return a dict of platform-related data for contract requests""" if not machine_id: machine_id = system.get_machine_id(self.cfg) - platform = system.get_platform_info() - platform_os = platform.copy() - arch = 
platform_os.pop("arch") return { "machineId": machine_id, - "architecture": arch, - "os": platform_os, + "architecture": system.get_dpkg_arch(), + "os": { + "type": "Linux", + "distribution": system.get_release_info().distribution, + "release": system.get_release_info().release, + "series": system.get_release_info().series, + "version": system.get_release_info().pretty_version, + "kernel": system.get_kernel_info().uname_release, + "virt": system.get_virt_type(), + }, } def _get_platform_basic_info(self): """Return a dict of platform basic info for some contract requests""" - platform = system.get_platform_info() return { - "architecture": platform["arch"], - "series": platform["series"], - "kernel": platform["kernel"], - "virt": platform["virt"], + "architecture": system.get_dpkg_arch(), + "series": system.get_release_info().series, + "kernel": system.get_kernel_info().uname_release, + "virt": system.get_virt_type(), } def _get_activity_info(self, machine_id: Optional[str] = None): """Return a dict of activity info data for contract requests""" - from uaclient.entitlements import ENTITLEMENT_CLASSES - if not machine_id: machine_id = system.get_machine_id(self.cfg) # If the activityID is null we should provide the endpoint # with the instance machine id as the activityID activity_id = self.cfg.machine_token_file.activity_id or machine_id - - enabled_services = [ - ent(self.cfg).name - for ent in ENTITLEMENT_CLASSES - if ent(self.cfg).user_facing_status()[0] == UserFacingStatus.ACTIVE - ] + enabled_services = _enabled_services(self.cfg).enabled_services or [] return { "activityID": activity_id, "activityToken": self.cfg.machine_token_file.activity_token, - "resources": enabled_services, + "resources": [service.name for service in enabled_services], } @@ -413,12 +423,14 @@ # We need to sort our entitlements because some of them # depend on other service to be enable first. 
+ failed_services = [] # type: List[str] for name in entitlements_enable_order(cfg): try: new_entitlement = new_entitlements[name] except KeyError: continue + failed_services = [] try: deltas, service_enabled = process_entitlement_delta( cfg=cfg, @@ -429,7 +441,7 @@ ) except exceptions.UserFacingError: delta_error = True - event.service_failed(name) + failed_services.append(name) with util.disable_log_to_console(): logging.error( "Failed to process contract delta for {name}:" @@ -437,7 +449,7 @@ ) except Exception: unexpected_error = True - event.service_failed(name) + failed_services.append(name) with util.disable_log_to_console(): logging.exception( "Unexpected error processing contract delta for {name}:" @@ -448,15 +460,19 @@ # them, then we will mark that service as successfully enabled if service_enabled and deltas: event.service_processed(name) + event.services_failed(failed_services) if unexpected_error: - raise exceptions.UserFacingError( - msg=messages.UNEXPECTED_ERROR.msg, - msg_code=messages.UNEXPECTED_ERROR.name, + raise exceptions.AttachFailureUnknownError( + failed_services=[ + (name, messages.UNEXPECTED_ERROR) for name in failed_services + ] ) elif delta_error: - raise exceptions.UserFacingError( - msg=messages.ATTACH_FAILURE_DEFAULT_SERVICES.msg, - msg_code=messages.ATTACH_FAILURE_DEFAULT_SERVICES.name, + raise exceptions.AttachFailureDefaultServices( + failed_services=[ + (name, messages.ATTACH_FAILURE_DEFAULT_SERVICES) + for name in failed_services + ] ) @@ -500,8 +516,14 @@ orig=orig_access, new=new_access ) raise exceptions.UserFacingError(msg=msg.msg, msg_code=msg.name) + + variant = ( + new_access.get("entitlements", {}) + .get("obligations", {}) + .get("use_selector", "") + ) try: - ent_cls = entitlement_factory(cfg=cfg, name=name) + ent_cls = entitlement_factory(cfg=cfg, name=name, variant=variant) except exceptions.EntitlementNotFoundError as exc: logging.debug( 'Skipping entitlement deltas for "%s". 
No such class', name @@ -583,9 +605,7 @@ contract_client = UAContractClient(cfg) if contract_token: # We are a mid ua-attach and need to get machinetoken try: - contract_client.request_contract_machine_attach( - contract_token=contract_token - ) + contract_client.add_contract_machine(contract_token=contract_token) except exceptions.UrlError as e: if isinstance(e, exceptions.ContractAPIError): if hasattr(e, "code"): @@ -605,7 +625,7 @@ else: machine_token = orig_token["machineToken"] contract_id = orig_token["machineTokenInfo"]["contractInfo"]["id"] - resp = contract_client.request_machine_token_update( + resp = contract_client.update_contract_machine( machine_token=machine_token, contract_id=contract_id ) contract_client.update_files_after_machine_token_update(resp) @@ -621,14 +641,14 @@ def get_available_resources(cfg: UAConfig) -> List[Dict]: """Query available resources from the contract server for this machine.""" client = UAContractClient(cfg) - resources = client.request_resources() + resources = client.available_resources() return resources.get("resources", []) def get_contract_information(cfg: UAConfig, token: str) -> Dict[str, Any]: """Query contract information for a specific token""" client = UAContractClient(cfg) - return client.request_contract_information(token) + return client.get_contract_using_token(token) def is_contract_changed(cfg: UAConfig) -> bool: @@ -644,9 +664,7 @@ return False contract_client = UAContractClient(cfg) - resp = contract_client.get_updated_contract_info( - machine_token, contract_id - ) + resp = contract_client.get_contract_machine(machine_token, contract_id) resp_expiry = ( resp.get("machineTokenInfo", {}) .get("contractInfo", {}) @@ -684,11 +702,16 @@ def _select_overrides( - entitlement: Dict[str, Any], series_name: str, cloud_type: str + entitlement: Dict[str, Any], + series_name: str, + cloud_type: str, + variant: Optional[str] = None, ) -> Dict[int, Dict[str, Any]]: overrides = {} selector_values = {"series": series_name, "cloud": cloud_type} + if variant: + selector_values["variant"] = variant series_overrides = entitlement.pop("series", {}).pop(series_name, {}) if series_overrides: @@ -696,7 +719,7 @@ OVERRIDE_SELECTOR_WEIGHTS["series_overrides"] ] = series_overrides - general_overrides = entitlement.pop("overrides", []) + general_overrides = copy.deepcopy(entitlement.get("overrides", [])) for override in general_overrides: weight = _get_override_weight( override.pop("selector"), selector_values @@ -708,7 +731,9 @@ def apply_contract_overrides( - orig_access: Dict[str, Any], series: Optional[str] = None + orig_access: Dict[str, Any], + series: Optional[str] = None, + variant: Optional[str] = None, ) -> None: """Apply series-specific overrides to an entitlement dict. 
@@ -733,12 +758,14 @@ ) series_name = ( - system.get_platform_info()["series"] if series is None else series + system.get_release_info().series if series is None else series ) cloud_type, _ = get_cloud_type() orig_entitlement = orig_access.get("entitlement", {}) - overrides = _select_overrides(orig_entitlement, series_name, cloud_type) + overrides = _select_overrides( + orig_entitlement, series_name, cloud_type, variant + ) for _weight, overrides_to_apply in sorted(overrides.items()): for key, value in overrides_to_apply.items(): diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/daemon/poll_for_pro_license.py ubuntu-advantage-tools-28.1~18.04/uaclient/daemon/poll_for_pro_license.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/daemon/poll_for_pro_license.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/daemon/poll_for_pro_license.py 2023-06-01 18:49:33.000000000 +0000 @@ -2,7 +2,9 @@ import time from uaclient import actions, exceptions, lock, system, util +from uaclient.api.u.pro.status.is_attached.v1 import _is_attached from uaclient.clouds import AutoAttachCloudInstance +from uaclient.clouds.azure import UAAutoAttachAzureInstance from uaclient.clouds.gcp import UAAutoAttachGCPInstance from uaclient.clouds.identity import cloud_instance_factory from uaclient.config import UAConfig @@ -32,7 +34,7 @@ ): LOG.debug("Configured to not auto attach, shutting down") return - if cfg.is_attached: + if _is_attached(cfg).is_attached: LOG.debug("Already attached, shutting down") return if not system.is_current_series_lts(): @@ -45,8 +47,15 @@ LOG.debug("Not on cloud, shutting down") return - if not isinstance(cloud, UAAutoAttachGCPInstance): - LOG.debug("Not on gcp, shutting down") + is_supported_cloud = any( + isinstance(cloud, cloud_instance) + for cloud_instance in ( + UAAutoAttachGCPInstance, + UAAutoAttachAzureInstance, + ) + ) + if not is_supported_cloud: + LOG.debug("Not on supported cloud platform, shutting down") return if not cloud.should_poll_for_pro_license(): @@ -87,7 +96,7 @@ time.sleep(cfg.polling_error_retry_delay) continue else: - if cfg.is_attached: + if _is_attached(cfg).is_attached: # This could have changed during the long poll or sleep LOG.debug("Already attached, shutting down") return diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/daemon/retry_auto_attach.py ubuntu-advantage-tools-28.1~18.04/uaclient/daemon/retry_auto_attach.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/daemon/retry_auto_attach.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/daemon/retry_auto_attach.py 2023-06-01 18:49:33.000000000 +0000 @@ -8,6 +8,7 @@ FullAutoAttachOptions, full_auto_attach, ) +from uaclient.api.u.pro.status.is_attached.v1 import _is_attached from uaclient.config import UAConfig from uaclient.daemon import AUTO_ATTACH_STATUS_MOTD_FILE from uaclient.files import notices, state_files @@ -89,7 +90,7 @@ def retry_auto_attach(cfg: UAConfig) -> None: # in case we got started while already attached somehow - if cfg.is_attached: + if _is_attached(cfg).is_attached: return # pick up where we left off @@ -145,7 +146,7 @@ time.sleep(interval) - if cfg.is_attached: + if _is_attached(cfg).is_attached: # We attached while sleeping - hooray! break @@ -171,7 +172,7 @@ cleanup(cfg) - if not cfg.is_attached: + if not _is_attached(cfg).is_attached: # Total failure!! 
state_files.retry_auto_attach_state_file.write( state_files.RetryAutoAttachState( diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/daemon/tests/test_poll_for_pro_license.py ubuntu-advantage-tools-28.1~18.04/uaclient/daemon/tests/test_poll_for_pro_license.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/daemon/tests/test_poll_for_pro_license.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/daemon/tests/test_poll_for_pro_license.py 2023-06-01 18:49:33.000000000 +0000 @@ -158,7 +158,7 @@ None, None, None, - [mock.call("Not on gcp, shutting down")], + [mock.call("Not on supported cloud platform, shutting down")], [], [], ), diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/daemon/tests/test_retry_auto_attach.py ubuntu-advantage-tools-28.1~18.04/uaclient/daemon/tests/test_retry_auto_attach.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/daemon/tests/test_retry_auto_attach.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/daemon/tests/test_retry_auto_attach.py 2023-06-01 18:49:33.000000000 +0000 @@ -115,9 +115,13 @@ FakeConfig, ): with mock.patch( - "uaclient.config.UAConfig.is_attached", + "uaclient.daemon.retry_auto_attach._is_attached", new_callable=mock.PropertyMock, - side_effect=[False, True, True], + side_effect=[ + mock.MagicMock(is_attached=False), + mock.MagicMock(is_attached=True), + mock.MagicMock(is_attached=True), + ], ): cfg = FakeConfig() retry_auto_attach(cfg) @@ -670,9 +674,12 @@ interval_index=18, failure_reason=None ) with mock.patch( - "uaclient.config.UAConfig.is_attached", + "uaclient.daemon.retry_auto_attach._is_attached", new_callable=mock.PropertyMock, - side_effect=[False, is_attached_at_end], + side_effect=[ + mock.MagicMock(is_attached=False), + mock.MagicMock(is_attached=is_attached_at_end), + ], ): cfg = FakeConfig() retry_auto_attach(cfg) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/data_types.py ubuntu-advantage-tools-28.1~18.04/uaclient/data_types.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/data_types.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/data_types.py 2023-05-30 19:02:35.000000000 +0000 @@ -1,10 +1,13 @@ import datetime import json +import logging from enum import Enum from typing import Any, List, Optional, Type, TypeVar, Union from uaclient import exceptions, messages, util +LOG = logging.getLogger(__name__) + class IncorrectTypeError(exceptions.UserFacingError): def __init__(self, expected_type: str, got_type: str): @@ -290,16 +293,13 @@ val = field.data_cls.from_value(val) except IncorrectTypeError as e: if not field.required and optional_type_errors_become_null: - # SC-1428: we should warn here, but this currently runs - # before setup_logging() in the case of - # user-config.json. 
- # - # logging.warning( - # "{} is wrong type (expected {} but got {}) but " - # "considered optional - treating as null".format( - # field.key, e.expected_type, e.got_type - # ) - # ) + LOG.warning( + "%s is wrong type (expected %s but got %s) but " + "considered optional - treating as null", + field.key, + e.expected_type, + e.got_type, + ) val = None else: raise IncorrectFieldTypeError(e, field.dict_key) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/defaults.py ubuntu-advantage-tools-28.1~18.04/uaclient/defaults.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/defaults.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/defaults.py 2023-05-30 19:02:35.000000000 +0000 @@ -7,7 +7,6 @@ UAC_ETC_PATH = "/etc/ubuntu-advantage/" UAC_RUN_PATH = "/run/ubuntu-advantage/" -UAC_TMP_PATH = "/tmp/ubuntu-advantage/" DEFAULT_DATA_DIR = "/var/lib/ubuntu-advantage" MACHINE_TOKEN_FILE = "machine-token.json" PRIVATE_SUBDIR = "/private" @@ -69,3 +68,4 @@ WORLD_READABLE_MODE = 0o644 NOTICES_PERMANENT_DIRECTORY = DEFAULT_DATA_DIR + "/notices/" NOTICES_TEMPORARY_DIRECTORY = UAC_RUN_PATH + "notices/" +USER_CACHE_SUBDIR = "ubuntu-pro" diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/base.py ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/base.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/base.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/base.py 2023-06-01 18:49:33.000000000 +0000 @@ -1,11 +1,13 @@ import abc +import copy import logging import os import sys from datetime import datetime -from typing import Any, Dict, List, Optional, Tuple, Type, Union +from typing import Any, Dict, List, Optional, Set, Tuple, Type, Union from uaclient import config, contract, event_logger, messages, system, util +from uaclient.api.u.pro.status.is_attached.v1 import _is_attached from uaclient.defaults import DEFAULT_HELP_FILE from uaclient.entitlements.entitlement_status import ( ApplicabilityStatus, @@ -65,6 +67,9 @@ affordance_check_kernel_min_version = True affordance_check_kernel_flavor = True + # Determine if the service is a variant of an existing service + is_variant = False + @property @abc.abstractmethod def name(self) -> str: @@ -72,6 +77,11 @@ pass @property + def variant_name(self) -> str: + """The lowercase name of this entitlement, in case it is a variant""" + return "" + + @property def valid_names(self) -> List[str]: """The list of names this entitlement may be called.""" valid_names = [self.name] @@ -94,16 +104,27 @@ @property def presentation_name(self) -> str: """The user-facing name shown for this entitlement""" - if self.cfg.machine_token_file.is_present: + if self.is_variant: + return self.variant_name + elif self.cfg.machine_token_file.is_present: return ( - self.cfg.machine_token_file.entitlements.get(self.name, {}) - .get("entitlement", {}) + self.entitlement_cfg.get("entitlement", {}) .get("affordances", {}) .get("presentedAs", self.name) ) else: return self.name + def verify_platform_checks( + self, platform_check: Dict[str, Any] + ) -> Tuple[bool, Optional[messages.NamedMessage]]: + """Verify specific platform checks for a service. + + This should only be used if the service requires custom platform checks + to check if it is available or not in the machine. 
+ """ + return True, None + @property def help_info(self) -> str: """Help information for the entitlement""" @@ -116,6 +137,15 @@ self._help_info = help_dict.get(self.name, {}).get("help", "") + if self.variants: + variant_items = [ + " * {}: {}".format(variant_name, variant_cls.description) + for variant_name, variant_cls in self.variants.items() + ] + + variant_text = "\n".join(["\nVariants:\n"] + variant_items) + self._help_info += variant_text + return self._help_info # A tuple of 3-tuples with (failure_message, functor, expected_results) @@ -156,6 +186,84 @@ """ return self._dependent_services + def _get_variants(self) -> Dict[str, Type["UAEntitlement"]]: + return {} + + def _get_contract_variants(self) -> Set[str]: + """ + Fetch all available variants defined in the Contract Server response + """ + valid_variants = set() + entitlement_cfg = self._base_entitlement_cfg() + + overrides = entitlement_cfg.get("entitlement", {}).get("overrides", []) + for override in overrides: + variant = override.get("selector", {}).get("variant") + if variant: + valid_variants.add(variant) + + return valid_variants + + def _get_valid_variants(self) -> Dict[str, Type["UAEntitlement"]]: + service_variants = self._get_variants() + contract_variants = self._get_contract_variants() + + if "generic" in service_variants: + valid_variants = {"generic": service_variants["generic"]} + else: + valid_variants = {} + + for variant in sorted(contract_variants): + if variant in service_variants: + valid_variants[variant] = service_variants[variant] + + return valid_variants if len(valid_variants) > 1 else {} + + @property + def variants(self) -> Dict[str, Type["UAEntitlement"]]: + """ + Return a list of services that are considered a variant + of the main service. + """ + if self.is_variant: + return {} + return self._get_valid_variants() + + @property + def other_variants(self) -> Dict[str, Type["UAEntitlement"]]: + """ + On a variant, return the other variants of the main service. + On a non-variant, returns empty. + """ + if not self.is_variant: + return {} + return { + name: cls + for name, cls in self._get_valid_variants().items() + if name != self.variant_name + } + + @property + def enabled_variant(self) -> Optional["UAEntitlement"]: + """ + On an enabled service class, return the variant that is enabled. + Return None if no variants exist or none are enabled (e.g. access-only) + """ + for variant_cls in self.variants.values(): + if variant_cls.variant_name == "generic": + continue + variant = variant_cls( + cfg=self.cfg, + assume_yes=self.assume_yes, + allow_beta=self.allow_beta, + called_name=self._called_name, + access_only=self.access_only, + ) + status, _ = variant.application_status() + if status == ApplicationStatus.ENABLED: + return variant + return None + # Any custom messages to emit to the console or callables which are # handled at pre_enable, pre_disable, pre_install or post_enable stages @property @@ -195,6 +303,24 @@ return self._valid_service + def _base_entitlement_cfg(self): + return copy.deepcopy( + self.cfg.machine_token_file.entitlements.get(self.name, {}) + ) + + @property + def entitlement_cfg(self): + entitlement_cfg = self._base_entitlement_cfg() + + if not self.is_variant or not entitlement_cfg: + return entitlement_cfg + + contract.apply_contract_overrides( + orig_access=entitlement_cfg, variant=self.variant_name + ) + + return entitlement_cfg + def can_enable(self) -> Tuple[bool, Optional[CanEnableFailure]]: """ Report whether or not enabling is possible for the entitlement. 
@@ -674,9 +800,7 @@ platform passes all defined affordances, INAPPLICABLE if it doesn't meet all of the provided constraints. """ - entitlement_cfg = self.cfg.machine_token_file.entitlements.get( - self.name - ) + entitlement_cfg = self.entitlement_cfg if not entitlement_cfg: return ( ApplicabilityStatus.APPLICABLE, @@ -686,19 +810,18 @@ if functor() != expected_result: return ApplicabilityStatus.INAPPLICABLE, error_message affordances = entitlement_cfg["entitlement"].get("affordances", {}) - platform = system.get_platform_info() affordance_arches = affordances.get("architectures", None) if ( self.affordance_check_arch and affordance_arches is not None - and platform["arch"] not in affordance_arches + and system.get_dpkg_arch() not in affordance_arches ): deduplicated_arches = util.deduplicate_arches(affordance_arches) return ( ApplicabilityStatus.INAPPLICABLE, messages.INAPPLICABLE_ARCH.format( title=self.title, - arch=platform["arch"], + arch=system.get_dpkg_arch(), supported_arches=", ".join(deduplicated_arches), ), ) @@ -706,12 +829,13 @@ if ( self.affordance_check_series and affordance_series is not None - and platform["series"] not in affordance_series + and system.get_release_info().series not in affordance_series ): return ( ApplicabilityStatus.INAPPLICABLE, messages.INAPPLICABLE_SERIES.format( - title=self.title, series=platform["version"] + title=self.title, + series=system.get_release_info().pretty_version, ), ) kernel_info = system.get_kernel_info() @@ -759,15 +883,19 @@ and kernel_info.minor < min_kern_minor ): return ApplicabilityStatus.INAPPLICABLE, invalid_msg + + affordances_platform_check = affordances.get("platformChecks", {}) + ret, reason = self.verify_platform_checks(affordances_platform_check) + + if not ret: + return (ApplicabilityStatus.INAPPLICABLE, reason) return ApplicabilityStatus.APPLICABLE, None def contract_status(self) -> ContractStatus: """Return whether the user is entitled to the entitlement or not""" - if not self.cfg.is_attached: + if not _is_attached(self.cfg).is_attached: return ContractStatus.UNENTITLED - entitlement_cfg = self.cfg.machine_token_file.entitlements.get( - self.name, {} - ) + entitlement_cfg = self.entitlement_cfg if entitlement_cfg and entitlement_cfg["entitlement"].get("entitled"): return ContractStatus.ENTITLED return ContractStatus.UNENTITLED @@ -779,9 +907,7 @@ applicability, details = self.applicability_status() if applicability != ApplicabilityStatus.APPLICABLE: return UserFacingStatus.INAPPLICABLE, details - entitlement_cfg = self.cfg.machine_token_file.entitlements.get( - self.name - ) + entitlement_cfg = self.entitlement_cfg if not entitlement_cfg: return ( UserFacingStatus.UNAVAILABLE, @@ -836,11 +962,8 @@ def is_access_expired(self) -> bool: """Return entitlement access info as stale and needing refresh.""" - entitlement_contract = self.cfg.machine_token_file.entitlements.get( - self.name, {} - ) # TODO(No expiry per resource in MVP yet) - expire_str = entitlement_contract.get("expires") + expire_str = self.entitlement_cfg.get("expires") if not expire_str: return False expiry = datetime.strptime(expire_str, "%Y-%m-%dT%H:%M:%S.%fZ") diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/esm.py ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/esm.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/esm.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/esm.py 2023-05-30 19:02:35.000000000 +0000 @@ -22,7 +22,7 @@ return (ROSEntitlement, 
ROSUpdatesEntitlement) def _perform_enable(self, silent: bool = False) -> bool: - from uaclient.jobs.update_messaging import update_motd_messages + from uaclient.timer.update_messaging import update_motd_messages enable_performed = super()._perform_enable(silent=silent) if enable_performed: @@ -31,7 +31,7 @@ return enable_performed def setup_local_esm_repo(self) -> None: - series = system.get_platform_info()["series"] + series = system.get_release_info().series # Ugly? Yes, but so is python < 3.8 without removeprefix assert self.name.startswith("esm-") esm_name = self.name[len("esm-") :] @@ -78,7 +78,7 @@ def disable( self, silent=False ) -> Tuple[bool, Union[None, CanDisableFailure]]: - from uaclient.jobs.update_messaging import update_motd_messages + from uaclient.timer.update_messaging import update_motd_messages disable_performed, fail = super().disable(silent=silent) if disable_performed: @@ -98,7 +98,7 @@ def disable( self, silent=False ) -> Tuple[bool, Union[None, CanDisableFailure]]: - from uaclient.jobs.update_messaging import update_motd_messages + from uaclient.timer.update_messaging import update_motd_messages disable_performed, fail = super().disable(silent=silent) if disable_performed: diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/fips.py ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/fips.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/fips.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/fips.py 2023-06-27 00:49:37.000000000 +0000 @@ -27,6 +27,7 @@ "strongswan-hmac", "openssh-client", "openssh-server", + "shim-signed", ] CONDITIONAL_PACKAGES_OPENSSH_HMAC = [ "openssh-client-hmac", @@ -112,6 +113,7 @@ "libgcrypt20", "libgcrypt20-hmac", "fips-initramfs-generic", + "shim-signed", ] @property @@ -126,7 +128,7 @@ 2. Install the corresponding hmac version of that package when available. """ - series = system.get_platform_info().get("series", "") + series = system.get_release_info().series if system.is_container(): return FIPS_CONTAINER_CONDITIONAL_PACKAGES.get(series, []) @@ -246,7 +248,7 @@ if cloud_id is None: cloud_id = "" - series = system.get_platform_info().get("series", "") + series = system.get_release_info().series blocked_message = messages.FIPS_BLOCK_ON_CLOUD.format( series=series.title(), cloud=cloud_titles.get(cloud_id) ) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/__init__.py ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/__init__.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/__init__.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/__init__.py 2023-05-30 19:02:35.000000000 +0000 @@ -29,7 +29,7 @@ ] # type: List[Type[UAEntitlement]] -def entitlement_factory(cfg: UAConfig, name: str): +def entitlement_factory(cfg: UAConfig, name: str, variant: str = ""): """Returns a UAEntitlement class based on the provided name. The return type is Optional[Type[UAEntitlement]]. @@ -42,8 +42,14 @@ entitlement with the given name is found, then raises this error. 
""" for entitlement in ENTITLEMENT_CLASSES: - if name in entitlement(cfg=cfg).valid_names: - return entitlement + ent = entitlement(cfg=cfg) + if name in ent.valid_names: + if not variant: + return entitlement + elif variant in ent.variants: + return ent.variants[variant] + else: + raise EntitlementNotFoundError(variant) raise EntitlementNotFoundError(name) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/livepatch.py ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/livepatch.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/livepatch.py 2023-04-06 13:49:20.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/livepatch.py 2023-06-01 18:49:33.000000000 +0000 @@ -224,39 +224,49 @@ if not livepatch.is_livepatch_installed(): return (ApplicationStatus.DISABLED, messages.LIVEPATCH_NOT_ENABLED) - try: - system.subp( - [livepatch.LIVEPATCH_CMD, "status"], - retry_sleeps=LIVEPATCH_RETRIES, - ) - except exceptions.ProcessExecutionError as e: + if livepatch.status() is None: # TODO(May want to parse INACTIVE/failure assessment) - logging.debug("Livepatch not enabled. %s", str(e)) return ( ApplicationStatus.DISABLED, - messages.NamedMessage(name="", msg=str(e)), + messages.LIVEPATCH_APPLICATION_STATUS_CLIENT_FAILURE, ) return status def enabled_warning_status( self, ) -> Tuple[bool, Optional[messages.NamedMessage]]: - if livepatch.on_supported_kernel() is False: + support = livepatch.on_supported_kernel() + if support == livepatch.LivepatchSupport.UNSUPPORTED: kernel_info = system.get_kernel_info() - arch = system.get_dpkg_arch() return ( True, messages.LIVEPATCH_KERNEL_NOT_SUPPORTED.format( - version=kernel_info.uname_release, arch=arch + version=kernel_info.uname_release, + arch=kernel_info.uname_machine_arch, ), ) - # if on_supported_kernel returns None we default to no warning + if support == livepatch.LivepatchSupport.KERNEL_EOL: + kernel_info = system.get_kernel_info() + return ( + True, + messages.LIVEPATCH_KERNEL_EOL.format( + version=kernel_info.uname_release, + arch=kernel_info.uname_machine_arch, + ), + ) + if support == livepatch.LivepatchSupport.KERNEL_UPGRADE_REQUIRED: + return ( + True, + messages.LIVEPATCH_KERNEL_UPGRADE_REQUIRED, + ) + # if on_supported_kernel returns UNKNOWN we default to no warning # because there would be no way for a user to resolve the warning return False, None def status_description_override(self): if ( - livepatch.on_supported_kernel() is False + livepatch.on_supported_kernel() + == livepatch.LivepatchSupport.UNSUPPORTED and not system.is_container() ): return messages.LIVEPATCH_KERNEL_NOT_SUPPORTED_DESCRIPTION diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/realtime.py ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/realtime.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/realtime.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/realtime.py 2023-05-30 19:02:35.000000000 +0000 @@ -1,8 +1,8 @@ -from typing import Optional, Tuple # noqa: F401 +from typing import Any, Dict, Optional, Tuple, Type # noqa: F401 from uaclient import apt, event_logger, messages, system, util from uaclient.entitlements import repo -from uaclient.entitlements.base import IncompatibleService +from uaclient.entitlements.base import IncompatibleService, UAEntitlement from uaclient.types import ( # noqa: F401 MessagingOperations, MessagingOperationsDict, @@ -32,6 +32,13 @@ event.needs_reboot(reboot_required) return reboot_required 
+ def _get_variants(self) -> Dict[str, Type[UAEntitlement]]: + return { + GenericRealtime.variant_name: GenericRealtime, + NvidiaTegraRealtime.variant_name: NvidiaTegraRealtime, + IntelIotgRealtime.variant_name: IntelIotgRealtime, + } + @property def incompatible_services(self) -> Tuple[IncompatibleService, ...]: from uaclient.entitlements.fips import ( @@ -101,3 +108,58 @@ list(packages), messages.DISABLE_FAILED_TMPL.format(title=self.title), ) + + +class RealtimeVariant(RealtimeKernelEntitlement): + @property + def incompatible_services(self) -> Tuple[IncompatibleService, ...]: + incompatible_variants = tuple( + [ + IncompatibleService( + cls, + messages.REALTIME_VARIANT_INCOMPATIBLE.format( + service=self.title, variant=cls.title + ), + ) + for name, cls in self.other_variants.items() + ] + ) + return super().incompatible_services + incompatible_variants + + +class GenericRealtime(RealtimeVariant): + variant_name = "generic" + title = "Real-time kernel" + description = "Generic version of the RT kernel (default)" + is_variant = True + check_packages_are_installed = True + + +class NvidiaTegraRealtime(RealtimeVariant): + variant_name = "nvidia-tegra" + title = "Real-time NVIDIA Tegra Kernel" + description = "RT kernel optimized for NVIDIA Tegra platform" + is_variant = True + check_packages_are_installed = True + + +class IntelIotgRealtime(RealtimeVariant): + variant_name = "intel-iotg" + title = "Real-time Intel IOTG Kernel" + description = "RT kernel optimized for Intel IOTG platform" + is_variant = True + check_packages_are_installed = True + + def verify_platform_checks( + self, platform_checks: Dict[str, Any] + ) -> Tuple[bool, Optional[messages.NamedMessage]]: + vendor_id = system.get_cpu_info().vendor_id + cpu_vendor_ids = platform_checks.get("cpu_vendor_ids", []) + if vendor_id in cpu_vendor_ids: + return True, None + else: + return False, messages.INAPPLICABLE_VENDOR_NAME.format( + title=self.title, + vendor=vendor_id, + supported_vendors=",".join(cpu_vendor_ids), + ) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/repo.py ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/repo.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/repo.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/repo.py 2023-06-27 00:49:37.000000000 +0000 @@ -31,6 +31,10 @@ # GH: #1084 call apt in noninteractive mode apt_noninteractive = False + # Check if the requested packages are installed to inform if + # the service is enabled or not + check_packages_are_installed = False + # Optional repo pin priority in subclass @property def repo_pin_priority(self) -> Union[int, str, None]: @@ -41,9 +45,7 @@ """debs to install on enablement""" packages = [] - entitlement = self.cfg.machine_token_file.entitlements.get( - self.name, {} - ).get("entitlement", {}) + entitlement = self.entitlement_cfg.get("entitlement", {}) if entitlement: directives = entitlement.get("directives", {}) @@ -99,9 +101,12 @@ def application_status( self, ) -> Tuple[ApplicationStatus, Optional[messages.NamedMessage]]: - entitlement_cfg = self.cfg.machine_token_file.entitlements.get( - self.name, {} + current_status = ( + ApplicationStatus.DISABLED, + messages.SERVICE_NOT_CONFIGURED.format(title=self.title), ) + + entitlement_cfg = self.entitlement_cfg directives = entitlement_cfg.get("entitlement", {}).get( "directives", {} ) @@ -116,14 +121,22 @@ ) match = re.search(r"{}/ubuntu".format(repo_url), policy) if match: - return ( + current_status = ( 
ApplicationStatus.ENABLED, messages.SERVICE_IS_ACTIVE.format(title=self.title), ) - return ( - ApplicationStatus.DISABLED, - messages.SERVICE_NOT_CONFIGURED.format(title=self.title), - ) + + if self.check_packages_are_installed: + for package in self.packages: + if not apt.is_installed(package): + return ( + ApplicationStatus.DISABLED, + messages.SERVICE_DISABLED_MISSING_PACKAGE.format( + service=self.name, package=package + ), + ) + + return current_status def _check_apt_url_is_applied(self, apt_url): """Check if apt url delta should be applied. @@ -240,14 +253,14 @@ event.info("Installing {title} packages".format(title=self.title)) if self.apt_noninteractive: - env = {"DEBIAN_FRONTEND": "noninteractive"} + override_env_vars = {"DEBIAN_FRONTEND": "noninteractive"} apt_options = [ "--allow-downgrades", '-o Dpkg::Options::="--force-confdef"', '-o Dpkg::Options::="--force-confold"', ] else: - env = {} + override_env_vars = None apt_options = [] try: @@ -256,7 +269,7 @@ packages=package_list, apt_options=apt_options, error_msg=msg.msg, - env=env, + override_env_vars=override_env_vars, ) except exceptions.UserFacingError: if cleanup_on_failure: @@ -302,7 +315,7 @@ http_proxy=http_proxy, https_proxy=https_proxy, proxy_scope=scope ) repo_filename = self.repo_list_file_tmpl.format(name=self.name) - resource_cfg = self.cfg.machine_token_file.entitlements.get(self.name) + resource_cfg = self.entitlement_cfg directives = resource_cfg["entitlement"].get("directives", {}) obligations = resource_cfg["entitlement"].get("obligations", {}) token = resource_cfg.get("resourceToken") @@ -313,7 +326,7 @@ # resource access for tokens. We want to refresh this every # enable call because it is not refreshed by `pro refresh`. client = contract.UAContractClient(self.cfg) - machine_access = client.request_resource_machine_access( + machine_access = client.get_resource_machine_access( machine_token, self.name ) if machine_access: @@ -399,7 +412,7 @@ :param run_apt_update: If after removing the apt update command after removing the apt files. 
""" - series = system.get_platform_info()["series"] + series = system.get_release_info().series repo_filename = self.repo_list_file_tmpl.format(name=self.name) entitlement = self.cfg.machine_token_file.entitlements[self.name].get( "entitlement", {} diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/tests/test_base.py ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/tests/test_base.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/tests/test_base.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/tests/test_base.py 2023-05-30 19:02:35.000000000 +0000 @@ -1,11 +1,12 @@ """Tests related to uaclient.entitlement.base module.""" +import copy import logging from typing import Any, Dict, Optional, Tuple import mock import pytest -from uaclient import messages, util +from uaclient import messages, system, util from uaclient.entitlements import base from uaclient.entitlements.entitlement_status import ( ApplicabilityStatus, @@ -38,9 +39,14 @@ dependent_services=None, required_services=None, blocking_incompatible_services=None, + variant_name="", **kwargs ): - super().__init__(cfg, allow_beta=allow_beta, access_only=access_only) + super().__init__( + cfg, + allow_beta=allow_beta, + access_only=access_only, + ) self.supports_access_only = supports_access_only self._disable = disable self._enable = enable @@ -49,6 +55,7 @@ self._dependent_services = dependent_services self._required_services = required_services self._blocking_incompatible_services = blocking_incompatible_services + self._variant_name = variant_name def _perform_disable(self, **kwargs): self._application_status = ( @@ -72,6 +79,14 @@ else: return super().blocking_incompatible_services() + @property + def variant_name(self): + return self._variant_name + + @property + def is_variant(self): + return False if not self._variant_name else True + @pytest.fixture def concrete_entitlement_factory(FakeConfig): @@ -93,7 +108,8 @@ enable: bool = False, disable: bool = False, dependent_services: Tuple[Any, ...] = None, - required_services: Tuple[Any, ...] = None + required_services: Tuple[Any, ...] 
= None, + variant_name: str = "" ) -> ConcreteTestEntitlement: cfg = FakeConfig() machineToken = { @@ -125,6 +141,7 @@ disable=disable, dependent_services=dependent_services, required_services=required_services, + variant_name=variant_name, ) return factory @@ -945,7 +962,10 @@ ), ) @mock.patch( - "uaclient.system.get_platform_info", return_value={"series": "example"} + "uaclient.system.get_release_info", + return_value=system.ReleaseInfo( + distribution="", release="", series="example", pretty_version="" + ), ) def test_process_contract_deltas_does_nothing_when_delta_remains_entitled( self, m_platform_info, concrete_entitlement_factory, orig_access, delta @@ -1061,3 +1081,149 @@ assert 1 == m_enable.call_count assert entitlement.allow_beta + + +class TestEntitlementCfg: + @pytest.mark.parametrize( + "variant_name", ((""), ("test-variant"), ("invalid-variant")) + ) + def test_entitlement_cfg_respects_variant( + self, variant_name, concrete_entitlement_factory + ): + entitlement = concrete_entitlement_factory( + entitled=True, + applicability_status=(ApplicabilityStatus.APPLICABLE, ""), + application_status=(ApplicationStatus.DISABLED, ""), + variant_name=variant_name, + ) + base_ent_dict = { + "entitlement": { + "entitled": True, + "obligations": {"enableByDefault": False}, + "affordances": { + "architectures": [ + "amd64", + "ppc64el", + ], + "series": ["xenial", "bionic", "focal"], + }, + "directives": { + "additionalPackages": ["test-package"], + "suites": ["xenial", "bionic", "focal"], + }, + "overrides": [ + { + "directives": { + "additionalPackages": ["test-package-variant"] + }, + "selector": { + "variant": "test-variant", + }, + }, + { + "directives": { + "additionalPackages": ["test-package-unused"] + }, + "selector": {"cloud": "aws", "series": "focal"}, + }, + ], + "type": "test", + } + } + + expected_entitlement = copy.deepcopy(base_ent_dict) + if variant_name == "test-variant": + expected_entitlement["entitlement"]["directives"][ + "additionalPackages" + ] = ["test-package-variant"] + + with mock.patch.object( + entitlement, "_base_entitlement_cfg" + ) as m_ent_cfg: + m_ent_cfg.return_value = base_ent_dict + actual_entitlement = entitlement.entitlement_cfg + + assert expected_entitlement == actual_entitlement + + +class TestVariant: + @pytest.mark.parametrize( + "contract_variants", + ( + ([]), + (["not-found-variant"]), + (["test_variant"]), + (["test_variant", "test_variant2"]), + ), + ) + @mock.patch("uaclient.entitlements.base.UAEntitlement._get_variants") + @mock.patch( + "uaclient.entitlements.base.UAEntitlement._get_contract_variants" + ) + def test_variant_property( + self, + m_get_contract_variants, + m_get_variants, + contract_variants, + concrete_entitlement_factory, + ): + entitlement = concrete_entitlement_factory() + service_variants = {"test_variant": "test", "generic": "generic"} + m_get_contract_variants.return_value = contract_variants + m_get_variants.return_value = service_variants + actual_variants = entitlement.variants + + expected_variants = ( + {} if "test_variant" not in contract_variants else service_variants + ) + assert expected_variants == actual_variants + assert 1 == m_get_contract_variants.call_count + assert 1 == m_get_variants.call_count + + +class TestGetContractVariant: + @pytest.mark.parametrize( + "entitlement_cfg", + ( + ({}), + ( + { + "entitlement": { + "overrides": [ + { + "selector": { + "variant": "test1", + }, + }, + { + "selector": { + "variant": "test2", + } + }, + { + "selector": { + "cloud": "cloud", + }, + }, + ], + } + } + ), 
+ ), + ) + @mock.patch( + "uaclient.entitlements.base.UAEntitlement._base_entitlement_cfg" + ) + def test_get_contract_variant( + self, m_base_ent_cfg, entitlement_cfg, concrete_entitlement_factory + ): + entitlement = concrete_entitlement_factory() + m_base_ent_cfg.return_value = entitlement_cfg + + actual_contract_variants = entitlement._get_contract_variants() + expected_contract_variants = ( + set() if not entitlement_cfg else set(["test1", "test2"]) + ) + + assert expected_contract_variants == actual_contract_variants + assert 1 == m_base_ent_cfg.call_count diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/tests/test_cc.py ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/tests/test_cc.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/tests/test_cc.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/tests/test_cc.py 2023-06-27 00:49:37.000000000 +0000 @@ -1,14 +1,12 @@ """Tests related to uaclient.entitlement.base module.""" -import copy import itertools import os.path -from types import MappingProxyType import mock import pytest -from uaclient import apt, messages, status +from uaclient import apt, messages, status, system from uaclient.entitlements.cc import CC_README, CommonCriteriaEntitlement from uaclient.entitlements.tests.conftest import machine_token @@ -31,16 +29,6 @@ ) -PLATFORM_INFO_SUPPORTED = MappingProxyType( - { - "arch": "s390x", - "series": "xenial", - "kernel": "4.15.0-00-generic", - "version": "16.04 LTS (Xenial Xerus)", - } -) - - class TestCommonCriteriaEntitlementUserFacingStatus: @pytest.mark.parametrize( "arch,series,version,details", @@ -61,10 +49,12 @@ ), ), ) - @mock.patch("uaclient.system.get_platform_info") + @mock.patch("uaclient.system.get_dpkg_arch") + @mock.patch("uaclient.system.get_release_info") def test_inapplicable_on_invalid_affordances( self, - m_platform_info, + m_release_info, + m_dpkg_arch, arch, series, version, @@ -72,11 +62,10 @@ FakeConfig, ): """Test invalid affordances result in inapplicable status.""" - unsupported_info = copy.deepcopy(dict(PLATFORM_INFO_SUPPORTED)) - unsupported_info["arch"] = arch - unsupported_info["series"] = series - unsupported_info["version"] = version - m_platform_info.return_value = unsupported_info + m_release_info.return_value = system.ReleaseInfo( + distribution="", release="", series=series, pretty_version=version + ) + m_dpkg_arch.return_value = arch cfg = FakeConfig().for_attached_machine( machine_token=CC_MACHINE_TOKEN, ) @@ -88,12 +77,16 @@ class TestCommonCriteriaEntitlementCanEnable: @mock.patch("uaclient.system.subp", return_value=("", "")) - @mock.patch("uaclient.system.get_platform_info") + @mock.patch("uaclient.system.get_dpkg_arch") + @mock.patch("uaclient.system.get_release_info") def test_can_enable_true_on_entitlement_inactive( - self, m_platform_info, _m_subp, capsys, FakeConfig + self, m_release_info, m_dpkg_arch, _m_subp, capsys, FakeConfig ): """When entitlement is INACTIVE, can_enable returns True.""" - m_platform_info.return_value = PLATFORM_INFO_SUPPORTED + m_release_info.return_value = system.ReleaseInfo( + distribution="", release="", series="xenial", pretty_version="" + ) + m_dpkg_arch.return_value = "s390x" cfg = FakeConfig().for_attached_machine( machine_token=CC_MACHINE_TOKEN, ) @@ -117,12 +110,14 @@ @mock.patch("uaclient.system.should_reboot") @mock.patch("uaclient.system.subp") @mock.patch("uaclient.apt.get_apt_cache_policy") - @mock.patch("uaclient.system.get_platform_info") + 
@mock.patch("uaclient.system.get_dpkg_arch") + @mock.patch("uaclient.system.get_release_info") @mock.patch("uaclient.contract.apply_contract_overrides") def test_enable_configures_apt_sources_and_auth_files( self, _m_contract_overrides, - m_platform_info, + m_release_info, + m_dpkg_arch, m_apt_cache_policy, m_subp, m_should_reboot, @@ -138,13 +133,12 @@ m_subp.return_value = ("fakeout", "") m_apt_cache_policy.return_value = "fakeout" m_should_reboot.return_value = False + m_release_info.return_value = system.ReleaseInfo( + distribution="", release="", series="xenial", pretty_version="" + ) + m_dpkg_arch.return_value = "s390x" original_exists = os.path.exists - def fake_platform(key=None): - if key == "series": - return PLATFORM_INFO_SUPPORTED[key] - return PLATFORM_INFO_SUPPORTED - def exists(path): if path == apt.APT_METHOD_HTTPS_FILE: return not apt_transport_https @@ -156,7 +150,6 @@ ) return original_exists(path) - m_platform_info.side_effect = fake_platform cfg = FakeConfig().for_attached_machine( machine_token=CC_MACHINE_TOKEN, ) @@ -199,7 +192,7 @@ ["apt-get", "install", "--assume-yes"] + prerequisite_pkgs, capture=True, retry_sleeps=apt.APT_RETRIES, - env={}, + override_env_vars=None, ) ) else: @@ -211,7 +204,7 @@ ["apt-get", "update"], capture=True, retry_sleeps=apt.APT_RETRIES, - env={}, + override_env_vars=None, ), mock.call( [ @@ -225,7 +218,7 @@ + entitlement.packages, capture=True, retry_sleeps=apt.APT_RETRIES, - env={"DEBIAN_FRONTEND": "noninteractive"}, + override_env_vars={"DEBIAN_FRONTEND": "noninteractive"}, ), ] ) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/tests/test_cis.py ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/tests/test_cis.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/tests/test_cis.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/tests/test_cis.py 2023-06-27 00:49:37.000000000 +0000 @@ -3,7 +3,7 @@ import mock import pytest -from uaclient import apt, messages +from uaclient import apt, messages, system from uaclient.entitlements.cis import CIS_DOCS_URL, CISEntitlement from uaclient.entitlements.entitlement_status import ApplicationStatus @@ -40,10 +40,12 @@ @mock.patch("uaclient.apt.setup_apt_proxy") @mock.patch("uaclient.system.should_reboot") @mock.patch("uaclient.system.subp") - @mock.patch("uaclient.system.get_platform_info") + @mock.patch("uaclient.system.get_kernel_info") + @mock.patch("uaclient.system.get_release_info") def test_enable_configures_apt_sources_and_auth_files( self, - m_platform_info, + m_release_info, + m_kernel_info, m_subp, m_should_reboot, m_setup_apt_proxy, @@ -60,7 +62,20 @@ return info[key] return info - m_platform_info.side_effect = fake_platform + m_release_info.return_value = system.ReleaseInfo( + distribution="", release="", series="xenial", pretty_version="" + ) + m_kernel_info.return_value = system.KernelInfo( + uname_machine_arch="x86_64", + uname_release="4.15.0-00-generic", + proc_version_signature_version=None, + build_date=None, + major=None, + minor=None, + patch=None, + abi=None, + flavor=None, + ) m_subp.return_value = ("fakeout", "") m_apt_policy.return_value = "fakeout" m_should_reboot.return_value = False @@ -91,7 +106,7 @@ ["apt-get", "update"], capture=True, retry_sleeps=apt.APT_RETRIES, - env={}, + override_env_vars=None, ), mock.call( [ @@ -105,7 +120,7 @@ + entitlement.packages, capture=True, retry_sleeps=apt.APT_RETRIES, - env={"DEBIAN_FRONTEND": "noninteractive"}, + override_env_vars={"DEBIAN_FRONTEND": 
"noninteractive"}, ), ] diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/tests/test_entitlements.py ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/tests/test_entitlements.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/tests/test_entitlements.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/tests/test_entitlements.py 2023-05-30 19:02:35.000000000 +0000 @@ -2,7 +2,7 @@ import mock import pytest -from uaclient import entitlements, exceptions +from uaclient import entitlements, exceptions, messages class TestValidServices: @@ -51,7 +51,9 @@ class TestEntitlementFactory: def test_entitlement_factory(self, FakeConfig): m_cls_1 = mock.MagicMock() + m_variant = mock.MagicMock() m_cls_1.return_value.valid_names = ["ent1", "othername"] + m_cls_1.return_value.variants = {"variant1": m_variant} m_cls_2 = mock.MagicMock() m_cls_2.return_value.valid_names = ["ent2"] @@ -66,9 +68,23 @@ assert m_cls_2 == entitlements.entitlement_factory( cfg=cfg, name="ent2" ) + assert m_variant == entitlements.entitlement_factory( + cfg=cfg, name="ent1", variant="variant1" + ) with pytest.raises(exceptions.EntitlementNotFoundError): entitlements.entitlement_factory(cfg=cfg, name="nonexistent") + with mock.patch.object(entitlements, "ENTITLEMENT_CLASSES", ents): + with pytest.raises(exceptions.EntitlementNotFoundError) as excinfo: + entitlements.entitlement_factory( + cfg=cfg, name="ent1", variant="nonexistent" + ) + + assert ( + messages.ENTITLEMENT_NOT_FOUND.format(name="nonexistent").msg + == excinfo.value.msg + ) + class TestSortEntitlements: def test_disable_order(self, FakeConfig): diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/tests/test_esm.py ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/tests/test_esm.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/tests/test_esm.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/tests/test_esm.py 2023-05-30 19:02:35.000000000 +0000 @@ -8,7 +8,6 @@ M_PATH = "uaclient.entitlements.esm.ESMInfraEntitlement." M_REPOPATH = "uaclient.entitlements.repo." 
-M_GETPLATFORM = M_REPOPATH + "system.get_platform_info" @pytest.fixture(params=[ESMAppsEntitlement, ESMInfraEntitlement]) @@ -16,9 +15,10 @@ return entitlement_factory(request.param, suites=["xenial"]) -@mock.patch("uaclient.jobs.update_messaging.update_motd_messages") +@mock.patch("uaclient.timer.update_messaging.update_motd_messages") @mock.patch( - "uaclient.system.get_platform_info", return_value={"series": "xenial"} + "uaclient.system.get_release_info", + return_value=mock.MagicMock(series="xenial"), ) class TestESMEntitlementDisable: @pytest.mark.parametrize("silent", [False, True]) @@ -26,7 +26,7 @@ def test_disable_returns_false_on_can_disable_false_and_does_nothing( self, m_can_disable, - _m_platform_info, + _m_get_release_info, m_update_apt_and_motd_msgs, silent, ): @@ -57,7 +57,7 @@ self, m_active_esm, m_lts, - _m_platform_info, + _m_get_release_info, m_update_apt_and_motd_msgs, is_active_esm, is_lts, @@ -96,7 +96,7 @@ class TestUpdateESMCaches: @pytest.mark.parametrize("file_exists", (False, True)) @mock.patch("uaclient.apt.os.path.exists") - @mock.patch("uaclient.apt.system.get_platform_info") + @mock.patch("uaclient.apt.system.get_release_info") @mock.patch("uaclient.apt.system.write_file") @mock.patch("uaclient.apt.os.makedirs") @mock.patch("uaclient.apt.gpg.export_gpg_key") @@ -105,12 +105,12 @@ m_export_gpg, m_makedirs, m_write_file, - m_get_platform_info, + m_get_release_info, m_exists, file_exists, entitlement, ): - m_get_platform_info.return_value = {"series": "example"} + m_get_release_info.return_value = mock.MagicMock(series="example") m_exists.return_value = file_exists entitlement.setup_local_esm_repo() diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/tests/test_fips.py ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/tests/test_fips.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/tests/test_fips.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/tests/test_fips.py 2023-06-27 00:49:37.000000000 +0000 @@ -32,7 +32,6 @@ M_PATH = "uaclient.entitlements.fips." M_LIVEPATCH_PATH = "uaclient.entitlements.livepatch.LivepatchEntitlement." M_REPOPATH = "uaclient.entitlements.repo." 
-M_GETPLATFORM = M_REPOPATH + "system.get_platform_info" FIPS_ADDITIONAL_PACKAGES = ["ubuntu-fips"] @@ -92,10 +91,10 @@ ), ) @mock.patch("uaclient.system.is_container") - @mock.patch("uaclient.system.get_platform_info") + @mock.patch("uaclient.system.get_release_info") def test_conditional_packages( self, - m_get_platform_info, + m_get_release_info, m_is_container, series, is_container, @@ -103,7 +102,7 @@ entitlement, ): """Test conditional package respect series restrictions""" - m_get_platform_info.return_value = {"series": series} + m_get_release_info.return_value = mock.MagicMock(series=series) m_is_container.return_value = is_container conditional_packages = entitlement.conditional_packages @@ -303,7 +302,10 @@ mock.patch("uaclient.util.handle_message_operations") ) stack.enter_context( - mock.patch(M_GETPLATFORM, return_value={"series": "xenial"}) + mock.patch( + "uaclient.system.get_release_info", + return_value=mock.MagicMock(series="xenial"), + ) ) stack.enter_context( mock.patch( @@ -363,7 +365,7 @@ + patched_packages, capture=True, retry_sleeps=apt.APT_RETRIES, - env={"DEBIAN_FRONTEND": "noninteractive"}, + override_env_vars={"DEBIAN_FRONTEND": "noninteractive"}, ) ) @@ -381,7 +383,7 @@ ], capture=True, retry_sleeps=apt.APT_RETRIES, - env={"DEBIAN_FRONTEND": "noninteractive"}, + override_env_vars={"DEBIAN_FRONTEND": "noninteractive"}, ) ) @@ -390,13 +392,13 @@ ["apt-mark", "showholds"], capture=True, retry_sleeps=apt.APT_RETRIES, - env={}, + override_env_vars=None, ), mock.call( ["apt-get", "update"], capture=True, retry_sleeps=apt.APT_RETRIES, - env={}, + override_env_vars=None, ), ] subp_calls += install_cmd @@ -475,11 +477,12 @@ @mock.patch("uaclient.apt.setup_apt_proxy") @mock.patch("uaclient.apt.add_auth_apt_repo") @mock.patch( - "uaclient.system.get_platform_info", return_value={"series": "xenial"} + "uaclient.system.get_release_info", + return_value=mock.MagicMock(series="xenial"), ) def test_enable_returns_false_on_missing_suites_directive( self, - m_platform_info, + m_get_release_info, m_add_apt, _m_setup_apt_proxy, fips_entitlement_factory, @@ -525,7 +528,10 @@ mock.patch.object(entitlement, "remove_apt_config") ) stack.enter_context( - mock.patch(M_GETPLATFORM, return_value={"series": "xenial"}) + mock.patch( + "uaclient.system.get_release_info", + return_value=mock.MagicMock(series="xenial"), + ) ) stack.enter_context(mock.patch(M_REPOPATH + "exists")) @@ -544,7 +550,7 @@ @mock.patch( "uaclient.entitlements.fips.get_cloud_type", return_value=("", None) ) - @mock.patch("uaclient.system.get_platform_info") + @mock.patch("uaclient.system.get_release_info") @mock.patch("uaclient.util.is_config_value_true", return_value=False) @mock.patch("uaclient.util.prompt_for_confirmation", return_value=False) @mock.patch("uaclient.util.handle_message_operations") @@ -555,14 +561,14 @@ m_handle_message_op, m_prompt, m_is_config_value_true, - m_platform_info, + m_get_release_info, m_get_cloud_type, entitlement_factory, ): fips_ent = entitlement_factory(FIPSEntitlement) m_handle_message_op.return_value = True base_path = "uaclient.entitlements.livepatch.LivepatchEntitlement" - m_platform_info.return_value = {"series": "test"} + m_get_release_info.return_value = mock.MagicMock(series="test") with mock.patch( "{}.application_status".format(base_path) @@ -651,7 +657,7 @@ ) assert expected_msg.strip() == reason.message.msg.strip() - @mock.patch("uaclient.system.get_platform_info") + @mock.patch("uaclient.system.get_release_info") @mock.patch("uaclient.entitlements.fips.get_cloud_type") 
@mock.patch("uaclient.util.handle_message_operations") @mock.patch("uaclient.system.is_container", return_value=False) @@ -660,12 +666,12 @@ m_is_container, m_handle_message_op, m_cloud_type, - m_platform_info, + m_get_release_info, entitlement, ): m_handle_message_op.return_value = True m_cloud_type.return_value = ("gce", None) - m_platform_info.return_value = {"series": "xenial"} + m_get_release_info.return_value = mock.MagicMock(series="xenial") base_path = "uaclient.entitlements.livepatch.LivepatchEntitlement" with mock.patch( @@ -678,7 +684,7 @@ Ubuntu Xenial does not provide a GCP optimized FIPS kernel""" assert expected_msg.strip() in reason.message.msg.strip() - @mock.patch("uaclient.system.get_platform_info") + @mock.patch("uaclient.system.get_release_info") @mock.patch("uaclient.util.is_config_value_true", return_value=False) @mock.patch("uaclient.entitlements.fips.get_cloud_type") @mock.patch("uaclient.util.handle_message_operations") @@ -689,12 +695,12 @@ m_handle_message_op, m_get_cloud_type, m_is_config_value_true, - m_platform_info, + m_get_release_info, entitlement, ): m_handle_message_op.return_value = True m_get_cloud_type.return_value = ("gce", None) - m_platform_info.return_value = {"series": "test"} + m_get_release_info.return_value = mock.MagicMock(series="test") ent_name = entitlement.name fips_cls_name = "FIPS" if ent_name == "fips" else "FIPSUpdates" @@ -817,14 +823,17 @@ class TestFIPSEntitlementRemovePackages: @pytest.mark.parametrize("installed_pkgs", (["sl"], ["ubuntu-fips", "sl"])) - @mock.patch(M_GETPLATFORM, return_value={"series": "xenial"}) + @mock.patch( + "uaclient.system.get_release_info", + return_value=mock.MagicMock(series="xenial"), + ) @mock.patch(M_PATH + "system.subp") @mock.patch(M_PATH + "apt.get_installed_packages_names") def test_remove_packages_only_removes_if_package_is_installed( self, m_get_installed_packages, m_subp, - _m_get_platform, + _m_get_release_info, installed_pkgs, entitlement, ): @@ -842,18 +851,25 @@ ], capture=True, retry_sleeps=apt.APT_RETRIES, - env={"DEBIAN_FRONTEND": "noninteractive"}, + override_env_vars={"DEBIAN_FRONTEND": "noninteractive"}, ) if "ubuntu-fips" in installed_pkgs: assert [remove_cmd] == m_subp.call_args_list else: assert 0 == m_subp.call_count - @mock.patch(M_GETPLATFORM, return_value={"series": "xenial"}) + @mock.patch( + "uaclient.system.get_release_info", + return_value=mock.MagicMock(series="xenial"), + ) @mock.patch(M_PATH + "system.subp") @mock.patch(M_PATH + "apt.get_installed_packages_names") def test_remove_packages_output_message_when_fail( - self, m_get_installed_packages, m_subp, _m_get_platform, entitlement + self, + m_get_installed_packages, + m_subp, + _m_get_release_info, + entitlement, ): m_get_installed_packages.return_value = ["ubuntu-fips"] m_subp.side_effect = exceptions.ProcessExecutionError(cmd="test") @@ -868,12 +884,13 @@ @mock.patch("uaclient.util.handle_message_operations", return_value=True) @mock.patch("uaclient.system.should_reboot", return_value=True) @mock.patch( - "uaclient.system.get_platform_info", return_value={"series": "xenial"} + "uaclient.system.get_release_info", + return_value=mock.MagicMock(series="xenial"), ) class TestFIPSEntitlementDisable: def test_disable_on_can_disable_true_removes_apt_config_and_packages( self, - _m_platform_info, + _m_get_release_info, _m_should_reboot, m_handle_message_operations, entitlement, @@ -1104,7 +1121,7 @@ '-o Dpkg::Options::="--force-confold"', ], error_msg="Could not enable {}.".format(entitlement.title), - 
env={"DEBIAN_FRONTEND": "noninteractive"}, + override_env_vars={"DEBIAN_FRONTEND": "noninteractive"}, ) ) @@ -1165,53 +1182,53 @@ class TestFipsEntitlementPackages: @mock.patch(M_PATH + "apt.get_installed_packages_names", return_value=[]) - @mock.patch("uaclient.system.get_platform_info") - def test_packages_is_list(self, m_platform_info, _mock, entitlement): + @mock.patch("uaclient.system.get_release_info") + def test_packages_is_list(self, m_get_release_info, _mock, entitlement): """RepoEntitlement.enable will fail if it isn't""" # Do not trigger metapackage override by # _replace_metapackage_on_cloud_instance - m_platform_info.return_value = {"series": "test"} + m_get_release_info.return_value = mock.MagicMock(series="test") assert isinstance(entitlement.packages, list) @mock.patch(M_PATH + "apt.get_installed_packages_names", return_value=[]) - @mock.patch("uaclient.system.get_platform_info") + @mock.patch("uaclient.system.get_release_info") def test_fips_required_packages_included( - self, m_platform_info, _mock, entitlement + self, m_get_release_info, _mock, entitlement ): """The fips_required_packages should always be in .packages""" # Do not trigger metapackage override by # _replace_metapackage_on_cloud_instance - m_platform_info.return_value = {"series": "test"} + m_get_release_info.return_value = mock.MagicMock(series="test") assert set(FIPS_ADDITIONAL_PACKAGES).issubset( set(entitlement.packages) ) - @mock.patch("uaclient.system.get_platform_info") + @mock.patch("uaclient.system.get_release_info") def test_currently_installed_packages_are_included_in_packages( - self, m_platform_info, entitlement + self, m_get_release_info, entitlement ): # Do not trigger metapackage override by # _replace_metapackage_on_cloud_instance # and xenial should not trigger that - m_platform_info.return_value = {"series": "xenial"} + m_get_release_info.return_value = mock.MagicMock(series="xenial") assert sorted(FIPS_ADDITIONAL_PACKAGES) == sorted(entitlement.packages) @mock.patch(M_PATH + "apt.get_installed_packages_names") - @mock.patch("uaclient.system.get_platform_info") + @mock.patch("uaclient.system.get_release_info") def test_multiple_packages_calls_dont_mutate_state( - self, m_platform_info, m_get_installed_packages, entitlement + self, m_get_release_info, m_get_installed_packages, entitlement ): # Make it appear like all packages are installed m_get_installed_packages.return_value.__contains__.return_value = True # Do not trigger metapackage override by # _replace_metapackage_on_cloud_instance - m_platform_info.return_value = {"series": "test"} + m_get_release_info.return_value = mock.MagicMock(series="test") before = copy.deepcopy(entitlement.conditional_packages) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/tests/test_livepatch.py ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/tests/test_livepatch.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/tests/test_livepatch.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/tests/test_livepatch.py 2023-06-01 18:49:33.000000000 +0000 @@ -5,7 +5,6 @@ import io import logging from functools import partial -from types import MappingProxyType import mock import pytest @@ -25,15 +24,6 @@ from uaclient.entitlements.tests.conftest import machine_token from uaclient.snap import SNAP_CMD -PLATFORM_INFO_SUPPORTED = MappingProxyType( - { - "arch": "x86_64", - "kernel": "4.4.0-00-generic", - "series": "xenial", - "version": "16.04 LTS (Xenial Xerus)", - } -) - 
M_PATH = "uaclient.entitlements.livepatch." # mock path M_LIVEPATCH_STATUS = M_PATH + "LivepatchEntitlement.application_status" DISABLED_APP_STATUS = (ApplicationStatus.DISABLED, "") @@ -97,9 +87,9 @@ entitlement = livepatch_entitlement_factory(affordances=affordances) with mock.patch( - "uaclient.system.get_platform_info" - ) as m_platform_info: - m_platform_info.return_value = PLATFORM_INFO_SUPPORTED + "uaclient.system.get_release_info" + ) as m_get_release_info: + m_get_release_info.return_value = mock.MagicMock(series="xenial") uf_status, details = entitlement.user_facing_status() assert uf_status == UserFacingStatus.INAPPLICABLE expected_details = "Cannot install Livepatch on a container." @@ -115,9 +105,9 @@ entitlement.cfg.machine_token_file.write(no_entitlements) with mock.patch( - "uaclient.system.get_platform_info" - ) as m_platform_info: - m_platform_info.return_value = PLATFORM_INFO_SUPPORTED + "uaclient.system.get_release_info" + ) as m_get_release_info: + m_get_release_info.return_value = mock.MagicMock(series="xenial") uf_status, details = entitlement.user_facing_status() assert uf_status == UserFacingStatus.UNAVAILABLE assert "Livepatch is not entitled" == details.msg @@ -193,8 +183,10 @@ "supported_kernel_ver", ( system.KernelInfo( + uname_machine_arch="", uname_release="4.4.0-00-generic", proc_version_signature_version="", + build_date=None, major=4, minor=4, patch=0, @@ -202,8 +194,10 @@ flavor="generic", ), system.KernelInfo( + uname_machine_arch="", uname_release="5.0.0-00-generic", proc_version_signature_version="", + build_date=None, major=5, minor=0, patch=0, @@ -211,8 +205,10 @@ flavor="generic", ), system.KernelInfo( + uname_machine_arch="", uname_release="4.19.0-00-generic", proc_version_signature_version="", + build_date=None, major=4, minor=19, patch=0, @@ -221,15 +217,17 @@ ), ), ) + @mock.patch("uaclient.system.get_dpkg_arch", return_value="x86_64") @mock.patch("uaclient.system.get_kernel_info") @mock.patch( - "uaclient.system.get_platform_info", - return_value=PLATFORM_INFO_SUPPORTED, + "uaclient.system.get_release_info", + return_value=mock.MagicMock(series="xenial"), ) def test_can_enable_true_on_entitlement_inactive( self, - _m_platform, + _m_get_release_info, m_kernel_info, + _m_dpkg_arch, _m_is_container, _m_livepatch_status, _m_fips_status, @@ -245,21 +243,31 @@ assert ("", "") == capsys.readouterr() assert [mock.call()] == m_container.call_args_list + @mock.patch( + "uaclient.system.get_release_info", + return_value=mock.MagicMock(series="xenial"), + ) + @mock.patch( + "uaclient.system.get_kernel_info", + return_value=mock.MagicMock(uname_release="4.2.9-00-generic"), + ) def test_can_enable_false_on_containers( - self, m_is_container, _m_livepatch_status, _m_fips_status, entitlement + self, + _m_get_kernel_info, + _m_get_release_info, + m_is_container, + _m_livepatch_status, + _m_fips_status, + entitlement, ): """When is_container is True, can_enable returns False.""" - unsupported_min_kernel = copy.deepcopy(dict(PLATFORM_INFO_SUPPORTED)) - unsupported_min_kernel["kernel"] = "4.2.9-00-generic" - with mock.patch("uaclient.system.get_platform_info") as m_platform: - m_platform.return_value = unsupported_min_kernel - m_is_container.return_value = True - entitlement = LivepatchEntitlement(entitlement.cfg) - result, reason = entitlement.can_enable() - assert False is result - assert CanEnableFailureReason.INAPPLICABLE == reason.reason - msg = "Cannot install Livepatch on a container." 
- assert msg == reason.message.msg + m_is_container.return_value = True + entitlement = LivepatchEntitlement(entitlement.cfg) + result, reason = entitlement.can_enable() + assert False is result + assert CanEnableFailureReason.INAPPLICABLE == reason.reason + msg = "Cannot install Livepatch on a container." + assert msg == reason.message.msg class TestLivepatchProcessContractDeltas: @@ -409,7 +417,7 @@ @pytest.mark.parametrize("caplog_text", [logging.DEBUG], indirect=True) @pytest.mark.parametrize("apt_update_success", (True, False)) - @mock.patch("uaclient.system.get_platform_info") + @mock.patch("uaclient.system.get_release_info") @mock.patch("uaclient.system.subp") @mock.patch("uaclient.contract.apply_contract_overrides") @mock.patch("uaclient.apt.run_apt_install_command") @@ -428,7 +436,7 @@ m_run_apt_install, _m_contract_overrides, m_subp, - _m_get_platform_info, + _m_get_release_info, m_livepatch_proxy, m_snap_proxy, m_validate_proxy, @@ -478,7 +486,7 @@ assert m_snap_proxy.call_count == 1 assert m_livepatch_proxy.call_count == 1 - @mock.patch("uaclient.system.get_platform_info") + @mock.patch("uaclient.system.get_release_info") @mock.patch("uaclient.system.subp") @mock.patch("uaclient.contract.apply_contract_overrides") @mock.patch( @@ -496,7 +504,7 @@ m_which, _m_contract_overrides, m_subp, - _m_get_platform_info, + _m_get_release_info, m_livepatch_proxy, m_snap_proxy, m_validate_proxy, @@ -569,7 +577,7 @@ assert m_snap_proxy.call_count == 0 assert m_livepatch_proxy.call_count == 0 - @mock.patch("uaclient.system.get_platform_info") + @mock.patch("uaclient.system.get_release_info") @mock.patch("uaclient.system.subp") @mock.patch("uaclient.contract.apply_contract_overrides") @mock.patch( @@ -586,7 +594,7 @@ m_which, _m_contract_overrides, m_subp, - _m_get_platform_info, + _m_get_release_info, m_livepatch_proxy, m_snap_proxy, m_validate_proxy, @@ -625,7 +633,7 @@ assert m_snap_proxy.call_count == 1 assert m_livepatch_proxy.call_count == 1 - @mock.patch("uaclient.system.get_platform_info") + @mock.patch("uaclient.system.get_release_info") @mock.patch("uaclient.system.subp") @mock.patch("uaclient.contract.apply_contract_overrides") @mock.patch( @@ -642,7 +650,7 @@ m_which, _m_contract_overrides, m_subp, - _m_get_platform_info, + _m_get_release_info, m_livepatch_proxy, m_snap_proxy, m_validate_proxy, @@ -865,41 +873,40 @@ class TestLivepatchApplicationStatus: @pytest.mark.parametrize("which_result", (("/path/to/exe"), (None))) - @pytest.mark.parametrize("subp_raise_exception", ((True), (False))) + @pytest.mark.parametrize( + "livepatch_status_result", + ( + (None), + ( + livepatch.LivepatchStatusStatus( + kernel=None, livepatch=None, supported=None + ) + ), + ), + ) @mock.patch("uaclient.system.which") - @mock.patch("uaclient.system.subp") + @mock.patch("uaclient.livepatch.status") def test_application_status( - self, m_subp, m_which, subp_raise_exception, which_result, entitlement + self, + m_livepatch_status, + m_which, + livepatch_status_result, + which_result, + entitlement, ): m_which.return_value = which_result - - if subp_raise_exception: - m_subp.side_effect = exceptions.ProcessExecutionError("error msg") + m_livepatch_status.return_value = livepatch_status_result status, details = entitlement.application_status() if not which_result: assert status == ApplicationStatus.DISABLED assert "canonical-livepatch snap is not installed." 
in details.msg - elif subp_raise_exception: + elif livepatch_status_result is None: assert status == ApplicationStatus.DISABLED - assert "error msg" in details.msg + assert ( + messages.LIVEPATCH_APPLICATION_STATUS_CLIENT_FAILURE == details + ) else: assert status == ApplicationStatus.ENABLED assert details is None - - @mock.patch("time.sleep") - @mock.patch("uaclient.system.which", return_value="/path/to/exe") - def test_status_command_retry_on_application_status( - self, m_which, m_sleep, entitlement - ): - from uaclient import system - - with mock.patch.object(system, "_subp") as m_subp: - m_subp.side_effect = exceptions.ProcessExecutionError("error msg") - status, details = entitlement.application_status() - - assert m_subp.call_count == 3 - assert m_sleep.call_count == 2 - assert status == ApplicationStatus.DISABLED - assert "error msg" in details.msg diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/tests/test_realtime.py ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/tests/test_realtime.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/tests/test_realtime.py 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/tests/test_realtime.py 2023-05-30 19:02:35.000000000 +0000 @@ -0,0 +1,61 @@ +import mock +import pytest + +from uaclient import messages +from uaclient.entitlements.entitlement_status import ApplicabilityStatus +from uaclient.entitlements.realtime import IntelIotgRealtime +from uaclient.system import CpuInfo + +RT_PATH = "uaclient.entitlements.realtime.RealtimeKernelEntitlement." + + +class TestIntelIOTGVariannt: + @pytest.mark.parametrize( + "cpu_info,expected_status,expected_msg", + ( + ( + CpuInfo(vendor_id="test", model=None, stepping=None), + ApplicabilityStatus.INAPPLICABLE, + messages.INAPPLICABLE_VENDOR_NAME.format( + title=IntelIotgRealtime.title, + vendor="test", + supported_vendors="intel", + ), + ), + ( + CpuInfo(vendor_id="intel", model=None, stepping=None), + ApplicabilityStatus.APPLICABLE, + None, + ), + ), + ) + @mock.patch("uaclient.system.get_kernel_info") + @mock.patch("uaclient.system.get_cpu_info") + def test_applicability_status( + self, + m_get_cpu_info, + _m_get_kernel_info, + cpu_info, + expected_status, + expected_msg, + FakeConfig, + ): + m_get_cpu_info.return_value = cpu_info + ent = IntelIotgRealtime(FakeConfig()) + with mock.patch.object( + ent, "_base_entitlement_cfg" + ) as m_entitlement_cfg: + m_entitlement_cfg.return_value = { + "entitlement": { + "affordances": { + "platformChecks": { + "cpu_vendor_ids": ["intel"], + } + } + } + } + actual_ret = ent.applicability_status() + assert ( + expected_status, + expected_msg, + ) == actual_ret diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/tests/test_repo.py ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/tests/test_repo.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/entitlements/tests/test_repo.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/entitlements/tests/test_repo.py 2023-06-27 00:49:37.000000000 +0000 @@ -1,5 +1,4 @@ import copy -from types import MappingProxyType import mock import pytest @@ -16,15 +15,6 @@ M_PATH = "uaclient.entitlements.repo." M_CONTRACT_PATH = "uaclient.entitlements.repo.contract.UAContractClient." 
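# A minimal sketch of the mock.patch.object pattern used by the new
# test_realtime.py above: a single method on one instance is patched so the code
# under test sees canned entitlement configuration. The Entitlement class below
# is a hypothetical stand-in, not the real uaclient class; only the shape of the
# patched call mirrors the test.
from unittest import mock


class Entitlement:
    def _base_entitlement_cfg(self):
        return {}  # in uaclient this would come from the machine token

    def supported_cpu_vendors(self):
        cfg = self._base_entitlement_cfg()
        return cfg["entitlement"]["affordances"]["platformChecks"]["cpu_vendor_ids"]


ent = Entitlement()
with mock.patch.object(ent, "_base_entitlement_cfg") as m_cfg:
    m_cfg.return_value = {
        "entitlement": {
            "affordances": {"platformChecks": {"cpu_vendor_ids": ["intel"]}}
        }
    }
    assert ent.supported_cpu_vendors() == ["intel"]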
-PLATFORM_INFO_SUPPORTED = MappingProxyType( - { - "arch": "x86_64", - "kernel": "4.4.0-00-generic", - "series": "xenial", - "version": "16.04 LTS (Xenial Xerus)", - } -) - class RepoTestEntitlement(RepoEntitlement): """Subclass so we can test shared repo functionality""" @@ -47,27 +37,25 @@ class TestUserFacingStatus: - @mock.patch(M_PATH + "system.get_platform_info") + @mock.patch(M_PATH + "system.get_release_info") def test_inapplicable_on_inapplicable_applicability_status( - self, m_platform_info, entitlement + self, m_release_info, entitlement ): """When applicability_status is INAPPLICABLE, return INAPPLICABLE.""" - platform_unsupported = copy.deepcopy(dict(PLATFORM_INFO_SUPPORTED)) - platform_unsupported["series"] = "example" - platform_unsupported["version"] = "01.01 LTS (Example Version)" - m_platform_info.return_value = platform_unsupported + m_release_info.return_value = mock.MagicMock( + series="example", pretty_version="version" + ) applicability, details = entitlement.applicability_status() assert ApplicabilityStatus.INAPPLICABLE == applicability expected_details = ( - "Repo Test Class is not available for Ubuntu 01.01" - " LTS (Example Version)." + "Repo Test Class is not available for Ubuntu version." ) assert expected_details == details.msg uf_status, _ = entitlement.user_facing_status() assert UserFacingStatus.INAPPLICABLE == uf_status - @mock.patch(M_PATH + "system.get_platform_info") - def test_unavailable_on_unentitled(self, m_platform_info, entitlement): + @mock.patch(M_PATH + "system.get_release_info") + def test_unavailable_on_unentitled(self, m_release_info, entitlement): """When unentitled, return UNAVAILABLE.""" no_entitlements = copy.deepcopy(machine_token("blah")) # delete all enttlements @@ -75,7 +63,7 @@ "resourceEntitlements" ].pop() entitlement.cfg.machine_token_file.write(no_entitlements) - m_platform_info.return_value = dict(PLATFORM_INFO_SUPPORTED) + m_release_info.return_value = mock.MagicMock(series="xenial") applicability, _details = entitlement.applicability_status() assert ApplicabilityStatus.APPLICABLE == applicability uf_status, uf_details = entitlement.user_facing_status() @@ -260,7 +248,7 @@ "uaclient.entitlements.base.UAEntitlement.process_contract_deltas" ) @mock.patch("uaclient.config.UAConfig.read_cache") - @mock.patch(M_PATH + "system.get_platform_info") + @mock.patch(M_PATH + "system.get_release_info") @mock.patch(M_PATH + "apt.remove_auth_apt_repo") @mock.patch.object(RepoTestEntitlement, "setup_apt_config") @mock.patch.object(RepoTestEntitlement, "remove_apt_config") @@ -271,7 +259,7 @@ m_remove_apt_config, m_setup_apt_config, m_remove_auth_apt_repo, - m_platform_info, + m_release_info, m_read_cache, m_process_contract_deltas, entitlement, @@ -320,7 +308,7 @@ "uaclient.entitlements.base.UAEntitlement.process_contract_deltas" ) @mock.patch("uaclient.config.UAConfig.read_cache") - @mock.patch(M_PATH + "system.get_platform_info") + @mock.patch(M_PATH + "system.get_release_info") @mock.patch(M_PATH + "apt.remove_auth_apt_repo") @mock.patch.object(RepoTestEntitlement, "setup_apt_config") @mock.patch.object(RepoTestEntitlement, "remove_apt_config") @@ -331,7 +319,7 @@ m_remove_apt_config, m_setup_apt_config, m_remove_auth_apt_repo, - m_platform_info, + m_release_info, m_read_cache, m_process_contract_deltas, entitlement, @@ -438,14 +426,14 @@ @mock.patch(M_PATH + "system.subp", return_value=("", "")) @mock.patch(M_PATH + "apt.add_auth_apt_repo") @mock.patch(M_PATH + "exists", return_value=True) - @mock.patch(M_PATH + 
"system.get_platform_info") + @mock.patch(M_PATH + "system.get_release_info") @mock.patch.object( RepoTestEntitlement, "can_enable", return_value=(True, None) ) def test_enable_calls_adds_apt_repo_and_calls_apt_update( self, m_can_enable, - m_platform, + m_release_info, m_exists, m_apt_add, m_subp, @@ -460,7 +448,7 @@ should_reboot, ): """On enable add authenticated apt repo and refresh package lists.""" - m_platform.return_value = {"series": "xenial"} + m_release_info.return_value = mock.MagicMock(series="xenial") m_should_reboot.return_value = should_reboot pre_install_msgs = ["Some pre-install information", "Some more info"] @@ -478,7 +466,7 @@ ["apt-get", "update"], capture=True, retry_sleeps=apt.APT_RETRIES, - env={}, + override_env_vars=None, ) ] @@ -503,7 +491,7 @@ ], capture=True, retry_sleeps=apt.APT_RETRIES, - env={}, + override_env_vars=None, ) ) expected_output = ( @@ -673,17 +661,17 @@ @mock.patch(M_PATH + "apt.remove_auth_apt_repo") @mock.patch(M_PATH + "apt.remove_apt_list_files") @mock.patch(M_PATH + "apt.run_apt_command") - @mock.patch(M_PATH + "system.get_platform_info") + @mock.patch(M_PATH + "system.get_release_info") def test_disable_removes_all_apt_config( self, - m_get_platform, + m_get_release_info, _m_run_apt_command, m_remove_apt_list_files, m_remove_auth_apt_repo, entitlement_factory, ): """Remove all APT config when disable_apt_auth_only is False""" - m_get_platform.return_value = {"series": "xenial"} + m_get_release_info.return_value = mock.MagicMock(series="xenial") entitlement = entitlement_factory( RepoTestEntitlement, @@ -706,12 +694,12 @@ @mock.patch(M_PATH + "apt.remove_auth_apt_repo") @mock.patch(M_PATH + "apt.remove_apt_list_files") @mock.patch(M_PATH + "apt.run_apt_command") - @mock.patch(M_PATH + "system.get_platform_info") + @mock.patch(M_PATH + "system.get_release_info") @mock.patch(M_PATH + "contract.apply_contract_overrides") def test_repo_pin_priority_int_removes_apt_preferences( self, _m_contract_overrides, - m_get_platform, + m_get_release_info, _m_run_apt_command, _m_remove_apt_list_files, _m_remove_auth_apt_repo, @@ -719,7 +707,7 @@ entitlement_factory, ): """Remove apt preferences file when repo_pin_priority is an int.""" - m_get_platform.return_value = {"series": "xenial"} + m_get_release_info.return_value = mock.MagicMock(series="xenial") entitlement = entitlement_factory( RepoTestEntitlementRepoWithPin, affordances={"series": ["xenial"]} @@ -777,14 +765,14 @@ @pytest.mark.parametrize("enable_by_default", (True, False)) @mock.patch("uaclient.apt.setup_apt_proxy") - @mock.patch(M_CONTRACT_PATH + "request_resource_machine_access") + @mock.patch(M_CONTRACT_PATH + "get_resource_machine_access") @mock.patch(M_PATH + "apt.add_auth_apt_repo") @mock.patch(M_PATH + "apt.run_apt_command") def test_setup_apt_config_request_machine_access_when_no_resource_token( self, run_apt_command, add_auth_apt_repo, - request_resource_machine_access, + _m_get_resource_machine_access, _setup_apt_proxy, enable_by_default, entitlement_factory, @@ -807,12 +795,12 @@ ) if enable_by_default: assert expected_msg in caplog_text() - assert 0 == request_resource_machine_access.call_count + assert 0 == _m_get_resource_machine_access.call_count else: assert expected_msg not in caplog_text() assert [ mock.call("blah", "repotest") - ] == request_resource_machine_access.call_args_list + ] == _m_get_resource_machine_access.call_args_list @mock.patch("uaclient.apt.setup_apt_proxy") @mock.patch("os.path.exists", return_value=False) @@ -862,12 +850,12 @@ assert install_call in 
m_run_apt_install_command.call_args_list @mock.patch("uaclient.apt.setup_apt_proxy") - @mock.patch(M_PATH + "system.get_platform_info") + @mock.patch(M_PATH + "system.get_release_info") def test_setup_error_with_repo_pin_priority_and_missing_origin( - self, m_get_platform_info, _setup_apt_proxy, entitlement_factory + self, m_get_release_info, _setup_apt_proxy, entitlement_factory ): """Raise error when repo_pin_priority is set and origin is None.""" - m_get_platform_info.return_value = {"series": "xenial"} + m_get_release_info.return_value = mock.MagicMock(series="xenial") entitlement = entitlement_factory( RepoTestEntitlementRepoWithPin, affordances={"series": ["xenial"]} ) @@ -882,12 +870,12 @@ @mock.patch(M_PATH + "apt.add_auth_apt_repo") @mock.patch(M_PATH + "apt.run_apt_update_command") @mock.patch(M_PATH + "apt.add_ppa_pinning") - @mock.patch(M_PATH + "system.get_platform_info") + @mock.patch(M_PATH + "system.get_release_info") @mock.patch(M_PATH + "contract.apply_contract_overrides") def test_setup_with_repo_pin_priority_int_adds_a_pins_repo_apt_preference( self, _m_apply_overrides, - m_get_platform_info, + m_get_release_info, m_add_ppa_pinning, m_run_apt_update_command, m_add_auth_repo, @@ -895,7 +883,7 @@ entitlement_factory, ): """When repo_pin_priority is an int, set pin in apt preferences.""" - m_get_platform_info.return_value = {"series": "xenial"} + m_get_release_info.return_value = mock.MagicMock(series="xenial") entitlement = entitlement_factory( RepoTestEntitlementRepoWithPin, affordances={"series": ["xenial"]} ) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/event_logger.py ubuntu-advantage-tools-28.1~18.04/uaclient/event_logger.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/event_logger.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/event_logger.py 2023-05-30 19:02:35.000000000 +0000 @@ -56,6 +56,13 @@ # not processed status.setdefault("services", []) + # We are redacting every variant information from the status output + # because we still are not sure on the best way to represent this + # information on the status machine readable output + for service in status.get("services", []): + if "variants" in service: + service.pop("variants") + return status diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/exceptions.py ubuntu-advantage-tools-28.1~18.04/uaclient/exceptions.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/exceptions.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/exceptions.py 2023-06-01 18:49:33.000000000 +0000 @@ -1,5 +1,5 @@ import textwrap -from typing import Any, Dict, Optional +from typing import Any, Dict, List, Optional, Tuple from urllib import error from uaclient import messages @@ -288,7 +288,9 @@ "Error: " + msg + "\n" - + messages.SECURITY_ISSUE_NOT_RESOLVED.format(issue=issue_id) + + messages.SECURITY_ISSUE_NOT_RESOLVED.format( + issue=issue_id, extra_info="" + ) ) @@ -335,6 +337,45 @@ super().__init__(msg=msg.msg, msg_code=msg.name) +class EntitlementsNotEnabledError(UserFacingError): + + exit_code = 4 + + def __init__( + self, + failed_services: List[Tuple[str, messages.NamedMessage]], + msg: messages.NamedMessage = messages.ENTITLEMENTS_NOT_ENABLED_ERROR, + ): + info_dicts = [ + {"name": f[0], "code": f[1].name, "title": f[1].msg} + for f in failed_services + ] + super().__init__( + msg=msg.msg, + msg_code=msg.name, + additional_info={"services": info_dicts}, + ) + + +class AttachFailureDefaultServices(EntitlementsNotEnabledError): + def 
__init__( + self, failed_services: List[Tuple[str, messages.NamedMessage]] + ): + super().__init__( + failed_services=failed_services, + msg=messages.ATTACH_FAILURE_DEFAULT_SERVICES, + ) + + +class AttachFailureUnknownError(EntitlementsNotEnabledError): + def __init__( + self, failed_services: List[Tuple[str, messages.NamedMessage]] + ): + super().__init__( + failed_services=failed_services, msg=messages.UNEXPECTED_ERROR + ) + + class UrlError(IOError): def __init__( self, @@ -475,3 +516,11 @@ def __init__(self, lock_file_path): msg = messages.INVALID_LOCK_FILE.format(lock_file_path=lock_file_path) super().__init__(msg=msg.msg, msg_code=msg.name) + + +class InvalidOptionCombination(UserFacingError): + def __init__(self, option1: str, option2: str): + msg = messages.INVALID_OPTION_COMBINATION.format( + option1=option1, option2=option2 + ) + super().__init__(msg=msg.msg, msg_code=msg.name) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/files/files.py ubuntu-advantage-tools-28.1~18.04/uaclient/files/files.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/files/files.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/files/files.py 2023-05-30 19:02:35.000000000 +0000 @@ -59,6 +59,13 @@ system.ensure_file_absent(self.path) +class UserCacheFile(UAFile): + def __init__(self, name: str): + super().__init__( + name, directory=system.get_user_cache_dir(), private=False + ) + + class MachineTokenFile: def __init__( self, diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/files/state_files.py ubuntu-advantage-tools-28.1~18.04/uaclient/files/state_files.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/files/state_files.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/files/state_files.py 2023-05-30 19:02:35.000000000 +0000 @@ -12,7 +12,7 @@ data_list, ) from uaclient.files.data_types import DataObjectFile, DataObjectFileFormat -from uaclient.files.files import UAFile +from uaclient.files.files import UAFile, UserCacheFile SERVICES_ONCE_ENABLED = "services-once-enabled" @@ -167,11 +167,7 @@ livepatch_support_cache = DataObjectFile( LivepatchSupportCacheData, - UAFile( - "livepatch-kernel-support-cache.json", - directory=defaults.UAC_TMP_PATH, - private=False, - ), + UserCacheFile("livepatch-kernel-support-cache.json"), file_format=DataObjectFileFormat.JSON, ) @@ -233,3 +229,6 @@ DataObjectFileFormat.JSON, optional_type_errors_become_null=True, ) + + +reboot_cmd_marker_file = UAFile("marker-reboot-cmds-required") diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/jobs/metering.py ubuntu-advantage-tools-28.1~18.04/uaclient/jobs/metering.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/jobs/metering.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/jobs/metering.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -""" -Functions to be used when running metering jobs -""" - -from uaclient import config -from uaclient.cli import assert_lock_file -from uaclient.contract import UAContractClient - - -@assert_lock_file("timer metering job") -def metering_enabled_resources(cfg: config.UAConfig) -> bool: - # We only run this job if there is no other job running. - # The reason for that is to avoid potential conflicts with - # auto-attach, attach and enable operations. 
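# A minimal sketch of how the new EntitlementsNotEnabledError in the exceptions.py
# hunk above serializes its failed_services argument, a list of
# (service_name, NamedMessage) tuples, into the "services" additional_info.
# NamedMessage below is a hypothetical stand-in with the same .name/.msg fields
# as uaclient.messages.NamedMessage.
from typing import List, NamedTuple, Tuple


class NamedMessage(NamedTuple):
    name: str
    msg: str


def services_info(failed: List[Tuple[str, NamedMessage]]) -> List[dict]:
    # Mirrors the info_dicts comprehension in the hunk above.
    return [
        {"name": name, "code": msg.name, "title": msg.msg} for name, msg in failed
    ]


assert services_info([("esm-apps", NamedMessage("apt-error", "APT failed"))]) == [
    {"name": "esm-apps", "code": "apt-error", "title": "APT failed"}
]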
- - if not cfg.is_attached: - return False - - contract = UAContractClient(cfg) - contract.report_machine_activity() - - return True diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/jobs/tests/test_update_contract_info.py ubuntu-advantage-tools-28.1~18.04/uaclient/jobs/tests/test_update_contract_info.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/jobs/tests/test_update_contract_info.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/jobs/tests/test_update_contract_info.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,88 +0,0 @@ -import logging - -import mock -import pytest - -from uaclient.files.notices import Notice -from uaclient.jobs.update_contract_info import update_contract_info - -M_PATH = "uaclient.jobs.update_contract_info." - - -@mock.patch(M_PATH + "contract.is_contract_changed", return_value=False) -class TestUpdateContractInfo: - @pytest.mark.parametrize( - "contract_changed,is_attached", - ( - (False, True), - (True, False), - (True, True), - (False, False), - ), - ) - @mock.patch(M_PATH + "notices", autospec=True) - def test_is_contract_changed( - self, - m_notices, - m_contract_changed, - contract_changed, - is_attached, - FakeConfig, - ): - m_contract_changed.return_value = contract_changed - if is_attached: - cfg = FakeConfig().for_attached_machine() - else: - cfg = FakeConfig() - - update_contract_info(cfg=cfg) - - if is_attached: - if contract_changed: - assert [ - mock.call( - Notice.CONTRACT_REFRESH_WARNING, - ) - ] == m_notices.add.call_args_list - else: - assert [ - mock.call( - Notice.CONTRACT_REFRESH_WARNING, - ) - ] not in m_notices.add.call_args_list - assert [ - mock.call(Notice.CONTRACT_REFRESH_WARNING) - ] in m_notices.remove.call_args_list - else: - assert m_contract_changed.call_count == 0 - - @pytest.mark.parametrize( - "contract_changed", - ( - False, - True, - Exception("Error checking contract info"), - ), - ) - @pytest.mark.parametrize("caplog_text", [logging.DEBUG], indirect=True) - @mock.patch(M_PATH + "notices", autospec=True) - def test_contract_failure( - self, - m_notices, - m_contract_changed, - contract_changed, - caplog_text, - FakeConfig, - ): - m_contract_changed.side_effect = (contract_changed,) - m_notices.add.side_effect = Exception("Error checking contract info") - m_notices.remove.side_effect = Exception( - "Error checking contract info" - ) - cfg = FakeConfig().for_attached_machine() - - assert False is update_contract_info(cfg=cfg) - assert ( - "Failed to check for change in machine contract." - " Reason: Error checking contract info\n" - ) in caplog_text() diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/jobs/tests/test_update_messaging.py ubuntu-advantage-tools-28.1~18.04/uaclient/jobs/tests/test_update_messaging.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/jobs/tests/test_update_messaging.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/jobs/tests/test_update_messaging.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,392 +0,0 @@ -import datetime - -import mock -import pytest - -from uaclient import messages -from uaclient.api.u.pro.packages.updates.v1 import ( - PackageUpdatesResult, - UpdateSummary, -) -from uaclient.entitlements.entitlement_status import ApplicationStatus -from uaclient.jobs.update_messaging import ( - ContractExpiryStatus, - get_contract_expiry_status, - update_motd_messages, -) - -M_PATH = "uaclient.jobs.update_messaging." 
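# A minimal sketch of the assertion style the removed job tests above rely on:
# expectations are expressed as lists of mock.call(...) entries and compared
# against a mock's call_args_list. Standard library only; "notices" here is a
# plain MagicMock standing in for uaclient.files.notices, and the string
# argument replaces the real Notice.CONTRACT_REFRESH_WARNING enum member to keep
# the sketch self-contained.
from unittest import mock

notices = mock.MagicMock()
notices.add("contract-refresh-warning")

assert [mock.call("contract-refresh-warning")] == notices.add.call_args_list
assert [] == notices.remove.call_args_list  # remove() was never called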
- - -class TestGetContractExpiryStatus: - @pytest.mark.parametrize( - "contract_remaining_days,expected_status", - ( - (21, ContractExpiryStatus.ACTIVE), - (20, ContractExpiryStatus.ACTIVE_EXPIRED_SOON), - (-1, ContractExpiryStatus.EXPIRED_GRACE_PERIOD), - (-20, ContractExpiryStatus.EXPIRED), - ), - ) - def test_contract_expiry_status_based_on_remaining_days( - self, contract_remaining_days, expected_status, FakeConfig - ): - """Return a tuple of ContractExpiryStatus and remaining_days""" - now = datetime.datetime.utcnow() - expire_date = now + datetime.timedelta(days=contract_remaining_days) - cfg = FakeConfig.for_attached_machine() - m_token = cfg.machine_token - m_token["machineTokenInfo"]["contractInfo"][ - "effectiveTo" - ] = expire_date - - assert ( - expected_status, - contract_remaining_days, - ) == get_contract_expiry_status(cfg) - - @pytest.mark.parametrize( - "expiry,is_updated", - (("2040-05-08T19:02:26Z", False), ("2042-05-08T19:02:26Z", True)), - ) - @mock.patch("uaclient.files.MachineTokenFile.write") - @mock.patch(M_PATH + "contract.UAContractClient.get_updated_contract_info") - def test_update_contract_expiry( - self, - get_updated_contract_info, - machine_token_write, - expiry, - is_updated, - ): - get_updated_contract_info.return_value = { - "machineTokenInfo": {"contractInfo": {"effectiveTo": expiry}} - } - if is_updated: - 1 == machine_token_write.call_count - else: - 0 == machine_token_write.call_count - - -class TestUpdateMotdMessages: - @pytest.mark.parametrize( - [ - "attached", - "contract_expiry_statuses", - "is_current_series_active_esm", - "infra_status", - "is_current_series_lts", - "apps_status", - "updates", - "expected", - "update_contract_expiry_calls", - "ensure_file_absent_calls", - "write_file_calls", - ], - [ - ( - # not attached - False, - [], - False, - None, - False, - None, - None, - False, - [], - [], - [], - ), - ( - # somehow attached but none contract status - True, - [(ContractExpiryStatus.NONE, None)], - False, - None, - False, - None, - None, - True, - [], - [mock.call(mock.ANY)], - [], - ), - ( - # active contract - True, - [(ContractExpiryStatus.ACTIVE, None)], - False, - None, - False, - None, - None, - True, - [], - [mock.call(mock.ANY)], - [], - ), - ( - # expiring soon contract, updated to be active - True, - [ - (ContractExpiryStatus.ACTIVE_EXPIRED_SOON, None), - (ContractExpiryStatus.ACTIVE, None), - ], - False, - None, - False, - None, - None, - True, - [mock.call(mock.ANY)], - [mock.call(mock.ANY)], - [], - ), - ( - # expired grace period contract, updated to be active - True, - [ - (ContractExpiryStatus.EXPIRED_GRACE_PERIOD, None), - (ContractExpiryStatus.ACTIVE, None), - ], - False, - None, - False, - None, - None, - True, - [mock.call(mock.ANY)], - [mock.call(mock.ANY)], - [], - ), - ( - # expired contract, updated to be active - True, - [ - (ContractExpiryStatus.EXPIRED, None), - (ContractExpiryStatus.ACTIVE, None), - ], - False, - None, - False, - None, - None, - True, - [mock.call(mock.ANY)], - [mock.call(mock.ANY)], - [], - ), - ( - # expiring soon for real - True, - [ - (ContractExpiryStatus.ACTIVE_EXPIRED_SOON, 3), - (ContractExpiryStatus.ACTIVE_EXPIRED_SOON, 3), - ], - False, - None, - False, - None, - None, - True, - [mock.call(mock.ANY)], - [], - [ - mock.call( - mock.ANY, - messages.CONTRACT_EXPIRES_SOON_MOTD.format( - remaining_days=3 - ), - ) - ], - ), - ( - # expired grace period for real - True, - [ - (ContractExpiryStatus.EXPIRED_GRACE_PERIOD, -3), - (ContractExpiryStatus.EXPIRED_GRACE_PERIOD, -3), - ], - False, - 
None, - False, - None, - None, - True, - [mock.call(mock.ANY)], - [], - [ - mock.call( - mock.ANY, - messages.CONTRACT_EXPIRED_GRACE_PERIOD_MOTD.format( - remaining_days=11, expired_date="21 Dec 2012" - ), - ) - ], - ), - ( - # expired, eol release, esm-infra not enabled - True, - [ - (ContractExpiryStatus.EXPIRED, 3), - (ContractExpiryStatus.EXPIRED, 3), - ], - True, - (ApplicationStatus.DISABLED, None), - False, - None, - None, - True, - [mock.call(mock.ANY)], - [], - [mock.call(mock.ANY, messages.CONTRACT_EXPIRED_MOTD_NO_PKGS)], - ), - ( - # expired, lts release, esm-apps not enabled - True, - [ - (ContractExpiryStatus.EXPIRED, 3), - (ContractExpiryStatus.EXPIRED, 3), - ], - False, - None, - True, - (ApplicationStatus.DISABLED, None), - None, - True, - [mock.call(mock.ANY)], - [], - [mock.call(mock.ANY, messages.CONTRACT_EXPIRED_MOTD_NO_PKGS)], - ), - ( - # expired, interim release - True, - [ - (ContractExpiryStatus.EXPIRED, 3), - (ContractExpiryStatus.EXPIRED, 3), - ], - False, - None, - False, - None, - None, - True, - [mock.call(mock.ANY)], - [], - [mock.call(mock.ANY, messages.CONTRACT_EXPIRED_MOTD_NO_PKGS)], - ), - ( - # expired, eol release, esm-infra enabled - True, - [ - (ContractExpiryStatus.EXPIRED, 3), - (ContractExpiryStatus.EXPIRED, 3), - ], - True, - (ApplicationStatus.ENABLED, None), - False, - None, - PackageUpdatesResult(UpdateSummary(0, 0, 4, 0, 0), []), - True, - [mock.call(mock.ANY)], - [], - [ - mock.call( - mock.ANY, - messages.CONTRACT_EXPIRED_MOTD_PKGS.format( - service="esm-infra", pkg_num=4 - ), - ) - ], - ), - ( - # expired, lts release, esm-apps enabled - True, - [ - (ContractExpiryStatus.EXPIRED, 3), - (ContractExpiryStatus.EXPIRED, 3), - ], - False, - None, - True, - (ApplicationStatus.ENABLED, None), - PackageUpdatesResult(UpdateSummary(0, 5, 0, 0, 0), []), - True, - [mock.call(mock.ANY)], - [], - [ - mock.call( - mock.ANY, - messages.CONTRACT_EXPIRED_MOTD_PKGS.format( - service="esm-apps", pkg_num=5 - ), - ) - ], - ), - ], - ) - @mock.patch(M_PATH + "api_u_pro_packages_updates_v1") - @mock.patch(M_PATH + "ESMAppsEntitlement.application_status") - @mock.patch(M_PATH + "system.is_current_series_lts") - @mock.patch(M_PATH + "ESMInfraEntitlement.application_status") - @mock.patch(M_PATH + "system.is_current_series_active_esm") - @mock.patch( - M_PATH + "UAConfig.machine_token_file", new_callable=mock.PropertyMock - ) - @mock.patch(M_PATH + "system.write_file") - @mock.patch(M_PATH + "system.ensure_file_absent") - @mock.patch(M_PATH + "update_contract_expiry") - @mock.patch(M_PATH + "get_contract_expiry_status") - @mock.patch( - M_PATH + "UAConfig.is_attached", new_callable=mock.PropertyMock - ) - def test_update_motd_messages( - self, - m_is_attached, - m_get_contract_expiry_status, - m_update_contract_expiry, - m_ensure_file_absent, - m_write_file, - m_machine_token_file, - m_is_current_series_active_esm, - m_infra_status, - m_is_current_series_lts, - m_apps_status, - m_api_updates_v1, - attached, - contract_expiry_statuses, - is_current_series_active_esm, - infra_status, - is_current_series_lts, - apps_status, - updates, - expected, - update_contract_expiry_calls, - ensure_file_absent_calls, - write_file_calls, - FakeConfig, - ): - m_is_attached.return_value = attached - m_get_contract_expiry_status.side_effect = contract_expiry_statuses - m_is_current_series_active_esm.return_value = ( - is_current_series_active_esm - ) - m_infra_status.return_value = infra_status - m_is_current_series_lts.return_value = is_current_series_lts - m_apps_status.return_value 
= apps_status - m_api_updates_v1.return_value = updates - - machine_token_file = mock.MagicMock() - machine_token_file.contract_expiry_datetime = datetime.datetime( - 2012, 12, 21 - ) - m_machine_token_file.return_value = machine_token_file - - assert expected == update_motd_messages(FakeConfig()) - - assert ( - update_contract_expiry_calls - == m_update_contract_expiry.call_args_list - ) - assert ensure_file_absent_calls == m_ensure_file_absent.call_args_list - assert write_file_calls == m_write_file.call_args_list diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/jobs/update_contract_info.py ubuntu-advantage-tools-28.1~18.04/uaclient/jobs/update_contract_info.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/jobs/update_contract_info.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/jobs/update_contract_info.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,29 +0,0 @@ -import logging - -from uaclient import contract, messages, util -from uaclient.config import UAConfig -from uaclient.files import notices -from uaclient.files.notices import Notice - -LOG = logging.getLogger(__name__) - - -def update_contract_info(cfg: UAConfig) -> bool: - if cfg.is_attached: - try: - if contract.is_contract_changed(cfg): - notices.add( - Notice.CONTRACT_REFRESH_WARNING, - ) - else: - notices.remove( - Notice.CONTRACT_REFRESH_WARNING, - ) - except Exception as e: - with util.disable_log_to_console(): - err_msg = messages.UPDATE_CHECK_CONTRACT_FAILURE.format( - reason=str(e) - ) - LOG.warning(err_msg) - return False - return True diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/jobs/update_messaging.py ubuntu-advantage-tools-28.1~18.04/uaclient/jobs/update_messaging.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/jobs/update_messaging.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/jobs/update_messaging.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,190 +0,0 @@ -""" -Update messaging text for use in MOTD and APT custom Ubuntu Pro messages. - -Messaging files will be emitted to /var/lib/ubuntu-advantage/message-* which -will be sourced by apt-hook/hook.cc and various /etc/update-motd.d/ hooks to -present updated text about Ubuntu Pro service and token state. 
-""" - -import enum -import logging -import os -from os.path import exists -from typing import Tuple - -from uaclient import contract, defaults, messages, system -from uaclient.api.u.pro.packages.updates.v1 import ( - _updates as api_u_pro_packages_updates_v1, -) -from uaclient.config import UAConfig -from uaclient.entitlements import ESMAppsEntitlement, ESMInfraEntitlement -from uaclient.entitlements.entitlement_status import ApplicationStatus - -MOTD_CONTRACT_STATUS_FILE_NAME = "motd-contract-status" -UPDATE_NOTIFIER_MOTD_SCRIPT = ( - "/usr/lib/update-notifier/update-motd-updates-available" -) - - -@enum.unique -class ContractExpiryStatus(enum.Enum): - NONE = 0 - ACTIVE = 1 - ACTIVE_EXPIRED_SOON = 2 - EXPIRED_GRACE_PERIOD = 3 - EXPIRED = 4 - - -def update_contract_expiry(cfg: UAConfig): - orig_token = cfg.machine_token - machine_token = orig_token.get("machineToken", "") - contract_id = ( - orig_token.get("machineTokenInfo", {}) - .get("contractInfo", {}) - .get("id", None) - ) - contract_client = contract.UAContractClient(cfg) - resp = contract_client.get_updated_contract_info( - machine_token, contract_id - ) - resp_expiry = ( - resp.get("machineTokenInfo", {}) - .get("contractInfo", {}) - .get("effectiveTo", None) - ) - if ( - resp_expiry is not None - and resp_expiry != cfg.machine_token_file.contract_expiry_datetime - ): - orig_token["machineTokenInfo"]["contractInfo"][ - "effectiveTo" - ] = resp_expiry - cfg.machine_token_file.write(orig_token) - - -def get_contract_expiry_status( - cfg: UAConfig, -) -> Tuple[ContractExpiryStatus, int]: - """Return a tuple [ContractExpiryStatus, num_days]""" - if not cfg.is_attached: - return ContractExpiryStatus.NONE, 0 - - grace_period = defaults.CONTRACT_EXPIRY_GRACE_PERIOD_DAYS - pending_expiry = defaults.CONTRACT_EXPIRY_PENDING_DAYS - remaining_days = cfg.machine_token_file.contract_remaining_days - - # if unknown assume the worst - if remaining_days is None: - logging.warning( - "contract effectiveTo date is null - assuming it is expired" - ) - return ContractExpiryStatus.EXPIRED, -grace_period - - if 0 <= remaining_days <= pending_expiry: - return ContractExpiryStatus.ACTIVE_EXPIRED_SOON, remaining_days - elif -grace_period <= remaining_days < 0: - return ContractExpiryStatus.EXPIRED_GRACE_PERIOD, remaining_days - elif remaining_days < -grace_period: - return ContractExpiryStatus.EXPIRED, remaining_days - return ContractExpiryStatus.ACTIVE, remaining_days - - -def update_motd_messages(cfg: UAConfig) -> bool: - """Emit human-readable status message used by motd. - - Used by /etc/update.motd.d/91-contract-ua-esm-status - - :param cfg: UAConfig instance for this environment. 
- """ - if not cfg.is_attached: - return False - - logging.debug("Updating Ubuntu Pro messages for MOTD.") - motd_contract_status_msg_path = os.path.join( - cfg.data_dir, "messages", MOTD_CONTRACT_STATUS_FILE_NAME - ) - - expiry_status, remaining_days = get_contract_expiry_status(cfg) - if expiry_status in ( - ContractExpiryStatus.ACTIVE_EXPIRED_SOON, - ContractExpiryStatus.EXPIRED_GRACE_PERIOD, - ContractExpiryStatus.EXPIRED, - ): - update_contract_expiry(cfg) - expiry_status, remaining_days = get_contract_expiry_status(cfg) - - if expiry_status in ( - ContractExpiryStatus.ACTIVE, - ContractExpiryStatus.NONE, - ): - system.ensure_file_absent(motd_contract_status_msg_path) - elif expiry_status == ContractExpiryStatus.ACTIVE_EXPIRED_SOON: - system.write_file( - motd_contract_status_msg_path, - messages.CONTRACT_EXPIRES_SOON_MOTD.format( - remaining_days=remaining_days, - ), - ) - elif expiry_status == ContractExpiryStatus.EXPIRED_GRACE_PERIOD: - grace_period_remaining = ( - defaults.CONTRACT_EXPIRY_GRACE_PERIOD_DAYS + remaining_days - ) - exp_dt = cfg.machine_token_file.contract_expiry_datetime - if exp_dt is None: - exp_dt_str = "Unknown" - else: - exp_dt_str = exp_dt.strftime("%d %b %Y") - system.write_file( - motd_contract_status_msg_path, - messages.CONTRACT_EXPIRED_GRACE_PERIOD_MOTD.format( - expired_date=exp_dt_str, - remaining_days=grace_period_remaining, - ), - ) - elif expiry_status == ContractExpiryStatus.EXPIRED: - service = "n/a" - pkg_num = 0 - - if system.is_current_series_active_esm(): - esm_infra_status, _ = ESMInfraEntitlement(cfg).application_status() - if esm_infra_status == ApplicationStatus.ENABLED: - service = "esm-infra" - pkg_num = api_u_pro_packages_updates_v1( - cfg - ).summary.num_esm_infra_updates - elif system.is_current_series_lts(): - esm_apps_status, _ = ESMAppsEntitlement(cfg).application_status() - if esm_apps_status == ApplicationStatus.ENABLED: - service = "esm-apps" - pkg_num = api_u_pro_packages_updates_v1( - cfg - ).summary.num_esm_apps_updates - - if pkg_num == 0: - system.write_file( - motd_contract_status_msg_path, - messages.CONTRACT_EXPIRED_MOTD_NO_PKGS, - ) - else: - system.write_file( - motd_contract_status_msg_path, - messages.CONTRACT_EXPIRED_MOTD_PKGS.format( - pkg_num=pkg_num, - service=service, - ), - ) - - return True - - -def refresh_motd(): - # If update-notifier is present, we might as well update - # the package updates count related to MOTD - if exists(UPDATE_NOTIFIER_MOTD_SCRIPT): - # If this command fails, we shouldn't break the entire command, - # since this command should already be triggered by - # update-notifier apt hooks - try: - system.subp([UPDATE_NOTIFIER_MOTD_SCRIPT, "--force"]) - except Exception as exc: - logging.exception(exc) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/livepatch.py ubuntu-advantage-tools-28.1~18.04/uaclient/livepatch.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/livepatch.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/livepatch.py 2023-06-01 18:49:33.000000000 +0000 @@ -1,4 +1,5 @@ import datetime +import enum import json import logging import re @@ -33,6 +34,15 @@ event = event_logger.get_event_logger() +@enum.unique +class LivepatchSupport(enum.Enum): + SUPPORTED = object() + KERNEL_UPGRADE_REQUIRED = object() + KERNEL_EOL = object() + UNSUPPORTED = object() + UNKNOWN = object() + + class LivepatchPatchFixStatus(DataObject): fields = [ Field("name", StringDataValue, required=False, dict_key="Name"), @@ -57,15 +67,18 @@ required=False, 
dict_key="Fixes", ), + Field("version", StringDataValue, required=False, dict_key="Version"), ] def __init__( self, state: Optional[str], fixes: Optional[List[LivepatchPatchFixStatus]], + version: Optional[str], ): self.state = state self.fixes = fixes + self.version = version class LivepatchStatusStatus(DataObject): @@ -119,7 +132,9 @@ return None try: - out, _ = system.subp([LIVEPATCH_CMD, "status", "--format", "json"]) + out, _ = system.subp( + [LIVEPATCH_CMD, "status", "--verbose", "--format", "json"] + ) except exceptions.ProcessExecutionError: with util.disable_log_to_console(): logging.warning( @@ -155,24 +170,43 @@ return status_root.status[0] +def _convert_str_to_livepatch_support_status( + status_str: Optional[str], +) -> Optional[LivepatchSupport]: + if status_str == "supported": + return LivepatchSupport.SUPPORTED + if status_str == "kernel-upgrade-required": + return LivepatchSupport.KERNEL_UPGRADE_REQUIRED + if status_str == "kernel-end-of-life": + return LivepatchSupport.KERNEL_EOL + if status_str == "unsupported": + return LivepatchSupport.UNSUPPORTED + if status_str == "unknown": + return LivepatchSupport.UNKNOWN + return None + + class UALivepatchClient(serviceclient.UAServiceClient): cfg_url_base_attr = "livepatch_url" api_error_cls = exceptions.UrlError def is_kernel_supported( - self, version: str, flavor: str, arch: str, codename: str - ) -> Optional[bool]: - """ - :returns: True if supported - False if unsupported - None if API returns error or ambiguous response - """ + self, + version: str, + flavor: str, + arch: str, + codename: str, + build_date: Optional[datetime.datetime], + ) -> Optional[LivepatchSupport]: query_params = { "kernel-version": version, "flavour": flavor, "architecture": arch, "codename": codename, + "build-date": build_date.isoformat() + if build_date is not None + else "unknown", } headers = self.headers() try: @@ -195,18 +229,21 @@ ) return None - return bool(result.get("Supported", False)) + api_supported_val = result.get("Supported") + if api_supported_val is None or isinstance(api_supported_val, bool): + # old version, True means supported, None means unsupported + if api_supported_val: + return LivepatchSupport.SUPPORTED + return LivepatchSupport.UNSUPPORTED + # new version, value is a string + return _convert_str_to_livepatch_support_status(api_supported_val) -def _on_supported_kernel_cli() -> Optional[bool]: +def _on_supported_kernel_cli() -> Optional[LivepatchSupport]: lp_status = status() if lp_status is None: return None - if lp_status.supported == "supported": - return True - if lp_status.supported == "unsupported": - return False - return None + return _convert_str_to_livepatch_support_status(lp_status.supported) def _on_supported_kernel_cache( @@ -244,23 +281,33 @@ def _on_supported_kernel_api( - version: str, flavor: str, arch: str, codename: str -) -> Optional[bool]: + version: str, + flavor: str, + arch: str, + codename: str, + build_date: Optional[datetime.datetime], +) -> Optional[LivepatchSupport]: supported = UALivepatchClient().is_kernel_supported( version=version, flavor=flavor, arch=arch, codename=codename, + build_date=build_date, ) - # cache response before returning + # cache response as a bool/None before returning + cache_supported = None + if supported == LivepatchSupport.SUPPORTED: + cache_supported = True + elif supported == LivepatchSupport.UNSUPPORTED: + cache_supported = False state_files.livepatch_support_cache.write( state_files.LivepatchSupportCacheData( version=version, flavor=flavor, arch=arch, 
codename=codename, - supported=supported, + supported=cache_supported, cached_at=datetime.datetime.now(datetime.timezone.utc), ) ) @@ -274,7 +321,7 @@ @lru_cache(maxsize=None) -def on_supported_kernel() -> Optional[bool]: +def on_supported_kernel() -> LivepatchSupport: """ Checks CLI, local cache, and API in that order for kernel support If all checks fail to return an authoritative answer, we return None @@ -297,10 +344,10 @@ "unable to determine enough kernel information to " "check livepatch support" ) - return None + return LivepatchSupport.UNKNOWN - arch = util.standardize_arch_name(system.get_dpkg_arch()) - codename = system.get_platform_info()["series"] + arch = util.standardize_arch_name(kernel_info.uname_machine_arch) + codename = system.get_release_info().series lp_api_kernel_ver = "{major}.{minor}".format( major=kernel_info.major, minor=kernel_info.minor @@ -312,13 +359,25 @@ ) if is_cache_valid: logging.debug("using livepatch support cache") - return cache_says + if cache_says is None: + return LivepatchSupport.UNKNOWN + if cache_says: + return LivepatchSupport.SUPPORTED + if not cache_says: + return LivepatchSupport.UNSUPPORTED # finally check api logging.debug("using livepatch support api") - return _on_supported_kernel_api( - lp_api_kernel_ver, kernel_info.flavor, arch, codename + api_says = _on_supported_kernel_api( + lp_api_kernel_ver, + kernel_info.flavor, + arch, + codename, + kernel_info.build_date, ) + if api_says is None: + return LivepatchSupport.UNKNOWN + return api_says def unconfigure_livepatch_proxy( diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/log.py ubuntu-advantage-tools-28.1~18.04/uaclient/log.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/log.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/log.py 2023-05-30 19:02:35.000000000 +0000 @@ -1,9 +1,10 @@ import json import logging +import os from collections import OrderedDict -from typing import Any, Dict # noqa: F401 +from typing import Any, Dict, List # noqa: F401 -from uaclient import util +from uaclient import defaults, system, util class RedactionFilter(logging.Filter): @@ -58,3 +59,28 @@ local_log_record["extra"] = extra_message_dict return json.dumps(list(local_log_record.values())) + + +def get_user_log_file() -> str: + """Gets the correct user log_file storage location""" + return system.get_user_cache_dir() + "/ubuntu-pro.log" + + +def get_all_user_log_files() -> List[str]: + """Gets all the log files for the users in the system + + Returns a list of all user log files in their home directories. + """ + user_directories = os.listdir("/home") + log_files = [] + for user_directory in user_directories: + user_path = ( + "/home/" + + user_directory + + "/.cache/" + + defaults.USER_CACHE_SUBDIR + + "/ubuntu-pro.log" + ) + if os.path.isfile(user_path): + log_files.append(user_path) + return log_files diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/messages.py ubuntu-advantage-tools-28.1~18.04/uaclient/messages.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/messages.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/messages.py 2023-06-02 19:38:08.000000000 +0000 @@ -88,10 +88,14 @@ "For easiest security on {title}, use Ubuntu Pro." " https://ubuntu.com/{cloud}/pro." ) -SECURITY_ISSUE_RESOLVED = OKGREEN_CHECK + " {issue} is resolved." -SECURITY_ISSUE_NOT_RESOLVED = FAIL_X + " {issue} is not resolved." +SECURITY_ISSUE_RESOLVED = OKGREEN_CHECK + " {issue}{extra_info} is resolved." 
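# A minimal sketch of the support-status normalization added in the livepatch.py
# hunk above: status strings reported by canonical-livepatch (or by the API's
# "Supported" field) are mapped onto the new LivepatchSupport enum, and anything
# unrecognized yields None. The dict-based helper below is an equivalent rewrite
# of the if/elif chain in _convert_str_to_livepatch_support_status, shown only
# for illustration; the enum members mirror the ones added above.
import enum
from typing import Optional


@enum.unique
class LivepatchSupport(enum.Enum):
    SUPPORTED = object()
    KERNEL_UPGRADE_REQUIRED = object()
    KERNEL_EOL = object()
    UNSUPPORTED = object()
    UNKNOWN = object()


_STATUS_STRINGS = {
    "supported": LivepatchSupport.SUPPORTED,
    "kernel-upgrade-required": LivepatchSupport.KERNEL_UPGRADE_REQUIRED,
    "kernel-end-of-life": LivepatchSupport.KERNEL_EOL,
    "unsupported": LivepatchSupport.UNSUPPORTED,
    "unknown": LivepatchSupport.UNKNOWN,
}


def to_support_status(status_str: Optional[str]) -> Optional[LivepatchSupport]:
    if status_str is None:
        return None
    return _STATUS_STRINGS.get(status_str)


assert to_support_status("kernel-end-of-life") is LivepatchSupport.KERNEL_EOL
assert to_support_status("something-else") is None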
+SECURITY_ISSUE_NOT_RESOLVED = FAIL_X + " {issue}{extra_info} is not resolved." SECURITY_ISSUE_UNAFFECTED = ( - OKGREEN_CHECK + " {issue} does not affect your system." + OKGREEN_CHECK + " {issue}{extra_info} does not affect your system." +) +SECURITY_PKG_STILL_AFFECTED = FormattedNamedMessage( + "security-pkg-still-affected", + "{num_pkgs} package{s} {verb} still affected: {pkgs}", ) SECURITY_AFFECTED_PKGS = ( "{count} affected source package{plural_str} installed" @@ -135,6 +139,47 @@ SECURITY_UA_SERVICE_NOT_ENTITLED = """\ Error: The current Ubuntu Pro subscription is not entitled to: {service}. Without it, we cannot fix the system.""" +SECURITY_UA_SERVICE_REQUIRED = """\ +{service} is required for upgrade.""" +SECURITY_UA_SERVICE_WITH_EXPIRED_SUB = """\ +{service} is required for upgrade, but current subscription is expired.""" +SECURITY_UA_SERVICE_NOT_ENABLED_SHORT = """\ +{service} is required for upgrade, but it is not enabled.""" +SECURITY_UA_APT_FAILURE = """\ +APT failed to install the package. +""" +SECURITY_CVE_STATUS_NEEDED = """\ +Sorry, no fix is available yet.""" +SECURITY_CVE_STATUS_TRIAGE = """\ +Ubuntu security engineers are investigating this issue.""" +SECURITY_CVE_STATUS_PENDING = """\ +A fix is coming soon. Try again tomorrow.""" +SECURITY_CVE_STATUS_IGNORED = """\ +Sorry, no fix is available.""" +SECURITY_CVE_STATUS_DNE = """\ +Source package does not exist on this release.""" +SECURITY_CVE_STATUS_NOT_AFFECTED = """\ +Source package is not affected on this release.""" +SECURITY_CVE_STATUS_UNKNOWN = """\ +UNKNOWN: {status}""" + +SECURITY_FIXING_REQUESTED_USN = """\ +Fixing requested {issue_id}""" +SECURITY_FIXING_RELATED_USNS = """\ +Fixing related USNs:""" +SECURITY_RELATED_USNS = """\ +Found related USNs:\n- {related_usns}""" +SECURITY_USN_SUMMARY = """\ +Summary:""" +SECURITY_RELATED_USN_ERROR = """\ +Even though a related USN failed to be fixed, note +that {issue_id} was fixed. Related USNs do not +affect the original USN. Learn more about the related +USNs, please refer to this page: + +https://canonical-ubuntu-pro-client.readthedocs-hosted.com/en/latest/explanations/cves_and_usns_explained.html#what-are-related-usns +""" # noqa + APT_UPDATING_LISTS = "Updating package lists" DISABLE_FAILED_TMPL = "Could not disable {title}." ACCESS_ENABLED_TMPL = "{title} access enabled" @@ -433,7 +478,7 @@ ) ENABLED_FAILED = FormattedNamedMessage( - "enable-failes", "Could not enable {title}." + "enable-failed", "Could not enable {title}." ) UNENTITLED = FormattedNamedMessage( @@ -476,6 +521,13 @@ Supported platforms are: {supported_arches}.""", ) +INAPPLICABLE_VENDOR_NAME = FormattedNamedMessage( + "inapplicable-vendor-name", + """\ +{title} is not available for CPU vendor {vendor}. 
+Supported CPU vendors are: {supported_vendors}.""", +) + NO_ENTITLEMENT_AFFORDANCES_CHECKED = NamedMessage( "no-entitlement-affordances-checked", "no entitlement affordances checked" ) @@ -799,6 +851,10 @@ "realtime-livepatch-incompatible", "Livepatch is not currently supported for the Real-time kernel.", ) +REALTIME_VARIANT_INCOMPATIBLE = FormattedNamedMessage( + "realtime-variant-incompatible", + "{service} cannot be enabled together with {variant}", +) REALTIME_BETA_FLAG_REQUIRED = NamedMessage( "beta-flag-required", "Use `pro enable realtime-kernel --beta` to acknowledge the real-time" @@ -998,6 +1054,16 @@ pro security-status --help for a list of available options.""" +SS_UPDATE_CALL = """\ + Make sure to run + sudo apt-get update +to get the latest package information from apt.""" +SS_UPDATE_DAYS = ( + "The system apt information was updated {days} day(s) ago." + + SS_UPDATE_CALL +) +SS_UPDATE_UNKNOWN = "The system apt cache may be outdated." + SS_UPDATE_CALL + SS_INTERIM_SUPPORT = "Main/Restricted packages receive updates until {date}." SS_LTS_SUPPORT = """\ This machine is receiving security patching for Ubuntu Main/Restricted @@ -1021,9 +1087,9 @@ SS_SERVICE_ADVERTISE = """\ Ubuntu Pro with '{service}' enabled provides security updates for -{repository} packages until {year}""" +{repository} packages until {year}.""" SS_SERVICE_ADVERTISE_COUNTS = ( - " and has {updates} pending security update{plural}." + " There {verb} {updates} pending security update{plural}." ) SS_SERVICE_ENABLED = """\ @@ -1041,9 +1107,9 @@ url=BASE_UA_URL ) -SS_POLICY_HINT = """\ +SS_SHOW_HINT = """\ For example, run: - apt-cache policy {package} + apt-cache show {package} to learn more about that package.""" SS_NO_THIRD_PARTY = "You have no packages installed from a third party." @@ -1053,11 +1119,10 @@ SS_NO_INTERIM_PRO_SUPPORT = "Ubuntu Pro is not available for non-LTS releases." SS_SERVICE_HELP = "Run 'pro help {service}' to learn more" -SS_BOLD_PACKAGES = """\ -Package names in {bold}bold{end_bold} currently have an available update -with '{{service}}' enabled""".format( - bold=TxtColor.BOLD, end_bold=TxtColor.ENDC -) + +SS_UPDATES_AVAILABLE = "Installed packages with an available {service} update:" +SS_UPDATES_INSTALLED = "Installed packages with an {service} update applied:" +SS_OTHER_PACKAGES = "{prefix} packages covered by {service}:" ENTITLEMENT_NOT_FOUND = FormattedNamedMessage( "entitlement-not-found", @@ -1151,6 +1216,19 @@ "Value provided was not found in {enum_class}'s allowed: value: {values}", ) +LIVEPATCH_KERNEL_UPGRADE_REQUIRED = NamedMessage( + name="livepatch-kernel-upgrade-required", + msg="""\ +The running kernel has reached the end of its active livepatch window. +Please upgrade the kernel with apt and reboot for continued livepatch support.""", # noqa: E501 +) +LIVEPATCH_KERNEL_EOL = FormattedNamedMessage( + name="livepatch-kernel-eol", + msg="""\ +The current kernel ({version}, {arch}) has reached the end of its livepatch support. 
+Supported kernels are listed here: https://ubuntu.com/security/livepatch/docs/kernels +Either switch to a supported kernel or `pro disable livepatch` to dismiss this warning.""", # noqa: E501 +) LIVEPATCH_KERNEL_NOT_SUPPORTED = FormattedNamedMessage( name="livepatch-kernel-not-supported", msg="""\ @@ -1247,3 +1325,43 @@ USER_CONFIG_MIGRATION_WARNING_NEW_UACLIENT_CONF_WRITE = """\ Warning: Failed to migrate /etc/ubuntu-advantage/uaclient.conf Please add following to uaclient.conf to keep your config:""" + +LIVEPATCH_APPLICATION_STATUS_CLIENT_FAILURE = NamedMessage( + "livepatch-client-failure", + "canonical-livepatch status didn't finish successfully", +) + +STATUS_NO_SERVICES_AVAILABLE = ( + """No Ubuntu Pro services are available to this system.""" +) + +STATUS_ALL_HINT = ( + "For a list of all Ubuntu Pro services, run 'pro status --all'" +) +STATUS_SERVICE_HAS_VARIANTS = " * Service has variants" + +STATUS_ALL_HINT_WITH_VARIANTS = """\ +For a list of all Ubuntu Pro services and variants, run 'pro status --all'""" + +SERVICE_DISABLED_MISSING_PACKAGE = FormattedNamedMessage( + "service-disabled-missing-package", + """\ +The {service} service is not enabled because the {package} package is +not installed.""", +) + +INVALID_OPTION_COMBINATION = FormattedNamedMessage( + "invalid-option-combination", + "Error: Cannot use {option1} together with {option2}.", +) + +PRO_HELP_SERVICE_INFO = NamedMessage( + "pro-help-service-info", + "Use pro help to get more details about each service", +) + +WARNING_HUMAN_READABLE_OUTPUT = """\ +WARNING: this output is intended to be human readable, and subject to change. +In scripts, prefer using machine readable data from the `pro api` command, +or use `pro {command} --format json`. +""" diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/pip.py ubuntu-advantage-tools-28.1~18.04/uaclient/pip.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/pip.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/pip.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -import os -from configparser import ConfigParser - -PIP_CONFIG_FILE = "/etc/pip.conf" - - -def update_pip_conf(pip_config_dict): - """ - Update pip.conf file on /etc/ with the required configurations - for enabling a service. 
- - :param pip_config_dict: - A dictionaty representing a valid pip config - """ - new_conf_parser = ConfigParser() - new_conf_parser.read_dict(pip_config_dict) - - if os.path.exists(PIP_CONFIG_FILE): - existing_conf_parser = ConfigParser() - with open(PIP_CONFIG_FILE, "r") as f: - existing_conf_parser.read_file(f) - - existing_conf_parser.update(new_conf_parser) - new_conf_parser = existing_conf_parser - - with open(PIP_CONFIG_FILE, "w") as f: - new_conf_parser.write(f) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/security.py ubuntu-advantage-tools-28.1~18.04/uaclient/security.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/security.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/security.py 2023-06-27 00:49:37.000000000 +0000 @@ -1,13 +1,20 @@ import copy import enum -import json import socket import textwrap from collections import defaultdict from datetime import datetime from typing import Any, Dict, List, NamedTuple, Optional, Set, Tuple -from uaclient import apt, exceptions, messages, serviceclient, system, util +from uaclient import ( + apt, + exceptions, + livepatch, + messages, + serviceclient, + system, + util, +) from uaclient.api.u.pro.attach.magic.initiate.v1 import _initiate from uaclient.api.u.pro.attach.magic.revoke.v1 import ( MagicAttachRevokeOptions, @@ -47,11 +54,20 @@ UA_APPS_POCKET = "Ubuntu Pro: ESM Apps" +UnfixedPackage = NamedTuple( + "UnfixedPackage", + [ + ("pkg", str), + ("unfixed_reason", str), + ], +) + + ReleasedPackagesInstallResult = NamedTuple( "ReleasedPackagesInstallResult", [ ("fix_status", bool), - ("unfixed_pkgs", Set[str]), + ("unfixed_pkgs", List[UnfixedPackage]), ("installed_pkgs", Set[str]), ("all_already_installed", bool), ], @@ -68,15 +84,41 @@ ) -@enum.unique +UpgradeResult = NamedTuple( + "UpgradeResult", + [ + ("status", bool), + ("failure_reason", Optional[str]), + ], +) + + class FixStatus(enum.Enum): """ An enum to represent the system status after fix operation """ - SYSTEM_NON_VULNERABLE = 0 - SYSTEM_STILL_VULNERABLE = 1 - SYSTEM_VULNERABLE_UNTIL_REBOOT = 2 + class _Value: + def __init__(self, value): + self.value = value + + SYSTEM_NON_VULNERABLE = _Value(0) + SYSTEM_NOT_AFFECTED = _Value(0) + SYSTEM_STILL_VULNERABLE = _Value(1) + SYSTEM_VULNERABLE_UNTIL_REBOOT = _Value(2) + + @property + def exit_code(self): + return self.value.value + + +FixResult = NamedTuple( + "FixResult", + [ + ("status", FixStatus), + ("unfixed_pkgs", Optional[List[UnfixedPackage]]), + ], +) class UASecurityClient(serviceclient.UAServiceClient): @@ -227,22 +269,22 @@ @property def status_message(self): if self.status == "needed": - return "Sorry, no fix is available yet." + return messages.SECURITY_CVE_STATUS_NEEDED elif self.status == "needs-triage": - return "Ubuntu security engineers are investigating this issue." + return messages.SECURITY_CVE_STATUS_TRIAGE elif self.status == "pending": - return "A fix is coming soon. Try again tomorrow." + return messages.SECURITY_CVE_STATUS_PENDING elif self.status in ("ignored", "deferred"): - return "Sorry, no fix is available." + return messages.SECURITY_CVE_STATUS_IGNORED elif self.status == "DNE": - return "Source package does not exist on this release." + return messages.SECURITY_CVE_STATUS_DNE elif self.status == "not-affected": - return "Source package is not affected on this release." 
+ return messages.SECURITY_CVE_STATUS_NOT_AFFECTED elif self.status == "released": return messages.SECURITY_FIX_RELEASE_STREAM.format( fix_stream=self.pocket_source ) - return "UNKNOWN: {}".format(self.status) + return messages.SECURITY_CVE_STATUS_UNKNOWN.format(status=self.status) @property def requires_ua(self) -> bool: @@ -331,7 +373,7 @@ if hasattr(self, "_packages_status"): return self._packages_status # type: ignore self._packages_status = {} - series = system.get_platform_info()["series"] + series = system.get_release_info().series for package in self.response["packages"]: for pkg_status in package["statuses"]: if pkg_status["release_codename"] == series: @@ -419,7 +461,7 @@ """ if hasattr(self, "_release_packages"): return self._release_packages - series = system.get_platform_info()["series"] + series = system.get_release_info().series self._release_packages = {} # type: Dict[str, Dict[str, Any]] # Organize source and binary packages under a common source package key for pkg in self.response.get("release_packages", {}).get(series, []): @@ -552,14 +594,15 @@ """ # If the usn does not have any associated cves on it, - # we must consider that only the current usn should be - # evaluated. + # we cannot establish a relation between USNs if not usn.cves: - return [usn] + return [] related_usns = {} for cve in usn.cves: for related_usn_id in cve.notices_ids: + if related_usn_id == usn.id: + continue if related_usn_id not in related_usns: related_usns[related_usn_id] = client.get_notice( notice_id=related_usn_id @@ -568,8 +611,167 @@ return list(sorted(related_usns.values(), key=lambda x: x.id)) +def _check_cve_fixed_by_livepatch( + issue_id: str, +) -> Tuple[Optional[FixStatus], Optional[str]]: + # Check livepatch status for CVE in fixes before checking CVE api + lp_status = livepatch.status() + if ( + lp_status is not None + and lp_status.livepatch is not None + and lp_status.livepatch.fixes is not None + ): + for fix in lp_status.livepatch.fixes: + if fix.name == issue_id.lower() and fix.patched: + version = lp_status.livepatch.version or "N/A" + return (FixStatus.SYSTEM_NON_VULNERABLE, version) + + return (None, None) + + +def _fix_cve( + cve: CVE, + usns: List[USN], + issue_id: str, + installed_packages: Dict[str, Dict[str, str]], + cfg: UAConfig, + beta_pockets: Dict[str, bool], + dry_run: bool, +) -> FixStatus: + affected_pkg_status = get_cve_affected_source_packages_status( + cve=cve, installed_packages=installed_packages + ) + usn_released_pkgs = merge_usn_released_binary_package_versions( + usns, beta_pockets + ) + + print() + return prompt_for_affected_packages( + cfg=cfg, + issue_id=issue_id, + affected_pkg_status=affected_pkg_status, + installed_packages=installed_packages, + usn_released_pkgs=usn_released_pkgs, + dry_run=dry_run, + ).status + + +def _fix_usn( + usn: USN, + related_usns: List[USN], + issue_id: str, + installed_packages: Dict[str, Dict[str, str]], + cfg: UAConfig, + beta_pockets: Dict[str, bool], + dry_run: bool, + no_related: bool, +) -> FixStatus: + # We should only highlight the target USN if we have related USNs to fix + print( + "\n" + messages.SECURITY_FIXING_REQUESTED_USN.format(issue_id=issue_id) + ) + + affected_pkg_status = get_affected_packages_from_usn( + usn=usn, installed_packages=installed_packages + ) + usn_released_pkgs = merge_usn_released_binary_package_versions( + [usn], beta_pockets + ) + target_fix_status, _ = prompt_for_affected_packages( + cfg=cfg, + issue_id=issue_id, + affected_pkg_status=affected_pkg_status, + 
installed_packages=installed_packages, + usn_released_pkgs=usn_released_pkgs, + dry_run=dry_run, + ) + + if target_fix_status not in ( + FixStatus.SYSTEM_NON_VULNERABLE, + FixStatus.SYSTEM_NOT_AFFECTED, + ): + return target_fix_status + + if not related_usns or no_related: + return target_fix_status + + print( + "\n" + + messages.SECURITY_RELATED_USNS.format( + related_usns="\n- ".join(usn.id for usn in related_usns) + ) + ) + + print("\n" + messages.SECURITY_FIXING_RELATED_USNS) + related_usn_status = {} # type: Dict[str, FixResult] + for related_usn in related_usns: + print("- {}".format(related_usn.id)) + affected_pkg_status = get_affected_packages_from_usn( + usn=related_usn, installed_packages=installed_packages + ) + usn_released_pkgs = merge_usn_released_binary_package_versions( + [related_usn], beta_pockets + ) + + related_fix_status = prompt_for_affected_packages( + cfg=cfg, + issue_id=related_usn.id, + affected_pkg_status=affected_pkg_status, + installed_packages=installed_packages, + usn_released_pkgs=usn_released_pkgs, + dry_run=dry_run, + ) + + related_usn_status[related_usn.id] = related_fix_status + print() + + print(messages.SECURITY_USN_SUMMARY) + _handle_fix_status_message( + target_fix_status, issue_id, extra_info=" [requested]" + ) + + failure_on_related_usn = False + for related_usn in related_usns: + status = related_usn_status[related_usn.id].status + _handle_fix_status_message( + status, related_usn.id, extra_info=" [related]" + ) + + if status == FixStatus.SYSTEM_VULNERABLE_UNTIL_REBOOT: + print( + "- " + + messages.ENABLE_REBOOT_REQUIRED_TMPL.format( + operation="fix operation" + ) + ) + failure_on_related_usn = True + if status == FixStatus.SYSTEM_STILL_VULNERABLE: + unfixed_pkgs = ( + related_usn_status[related_usn.id].unfixed_pkgs or [] + ) + for unfixed_pkg in unfixed_pkgs: + if unfixed_pkg.unfixed_reason: + print( + " - {}: {}".format( + unfixed_pkg.pkg, unfixed_pkg.unfixed_reason + ) + ) + failure_on_related_usn = True + + if failure_on_related_usn: + print( + "\n" + + messages.SECURITY_RELATED_USN_ERROR.format(issue_id=issue_id) + ) + + return target_fix_status + + def fix_security_issue_id( - cfg: UAConfig, issue_id: str, dry_run: bool = False + cfg: UAConfig, + issue_id: str, + dry_run: bool = False, + no_related: bool = False, ) -> FixStatus: if dry_run: print(messages.SECURITY_DRY_RUN_WARNING) @@ -585,76 +787,53 @@ } if "CVE" in issue_id: - # Check livepatch status for CVE in fixes before checking CVE api - status_stdout = None - try: - status_stdout, _ = system.subp( - [ - "canonical-livepatch", - "status", - "--verbose", - "--format=json", - ] - ) - except exceptions.ProcessExecutionError: - pass - if status_stdout: - try: - parsed_patch = json.loads(status_stdout)["Status"][0][ - "Livepatch" - ] - - if parsed_patch: - fixes = parsed_patch.get("Fixes", []) - if any( - fix["Name"] == issue_id.lower() and fix["Patched"] - for fix in fixes - ): - print( - messages.CVE_FIXED_BY_LIVEPATCH.format( - issue=issue_id, - version=parsed_patch.get("Version", "N/A"), - ) - ) - return FixStatus.SYSTEM_NON_VULNERABLE - except (ValueError, KeyError, IndexError): - pass + livepatch_cve_status, patch_version = _check_cve_fixed_by_livepatch( + issue_id + ) + + if livepatch_cve_status: + print( + messages.CVE_FIXED_BY_LIVEPATCH.format( + issue=issue_id, + version=patch_version, + ) + ) + return livepatch_cve_status try: cve = client.get_cve(cve_id=issue_id) usns = client.get_notices(details=issue_id) except exceptions.SecurityAPIError as e: msg = str(e) - if "not found" 
in msg.lower(): + if e.code == 404: msg = messages.SECURITY_FIX_NOT_FOUND_ISSUE.format( issue_id=issue_id ) raise exceptions.UserFacingError(msg) - affected_pkg_status = get_cve_affected_source_packages_status( - cve=cve, installed_packages=installed_packages - ) print(cve.get_url_header()) - usn_released_pkgs = merge_usn_released_binary_package_versions( - usns, beta_pockets + return _fix_cve( + cve=cve, + usns=usns, + issue_id=issue_id, + installed_packages=installed_packages, + cfg=cfg, + beta_pockets=beta_pockets, + dry_run=dry_run, ) + else: # USN try: usn = client.get_notice(notice_id=issue_id) usns = get_related_usns(usn, client) except exceptions.SecurityAPIError as e: msg = str(e) - if "not found" in msg.lower(): + if e.code == 404: msg = messages.SECURITY_FIX_NOT_FOUND_ISSUE.format( issue_id=issue_id ) raise exceptions.UserFacingError(msg) - affected_pkg_status = get_usn_affected_packages_status( - usn=usn, installed_packages=installed_packages - ) - usn_released_pkgs = merge_usn_released_binary_package_versions( - usns, beta_pockets - ) + print(usn.get_url_header()) if not usn.response["release_packages"]: # Since usn.release_packages filters to our current release only @@ -665,14 +844,17 @@ ), issue_id=issue_id, ) - return prompt_for_affected_packages( - cfg=cfg, - issue_id=issue_id, - affected_pkg_status=affected_pkg_status, - installed_packages=installed_packages, - usn_released_pkgs=usn_released_pkgs, - dry_run=dry_run, - ) + + return _fix_usn( + usn=usn, + related_usns=usns, + issue_id=issue_id, + installed_packages=installed_packages, + cfg=cfg, + beta_pockets=beta_pockets, + dry_run=dry_run, + no_related=no_related, + ) def get_affected_packages_from_cves(cves, installed_packages): @@ -703,7 +885,7 @@ cve_response = defaultdict(str) cve_response["status"] = "released" # Here we are assuming that the pocket will be the same one across - # the different binary packages. + # all the different binary packages. all_pockets = { pkg_bin_info["pocket"] for _, pkg_bin_info in pkg_info.items() @@ -766,13 +948,17 @@ count = len(affected_pkg_status) if count == 0: print( - "\n" - + messages.SECURITY_AFFECTED_PKGS.format( + messages.SECURITY_AFFECTED_PKGS.format( count="No", plural_str="s are" ) + "." 
) - print("\n" + messages.SECURITY_ISSUE_UNAFFECTED.format(issue=issue_id)) + print( + "\n" + + messages.SECURITY_ISSUE_UNAFFECTED.format( + issue=issue_id, extra_info="" + ) + ) return if count == 1: @@ -780,8 +966,7 @@ else: plural_str = "s are" msg = ( - "\n" - + messages.SECURITY_AFFECTED_PKGS.format( + messages.SECURITY_AFFECTED_PKGS.format( count=count, plural_str=plural_str ) + ": " @@ -899,6 +1084,43 @@ return False +def _handle_fix_status_message( + status: FixStatus, issue_id: str, extra_info: str = "" +): + if status == FixStatus.SYSTEM_NON_VULNERABLE: + print( + util.handle_unicode_characters( + messages.SECURITY_ISSUE_RESOLVED.format( + issue=issue_id, extra_info=extra_info + ) + ) + ) + elif status == FixStatus.SYSTEM_NOT_AFFECTED: + print( + util.handle_unicode_characters( + messages.SECURITY_ISSUE_UNAFFECTED.format( + issue=issue_id, extra_info=extra_info + ) + ) + ) + elif status == FixStatus.SYSTEM_VULNERABLE_UNTIL_REBOOT: + print( + util.handle_unicode_characters( + messages.SECURITY_ISSUE_NOT_RESOLVED.format( + issue=issue_id, extra_info=extra_info + ) + ) + ) + else: + print( + util.handle_unicode_characters( + messages.SECURITY_ISSUE_NOT_RESOLVED.format( + issue=issue_id, extra_info=extra_info + ) + ) + ) + + def _handle_released_package_fixes( cfg: UAConfig, src_pocket_pkgs: Dict[str, List[Tuple[str, CVEPackageStatus]]], @@ -916,7 +1138,7 @@ """ all_already_installed = True upgrade_status = True - unfixed_pkgs = set() + unfixed_pkgs = [] # type: List[UnfixedPackage] installed_pkgs = set() # type: Set[str] if src_pocket_pkgs: for pocket in [ @@ -926,6 +1148,9 @@ ]: pkg_src_group = src_pocket_pkgs[pocket] binary_pkgs = binary_pocket_pkgs[pocket] + failure_msg = messages.SECURITY_UA_SERVICE_REQUIRED.format( + service=pocket + ) if upgrade_status: msg = _format_packages_message( @@ -947,34 +1172,50 @@ all_already_installed = False upgrade_pkgs = [] - for binary_pkg in binary_pkgs: + for binary_pkg in sorted(binary_pkgs): + check_esm_cache = pocket != UBUNTU_STANDARD_UPDATES_POCKET candidate_version = apt.get_pkg_candidate_version( - binary_pkg.binary_pkg + binary_pkg.binary_pkg, check_esm_cache=check_esm_cache ) if candidate_version and apt.compare_versions( binary_pkg.fixed_version, candidate_version, "le" ): upgrade_pkgs.append(binary_pkg.binary_pkg) else: - print( - "- " - + messages.FIX_CANNOT_INSTALL_PACKAGE.format( + unfixed_reason = ( + messages.FIX_CANNOT_INSTALL_PACKAGE.format( package=binary_pkg.binary_pkg, version=binary_pkg.fixed_version, ).msg ) - unfixed_pkgs.add(binary_pkg.source_pkg) + print("- " + unfixed_reason) + unfixed_pkgs.append( + UnfixedPackage( + pkg=binary_pkg.source_pkg, + unfixed_reason=unfixed_reason, + ) + ) pkg_index += len(pkg_src_group) - upgrade_status &= upgrade_packages_and_attach( + upgrade_result = upgrade_packages_and_attach( cfg=cfg, upgrade_pkgs=upgrade_pkgs, pocket=pocket, dry_run=dry_run, ) + upgrade_status &= upgrade_result.status + failure_msg = upgrade_result.failure_reason or "" if not upgrade_status: - unfixed_pkgs.update([src_pkg for src_pkg, _ in pkg_src_group]) + unfixed_pkgs.extend( + [ + UnfixedPackage( + pkg=src_pkg, + unfixed_reason=failure_msg, + ) + for src_pkg, _ in pkg_src_group + ] + ) else: installed_pkgs.update( binary_pkg.binary_pkg for binary_pkg in binary_pkgs @@ -988,20 +1229,21 @@ ) -def _format_unfixed_packages_msg(unfixed_pkgs: List[str]) -> str: +def _format_unfixed_packages_msg(unfixed_pkgs: List[UnfixedPackage]) -> str: """Format the list of unfixed packages into an message. 
:returns: A string containing the message output for the unfixed packages. """ - num_pkgs_unfixed = len(unfixed_pkgs) + sorted_pkgs = sorted({pkg.pkg for pkg in unfixed_pkgs}) + num_pkgs_unfixed = len(sorted_pkgs) return textwrap.fill( - "{} package{} {} still affected: {}".format( - num_pkgs_unfixed, - "s" if num_pkgs_unfixed > 1 else "", - "are" if num_pkgs_unfixed > 1 else "is", - ", ".join(sorted(unfixed_pkgs)), - ), + messages.SECURITY_PKG_STILL_AFFECTED.format( + num_pkgs=num_pkgs_unfixed, + s="s" if num_pkgs_unfixed > 1 else "", + verb="are" if num_pkgs_unfixed > 1 else "is", + pkgs=", ".join(sorted_pkgs), + ).msg, width=PRINT_WRAP_WIDTH, subsequent_indent=" ", ) @@ -1014,7 +1256,7 @@ installed_packages: Dict[str, Dict[str, str]], usn_released_pkgs: Dict[str, Dict[str, Dict[str, str]]], dry_run: bool, -) -> FixStatus: +) -> FixResult: """Process security CVE dict returning a CVEStatus object. Since CVEs point to a USN if active, get_notice may be called to fill in @@ -1026,8 +1268,9 @@ count = len(affected_pkg_status) print_affected_packages_header(issue_id, affected_pkg_status) if count == 0: - return FixStatus.SYSTEM_NON_VULNERABLE - fix_message = messages.SECURITY_ISSUE_RESOLVED.format(issue=issue_id) + return FixResult( + status=FixStatus.SYSTEM_NOT_AFFECTED, unfixed_pkgs=None + ) src_pocket_pkgs = defaultdict(list) binary_pocket_pkgs = defaultdict(list) pkg_index = 0 @@ -1036,12 +1279,10 @@ affected_pkg_status, usn_released_pkgs ) - unfixed_pkgs = [] + unfixed_pkgs = [] # type: List[UnfixedPackage] for status_value, pkg_status_group in sorted(pkg_status_groups.items()): if status_value != "released": - fix_message = messages.SECURITY_ISSUE_NOT_RESOLVED.format( - issue=issue_id - ) + fix_result = FixStatus.SYSTEM_NON_VULNERABLE print( _format_packages_message( pkg_status_list=pkg_status_group, @@ -1050,7 +1291,11 @@ ) ) pkg_index += len(pkg_status_group) - unfixed_pkgs += [src_pkg for src_pkg, _ in pkg_status_group] + status_msg = pkg_status_group[0][1].status_message + unfixed_pkgs += [ + UnfixedPackage(pkg=src_pkg, unfixed_reason=status_msg) + for src_pkg, _ in pkg_status_group + ] else: for src_pkg, pkg_status in pkg_status_group: src_pocket_pkgs[pkg_status.pocket_source].append( @@ -1059,21 +1304,11 @@ for binary_pkg, version in installed_packages[src_pkg].items(): usn_released_src = usn_released_pkgs.get(src_pkg, {}) if binary_pkg not in usn_released_src: - unfixed_pkgs += [ - src_pkg for src_pkg, _ in pkg_status_group - ] - msg = ( - "{issue} metadata defines no fixed version for" - " {pkg}.\n".format(pkg=binary_pkg, issue=issue_id) - ) - - msg += _format_unfixed_packages_msg(unfixed_pkgs) - raise exceptions.SecurityAPIMetadataError( - msg, issue_id - ) + continue fixed_version = usn_released_src.get(binary_pkg, {}).get( "version", "" ) + if not apt.compare_versions(fixed_version, version, "le"): binary_pocket_pkgs[pkg_status.pocket_source].append( BinaryPackageFix( @@ -1097,9 +1332,6 @@ print() if unfixed_pkgs: print(_format_unfixed_packages_msg(unfixed_pkgs)) - fix_message = messages.SECURITY_ISSUE_NOT_RESOLVED.format( - issue=issue_id - ) if released_pkgs_install_result.fix_status: # fix_status is True if either: @@ -1108,8 +1340,7 @@ # In case (2), then all_already_installed is also True if released_pkgs_install_result.all_already_installed: # we didn't install any packages, so we're good - print(util.handle_unicode_characters(fix_message)) - return ( + fix_result = ( FixStatus.SYSTEM_STILL_VULNERABLE if unfixed_pkgs else FixStatus.SYSTEM_NON_VULNERABLE @@ -1128,12 
+1359,7 @@ Notice.ENABLE_REBOOT_REQUIRED, operation="fix operation", ) - print( - util.handle_unicode_characters( - messages.SECURITY_ISSUE_NOT_RESOLVED.format(issue=issue_id) - ) - ) - return ( + fix_result = ( FixStatus.SYSTEM_STILL_VULNERABLE if unfixed_pkgs else FixStatus.SYSTEM_VULNERABLE_UNTIL_REBOOT @@ -1141,19 +1367,19 @@ else: # we successfully installed some packages, and the system # reboot-required flag is not set, so we're good - print(util.handle_unicode_characters(fix_message)) - return ( + fix_result = ( FixStatus.SYSTEM_STILL_VULNERABLE if unfixed_pkgs else FixStatus.SYSTEM_NON_VULNERABLE ) else: - print( - util.handle_unicode_characters( - messages.SECURITY_ISSUE_NOT_RESOLVED.format(issue=issue_id) - ) - ) - return FixStatus.SYSTEM_STILL_VULNERABLE + fix_result = FixStatus.SYSTEM_STILL_VULNERABLE + + _handle_fix_status_message(fix_result, issue_id) + return FixResult( + status=fix_result, + unfixed_pkgs=unfixed_pkgs, + ) def _inform_ubuntu_pro_existence_if_applicable() -> None: @@ -1382,7 +1608,7 @@ def upgrade_packages_and_attach( cfg: UAConfig, upgrade_pkgs: List[str], pocket: str, dry_run: bool -) -> bool: +) -> UpgradeResult: """Upgrade available packages to fix a CVE. Upgrade all packages in upgrades_packages and, if necessary, @@ -1391,13 +1617,14 @@ :return: True if package upgrade completed or unneeded, False otherwise. """ if not upgrade_pkgs: - return True + return UpgradeResult(status=True, failure_reason=None) # If we are running on --dry-run mode, we don't need to be root # to understand what will happen with the system if not util.we_are_currently_root() and not dry_run: - print(messages.SECURITY_APT_NON_ROOT) - return False + msg = messages.SECURITY_APT_NON_ROOT + print(msg) + return UpgradeResult(status=False, failure_reason=msg) if pocket != UBUNTU_STANDARD_UPDATES_POCKET: # We are now using status-cache because non-root users won't @@ -1407,15 +1634,30 @@ status_cache = cfg.read_cache("status-cache") or {} if not status_cache.get("attached", False): if not _check_attached(cfg, dry_run): - return False + return UpgradeResult( + status=False, + failure_reason=messages.SECURITY_UA_SERVICE_REQUIRED.format( # noqa + service=pocket + ), + ) elif _check_subscription_is_expired( status_cache=status_cache, cfg=cfg, dry_run=dry_run ): - return False + return UpgradeResult( + status=False, + failure_reason=messages.SECURITY_UA_SERVICE_WITH_EXPIRED_SUB.format( # noqa + service=pocket + ), + ) if not _check_subscription_for_required_service(pocket, cfg, dry_run): # User subscription does not have required service enabled - return False + return UpgradeResult( + status=False, + failure_reason=messages.SECURITY_UA_SERVICE_NOT_ENABLED_SHORT.format( # noqa + service=pocket + ), + ) print( colorize_commands( @@ -1433,11 +1675,13 @@ apt.run_apt_command( cmd=["apt-get", "install", "--only-upgrade", "-y"] + upgrade_pkgs, - env={"DEBIAN_FRONTEND": "noninteractive"}, + override_env_vars={"DEBIAN_FRONTEND": "noninteractive"}, ) except Exception as e: msg = getattr(e, "msg", str(e)) print(msg.strip()) - return False + return UpgradeResult( + status=False, failure_reason=messages.SECURITY_UA_APT_FAILURE + ) - return True + return UpgradeResult(status=True, failure_reason=None) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/security_status.py ubuntu-advantage-tools-28.1~18.04/uaclient/security_status.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/security_status.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/security_status.py 
2023-06-01 18:49:33.000000000 +0000 @@ -1,6 +1,7 @@ import re import textwrap from collections import defaultdict +from datetime import datetime, timezone from enum import Enum from functools import lru_cache from random import choice @@ -9,19 +10,25 @@ import apt # type: ignore from uaclient import livepatch, messages -from uaclient.apt import PreserveAptCfg, get_apt_cache, get_esm_cache +from uaclient.api.u.pro.status.is_attached.v1 import _is_attached +from uaclient.apt import ( + PreserveAptCfg, + get_apt_cache, + get_apt_cache_datetime, + get_esm_cache, +) from uaclient.config import UAConfig from uaclient.entitlements import ESMAppsEntitlement, ESMInfraEntitlement from uaclient.entitlements.entitlement_status import ( ApplicabilityStatus, ApplicationStatus, + ContractStatus, ) -from uaclient.status import status from uaclient.system import ( REBOOT_PKGS_FILE_PATH, get_distro_info, get_kernel_info, - get_platform_info, + get_release_info, is_current_series_lts, is_supported, load_file, @@ -47,7 +54,7 @@ @lru_cache(maxsize=None) def get_origin_information_to_service_map(): - series = get_platform_info()["series"] + series = get_release_info().series return { ("Ubuntu", "{}-security".format(series)): "standard-security", ("UbuntuESMApps", "{}-apps-security".format(series)): "esm-apps", @@ -186,21 +193,32 @@ def get_ua_info(cfg: UAConfig) -> Dict[str, Any]: """Returns the Pro information based on the config object.""" + is_attached = _is_attached(cfg).is_attached ua_info = { - "attached": False, + "attached": is_attached, "enabled_services": [], "entitled_services": [], } # type: Dict[str, Any] - status_dict = status(cfg=cfg, show_all=True) - if status_dict["attached"]: - ua_info["attached"] = True - for service in status_dict["services"]: - if service["name"] in ESM_SERVICES: - if service["entitled"] == "yes": - ua_info["entitled_services"].append(service["name"]) - if service["status"] == "enabled": - ua_info["enabled_services"].append(service["name"]) + if is_attached: + infra_entitlement = ESMInfraEntitlement(cfg) + apps_entitlement = ESMAppsEntitlement(cfg) + + if apps_entitlement.contract_status() == ContractStatus.ENTITLED: + ua_info["entitled_services"].append("esm-apps") + if ( + apps_entitlement.application_status()[0] + == ApplicationStatus.ENABLED + ): + ua_info["enabled_services"].append("esm-apps") + + if infra_entitlement.contract_status() == ContractStatus.ENTITLED: + ua_info["entitled_services"].append("esm-infra") + if ( + infra_entitlement.application_status()[0] + == ApplicationStatus.ENABLED + ): + ua_info["enabled_services"].append("esm-infra") return ua_info @@ -263,7 +281,11 @@ and our_kernel_version is not None and our_kernel_version == lp_status.kernel and lp_status.livepatch is not None - and lp_status.livepatch.state == "applied" + and ( + lp_status.livepatch.state == "applied" + or lp_status.livepatch.state == "nothing-to-apply" + ) + and lp_status.supported == "supported" ): return RebootStatus.REBOOT_REQUIRED_LIVEPATCH_APPLIED @@ -414,7 +436,7 @@ def _print_interim_release_support(): - series = get_platform_info()["series"] + series = get_release_info().series eol_date = get_distro_info(series).eol date = "{}/{}".format(str(eol_date.month), str(eol_date.year)) print(messages.SS_INTERIM_SUPPORT.format(date=date)) @@ -422,7 +444,7 @@ def _print_lts_support(): - series = get_platform_info()["series"] + series = get_release_info().series if is_supported(series): eol_date = get_distro_info(series).eol 
print(messages.SS_LTS_SUPPORT.format(date=str(eol_date.year))) @@ -439,7 +461,7 @@ available_updates: int, is_attached: bool, ): - series = get_platform_info()["series"] + series = get_release_info().series eol_date_esm = get_distro_info(series).eol_esm if service_status == ApplicationStatus.ENABLED: message = messages.SS_SERVICE_ENABLED.format( @@ -447,60 +469,46 @@ service=service, year=str(eol_date_esm.year), ) - if installed_updates: - message += messages.SS_SERVICE_ENABLED_COUNTS.format( - updates=installed_updates, - plural="" if installed_updates == 1 else "s", - ) - print(message) else: message = messages.SS_SERVICE_ADVERTISE.format( service=service, repository=repository, year=str(eol_date_esm.year), ) - if available_updates: - message += messages.SS_SERVICE_ADVERTISE_COUNTS.format( - updates=available_updates, - plural="s" if available_updates > 1 else "", - ) - else: - message += "." - print(message) - if ( - is_attached - and service_applicability == ApplicabilityStatus.APPLICABLE - ): - print(messages.SS_SERVICE_COMMAND.format(service=service)) + + if installed_updates: + message += messages.SS_SERVICE_ENABLED_COUNTS.format( + updates=installed_updates, + plural="" if installed_updates == 1 else "s", + ) + + if available_updates: + message += messages.SS_SERVICE_ADVERTISE_COUNTS.format( + verb="is" if available_updates == 1 else "are", + updates=available_updates, + plural="s" if available_updates > 1 else "", + ) + print(message) + + if ( + is_attached + and service_status == ApplicationStatus.DISABLED + and service_applicability == ApplicabilityStatus.APPLICABLE + ): + print("") + print(messages.SS_SERVICE_COMMAND.format(service=service)) print("") def _print_package_list( - package_list: List[apt.package.Package], - reference_list: List[apt.package.Package] = [], + package_list: List[str], ): - package_string = "" - - package_names = [package.name for package in package_list] - reference_names = [package.name for package in reference_list] - - for package_name in package_names: - if package_name in reference_names: - package_string += ( - "{bold}{package_name}{end_bold}".format( - bold=messages.TxtColor.BOLD, - package_name=package_name, - end_bold=messages.TxtColor.ENDC, - ) - + " " - ) - else: - package_string += package_name + " " + package_names = " ".join(package_list) print( "\n".join( textwrap.wrap( - package_string, + package_names, width=80, break_long_words=False, break_on_hyphens=False, @@ -508,8 +516,20 @@ ) ) print("") - hint_package_list = reference_names if reference_names else package_names - print(messages.SS_POLICY_HINT.format(package=choice(hint_package_list))) + + +def _print_apt_update_call(): + last_apt_update = get_apt_cache_datetime() + if last_apt_update is None: + print(messages.SS_UPDATE_UNKNOWN) + print("") + return + + now = datetime.now(timezone.utc) + time_since_update = now - last_apt_update + if time_since_update.days > 0: + print(messages.SS_UPDATE_DAYS.format(days=time_since_update.days)) + print("") def security_status(cfg: UAConfig): @@ -520,7 +540,7 @@ esm_apps_status = ESMAppsEntitlement(cfg).application_status()[0] esm_apps_applicability = ESMAppsEntitlement(cfg).applicability_status()[0] - series = get_platform_info()["series"] + series = get_release_info().series is_lts = is_current_series_lts() is_attached = get_ua_info(cfg)["attached"] @@ -542,6 +562,8 @@ print(messages.SS_HELP_CALL) print("") + _print_apt_update_call() + if not is_lts: if is_supported(series): _print_interim_release_support() @@ -589,6 +611,7 @@ def 
list_third_party_packages(): packages_by_origin = get_installed_packages_by_origin() third_party_packages = packages_by_origin["third-party"] + package_names = [package.name for package in third_party_packages] _print_package_summary( packages_by_origin, show_items="third-party", always_show=True @@ -599,7 +622,8 @@ print("") print("Packages:") - _print_package_list(third_party_packages) + _print_package_list(package_names) + print(messages.SS_SHOW_HINT.format(package=choice(package_names))) else: print(messages.SS_NO_THIRD_PARTY) @@ -607,6 +631,7 @@ def list_unavailable_packages(): packages_by_origin = get_installed_packages_by_origin() unknown_packages = packages_by_origin["unknown"] + package_names = [package.name for package in unknown_packages] _print_package_summary( packages_by_origin, show_items="unknown", always_show=True @@ -617,7 +642,8 @@ print("") print("Packages:") - _print_package_list(unknown_packages) + _print_package_list(package_names) + print(messages.SS_SHOW_HINT.format(package=choice(package_names))) else: print(messages.SS_NO_UNAVAILABLE) @@ -637,7 +663,7 @@ for update, _ in security_upgradable_versions: infra_updates.add(update.package) - series = get_platform_info()["series"] + series = get_release_info().series is_lts = is_current_series_lts() esm_infra_status = ESMInfraEntitlement(cfg).application_status()[0] @@ -645,6 +671,21 @@ 0 ] + installed_package_names = sorted( + [package.name for package in infra_packages] + ) + available_package_names = sorted( + [package.name for package in infra_updates] + ) + remaining_package_names = sorted( + [ + package.name + for package in all_infra_packages + if package.name not in installed_package_names + and package.name not in available_package_names + ] + ) + _print_package_summary( packages_by_origin, show_items="esm-infra", always_show=True ) @@ -672,9 +713,27 @@ print("") if not is_supported(series): - print(messages.SS_BOLD_PACKAGES.format(service="esm-infra")) - print("Packages:") - _print_package_list(all_infra_packages, list(infra_updates)) + if available_package_names: + print(messages.SS_UPDATES_AVAILABLE.format(service="esm-infra")) + _print_package_list(available_package_names) + + if installed_package_names: + print(messages.SS_UPDATES_INSTALLED.format(service="esm-infra")) + _print_package_list(installed_package_names) + + hint_list = available_package_names or installed_package_names + # Check names because packages may have been already listed + if remaining_package_names: + print( + messages.SS_OTHER_PACKAGES.format( + prefix="Further installed" if hint_list else "Installed", + service="esm-infra", + ) + ) + _print_package_list(remaining_package_names) + + if hint_list: + print(messages.SS_SHOW_HINT.format(package=choice(hint_list))) def list_esm_apps_packages(cfg): @@ -698,6 +757,21 @@ esm_apps_status = ESMAppsEntitlement(cfg).application_status()[0] esm_apps_applicability = ESMAppsEntitlement(cfg).applicability_status()[0] + installed_package_names = sorted( + [package.name for package in apps_packages] + ) + available_package_names = sorted( + [package.name for package in apps_updates] + ) + remaining_package_names = sorted( + [ + package.name + for package in all_apps_packages + if package.name not in installed_package_names + and package.name not in available_package_names + ] + ) + _print_package_summary( packages_by_origin, show_items="esm-apps", always_show=True ) @@ -719,6 +793,25 @@ print("") if all_apps_packages: - print(messages.SS_BOLD_PACKAGES.format(service="esm-apps")) - print("Packages:") - 
_print_package_list(all_apps_packages, list(apps_updates)) + if available_package_names: + print(messages.SS_UPDATES_AVAILABLE.format(service="esm-apps")) + _print_package_list(available_package_names) + + if installed_package_names: + print(messages.SS_UPDATES_INSTALLED.format(service="esm-apps")) + _print_package_list(installed_package_names) + + hint_list = available_package_names or installed_package_names + + # Check names because packages may have been already listed + if remaining_package_names: + print( + messages.SS_OTHER_PACKAGES.format( + prefix="Further installed" if hint_list else "Installed", + service="esm-apps", + ) + ) + _print_package_list(remaining_package_names) + + if hint_list: + print(messages.SS_SHOW_HINT.format(package=choice(hint_list))) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/status.py ubuntu-advantage-tools-28.1~18.04/uaclient/status.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/status.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/status.py 2023-06-27 00:49:37.000000000 +0000 @@ -1,6 +1,5 @@ import copy import logging -import os import sys import textwrap from collections import OrderedDict @@ -15,6 +14,7 @@ util, version, ) +from uaclient.api.u.pro.status.is_attached.v1 import _is_attached from uaclient.config import UA_CONFIGURABLE_KEYS, UAConfig from uaclient.contract import get_available_resources, get_contract_information from uaclient.defaults import ATTACH_FAIL_DATE_FORMAT, PRINT_WRAP_WIDTH @@ -25,7 +25,7 @@ UserFacingConfigStatus, UserFacingStatus, ) -from uaclient.files import notices +from uaclient.files import notices, state_files from uaclient.files.notices import Notice from uaclient.messages import TxtColor @@ -81,6 +81,9 @@ # columns. Colorizing has an opening and closing set of unprintable characters # that factor into formats len() calculations STATUS_TMPL = "{name: <17}{entitled: <19}{status: <19}{description}" +VARIANT_STATUS_TMPL = ( + "{marker} {name: <15}{entitled: <19}{status: <19}{description}" +) DEFAULT_STATUS = { "_doc": "Content provided in json response is currently considered" @@ -117,7 +120,9 @@ def _get_blocked_by_services(ent): return [ { - "name": service.entitlement.name, + "name": service.entitlement.name + if not service.entitlement.is_variant + else service.entitlement.variant_name, "reason_code": service.named_msg.name, "reason": service.named_msg.msg, } @@ -125,12 +130,15 @@ ] -def _attached_service_status(ent, inapplicable_resources) -> Dict[str, Any]: +def _attached_service_status( + ent, inapplicable_resources, cfg +) -> Dict[str, Any]: warning = None status_details = "" description_override = ent.status_description_override() contract_status = ent.contract_status() available = "no" if ent.name in inapplicable_resources else "yes" + variants = {} if contract_status == ContractStatus.UNENTITLED: ent_status = UserFacingStatus.UNAVAILABLE @@ -151,9 +159,19 @@ if ent_status == UserFacingStatus.INAPPLICABLE: available = "no" + if ent.variants: + variants = { + variant_name: _attached_service_status( + variant_cls(cfg=cfg), + inapplicable_resources, + cfg, + ) + for variant_name, variant_cls in ent.variants.items() + } + blocked_by = _get_blocked_by_services(ent) - return { + service_status = { "name": ent.presentation_name, "description": ent.description, "entitled": contract_status.value, @@ -165,6 +183,11 @@ "warning": warning, } + if not ent.is_variant: + service_status["variants"] = variants + + return service_status + def _attached_status(cfg: UAConfig) -> 
Dict[str, Any]: """Return configuration of attached status as a dictionary.""" @@ -224,7 +247,7 @@ continue ent = ent_cls(cfg) response["services"].append( - _attached_service_status(ent, inapplicable_resources) + _attached_service_status(ent, inapplicable_resources, cfg) ) response["services"].sort(key=lambda x: x.get("name", "")) @@ -266,7 +289,8 @@ # that takes into account local information. if ( ent_cls.name == "livepatch" - and livepatch.on_supported_kernel() is False + and livepatch.on_supported_kernel() + == livepatch.LivepatchSupport.UNSUPPORTED ): lp = ent_cls(cfg) descr_override = lp.status_description_override() @@ -341,7 +365,7 @@ status_desc = messages.LOCK_HELD.format( pid=lock_pid, lock_holder=lock_holder ).msg - elif os.path.exists(cfg.data_path("marker-reboot-cmds")): + elif state_files.reboot_cmd_marker_file.is_present: status_val = userStatus.REBOOTREQUIRED.value operation = "configuration changes" status_desc = messages.ENABLE_REBOOT_REQUIRED_TMPL.format( @@ -374,7 +398,7 @@ Write the status-cache when called by root. """ - if cfg.is_attached: + if _is_attached(cfg).is_attached: response = _attached_status(cfg) else: response = _unattached_status(cfg) @@ -483,20 +507,14 @@ event.info("This token is not valid.\n" + message.msg + "\n") ret = 1 - status_cache = cfg.read_cache("status-cache") - if status_cache: - resources = status_cache.get("services") - else: - resources = get_available_resources(cfg) - - entitlements = contract_info.get("resourceEntitlements", []) - + resources = get_available_resources(cfg) inapplicable_resources = [ resource["name"] for resource in sorted(resources, key=lambda x: x["name"]) if not resource["available"] ] + entitlements = contract_info.get("resourceEntitlements", []) for resource in resources: entitlement_name = resource.get("name", "") try: @@ -604,10 +622,13 @@ return expires.strftime("%c %Z") -def format_tabular(status: Dict[str, Any]) -> str: +def format_tabular(status: Dict[str, Any], show_all: bool = False) -> str: """Format status dict for tabular output.""" if not status.get("attached"): if status.get("simulated"): + if not status.get("services", None): + return messages.STATUS_NO_SERVICES_AVAILABLE + content = [ STATUS_SIMULATED_TMPL.format( name="SERVICE", @@ -617,29 +638,35 @@ description="DESCRIPTION", ) ] - for service in status["services"]: + for service in status.get("services", []): content.append(STATUS_SIMULATED_TMPL.format(**service)) + return "\n".join(content) - content = [ - STATUS_UNATTACHED_TMPL.format( - name="SERVICE", - available="AVAILABLE", - description="DESCRIPTION", - ) - ] - for service in status["services"]: - descr_override = service.get("description_override") - description = ( - descr_override if descr_override else service["description"] - ) - content.append( + if not status.get("services", None): + content = [messages.STATUS_NO_SERVICES_AVAILABLE] + else: + content = [ STATUS_UNATTACHED_TMPL.format( - name=service["name"], - available=service["available"], - description=description, + name="SERVICE", + available="AVAILABLE", + description="DESCRIPTION", + ) + ] + for service in status.get("services", []): + descr_override = service.get("description_override") + description = ( + descr_override + if descr_override + else service.get("description", "") + ) + content.append( + STATUS_UNATTACHED_TMPL.format( + name=service.get("name", ""), + available=service.get("available", ""), + description=description, + ) ) - ) notices = status.get("notices") if notices: @@ -648,37 +675,69 @@ if 
status.get("features"): content.append("\nFEATURES") - for key, value in sorted(status["features"].items()): + for key, value in sorted(status.get("features", {}).items()): content.append("{}: {}".format(key, value)) + if not show_all: + content.extend(["", messages.STATUS_ALL_HINT]) + content.extend(["", messages.UNATTACHED.msg]) - if livepatch.on_supported_kernel() is False: + if ( + livepatch.on_supported_kernel() + == livepatch.LivepatchSupport.UNSUPPORTED + ): content.extend( ["", messages.LIVEPATCH_KERNEL_NOT_SUPPORTED_UNATTACHED] ) return "\n".join(content) service_warnings = [] - content = [STATUS_HEADER] - for service_status in status["services"]: - entitled = service_status["entitled"] - descr_override = service_status.get("description_override") - description = ( - descr_override if descr_override else service_status["description"] - ) - fmt_args = { - "name": service_status["name"], - "entitled": colorize(entitled), - "status": colorize(service_status["status"]), - "description": description, - } - warning = service_status.get("warning", None) - if warning is not None: - warning_message = warning.get("message", None) - if warning_message is not None: - service_warnings.append(warning_message) - content.append(STATUS_TMPL.format(**fmt_args)) - tech_support_level = status["contract"]["tech_support_level"] + has_variants = False + if not status.get("services", None): + content = [messages.STATUS_NO_SERVICES_AVAILABLE] + else: + content = [STATUS_HEADER] + for service_status in status.get("services", []): + entitled = service_status.get("entitled", "") + descr_override = service_status.get("description_override") + description = ( + descr_override + if descr_override + else service_status.get("description", "") + ) + fmt_args = { + "name": service_status.get("name", ""), + "entitled": colorize(entitled), + "status": colorize(service_status.get("status", "")), + "description": description, + } + warning = service_status.get("warning", None) + if warning is not None: + warning_message = warning.get("message", None) + if warning_message is not None: + service_warnings.append(warning_message) + variants = service_status.get("variants") + if variants and not show_all: + has_variants = True + fmt_args["name"] = "{}*".format(fmt_args["name"]) + + content.append(STATUS_TMPL.format(**fmt_args)) + if variants and show_all: + for idx, (_, variant) in enumerate(variants.items()): + marker = "├" if idx != len(variants) - 1 else "└" + content.append( + VARIANT_STATUS_TMPL.format( + marker=marker, + name=variant.get("name"), + entitled=colorize(variant.get("entitled", "")), + status=colorize(variant.get("status", "")), + description=variant.get("description", ""), + ) + ) + + if has_variants: + content.append("") + content.append(messages.STATUS_SERVICE_HAS_VARIANTS) if status.get("notices") or len(service_warnings) > 0: content.append("") @@ -691,22 +750,32 @@ if status.get("features"): content.append("\nFEATURES") - for key, value in sorted(status["features"].items()): + for key, value in sorted(status.get("features", {}).items()): content.append("{}: {}".format(key, value)) + content.append("") - content.append("\nEnable services with: pro enable ") + if not show_all: + if has_variants: + content.append(messages.STATUS_ALL_HINT_WITH_VARIANTS) + else: + content.append(messages.STATUS_ALL_HINT) + + content.append("Enable services with: pro enable ") pairs = [] - account_name = status["account"]["name"] + account_name = status.get("account", {}).get("name", "unknown") if account_name: 
pairs.append(("Account", account_name)) - contract_name = status["contract"]["name"] + contract_name = status.get("contract", {}).get("name", "unknown") if contract_name: pairs.append(("Subscription", contract_name)) - if status["origin"] != "free": - pairs.append(("Valid until", format_expires(status["expires"]))) + if status.get("origin", None) != "free": + pairs.append(("Valid until", format_expires(status.get("expires")))) + tech_support_level = status.get("contract", {}).get( + "tech_support_level", "unknown" + ) pairs.append(("Technical support level", colorize(tech_support_level))) if pairs: @@ -749,8 +818,8 @@ "No help available for '{}'".format(name) ) - if cfg.is_attached: - service_status = _attached_service_status(help_ent, {}) + if _is_attached(cfg).is_attached: + service_status = _attached_service_status(help_ent, {}, cfg) status_msg = service_status["status"] response_dict["entitled"] = service_status["entitled"] diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/system.py ubuntu-advantage-tools-28.1~18.04/uaclient/system.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/system.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/system.py 2023-06-27 00:49:37.000000000 +0000 @@ -3,6 +3,7 @@ import os import pathlib import re +import stat import subprocess import tempfile import time @@ -11,7 +12,7 @@ from shutil import rmtree from typing import Dict, List, NamedTuple, Optional, Sequence, Set, Tuple -from uaclient import exceptions, messages, util +from uaclient import defaults, exceptions, messages, util REBOOT_FILE_CHECK_PATH = "/var/run/reboot-required" REBOOT_PKGS_FILE_PATH = "/var/run/reboot-required.pkgs" @@ -19,6 +20,8 @@ DBUS_MACHINE_ID = "/var/lib/dbus/machine-id" DISTRO_INFO_CSV = "/usr/share/distro-info/ubuntu.csv" +CPU_VENDOR_MAP = {"GenuineIntel": "intel"} + # N.B. 
this relies on the version normalisation we perform in get_platform_info REGEX_OS_RELEASE_VERSION = ( r"(?P\d+\.\d+) (LTS\s*)?(\((?P\w+))?.*" @@ -45,7 +48,9 @@ KernelInfo = NamedTuple( "KernelInfo", [ + ("uname_machine_arch", str), ("uname_release", str), + ("build_date", Optional[datetime.datetime]), ("proc_version_signature_version", Optional[str]), ("major", Optional[int]), ("minor", Optional[int]), @@ -55,6 +60,77 @@ ], ) +ReleaseInfo = NamedTuple( + "ReleaseInfo", + [ + ("distribution", str), + ("release", str), + ("series", str), + ("pretty_version", str), + ], +) +CpuInfo = NamedTuple( + "CpuInfo", + [ + ("vendor_id", str), + ("model", Optional[int]), + ("stepping", Optional[int]), + ], +) + + +RE_KERNEL_EXTRACT_BUILD_DATE = r"(Mon|Tue|Wed|Thu|Fri|Sat|Sun).*" + + +def _get_kernel_changelog_timestamp( + uname: os.uname_result, +) -> Optional[datetime.datetime]: + if is_container(): + with util.disable_log_to_console(): + logging.warning( + "Not attempting to use timestamp of kernel changelog because we're in a container" # noqa: E501 + ) + return None + + with util.disable_log_to_console(): + logging.warning("Falling back to using timestamp of kernel changelog") + + try: + stat_result = os.stat( + "/usr/share/doc/linux-image-{}/changelog.Debian.gz".format( + uname.release + ) + ) + return datetime.datetime.fromtimestamp( + stat_result.st_mtime, datetime.timezone.utc + ) + except Exception: + with util.disable_log_to_console(): + logging.warning("Unable to stat kernel changelog") + return None + + +def _get_kernel_build_date( + uname: os.uname_result, +) -> Optional[datetime.datetime]: + date_match = re.search(RE_KERNEL_EXTRACT_BUILD_DATE, uname.version) + if date_match is None: + with util.disable_log_to_console(): + logging.warning("Unable to find build date in uname version") + return _get_kernel_changelog_timestamp(uname) + date_str = date_match.group(0) + try: + dt = datetime.datetime.strptime(date_str, "%a %b %d %H:%M:%S %Z %Y") + except ValueError: + with util.disable_log_to_console(): + logging.warning("Unable to parse build date from uname version") + return _get_kernel_changelog_timestamp(uname) + if dt.tzinfo is None: + # Give it a default timezone if it didn't get one from strptime + # The Livepatch API requires a timezone + dt = dt.replace(tzinfo=datetime.timezone.utc) + return dt + @lru_cache(maxsize=None) def get_kernel_info() -> KernelInfo: @@ -65,14 +141,20 @@ except Exception: logging.warning("failed to process /proc/version_signature.") - uname_release = os.uname().release.strip() + uname = os.uname() + uname_machine_arch = uname.machine.strip() + build_date = _get_kernel_build_date(uname) + + uname_release = uname.release.strip() uname_match = re.match(RE_KERNEL_UNAME, uname_release) if uname_match is None: logging.warning( messages.KERNEL_PARSE_ERROR.format(kernel=uname_release) ) return KernelInfo( + uname_machine_arch=uname_machine_arch, uname_release=uname_release, + build_date=build_date, proc_version_signature_version=proc_version_signature_version, major=None, minor=None, @@ -82,7 +164,9 @@ ) else: return KernelInfo( + uname_machine_arch=uname_machine_arch, uname_release=uname_release, + build_date=build_date, proc_version_signature_version=proc_version_signature_version, major=int(uname_match.group("major")), minor=int(uname_match.group("minor")), @@ -108,6 +192,30 @@ @lru_cache(maxsize=None) +def get_cpu_info() -> CpuInfo: + cpu_info_content = load_file("/proc/cpuinfo") + cpu_info_values = {} + for field in ["vendor_id", "model", "stepping"]: + cpu_match = 
re.search( + r"^{}\s*:\s*(?P\w*)".format(field), + cpu_info_content, + re.MULTILINE, + ) + if cpu_match: + value = cpu_match.group("info") + cpu_info_values[field] = value + + vendor_id_base = cpu_info_values.get("vendor_id", "") + model = cpu_info_values.get("model") + stepping = cpu_info_values.get("stepping") + return CpuInfo( + vendor_id=CPU_VENDOR_MAP.get(vendor_id_base, vendor_id_base), + model=int(model) if model else None, + stepping=int(stepping) if stepping else None, + ) + + +@lru_cache(maxsize=None) def get_machine_id(cfg) -> str: """ Get system's unique machine-id or create our own in data_dir. @@ -135,50 +243,36 @@ @lru_cache(maxsize=None) -def get_platform_info() -> Dict[str, str]: - """ - Returns a dict of platform information. - - N.B. This dict is sent to the contract server, which requires the - distribution, type and release keys. - """ - os_release = parse_os_release() - platform_info = { - "distribution": os_release.get("NAME", "UNKNOWN"), - "type": "Linux", - } - - version = re.sub(r"\.\d LTS", " LTS", os_release.get("VERSION", "")) - platform_info["version"] = version +def get_release_info() -> ReleaseInfo: + os_release = _parse_os_release() + distribution = os_release.get("NAME", "UNKNOWN") + pretty_version = re.sub(r"\.\d LTS", " LTS", os_release.get("VERSION", "")) series = os_release.get("VERSION_CODENAME", "") release = os_release.get("VERSION_ID", "") if not series or not release: - match = re.match(REGEX_OS_RELEASE_VERSION, version) + match = re.match(REGEX_OS_RELEASE_VERSION, pretty_version) if not match: raise exceptions.ParsingErrorOnOSReleaseFile( - orig_ver=os_release.get("VERSION", ""), mod_ver=version + orig_ver=os_release.get("VERSION", ""), mod_ver=pretty_version ) match_dict = match.groupdict() series = series or match_dict.get("series", "") if not series: - raise exceptions.MissingSeriesOnOSReleaseFile(version=version) + raise exceptions.MissingSeriesOnOSReleaseFile( + version=pretty_version + ) release = release or match_dict.get("release", "") - platform_info.update( - { - "release": release, - "series": series.lower(), - "kernel": get_kernel_info().uname_release, - "arch": get_dpkg_arch(), - "virt": get_virt_type(), - } + return ReleaseInfo( + distribution=distribution, + release=release, + series=series.lower(), + pretty_version=pretty_version, ) - return platform_info - @lru_cache(maxsize=None) def is_lts(series: str) -> bool: @@ -188,7 +282,7 @@ @lru_cache(maxsize=None) def is_current_series_lts() -> bool: - return is_lts(get_platform_info()["series"]) + return is_lts(get_release_info().series) @lru_cache(maxsize=None) @@ -210,7 +304,7 @@ @lru_cache(maxsize=None) def is_current_series_active_esm() -> bool: - return is_active_esm(get_platform_info()["series"]) + return is_active_esm(get_release_info().series) @lru_cache(maxsize=None) @@ -241,11 +335,13 @@ @lru_cache(maxsize=None) -def parse_os_release(release_file: Optional[str] = None) -> Dict[str, str]: - if not release_file: - release_file = "/etc/os-release" +def _parse_os_release() -> Dict[str, str]: + try: + file_contents = load_file("/etc/os-release") + except FileNotFoundError: + file_contents = load_file("/usr/lib/os-release") data = {} - for line in load_file(release_file).splitlines(): + for line in file_contents.splitlines(): key, value = line.split("=", 1) if value: data[key] = value.strip().strip('"') @@ -366,14 +462,28 @@ os.chmod(filename, mode) -def write_file(filename: str, content: str, mode: int = 0o644) -> None: +def write_file( + filename: str, content: str, mode: 
Optional[int] = None +) -> None: """Write content to the provided filename encoding it if necessary. + We preserve the file ownership and permissions if the file is present + and no mode argument is provided. + @param filename: The full path of the file to write. @param content: The content to write to the file. @param mode: The filesystem mode to set on the file. """ tmpf = None + is_file_present = os.path.isfile(filename) + if is_file_present: + file_stat = pathlib.Path(filename).stat() + f_mode = stat.S_IMODE(file_stat.st_mode) + if mode is None: + mode = f_mode + + elif mode is None: + mode = 0o644 try: os.makedirs(os.path.dirname(filename), exist_ok=True) tmpf = tempfile.NamedTemporaryFile( @@ -386,6 +496,8 @@ tmpf.flush() tmpf.close() os.chmod(tmpf.name, mode) + if is_file_present: + os.chown(tmpf.name, file_stat.st_uid, file_stat.st_gid) os.rename(tmpf.name, filename) except Exception as e: if tmpf is not None: @@ -405,7 +517,7 @@ rcs: Optional[List[int]] = None, capture: bool = False, timeout: Optional[float] = None, - env: Optional[Dict[str, str]] = None, + override_env_vars: Optional[Dict[str, str]] = None, ) -> Tuple[str, str]: """Run a command and return a tuple of decoded stdout, stderr. @@ -415,7 +527,11 @@ @param capture: Boolean set True to log the command and response. @param timeout: Optional float indicating number of seconds to wait for subp to return. - @param env: Optional dictionary of environment variable to pass to Popen. + @param override_env_vars: Optional dictionary of environment variables. + If None, the current os.environ is used for the subprocess. + If defined, these env vars get merged with the current process' + os.environ for the subprocess, overriding any values that already + existed in os.environ. @return: Tuple of utf-8 decoded stdout, stderr @raises ProcessExecutionError on invalid command or returncode not in rcs. @@ -425,14 +541,23 @@ bytes_args = [ x if isinstance(x, bytes) else x.encode("utf-8") for x in args ] - if env: - env.update(os.environ) + + # If env is None, subprocess.Popen will use the process environment + # variables by default, as stated here: + # https://docs.python.org/3.5/library/subprocess.html?highlight=subprocess#popen-constructor + merged_env = None + if override_env_vars: + merged_env = {**os.environ, **override_env_vars} + if rcs is None: rcs = [0] redacted_cmd = util.redact_sensitive_logs(" ".join(args)) try: proc = subprocess.Popen( - bytes_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env + bytes_args, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=merged_env, ) (out, err) = proc.communicate(timeout=timeout) except OSError: @@ -468,7 +593,7 @@ capture: bool = False, timeout: Optional[float] = None, retry_sleeps: Optional[List[float]] = None, - env: Optional[Dict[str, str]] = None, + override_env_vars: Optional[Dict[str, str]] = None, ) -> Tuple[str, str]: """Run a command and return a tuple of decoded stdout, stderr. @@ -482,8 +607,11 @@ retries. Specifying a list of [0.5, 1] instructs subp to retry twice on failure; sleeping half a second before the first retry and 1 second before the next retry. - @param env: Optional dictionary of environment variables to provide to - subp. + @param override_env_vars: Optional dictionary of environment variables. + If None, the current os.environ is used for the subprocess. + If defined, these env vars get merged with the current process' + os.environ for the subprocess, overriding any values that already + existed in os.environ. 
@return: Tuple of utf-8 decoded stdout, stderr @raises ProcessExecutionError on invalid command or returncode not in rcs. @@ -493,7 +621,13 @@ retry_sleeps = retry_sleeps.copy() if retry_sleeps is not None else None while True: try: - out, err = _subp(args, rcs, capture, timeout, env=env) + out, err = _subp( + args, + rcs, + capture, + timeout, + override_env_vars=override_env_vars, + ) break except exceptions.ProcessExecutionError as e: if capture: @@ -534,3 +668,14 @@ return False return out.strip() == "active" + + +def get_user_cache_dir() -> str: + if util.we_are_currently_root(): + return defaults.UAC_RUN_PATH + + xdg_cache_home = os.environ.get("XDG_CACHE_HOME") + if xdg_cache_home: + return xdg_cache_home + "/" + defaults.USER_CACHE_SUBDIR + + return os.path.expanduser("~") + "/.cache/" + defaults.USER_CACHE_SUBDIR diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/testing/fakes.py ubuntu-advantage-tools-28.1~18.04/uaclient/testing/fakes.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/testing/fakes.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/testing/fakes.py 2023-06-01 18:49:33.000000000 +0000 @@ -1,7 +1,4 @@ -from uaclient.contract import ( - API_V1_TMPL_CONTEXT_MACHINE_TOKEN_RESOURCE, - UAContractClient, -) +from uaclient.contract import API_V1_GET_CONTRACT_MACHINE, UAContractClient class FakeContractClient(UAContractClient): @@ -9,7 +6,7 @@ _requests = [] _responses = {} - refresh_route = API_V1_TMPL_CONTEXT_MACHINE_TOKEN_RESOURCE.format( + refresh_route = API_V1_GET_CONTRACT_MACHINE.format( contract="cid", machine="mid" ) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/constraints/constraints-bionic.txt ubuntu-advantage-tools-28.1~18.04/uaclient/tests/constraints/constraints-bionic.txt --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/constraints/constraints-bionic.txt 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/constraints/constraints-bionic.txt 2023-05-30 19:02:35.000000000 +0000 @@ -0,0 +1,8 @@ +attrs==17.4 +flake8==3.5.0 +py==1.5.2 +pycodestyle==2.3.1 +pyflakes==1.6.0 +pytest==3.3.2 +pytest-cov==2.5.1 +pyyaml==3.12 diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/constraints/constraints-focal.txt ubuntu-advantage-tools-28.1~18.04/uaclient/tests/constraints/constraints-focal.txt --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/constraints/constraints-focal.txt 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/constraints/constraints-focal.txt 2023-05-30 19:02:35.000000000 +0000 @@ -0,0 +1,8 @@ +attrs==19.3 +flake8==3.7.9 +py==1.8.1 +pycodestyle==2.5.0 +pyflakes==2.1.1 +pytest==4.6.9 +pytest-cov==2.8.1 +pyyaml==5.3.1 \ No newline at end of file diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/constraints/constraints-jammy.txt ubuntu-advantage-tools-28.1~18.04/uaclient/tests/constraints/constraints-jammy.txt --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/constraints/constraints-jammy.txt 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/constraints/constraints-jammy.txt 2023-05-30 19:02:35.000000000 +0000 @@ -0,0 +1,8 @@ +attrs==21.2 +flake8==4.0.1 +py==1.10 +pycodestyle==2.8.0 +pyflakes==2.4.0 +pytest==6.2.5 +pytest-cov==3.0.0 +pyyaml==5.4.1 diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/constraints/constraints-mypy.txt ubuntu-advantage-tools-28.1~18.04/uaclient/tests/constraints/constraints-mypy.txt --- 
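For illustration (not part of the diff): the override_env_vars semantics described in the docstrings above boil down to a dict merge in which the caller's values win over the inherited environment. A runnable demo of that merge:

    import os

    os.environ["EXAMPLE_INHERITED"] = "from-parent"
    os.environ["EXAMPLE_OVERRIDDEN"] = "original"
    override_env_vars = {"EXAMPLE_OVERRIDDEN": "overridden"}

    merged_env = {**os.environ, **override_env_vars}   # same expression as in _subp()
    assert merged_env["EXAMPLE_INHERITED"] == "from-parent"   # parent env still visible
    assert merged_env["EXAMPLE_OVERRIDDEN"] == "overridden"   # override wins
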
ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/constraints/constraints-mypy.txt 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/constraints/constraints-mypy.txt 2023-05-30 19:02:35.000000000 +0000 @@ -0,0 +1,5 @@ +mypy +pyparsing==3.0.7 +pytest==6.1.2 +importlib-metadata==3.3.0 +packaging==20.9 diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/constraints/constraints-xenial.txt ubuntu-advantage-tools-28.1~18.04/uaclient/tests/constraints/constraints-xenial.txt --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/constraints/constraints-xenial.txt 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/constraints/constraints-xenial.txt 2023-05-30 19:02:35.000000000 +0000 @@ -0,0 +1,10 @@ +attrs==15.2 +flake8==2.5.4 +pep8==1.7.0 +py==1.4.31 +# Xenial ships pyflakes 1.1.0, but there is a dependency mismatch between the +# deb and the pip versions of flake8==2.5.4, which requires pyflakes. +pyflakes==1.0.0 +pytest==2.8.7 +pytest-cov==2.2.1 +pyyaml==3.11 diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_actions.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_actions.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_actions.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_actions.py 2023-06-01 18:49:33.000000000 +0000 @@ -44,7 +44,7 @@ ], ) @mock.patch(M_PATH + "identity.get_instance_id", return_value="my-iid") - @mock.patch("uaclient.jobs.update_messaging.update_motd_messages") + @mock.patch("uaclient.timer.update_messaging.update_motd_messages") @mock.patch("uaclient.status.status") @mock.patch(M_PATH + "contract.request_updated_contract") @mock.patch(M_PATH + "config.UAConfig.write_cache") @@ -83,12 +83,12 @@ @mock.patch(M_PATH + "attach_with_token") @mock.patch( M_PATH - + "contract.UAContractClient.request_auto_attach_contract_token", + + "contract.UAContractClient.get_contract_token_for_cloud_instance", return_value={"contractToken": "token"}, ) def test_happy_path_on_auto_attach( self, - m_request_auto_attach_contract_token, + _m_get_contract_token_for_cloud_instances, m_attach_with_token, FakeConfig, ): @@ -101,13 +101,14 @@ ] == m_attach_with_token.call_args_list @mock.patch( - M_PATH + "contract.UAContractClient.request_auto_attach_contract_token" + M_PATH + + "contract.UAContractClient.get_contract_token_for_cloud_instance" # noqa ) @mock.patch(M_PATH + "identity.get_instance_id", return_value="my-iid") def test_raise_unexpected_errors( self, _m_get_instance_id, - m_request_auto_attach_contract_token, + m_get_contract_token_for_cloud_instances, FakeConfig, ): """Any unexpected errors will be raised.""" @@ -119,7 +120,7 @@ ), error_response={"message": "something unexpected"}, ) - m_request_auto_attach_contract_token.side_effect = unexpected_error + m_get_contract_token_for_cloud_instances.side_effect = unexpected_error with pytest.raises(ContractAPIError) as excinfo: auto_attach(cfg, fake_instance_factory()) @@ -135,8 +136,10 @@ @mock.patch("uaclient.system.load_file") @mock.patch("uaclient.actions._get_state_files") @mock.patch("glob.glob") + @mock.patch("uaclient.log.get_user_log_file") def test_collect_logs_invalid_file( self, + m_get_user, m_glob, m_get_state_files, m_load_file, @@ -144,18 +147,29 @@ m_we_are_currently_root, m_write_cmd, caplog_text, + tmpdir, ): + log_file = tmpdir.join("user-log").strpath + m_get_user.return_value = log_file m_get_state_files.return_value = ["a", "b"] - 
m_load_file.side_effect = [UnicodeError("test"), "test"] + m_load_file.side_effect = ["test", UnicodeError("test"), "test"] m_glob.return_value = [] with mock.patch("os.path.isfile", return_value=True): collect_logs(cfg=mock.MagicMock(), output_dir="test") - assert 2 == m_load_file.call_count - assert [mock.call("a"), mock.call("b")] == m_load_file.call_args_list - assert 1 == m_write_file.call_count - assert [mock.call("test/b", "test")] == m_write_file.call_args_list + assert 3 == m_load_file.call_count + assert [ + mock.call(log_file), + mock.call("a"), + mock.call("b"), + ] == m_load_file.call_args_list + assert 2 == m_write_file.call_count + print(m_write_file.call_args_list) + assert [ + mock.call("test/user0.log", "test"), + mock.call("test/b", "test"), + ] == m_write_file.call_args_list assert "Failed to load file: a\n" in caplog_text() diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_apt_news.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_apt_news.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_apt_news.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_apt_news.py 2023-05-30 19:02:35.000000000 +0000 @@ -5,7 +5,7 @@ from uaclient import apt_news, messages from uaclient.clouds.identity import NoCloudTypeReason -from uaclient.jobs.update_messaging import ContractExpiryStatus +from uaclient.timer.update_messaging import ContractExpiryStatus M_PATH = "uaclient.apt_news." @@ -159,7 +159,7 @@ ], ) @mock.patch(M_PATH + "get_cloud_type") - @mock.patch(M_PATH + "system.get_platform_info") + @mock.patch(M_PATH + "system.get_release_info") def test_do_selectors_apply( self, m_get_platform_info, @@ -175,7 +175,7 @@ cfg = FakeConfig.for_attached_machine() else: cfg = FakeConfig() - m_get_platform_info.return_value = {"series": series} + m_get_platform_info.return_value = mock.MagicMock(series=series) m_get_cloud_type.return_value = cloud_type assert expected == apt_news.do_selectors_apply(cfg, selectors) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_apt.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_apt.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_apt.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_apt.py 2023-05-30 19:02:35.000000000 +0000 @@ -31,6 +31,7 @@ get_apt_cache_time, get_apt_config_values, get_installed_packages_names, + get_pkg_candidate_version, is_installed, remove_apt_list_files, remove_auth_apt_repo, @@ -58,10 +59,12 @@ class TestAddPPAPinning: - @mock.patch("uaclient.system.get_platform_info") - def test_write_apt_pin_file_to_apt_preferences(self, m_platform, tmpdir): + @mock.patch("uaclient.system.get_release_info") + def test_write_apt_pin_file_to_apt_preferences( + self, m_get_release_info, tmpdir + ): """Write proper apt pin file to specified apt_preference_file.""" - m_platform.return_value = {"series": "xenial"} + m_get_release_info.return_value = mock.MagicMock(series="xenial") pref_file = tmpdir.join("preffile").strpath assert None is add_ppa_pinning( pref_file, @@ -294,11 +297,12 @@ @mock.patch("uaclient.apt.get_apt_auth_file_from_apt_config") @mock.patch("uaclient.apt.assert_valid_apt_credentials") @mock.patch( - "uaclient.system.get_platform_info", return_value={"series": "xenial"} + "uaclient.system.get_release_info", + return_value=mock.MagicMock(series="xenial"), ) def test_add_auth_apt_repo_writes_sources_file( self, - m_platform, + m_get_release_info, m_valid_creds, 
m_get_apt_auth_file, m_subp, @@ -334,11 +338,12 @@ @mock.patch("uaclient.apt.get_apt_auth_file_from_apt_config") @mock.patch("uaclient.apt.assert_valid_apt_credentials") @mock.patch( - "uaclient.system.get_platform_info", return_value={"series": "xenial"} + "uaclient.system.get_release_info", + return_value=mock.MagicMock(series="xenial"), ) def test_add_auth_apt_repo_ignores_suites_not_matching_series( self, - m_platform, + m_get_release_info, m_valid_creds, m_get_apt_auth_file, m_subp, @@ -382,11 +387,12 @@ @mock.patch("uaclient.apt.get_apt_auth_file_from_apt_config") @mock.patch("uaclient.apt.assert_valid_apt_credentials") @mock.patch( - "uaclient.system.get_platform_info", return_value={"series": "xenial"} + "uaclient.system.get_release_info", + return_value=mock.MagicMock(series="xenial"), ) def test_add_auth_apt_repo_comments_updates_suites_on_non_update_machine( self, - m_platform, + m_get_release_info, m_valid_creds, m_get_apt_auth_file, m_subp, @@ -427,11 +433,12 @@ @mock.patch("uaclient.apt.get_apt_auth_file_from_apt_config") @mock.patch("uaclient.apt.assert_valid_apt_credentials") @mock.patch( - "uaclient.system.get_platform_info", return_value={"series": "xenial"} + "uaclient.system.get_release_info", + return_value=mock.MagicMock(series="xenial"), ) def test_add_auth_apt_repo_writes_username_password_to_auth_file( self, - m_platform, + m_get_release_info, m_valid_creds, m_get_apt_auth_file, m_subp, @@ -463,11 +470,12 @@ @mock.patch("uaclient.apt.get_apt_auth_file_from_apt_config") @mock.patch("uaclient.apt.assert_valid_apt_credentials") @mock.patch( - "uaclient.system.get_platform_info", return_value={"series": "xenial"} + "uaclient.system.get_release_info", + return_value=mock.MagicMock(series="xenial"), ) def test_add_auth_apt_repo_writes_bearer_resource_token_to_auth_file( self, - m_platform, + m_get_release_info, m_valid_creds, m_get_apt_auth_file, m_subp, @@ -1243,3 +1251,60 @@ assert 1 == apt_cfg["test"] assert [1, 2, 3] == apt_cfg["test1"] assert {"foo": "bar"} == apt_cfg["test2"] + + +class TestGetPkgCandidateversion: + @pytest.mark.parametrize("check_esm_cache", ((True), (False))) + @mock.patch("uaclient.apt.get_apt_cache") + @mock.patch("uaclient.apt.get_esm_cache") + def test_get_pkg_candidate_version( + self, + m_esm_cache, + m_apt_cache, + check_esm_cache, + apt_pkg, + ): + type(apt_pkg).config = mock.PropertyMock(return_value={}) + m_pkg_ver = mock.MagicMock(version="1.2") + m_pkg = mock.MagicMock(candidate=m_pkg_ver) + m_apt_cache.return_value = {"pkg1": m_pkg} + + m_esm_pkg_ver = mock.MagicMock(version="1.3~esm1") + m_esm_pkg = mock.MagicMock(candidate=m_esm_pkg_ver) + m_esm_cache.return_value = {"pkg1": m_esm_pkg} + + actual_value = get_pkg_candidate_version("pkg1", check_esm_cache) + if not check_esm_cache: + assert "1.2" == actual_value + else: + assert "1.3~esm1" == actual_value + + @mock.patch("uaclient.apt.get_apt_cache") + @mock.patch("uaclient.apt.get_esm_cache") + def test_get_pkg_candidate_version_when_esm_cache_fails( + self, + m_esm_cache, + m_apt_cache, + apt_pkg, + ): + type(apt_pkg).config = mock.PropertyMock(return_value={}) + m_pkg_ver = mock.MagicMock(version="1.2") + m_pkg = mock.MagicMock(candidate=m_pkg_ver) + m_apt_cache.return_value = {"pkg1": m_pkg} + m_esm_cache.return_value = {} + + actual_value = get_pkg_candidate_version("pkg1", check_esm_cache=True) + assert "1.2" == actual_value + + @mock.patch("uaclient.apt.get_apt_cache") + def test_get_pkg_candidate_version_when_candidate_doesnt_exist( + self, + m_apt_cache, + apt_pkg, + ): + 
type(apt_pkg).config = mock.PropertyMock(return_value={}) + m_pkg = mock.MagicMock(candidate=None) + m_apt_cache.return_value = {"pkg1": m_pkg} + + actual_value = get_pkg_candidate_version("pkg1") + assert actual_value is None diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli_api.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli_api.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli_api.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli_api.py 2023-05-30 19:02:35.000000000 +0000 @@ -25,7 +25,8 @@ class TestActionAPI: @mock.patch("uaclient.cli.entitlements.valid_services", return_value=[]) - def test_api_help(self, valid_services, capsys): + @mock.patch("uaclient.cli.setup_logging") + def test_api_help(self, _m_setup_logging, valid_services, capsys): with pytest.raises(SystemExit): with mock.patch("sys.argv", ["/usr/bin/ua", "api", "--help"]): main() diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli_attach.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli_attach.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli_attach.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli_attach.py 2023-06-01 18:49:33.000000000 +0000 @@ -8,6 +8,7 @@ import pytest from uaclient import event_logger, messages, status, util +from uaclient.api.u.pro.status.is_attached.v1 import _is_attached from uaclient.cli import ( UA_AUTH_TOKEN_URL, action_attach, @@ -238,7 +239,7 @@ @mock.patch("uaclient.system.should_reboot", return_value=False) @mock.patch("uaclient.files.notices.NoticesManager.remove") @mock.patch("uaclient.status.get_available_resources") - @mock.patch("uaclient.jobs.update_messaging.update_motd_messages") + @mock.patch("uaclient.timer.update_messaging.update_motd_messages") @mock.patch(M_PATH + "contract.request_updated_contract") def test_status_updated_when_auto_enable_fails( self, @@ -269,7 +270,7 @@ main_error_handler(action_attach)(args, cfg) assert 1 == excinfo.value.code - assert cfg.is_attached + assert _is_attached(cfg).is_attached # Assert updated status cache is written to disk assert orig_unattached_status != cfg.read_cache( "status-cache" @@ -278,10 +279,8 @@ @mock.patch("uaclient.system.should_reboot", return_value=False) @mock.patch("uaclient.files.notices.NoticesManager.remove") - @mock.patch("uaclient.jobs.update_messaging.update_motd_messages") - @mock.patch( - M_PATH + "contract.UAContractClient.request_contract_machine_attach" - ) + @mock.patch("uaclient.timer.update_messaging.update_motd_messages") + @mock.patch(M_PATH + "contract.UAContractClient.add_contract_machine") @mock.patch("uaclient.actions.status", return_value=("", 0)) @mock.patch("uaclient.status.format_tabular") def test_happy_path_with_token_arg( @@ -353,7 +352,7 @@ @mock.patch("uaclient.system.should_reboot", return_value=False) @mock.patch("uaclient.files.notices.NoticesManager.remove") @mock.patch("uaclient.status.get_available_resources") - @mock.patch("uaclient.jobs.update_messaging.update_motd_messages") + @mock.patch("uaclient.timer.update_messaging.update_motd_messages") def test_auto_enable_passed_through_to_request_updated_contract( self, m_update_apt_and_motd_msgs, @@ -532,11 +531,21 @@ } assert expected == json.loads(fake_stdout.getvalue()) + @pytest.mark.parametrize( + "expected_exception,expected_msg", + ( + ( + UserFacingError("error"), + messages.ATTACH_FAILURE_DEFAULT_SERVICES, + ), + (Exception("error"), 
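For illustration (not part of the diff): the TestGetPkgCandidateversion cases above encode a lookup order of ESM cache first (when requested), then the regular APT cache, returning None when no candidate exists. A standalone sketch of that precedence; the helper and data types here are hypothetical stand-ins for the real apt caches:

    from collections import namedtuple

    Candidate = namedtuple("Candidate", "version")
    Package = namedtuple("Package", "candidate")

    def candidate_version(name, apt_cache, esm_cache, check_esm_cache=False):
        # Prefer the ESM candidate when asked to and one is available.
        if check_esm_cache:
            esm_pkg = esm_cache.get(name)
            if esm_pkg is not None and esm_pkg.candidate is not None:
                return esm_pkg.candidate.version
        pkg = apt_cache.get(name)
        if pkg is None or pkg.candidate is None:
            return None
        return pkg.candidate.version

    apt_cache = {"pkg1": Package(Candidate("1.2"))}
    esm_cache = {"pkg1": Package(Candidate("1.3~esm1"))}
    assert candidate_version("pkg1", apt_cache, esm_cache) == "1.2"
    assert candidate_version("pkg1", apt_cache, esm_cache, check_esm_cache=True) == "1.3~esm1"
    assert candidate_version("pkg1", apt_cache, {}, check_esm_cache=True) == "1.2"
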
messages.UNEXPECTED_ERROR), + ), + ) @mock.patch("uaclient.entitlements.entitlements_enable_order") @mock.patch("uaclient.contract.process_entitlement_delta") @mock.patch("uaclient.contract.apply_contract_overrides") @mock.patch("uaclient.contract.UAContractClient.request_url") - @mock.patch("uaclient.jobs.update_messaging.update_motd_messages") + @mock.patch("uaclient.timer.update_messaging.update_motd_messages") def test_attach_when_one_service_fails_to_enable( self, _m_update_messages, @@ -544,6 +553,8 @@ _m_apply_contract_overrides, m_process_entitlement_delta, m_enable_order, + expected_exception, + expected_msg, FakeConfig, event, ): @@ -553,7 +564,7 @@ m_enable_order.return_value = ["test1", "test2"] m_process_entitlement_delta.side_effect = [ ({"test": 123}, True), - UserFacingError("error"), + expected_exception, ] m_request_url.return_value = ( { @@ -597,12 +608,20 @@ ): main_error_handler(action_attach)(args, cfg) - expected_msg = messages.ATTACH_FAILURE_DEFAULT_SERVICES expected = { "_schema_version": event_logger.JSON_SCHEMA_VERSION, "result": "failure", "errors": [ { + "additional_info": { + "services": [ + { + "code": expected_msg.name, + "name": "test2", + "title": expected_msg.msg, + } + ] + }, "message": expected_msg.msg, "message_code": expected_msg.name, "service": None, @@ -652,7 +671,10 @@ @mock.patch(M_PATH + "contract.get_available_resources") class TestParser: - def test_attach_help(self, _m_resources, capsys, FakeConfig): + @mock.patch("uaclient.cli.setup_logging") + def test_attach_help( + self, _m_resources, _m_setup_logging, capsys, FakeConfig + ): with pytest.raises(SystemExit): with mock.patch("sys.argv", ["/usr/bin/pro", "attach", "--help"]): with mock.patch( diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli_auto_attach.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli_auto_attach.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli_auto_attach.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli_auto_attach.py 2023-06-01 18:49:33.000000000 +0000 @@ -3,7 +3,7 @@ import mock import pytest -from uaclient import event_logger, exceptions +from uaclient import event_logger, exceptions, messages from uaclient.api import exceptions as api_exceptions from uaclient.api.u.pro.attach.auto.full_auto_attach.v1 import ( FullAutoAttachOptions, @@ -41,8 +41,11 @@ class TestActionAutoAttach: + @mock.patch("uaclient.cli.setup_logging") @mock.patch(M_PATH + "contract.get_available_resources") - def test_auto_attach_help(self, _m_resources, capsys, FakeConfig): + def test_auto_attach_help( + self, _m_resources, _m_setup_logging, capsys, FakeConfig + ): with pytest.raises(SystemExit): with mock.patch( "sys.argv", ["/usr/bin/ua", "auto-attach", "--help"] @@ -76,6 +79,16 @@ ] == m_full_auto_attach.call_args_list assert [mock.call(cfg)] == m_post_cli_attach.call_args_list + @pytest.mark.parametrize( + "api_side_effect,expected_err,expected_ret", + ( + ( + exceptions.UrlError(cause="does-not-matter"), + messages.ATTACH_FAILURE.msg, + 1, + ), + ), + ) @mock.patch(M_PATH + "event") @mock.patch(M_PATH + "_post_cli_attach") @mock.patch(M_PATH + "_full_auto_attach") @@ -84,33 +97,44 @@ m_full_auto_attach, m_post_cli_attach, m_event, + api_side_effect, + expected_err, + expected_ret, FakeConfig, ): - m_full_auto_attach.side_effect = exceptions.UrlError( - cause="does-not-matter" - ) + m_full_auto_attach.side_effect = (api_side_effect,) cfg = FakeConfig() - assert 1 == 
action_auto_attach(mock.MagicMock(), cfg=cfg) + assert expected_ret == action_auto_attach(mock.MagicMock(), cfg=cfg) - assert [ - mock.call("Failed to attach machine. See https://ubuntu.com/pro") - ] == m_event.info.call_args_list + assert [mock.call(expected_err)] == m_event.info.call_args_list assert [] == m_post_cli_attach.call_args_list @pytest.mark.parametrize( - "api_side_effect, expected_err", + "api_side_effect,expected_err,expected_ret", [ - (exceptions.UserFacingError("foo"), "foo\n"), + (exceptions.UserFacingError("foo"), "foo\n", 1), ( exceptions.AlreadyAttachedError("foo"), "This machine is already attached to 'foo'\n" "To use a different subscription first run: sudo pro" " detach.\n", + 2, ), ( api_exceptions.AutoAttachDisabledError, "features.disable_auto_attach set in config\n", + 1, + ), + ( + exceptions.EntitlementsNotEnabledError( + failed_services=[ + ("esm-infra", messages.NamedMessage("test", "test")), + ("livepatch", messages.NamedMessage("test", "test")), + ] + ), + messages.ENTITLEMENTS_NOT_ENABLED_ERROR.msg + "\n", + 4, ), ], ) @@ -124,15 +148,15 @@ m_logging, api_side_effect, expected_err, + expected_ret, capsys, FakeConfig, ): m_full_auto_attach.side_effect = api_side_effect cfg = FakeConfig() - with pytest.raises(SystemExit): - assert 1 == main_error_handler(action_auto_attach)( - mock.MagicMock(), cfg=cfg - ) + with pytest.raises(SystemExit) as excinfo: + main_error_handler(action_auto_attach)(mock.MagicMock(), cfg=cfg) + assert expected_ret == excinfo.value.code _out, err = capsys.readouterr() assert expected_err == err assert [] == m_post_cli_attach.call_args_list diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli_collect_logs.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli_collect_logs.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli_collect_logs.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli_collect_logs.py 2023-05-30 19:02:35.000000000 +0000 @@ -29,8 +29,11 @@ class TestActionCollectLogs: + @mock.patch("uaclient.cli.setup_logging") @mock.patch(M_PATH + "contract.get_available_resources") - def test_collect_logs_help(self, _m_resources, capsys, FakeConfig): + def test_collect_logs_help( + self, _m_resources, _m_setup_logging, capsys, FakeConfig + ): with pytest.raises(SystemExit): with mock.patch( "sys.argv", ["/usr/bin/ua", "collect-logs", "--help"] @@ -43,6 +46,8 @@ out, _err = capsys.readouterr() assert re.match(HELP_OUTPUT, out) + @pytest.mark.parametrize("is_root", ((True), (False))) + @mock.patch("uaclient.util.we_are_currently_root") @mock.patch( "glob.glob", return_value=[ @@ -54,22 +59,47 @@ @mock.patch("builtins.open") @mock.patch(M_PATH + "util.redact_sensitive_logs", return_value="test") # let's pretend all files exist + @mock.patch("pathlib.Path.stat") + @mock.patch("os.chown") @mock.patch("os.path.isfile", return_value=True) @mock.patch("uaclient.system.write_file") @mock.patch("uaclient.system.load_file") @mock.patch("uaclient.system.subp", return_value=(None, None)) + @mock.patch("uaclient.log.get_user_log_file") + @mock.patch("uaclient.log.get_all_user_log_files") def test_collect_logs( self, + m_get_users, + m_get_user, m_subp, _load_file, _write_file, m_isfile, + _chown, + _stat, redact, _fopen, _tarfile, _glob, + util_we_are_currently_root, + is_root, FakeConfig, + tmpdir, ): + util_we_are_currently_root.return_value = is_root + m_get_user.return_value = tmpdir.join("user-log").strpath + m_get_users.return_value = [ + 
tmpdir.join("user1-log").strpath, + tmpdir.join("user2-log").strpath, + ] + is_file_calls = 17 + user_log_files = [mock.call(m_get_user())] + if util_we_are_currently_root(): + user_log_files = [ + mock.call(m_get_users()[0]), + mock.call(m_get_users()[1]), + ] + cfg = FakeConfig() action_collect_logs(mock.MagicMock(), cfg=cfg) @@ -117,10 +147,10 @@ ), ] - assert m_isfile.call_count == 17 + assert m_isfile.call_count == is_file_calls assert m_isfile.call_args_list == [ mock.call("/etc/ubuntu-advantage/uaclient.conf"), - mock.call("/var/log/ubuntu-advantage.log"), + mock.call(cfg.log_file), mock.call("/var/log/ubuntu-advantage-timer.log"), mock.call("/var/log/ubuntu-advantage-daemon.log"), mock.call("/var/lib/ubuntu-advantage/jobs-status.json"), @@ -137,7 +167,7 @@ mock.call("/var/log/ubuntu-advantage.log"), mock.call("/var/log/ubuntu-advantage.log.1"), ] - assert redact.call_count == 17 + assert redact.call_count == is_file_calls + len(user_log_files) class TestParser: diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli_detach.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli_detach.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli_detach.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli_detach.py 2023-05-30 19:02:35.000000000 +0000 @@ -18,6 +18,7 @@ def entitlement_cls_mock_factory(can_disable, name=None): m_instance = mock.MagicMock() + m_instance.enabled_variant = None m_instance.can_disable.return_value = (can_disable, None) m_instance.disable.return_value = (can_disable, None) type(m_instance).dependent_services = mock.PropertyMock(return_value=()) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli_disable.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli_disable.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli_disable.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli_disable.py 2023-05-30 19:02:35.000000000 +0000 @@ -52,8 +52,11 @@ class TestDisable: + @mock.patch("uaclient.cli.setup_logging") @mock.patch("uaclient.cli.contract.get_available_resources") - def test_disable_help(self, _m_resources, capsys, FakeConfig): + def test_disable_help( + self, _m_resources, _m_setup_logging, capsys, FakeConfig + ): with pytest.raises(SystemExit): with mock.patch("sys.argv", ["/usr/bin/ua", "disable", "--help"]): with mock.patch( @@ -104,6 +107,7 @@ m_entitlement_cls = mock.Mock() m_entitlement = m_entitlement_cls.return_value + m_entitlement.enabled_variant = None m_entitlement.disable.return_value = (disable_return, fail) entitlements_obj.append(m_entitlement) @@ -198,6 +202,7 @@ m_ent1_cls = mock.Mock() m_ent1_obj = m_ent1_cls.return_value + m_ent1_obj.enabled_variant = None m_ent1_obj.disable.return_value = ( False, CanDisableFailure( @@ -209,6 +214,7 @@ m_ent2_cls = mock.Mock() m_ent2_obj = m_ent2_cls.return_value + m_ent2_obj.enabled_variant = None m_ent2_obj.disable.return_value = ( False, CanDisableFailure( @@ -220,6 +226,7 @@ m_ent3_cls = mock.Mock() m_ent3_obj = m_ent3_cls.return_value + m_ent3_obj.enabled_variant = None m_ent3_obj.disable.return_value = (True, None) type(m_ent3_obj).name = mock.PropertyMock(return_value="ent3") diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli_enable.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli_enable.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli_enable.py 2023-04-05 15:14:00.000000000 +0000 +++ 
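For illustration (not part of the diff): the collect-logs test above expects a root run to gather every user's client log while a non-root run only collects the invoking user's. A small sketch of that selection, using the uaclient helpers the test mocks; treat it as an approximation of the collection logic:

    from uaclient import log, util

    def user_log_files_to_collect():
        # Root can read every user's Pro client log; others only their own.
        if util.we_are_currently_root():
            return log.get_all_user_log_files()
        return [log.get_user_log_file()]
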
ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli_enable.py 2023-05-30 19:02:35.000000000 +0000 @@ -31,15 +31,19 @@ realtime-kernel. --beta allow beta service to be enabled --format {cli,json} output enable in the specified format (default: cli) + --variant VARIANT The name of the variant to use when enabling the + service """ @mock.patch("uaclient.contract.request_updated_contract") class TestActionEnable: + @mock.patch("uaclient.cli.setup_logging") @mock.patch("uaclient.cli.contract.get_available_resources") def test_enable_help( self, _m_resources, + _m_setup_logging, _request_updated_contract, capsys, FakeConfig, @@ -64,6 +68,7 @@ capsys, event, FakeConfig, + tmpdir, ): """Check that a UID != 0 will receive a message and exit non-zero""" args = mock.MagicMock() @@ -72,6 +77,12 @@ with pytest.raises(exceptions.NonRootUserError): action_enable(args, cfg=cfg) + default_get_user_log_file = tmpdir.join("default.log").strpath + defaults_ret = { + "log_level": "debug", + "log_file": default_get_user_log_file, + } + with pytest.raises(SystemExit): with mock.patch( "sys.argv", @@ -88,7 +99,15 @@ "uaclient.config.UAConfig", return_value=FakeConfig(), ): - main() + with mock.patch( + "uaclient.log.get_user_log_file", + return_value=tmpdir.join("user.log").strpath, + ): + with mock.patch.dict( + "uaclient.cli.defaults.CONFIG_DEFAULTS", + defaults_ret, + ): + main() expected_message = messages.NONROOT_USER expected = { @@ -272,6 +291,8 @@ args = mock.MagicMock() args.service = ["bogus"] args.command = "enable" + args.access_only = False + with pytest.raises(exceptions.UserFacingError) as err: action_enable(args, cfg) @@ -402,6 +423,7 @@ args.assume_yes = assume_yes args.beta = False args.access_only = False + args.variant = "" with mock.patch( "uaclient.entitlements.entitlement_factory", @@ -454,7 +476,7 @@ m_ent3_obj = m_ent3_cls.return_value m_ent3_obj.enable.return_value = (True, None) - def factory_side_effect(cfg, name, not_found_okay=True): + def factory_side_effect(cfg, name, variant): if name == "ent2": return m_ent2_cls if name == "ent3": @@ -471,6 +493,7 @@ args_mock.access_only = False args_mock.assume_yes = assume_yes args_mock.beta = False + args_mock.variant = "" expected_msg = "One moment, checking your subscription first\n" @@ -580,8 +603,9 @@ args_mock.access_only = False args_mock.assume_yes = assume_yes args_mock.beta = beta_flag + args_mock.variant = "" - def factory_side_effect(cfg, name, not_found_okay=True): + def factory_side_effect(cfg, name, variant): if name == "ent2": return m_ent2_cls if name == "ent3": @@ -703,6 +727,7 @@ args_mock.service = ["ent1"] args_mock.assume_yes = False args_mock.beta = False + args_mock.access_only = False with mock.patch( "uaclient.entitlements.entitlement_factory", @@ -770,6 +795,7 @@ args_mock = mock.MagicMock() args_mock.service = service args_mock.beta = beta + args_mock.access_only = False with pytest.raises(exceptions.UserFacingError) as err: fake_stdout = io.StringIO() @@ -844,6 +870,7 @@ args_mock.assume_yes = False args_mock.beta = allow_beta args_mock.service = ["testitlement"] + args_mock.variant = "" with mock.patch( "uaclient.entitlements.entitlement_factory", @@ -930,3 +957,14 @@ "warnings": [], } assert expected == json.loads(fake_stdout.getvalue()) + + def test_access_only_cannot_be_used_together_with_variant( + self, _m_get_available_resources, FakeConfig + ): + cfg = FakeConfig.for_attached_machine() + args_mock = mock.MagicMock() + args_mock.access_only = True + args_mock.variant = "variant" + + with 
pytest.raises(exceptions.InvalidOptionCombination): + action_enable(args_mock, cfg) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli_fix.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli_fix.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli_fix.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli_fix.py 2023-05-30 19:02:35.000000000 +0000 @@ -25,13 +25,18 @@ --dry-run If used, fix will not actually run but will display everything that will happen on the machine during the command. + --no-related If used, when fixing a USN, the command will not try to also + fix related USNs to the target USN. """ ) class TestActionFix: + @mock.patch("uaclient.cli.setup_logging") @mock.patch("uaclient.cli.contract.get_available_resources") - def test_fix_help(self, _m_resources, capsys, FakeConfig): + def test_fix_help( + self, _m_resources, _m_setup_logging, capsys, FakeConfig + ): with pytest.raises(SystemExit): with mock.patch("sys.argv", ["/usr/bin/ua", "fix", "--help"]): with mock.patch( @@ -67,12 +72,16 @@ ): """Check that root and non-root will emit attached status""" cfg = FakeConfig() - args = mock.MagicMock(security_issue=issue, dry_run=False) + args = mock.MagicMock( + security_issue=issue, dry_run=False, no_related=False + ) m_fix_security_issue_id.return_value = FixStatus.SYSTEM_NON_VULNERABLE if is_valid: assert 0 == action_fix(args, cfg=cfg) assert [ - mock.call(cfg=cfg, issue_id=issue, dry_run=False) + mock.call( + cfg=cfg, issue_id=issue, dry_run=False, no_related=False + ) ] == m_fix_security_issue_id.call_args_list else: with pytest.raises(exceptions.UserFacingError) as excinfo: diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli.py 2023-06-01 18:49:33.000000000 +0000 @@ -12,7 +12,7 @@ import mock import pytest -from uaclient import exceptions, messages, status +from uaclient import defaults, exceptions, messages, status from uaclient.cli import ( action_help, assert_attached, @@ -48,8 +48,7 @@ {"name": "ros"}, ] -ALL_SERVICES_WRAPPED_HELP = textwrap.dedent( - """ +SERVICES_WRAPPED_HELP = """\ Client to manage Ubuntu Pro services on a machine. - cc-eal: Common Criteria EAL2 Provisioning Packages (https://ubuntu.com/cc-eal) @@ -69,28 +68,8 @@ (https://ubuntu.com/robotics/ros-esm) - ros: Security Updates for the Robot Operating System (https://ubuntu.com/robotics/ros-esm) -""" -) -SERVICES_WRAPPED_HELP = textwrap.dedent( - """ -Client to manage Ubuntu Pro services on a machine. 
- - cc-eal: Common Criteria EAL2 Provisioning Packages - (https://ubuntu.com/cc-eal) - - cis: Security compliance and audit tools - (https://ubuntu.com/security/certifications/docs/usg) - - esm-apps: Expanded Security Maintenance for Applications - (https://ubuntu.com/security/esm) - - esm-infra: Expanded Security Maintenance for Infrastructure - (https://ubuntu.com/security/esm) - - fips-updates: NIST-certified core packages with priority security updates - (https://ubuntu.com/security/certifications#fips) - - fips: NIST-certified core packages - (https://ubuntu.com/security/certifications#fips) - - livepatch: Canonical Livepatch service - (https://ubuntu.com/security/livepatch) -""" -) +Use pro help to get more details about each service""" @pytest.fixture(params=["direct", "--help", "pro help", "pro help --all"]) @@ -147,10 +126,17 @@ maxDiff = None @mock.patch("uaclient.util.we_are_currently_root", return_value=False) + @mock.patch("uaclient.log.get_user_log_file") @mock.patch("uaclient.cli.entitlements") @mock.patch("uaclient.cli.contract") def test_help_descr_and_url_is_wrapped_at_eighty_chars( - self, m_contract, m_entitlements, m_we_are_currently_root, get_help + self, + m_contract, + m_entitlements, + m_get_user_log_file, + m_we_are_currently_root, + get_help, + tmpdir, ): """Help lines are wrapped at 80 chars""" @@ -160,7 +146,12 @@ help_doc_url=BIG_URL, is_beta=False, ) - + m_get_user_log_file.return_value = tmpdir.join("user.log").strpath + default_get_user_log_file = tmpdir.join("default.log").strpath + defaults_ret = { + "log_level": "debug", + "log_file": default_get_user_log_file, + } m_entitlements.entitlement_factory.return_value = mocked_ent m_contract.get_available_resources.return_value = [{"name": "test"}] @@ -168,21 +159,36 @@ " - test: " + " ".join(["123456789"] * 7), " next line ({url})".format(url=BIG_URL), ] - out, _ = get_help() + with mock.patch.dict( + "uaclient.cli.defaults.CONFIG_DEFAULTS", defaults_ret + ): + out, _ = get_help() assert "\n".join(lines) in out @mock.patch("uaclient.util.we_are_currently_root", return_value=False) + @mock.patch("uaclient.log.get_user_log_file") @mock.patch("uaclient.cli.contract") def test_help_sourced_dynamically_from_each_entitlement( - self, m_contract, m_we_are_currently_root, get_help + self, + m_contract, + m_get_user_log_file, + m_we_are_currently_root, + get_help, + tmpdir, ): """Help output is sourced from entitlement name and description.""" m_contract.get_available_resources.return_value = AVAILABLE_RESOURCES - out, type_request = get_help() - if type_request == "base": + m_get_user_log_file.return_value = tmpdir.join("user.log").strpath + default_get_user_log_file = tmpdir.join("default.log").strpath + defaults_ret = { + "log_level": "debug", + "log_file": default_get_user_log_file, + } + with mock.patch.dict( + "uaclient.cli.defaults.CONFIG_DEFAULTS", defaults_ret + ): + out, type_request = get_help() assert SERVICES_WRAPPED_HELP in out - else: - assert ALL_SERVICES_WRAPPED_HELP in out @pytest.mark.parametrize( "out_format, expected_return", @@ -197,9 +203,7 @@ ), ) @mock.patch("uaclient.status.get_available_resources") - @mock.patch( - "uaclient.config.UAConfig.is_attached", new_callable=mock.PropertyMock - ) + @mock.patch("uaclient.status._is_attached") def test_help_command_when_unnatached( self, m_attached, m_available_resources, out_format, expected_return ): @@ -219,7 +223,7 @@ m_entitlement_obj = m_entitlement_cls.return_value type(m_entitlement_obj).help_info = m_ent_help_info - m_attached.return_value = 
False + m_attached.return_value = mock.MagicMock(is_attached=False) m_available_resources.return_value = [ {"name": "test", "available": True} @@ -253,9 +257,7 @@ ) @pytest.mark.parametrize("is_beta", (True, False)) @mock.patch("uaclient.status.get_available_resources") - @mock.patch( - "uaclient.config.UAConfig.is_attached", new_callable=mock.PropertyMock - ) + @mock.patch("uaclient.status._is_attached") def test_help_command_when_attached( self, m_attached, m_available_resources, ent_status, ent_msg, is_beta ): @@ -285,7 +287,7 @@ m_ent_desc = mock.PropertyMock(return_value="description") type(m_entitlement_obj).description = m_ent_desc - m_attached.return_value = True + m_attached.return_value = mock.MagicMock(is_attached=True) m_available_resources.return_value = [ {"name": "test", "available": True} ] @@ -717,6 +719,45 @@ assert "UA_ENV=YES" in log assert "UA_FEATURES_WOW=XYZ" in log + @mock.patch("uaclient.cli.setup_logging") + @mock.patch("uaclient.cli.get_parser") + @mock.patch("uaclient.cli.config.UAConfig") + @pytest.mark.parametrize("config_error", [True, False]) + def test_setup_logging_with_defaults( + self, + m_config, + _m_get_parser, + m_setup_logging, + config_error, + logging_sandbox, + tmpdir, + FakeConfig, + ): + log_file = tmpdir.join("file.log") + cfg = FakeConfig({"log_file": log_file.strpath}) + if not config_error: + m_config.return_value = cfg + else: + m_config.side_effect = OSError("Error reading UAConfig") + + with contextlib.suppress(SystemExit): + main(["some", "args"]) + + expected_setup_logging_calls = [ + mock.call( + logging.INFO, + defaults.CONFIG_DEFAULTS["log_level"], + defaults.CONFIG_DEFAULTS["log_file"], + ), + ] + + if not config_error: + expected_setup_logging_calls.append( + mock.call(mock.ANY, mock.ANY, cfg.log_file), + ) + + assert expected_setup_logging_calls == m_setup_logging.call_args_list + @mock.patch("uaclient.cli.contract.get_available_resources") def test_argparse_errors_well_formatted( self, _m_resources, capsys, FakeConfig @@ -738,14 +779,73 @@ == str(err) ) + @pytest.mark.parametrize("caplog_text", [logging.DEBUG], indirect=True) + @pytest.mark.parametrize( + "cli_args,is_tty,should_warn", + ( + (["pro", "status"], True, False), + (["pro", "status"], False, True), + (["pro", "status", "--format", "tabular"], True, False), + (["pro", "status", "--format", "tabular"], False, True), + (["pro", "status", "--format", "json"], True, False), + (["pro", "status", "--format", "json"], False, False), + (["pro", "security-status"], True, False), + (["pro", "security-status"], False, True), + (["pro", "security-status", "--format", "json"], True, False), + (["pro", "security-status", "--format", "json"], False, False), + ), + ) + @mock.patch("uaclient.cli.action_status") + @mock.patch("uaclient.cli.action_security_status") + @mock.patch("uaclient.cli.setup_logging") + @mock.patch("sys.stdout.isatty") + def test_status_human_readable_warning( + self, + m_tty, + _m_setup_logging, + _m_action_security_status, + _m_action_status, + caplog_text, + cli_args, + is_tty, + should_warn, + FakeConfig, + ): + check_text = "WARNING: this output is intended to be human readable" + m_tty.return_value = is_tty + with mock.patch("sys.argv", cli_args): + with mock.patch( + "uaclient.config.UAConfig", + return_value=FakeConfig(), + ): + main() + + logs = caplog_text() + if should_warn: + assert check_text in logs + else: + assert check_text not in logs + class TestSetupLogging: @pytest.mark.parametrize("level", (logging.INFO, logging.ERROR)) 
@mock.patch("uaclient.cli.util.we_are_currently_root", return_value=False) + @mock.patch("uaclient.log.get_user_log_file") def test_console_log_configured_if_not_present( - self, m_we_are_currently_root, level, capsys, logging_sandbox + self, + m_get_user, + m_we_are_currently_root, + level, + capsys, + logging_sandbox, + FakeConfig, + tmpdir, ): - setup_logging(level, logging.INFO) + m_get_user.return_value = tmpdir.join("user.log").strpath + with mock.patch( + "uaclient.cli.config.UAConfig", return_value=FakeConfig() + ): + setup_logging(level, logging.INFO) logging.log(level, "after setup") logging.log(level - 1, "not present") @@ -754,14 +854,25 @@ assert "not present" not in err @mock.patch("uaclient.cli.util.we_are_currently_root", return_value=False) + @mock.patch("uaclient.log.get_user_log_file") def test_console_log_configured_if_already_present( - self, m_we_are_currently_root, capsys, logging_sandbox + self, + m_get_user, + m_we_are_currently_root, + capsys, + logging_sandbox, + FakeConfig, + tmpdir, ): + m_get_user.return_value = tmpdir.join("user.log").strpath logging.getLogger().addHandler(logging.StreamHandler(sys.stderr)) - logging.error("before setup") - setup_logging(logging.INFO, logging.INFO) - logging.error("after setup") + with mock.patch( + "uaclient.cli.config.UAConfig", return_value=FakeConfig() + ): + logging.error("before setup") + setup_logging(logging.INFO, logging.INFO) + logging.error("after setup") # 'before setup' will be in stderr, so check that setup_logging # configures the format @@ -769,16 +880,22 @@ assert "ERROR: before setup" not in err assert "ERROR: after setup" in err + @mock.patch("uaclient.log.get_user_log_file") @mock.patch("uaclient.cli.util.we_are_currently_root", return_value=False) - def test_file_log_not_configured_if_not_root( - self, m_we_are_currently_root, tmpdir, logging_sandbox + def test_user_file_log_configured_if_not_root( + self, + m_we_are_currently_root, + m_log_get_user_log_file, + tmpdir, + logging_sandbox, ): log_file = tmpdir.join("log_file") + m_log_get_user_log_file.return_value = log_file.strpath - setup_logging(logging.INFO, logging.INFO, log_file=log_file.strpath) + setup_logging(logging.INFO, logging.INFO) logging.info("after setup") - assert not log_file.exists() + assert log_file.exists() @pytest.mark.parametrize("log_filename", (None, "file.log")) @mock.patch("uaclient.cli.config") @@ -898,7 +1015,7 @@ ): log_file = tmpdir.join("root-only.log") log_path = log_file.strpath - expected_mode = 0o644 + expected_mode = 0o640 if pre_existing: expected_mode = 0o640 log_file.write("existing content\n") diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli_reboot_required.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli_reboot_required.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli_reboot_required.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli_reboot_required.py 2023-05-30 19:02:35.000000000 +0000 @@ -22,7 +22,8 @@ class TestActionRebootRequired: - def test_enable_help(self, capsys, FakeConfig): + @mock.patch("uaclient.cli.setup_logging") + def test_enable_help(self, _m_setup_logging, capsys, FakeConfig): with pytest.raises(SystemExit): with mock.patch( "sys.argv", diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli_refresh.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli_refresh.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli_refresh.py 2023-04-05 15:14:00.000000000 
+0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli_refresh.py 2023-05-30 19:02:35.000000000 +0000 @@ -28,8 +28,11 @@ class TestActionRefresh: + @mock.patch("uaclient.cli.setup_logging") @mock.patch("uaclient.cli.contract.get_available_resources") - def test_refresh_help(self, _m_resources, capsys, FakeConfig): + def test_refresh_help( + self, _m_resources, _m_setup_logging, capsys, FakeConfig + ): with pytest.raises(SystemExit): with mock.patch("sys.argv", ["/usr/bin/ua", "refresh", "--help"]): with mock.patch( @@ -144,7 +147,7 @@ assert messages.REFRESH_MESSAGES_FAILURE == excinfo.value.msg @mock.patch("uaclient.apt_news.update_apt_news") - @mock.patch("uaclient.jobs.update_messaging.exists", return_value=True) + @mock.patch("uaclient.timer.update_messaging.exists", return_value=True) @mock.patch("logging.exception") @mock.patch("uaclient.system.subp") @mock.patch("uaclient.cli.update_motd_messages") diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli_security_status.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli_security_status.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli_security_status.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli_security_status.py 2023-05-30 19:02:35.000000000 +0000 @@ -54,8 +54,10 @@ @mock.patch(M_PATH + "security_status.security_status_dict") @mock.patch(M_PATH + "contract.get_available_resources") class TestActionSecurityStatus: + @mock.patch(M_PATH + "setup_logging") def test_security_status_help( self, + _m_setup_logging, _m_resources, _m_security_status_dict, _m_security_status, @@ -119,8 +121,10 @@ assert m_security_status.call_args_list == [mock.call(cfg)] assert m_security_status.call_count == 1 + @mock.patch(M_PATH + "setup_logging") def test_error_on_wrong_format( self, + _m_setup_logging, _m_resources, _m_security_status_dict, _m_security_status, diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli_status.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli_status.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_cli_status.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_cli_status.py 2023-05-30 19:02:35.000000000 +0000 @@ -110,6 +110,8 @@ esm-infra yes Expanded Security Maintenance for Infrastructure livepatch yes Canonical Livepatch service +For a list of all Ubuntu Pro services, run 'pro status --all' + This machine is not attached to an Ubuntu Pro subscription. 
See https://ubuntu.com/pro """ # noqa: E501 @@ -140,6 +142,7 @@ esm-infra no {dash} Expanded Security Maintenance for Infrastructure livepatch no {dash} Canonical Livepatch service {notices}{features} +For a list of all Ubuntu Pro services, run 'pro status --all' Enable services with: pro enable Account: test_account @@ -340,8 +343,10 @@ return_value=RESPONSE_CONTRACT_INFO, ) class TestActionStatus: + @mock.patch(M_PATH + "setup_logging") def test_status_help( self, + _m_setup_logging, _m_get_contract_information, _m_get_available_resources, _m_should_reboot, @@ -630,6 +635,7 @@ "config": { "data_dir": mock.ANY, "ua_config": mock.ANY, + "log_file": mock.ANY, }, "simulated": False, "errors": [], @@ -754,6 +760,7 @@ "config": { "data_dir": mock.ANY, "ua_config": mock.ANY, + "log_file": mock.ANY, }, "simulated": False, "errors": [], @@ -921,6 +928,7 @@ "config": { "data_dir": mock.ANY, "ua_config": mock.ANY, + "log_file": mock.ANY, }, "errors": [], "warnings": [], diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_config.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_config.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_config.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_config.py 2023-05-30 19:02:35.000000000 +0000 @@ -175,7 +175,6 @@ removes, expected, ): - for notice_ in notices_: notices.add(*notice_) for label in removes: @@ -303,7 +302,7 @@ self, FakeConfig ): cfg = FakeConfig({"data_dir": "/my/d"}) - cfg.data_paths["test_path"] = DataPath("test_path", False, False) + cfg.data_paths["test_path"] = DataPath("test_path", False) assert "/my/d/test_path" == cfg.data_path("test_path") @@ -441,8 +440,8 @@ @pytest.mark.parametrize( "datapath,mode", ( - (DataPath("path", False, False), 0o644), - (DataPath("path", True, False), 0o600), + (DataPath("path", False), 0o644), + (DataPath("path", True), 0o600), ), ) def test_permissions(self, FakeConfig, datapath, mode): @@ -598,7 +597,7 @@ cfg.machine_token_file.delete() assert {} == cfg.machine_token_file.entitlements - def test_delete_cache_removes_all_data_path_files_with_delete_permanent( + def test_delete_cache_removes_all_data_path_files( self, tmpdir, FakeConfig ): """Any cached files defined in cfg.data_paths will be removed.""" @@ -619,7 +618,7 @@ ) ) assert len(odd_keys) == len(present_files) - cfg.delete_cache(delete_permanent=True) + cfg.delete_cache() dirty_files = list( itertools.chain( *[walk_entry[2] for walk_entry in os.walk(tmpdir.strpath)] @@ -629,39 +628,6 @@ ", ".join(dirty_files) ) - def test_delete_cache_ignores_permanent_data_path_files( - self, tmpdir, FakeConfig - ): - """Any cached files defined in cfg.data_paths will be removed.""" - cfg = FakeConfig() - for key in cfg.data_paths.keys(): - if key == "notices": - # notices key expects specific list or lists format - value = [[key, key]] - else: - value = key - cfg.write_cache(key, value) - - num_permanent_files = len( - [v for v in cfg.data_paths.values() if v.permanent] - ) - present_files = list( - itertools.chain( - *[walk_entry[2] for walk_entry in os.walk(tmpdir.strpath)] - ) - ) - assert len(cfg.data_paths.keys()) == len(present_files) - cfg.delete_cache() - cfg.machine_token_file.delete() - dirty_files = list( - itertools.chain( - *[walk_entry[2] for walk_entry in os.walk(tmpdir.strpath)] - ) - ) - assert num_permanent_files == len( - dirty_files - ), "{} files not deleted".format(", ".join(dirty_files)) - def test_delete_cache_ignores_files_not_defined_in_data_paths( self, 
tmpdir, FakeConfig ): @@ -1028,11 +994,9 @@ def test_parse_config_uses_defaults_when_no_config_present( self, _m_resources, m_exists ): - cwd = os.getcwd() with mock.patch.dict("uaclient.config.os.environ", values={}): config, _ = parse_config() expected_calls = [ - mock.call("{}/uaclient.conf".format(cwd)), mock.call("/etc/ubuntu-advantage/uaclient.conf"), ] assert expected_calls == m_exists.call_args_list @@ -1148,7 +1112,7 @@ self, m_load_file, m_path_exists ): m_load_file.return_value = "test: true\nfoo: bar" - m_path_exists.side_effect = [False, False, True] + m_path_exists.side_effect = [False, True] user_values = {"UA_FEATURES_TEST": "test.yaml"} with mock.patch.dict("uaclient.config.os.environ", values=user_values): @@ -1388,19 +1352,9 @@ ): assert "test" == get_config_path() - @mock.patch("uaclient.config.os.path.join", return_value="test123") - @mock.patch("uaclient.config.os.path.exists", return_value=True) - def test_get_config_path_from_local_dir(self, _m_exists, _m_join): - with mock.patch.dict("uaclient.config.os.environ", values={}): - assert "test123" == get_config_path() - assert _m_join.call_count == 1 - assert _m_exists.call_count == 1 - - @mock.patch("uaclient.config.os.path.exists", return_value=False) - def test_get_default_config_path(self, _m_exists): + def test_get_default_config_path(self): with mock.patch.dict("uaclient.config.os.environ", values={}): assert DEFAULT_CONFIG_FILE == get_config_path() - assert _m_exists.call_count == 1 class TestCheckLockInfo: diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_contract.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_contract.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_contract.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_contract.py 2023-06-01 18:49:33.000000000 +0000 @@ -8,11 +8,11 @@ from uaclient import exceptions, messages, util from uaclient.contract import ( - API_V1_CONTEXT_MACHINE_TOKEN, - API_V1_CONTRACT_INFORMATION, - API_V1_RESOURCES, - API_V1_TMPL_CONTEXT_MACHINE_TOKEN_RESOURCE, - API_V1_TMPL_RESOURCE_MACHINE_ACCESS, + API_V1_ADD_CONTRACT_MACHINE, + API_V1_AVAILABLE_RESOURCES, + API_V1_GET_CONTRACT_MACHINE, + API_V1_GET_CONTRACT_USING_TOKEN, + API_V1_GET_RESOURCE_MACHINE_ACCESS, UAContractClient, _get_override_weight, apply_contract_overrides, @@ -49,12 +49,18 @@ "machine_id_response", (("contract-machine-id"), None) ) @pytest.mark.parametrize("activity_id", ((None), ("test-acid"))) - @mock.patch("uaclient.contract.system.get_platform_info") - def test__request_machine_token_update( + @mock.patch("uaclient.contract.system.get_virt_type") + @mock.patch("uaclient.contract.system.get_dpkg_arch") + @mock.patch("uaclient.contract.system.get_kernel_info") + @mock.patch("uaclient.contract.system.get_release_info") + def test_update_contract_machine( self, - get_platform_info, - get_machine_id, - request_url, + m_get_release_info, + m_get_kernel_info, + m_get_dpkg_arch, + m_get_virt_type, + m_get_machine_id, + m_request_url, machine_id_response, activity_id, FakeConfig, @@ -63,8 +69,16 @@ Setting detach=True will result in a DELETE operation. 
""" - get_platform_info.return_value = {"arch": "arch", "kernel": "kernel"} - get_machine_id.return_value = "machineId" + m_get_release_info.return_value = mock.MagicMock( + distribution="Ubuntu", + release="release", + series="series", + pretty_version="version", + ) + m_get_kernel_info.return_value = mock.MagicMock(uname_release="kernel") + m_get_dpkg_arch.return_value = "arch" + m_get_virt_type.return_value = "virt" + m_get_machine_id.return_value = "machineId" machine_token = {"machineTokenInfo": {}} if machine_id_response: @@ -72,7 +86,7 @@ "machineId" ] = machine_id_response - request_url.return_value = (machine_token, {}) + m_request_url.return_value = (machine_token, {}) cfg = FakeConfig.for_attached_machine() client = UAContractClient(cfg) kwargs = {"machine_token": "mToken", "contract_id": "cId"} @@ -91,7 +105,7 @@ "user_facing_status", new=entitlement_user_facing_status, ): - client._request_machine_token_update(**kwargs) + client._update_contract_machine(**kwargs) assert machine_token != cfg.machine_token_file.machine_token client.update_files_after_machine_token_update(machine_token) @@ -114,14 +128,22 @@ params["data"] = { "machineId": "machineId", "architecture": "arch", - "os": {"kernel": "kernel"}, + "os": { + "type": "Linux", + "kernel": "kernel", + "distribution": "Ubuntu", + "release": "release", + "series": "series", + "version": "version", + "virt": "virt", + }, "activityInfo": { "activityToken": None, "activityID": expected_activity_id, "resources": enabled_services, }, } - assert request_url.call_args_list == [ + assert m_request_url.call_args_list == [ mock.call("/v1/contracts/cId/context/machines/machineId", **params) ] @@ -134,12 +156,12 @@ ((None, "POST"), (False, "POST"), (True, "DELETE")), ) @pytest.mark.parametrize("activity_id", ((None), ("test-acid"))) - @mock.patch("uaclient.contract.system.get_platform_info") + @mock.patch("uaclient.contract.system.get_release_info") @mock.patch.object(UAContractClient, "_get_platform_data") - def test_get_updated_contract_info( + def test_get_contract_machine( self, m_platform_data, - get_platform_info, + get_release_info, get_machine_id, request_url, detach, @@ -154,12 +176,10 @@ return {"machineId": machine_id} m_platform_data.side_effect = fake_platform_data - get_platform_info.return_value = { - "arch": "arch", - "kernel": "kernel", - "series": "series", - "virt": "mtype", - } + get_release_info.return_value = mock.MagicMock( + kernel="kernel", + series="series", + ) get_machine_id.return_value = "machineId" machine_token = {"machineTokenInfo": {}} @@ -175,10 +195,10 @@ } cfg = FakeConfig.for_attached_machine() client = UAContractClient(cfg) - resp = client.get_updated_contract_info(**kwargs) + resp = client.get_contract_machine(**kwargs) assert resp == machine_token - def test_request_resource_machine_access( + def test_get_resource_machine_access( self, get_machine_id, request_url, FakeConfig ): """GET from resource-machine-access route to "enable" a service""" @@ -187,7 +207,7 @@ cfg = FakeConfig.for_attached_machine() client = UAContractClient(cfg) kwargs = {"machine_token": "mToken", "resource": "cis"} - assert "response" == client.request_resource_machine_access(**kwargs) + assert "response" == client.get_resource_machine_access(**kwargs) assert "response" == cfg.read_cache("machine-access-cis") params = { "headers": { @@ -201,7 +221,7 @@ mock.call("/v1/resources/cis/context/machines/machineId", **params) ] == request_url.call_args_list - def test_request_contract_information( + def 
test_get_contract_using_token( self, _m_machine_id, m_request_url, FakeConfig ): m_request_url.return_value = ("response", {}) @@ -217,7 +237,7 @@ } } - assert "response" == client.request_contract_information("some_token") + assert "response" == client.get_contract_using_token("some_token") assert [ mock.call("/v1/contract", **params) ] == m_request_url.call_args_list @@ -226,7 +246,7 @@ @pytest.mark.parametrize( "enabled_services", (([]), (["esm-apps", "livepatch"])) ) - def test_report_machine_activity( + def test_update_activity_token( self, get_machine_id, request_url, @@ -264,7 +284,7 @@ with mock.patch( "uaclient.config.files.MachineTokenFile.write" ) as m_write_file: - client.report_machine_activity() + client.update_activity_token() expected_write_calls = 1 assert expected_write_calls == m_write_file.call_count @@ -292,7 +312,7 @@ "machine_id_response", (("contract-machine-id"), None) ) @mock.patch.object(UAContractClient, "_get_platform_data") - def test__request_contract_machine_attach( + def test_add_contract_machine( self, m_platform_data, get_machine_id, @@ -328,7 +348,7 @@ cfg = FakeConfig() client = UAContractClient(cfg) - client.request_contract_machine_attach( + client.add_contract_machine( contract_token=contract_token, machine_id=machine_id_param ) @@ -548,8 +568,8 @@ assert expected_calls == m_process_contract_deltas.call_args_list @mock.patch( - "uaclient.system.get_platform_info", - return_value={"series": "fake_series"}, + "uaclient.system.get_release_info", + return_value=mock.MagicMock(series="fake_series"), ) @mock.patch(M_REPO_PATH + "process_contract_deltas") def test_overrides_applied_before_comparison( @@ -572,9 +592,9 @@ class TestGetAvailableResources: - @mock.patch.object(UAContractClient, "request_resources") - def test_request_resources_error_on_network_disconnected( - self, m_request_resources, FakeConfig + @mock.patch.object(UAContractClient, "available_resources") + def test_available_resources_error_on_network_disconnected( + self, m_available_resources, FakeConfig ): """Raise error get_available_resources can't contact backend""" cfg = FakeConfig() @@ -582,18 +602,20 @@ urlerror = exceptions.UrlError( socket.gaierror(-2, "Name or service not known") ) - m_request_resources.side_effect = urlerror + m_available_resources.side_effect = urlerror with pytest.raises(exceptions.UrlError) as exc: get_available_resources(cfg) assert urlerror == exc.value @mock.patch(M_PATH + "UAContractClient") - def test_request_resources_from_contract_server(self, client, FakeConfig): - """Call UAContractClient.request_resources to get updated resources.""" + def test_available_resources_from_contract_server( + self, client, FakeConfig + ): + """Call get_available_resources to get updated resources.""" cfg = FakeConfig() - url = API_V1_RESOURCES + url = API_V1_AVAILABLE_RESOURCES new_resources = [{"name": "new_resource", "available": False}] @@ -613,7 +635,7 @@ ): cfg = FakeConfig() - url = API_V1_CONTRACT_INFORMATION + url = API_V1_GET_CONTRACT_USING_TOKEN information = {"contract": "some_contract_data"} @@ -629,13 +651,13 @@ class TestRequestUpdatedContract: - refresh_route = API_V1_TMPL_CONTEXT_MACHINE_TOKEN_RESOURCE.format( + refresh_route = API_V1_GET_CONTRACT_MACHINE.format( contract="cid", machine="mid" ) - access_route_ent1 = API_V1_TMPL_RESOURCE_MACHINE_ACCESS.format( + access_route_ent1 = API_V1_GET_RESOURCE_MACHINE_ACCESS.format( resource="ent1", machine="mid" ) - access_route_ent2 = API_V1_TMPL_RESOURCE_MACHINE_ACCESS.format( + access_route_ent2 = 
API_V1_GET_RESOURCE_MACHINE_ACCESS.format( resource="ent2", machine="mid" ) @@ -734,7 +756,7 @@ def fake_contract_client(cfg): fake_client = FakeContractClient(cfg) fake_client._responses = { - API_V1_CONTEXT_MACHINE_TOKEN: exceptions.ContractAPIError( + API_V1_ADD_CONTRACT_MACHINE: exceptions.ContractAPIError( exceptions.UrlError( "Server error", code=error_code, @@ -991,11 +1013,11 @@ assert process_calls == process_entitlement_delta.call_args_list -@mock.patch("uaclient.contract.UAContractClient.get_updated_contract_info") +@mock.patch("uaclient.contract.UAContractClient.get_contract_machine") class TestContractChanged: @pytest.mark.parametrize("has_contract_expired", (False, True)) def test_contract_change_with_expiry( - self, get_updated_contract_info, has_contract_expired, FakeConfig + self, m_get_contract_machine, has_contract_expired, FakeConfig ): if has_contract_expired: expiry_date = util.parse_rfc3339_date("2041-05-08T19:02:26Z") @@ -1003,7 +1025,7 @@ else: expiry_date = util.parse_rfc3339_date("2040-05-08T19:02:26Z") ret_val = False - get_updated_contract_info.return_value = { + m_get_contract_machine.return_value = { "machineTokenInfo": { "contractInfo": { "effectiveTo": expiry_date, @@ -1015,7 +1037,7 @@ @pytest.mark.parametrize("has_contract_changed", (False, True)) def test_contract_change_with_entitlements( - self, get_updated_contract_info, has_contract_changed, FakeConfig + self, m_get_contract_machine, has_contract_changed, FakeConfig ): if has_contract_changed: resourceEntitlements = [{"type": "token1", "entitled": True}] @@ -1023,7 +1045,7 @@ else: resourceTokens = [] resourceEntitlements = [] - get_updated_contract_info.return_value = { + m_get_contract_machine.return_value = { "machineTokenInfo": { "machineId": "test_machine_id", "resourceTokens": resourceTokens, @@ -1073,13 +1095,14 @@ @pytest.mark.parametrize("include_overrides", (True, False)) @mock.patch( - "uaclient.system.get_platform_info", return_value={"series": "ubuntuX"} + "uaclient.system.get_release_info", + return_value=mock.MagicMock(series="ubuntuX"), ) @mock.patch( "uaclient.clouds.identity.get_cloud_type", return_value=(None, "") ) def test_return_same_dict_when_no_overrides_match( - self, _m_cloud_type, _m_platform_info, include_overrides + self, _m_cloud_type, _m_release_info, include_overrides ): orig_access = { "entitlement": { @@ -1097,6 +1120,21 @@ } } if include_overrides: + overrides = [ + { + "selector": {"series": "dontMatch"}, + "affordances": { + "some_affordance": ["ubuntuX-series-overriden"] + }, + }, + { + "selector": {"cloud": "dontMatch"}, + "affordances": { + "some_affordance": ["ubuntuX-cloud-overriden"] + }, + }, + ] + orig_access["entitlement"].update( { "series": { @@ -1106,30 +1144,19 @@ } } }, - "overrides": [ - { - "selector": {"series": "dontMatch"}, - "affordances": { - "some_affordance": ["ubuntuX-series-overriden"] - }, - }, - { - "selector": {"cloud": "dontMatch"}, - "affordances": { - "some_affordance": ["ubuntuX-cloud-overriden"] - }, - }, - ], + "overrides": overrides, } ) + expected["entitlement"]["overrides"] = overrides apply_contract_overrides(orig_access) assert expected == orig_access @mock.patch( - "uaclient.system.get_platform_info", return_value={"series": "ubuntuX"} + "uaclient.system.get_release_info", + return_value=mock.MagicMock(series="ubuntuX"), ) - def test_missing_keys_are_included(self, _m_platform_info): + def test_missing_keys_are_included(self, _m_release_info): orig_access = { "entitlement": { "series": {"ubuntuX": {"directives": {"suites": 
["ubuntuX"]}}} @@ -1157,7 +1184,8 @@ ), ) @mock.patch( - "uaclient.system.get_platform_info", return_value={"series": "ubuntuX"} + "uaclient.system.get_release_info", + return_value=mock.MagicMock(series="ubuntuX"), ) @mock.patch( "uaclient.clouds.identity.get_cloud_type", @@ -1166,13 +1194,31 @@ def test_applies_contract_overrides_respecting_weight( self, _m_cloud_type, - _m_platform_info, + _m_release_info, series_selector, cloud_selector, series_cloud_selector, expected_value, ): """Apply the expected overrides to orig_access dict when called.""" + overrides = [ + { + "selector": {"series": series_selector}, + "affordances": {"some_affordance": ["series_overriden"]}, + }, + { + "selector": {"cloud": cloud_selector}, + "affordances": {"some_affordance": ["cloud_overriden"]}, + }, + { + "selector": { + "series": series_selector, + "cloud": series_cloud_selector, + }, + "affordances": {"some_affordance": ["both_overriden"]}, + }, + ] + orig_access = { "entitlement": { "affordances": {"some_affordance": ["original_affordance"]}, @@ -1183,33 +1229,14 @@ } } }, - "overrides": [ - { - "selector": {"series": series_selector}, - "affordances": { - "some_affordance": ["series_overriden"] - }, - }, - { - "selector": {"cloud": cloud_selector}, - "affordances": { - "some_affordance": ["cloud_overriden"] - }, - }, - { - "selector": { - "series": series_selector, - "cloud": series_cloud_selector, - }, - "affordances": {"some_affordance": ["both_overriden"]}, - }, - ], + "overrides": overrides, } } expected = { "entitlement": { - "affordances": {"some_affordance": [expected_value]} + "affordances": {"some_affordance": [expected_value]}, + "overrides": overrides, } } @@ -1217,16 +1244,35 @@ assert orig_access == expected @mock.patch( - "uaclient.system.get_platform_info", return_value={"series": "ubuntuX"} + "uaclient.system.get_release_info", + return_value=mock.MagicMock(series="ubuntuX"), ) @mock.patch( "uaclient.clouds.identity.get_cloud_type", return_value=("cloudX", None), ) def test_different_overrides_applied_together( - self, _m_cloud_type, _m_platform_info + self, _m_cloud_type, _m_release_info ): """Apply different overrides from different matching selectors.""" + overrides = [ + { + "selector": {"series": "ubuntuX"}, + "affordances": {"some_affordance": ["series_overriden"]}, + }, + { + "selector": {"cloud": "cloudX"}, + "directives": {"some_directive": ["cloud_overriden"]}, + }, + { + "selector": {"series": "ubuntuX", "cloud": "cloudX"}, + "obligations": { + "new_obligation": True, + "some_obligation": True, + }, + }, + ] + orig_access = { "entitlement": { "affordances": {"some_affordance": ["original_affordance"]}, @@ -1239,25 +1285,7 @@ } } }, - "overrides": [ - { - "selector": {"series": "ubuntuX"}, - "affordances": { - "some_affordance": ["series_overriden"] - }, - }, - { - "selector": {"cloud": "cloudX"}, - "directives": {"some_directive": ["cloud_overriden"]}, - }, - { - "selector": {"series": "ubuntuX", "cloud": "cloudX"}, - "obligations": { - "new_obligation": True, - "some_obligation": True, - }, - }, - ], + "overrides": overrides, } } @@ -1272,6 +1300,7 @@ "new_obligation": True, "some_obligation": True, }, + "overrides": overrides, } } @@ -1302,7 +1331,7 @@ ) with pytest.raises(exceptions.InvalidProImage) as exc_error: - contract.request_auto_attach_contract_token( + contract.get_contract_token_for_cloud_instance( instance=mock.MagicMock() ) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_data_types.py 
ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_data_types.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_data_types.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_data_types.py 2023-05-30 19:02:35.000000000 +0000 @@ -1,4 +1,5 @@ import datetime +import logging from typing import List, Optional import pytest @@ -696,7 +697,8 @@ assert do.string == "something" assert d == do.to_dict() - def test_optional_type_errors_become_null(self): + @pytest.mark.parametrize("caplog_text", [logging.WARNING], indirect=True) + def test_optional_type_errors_become_null(self, caplog_text): result = ExampleDataObject.from_dict( { **example_data_object_dict_with_optionals, @@ -723,6 +725,11 @@ assert result.enum_opt_list is None assert result.dt_opt is None assert result.dtlist_opt is None + logs = caplog_text() + assert ( + "string_opt is wrong type (expected str but got int) but " + "considered optional - treating as null" + ) in logs @pytest.mark.parametrize( "d", diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_livepatch.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_livepatch.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_livepatch.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_livepatch.py 2023-06-01 18:49:33.000000000 +0000 @@ -12,6 +12,7 @@ LivepatchPatchFixStatus, LivepatchPatchStatus, LivepatchStatusStatus, + LivepatchSupport, UALivepatchClient, _on_supported_kernel_api, _on_supported_kernel_cache, @@ -59,6 +60,7 @@ "Kernel": "installed-kernel-generic", "Livepatch": { "State": "nothing-to-apply", + "Version": "100", }, } ], @@ -72,6 +74,7 @@ livepatch=LivepatchPatchStatus( state="nothing-to-apply", fixes=None, + version="100", ), supported=None, ), @@ -113,6 +116,7 @@ patched=True, ) ], + version=None, ), supported=None, ), @@ -150,8 +154,7 @@ LivepatchStatusStatus( kernel="4.15.0-187.198-generic", livepatch=LivepatchPatchStatus( - state="nothing-to-apply", - fixes=None, + state="nothing-to-apply", fixes=None, version="" ), supported=None, ), @@ -204,6 +207,7 @@ "flavor", "arch", "codename", + "build_date", "expected_request_calls", ], [ @@ -212,6 +216,9 @@ "generic", "amd64", "xenial", + datetime.datetime( + 2020, 1, 1, 1, 1, 1, tzinfo=datetime.timezone.utc + ), [ mock.call( "/v1/api/kernels/supported", @@ -221,6 +228,7 @@ "flavour": "generic", "architecture": "amd64", "codename": "xenial", + "build-date": "2020-01-01T01:01:01+00:00", }, ) ], @@ -230,6 +238,7 @@ "kvm", "arm64", "kinetic", + None, [ mock.call( "/v1/api/kernels/supported", @@ -239,6 +248,7 @@ "flavour": "kvm", "architecture": "arm64", "codename": "kinetic", + "build-date": "unknown", }, ) ], @@ -253,11 +263,14 @@ flavor, arch, codename, + build_date, expected_request_calls, ): m_request_url.return_value = ("mock", "mock") lp_client = UALivepatchClient() - lp_client.is_kernel_supported(version, flavor, arch, codename) + lp_client.is_kernel_supported( + version, flavor, arch, codename, build_date + ) assert m_request_url.call_args_list == expected_request_calls @pytest.mark.parametrize( @@ -266,13 +279,27 @@ "expected", ], [ - ([({"Supported": True}, None)], True), - ([({"Supported": False}, None)], False), - ([({}, None)], False), + ([({"Supported": True}, None)], LivepatchSupport.SUPPORTED), + ([({"Supported": False}, None)], LivepatchSupport.UNSUPPORTED), + ([({}, None)], LivepatchSupport.UNSUPPORTED), ([([], None)], None), ([("string", None)], None), 
(exceptions.UrlError(mock.MagicMock()), None), (Exception(), None), + ([({"Supported": "supported"}, None)], LivepatchSupport.SUPPORTED), + ( + [({"Supported": "unsupported"}, None)], + LivepatchSupport.UNSUPPORTED, + ), + ([({"Supported": "unknown"}, None)], LivepatchSupport.UNKNOWN), + ( + [({"Supported": "kernel-end-of-life"}, None)], + LivepatchSupport.KERNEL_EOL, + ), + ( + [({"Supported": "kernel-upgrade-required"}, None)], + LivepatchSupport.KERNEL_UPGRADE_REQUIRED, + ), ], ) def test_is_kernel_supported_interprets_api_response( @@ -284,7 +311,7 @@ ): m_request_url.side_effect = request_side_effect lp_client = UALivepatchClient() - assert lp_client.is_kernel_supported("", "", "", "") == expected + assert lp_client.is_kernel_supported("", "", "", "", None) == expected class TestOnSupportedKernel: @@ -305,19 +332,33 @@ LivepatchStatusStatus( kernel=None, livepatch=None, supported="supported" ), - True, + LivepatchSupport.SUPPORTED, ), ( LivepatchStatusStatus( kernel=None, livepatch=None, supported="unsupported" ), - False, + LivepatchSupport.UNSUPPORTED, + ), + ( + LivepatchStatusStatus( + kernel=None, + livepatch=None, + supported="kernel-upgrade-required", + ), + LivepatchSupport.KERNEL_UPGRADE_REQUIRED, + ), + ( + LivepatchStatusStatus( + kernel=None, livepatch=None, supported="kernel-end-of-life" + ), + LivepatchSupport.KERNEL_EOL, ), ( LivepatchStatusStatus( kernel=None, livepatch=None, supported="unknown" ), - None, + LivepatchSupport.UNKNOWN, ), ], ) @@ -472,8 +513,8 @@ ], [ ( - ("5.14-14", "generic", "amd64", "focal"), - True, + ("5.14-14", "generic", "amd64", "focal", None), + LivepatchSupport.SUPPORTED, [ mock.call( LivepatchSupportCacheData( @@ -486,11 +527,11 @@ ) ) ], - True, + LivepatchSupport.SUPPORTED, ), ( - ("5.14-14", "kvm", "arm64", "focal"), - False, + ("5.14-14", "kvm", "arm64", "focal", None), + LivepatchSupport.UNSUPPORTED, [ mock.call( LivepatchSupportCacheData( @@ -503,10 +544,10 @@ ) ) ], - False, + LivepatchSupport.UNSUPPORTED, ), ( - ("4.14-14", "kvm", "arm64", "xenial"), + ("4.14-14", "kvm", "arm64", "xenial", None), None, [ mock.call( @@ -522,6 +563,23 @@ ], None, ), + ( + ("4.14-14", "kvm", "arm64", "xenial", None), + LivepatchSupport.UNKNOWN, + [ + mock.call( + LivepatchSupportCacheData( + version="4.14-14", + flavor="kvm", + arch="arm64", + codename="xenial", + supported=None, + cached_at=mock.ANY, + ) + ) + ], + LivepatchSupport.UNKNOWN, + ), ], ) @mock.patch(M_PATH + "state_files.livepatch_support_cache.write") @@ -544,7 +602,7 @@ "cli_result", "get_kernel_info_result", "standardize_arch_name_result", - "get_platform_info_result", + "get_release_info_result", "cache_result", "api_result", "cache_call_args", @@ -552,9 +610,9 @@ "expected", ], [ - # cli result true + # cli result supported ( - True, + LivepatchSupport.SUPPORTED, None, None, None, @@ -562,11 +620,11 @@ None, [], [], - True, + LivepatchSupport.SUPPORTED, ), - # cli result false + # cli result unsupported ( - False, + LivepatchSupport.UNSUPPORTED, None, None, None, @@ -574,14 +632,52 @@ None, [], [], - False, + LivepatchSupport.UNSUPPORTED, + ), + # cli result upgrade-required + ( + LivepatchSupport.KERNEL_UPGRADE_REQUIRED, + None, + None, + None, + None, + None, + [], + [], + LivepatchSupport.KERNEL_UPGRADE_REQUIRED, + ), + # cli result eol + ( + LivepatchSupport.KERNEL_EOL, + None, + None, + None, + None, + None, + [], + [], + LivepatchSupport.KERNEL_EOL, + ), + # cli result definite unknown + ( + LivepatchSupport.UNKNOWN, + None, + None, + None, + None, + None, + [], + [], + 
LivepatchSupport.UNKNOWN, ), # insufficient kernel info ( None, system.KernelInfo( + uname_machine_arch="", uname_release="", proc_version_signature_version="", + build_date=None, flavor=None, major=5, minor=6, @@ -594,14 +690,16 @@ None, [], [], - None, + LivepatchSupport.UNKNOWN, ), # cache result true ( None, system.KernelInfo( + uname_machine_arch="", uname_release="", proc_version_signature_version="", + build_date=None, flavor="generic", major=5, minor=6, @@ -609,19 +707,21 @@ patch=None, ), "amd64", - {"series": "xenial"}, + mock.MagicMock(series="xenial"), (True, True), None, [mock.call("5.6", "generic", "amd64", "xenial")], [], - True, + LivepatchSupport.SUPPORTED, ), # cache result false ( None, system.KernelInfo( + uname_machine_arch="", uname_release="", proc_version_signature_version="", + build_date=None, flavor="generic", major=5, minor=6, @@ -629,19 +729,21 @@ patch=None, ), "amd64", - {"series": "xenial"}, + mock.MagicMock(series="xenial"), (True, False), None, [mock.call("5.6", "generic", "amd64", "xenial")], [], - False, + LivepatchSupport.UNSUPPORTED, ), # cache result none ( None, system.KernelInfo( + uname_machine_arch="", uname_release="", proc_version_signature_version="", + build_date=None, flavor="generic", major=5, minor=6, @@ -649,19 +751,21 @@ patch=None, ), "amd64", - {"series": "xenial"}, + mock.MagicMock(series="xenial"), (True, None), None, [mock.call("5.6", "generic", "amd64", "xenial")], [], - None, + LivepatchSupport.UNKNOWN, ), # api result true ( None, system.KernelInfo( + uname_machine_arch="", uname_release="", proc_version_signature_version="", + build_date=None, flavor="generic", major=5, minor=6, @@ -669,35 +773,33 @@ patch=None, ), "amd64", - {"series": "xenial"}, + mock.MagicMock(series="xenial"), (False, None), - True, + LivepatchSupport.SUPPORTED, [mock.call("5.6", "generic", "amd64", "xenial")], - [mock.call("5.6", "generic", "amd64", "xenial")], - True, + [mock.call("5.6", "generic", "amd64", "xenial", None)], + LivepatchSupport.SUPPORTED, ), ], ) @mock.patch(M_PATH + "_on_supported_kernel_api") @mock.patch(M_PATH + "_on_supported_kernel_cache") - @mock.patch(M_PATH + "system.get_platform_info") + @mock.patch(M_PATH + "system.get_release_info") @mock.patch(M_PATH + "util.standardize_arch_name") - @mock.patch(M_PATH + "system.get_dpkg_arch") @mock.patch(M_PATH + "system.get_kernel_info") @mock.patch(M_PATH + "_on_supported_kernel_cli") def test_on_supported_kernel( self, m_supported_cli, m_get_kernel_info, - m_get_dpkg_arch, m_standardize_arch_name, - m_get_platform_info, + m_get_release_info, m_supported_cache, m_supported_api, cli_result, get_kernel_info_result, standardize_arch_name_result, - get_platform_info_result, + get_release_info_result, cache_result, api_result, cache_call_args, @@ -707,7 +809,7 @@ m_supported_cli.return_value = cli_result m_get_kernel_info.return_value = get_kernel_info_result m_standardize_arch_name.return_value = standardize_arch_name_result - m_get_platform_info.return_value = get_platform_info_result + m_get_release_info.return_value = get_release_info_result m_supported_cache.return_value = cache_result m_supported_api.return_value = api_result assert on_supported_kernel.__wrapped__() == expected diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_pip.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_pip.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_pip.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_pip.py 1970-01-01 
00:00:00.000000000 +0000 @@ -1,91 +0,0 @@ -"""Tests related to uaclient.pip module.""" - -from configparser import ConfigParser -from textwrap import dedent - -import mock -import pytest - -from uaclient.pip import update_pip_conf - - -class TestPipConfUpdate: - index_url = "http://bearer:token@python.esm.ubuntu.com/simple" - - def _get_config_dict(self): - """ - Create a base config dict to be used on tests. This - config is based on the possible config that will - be used by the esm-apps-python service. - """ - index_url = self.index_url - index = index_url - - return {"global": {"index-url": index_url, "index": index}} - - def _cfg_to_dict(self, cfg_file): - """Return a ConfigParser dict representation of a config file.""" - cfg_parser = ConfigParser() - cfg_parser.read(cfg_file) - cfg_dict = {} - for s in cfg_parser.sections(): - cfg_dict[s] = {} - - for option in cfg_parser[s]: - cfg_dict[s][option] = cfg_parser[s][option] - - return cfg_dict - - @pytest.mark.parametrize( - "file_content,expected", - ( - ("", {"global": {"index-url": index_url, "index": index_url}}), - ( - dedent( - """\ - [freeze] - timeout = 10 - """ - ), - { - "global": {"index-url": index_url, "index": index_url}, - "freeze": {"timeout": "10"}, - }, - ), - ( - dedent( - """\ - [global] - index-url = www.mypip.com - """ - ), - {"global": {"index-url": index_url, "index": index_url}}, - ), - ( - dedent( - """\ - [global] - index-url = www.mypip.com - - [freeze] - timeout = 10 - """ - ), - { - "global": {"index-url": index_url, "index": index_url}, - "freeze": {"timeout": "10"}, - }, - ), - (None, {"global": {"index-url": index_url, "index": index_url}}), - ), - ) - def test_update_pip_conf(self, tmpdir, file_content, expected): - file_path = tmpdir / "pip.conf" - - if file_content: - file_path.write(file_content) - - with mock.patch("uaclient.pip.PIP_CONFIG_FILE", file_path.strpath): - update_pip_conf(self._get_config_dict()) - - assert self._cfg_to_dict(file_path.strpath) == expected diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_reboot_cmds.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_reboot_cmds.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_reboot_cmds.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_reboot_cmds.py 2023-06-01 18:49:33.000000000 +0000 @@ -1,183 +1,192 @@ -import logging - import mock import pytest -from lib.reboot_cmds import ( - fix_pro_pkg_holds, - main, - process_reboot_operations, - run_command, -) -from uaclient import messages -from uaclient.exceptions import ProcessExecutionError -from uaclient.files.notices import Notice +from lib.reboot_cmds import fix_pro_pkg_holds, main +from uaclient import exceptions +from uaclient.testing.helpers import does_not_raise M_FIPS_PATH = "uaclient.entitlements.fips.FIPSEntitlement." -class TestMain: - @pytest.mark.parametrize("caplog_text", [logging.WARNING], indirect=True) - def test_retries_on_lock_file(self, FakeConfig, caplog_text): - cfg = FakeConfig.for_attached_machine() - with pytest.raises(SystemExit) as excinfo: - with mock.patch( - "uaclient.config.UAConfig.check_lock_info" - ) as m_check_lock: - m_check_lock.return_value = (123, "pro auto-attach") - with mock.patch("time.sleep") as m_sleep: - main(cfg=cfg) - assert [ - mock.call(1), - mock.call(1), - mock.call(1), - mock.call(1), - mock.call(1), - mock.call(1), - ] == m_sleep.call_args_list - assert 1 == excinfo.value.code - assert ( - "Lock not released. 
Unable to perform: ua-reboot-cmds" - ) in caplog_text() - - @pytest.mark.parametrize("caplog_text", [logging.DEBUG], indirect=True) - @mock.patch("lib.reboot_cmds.subp") - def test_main_unattached_removes_marker( - self, - m_subp, - FakeConfig, - caplog_text, - ): - cfg = FakeConfig() - cfg.write_cache("marker-reboot-cmds", "samplecontent") - main(cfg=cfg) - assert None is cfg.read_cache("marker-reboot-cmds") - assert "Skipping reboot_cmds. Machine is unattached" in caplog_text() - assert 0 == m_subp.call_count - - @mock.patch("lib.reboot_cmds.subp") - def test_main_noops_when_no_marker(self, m_subp, FakeConfig): - cfg = FakeConfig() - assert None is cfg.read_cache("marker-reboot-cmds") - main(cfg=cfg) - assert 0 == m_subp.call_count - - @mock.patch("lib.reboot_cmds.subp") - def test_main_unattached_removes_marker_file( - self, - m_subp, - FakeConfig, - ): - cfg = FakeConfig.for_attached_machine() - assert None is cfg.read_cache("marker-reboot-cmds") - main(cfg=cfg) - assert 0 == m_subp.call_count - - -M_REPO_PATH = "uaclient.entitlements" - - +@mock.patch("uaclient.entitlements.fips.FIPSEntitlement.install_packages") +@mock.patch("uaclient.entitlements.fips.FIPSEntitlement.setup_apt_config") +@mock.patch("uaclient.entitlements.fips.FIPSEntitlement.application_status") class TestFixProPkgHolds: - @pytest.mark.parametrize("caplog_text", [logging.WARN], indirect=True) - @pytest.mark.parametrize("fips_status", ("enabled", "disabled")) - @mock.patch("sys.exit") - @mock.patch(M_FIPS_PATH + "install_packages") - @mock.patch(M_FIPS_PATH + "setup_apt_config") - @mock.patch("uaclient.files.notices.NoticesManager.remove") - def test_calls_setup_apt_config_and_install_packages_when_enabled( + @pytest.mark.parametrize( + [ + "fips_status", + "fips_setup_apt_config_side_effect", + "fips_install_packages_side_effect", + "expected_fips_setup_apt_config_calls", + "expected_fips_install_packages_calls", + "expected_raises", + ], + [ + ("disabled", None, None, [], [], does_not_raise()), + ( + "enabled", + None, + None, + [mock.call()], + [mock.call(cleanup_on_failure=False)], + does_not_raise(), + ), + ( + "enabled", + Exception(), + None, + [mock.call()], + [mock.call(cleanup_on_failure=False)], + does_not_raise(), + ), + ( + "enabled", + Exception(), + exceptions.UserFacingError(""), + [mock.call()], + [mock.call(cleanup_on_failure=False)], + pytest.raises(exceptions.UserFacingError), + ), + ], + ) + def test_fix_pro_pkg_holds( self, - m_remove_notice, - setup_apt_config, - install_packages, - exit, + m_fips_status, + m_fips_setup_apt_config, + m_fips_install_packages, fips_status, + fips_setup_apt_config_side_effect, + fips_install_packages_side_effect, + expected_fips_setup_apt_config_calls, + expected_fips_install_packages_calls, + expected_raises, FakeConfig, - caplog_text, ): + m_fips_setup_apt_config.side_effect = fips_setup_apt_config_side_effect + m_fips_install_packages.side_effect = fips_install_packages_side_effect cfg = FakeConfig() fake_status_cache = { "services": [{"name": "fips", "status": fips_status}] } cfg.write_cache("status-cache", fake_status_cache) - fix_pro_pkg_holds(cfg=cfg) - if fips_status == "enabled": - assert [mock.call()] == setup_apt_config.call_args_list - assert [ - mock.call(cleanup_on_failure=False) - ] == install_packages.call_args_list - else: - assert 0 == setup_apt_config.call_count - assert 0 == install_packages.call_count - assert 0 == len(m_remove_notice.call_args_list) - assert 0 == exit.call_count - - -class TestRunCommand: - 
@pytest.mark.parametrize("caplog_text", [logging.WARN], indirect=True) - @mock.patch("sys.exit") - @mock.patch("lib.reboot_cmds.subp") - def test_run_command_failure(self, m_subp, m_exit, caplog_text): - cmd = "foobar" - m_cfg = mock.MagicMock() - m_subp.side_effect = ProcessExecutionError( - cmd=cmd, exit_code=1, stdout="foo", stderr="bar" + with expected_raises: + fix_pro_pkg_holds(cfg) + + assert ( + expected_fips_setup_apt_config_calls + == m_fips_setup_apt_config.call_args_list + ) + assert ( + expected_fips_install_packages_calls + == m_fips_install_packages.call_args_list ) - run_command(cmd=cmd, cfg=m_cfg) - expected_msgs = [ - "Failed running cmd: foobar", - "Return code: 1", - "Stderr: bar", - "Stdout: foo", - ] - - for expected_msg in expected_msgs: - assert expected_msg in caplog_text() - - assert m_subp.call_args_list == [mock.call(["foobar"], capture=True)] - assert m_cfg.delete_cache_key.call_args_list == [ - mock.call("marker-reboot-cmds") - ] - assert m_exit.call_args_list == [mock.call(1)] - - -class TestProcessRebootOperations: - @pytest.mark.parametrize("caplog_text", [logging.ERROR], indirect=True) - @mock.patch("uaclient.config.UAConfig.delete_cache_key") - @mock.patch("uaclient.config.UAConfig.check_lock_info") - @mock.patch("uaclient.files.notices.NoticesManager.add") - @mock.patch("lib.reboot_cmds.fix_pro_pkg_holds") - def test_process_reboot_operations_create_notice_when_it_fails( + +@mock.patch("uaclient.files.notices.add") +@mock.patch("uaclient.files.notices.remove") +@mock.patch( + "uaclient.upgrade_lts_contract.process_contract_delta_after_apt_lock" +) # noqa: E501 +@mock.patch("lib.reboot_cmds.refresh_contract") +@mock.patch("lib.reboot_cmds.fix_pro_pkg_holds") +@mock.patch("uaclient.lock.SpinLock") +@mock.patch("lib.reboot_cmds._is_attached") +@mock.patch( + "uaclient.files.state_files.reboot_cmd_marker_file", + new_callable=mock.PropertyMock, +) +class TestMain: + @pytest.mark.parametrize( + [ + "marker_file_present", + "is_attached", + "expected_delete_marker", + "expected_calls", + "expected_ret", + ], + [ + (False, False, False, False, 0), + (True, False, True, False, 0), + (False, True, False, False, 0), + (True, True, True, True, 0), + ], + ) + def test_main_success_cases( self, + m_reboot_cmd_marker_file, + m_is_attached, + m_spin_lock, m_fix_pro_pkg_holds, - m_add_notice, - m_check_lock_info, - _m_delete_cache_key, + m_refresh_contract, + m_process_contract_delta_after_apt_lock, + m_notices_remove, + m_notices_add, + marker_file_present, + is_attached, + expected_delete_marker, + expected_calls, + expected_ret, FakeConfig, - caplog_text, ): - m_check_lock_info.return_value = (0, 0) - m_fix_pro_pkg_holds.side_effect = ProcessExecutionError("error") - - cfg = FakeConfig.for_attached_machine() - with mock.patch("os.path.exists", return_value=True): - with mock.patch("uaclient.config.UAConfig.write_cache"): - process_reboot_operations(cfg=cfg) - - expected_calls = [ - mock.call( - Notice.REBOOT_SCRIPT_FAILED, - messages.REBOOT_SCRIPT_FAILED, - ), - ] + m_is_attached.return_value = mock.MagicMock(is_attached=is_attached) + m_reboot_cmd_marker_file.is_present = marker_file_present + assert expected_ret == main(FakeConfig()) + + # no notices are added in any success scenario + assert [] == m_notices_add.call_args_list + # any existing notice should always be cleaned up on success + assert [mock.call(mock.ANY)] == m_notices_remove.call_args_list - assert expected_calls == m_add_notice.call_args_list + if expected_delete_marker: + assert [ + mock.call() + ] 
== m_reboot_cmd_marker_file.delete.call_args_list + else: + assert [] == m_reboot_cmd_marker_file.delete.call_args_list - expected_msgs = [ - "Failed running commands on reboot.", - "Invalid command specified 'error'.", - ] - assert all( - expected_msg in caplog_text() for expected_msg in expected_msgs - ) + if expected_calls: + assert [ + mock.call(cfg=mock.ANY, lock_holder="pro-reboot-cmds") + ] == m_spin_lock.call_args_list + assert [mock.call(mock.ANY)] == m_fix_pro_pkg_holds.call_args_list + assert [mock.call(mock.ANY)] == m_refresh_contract.call_args_list + assert [ + mock.call(mock.ANY) + ] == m_process_contract_delta_after_apt_lock.call_args_list + else: + assert [] == m_spin_lock.call_args_list + assert [] == m_fix_pro_pkg_holds.call_args_list + assert [] == m_refresh_contract.call_args_list + assert [] == m_process_contract_delta_after_apt_lock.call_args_list + + @pytest.mark.parametrize( + [ + "error", + "expected_ret", + ], + [ + (Exception(), 1), + (exceptions.LockHeldError("", "", 1), 1), + (exceptions.UserFacingError(""), 1), + ], + ) + def test_main_error_cases( + self, + m_reboot_cmd_marker_file, + m_is_attached, + m_spin_lock, + m_fix_pro_pkg_holds, + m_refresh_contract, + m_process_contract_delta_after_apt_lock, + m_notices_remove, + m_notices_add, + error, + expected_ret, + FakeConfig, + ): + m_is_attached.return_value = mock.MagicMock(is_attached=True) + m_reboot_cmd_marker_file.is_present = True + m_fix_pro_pkg_holds.side_effect = error + assert expected_ret == main(FakeConfig()) + assert [mock.call(mock.ANY)] == m_notices_add.call_args_list diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_security.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_security.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_security.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_security.py 2023-05-30 19:02:35.000000000 +0000 @@ -1,13 +1,12 @@ import copy import datetime -import json import textwrap from collections import defaultdict import mock import pytest -from uaclient import exceptions +from uaclient import exceptions, livepatch from uaclient.clouds.identity import NoCloudTypeReason from uaclient.entitlements.entitlement_status import ( ApplicabilityStatus, @@ -21,13 +20,19 @@ PROMPT_ENTER_TOKEN, PROMPT_EXPIRED_ENTER_TOKEN, SECURITY_APT_NON_ROOT, + SECURITY_CVE_STATUS_IGNORED, + SECURITY_CVE_STATUS_NEEDED, + SECURITY_CVE_STATUS_PENDING, + SECURITY_CVE_STATUS_TRIAGE, SECURITY_DRY_RUN_UA_EXPIRED_SUBSCRIPTION, SECURITY_DRY_RUN_UA_NOT_ATTACHED, SECURITY_DRY_RUN_UA_SERVICE_NOT_ENABLED, - SECURITY_ISSUE_NOT_RESOLVED, + SECURITY_FIXING_REQUESTED_USN, + SECURITY_RELATED_USN_ERROR, SECURITY_SERVICE_DISABLED, SECURITY_UA_SERVICE_NOT_ENABLED, SECURITY_UA_SERVICE_NOT_ENTITLED, + SECURITY_UA_SERVICE_REQUIRED, SECURITY_UPDATE_NOT_INSTALLED_EXPIRED, SECURITY_UPDATE_NOT_INSTALLED_SUBSCRIPTION, SECURITY_USE_PRO_TMPL, @@ -42,9 +47,12 @@ CVEPackageStatus, FixStatus, UASecurityClient, + UnfixedPackage, + UpgradeResult, _check_attached, _check_subscription_for_required_service, _check_subscription_is_expired, + _fix_usn, _prompt_for_attach, fix_security_issue_id, get_cve_affected_source_packages_status, @@ -224,17 +232,17 @@ ("focal", {"samba": "1000"}, {}), ), ) - @mock.patch("uaclient.security.system.get_platform_info") + @mock.patch("uaclient.security.system.get_release_info") def test_affected_packages_status_filters_by_installed_pkgs_and_series( self, - get_platform_info, + m_get_release_info, series, 
installed_packages, expected_status, FakeConfig, ): """Package statuses are filtered if not installed""" - get_platform_info.return_value = {"series": series} + m_get_release_info.return_value = mock.MagicMock(series=series) client = UASecurityClient(FakeConfig()) cve = CVE(client, SAMPLE_CVE_RESPONSE) affected_packages = get_cve_affected_source_packages_status( @@ -449,11 +457,11 @@ ("series-example-3", {}), ), ) - @mock.patch("uaclient.system.get_platform_info") + @mock.patch("uaclient.system.get_release_info") def test_release_packages_returns_source_and_binary_pkgs_for_series( - self, get_platform_info, series, expected, FakeConfig + self, m_get_release_info, series, expected, FakeConfig ): - get_platform_info.return_value = {"series": series} + m_get_release_info.return_value = mock.MagicMock(series=series) client = UASecurityClient(FakeConfig()) usn = USN(client, SAMPLE_USN_RESPONSE) @@ -476,12 +484,14 @@ ), ), ) - @mock.patch("uaclient.system.get_platform_info") + @mock.patch("uaclient.system.get_release_info") def test_release_packages_errors_on_sparse_source_url( - self, get_platform_info, source_link, error_msg, FakeConfig + self, m_get_release_info, source_link, error_msg, FakeConfig ): """Raise errors when USN metadata contains no valid source_link.""" - get_platform_info.return_value = {"series": "series-example-1"} + m_get_release_info.return_value = mock.MagicMock( + series="series-example-1" + ) client = UASecurityClient(FakeConfig()) sparse_md = copy.deepcopy(SAMPLE_USN_RESPONSE) sparse_md["release_packages"]["series-example-1"].append( @@ -894,11 +904,11 @@ ), ) @mock.patch("uaclient.security.system.subp") - @mock.patch("uaclient.system.get_platform_info") + @mock.patch("uaclient.system.get_release_info") def test_result_keyed_by_source_package_name( - self, get_platform_info, subp, dpkg_out, results + self, m_get_release_info, subp, dpkg_out, results ): - get_platform_info.return_value = {"series": "bionic"} + m_get_release_info.return_value = mock.MagicMock(series="bionic") subp.return_value = dpkg_out, "" assert results == query_installed_source_pkg_versions() _format = "-f=${Package},${Source},${Version},${db:Status-Status}\n" @@ -952,49 +962,9 @@ class TestPromptForAffectedPackages: @pytest.mark.parametrize( - "affected_pkg_status,installed_packages,usn_released_pkgs", - ( - ( - {"slsrc": CVEPackageStatus(CVE_PKG_STATUS_RELEASED)}, - {"slsrc": {"sl": "2.0"}}, - {}, - ), - ), - ) - def test_raise_userfacing_error_on_invalid_usn_metadata( - self, - affected_pkg_status, - installed_packages, - usn_released_pkgs, - FakeConfig, - ): - with pytest.raises(exceptions.SecurityAPIMetadataError) as exc: - with mock.patch("uaclient.util.sys") as m_sys: - m_stdout = mock.MagicMock() - type(m_sys).stdout = m_stdout - type(m_stdout).encoding = mock.PropertyMock( - return_value="utf-8" - ) - prompt_for_affected_packages( - cfg=FakeConfig(), - issue_id="USN-###", - affected_pkg_status=affected_pkg_status, - installed_packages=installed_packages, - usn_released_pkgs=usn_released_pkgs, - dry_run=False, - ) - assert ( - "Error: USN-### metadata defines no fixed version for sl.\n" - "1 package is still affected: slsrc\n" - "{msg}".format( - msg=SECURITY_ISSUE_NOT_RESOLVED.format(issue="USN-###") - ) - == exc.value.msg - ) - - @pytest.mark.parametrize( "affected_pkg_status,installed_packages," - "usn_released_pkgs,cloud_type,expected,expected_ret", + "usn_released_pkgs,cloud_type,expected,expected_ret," + "expected_unfixed_pkgs", ( ( # No affected_packages listed {}, @@ -1003,14 +973,15 
@@ (None, NoCloudTypeReason.NO_CLOUD_DETECTED), textwrap.dedent( """\ - No affected source packages are installed. + No affected source packages are installed. - {check} USN-### does not affect your system. - """.format( + {check} USN-### does not affect your system. + """.format( check=OKGREEN_CHECK # noqa: E126 ) # noqa: E126 ), - FixStatus.SYSTEM_NON_VULNERABLE, + FixStatus.SYSTEM_NOT_AFFECTED, + None, ), ( # version is >= released affected package {"slsrc": CVEPackageStatus(CVE_PKG_STATUS_RELEASED)}, @@ -1019,17 +990,18 @@ (None, NoCloudTypeReason.NO_CLOUD_DETECTED), textwrap.dedent( """\ - 1 affected source package is installed: slsrc - (1/1) slsrc: - A fix is available in Ubuntu standard updates. - The update is already installed. + 1 affected source package is installed: slsrc + (1/1) slsrc: + A fix is available in Ubuntu standard updates. + The update is already installed. - {check} USN-### is resolved. - """.format( + {check} USN-### is resolved. + """.format( check=OKGREEN_CHECK # noqa: E126 ) # noqa: E126 ), FixStatus.SYSTEM_NON_VULNERABLE, + [], ), ( # usn_released_pkgs version is used instead of CVE (2.1) {"slsrc": CVEPackageStatus(CVE_PKG_STATUS_RELEASED)}, @@ -1038,10 +1010,10 @@ (None, NoCloudTypeReason.NO_CLOUD_DETECTED), textwrap.dedent( """\ - 1 affected source package is installed: slsrc - (1/1) slsrc: - A fix is available in Ubuntu standard updates. - """ + 1 affected source package is installed: slsrc + (1/1) slsrc: + A fix is available in Ubuntu standard updates. + """ ) + colorize_commands( [["apt update && apt install --only-upgrade" " -y sl"]] @@ -1049,6 +1021,7 @@ + "\n\n" + "{check} USN-### is resolved.\n".format(check=OKGREEN_CHECK), FixStatus.SYSTEM_NON_VULNERABLE, + [], ), ( # version is < released affected package standard updates {"slsrc": CVEPackageStatus(CVE_PKG_STATUS_RELEASED)}, @@ -1057,10 +1030,10 @@ (None, NoCloudTypeReason.NO_CLOUD_DETECTED), textwrap.dedent( """\ - 1 affected source package is installed: slsrc - (1/1) slsrc: - A fix is available in Ubuntu standard updates. - """ + 1 affected source package is installed: slsrc + (1/1) slsrc: + A fix is available in Ubuntu standard updates. 
+ """ ) + "\n".join( [ @@ -1079,6 +1052,7 @@ ] ), FixStatus.SYSTEM_NON_VULNERABLE, + [], ), ( # version is < released affected package esm-infra updates {"slsrc": CVEPackageStatus(CVE_PKG_STATUS_RELEASED_ESM_INFRA)}, @@ -1101,6 +1075,14 @@ ] ), FixStatus.SYSTEM_STILL_VULNERABLE, + [ + UnfixedPackage( + pkg="slsrc", + unfixed_reason=SECURITY_UA_SERVICE_REQUIRED.format( + service="Ubuntu Pro: ESM Infra" + ), + ), + ], ), ( # version < released package in esm-infra updates and aws cloud {"slsrc": CVEPackageStatus(CVE_PKG_STATUS_RELEASED_ESM_INFRA)}, @@ -1121,6 +1103,14 @@ ] ), FixStatus.SYSTEM_STILL_VULNERABLE, + [ + UnfixedPackage( + pkg="slsrc", + unfixed_reason=SECURITY_UA_SERVICE_REQUIRED.format( + service="Ubuntu Pro: ESM Infra" + ), + ), + ], ), ( # version is < released affected both esm-apps and standard { @@ -1161,6 +1151,14 @@ + "\n\n" + "1 package is still affected: slsrc", FixStatus.SYSTEM_STILL_VULNERABLE, + [ + UnfixedPackage( + pkg="slsrc", + unfixed_reason=SECURITY_UA_SERVICE_REQUIRED.format( + service="Ubuntu Pro: ESM Apps" + ), + ), + ], ), ( # version is < released affected both esm-apps and standard { @@ -1255,6 +1253,68 @@ ) ), FixStatus.SYSTEM_STILL_VULNERABLE, + [ + UnfixedPackage( + pkg="pkg1", + unfixed_reason=SECURITY_CVE_STATUS_IGNORED, + ), + UnfixedPackage( + pkg="pkg2", + unfixed_reason=SECURITY_CVE_STATUS_IGNORED, + ), + UnfixedPackage( + pkg="pkg9", + unfixed_reason=SECURITY_CVE_STATUS_IGNORED, + ), + UnfixedPackage( + pkg="pkg7", + unfixed_reason=SECURITY_CVE_STATUS_NEEDED, + ), + UnfixedPackage( + pkg="pkg8", + unfixed_reason=SECURITY_CVE_STATUS_NEEDED, + ), + UnfixedPackage( + pkg="pkg5", + unfixed_reason=SECURITY_CVE_STATUS_TRIAGE, + ), + UnfixedPackage( + pkg="pkg6", + unfixed_reason=SECURITY_CVE_STATUS_TRIAGE, + ), + UnfixedPackage( + pkg="pkg3", + unfixed_reason=SECURITY_CVE_STATUS_PENDING, + ), + UnfixedPackage( + pkg="pkg4", + unfixed_reason=SECURITY_CVE_STATUS_PENDING, + ), + UnfixedPackage( + pkg="pkg12", + unfixed_reason=SECURITY_UA_SERVICE_REQUIRED.format( + service="Ubuntu Pro: ESM Infra" + ), + ), + UnfixedPackage( + pkg="pkg13", + unfixed_reason=SECURITY_UA_SERVICE_REQUIRED.format( + service="Ubuntu Pro: ESM Infra" + ), + ), + UnfixedPackage( + pkg="pkg14", + unfixed_reason=SECURITY_UA_SERVICE_REQUIRED.format( + service="Ubuntu Pro: ESM Apps" + ), + ), + UnfixedPackage( + pkg="pkg15", + unfixed_reason=SECURITY_UA_SERVICE_REQUIRED.format( + service="Ubuntu Pro: ESM Apps" + ), + ), + ], ), ( # No released version { @@ -1295,6 +1355,44 @@ + "\n" + "{check} USN-### is not resolved.\n".format(check=FAIL_X), FixStatus.SYSTEM_STILL_VULNERABLE, + [ + UnfixedPackage( + pkg="pkg1", + unfixed_reason=SECURITY_CVE_STATUS_IGNORED, + ), + UnfixedPackage( + pkg="pkg2", + unfixed_reason=SECURITY_CVE_STATUS_IGNORED, + ), + UnfixedPackage( + pkg="pkg9", + unfixed_reason=SECURITY_CVE_STATUS_IGNORED, + ), + UnfixedPackage( + pkg="pkg7", + unfixed_reason=SECURITY_CVE_STATUS_NEEDED, + ), + UnfixedPackage( + pkg="pkg8", + unfixed_reason=SECURITY_CVE_STATUS_NEEDED, + ), + UnfixedPackage( + pkg="pkg5", + unfixed_reason=SECURITY_CVE_STATUS_TRIAGE, + ), + UnfixedPackage( + pkg="pkg6", + unfixed_reason=SECURITY_CVE_STATUS_TRIAGE, + ), + UnfixedPackage( + pkg="pkg3", + unfixed_reason=SECURITY_CVE_STATUS_PENDING, + ), + UnfixedPackage( + pkg="pkg4", + unfixed_reason=SECURITY_CVE_STATUS_PENDING, + ), + ], ), ( # text wrapping required in several places { @@ -1358,6 +1456,7 @@ + "\n\n" + "{check} USN-### is resolved.\n".format(check=OKGREEN_CHECK), 
FixStatus.SYSTEM_NON_VULNERABLE, + [], ), ), ) @@ -1383,6 +1482,7 @@ cloud_type, expected, expected_ret, + expected_unfixed_pkgs, FakeConfig, capsys, _subp, @@ -1406,7 +1506,8 @@ usn_released_pkgs=usn_released_pkgs, dry_run=False, ) - assert expected_ret == actual_ret + assert expected_ret == actual_ret.status + assert expected_unfixed_pkgs == actual_ret.unfixed_pkgs out, err = capsys.readouterr() assert expected in out @@ -1582,7 +1683,9 @@ capsys, _subp, ): - m_upgrade_packages.return_value = False + m_upgrade_packages.return_value = UpgradeResult( + status=False, failure_reason=None + ) cfg = FakeConfig() with mock.patch("uaclient.system._subp", side_effect=_subp): @@ -1968,8 +2071,7 @@ {"pkg1": CVEPackageStatus(CVE_PKG_STATUS_RELEASED_ESM_INFRA)}, {"pkg1": {"pkg1": "1.8"}}, {"pkg1": {"pkg1": {"version": "2.0"}}}, - "\n" - + textwrap.dedent( + textwrap.dedent( """\ 1 affected source package is installed: pkg1 (1/1) pkg1: @@ -2191,7 +2293,7 @@ usn_released_pkgs=usn_released_pkgs, dry_run=False, ) - assert exp_ret == actual_ret + assert exp_ret == actual_ret.status out, err = capsys.readouterr() assert exp_msg in out @@ -2309,7 +2411,7 @@ upgrade_pkgs=["t1=123"], pocket="Ubuntu standard updates", dry_run=False, - ) + ).status is False ) @@ -2318,12 +2420,12 @@ class TestGetRelatedUSNs: - def test_original_usn_returned_when_no_cves_are_found(self, FakeConfig): + def test_no_usns_returned_when_no_cves_are_found(self, FakeConfig): cfg = FakeConfig() client = UASecurityClient(cfg=cfg) usn = USN(client, SAMPLE_USN_RESPONSE_NO_CVES) - assert [usn] == get_related_usns(usn, client) + assert [] == get_related_usns(usn, client) class TestGetUSNAffectedPackagesStatus: @@ -2342,15 +2444,15 @@ ), ), ) - @mock.patch("uaclient.system.get_platform_info") + @mock.patch("uaclient.system.get_release_info") def test_pkgs_come_from_release_packages_if_usn_has_no_cves( self, - m_platform_info, + m_get_release_info, installed_packages, affected_packages, FakeConfig, ): - m_platform_info.return_value = {"series": "bionic"} + m_get_release_info.return_value = mock.MagicMock(series="bionic") cfg = FakeConfig() client = UASecurityClient(cfg=cfg) @@ -2379,35 +2481,27 @@ ( ( "cve-2013-1798", - { - "Status": [ - { - "Kernel": "4.4.0-210.242-generic", - "Running": True, - "Livepatch": { - "CheckState": "checked", - "State": "applied", - "Version": "87.1", - "Fixes": [ - { - "Name": "cve-2013-1798", - "Description": "Mock Description", - "Bug": "", - "Patched": True, - } - ], - }, - } - ] - }, + livepatch.LivepatchStatusStatus( + kernel="4.4.0-210.242-generic", + supported=None, + livepatch=livepatch.LivepatchPatchStatus( + state="applied", + version="87.1", + fixes=[ + livepatch.LivepatchPatchFixStatus( + name="cve-2013-1798", patched=True + ) + ], + ), + ), FixStatus.SYSTEM_NON_VULNERABLE, ), ), ) - @mock.patch("uaclient.system.subp") + @mock.patch("uaclient.livepatch.status") def test_patched_msg_when_issue_id_fixed_by_livepatch( self, - subp, + m_livepatch_status, issue_id, livepatch_status, exp_ret, @@ -2415,18 +2509,19 @@ ): """fix_security_id returns system not vulnerable when issue_id fixed by livepatch""" - subp.return_value = json.dumps(livepatch_status), "" + m_livepatch_status.return_value = livepatch_status with mock.patch( "uaclient.security.query_installed_source_pkg_versions" ): assert exp_ret == fix_security_issue_id(FakeConfig(), issue_id) + @pytest.mark.parametrize("error_code", ((404), (400))) @pytest.mark.parametrize( "issue_id", (("CVE-1800-123456"), ("USN-12345-12")) ) 
@mock.patch("uaclient.security.query_installed_source_pkg_versions") def test_error_msg_when_issue_id_is_not_found( - self, _m_query_versions, issue_id, FakeConfig + self, _m_query_versions, issue_id, error_code, FakeConfig ): expected_message = "Error: {} not found.".format(issue_id) if "CVE" in issue_id: @@ -2438,9 +2533,9 @@ with mock.patch.object(exceptions.UrlError, "__str__") as m_str: with mock.patch.object(UASecurityClient, mock_func) as m_func: - m_str.return_value = "NOT FOUND" + m_str.return_value = "TEST" msg = "{} with id 'ID' does not exist".format(issue_type) - error_mock = mock.Mock() + error_mock = mock.MagicMock(code=error_code) type(error_mock).url = mock.PropertyMock(return_value="URL") m_func.side_effect = exceptions.SecurityAPIError( @@ -2450,6 +2545,11 @@ with pytest.raises(exceptions.UserFacingError) as exc: fix_security_issue_id(FakeConfig(), issue_id) + if error_code == 404: + expected_message = "Error: {} not found.".format(issue_id) + else: + expected_message = "TEST: [URL] " + msg + assert expected_message == exc.value.msg @mock.patch("uaclient.security.query_installed_source_pkg_versions") @@ -2772,3 +2872,332 @@ assert 1 == m_initiate.call_count assert 1 == m_wait.call_count assert 1 == m_revoke.call_count + + +class TestFixUSN: + @mock.patch("uaclient.security._check_attached", return_value=False) + @mock.patch("uaclient.apt.compare_versions") + @mock.patch("uaclient.apt.get_pkg_candidate_version", return_value="99.9") + @mock.patch("uaclient.security.merge_usn_released_binary_package_versions") + @mock.patch("uaclient.security.get_affected_packages_from_usn") + def test_fix_usn_with_related_usns( + self, + m_affected_pkgs, + m_merge_usn, + _m_get_pkg_cand_ver, + m_compare_versions, + _m_check_attached, + capsys, + FakeConfig, + ): + usn = mock.MagicMock() + issue_id = "USN-123" + related_usns = [ + mock.MagicMock(id="USN-456"), + mock.MagicMock(id="USN-789"), + mock.MagicMock(id="USN-822"), + ] + installed_packages = { + "pkg1": {"pkg1": "1.0"}, + "pkg2": {"pkg2": "1.0"}, + "pkg3": {"pkg3": "1.0"}, + "pkg4": {"pkg4": "1.0"}, + "pkg5": {"pkg5": "1.0"}, + } + cfg = FakeConfig() + beta_pockets = {} + dry_run = False + no_related = False + + m_affected_pkgs.side_effect = [ + { + "pkg1": CVEPackageStatus( + { + "status": "released", + "pocket": "security", + } + ) + }, + { + "pkg2": CVEPackageStatus( + { + "status": "released", + "pocket": "esm-infra", + } + ) + }, + { + "pkg3": CVEPackageStatus( + { + "status": "released", + "pocket": "esm-apps", + } + ), + "pkg4": CVEPackageStatus( + { + "status": "released", + "pocket": "esm-apps", + } + ), + }, + { + "pkg5": CVEPackageStatus( + { + "status": "pending", + "pocket": "security", + } + ) + }, + ] + m_merge_usn.side_effect = [ + { + "pkg1": {"pkg1": {"version": "1.2", "name": "pkg1"}}, + }, + { + "pkg2": {"pkg2": {"version": "1.2", "name": "pkg2"}}, + }, + { + "pkg3": {"pkg3": {"version": "1.2", "name": "pkg3"}}, + "pkg4": {"pkg4": {"version": "1.2", "name": "pkg4"}}, + }, + { + "pkg5": {"pkg5": {"version": "1.2", "name": "pkg5"}}, + }, + ] + + m_compare_versions.side_effect = [ + False, + True, + False, + True, + False, + False, + True, + True, + False, + True, + ] + + with mock.patch("uaclient.util.sys") as m_sys: + m_stdout = mock.MagicMock() + type(m_sys).stdout = m_stdout + type(m_stdout).encoding = mock.PropertyMock(return_value="utf-8") + actual_ret = _fix_usn( + usn=usn, + related_usns=related_usns, + issue_id=issue_id, + installed_packages=installed_packages, + cfg=cfg, + beta_pockets=beta_pockets, + 
dry_run=dry_run, + no_related=no_related, + ) + + expected_msg = ( + "\n" + + SECURITY_FIXING_REQUESTED_USN.format(issue_id=issue_id) + + "\n" + + textwrap.dedent( + """\ + 1 affected source package is installed: pkg1 + (1/1) pkg1: + A fix is available in Ubuntu standard updates. + """ + ) + + colorize_commands( + [["apt update && apt install --only-upgrade" " -y pkg1"]] + ) + + "\n\n" + + "{check} USN-123 is resolved.\n".format(check=OKGREEN_CHECK) + + "\n" + + textwrap.dedent( + """\ + Found related USNs: + - USN-456 + - USN-789 + - USN-822 + """ + ) + + "\n" + + textwrap.dedent( + """\ + Fixing related USNs: + - USN-456 + 1 affected source package is installed: pkg2 + (1/1) pkg2: + A fix is available in Ubuntu Pro: ESM Infra. + """ + ) + + "\n" + + "1 package is still affected: pkg2" + + "\n" + + "{check} USN-456 is not resolved.".format(check=FAIL_X) + + "\n\n" + + textwrap.dedent( + """\ + - USN-789 + 2 affected source packages are installed: pkg3, pkg4 + (1/2, 2/2) pkg3, pkg4: + A fix is available in Ubuntu Pro: ESM Apps. + """ + ) + + "\n" + + "2 packages are still affected: pkg3, pkg4" + + "\n" + + "{check} USN-789 is not resolved.".format(check=FAIL_X) + + "\n\n" + + textwrap.dedent( + """\ + - USN-822 + 1 affected source package is installed: pkg5 + (1/1) pkg5: + A fix is coming soon. Try again tomorrow. + """ + ) + + "\n" + + "1 package is still affected: pkg5" + + "\n" + + "{check} USN-822 is not resolved.".format(check=FAIL_X) + + "\n\n" + + "Summary:" + + "\n" + + "{check} USN-123 [requested] is resolved.".format( + check=OKGREEN_CHECK + ) + + "\n" + + "{check} USN-456 [related] is not resolved.".format(check=FAIL_X) + + "\n" + + " - pkg2: Ubuntu Pro: ESM Infra is required for upgrade." + + "\n" + + "{check} USN-789 [related] is not resolved.".format(check=FAIL_X) + + "\n" + + " - pkg3: Ubuntu Pro: ESM Apps is required for upgrade." + + "\n" + + " - pkg4: Ubuntu Pro: ESM Apps is required for upgrade." + + "\n" + + "{check} USN-822 [related] is not resolved.".format(check=FAIL_X) + + "\n" + + " - pkg5: A fix is coming soon. Try again tomorrow." 
+ + "\n\n" + + SECURITY_RELATED_USN_ERROR.format(issue_id="USN-123") + ) + out, err = capsys.readouterr() + assert expected_msg in out + assert FixStatus.SYSTEM_NON_VULNERABLE == actual_ret + + @mock.patch("uaclient.apt.compare_versions") + @mock.patch("uaclient.apt.get_pkg_candidate_version", return_value="99.9") + @mock.patch("uaclient.security.merge_usn_released_binary_package_versions") + @mock.patch("uaclient.security.get_affected_packages_from_usn") + def test_fix_usn_when_no_related_value_is_true( + self, + m_affected_pkgs, + m_merge_usn, + _m_get_pkg_cand_ver, + m_compare_versions, + capsys, + FakeConfig, + ): + usn = mock.MagicMock() + issue_id = "USN-123" + related_usns = [ + mock.MagicMock(id="USN-456"), + mock.MagicMock(id="USN-789"), + mock.MagicMock(id="USN-822"), + ] + installed_packages = { + "pkg1": {"pkg1": "1.0"}, + "pkg2": {"pkg2": "1.0"}, + "pkg3": {"pkg3": "1.0"}, + "pkg4": {"pkg4": "1.0"}, + "pkg5": {"pkg5": "1.0"}, + } + cfg = FakeConfig() + beta_pockets = {} + dry_run = False + no_related = True + + m_affected_pkgs.side_effect = [ + { + "pkg1": CVEPackageStatus( + { + "status": "released", + "pocket": "security", + } + ) + }, + { + "pkg2": CVEPackageStatus( + { + "status": "released", + "pocket": "esm-infra", + } + ) + }, + { + "pkg3": CVEPackageStatus( + { + "status": "released", + "pocket": "esm-apps", + } + ), + "pkg4": CVEPackageStatus( + { + "status": "released", + "pocket": "esm-apps", + } + ), + }, + { + "pkg5": CVEPackageStatus( + { + "status": "pending", + "pocket": "security", + } + ) + }, + ] + m_merge_usn.side_effect = [ + { + "pkg1": {"pkg1": {"version": "1.2", "name": "pkg1"}}, + }, + ] + + m_compare_versions.side_effect = [ + False, + True, + ] + + with mock.patch("uaclient.util.sys") as m_sys: + m_stdout = mock.MagicMock() + type(m_sys).stdout = m_stdout + type(m_stdout).encoding = mock.PropertyMock(return_value="utf-8") + actual_ret = _fix_usn( + usn=usn, + related_usns=related_usns, + issue_id=issue_id, + installed_packages=installed_packages, + cfg=cfg, + beta_pockets=beta_pockets, + dry_run=dry_run, + no_related=no_related, + ) + + expected_msg = ( + "\n" + + SECURITY_FIXING_REQUESTED_USN.format(issue_id=issue_id) + + "\n" + + textwrap.dedent( + """\ + 1 affected source package is installed: pkg1 + (1/1) pkg1: + A fix is available in Ubuntu standard updates. 
+ """ + ) + + colorize_commands( + [["apt update && apt install --only-upgrade" " -y pkg1"]] + ) + ) + out, err = capsys.readouterr() + assert expected_msg in out + assert FixStatus.SYSTEM_NON_VULNERABLE == actual_ret diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_security_status.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_security_status.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_security_status.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_security_status.py 2023-06-01 18:49:33.000000000 +0000 @@ -5,6 +5,10 @@ import pytest from uaclient import livepatch +from uaclient.entitlements.entitlement_status import ( + ApplicationStatus, + ContractStatus, +) from uaclient.security_status import ( RebootStatus, UpdateStatus, @@ -137,40 +141,50 @@ assert get_update_status(service_name, ua_info) == expected_result @pytest.mark.parametrize("is_attached", (True, False)) - @mock.patch("uaclient.security_status.status") - def test_get_ua_info(self, m_status, is_attached, FakeConfig): - if is_attached: - cfg = FakeConfig().for_attached_machine() - else: - cfg = FakeConfig() + @mock.patch(M_PATH + "_is_attached") + @mock.patch(M_PATH + "ESMInfraEntitlement") + @mock.patch(M_PATH + "ESMAppsEntitlement") + def test_get_ua_info( + self, m_apps, m_infra, m_attached, is_attached, FakeConfig + ): + m_attached.return_value = mock.MagicMock(is_attached=is_attached) - m_status.return_value = { - "attached": is_attached, - "services": [ - {"name": "esm-infra", "entitled": "yes", "status": "enabled"}, - {"name": "esm-apps", "entitled": "yes", "status": "disabled"}, - { - "name": "non-esm-service", - "entitled": "yes", - "status": "enabled", - }, - ], - } + m_infra.return_value = mock.MagicMock( + contract_status=mock.MagicMock( + return_value=ContractStatus.ENTITLED + ), + application_status=mock.MagicMock( + return_value=(ApplicationStatus.ENABLED, 0) + ), + ) + m_apps.return_value = mock.MagicMock( + contract_status=mock.MagicMock( + return_value=ContractStatus.ENTITLED + ), + application_status=mock.MagicMock( + return_value=(ApplicationStatus.DISABLED, 0) + ), + ) + cfg = FakeConfig() result = get_ua_info(cfg) if is_attached: assert result == { "attached": True, "enabled_services": ["esm-infra"], - "entitled_services": ["esm-infra", "esm-apps"], + "entitled_services": ["esm-apps", "esm-infra"], } + assert m_infra.call_args_list == [mock.call(cfg)] + assert m_apps.call_args_list == [mock.call(cfg)] else: assert result == { "attached": False, "enabled_services": [], "entitled_services": [], } + assert m_infra.call_args_list == [] + assert m_apps.call_args_list == [] @pytest.mark.parametrize( "installed_version,other_versions,expected_output", @@ -519,7 +533,9 @@ @mock.patch(M_PATH + "get_reboot_status") @mock.patch(M_PATH + "get_livepatch_fixed_cves", return_value=[]) - @mock.patch(M_PATH + "status", return_value={"attached": False}) + @mock.patch( + M_PATH + "_is_attached", return_value=mock.MagicMock(is_attached=False) + ) @mock.patch(M_PATH + "get_origin_for_package", return_value="main") @mock.patch(M_PATH + "filter_security_updates") @mock.patch(M_PATH + "get_apt_cache") @@ -603,14 +619,16 @@ m_livepatch_status.return_value = livepatch.LivepatchStatusStatus( kernel="installed-kernel-generic", livepatch=livepatch.LivepatchPatchStatus( - state="nothing-to-apply", fixes=None + state="nothing-to-apply", fixes=None, version=None ), supported=None, ) m_kernel_info.return_value = KernelInfo( + 
uname_machine_arch="", uname_release="", proc_version_signature_version=None, + build_date=None, major=None, minor=None, patch=None, @@ -627,7 +645,7 @@ m_livepatch_status.return_value = livepatch.LivepatchStatusStatus( kernel="installed-kernel-generic", livepatch=livepatch.LivepatchPatchStatus( - state="nothing-to-apply", fixes=None + state="nothing-to-apply", fixes=None, version=None ), supported=None, ) @@ -647,6 +665,7 @@ name="cve-example", patched=True ) ], + version=None, ), supported=None, ) @@ -711,26 +730,41 @@ assert 1 == m_load_file.call_count @pytest.mark.parametrize( - "livepatch_state,expected_state,kernel_name", + [ + "livepatch_state", + "supported_state", + "expected_state", + "kernel_name", + ], ( ( "applied", + "supported", RebootStatus.REBOOT_REQUIRED_LIVEPATCH_APPLIED, "4.15.0-187.198-generic", ), - ("applied", RebootStatus.REBOOT_REQUIRED, "test"), ( - "nothing-to-apply", + "applied", + None, RebootStatus.REBOOT_REQUIRED, "4.15.0-187.198-generic", ), + ("applied", "supported", RebootStatus.REBOOT_REQUIRED, "test"), + ( + "nothing-to-apply", + "supported", + RebootStatus.REBOOT_REQUIRED_LIVEPATCH_APPLIED, + "4.15.0-187.198-generic", + ), ( "applying", + "supported", RebootStatus.REBOOT_REQUIRED, "4.15.0-187.198-generic", ), ( "apply-failed", + "supported", RebootStatus.REBOOT_REQUIRED, "4.15.0-187.198-generic", ), @@ -749,6 +783,7 @@ m_is_livepatch_installed, m_kernel_info, livepatch_state, + supported_state, expected_state, kernel_name, ): @@ -760,9 +795,9 @@ m_livepatch_status.return_value = livepatch.LivepatchStatusStatus( kernel="4.15.0-187.198-generic", livepatch=livepatch.LivepatchPatchStatus( - state=livepatch_state, fixes=None + state=livepatch_state, fixes=None, version=None ), - supported=None, + supported=supported_state, ) assert get_reboot_status() == expected_state @@ -793,7 +828,7 @@ m_livepatch_status.return_value = livepatch.LivepatchStatusStatus( kernel="4.15.0-187.198-generic", livepatch=livepatch.LivepatchPatchStatus( - state="applied", fixes=None + state="applied", fixes=None, version=None ), supported=None, ) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_status.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_status.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_status.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_status.py 2023-05-30 19:02:35.000000000 +0000 @@ -509,6 +509,7 @@ "available": mock.ANY, "blocked_by": [], "warning": None, + "variants": {}, } for cls in ENTITLEMENT_CLASSES ] @@ -621,6 +622,7 @@ os.lstat(cfg.data_path("status-cache")).st_mode ) + @pytest.mark.parametrize("variants_in_contract", ((True), (False))) @pytest.mark.parametrize("show_all", (True, False)) @pytest.mark.parametrize( "features_override", ((None), ({"allow_beta": False})) @@ -653,8 +655,10 @@ @mock.patch(M_PATH + "esm.ESMAppsEntitlement.contract_status") @mock.patch(M_PATH + "repo.RepoEntitlement.user_facing_status") @mock.patch(M_PATH + "repo.RepoEntitlement.contract_status") + @mock.patch(M_PATH + "base.UAEntitlement._get_contract_variants") def test_attached_reports_contract_and_service_status( self, + m_contract_variants, m_repo_contract_status, m_repo_uf_status, m_esm_contract_status, @@ -670,6 +674,7 @@ entitlements, features_override, show_all, + variants_in_contract, FakeConfig, ): """When attached, return contract and service user-facing status.""" @@ -755,7 +760,60 @@ "esm-apps": "esm-apps details", } + rt_variants = { + "intel-iotg": { + "available": 
mock.ANY, + "blocked_by": [], + "description": "RT kernel optimized " + "for Intel IOTG " + "platform", + "description_override": None, + "entitled": "yes", + "name": "intel-iotg", + "status": "n/a", + "status_details": "repo details", + "warning": None, + }, + "nvidia-tegra": { + "available": mock.ANY, + "blocked_by": [], + "description": "RT kernel " + "optimized for " + "NVIDIA Tegra " + "platform", + "description_override": None, + "entitled": "yes", + "name": "nvidia-tegra", + "status": "n/a", + "status_details": "repo details", + "warning": None, + }, + "generic": { + "available": mock.ANY, + "blocked_by": [], + "description": "Generic version of the RT kernel (default)", + "description_override": None, + "entitled": "yes", + "name": "generic", + "status": "n/a", + "status_details": "repo details", + "warning": None, + }, + } + for cls in ENTITLEMENT_CLASSES: + if cls.name == "realtime-kernel": + if variants_in_contract: + m_contract_variants.return_value = set( + ["intel-iotg", "nvidia-tegra"] + ) + variants = rt_variants + else: + m_contract_variants.return_value = set() + variants = {} + else: + variants = {} + if cls.name == "livepatch": expected_status = UserFacingStatus.ACTIVE.value elif show_all: @@ -781,15 +839,16 @@ "available": mock.ANY, "blocked_by": [], "warning": None, + "variants": variants, } ) with mock.patch( "uaclient.status._get_config_status" ) as m_get_cfg_status: m_get_cfg_status.return_value = DEFAULT_CFG_STATUS + expected_status_calls = 11 if variants_in_contract else 8 assert expected == status.status(cfg=cfg, show_all=show_all) - - assert len(ENTITLEMENT_CLASSES) - 2 == m_repo_uf_status.call_count + assert expected_status_calls == m_repo_uf_status.call_count assert 1 == m_livepatch_uf_status.call_count expected_calls = [ @@ -852,19 +911,23 @@ ) assert expected_dt == status.status(cfg=cfg)["expires"] + @mock.patch( + "uaclient.files.state_files.reboot_cmd_marker_file", + new_callable=mock.PropertyMock, + ) @mock.patch("uaclient.status.get_available_resources", return_value={}) def test_nonroot_user_does_not_use_cache( self, _m_get_available_resources, + m_reboot_cmd_marker_file, _m_should_reboot, m_remove_notice, m_on_supported_kernel, FakeConfig, ): - + m_reboot_cmd_marker_file.is_present = True cached_status = {"pass": True} cfg = FakeConfig() - cfg.write_cache("marker-reboot-cmds", "") # To indicate a reboot reqd cfg.write_cache("status-cache", cached_status) before = status.status(cfg=cfg) @@ -924,6 +987,7 @@ uf_status, in_inapplicable_resources, expected_status, + FakeConfig, ): ent = mock.MagicMock() ent.name = "test_entitlement" @@ -936,7 +1000,9 @@ unavailable_resources = ( {ent.name: ""} if in_inapplicable_resources else {} ) - ret = status._attached_service_status(ent, unavailable_resources) + ret = status._attached_service_status( + ent, unavailable_resources, FakeConfig() + ) assert expected_status == ret["status"] @@ -975,9 +1041,10 @@ tmpdir, FakeConfig, ): + cfg = FakeConfig() ent = ConcreteTestEntitlement( - cfg=FakeConfig(), + cfg=cfg, blocking_incompatible_services=blocking_incompatible_services, ) - service_status = status._attached_service_status(ent, []) + service_status = status._attached_service_status(ent, [], cfg) assert service_status["blocked_by"] == expected_blocked_by diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_system.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_system.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_system.py 2023-04-05 15:14:00.000000000 +0000 +++ 
ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_system.py 2023-06-27 00:49:37.000000000 +0000 @@ -1,5 +1,8 @@ +import datetime import logging +import os import subprocess +import textwrap import uuid import mock @@ -10,14 +13,41 @@ class TestGetKernelInfo: @pytest.mark.parametrize( - "uname_release, proc_version_signature_side_effect, expected", + [ + "uname_machine", + "uname_release", + "proc_version_signature_side_effect", + "build_date", + "expected", + ], ( ( + "x86_64", + "5.14.0-1024-oem", + "Ubuntu 5.14.0-1024.26-oem 5.15.100", + datetime.datetime(2023, 4, 6, 7, 48, 48), + system.KernelInfo( + uname_machine_arch="x86_64", + uname_release="5.14.0-1024-oem", + proc_version_signature_version="5.14.0-1024.26-oem", + build_date=datetime.datetime(2023, 4, 6, 7, 48, 48), + major=5, + minor=14, + patch=0, + abi="1024", + flavor="oem", + ), + ), + ( + "aarch64", "5.14.0-1024-oem", "Ubuntu 5.14.0-1024.26-oem 5.15.100", + datetime.datetime(2023, 4, 6, 7, 48, 48), system.KernelInfo( + uname_machine_arch="aarch64", uname_release="5.14.0-1024-oem", proc_version_signature_version="5.14.0-1024.26-oem", + build_date=datetime.datetime(2023, 4, 6, 7, 48, 48), major=5, minor=14, patch=0, @@ -26,11 +56,15 @@ ), ), ( + "x86_64", "4.4.0-21-generic", "Ubuntu 4.4.0-21.37-generic 4.15.100", + datetime.datetime(2023, 4, 6, 7, 48, 48), system.KernelInfo( + uname_machine_arch="x86_64", uname_release="4.4.0-21-generic", proc_version_signature_version="4.4.0-21.37-generic", + build_date=datetime.datetime(2023, 4, 6, 7, 48, 48), major=4, minor=4, patch=0, @@ -39,11 +73,15 @@ ), ), ( + "x86_64", "5.4.0-52-generic", "Ubuntu 5.4.0-52.37-generic 5.15.100", + datetime.datetime(2023, 4, 6, 7, 48, 48), system.KernelInfo( + uname_machine_arch="x86_64", uname_release="5.4.0-52-generic", proc_version_signature_version="5.4.0-52.37-generic", + build_date=datetime.datetime(2023, 4, 6, 7, 48, 48), major=5, minor=4, patch=0, @@ -52,11 +90,15 @@ ), ), ( + "x86_64", "5.4.0-52-generic", "Ubuntu 5.4.0-52.37~20.04-generic 5.15.100", + datetime.datetime(2023, 4, 6, 7, 48, 48), system.KernelInfo( + uname_machine_arch="x86_64", uname_release="5.4.0-52-generic", proc_version_signature_version="5.4.0-52.37~20.04-generic", + build_date=datetime.datetime(2023, 4, 6, 7, 48, 48), major=5, minor=4, patch=0, @@ -65,11 +107,15 @@ ), ), ( + "x86_64", "5.4.0-52-generic", Exception(), + datetime.datetime(2023, 4, 6, 7, 48, 48), system.KernelInfo( + uname_machine_arch="x86_64", uname_release="5.4.0-52-generic", proc_version_signature_version=None, + build_date=datetime.datetime(2023, 4, 6, 7, 48, 48), major=5, minor=4, patch=0, @@ -78,11 +124,15 @@ ), ), ( + "x86_64", "5.4.0-1021-aws-fips", "Ubuntu 5.4.0-1021.21+fips2-aws-fips 5.4.44", + datetime.datetime(2023, 4, 6, 7, 48, 48), system.KernelInfo( + uname_machine_arch="x86_64", uname_release="5.4.0-1021-aws-fips", proc_version_signature_version="5.4.0-1021.21+fips2-aws-fips", # noqa: E501 + build_date=datetime.datetime(2023, 4, 6, 7, 48, 48), major=5, minor=4, patch=0, @@ -91,11 +141,15 @@ ), ), ( + "x86_64", "4.4.0-1017-fips", "Ubuntu 4.4.0-1017.22~recert1-fips 4.4.185", + datetime.datetime(2023, 4, 6, 7, 48, 48), system.KernelInfo( + uname_machine_arch="x86_64", uname_release="4.4.0-1017-fips", proc_version_signature_version="4.4.0-1017.22~recert1-fips", # noqa: E501 + build_date=datetime.datetime(2023, 4, 6, 7, 48, 48), major=4, minor=4, patch=0, @@ -104,11 +158,32 @@ ), ), ( + "x86_64", "4.4.0-1017.something.invalid-fips", "Ubuntu 4.4.0-1017.22~recert1-fips 4.4.185", + 
datetime.datetime(2023, 4, 6, 7, 48, 48), system.KernelInfo( + uname_machine_arch="x86_64", uname_release="4.4.0-1017.something.invalid-fips", proc_version_signature_version="4.4.0-1017.22~recert1-fips", # noqa: E501 + build_date=datetime.datetime(2023, 4, 6, 7, 48, 48), + major=None, + minor=None, + patch=None, + abi=None, + flavor=None, + ), + ), + ( + "x86_64", + "4.4.0-1017.something.invalid-fips", + "Ubuntu 4.4.0-1017.22~recert1-fips 4.4.185", + None, + system.KernelInfo( + uname_machine_arch="x86_64", + uname_release="4.4.0-1017.something.invalid-fips", + proc_version_signature_version="4.4.0-1017.22~recert1-fips", # noqa: E501 + build_date=None, major=None, minor=None, patch=None, @@ -118,20 +193,139 @@ ), ), ) + @mock.patch("uaclient.system._get_kernel_build_date") @mock.patch("uaclient.system.load_file") @mock.patch("uaclient.system.os.uname") def test_get_kernel_info( self, m_uname, m_load_file, + m_get_kernel_build_date, + uname_machine, uname_release, proc_version_signature_side_effect, + build_date, expected, ): - m_uname.return_value = mock.MagicMock(release=uname_release) + m_uname.return_value = mock.MagicMock( + release=uname_release, machine=uname_machine + ) m_load_file.side_effect = [proc_version_signature_side_effect] + m_get_kernel_build_date.return_value = build_date assert system.get_kernel_info.__wrapped__() == expected + @pytest.mark.parametrize( + [ + "uname_result", + "changelog_timestamp", + "expected", + ], + [ + ( + os.uname_result( + [ + "", + "", + "", + "#20-Ubuntu SMP PREEMPT_DYNAMIC Thu Apr 6 07:48:48 UTC 2023", # noqa: E501 + "", + ] + ), + mock.sentinel.changelog_timestamp, + datetime.datetime( + 2023, 4, 6, 7, 48, 48, tzinfo=datetime.timezone.utc + ), + ), + ( + os.uname_result( + [ + "", + "", + "", + "#33~22.04.1-Ubuntu SMP PREEMPT_DYNAMIC Mon Jan 30 17:03:34 UTC 2", # noqa: E501 + "", + ] + ), + mock.sentinel.changelog_timestamp, + mock.sentinel.changelog_timestamp, + ), + ( + os.uname_result(["", "", "", "corrupted", ""]), + mock.sentinel.changelog_timestamp, + mock.sentinel.changelog_timestamp, + ), + ], + ) + @mock.patch("uaclient.system._get_kernel_changelog_timestamp") + def test_get_kernel_build_date( + self, + m_get_kernel_changelog_timestamp, + uname_result, + changelog_timestamp, + expected, + ): + m_get_kernel_changelog_timestamp.return_value = changelog_timestamp + assert expected == system._get_kernel_build_date(uname_result) + + @pytest.mark.parametrize( + [ + "uname_result", + "is_container", + "stat_result", + "expected_stat_call_args", + "expected", + ], + [ + ( + None, + True, + None, + [], + None, + ), + ( + os.uname_result(["", "", "version-here", "", ""]), + False, + Exception(), + [ + mock.call( + "/usr/share/doc/linux-image-version-here/changelog.Debian.gz" # noqa: E501 + ) + ], + None, + ), + ( + os.uname_result(["", "", "version-here", "", ""]), + False, + [os.stat_result([0, 0, 0, 0, 0, 0, 0, 0, 1680762951, 0])], + [ + mock.call( + "/usr/share/doc/linux-image-version-here/changelog.Debian.gz" # noqa: E501 + ) + ], + datetime.datetime( + 2023, 4, 6, 6, 35, 51, tzinfo=datetime.timezone.utc + ), + ), + ], + ) + @mock.patch("os.stat") + @mock.patch("uaclient.system.is_container") + def test_get_kernel_changelog_timestamp( + self, + m_is_container, + m_os_stat, + uname_result, + is_container, + stat_result, + expected_stat_call_args, + expected, + ): + m_is_container.return_value = is_container + m_os_stat.side_effect = stat_result + assert expected == system._get_kernel_changelog_timestamp(uname_result) + assert 
expected_stat_call_args == m_os_stat.call_args_list + class TestGetDpkgArch: @pytest.mark.parametrize( @@ -457,9 +651,9 @@ ) @mock.patch("uaclient.system.load_file") def test_parse_os_release(self, m_load_file, content, expected): - """parse_os_release returns a dict of values from /etc/os-release.""" + """_parse_os_release returns a dict of values from /etc/os-release.""" m_load_file.return_value = content - assert expected == system.parse_os_release.__wrapped__() + assert expected == system._parse_os_release.__wrapped__() assert m_load_file.call_args_list == [mock.call("/etc/os-release")] @@ -508,9 +702,9 @@ assert messages.MISSING_DISTRO_INFO_FILE == excinfo.value.msg -class TestGetPlatformInfo: +class TestGetReleaseInfo: @pytest.mark.parametrize( - "version, expected_exception", + ["version", "expected_exception"], ( ( "junk", @@ -522,95 +716,67 @@ ), ), ) - @mock.patch("uaclient.system.parse_os_release") - def test_get_platform_info_error( - self, m_parse, version, expected_exception + @mock.patch("uaclient.system._parse_os_release") + def test_get_release_info_error( + self, m_parse_os_release, version, expected_exception ): - """get_platform_info errors when it cannot parse os-release.""" - m_parse.return_value = {"VERSION": version} + """get_release_info errors when it cannot parse os-release.""" + m_parse_os_release.return_value = {"VERSION": version} with pytest.raises(expected_exception): # Use __wrapped__ to avoid hitting the # lru_cached value across tests - system.get_platform_info.__wrapped__() + system.get_release_info.__wrapped__() @pytest.mark.parametrize( - "os_release,arch,kernel,virt,expected", + ["os_release", "expected"], [ ( { "NAME": "Ubuntu", "VERSION": "16.04.5 LTS (Xenial Xerus)", }, - "arm64", - "kernel-ver1", - "lxd", - { - "arch": "arm64", - "distribution": "Ubuntu", - "kernel": "kernel-ver1", - "release": "16.04", - "series": "xenial", - "type": "Linux", - "version": "16.04 LTS (Xenial Xerus)", - "virt": "lxd", - }, + system.ReleaseInfo( + distribution="Ubuntu", + release="16.04", + series="xenial", + pretty_version="16.04 LTS (Xenial Xerus)", + ), ), ( { "NAME": "Ubuntu", "VERSION": "18.04.1 LTS (Bionic Beaver)", }, - "amd64", - "kernel-ver2", - "none", - { - "arch": "amd64", - "distribution": "Ubuntu", - "kernel": "kernel-ver2", - "release": "18.04", - "series": "bionic", - "type": "Linux", - "version": "18.04 LTS (Bionic Beaver)", - "virt": "none", - }, + system.ReleaseInfo( + distribution="Ubuntu", + release="18.04", + series="bionic", + pretty_version="18.04 LTS (Bionic Beaver)", + ), ), ( { "NAME": "Ubuntu", "VERSION": "22.04.1 LTS (Jammy Jellyfish)", }, - "arm64", - "kernel-ver3", - "qemu", - { - "arch": "arm64", - "distribution": "Ubuntu", - "kernel": "kernel-ver3", - "release": "22.04", - "series": "jammy", - "type": "Linux", - "version": "22.04 LTS (Jammy Jellyfish)", - "virt": "qemu", - }, + system.ReleaseInfo( + distribution="Ubuntu", + release="22.04", + series="jammy", + pretty_version="22.04 LTS (Jammy Jellyfish)", + ), ), ( { "NAME": "Ubuntu", - "VERSION": "22.10 LTS (Kinetic Kudu)", - }, - "amd64", - "kernel-ver4", - "wsl", - { - "arch": "amd64", - "distribution": "Ubuntu", - "kernel": "kernel-ver4", - "release": "22.10", - "series": "kinetic", - "type": "Linux", - "version": "22.10 LTS (Kinetic Kudu)", - "virt": "wsl", + "VERSION": "22.10 (Kinetic Kudu)", }, + system.ReleaseInfo( + distribution="Ubuntu", + release="22.10", + series="kinetic", + pretty_version="22.10 (Kinetic Kudu)", + ), ), ( { @@ -618,19 +784,12 @@ "VERSION": "22.04 LTS", 
"VERSION_CODENAME": "Jammy", }, - "amd64", - "kernel-ver4", - "lxd", - { - "arch": "amd64", - "distribution": "Ubuntu", - "kernel": "kernel-ver4", - "release": "22.04", - "series": "jammy", - "type": "Linux", - "version": "22.04 LTS", - "virt": "lxd", - }, + system.ReleaseInfo( + distribution="Ubuntu", + release="22.04", + series="jammy", + pretty_version="22.04 LTS", + ), ), ( { @@ -639,43 +798,30 @@ "VERSION_CODENAME": "Jammy", "VERSION_ID": "22.04", }, - "amd64", - "kernel-ver4", - "lxd", - { - "arch": "amd64", - "distribution": "Ubuntu", - "kernel": "kernel-ver4", - "release": "22.04", - "series": "jammy", - "type": "Linux", - "version": "CORRUPTED", - "virt": "lxd", - }, + system.ReleaseInfo( + distribution="Ubuntu", + release="22.04", + series="jammy", + pretty_version="CORRUPTED", + ), ), ], ) @mock.patch("uaclient.system.get_kernel_info") @mock.patch("uaclient.system.get_dpkg_arch") - @mock.patch("uaclient.system.parse_os_release") + @mock.patch("uaclient.system._parse_os_release") @mock.patch("uaclient.system.get_virt_type") - def test_get_platform_info_with_version( + def test_get_release_info_with_version( self, m_get_virt_type, m_parse_os_release, m_get_dpkg_arch, m_get_kernel_info, os_release, - arch, - kernel, - virt, expected, ): m_parse_os_release.return_value = os_release - m_get_dpkg_arch.return_value = arch - m_get_kernel_info.return_value = mock.MagicMock(uname_release=kernel) - m_get_virt_type.return_value = virt - assert expected == system.get_platform_info.__wrapped__() + assert expected == system.get_release_info.__wrapped__() class TestGetMachineId: @@ -956,6 +1102,49 @@ else: assert log not in logs + @pytest.mark.parametrize( + [ + "override_env_vars", + "os_environ", + "expected_env_arg", + ], + ( + (None, {}, None), + (None, {"test": "val"}, None), + ({}, {"test": "val"}, None), + ({"set": "new"}, {"test": "val"}, {"test": "val", "set": "new"}), + ( + {"set": "new", "test": "newval"}, + {"test": "val"}, + {"test": "newval", "set": "new"}, + ), + ), + ) + @mock.patch("subprocess.Popen") + def test_subp_uses_environment_variables( + self, + m_popen, + override_env_vars, + os_environ, + expected_env_arg, + _subp, + ): + mock_process = mock.MagicMock(returncode=0) + mock_process.communicate.return_value = (b"", b"") + m_popen.return_value = mock_process + + with mock.patch("os.environ", os_environ): + _subp(["apt", "nothing"], override_env_vars=override_env_vars) + + assert [ + mock.call( + [b"apt", b"nothing"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=expected_env_arg, + ) + ] == m_popen.call_args_list + class TestGetSystemdJobState: @pytest.mark.parametrize( @@ -987,3 +1176,109 @@ cmd="test", exit_code=3, stdout="inactive", stderr="" ) assert False is system.get_systemd_job_state(job_name="test") + + +class TestGetCpuInfo: + @pytest.mark.parametrize( + "cpuinfo,vendor_id,model,stepping", + ( + ( + textwrap.dedent( + """ + processor : 6 + vendor_id : GenuineIntel + cpu family : 6 + model : 142 + model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz + stepping : 10 + + processor : 7 + vendor_id : GenuineIntel + cpu family : 6 + model : 142 + model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz + stepping : 10""" + ), + "intel", + 142, + 10, + ), + ( + textwrap.dedent( + """ + processor : 6 + vendor_id : test + cpu family : 6 + model : 148 + model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz + stepping : 12 + + processor : 7 + vendor_id : GenuineIntel + cpu family : 6 + model : 142 + model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz + 
stepping : 10""" + ), + "test", + 148, + 12, + ), + ( + textwrap.dedent( + """ + processor : 6 + cpu family : 6 + model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz + + processor : 7 + cpu family : 6 + model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz""" + ), + "", + None, + None, + ), + ), + ) + @mock.patch("uaclient.system.load_file") + def test_get_cpu_vendor( + self, m_load_file, cpuinfo, vendor_id, model, stepping + ): + m_load_file.return_value = cpuinfo + assert vendor_id == system.get_cpu_info.__wrapped__().vendor_id + assert model == system.get_cpu_info.__wrapped__().model + assert stepping == system.get_cpu_info.__wrapped__().stepping + + +class TestGetUserCacheDir: + @pytest.mark.parametrize( + [ + "is_root", + "xdg_cache_home", + "expanduser_result", + "expected", + ], + ( + (True, None, None, "/run/ubuntu-advantage/"), + (False, None, "/home/user", "/home/user/.cache/ubuntu-pro"), + (False, "/something", "/home/user", "/something/ubuntu-pro"), + ), + ) + @mock.patch("os.path.expanduser") + @mock.patch("os.environ.get") + @mock.patch("uaclient.util.we_are_currently_root") + def test_get_user_cache_dir( + self, + m_we_are_currently_root, + m_environ_get, + m_expanduser, + is_root, + xdg_cache_home, + expanduser_result, + expected, + ): + m_we_are_currently_root.return_value = is_root + m_environ_get.return_value = xdg_cache_home + m_expanduser.return_value = expanduser_result + assert expected == system.get_user_cache_dir() diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_upgrade_lts_contract.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_upgrade_lts_contract.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_upgrade_lts_contract.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_upgrade_lts_contract.py 2023-06-01 18:49:33.000000000 +0000 @@ -3,23 +3,22 @@ import mock import pytest -from lib.upgrade_lts_contract import process_contract_delta_after_apt_lock +from uaclient.upgrade_lts_contract import process_contract_delta_after_apt_lock @pytest.mark.parametrize("caplog_text", [logging.DEBUG], indirect=True) class TestUpgradeLTSContract: - @mock.patch( - "uaclient.config.UAConfig.is_attached", - new_callable=mock.PropertyMock, - return_value=False, - ) - def test_unattached_noops(self, m_is_attached, capsys, caplog_text): + @mock.patch("uaclient.upgrade_lts_contract._is_attached") + def test_unattached_noops( + self, m_is_attached, capsys, caplog_text, FakeConfig + ): + m_is_attached.return_value = mock.MagicMock(is_attached=False) expected_logs = [ "Check whether to upgrade-lts-contract", "Skipping upgrade-lts-contract. 
Machine is unattached", ] - process_contract_delta_after_apt_lock() + process_contract_delta_after_apt_lock(FakeConfig()) assert 1 == m_is_attached.call_count out, _err = capsys.readouterr() @@ -28,48 +27,20 @@ for log in expected_logs: assert log in debug_logs - @mock.patch( - "uaclient.config.UAConfig.is_attached", - new_callable=mock.PropertyMock, - return_value=True, - ) - @mock.patch("lib.upgrade_lts_contract.parse_os_release") - @mock.patch("lib.upgrade_lts_contract.subp") - def test_upgrade_cancel_when_current_version_not_supported( - self, m_subp, m_parse_os, m_is_attached, capsys, caplog_text - ): - m_parse_os.return_value = {"VERSION_ID": "NOT-SUPPORTED"} - m_subp.return_value = ("", "") - - expected_msgs = [ - "Starting upgrade-lts-contract.", - "Unable to get release codename for version: NOT-SUPPORTED", - ] - expected_logs = ["Check whether to upgrade-lts-contract"] - with pytest.raises(SystemExit) as execinfo: - process_contract_delta_after_apt_lock() - - assert 1 == execinfo.value.code - assert 1 == m_is_attached.call_count - assert 1 == m_parse_os.call_count - assert 1 == m_subp.call_count - out, _err = capsys.readouterr() - assert out == "\n".join(expected_msgs) + "\n" - debug_logs = caplog_text() - for log in expected_msgs + expected_logs: - assert log in debug_logs - - @mock.patch( - "uaclient.config.UAConfig.is_attached", - new_callable=mock.PropertyMock, - return_value=True, - ) - @mock.patch("lib.upgrade_lts_contract.parse_os_release") - @mock.patch("lib.upgrade_lts_contract.subp") + @mock.patch("uaclient.upgrade_lts_contract._is_attached") + @mock.patch("uaclient.upgrade_lts_contract.system.get_release_info") + @mock.patch("uaclient.upgrade_lts_contract.system.subp") def test_upgrade_cancel_when_past_version_not_supported( - self, m_subp, m_parse_os, m_is_attached, capsys, caplog_text + self, + m_subp, + m_get_release_info, + m_is_attached, + capsys, + caplog_text, + FakeConfig, ): - m_parse_os.return_value = {"VERSION_ID": "20.10"} + m_is_attached.return_value = mock.MagicMock(is_attached=True) + m_get_release_info.return_value = mock.MagicMock(series="groovy") m_subp.return_value = ("", "") expected_msgs = [ @@ -78,15 +49,11 @@ ] expected_logs = ["Check whether to upgrade-lts-contract"] with pytest.raises(SystemExit) as execinfo: - with mock.patch( - "lib.upgrade_lts_contract.version_to_codename", - {"20.10": "groovy"}, - ): - process_contract_delta_after_apt_lock() + process_contract_delta_after_apt_lock(FakeConfig()) assert 1 == execinfo.value.code assert 1 == m_is_attached.call_count - assert 1 == m_parse_os.call_count + assert 1 == m_get_release_info.call_count assert 1 == m_subp.call_count out, _err = capsys.readouterr() assert out == "\n".join(expected_msgs) + "\n" @@ -94,27 +61,26 @@ for log in expected_msgs + expected_logs: assert log in debug_logs + @mock.patch("uaclient.upgrade_lts_contract._is_attached") + @mock.patch("uaclient.upgrade_lts_contract.system.get_release_info") + @mock.patch("uaclient.upgrade_lts_contract.system.subp") @mock.patch( - "uaclient.config.UAConfig.is_attached", - new_callable=mock.PropertyMock, - return_value=True, + "uaclient.upgrade_lts_contract.contract.process_entitlements_delta" ) - @mock.patch("lib.upgrade_lts_contract.parse_os_release") - @mock.patch("lib.upgrade_lts_contract.subp") - @mock.patch("lib.upgrade_lts_contract.process_entitlements_delta") - @mock.patch("lib.upgrade_lts_contract.time.sleep") + @mock.patch("uaclient.upgrade_lts_contract.time.sleep") def test_upgrade_contract_when_apt_lock_is_held( self, m_sleep, 
m_process_delta, m_subp, - m_parse_os, + m_get_release_info, m_is_attached, capsys, caplog_text, FakeConfig, ): - m_parse_os.return_value = {"VERSION_ID": "20.04"} + m_is_attached.return_value = mock.MagicMock(is_attached=True) + m_get_release_info.return_value = mock.MagicMock(series="focal") m_subp.side_effect = [ ("apt 146195 root", ""), @@ -142,13 +108,13 @@ ] with mock.patch( - "lib.upgrade_lts_contract.UAConfig", + "uaclient.upgrade_lts_contract.UAConfig", return_value=FakeConfig(), ): - process_contract_delta_after_apt_lock() + process_contract_delta_after_apt_lock(FakeConfig()) assert 1 == m_is_attached.call_count - assert 1 == m_parse_os.call_count + assert 1 == m_get_release_info.call_count assert 4 == m_subp.call_count assert 1 == m_process_delta.call_count out, _err = capsys.readouterr() diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_util.py ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_util.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/tests/test_util.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/tests/test_util.py 2023-06-01 18:49:33.000000000 +0000 @@ -9,6 +9,7 @@ import pytest from uaclient import cli, exceptions, messages, util +from uaclient.log import RedactionFilter class TestGetDictDeltas: @@ -71,7 +72,9 @@ class TestReadurl: - @pytest.mark.parametrize("caplog_text", [logging.DEBUG], indirect=True) + @pytest.mark.parametrize( + "caplog_text", [(logging.DEBUG, RedactionFilter)], indirect=True + ) @pytest.mark.parametrize( "headers,data,method,url,response,expected_logs", ( @@ -200,13 +203,22 @@ assert "no console handler found" in caplog_text() @pytest.mark.parametrize("disable_log", (True, False)) + @mock.patch("uaclient.log.get_user_log_file") def test_disable_log_to_console( - self, m_we_are_currently_root, logging_sandbox, capsys, disable_log + self, + m_get_user, + m_we_are_currently_root, + logging_sandbox, + capsys, + tmpdir, + disable_log, ): # This test is parameterised so that we are sure that the context # manager is suppressing the output, not some other config change - cli.setup_logging(logging.INFO, logging.INFO) + log_file = tmpdir.join("file.log").strpath + m_get_user.return_value = log_file + cli.setup_logging(logging.INFO, logging.INFO, log_file=log_file) if disable_log: context_manager = util.disable_log_to_console @@ -225,14 +237,25 @@ assert "test error" in combined_output assert "test info" in combined_output + @mock.patch("uaclient.log.get_user_log_file") def test_disable_log_to_console_does_nothing_at_debug_level( - self, m_we_are_currently_root, logging_sandbox, capsys + self, + m_get_user, + m_we_are_currently_root, + logging_sandbox, + capsys, + FakeConfig, + tmpdir, ): - cli.setup_logging(logging.DEBUG, logging.DEBUG) + m_get_user.return_value = tmpdir.join("file.log").strpath + with mock.patch( + "uaclient.cli.config.UAConfig", return_value=FakeConfig() + ): + cli.setup_logging(logging.DEBUG, logging.DEBUG) - with util.disable_log_to_console(): - logging.error("test error") - logging.info("test info") + with util.disable_log_to_console(): + logging.error("test error") + logging.info("test info") out, err = capsys.readouterr() combined_output = out + err @@ -264,7 +287,6 @@ class TestDatetimeAwareJSONDecoder: - # Note that the parameter names are flipped from # TestDatetimeAwareJSONEncoder @pytest.mark.parametrize("out,input", JSON_TEST_PAIRS) @@ -668,6 +690,7 @@ (messages.OKGREEN_CHECK + " test", "test"), (messages.FAIL_X + " fail", "fail"), ("\u2014 blah", "- blah"), 
+ ("\xfcblah", "blah"), ), ) def test_handle_unicode_characters( diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/timer/__init__.py ubuntu-advantage-tools-28.1~18.04/uaclient/timer/__init__.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/timer/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/timer/__init__.py 2023-05-30 19:02:35.000000000 +0000 @@ -0,0 +1,20 @@ +import logging +from subprocess import TimeoutExpired + +from uaclient import exceptions, system + +LOG = logging.getLogger("uaclient.timer") + + +def start(): + try: + system.subp(["systemctl", "start", "ua-timer.timer"], timeout=2.0) + except (exceptions.ProcessExecutionError, TimeoutExpired) as e: + LOG.warning(e) + + +def stop(): + try: + system.subp(["systemctl", "stop", "ua-timer.timer"], timeout=2.0) + except (exceptions.ProcessExecutionError, TimeoutExpired) as e: + LOG.warning(e) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/timer/metering.py ubuntu-advantage-tools-28.1~18.04/uaclient/timer/metering.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/timer/metering.py 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/timer/metering.py 2023-06-01 18:49:33.000000000 +0000 @@ -0,0 +1,23 @@ +""" +Functions to be used when running metering jobs +""" + +from uaclient import config +from uaclient.api.u.pro.status.is_attached.v1 import _is_attached +from uaclient.cli import assert_lock_file +from uaclient.contract import UAContractClient + + +@assert_lock_file("timer metering job") +def metering_enabled_resources(cfg: config.UAConfig) -> bool: + # We only run this job if there is no other job running. + # The reason for that is to avoid potential conflicts with + # auto-attach, attach and enable operations. + + if not _is_attached(cfg).is_attached: + return False + + contract = UAContractClient(cfg) + contract.update_activity_token() + + return True diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/timer/tests/test_update_contract_info.py ubuntu-advantage-tools-28.1~18.04/uaclient/timer/tests/test_update_contract_info.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/timer/tests/test_update_contract_info.py 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/timer/tests/test_update_contract_info.py 2023-05-30 19:02:35.000000000 +0000 @@ -0,0 +1,88 @@ +import logging + +import mock +import pytest + +from uaclient.files.notices import Notice +from uaclient.timer.update_contract_info import update_contract_info + +M_PATH = "uaclient.timer.update_contract_info." 
+ + +@mock.patch(M_PATH + "contract.is_contract_changed", return_value=False) +class TestUpdateContractInfo: + @pytest.mark.parametrize( + "contract_changed,is_attached", + ( + (False, True), + (True, False), + (True, True), + (False, False), + ), + ) + @mock.patch(M_PATH + "notices", autospec=True) + def test_is_contract_changed( + self, + m_notices, + m_contract_changed, + contract_changed, + is_attached, + FakeConfig, + ): + m_contract_changed.return_value = contract_changed + if is_attached: + cfg = FakeConfig().for_attached_machine() + else: + cfg = FakeConfig() + + update_contract_info(cfg=cfg) + + if is_attached: + if contract_changed: + assert [ + mock.call( + Notice.CONTRACT_REFRESH_WARNING, + ) + ] == m_notices.add.call_args_list + else: + assert [ + mock.call( + Notice.CONTRACT_REFRESH_WARNING, + ) + ] not in m_notices.add.call_args_list + assert [ + mock.call(Notice.CONTRACT_REFRESH_WARNING) + ] in m_notices.remove.call_args_list + else: + assert m_contract_changed.call_count == 0 + + @pytest.mark.parametrize( + "contract_changed", + ( + False, + True, + Exception("Error checking contract info"), + ), + ) + @pytest.mark.parametrize("caplog_text", [logging.DEBUG], indirect=True) + @mock.patch(M_PATH + "notices", autospec=True) + def test_contract_failure( + self, + m_notices, + m_contract_changed, + contract_changed, + caplog_text, + FakeConfig, + ): + m_contract_changed.side_effect = (contract_changed,) + m_notices.add.side_effect = Exception("Error checking contract info") + m_notices.remove.side_effect = Exception( + "Error checking contract info" + ) + cfg = FakeConfig().for_attached_machine() + + assert False is update_contract_info(cfg=cfg) + assert ( + "Failed to check for change in machine contract." + " Reason: Error checking contract info\n" + ) in caplog_text() diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/timer/tests/test_update_messaging.py ubuntu-advantage-tools-28.1~18.04/uaclient/timer/tests/test_update_messaging.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/timer/tests/test_update_messaging.py 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/timer/tests/test_update_messaging.py 2023-06-01 18:49:33.000000000 +0000 @@ -0,0 +1,390 @@ +import datetime + +import mock +import pytest + +from uaclient import messages +from uaclient.api.u.pro.packages.updates.v1 import ( + PackageUpdatesResult, + UpdateSummary, +) +from uaclient.entitlements.entitlement_status import ApplicationStatus +from uaclient.timer.update_messaging import ( + ContractExpiryStatus, + get_contract_expiry_status, + update_motd_messages, +) + +M_PATH = "uaclient.timer.update_messaging." 
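# ---------------------------------------------------------------------------
# Editor's illustration (not part of the upstream patch): the parametrized
# cases below exercise get_contract_expiry_status(), which classifies the
# contract by its remaining days. The pending and grace windows come from
# uaclient.defaults (CONTRACT_EXPIRY_PENDING_DAYS and
# CONTRACT_EXPIRY_GRACE_PERIOD_DAYS); the test values imply a 20-day pending
# window, and grace_days=14 here is an assumed value for illustration only.
def _sketch_expiry_status(remaining_days, pending_days=20, grace_days=14):
    if 0 <= remaining_days <= pending_days:
        return ContractExpiryStatus.ACTIVE_EXPIRED_SOON
    if -grace_days <= remaining_days < 0:
        return ContractExpiryStatus.EXPIRED_GRACE_PERIOD
    if remaining_days < -grace_days:
        return ContractExpiryStatus.EXPIRED
    return ContractExpiryStatus.ACTIVE
# ---------------------------------------------------------------------------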
+ + +class TestGetContractExpiryStatus: + @pytest.mark.parametrize( + "contract_remaining_days,expected_status", + ( + (21, ContractExpiryStatus.ACTIVE), + (20, ContractExpiryStatus.ACTIVE_EXPIRED_SOON), + (-1, ContractExpiryStatus.EXPIRED_GRACE_PERIOD), + (-20, ContractExpiryStatus.EXPIRED), + ), + ) + def test_contract_expiry_status_based_on_remaining_days( + self, contract_remaining_days, expected_status, FakeConfig + ): + """Return a tuple of ContractExpiryStatus and remaining_days""" + now = datetime.datetime.utcnow() + expire_date = now + datetime.timedelta(days=contract_remaining_days) + cfg = FakeConfig.for_attached_machine() + m_token = cfg.machine_token + m_token["machineTokenInfo"]["contractInfo"][ + "effectiveTo" + ] = expire_date + + assert ( + expected_status, + contract_remaining_days, + ) == get_contract_expiry_status(cfg) + + @pytest.mark.parametrize( + "expiry,is_updated", + (("2040-05-08T19:02:26Z", False), ("2042-05-08T19:02:26Z", True)), + ) + @mock.patch("uaclient.files.MachineTokenFile.write") + @mock.patch(M_PATH + "contract.UAContractClient.get_contract_machine") + def test_update_contract_expiry( + self, + m_get_contract_machine, + m_machine_token_write, + expiry, + is_updated, + ): + m_get_contract_machine.return_value = { + "machineTokenInfo": {"contractInfo": {"effectiveTo": expiry}} + } + if is_updated: + 1 == m_machine_token_write.call_count + else: + 0 == m_machine_token_write.call_count + + +class TestUpdateMotdMessages: + @pytest.mark.parametrize( + [ + "attached", + "contract_expiry_statuses", + "is_current_series_active_esm", + "infra_status", + "is_current_series_lts", + "apps_status", + "updates", + "expected", + "update_contract_expiry_calls", + "ensure_file_absent_calls", + "write_file_calls", + ], + [ + ( + # not attached + False, + [], + False, + None, + False, + None, + None, + False, + [], + [], + [], + ), + ( + # somehow attached but none contract status + True, + [(ContractExpiryStatus.NONE, None)], + False, + None, + False, + None, + None, + True, + [], + [mock.call(mock.ANY)], + [], + ), + ( + # active contract + True, + [(ContractExpiryStatus.ACTIVE, None)], + False, + None, + False, + None, + None, + True, + [], + [mock.call(mock.ANY)], + [], + ), + ( + # expiring soon contract, updated to be active + True, + [ + (ContractExpiryStatus.ACTIVE_EXPIRED_SOON, None), + (ContractExpiryStatus.ACTIVE, None), + ], + False, + None, + False, + None, + None, + True, + [mock.call(mock.ANY)], + [mock.call(mock.ANY)], + [], + ), + ( + # expired grace period contract, updated to be active + True, + [ + (ContractExpiryStatus.EXPIRED_GRACE_PERIOD, None), + (ContractExpiryStatus.ACTIVE, None), + ], + False, + None, + False, + None, + None, + True, + [mock.call(mock.ANY)], + [mock.call(mock.ANY)], + [], + ), + ( + # expired contract, updated to be active + True, + [ + (ContractExpiryStatus.EXPIRED, None), + (ContractExpiryStatus.ACTIVE, None), + ], + False, + None, + False, + None, + None, + True, + [mock.call(mock.ANY)], + [mock.call(mock.ANY)], + [], + ), + ( + # expiring soon for real + True, + [ + (ContractExpiryStatus.ACTIVE_EXPIRED_SOON, 3), + (ContractExpiryStatus.ACTIVE_EXPIRED_SOON, 3), + ], + False, + None, + False, + None, + None, + True, + [mock.call(mock.ANY)], + [], + [ + mock.call( + mock.ANY, + messages.CONTRACT_EXPIRES_SOON_MOTD.format( + remaining_days=3 + ), + ) + ], + ), + ( + # expired grace period for real + True, + [ + (ContractExpiryStatus.EXPIRED_GRACE_PERIOD, -3), + (ContractExpiryStatus.EXPIRED_GRACE_PERIOD, -3), + ], + False, + 
None, + False, + None, + None, + True, + [mock.call(mock.ANY)], + [], + [ + mock.call( + mock.ANY, + messages.CONTRACT_EXPIRED_GRACE_PERIOD_MOTD.format( + remaining_days=11, expired_date="21 Dec 2012" + ), + ) + ], + ), + ( + # expired, eol release, esm-infra not enabled + True, + [ + (ContractExpiryStatus.EXPIRED, 3), + (ContractExpiryStatus.EXPIRED, 3), + ], + True, + (ApplicationStatus.DISABLED, None), + False, + None, + None, + True, + [mock.call(mock.ANY)], + [], + [mock.call(mock.ANY, messages.CONTRACT_EXPIRED_MOTD_NO_PKGS)], + ), + ( + # expired, lts release, esm-apps not enabled + True, + [ + (ContractExpiryStatus.EXPIRED, 3), + (ContractExpiryStatus.EXPIRED, 3), + ], + False, + None, + True, + (ApplicationStatus.DISABLED, None), + None, + True, + [mock.call(mock.ANY)], + [], + [mock.call(mock.ANY, messages.CONTRACT_EXPIRED_MOTD_NO_PKGS)], + ), + ( + # expired, interim release + True, + [ + (ContractExpiryStatus.EXPIRED, 3), + (ContractExpiryStatus.EXPIRED, 3), + ], + False, + None, + False, + None, + None, + True, + [mock.call(mock.ANY)], + [], + [mock.call(mock.ANY, messages.CONTRACT_EXPIRED_MOTD_NO_PKGS)], + ), + ( + # expired, eol release, esm-infra enabled + True, + [ + (ContractExpiryStatus.EXPIRED, 3), + (ContractExpiryStatus.EXPIRED, 3), + ], + True, + (ApplicationStatus.ENABLED, None), + False, + None, + PackageUpdatesResult(UpdateSummary(0, 0, 4, 0, 0), []), + True, + [mock.call(mock.ANY)], + [], + [ + mock.call( + mock.ANY, + messages.CONTRACT_EXPIRED_MOTD_PKGS.format( + service="esm-infra", pkg_num=4 + ), + ) + ], + ), + ( + # expired, lts release, esm-apps enabled + True, + [ + (ContractExpiryStatus.EXPIRED, 3), + (ContractExpiryStatus.EXPIRED, 3), + ], + False, + None, + True, + (ApplicationStatus.ENABLED, None), + PackageUpdatesResult(UpdateSummary(0, 5, 0, 0, 0), []), + True, + [mock.call(mock.ANY)], + [], + [ + mock.call( + mock.ANY, + messages.CONTRACT_EXPIRED_MOTD_PKGS.format( + service="esm-apps", pkg_num=5 + ), + ) + ], + ), + ], + ) + @mock.patch(M_PATH + "api_u_pro_packages_updates_v1") + @mock.patch(M_PATH + "ESMAppsEntitlement.application_status") + @mock.patch(M_PATH + "system.is_current_series_lts") + @mock.patch(M_PATH + "ESMInfraEntitlement.application_status") + @mock.patch(M_PATH + "system.is_current_series_active_esm") + @mock.patch( + M_PATH + "UAConfig.machine_token_file", new_callable=mock.PropertyMock + ) + @mock.patch(M_PATH + "system.write_file") + @mock.patch(M_PATH + "system.ensure_file_absent") + @mock.patch(M_PATH + "update_contract_expiry") + @mock.patch(M_PATH + "get_contract_expiry_status") + @mock.patch(M_PATH + "_is_attached") + def test_update_motd_messages( + self, + m_is_attached, + m_get_contract_expiry_status, + m_update_contract_expiry, + m_ensure_file_absent, + m_write_file, + m_machine_token_file, + m_is_current_series_active_esm, + m_infra_status, + m_is_current_series_lts, + m_apps_status, + m_api_updates_v1, + attached, + contract_expiry_statuses, + is_current_series_active_esm, + infra_status, + is_current_series_lts, + apps_status, + updates, + expected, + update_contract_expiry_calls, + ensure_file_absent_calls, + write_file_calls, + FakeConfig, + ): + m_is_attached.return_value = mock.MagicMock(is_attached=attached) + m_get_contract_expiry_status.side_effect = contract_expiry_statuses + m_is_current_series_active_esm.return_value = ( + is_current_series_active_esm + ) + m_infra_status.return_value = infra_status + m_is_current_series_lts.return_value = is_current_series_lts + m_apps_status.return_value = apps_status + 
m_api_updates_v1.return_value = updates + + machine_token_file = mock.MagicMock() + machine_token_file.contract_expiry_datetime = datetime.datetime( + 2012, 12, 21 + ) + m_machine_token_file.return_value = machine_token_file + + assert expected == update_motd_messages(FakeConfig()) + + assert ( + update_contract_expiry_calls + == m_update_contract_expiry.call_args_list + ) + assert ensure_file_absent_calls == m_ensure_file_absent.call_args_list + assert write_file_calls == m_write_file.call_args_list diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/timer/update_contract_info.py ubuntu-advantage-tools-28.1~18.04/uaclient/timer/update_contract_info.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/timer/update_contract_info.py 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/timer/update_contract_info.py 2023-06-01 18:49:33.000000000 +0000 @@ -0,0 +1,30 @@ +import logging + +from uaclient import contract, messages, util +from uaclient.api.u.pro.status.is_attached.v1 import _is_attached +from uaclient.config import UAConfig +from uaclient.files import notices +from uaclient.files.notices import Notice + +LOG = logging.getLogger(__name__) + + +def update_contract_info(cfg: UAConfig) -> bool: + if _is_attached(cfg).is_attached: + try: + if contract.is_contract_changed(cfg): + notices.add( + Notice.CONTRACT_REFRESH_WARNING, + ) + else: + notices.remove( + Notice.CONTRACT_REFRESH_WARNING, + ) + except Exception as e: + with util.disable_log_to_console(): + err_msg = messages.UPDATE_CHECK_CONTRACT_FAILURE.format( + reason=str(e) + ) + LOG.warning(err_msg) + return False + return True diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/timer/update_messaging.py ubuntu-advantage-tools-28.1~18.04/uaclient/timer/update_messaging.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/timer/update_messaging.py 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/timer/update_messaging.py 2023-06-01 18:49:33.000000000 +0000 @@ -0,0 +1,189 @@ +""" +Update messaging text for use in MOTD and APT custom Ubuntu Pro messages. + +Messaging files will be emitted to /var/lib/ubuntu-advantage/message-* which +will be sourced by apt-hook/hook.cc and various /etc/update-motd.d/ hooks to +present updated text about Ubuntu Pro service and token state. 
+""" + +import enum +import logging +import os +from os.path import exists +from typing import Tuple + +from uaclient import contract, defaults, messages, system +from uaclient.api.u.pro.packages.updates.v1 import ( + _updates as api_u_pro_packages_updates_v1, +) +from uaclient.api.u.pro.status.is_attached.v1 import _is_attached +from uaclient.config import UAConfig +from uaclient.entitlements import ESMAppsEntitlement, ESMInfraEntitlement +from uaclient.entitlements.entitlement_status import ApplicationStatus + +MOTD_CONTRACT_STATUS_FILE_NAME = "motd-contract-status" +UPDATE_NOTIFIER_MOTD_SCRIPT = ( + "/usr/lib/update-notifier/update-motd-updates-available" +) + + +@enum.unique +class ContractExpiryStatus(enum.Enum): + NONE = 0 + ACTIVE = 1 + ACTIVE_EXPIRED_SOON = 2 + EXPIRED_GRACE_PERIOD = 3 + EXPIRED = 4 + + +def update_contract_expiry(cfg: UAConfig): + orig_token = cfg.machine_token + machine_token = orig_token.get("machineToken", "") + contract_id = ( + orig_token.get("machineTokenInfo", {}) + .get("contractInfo", {}) + .get("id", None) + ) + contract_client = contract.UAContractClient(cfg) + resp = contract_client.get_contract_machine(machine_token, contract_id) + resp_expiry = ( + resp.get("machineTokenInfo", {}) + .get("contractInfo", {}) + .get("effectiveTo", None) + ) + if ( + resp_expiry is not None + and resp_expiry != cfg.machine_token_file.contract_expiry_datetime + ): + orig_token["machineTokenInfo"]["contractInfo"][ + "effectiveTo" + ] = resp_expiry + cfg.machine_token_file.write(orig_token) + + +def get_contract_expiry_status( + cfg: UAConfig, +) -> Tuple[ContractExpiryStatus, int]: + """Return a tuple [ContractExpiryStatus, num_days]""" + if not _is_attached(cfg).is_attached: + return ContractExpiryStatus.NONE, 0 + + grace_period = defaults.CONTRACT_EXPIRY_GRACE_PERIOD_DAYS + pending_expiry = defaults.CONTRACT_EXPIRY_PENDING_DAYS + remaining_days = cfg.machine_token_file.contract_remaining_days + + # if unknown assume the worst + if remaining_days is None: + logging.warning( + "contract effectiveTo date is null - assuming it is expired" + ) + return ContractExpiryStatus.EXPIRED, -grace_period + + if 0 <= remaining_days <= pending_expiry: + return ContractExpiryStatus.ACTIVE_EXPIRED_SOON, remaining_days + elif -grace_period <= remaining_days < 0: + return ContractExpiryStatus.EXPIRED_GRACE_PERIOD, remaining_days + elif remaining_days < -grace_period: + return ContractExpiryStatus.EXPIRED, remaining_days + return ContractExpiryStatus.ACTIVE, remaining_days + + +def update_motd_messages(cfg: UAConfig) -> bool: + """Emit human-readable status message used by motd. + + Used by /etc/update.motd.d/91-contract-ua-esm-status + + :param cfg: UAConfig instance for this environment. 
+ """ + if not _is_attached(cfg).is_attached: + return False + + logging.debug("Updating Ubuntu Pro messages for MOTD.") + motd_contract_status_msg_path = os.path.join( + cfg.data_dir, "messages", MOTD_CONTRACT_STATUS_FILE_NAME + ) + + expiry_status, remaining_days = get_contract_expiry_status(cfg) + if expiry_status in ( + ContractExpiryStatus.ACTIVE_EXPIRED_SOON, + ContractExpiryStatus.EXPIRED_GRACE_PERIOD, + ContractExpiryStatus.EXPIRED, + ): + update_contract_expiry(cfg) + expiry_status, remaining_days = get_contract_expiry_status(cfg) + + if expiry_status in ( + ContractExpiryStatus.ACTIVE, + ContractExpiryStatus.NONE, + ): + system.ensure_file_absent(motd_contract_status_msg_path) + elif expiry_status == ContractExpiryStatus.ACTIVE_EXPIRED_SOON: + system.write_file( + motd_contract_status_msg_path, + messages.CONTRACT_EXPIRES_SOON_MOTD.format( + remaining_days=remaining_days, + ), + ) + elif expiry_status == ContractExpiryStatus.EXPIRED_GRACE_PERIOD: + grace_period_remaining = ( + defaults.CONTRACT_EXPIRY_GRACE_PERIOD_DAYS + remaining_days + ) + exp_dt = cfg.machine_token_file.contract_expiry_datetime + if exp_dt is None: + exp_dt_str = "Unknown" + else: + exp_dt_str = exp_dt.strftime("%d %b %Y") + system.write_file( + motd_contract_status_msg_path, + messages.CONTRACT_EXPIRED_GRACE_PERIOD_MOTD.format( + expired_date=exp_dt_str, + remaining_days=grace_period_remaining, + ), + ) + elif expiry_status == ContractExpiryStatus.EXPIRED: + service = "n/a" + pkg_num = 0 + + if system.is_current_series_active_esm(): + esm_infra_status, _ = ESMInfraEntitlement(cfg).application_status() + if esm_infra_status == ApplicationStatus.ENABLED: + service = "esm-infra" + pkg_num = api_u_pro_packages_updates_v1( + cfg + ).summary.num_esm_infra_updates + elif system.is_current_series_lts(): + esm_apps_status, _ = ESMAppsEntitlement(cfg).application_status() + if esm_apps_status == ApplicationStatus.ENABLED: + service = "esm-apps" + pkg_num = api_u_pro_packages_updates_v1( + cfg + ).summary.num_esm_apps_updates + + if pkg_num == 0: + system.write_file( + motd_contract_status_msg_path, + messages.CONTRACT_EXPIRED_MOTD_NO_PKGS, + ) + else: + system.write_file( + motd_contract_status_msg_path, + messages.CONTRACT_EXPIRED_MOTD_PKGS.format( + pkg_num=pkg_num, + service=service, + ), + ) + + return True + + +def refresh_motd(): + # If update-notifier is present, we might as well update + # the package updates count related to MOTD + if exists(UPDATE_NOTIFIER_MOTD_SCRIPT): + # If this command fails, we shouldn't break the entire command, + # since this command should already be triggered by + # update-notifier apt hooks + try: + system.subp([UPDATE_NOTIFIER_MOTD_SCRIPT, "--force"]) + except Exception as exc: + logging.exception(exc) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/upgrade_lts_contract.py ubuntu-advantage-tools-28.1~18.04/uaclient/upgrade_lts_contract.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/upgrade_lts_contract.py 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/upgrade_lts_contract.py 2023-06-01 18:49:33.000000000 +0000 @@ -0,0 +1,104 @@ +#!/usr/bin/env python3 + +""" +This function is called from lib/upgrade_lts_contract.py and from +lib/reboot_cmds.py + +This function should be used after running do-release-upgrade in a machine. +It will detect any contract deltas between the release before +do-release-upgrade and the current release. 
If we find any differences in +the uaclient contract between those releases, we will apply that difference +in the upgraded release. + +For example, suppose we are on Trusty and we are upgrading to Xenial. We found +that the apt url for esm services on trusty: + +https://esm.ubuntu.com/ubuntu + +While on Xenial, the apt url is: + +https://esm.ubuntu.com/infra/ubuntu + +This script will detect differences like that and update the Xenial system +to reflect them. +""" + +import logging +import sys +import time + +from uaclient import contract, defaults, system +from uaclient.api.u.pro.status.is_attached.v1 import _is_attached +from uaclient.config import UAConfig + +# We consider the past release for LTSs to be the last LTS, +# because we don't have any services available on non-LTS. +# This makes it safer for us to try to process contract deltas. +# For example, we had "jammy": "focal" even when Impish was +# still supported. +current_codename_to_past_codename = { + "xenial": "trusty", + "bionic": "xenial", + "focal": "bionic", + "jammy": "focal", + "kinetic": "jammy", +} + + +def process_contract_delta_after_apt_lock(cfg: UAConfig) -> None: + logging.debug("Check whether to upgrade-lts-contract") + if not _is_attached(cfg).is_attached: + logging.debug("Skipping upgrade-lts-contract. Machine is unattached") + return + out, _err = system.subp(["lsof", "/var/lib/apt/lists/lock"], rcs=[0, 1]) + msg = "Starting upgrade-lts-contract." + if out: + msg += " Retrying every 10 seconds waiting on released apt lock" + print(msg) + logging.debug(msg) + + current_release = system.get_release_info().series + + past_release = current_codename_to_past_codename.get(current_release) + if past_release is None: + msg = "Could not find past release for: {}".format(current_release) + print(msg) + logging.warning(msg) + sys.exit(1) + + past_entitlements = UAConfig( + series=past_release, + ).machine_token_file.entitlements + new_entitlements = UAConfig( + series=current_release, + ).machine_token_file.entitlements + + retry_count = 0 + while out: + # Loop until apt hold is released at the end of `do-release-upgrade` + time.sleep(10) + out, _err = system.subp( + ["lsof", "/var/lib/apt/lists/lock"], rcs=[0, 1] + ) + retry_count += 1 + + msg = "upgrade-lts-contract processing contract deltas: {} -> {}".format( + past_release, current_release + ) + print(msg) + logging.debug(msg) + + contract.process_entitlements_delta( + cfg=cfg, + past_entitlements=past_entitlements, + new_entitlements=new_entitlements, + allow_enable=True, + series_overrides=False, + ) + msg = "upgrade-lts-contract succeeded after {} retries".format(retry_count) + print(msg) + logging.debug(msg) + + +def remove_private_esm_apt_cache(): + system.ensure_folder_absent(defaults.ESM_APT_ROOTDIR) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/util.py ubuntu-advantage-tools-28.1~18.04/uaclient/util.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/util.py 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/util.py 2023-06-01 18:49:33.000000000 +0000 @@ -30,7 +30,6 @@ class LogFormatter(logging.Formatter): - FORMATS = { logging.ERROR: "ERROR: %(message)s", logging.DEBUG: "DEBUG: %(message)s", @@ -544,6 +543,9 @@ message = message.replace(messages.OKGREEN_CHECK + " ", "") message = message.replace(messages.FAIL_X + " ", "") + # Now we remove any remaining unicode characters from the string + message = message.encode("ascii", "ignore").decode() + return message @@ -577,6 +579,7 @@ merge_id_key_map = { 
"availableResources": "name", "resourceEntitlements": "type", + "overrides": "selector", } values_to_append = [] id_key = merge_id_key_map.get(key) diff -Nru ubuntu-advantage-tools-27.14.4~18.04/uaclient/version.py ubuntu-advantage-tools-28.1~18.04/uaclient/version.py --- ubuntu-advantage-tools-27.14.4~18.04/uaclient/version.py 2023-04-06 13:49:20.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/uaclient/version.py 2023-06-27 00:49:37.000000000 +0000 @@ -15,7 +15,7 @@ from uaclient.exceptions import ProcessExecutionError from uaclient.system import subp -__VERSION__ = "27.14.4" +__VERSION__ = "28.1" PACKAGED_VERSION = "@@PACKAGED_VERSION@@" CANDIDATE_REGEX = r"Candidate: (?P.*?)\n" diff -Nru ubuntu-advantage-tools-27.14.4~18.04/ubuntu-advantage.1 ubuntu-advantage-tools-28.1~18.04/ubuntu-advantage.1 --- ubuntu-advantage-tools-27.14.4~18.04/ubuntu-advantage.1 2023-04-05 15:14:00.000000000 +0000 +++ ubuntu-advantage-tools-28.1~18.04/ubuntu-advantage.1 2023-06-01 18:49:33.000000000 +0000 @@ -143,7 +143,7 @@ Show version of the Ubuntu Pro package. .SH PRO UPGRADE DAEMON -Ubuntu Pro client sets up a daemon on supported platforms (currently GCP only) to +Ubuntu Pro client sets up a daemon on supported platforms (currently on Azure and GCP) to detect if an Ubuntu Pro license is purchased for the machine. If an Ubuntu Pro license is detected, then the machine is automatically attached. If you are uninterested in Ubuntu Pro services, you can safely stop and disable the