diff -Nru ubuntu-advantage-tools-30~23.10/.github/actions/checklists/action.yml ubuntu-advantage-tools-31.2~23.10/.github/actions/checklists/action.yml --- ubuntu-advantage-tools-30~23.10/.github/actions/checklists/action.yml 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/.github/actions/checklists/action.yml 2024-01-18 17:34:13.000000000 +0000 @@ -0,0 +1,9 @@ +name: 'Custom Checklists' +description: 'Add context-specific checklists to the PR' +inputs: + repo-token: + description: 'Token for the repository. Can be passed in using {{ secrets.GITHUB_TOKEN }}' + required: true +runs: + using: 'node16' + main: 'index.js' diff -Nru ubuntu-advantage-tools-30~23.10/.github/actions/checklists/index.js ubuntu-advantage-tools-31.2~23.10/.github/actions/checklists/index.js --- ubuntu-advantage-tools-30~23.10/.github/actions/checklists/index.js 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/.github/actions/checklists/index.js 2024-01-18 17:34:13.000000000 +0000 @@ -0,0 +1,75 @@ +const core = require('@actions/core'); +const github = require('@actions/github'); + +const commentHeader = (listName) => ``; +const messageChangesCommentHeader = commentHeader("message-changes"); + +function createMessageChecklistCommentBody() { + return `${messageChangesCommentHeader} +🌎 This PR changes translatable messages. 🌏 + +Please select which scenarios apply. For further explanation, please read our [policy on message changes](https://github.com/canonical/ubuntu-pro-client/blob/docs/dev-docs/explanations/string_changes_policy.md). +- [ ] New messages are being added. + - We will ask translators to take a look and add translations if they have time, but it will not block this PR. +- [ ] Existing messages are being changed. + - ⚠️ Please add a comment with justification of why messages are being altered. + - If the changes are trivial (e.g. a typo fix), then translations must be preserved. + - If the changes are substantial, then we will ask translators to take a look and update translations if they have time, but it will not block this PR. +- [ ] Existing messages are being deleted. + - No special action needed. +` +} + +async function run() { + const context = github.context; + if (context.eventName !== "pull_request") { + console.log( + 'The event that triggered this action was not a pull request, skipping.' + ); + return; + } + + const client = github.getOctokit( + core.getInput('repo-token', {required: true}) + ); + + const files = await client.paginate(client.rest.pulls.listFiles, { + owner: context.issue.owner, + repo: context.issue.repo, + pull_number: context.issue.number, + }); + const comments = await client.rest.issues.listComments({ + owner: context.issue.owner, + repo: context.issue.repo, + issue_number: context.issue.number, + }); + + const filenames = files.map(f => f.filename); + + const modifiesMessages = filenames.includes("uaclient/messages/__init__.py") + const theComment = comments.data.find(c => c.body.includes(messageChangesCommentHeader)); + + console.log({modifiesMessages, commentExists: !!theComment}); + + if (theComment && !modifiesMessages) { + console.log("The comment already exists, but the PR no longer modifies messages. Deleting the comment.") + client.rest.issues.deleteComment({ + owner: context.issue.owner, + repo: context.issue.repo, + comment_id: theComment.id + }); + } else if (!theComment && modifiesMessages) { + console.log("The comment doesn't exist, but the PR modifies messages. 
Creating the comment.") + client.rest.issues.createComment({ + owner: context.issue.owner, + repo: context.issue.repo, + issue_number: context.issue.number, + body: createMessageChecklistCommentBody(), + }); + } +} + +run().catch(error => { + console.error(error); + core.setFailed(error.message); +}) diff -Nru ubuntu-advantage-tools-30~23.10/.github/actions/checklists/package-lock.json ubuntu-advantage-tools-31.2~23.10/.github/actions/checklists/package-lock.json --- ubuntu-advantage-tools-30~23.10/.github/actions/checklists/package-lock.json 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/.github/actions/checklists/package-lock.json 2024-01-18 17:34:13.000000000 +0000 @@ -0,0 +1,430 @@ +{ + "name": "bug-refs", + "version": "1.0.0", + "lockfileVersion": 2, + "requires": true, + "packages": { + "": { + "name": "bug-refs", + "version": "1.0.0", + "dependencies": { + "@actions/core": "^1.10.0", + "@actions/github": "^5.1.1" + } + }, + "node_modules/@actions/core": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@actions/core/-/core-1.10.0.tgz", + "integrity": "sha512-2aZDDa3zrrZbP5ZYg159sNoLRb61nQ7awl5pSvIq5Qpj81vwDzdMRKzkWJGJuwVvWpvZKx7vspJALyvaaIQyug==", + "dependencies": { + "@actions/http-client": "^2.0.1", + "uuid": "^8.3.2" + } + }, + "node_modules/@actions/github": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/@actions/github/-/github-5.1.1.tgz", + "integrity": "sha512-Nk59rMDoJaV+mHCOJPXuvB1zIbomlKS0dmSIqPGxd0enAXBnOfn4VWF+CGtRCwXZG9Epa54tZA7VIRlJDS8A6g==", + "dependencies": { + "@actions/http-client": "^2.0.1", + "@octokit/core": "^3.6.0", + "@octokit/plugin-paginate-rest": "^2.17.0", + "@octokit/plugin-rest-endpoint-methods": "^5.13.0" + } + }, + "node_modules/@actions/http-client": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@actions/http-client/-/http-client-2.1.0.tgz", + "integrity": "sha512-BonhODnXr3amchh4qkmjPMUO8mFi/zLaaCeCAJZqch8iQqyDnVIkySjB38VHAC8IJ+bnlgfOqlhpyCUZHlQsqw==", + "dependencies": { + "tunnel": "^0.0.6" + } + }, + "node_modules/@octokit/auth-token": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@octokit/auth-token/-/auth-token-2.5.0.tgz", + "integrity": "sha512-r5FVUJCOLl19AxiuZD2VRZ/ORjp/4IN98Of6YJoJOkY75CIBuYfmiNHGrDwXr+aLGG55igl9QrxX3hbiXlLb+g==", + "dependencies": { + "@octokit/types": "^6.0.3" + } + }, + "node_modules/@octokit/core": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/@octokit/core/-/core-3.6.0.tgz", + "integrity": "sha512-7RKRKuA4xTjMhY+eG3jthb3hlZCsOwg3rztWh75Xc+ShDWOfDDATWbeZpAHBNRpm4Tv9WgBMOy1zEJYXG6NJ7Q==", + "dependencies": { + "@octokit/auth-token": "^2.4.4", + "@octokit/graphql": "^4.5.8", + "@octokit/request": "^5.6.3", + "@octokit/request-error": "^2.0.5", + "@octokit/types": "^6.0.3", + "before-after-hook": "^2.2.0", + "universal-user-agent": "^6.0.0" + } + }, + "node_modules/@octokit/endpoint": { + "version": "6.0.12", + "resolved": "https://registry.npmjs.org/@octokit/endpoint/-/endpoint-6.0.12.tgz", + "integrity": "sha512-lF3puPwkQWGfkMClXb4k/eUT/nZKQfxinRWJrdZaJO85Dqwo/G0yOC434Jr2ojwafWJMYqFGFa5ms4jJUgujdA==", + "dependencies": { + "@octokit/types": "^6.0.3", + "is-plain-object": "^5.0.0", + "universal-user-agent": "^6.0.0" + } + }, + "node_modules/@octokit/graphql": { + "version": "4.8.0", + "resolved": "https://registry.npmjs.org/@octokit/graphql/-/graphql-4.8.0.tgz", + "integrity": "sha512-0gv+qLSBLKF0z8TKaSKTsS39scVKF9dbMxJpj3U0vC7wjNWFuIpL/z76Qe2fiuCbDRcJSavkXsVtMS6/dtQQsg==", + "dependencies": { + 
"@octokit/request": "^5.6.0", + "@octokit/types": "^6.0.3", + "universal-user-agent": "^6.0.0" + } + }, + "node_modules/@octokit/openapi-types": { + "version": "12.11.0", + "resolved": "https://registry.npmjs.org/@octokit/openapi-types/-/openapi-types-12.11.0.tgz", + "integrity": "sha512-VsXyi8peyRq9PqIz/tpqiL2w3w80OgVMwBHltTml3LmVvXiphgeqmY9mvBw9Wu7e0QWk/fqD37ux8yP5uVekyQ==" + }, + "node_modules/@octokit/plugin-paginate-rest": { + "version": "2.21.3", + "resolved": "https://registry.npmjs.org/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-2.21.3.tgz", + "integrity": "sha512-aCZTEf0y2h3OLbrgKkrfFdjRL6eSOo8komneVQJnYecAxIej7Bafor2xhuDJOIFau4pk0i/P28/XgtbyPF0ZHw==", + "dependencies": { + "@octokit/types": "^6.40.0" + }, + "peerDependencies": { + "@octokit/core": ">=2" + } + }, + "node_modules/@octokit/plugin-rest-endpoint-methods": { + "version": "5.16.2", + "resolved": "https://registry.npmjs.org/@octokit/plugin-rest-endpoint-methods/-/plugin-rest-endpoint-methods-5.16.2.tgz", + "integrity": "sha512-8QFz29Fg5jDuTPXVtey05BLm7OB+M8fnvE64RNegzX7U+5NUXcOcnpTIK0YfSHBg8gYd0oxIq3IZTe9SfPZiRw==", + "dependencies": { + "@octokit/types": "^6.39.0", + "deprecation": "^2.3.1" + }, + "peerDependencies": { + "@octokit/core": ">=3" + } + }, + "node_modules/@octokit/request": { + "version": "5.6.3", + "resolved": "https://registry.npmjs.org/@octokit/request/-/request-5.6.3.tgz", + "integrity": "sha512-bFJl0I1KVc9jYTe9tdGGpAMPy32dLBXXo1dS/YwSCTL/2nd9XeHsY616RE3HPXDVk+a+dBuzyz5YdlXwcDTr2A==", + "dependencies": { + "@octokit/endpoint": "^6.0.1", + "@octokit/request-error": "^2.1.0", + "@octokit/types": "^6.16.1", + "is-plain-object": "^5.0.0", + "node-fetch": "^2.6.7", + "universal-user-agent": "^6.0.0" + } + }, + "node_modules/@octokit/request-error": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@octokit/request-error/-/request-error-2.1.0.tgz", + "integrity": "sha512-1VIvgXxs9WHSjicsRwq8PlR2LR2x6DwsJAaFgzdi0JfJoGSO8mYI/cHJQ+9FbN21aa+DrgNLnwObmyeSC8Rmpg==", + "dependencies": { + "@octokit/types": "^6.0.3", + "deprecation": "^2.0.0", + "once": "^1.4.0" + } + }, + "node_modules/@octokit/types": { + "version": "6.41.0", + "resolved": "https://registry.npmjs.org/@octokit/types/-/types-6.41.0.tgz", + "integrity": "sha512-eJ2jbzjdijiL3B4PrSQaSjuF2sPEQPVCPzBvTHJD9Nz+9dw2SGH4K4xeQJ77YfTq5bRQ+bD8wT11JbeDPmxmGg==", + "dependencies": { + "@octokit/openapi-types": "^12.11.0" + } + }, + "node_modules/before-after-hook": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-2.2.3.tgz", + "integrity": "sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ==" + }, + "node_modules/deprecation": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/deprecation/-/deprecation-2.3.1.tgz", + "integrity": "sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ==" + }, + "node_modules/is-plain-object": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz", + "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/node-fetch": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.9.tgz", + "integrity": "sha512-DJm/CJkZkRjKKj4Zi4BsKVZh3ValV5IR5s7LVZnW+6YMh0W1BfNA8XSs6DLMGYlId5F3KnA70uu2qepcR08Qqg==", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + 
"engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" + }, + "node_modules/tunnel": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/tunnel/-/tunnel-0.0.6.tgz", + "integrity": "sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg==", + "engines": { + "node": ">=0.6.11 <=0.7.0 || >=0.7.3" + } + }, + "node_modules/universal-user-agent": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-6.0.0.tgz", + "integrity": "sha512-isyNax3wXoKaulPDZWHQqbmIx1k2tb9fb3GGDBRxCscfYV2Ch7WxPArBsFEG8s/safwXTT7H4QGhaIkTp9447w==" + }, + "node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" + } + }, + "dependencies": { + "@actions/core": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@actions/core/-/core-1.10.0.tgz", + "integrity": "sha512-2aZDDa3zrrZbP5ZYg159sNoLRb61nQ7awl5pSvIq5Qpj81vwDzdMRKzkWJGJuwVvWpvZKx7vspJALyvaaIQyug==", + "requires": { + "@actions/http-client": "^2.0.1", + "uuid": "^8.3.2" + } + }, + "@actions/github": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/@actions/github/-/github-5.1.1.tgz", + "integrity": "sha512-Nk59rMDoJaV+mHCOJPXuvB1zIbomlKS0dmSIqPGxd0enAXBnOfn4VWF+CGtRCwXZG9Epa54tZA7VIRlJDS8A6g==", + "requires": { + "@actions/http-client": "^2.0.1", + "@octokit/core": "^3.6.0", + "@octokit/plugin-paginate-rest": "^2.17.0", + "@octokit/plugin-rest-endpoint-methods": "^5.13.0" + } + }, + "@actions/http-client": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@actions/http-client/-/http-client-2.1.0.tgz", + "integrity": "sha512-BonhODnXr3amchh4qkmjPMUO8mFi/zLaaCeCAJZqch8iQqyDnVIkySjB38VHAC8IJ+bnlgfOqlhpyCUZHlQsqw==", + "requires": { + "tunnel": "^0.0.6" + } + }, + "@octokit/auth-token": { + "version": "2.5.0", + "resolved": 
"https://registry.npmjs.org/@octokit/auth-token/-/auth-token-2.5.0.tgz", + "integrity": "sha512-r5FVUJCOLl19AxiuZD2VRZ/ORjp/4IN98Of6YJoJOkY75CIBuYfmiNHGrDwXr+aLGG55igl9QrxX3hbiXlLb+g==", + "requires": { + "@octokit/types": "^6.0.3" + } + }, + "@octokit/core": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/@octokit/core/-/core-3.6.0.tgz", + "integrity": "sha512-7RKRKuA4xTjMhY+eG3jthb3hlZCsOwg3rztWh75Xc+ShDWOfDDATWbeZpAHBNRpm4Tv9WgBMOy1zEJYXG6NJ7Q==", + "requires": { + "@octokit/auth-token": "^2.4.4", + "@octokit/graphql": "^4.5.8", + "@octokit/request": "^5.6.3", + "@octokit/request-error": "^2.0.5", + "@octokit/types": "^6.0.3", + "before-after-hook": "^2.2.0", + "universal-user-agent": "^6.0.0" + } + }, + "@octokit/endpoint": { + "version": "6.0.12", + "resolved": "https://registry.npmjs.org/@octokit/endpoint/-/endpoint-6.0.12.tgz", + "integrity": "sha512-lF3puPwkQWGfkMClXb4k/eUT/nZKQfxinRWJrdZaJO85Dqwo/G0yOC434Jr2ojwafWJMYqFGFa5ms4jJUgujdA==", + "requires": { + "@octokit/types": "^6.0.3", + "is-plain-object": "^5.0.0", + "universal-user-agent": "^6.0.0" + } + }, + "@octokit/graphql": { + "version": "4.8.0", + "resolved": "https://registry.npmjs.org/@octokit/graphql/-/graphql-4.8.0.tgz", + "integrity": "sha512-0gv+qLSBLKF0z8TKaSKTsS39scVKF9dbMxJpj3U0vC7wjNWFuIpL/z76Qe2fiuCbDRcJSavkXsVtMS6/dtQQsg==", + "requires": { + "@octokit/request": "^5.6.0", + "@octokit/types": "^6.0.3", + "universal-user-agent": "^6.0.0" + } + }, + "@octokit/openapi-types": { + "version": "12.11.0", + "resolved": "https://registry.npmjs.org/@octokit/openapi-types/-/openapi-types-12.11.0.tgz", + "integrity": "sha512-VsXyi8peyRq9PqIz/tpqiL2w3w80OgVMwBHltTml3LmVvXiphgeqmY9mvBw9Wu7e0QWk/fqD37ux8yP5uVekyQ==" + }, + "@octokit/plugin-paginate-rest": { + "version": "2.21.3", + "resolved": "https://registry.npmjs.org/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-2.21.3.tgz", + "integrity": "sha512-aCZTEf0y2h3OLbrgKkrfFdjRL6eSOo8komneVQJnYecAxIej7Bafor2xhuDJOIFau4pk0i/P28/XgtbyPF0ZHw==", + "requires": { + "@octokit/types": "^6.40.0" + } + }, + "@octokit/plugin-rest-endpoint-methods": { + "version": "5.16.2", + "resolved": "https://registry.npmjs.org/@octokit/plugin-rest-endpoint-methods/-/plugin-rest-endpoint-methods-5.16.2.tgz", + "integrity": "sha512-8QFz29Fg5jDuTPXVtey05BLm7OB+M8fnvE64RNegzX7U+5NUXcOcnpTIK0YfSHBg8gYd0oxIq3IZTe9SfPZiRw==", + "requires": { + "@octokit/types": "^6.39.0", + "deprecation": "^2.3.1" + } + }, + "@octokit/request": { + "version": "5.6.3", + "resolved": "https://registry.npmjs.org/@octokit/request/-/request-5.6.3.tgz", + "integrity": "sha512-bFJl0I1KVc9jYTe9tdGGpAMPy32dLBXXo1dS/YwSCTL/2nd9XeHsY616RE3HPXDVk+a+dBuzyz5YdlXwcDTr2A==", + "requires": { + "@octokit/endpoint": "^6.0.1", + "@octokit/request-error": "^2.1.0", + "@octokit/types": "^6.16.1", + "is-plain-object": "^5.0.0", + "node-fetch": "^2.6.7", + "universal-user-agent": "^6.0.0" + } + }, + "@octokit/request-error": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@octokit/request-error/-/request-error-2.1.0.tgz", + "integrity": "sha512-1VIvgXxs9WHSjicsRwq8PlR2LR2x6DwsJAaFgzdi0JfJoGSO8mYI/cHJQ+9FbN21aa+DrgNLnwObmyeSC8Rmpg==", + "requires": { + "@octokit/types": "^6.0.3", + "deprecation": "^2.0.0", + "once": "^1.4.0" + } + }, + "@octokit/types": { + "version": "6.41.0", + "resolved": "https://registry.npmjs.org/@octokit/types/-/types-6.41.0.tgz", + "integrity": "sha512-eJ2jbzjdijiL3B4PrSQaSjuF2sPEQPVCPzBvTHJD9Nz+9dw2SGH4K4xeQJ77YfTq5bRQ+bD8wT11JbeDPmxmGg==", + "requires": { + 
"@octokit/openapi-types": "^12.11.0" + } + }, + "before-after-hook": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-2.2.3.tgz", + "integrity": "sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ==" + }, + "deprecation": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/deprecation/-/deprecation-2.3.1.tgz", + "integrity": "sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ==" + }, + "is-plain-object": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz", + "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==" + }, + "node-fetch": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.9.tgz", + "integrity": "sha512-DJm/CJkZkRjKKj4Zi4BsKVZh3ValV5IR5s7LVZnW+6YMh0W1BfNA8XSs6DLMGYlId5F3KnA70uu2qepcR08Qqg==", + "requires": { + "whatwg-url": "^5.0.0" + } + }, + "once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "requires": { + "wrappy": "1" + } + }, + "tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" + }, + "tunnel": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/tunnel/-/tunnel-0.0.6.tgz", + "integrity": "sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg==" + }, + "universal-user-agent": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-6.0.0.tgz", + "integrity": "sha512-isyNax3wXoKaulPDZWHQqbmIx1k2tb9fb3GGDBRxCscfYV2Ch7WxPArBsFEG8s/safwXTT7H4QGhaIkTp9447w==" + }, + "uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==" + }, + "webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" + }, + "whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "requires": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" + } + } +} diff -Nru ubuntu-advantage-tools-30~23.10/.github/actions/checklists/package.json ubuntu-advantage-tools-31.2~23.10/.github/actions/checklists/package.json --- ubuntu-advantage-tools-30~23.10/.github/actions/checklists/package.json 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/.github/actions/checklists/package.json 2024-01-18 17:34:13.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "name": "checklists", + "version": "1.0.0", + "description": "Add context-specific checklists 
to the PR", + "main": "index.js", + "dependencies": { + "@actions/core": "^1.10.0", + "@actions/github": "^5.1.1" + } +} diff -Nru ubuntu-advantage-tools-30~23.10/.github/actions/create-issue/action.yaml ubuntu-advantage-tools-31.2~23.10/.github/actions/create-issue/action.yaml --- ubuntu-advantage-tools-30~23.10/.github/actions/create-issue/action.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/.github/actions/create-issue/action.yaml 2024-01-18 17:34:13.000000000 +0000 @@ -0,0 +1,9 @@ +name: 'Create issues on PR merge' +description: 'Looks for "Breaks:" in a PR comment and opens an issue when found' +inputs: + repo-token: + description: 'Token for the repository. Can be passed in using {{ secrets.GITHUB_TOKEN }}' + required: true +runs: + using: 'node16' + main: 'index.js' diff -Nru ubuntu-advantage-tools-30~23.10/.github/actions/create-issue/index.js ubuntu-advantage-tools-31.2~23.10/.github/actions/create-issue/index.js --- ubuntu-advantage-tools-30~23.10/.github/actions/create-issue/index.js 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/.github/actions/create-issue/index.js 2024-01-18 17:34:13.000000000 +0000 @@ -0,0 +1,38 @@ +const core = require('@actions/core'); +const github = require('@actions/github'); + +async function run() { + const context = github.context; + if (context.eventName !== "pull_request") { + console.log( + 'The event that triggered this action was not a pull request, skipping.' + ); + return; + } + + const client = github.getOctokit( + core.getInput('repo-token', {required: true}) + ); + + const prNumber = context.issue.number + const comments = await client.rest.issues.listComments({ + owner: context.issue.owner, + repo: context.issue.repo, + issue_number: prNumber + }); + + issues = comments.data.filter(item => item.body.toLowerCase().startsWith("breaks: ")) + for (issue of issues) { + await client.rest.issues.create({ + owner: context.issue.owner, + repo: context.issue.repo, + title: issue.body.replace(/^[Bb]reaks: /, ""), + body: `This issue was created from a comment in Pull Request #${prNumber}.` + }) + } +} + +run().catch(error => { + console.error(error); + core.setFailed(error.message); +}) diff -Nru ubuntu-advantage-tools-30~23.10/.github/actions/create-issue/package-lock.json ubuntu-advantage-tools-31.2~23.10/.github/actions/create-issue/package-lock.json --- ubuntu-advantage-tools-30~23.10/.github/actions/create-issue/package-lock.json 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/.github/actions/create-issue/package-lock.json 2024-01-18 17:34:13.000000000 +0000 @@ -0,0 +1,414 @@ +{ + "name": "create-issue", + "version": "1.0.0", + "lockfileVersion": 2, + "requires": true, + "packages": { + "": { + "name": "create-issue", + "version": "1.0.0", + "license": "ISC", + "dependencies": { + "@actions/core": "^1.10.1", + "@actions/github": "^6.0.0" + } + }, + "node_modules/@actions/core": { + "version": "1.10.1", + "resolved": "https://registry.npmjs.org/@actions/core/-/core-1.10.1.tgz", + "integrity": "sha512-3lBR9EDAY+iYIpTnTIXmWcNbX3T2kCkAEQGIQx4NVQ0575nk2k3GRZDTPQG+vVtS2izSLmINlxXf0uLtnrTP+g==", + "dependencies": { + "@actions/http-client": "^2.0.1", + "uuid": "^8.3.2" + } + }, + "node_modules/@actions/github": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/@actions/github/-/github-6.0.0.tgz", + "integrity": "sha512-alScpSVnYmjNEXboZjarjukQEzgCRmjMv6Xj47fsdnqGS73bjJNDpiiXmp8jr0UZLdUB6d9jW63IcmddUP+l0g==", + "dependencies": { + 
"@actions/http-client": "^2.2.0", + "@octokit/core": "^5.0.1", + "@octokit/plugin-paginate-rest": "^9.0.0", + "@octokit/plugin-rest-endpoint-methods": "^10.0.0" + } + }, + "node_modules/@actions/http-client": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/@actions/http-client/-/http-client-2.2.0.tgz", + "integrity": "sha512-q+epW0trjVUUHboliPb4UF9g2msf+w61b32tAkFEwL/IwP0DQWgbCMM0Hbe3e3WXSKz5VcUXbzJQgy8Hkra/Lg==", + "dependencies": { + "tunnel": "^0.0.6", + "undici": "^5.25.4" + } + }, + "node_modules/@fastify/busboy": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@fastify/busboy/-/busboy-2.0.0.tgz", + "integrity": "sha512-JUFJad5lv7jxj926GPgymrWQxxjPYuJNiNjNMzqT+HiuP6Vl3dk5xzG+8sTX96np0ZAluvaMzPsjhHZ5rNuNQQ==", + "engines": { + "node": ">=14" + } + }, + "node_modules/@octokit/auth-token": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@octokit/auth-token/-/auth-token-4.0.0.tgz", + "integrity": "sha512-tY/msAuJo6ARbK6SPIxZrPBms3xPbfwBrulZe0Wtr/DIY9lje2HeV1uoebShn6mx7SjCHif6EjMvoREj+gZ+SA==", + "engines": { + "node": ">= 18" + } + }, + "node_modules/@octokit/core": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/@octokit/core/-/core-5.0.1.tgz", + "integrity": "sha512-lyeeeZyESFo+ffI801SaBKmCfsvarO+dgV8/0gD8u1d87clbEdWsP5yC+dSj3zLhb2eIf5SJrn6vDz9AheETHw==", + "dependencies": { + "@octokit/auth-token": "^4.0.0", + "@octokit/graphql": "^7.0.0", + "@octokit/request": "^8.0.2", + "@octokit/request-error": "^5.0.0", + "@octokit/types": "^12.0.0", + "before-after-hook": "^2.2.0", + "universal-user-agent": "^6.0.0" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/@octokit/endpoint": { + "version": "9.0.2", + "resolved": "https://registry.npmjs.org/@octokit/endpoint/-/endpoint-9.0.2.tgz", + "integrity": "sha512-qhKW8YLIi+Kmc92FQUFGr++DYtkx/1fBv+Thua6baqnjnOsgBYJDCvWZR1YcINuHGOEQt416WOfE+A/oG60NBQ==", + "dependencies": { + "@octokit/types": "^12.0.0", + "is-plain-object": "^5.0.0", + "universal-user-agent": "^6.0.0" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/@octokit/graphql": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/@octokit/graphql/-/graphql-7.0.2.tgz", + "integrity": "sha512-OJ2iGMtj5Tg3s6RaXH22cJcxXRi7Y3EBqbHTBRq+PQAqfaS8f/236fUrWhfSn8P4jovyzqucxme7/vWSSZBX2Q==", + "dependencies": { + "@octokit/request": "^8.0.1", + "@octokit/types": "^12.0.0", + "universal-user-agent": "^6.0.0" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/@octokit/openapi-types": { + "version": "19.0.2", + "resolved": "https://registry.npmjs.org/@octokit/openapi-types/-/openapi-types-19.0.2.tgz", + "integrity": "sha512-8li32fUDUeml/ACRp/njCWTsk5t17cfTM1jp9n08pBrqs5cDFJubtjsSnuz56r5Tad6jdEPJld7LxNp9dNcyjQ==" + }, + "node_modules/@octokit/plugin-paginate-rest": { + "version": "9.1.2", + "resolved": "https://registry.npmjs.org/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-9.1.2.tgz", + "integrity": "sha512-euDbNV6fxX6btsCDnZoZM4vw3zO1nj1Z7TskHAulO6mZ9lHoFTpwll6farf+wh31mlBabgU81bBYdflp0GLVAQ==", + "dependencies": { + "@octokit/types": "^12.1.1" + }, + "engines": { + "node": ">= 18" + }, + "peerDependencies": { + "@octokit/core": ">=5" + } + }, + "node_modules/@octokit/plugin-rest-endpoint-methods": { + "version": "10.1.2", + "resolved": "https://registry.npmjs.org/@octokit/plugin-rest-endpoint-methods/-/plugin-rest-endpoint-methods-10.1.2.tgz", + "integrity": "sha512-JztgZ82CY4JNlPTuF0jh4iWuuGpEi5czFCoXyAbMg4F2XyFBbG5DWAKfa3odRvdZww6Df1tQgBKnqpd9X0WF9g==", + "dependencies": { 
+ "@octokit/types": "^12.1.1" + }, + "engines": { + "node": ">= 18" + }, + "peerDependencies": { + "@octokit/core": ">=5" + } + }, + "node_modules/@octokit/request": { + "version": "8.1.4", + "resolved": "https://registry.npmjs.org/@octokit/request/-/request-8.1.4.tgz", + "integrity": "sha512-M0aaFfpGPEKrg7XoA/gwgRvc9MSXHRO2Ioki1qrPDbl1e9YhjIwVoHE7HIKmv/m3idzldj//xBujcFNqGX6ENA==", + "dependencies": { + "@octokit/endpoint": "^9.0.0", + "@octokit/request-error": "^5.0.0", + "@octokit/types": "^12.0.0", + "is-plain-object": "^5.0.0", + "universal-user-agent": "^6.0.0" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/@octokit/request-error": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/@octokit/request-error/-/request-error-5.0.1.tgz", + "integrity": "sha512-X7pnyTMV7MgtGmiXBwmO6M5kIPrntOXdyKZLigNfQWSEQzVxR4a4vo49vJjTWX70mPndj8KhfT4Dx+2Ng3vnBQ==", + "dependencies": { + "@octokit/types": "^12.0.0", + "deprecation": "^2.0.0", + "once": "^1.4.0" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/@octokit/types": { + "version": "12.2.0", + "resolved": "https://registry.npmjs.org/@octokit/types/-/types-12.2.0.tgz", + "integrity": "sha512-ZkdHqHJdifVndN7Pha10+qrgAjy3AcG//Vmjr/o5UFuTiYCcMhqDj39Yr9VM9zJ/42KO2xAYhV7cvLnLI9Kvwg==", + "dependencies": { + "@octokit/openapi-types": "^19.0.2" + } + }, + "node_modules/before-after-hook": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-2.2.3.tgz", + "integrity": "sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ==" + }, + "node_modules/deprecation": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/deprecation/-/deprecation-2.3.1.tgz", + "integrity": "sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ==" + }, + "node_modules/is-plain-object": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz", + "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/tunnel": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/tunnel/-/tunnel-0.0.6.tgz", + "integrity": "sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg==", + "engines": { + "node": ">=0.6.11 <=0.7.0 || >=0.7.3" + } + }, + "node_modules/undici": { + "version": "5.27.2", + "resolved": "https://registry.npmjs.org/undici/-/undici-5.27.2.tgz", + "integrity": "sha512-iS857PdOEy/y3wlM3yRp+6SNQQ6xU0mmZcwRSriqk+et/cwWAtwmIGf6WkoDN2EK/AMdCO/dfXzIwi+rFMrjjQ==", + "dependencies": { + "@fastify/busboy": "^2.0.0" + }, + "engines": { + "node": ">=14.0" + } + }, + "node_modules/universal-user-agent": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-6.0.1.tgz", + "integrity": "sha512-yCzhz6FN2wU1NiiQRogkTQszlQSlpWaw8SvVegAc+bDxbzHgh1vX8uIe8OYyMH6DwH+sdTJsgMl36+mSMdRJIQ==" + }, + "node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": 
"sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" + } + }, + "dependencies": { + "@actions/core": { + "version": "1.10.1", + "resolved": "https://registry.npmjs.org/@actions/core/-/core-1.10.1.tgz", + "integrity": "sha512-3lBR9EDAY+iYIpTnTIXmWcNbX3T2kCkAEQGIQx4NVQ0575nk2k3GRZDTPQG+vVtS2izSLmINlxXf0uLtnrTP+g==", + "requires": { + "@actions/http-client": "^2.0.1", + "uuid": "^8.3.2" + } + }, + "@actions/github": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/@actions/github/-/github-6.0.0.tgz", + "integrity": "sha512-alScpSVnYmjNEXboZjarjukQEzgCRmjMv6Xj47fsdnqGS73bjJNDpiiXmp8jr0UZLdUB6d9jW63IcmddUP+l0g==", + "requires": { + "@actions/http-client": "^2.2.0", + "@octokit/core": "^5.0.1", + "@octokit/plugin-paginate-rest": "^9.0.0", + "@octokit/plugin-rest-endpoint-methods": "^10.0.0" + } + }, + "@actions/http-client": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/@actions/http-client/-/http-client-2.2.0.tgz", + "integrity": "sha512-q+epW0trjVUUHboliPb4UF9g2msf+w61b32tAkFEwL/IwP0DQWgbCMM0Hbe3e3WXSKz5VcUXbzJQgy8Hkra/Lg==", + "requires": { + "tunnel": "^0.0.6", + "undici": "^5.25.4" + } + }, + "@fastify/busboy": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@fastify/busboy/-/busboy-2.0.0.tgz", + "integrity": "sha512-JUFJad5lv7jxj926GPgymrWQxxjPYuJNiNjNMzqT+HiuP6Vl3dk5xzG+8sTX96np0ZAluvaMzPsjhHZ5rNuNQQ==" + }, + "@octokit/auth-token": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@octokit/auth-token/-/auth-token-4.0.0.tgz", + "integrity": "sha512-tY/msAuJo6ARbK6SPIxZrPBms3xPbfwBrulZe0Wtr/DIY9lje2HeV1uoebShn6mx7SjCHif6EjMvoREj+gZ+SA==" + }, + "@octokit/core": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/@octokit/core/-/core-5.0.1.tgz", + "integrity": "sha512-lyeeeZyESFo+ffI801SaBKmCfsvarO+dgV8/0gD8u1d87clbEdWsP5yC+dSj3zLhb2eIf5SJrn6vDz9AheETHw==", + "requires": { + "@octokit/auth-token": "^4.0.0", + "@octokit/graphql": "^7.0.0", + "@octokit/request": "^8.0.2", + "@octokit/request-error": "^5.0.0", + "@octokit/types": "^12.0.0", + "before-after-hook": "^2.2.0", + "universal-user-agent": "^6.0.0" + } + }, + "@octokit/endpoint": { + "version": "9.0.2", + "resolved": "https://registry.npmjs.org/@octokit/endpoint/-/endpoint-9.0.2.tgz", + "integrity": "sha512-qhKW8YLIi+Kmc92FQUFGr++DYtkx/1fBv+Thua6baqnjnOsgBYJDCvWZR1YcINuHGOEQt416WOfE+A/oG60NBQ==", + "requires": { + "@octokit/types": "^12.0.0", + "is-plain-object": "^5.0.0", + "universal-user-agent": "^6.0.0" + } + }, + "@octokit/graphql": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/@octokit/graphql/-/graphql-7.0.2.tgz", + "integrity": "sha512-OJ2iGMtj5Tg3s6RaXH22cJcxXRi7Y3EBqbHTBRq+PQAqfaS8f/236fUrWhfSn8P4jovyzqucxme7/vWSSZBX2Q==", + "requires": { + "@octokit/request": "^8.0.1", + "@octokit/types": "^12.0.0", + "universal-user-agent": "^6.0.0" + } + }, + "@octokit/openapi-types": { + "version": "19.0.2", + "resolved": "https://registry.npmjs.org/@octokit/openapi-types/-/openapi-types-19.0.2.tgz", + "integrity": "sha512-8li32fUDUeml/ACRp/njCWTsk5t17cfTM1jp9n08pBrqs5cDFJubtjsSnuz56r5Tad6jdEPJld7LxNp9dNcyjQ==" + }, + "@octokit/plugin-paginate-rest": { + "version": "9.1.2", + "resolved": 
"https://registry.npmjs.org/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-9.1.2.tgz", + "integrity": "sha512-euDbNV6fxX6btsCDnZoZM4vw3zO1nj1Z7TskHAulO6mZ9lHoFTpwll6farf+wh31mlBabgU81bBYdflp0GLVAQ==", + "requires": { + "@octokit/types": "^12.1.1" + } + }, + "@octokit/plugin-rest-endpoint-methods": { + "version": "10.1.2", + "resolved": "https://registry.npmjs.org/@octokit/plugin-rest-endpoint-methods/-/plugin-rest-endpoint-methods-10.1.2.tgz", + "integrity": "sha512-JztgZ82CY4JNlPTuF0jh4iWuuGpEi5czFCoXyAbMg4F2XyFBbG5DWAKfa3odRvdZww6Df1tQgBKnqpd9X0WF9g==", + "requires": { + "@octokit/types": "^12.1.1" + } + }, + "@octokit/request": { + "version": "8.1.4", + "resolved": "https://registry.npmjs.org/@octokit/request/-/request-8.1.4.tgz", + "integrity": "sha512-M0aaFfpGPEKrg7XoA/gwgRvc9MSXHRO2Ioki1qrPDbl1e9YhjIwVoHE7HIKmv/m3idzldj//xBujcFNqGX6ENA==", + "requires": { + "@octokit/endpoint": "^9.0.0", + "@octokit/request-error": "^5.0.0", + "@octokit/types": "^12.0.0", + "is-plain-object": "^5.0.0", + "universal-user-agent": "^6.0.0" + } + }, + "@octokit/request-error": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/@octokit/request-error/-/request-error-5.0.1.tgz", + "integrity": "sha512-X7pnyTMV7MgtGmiXBwmO6M5kIPrntOXdyKZLigNfQWSEQzVxR4a4vo49vJjTWX70mPndj8KhfT4Dx+2Ng3vnBQ==", + "requires": { + "@octokit/types": "^12.0.0", + "deprecation": "^2.0.0", + "once": "^1.4.0" + } + }, + "@octokit/types": { + "version": "12.2.0", + "resolved": "https://registry.npmjs.org/@octokit/types/-/types-12.2.0.tgz", + "integrity": "sha512-ZkdHqHJdifVndN7Pha10+qrgAjy3AcG//Vmjr/o5UFuTiYCcMhqDj39Yr9VM9zJ/42KO2xAYhV7cvLnLI9Kvwg==", + "requires": { + "@octokit/openapi-types": "^19.0.2" + } + }, + "before-after-hook": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-2.2.3.tgz", + "integrity": "sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ==" + }, + "deprecation": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/deprecation/-/deprecation-2.3.1.tgz", + "integrity": "sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ==" + }, + "is-plain-object": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz", + "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==" + }, + "once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "requires": { + "wrappy": "1" + } + }, + "tunnel": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/tunnel/-/tunnel-0.0.6.tgz", + "integrity": "sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg==" + }, + "undici": { + "version": "5.27.2", + "resolved": "https://registry.npmjs.org/undici/-/undici-5.27.2.tgz", + "integrity": "sha512-iS857PdOEy/y3wlM3yRp+6SNQQ6xU0mmZcwRSriqk+et/cwWAtwmIGf6WkoDN2EK/AMdCO/dfXzIwi+rFMrjjQ==", + "requires": { + "@fastify/busboy": "^2.0.0" + } + }, + "universal-user-agent": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-6.0.1.tgz", + "integrity": "sha512-yCzhz6FN2wU1NiiQRogkTQszlQSlpWaw8SvVegAc+bDxbzHgh1vX8uIe8OYyMH6DwH+sdTJsgMl36+mSMdRJIQ==" + }, + "uuid": { + "version": "8.3.2", + "resolved": 
"https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==" + }, + "wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" + } + } +} diff -Nru ubuntu-advantage-tools-30~23.10/.github/actions/create-issue/package.json ubuntu-advantage-tools-31.2~23.10/.github/actions/create-issue/package.json --- ubuntu-advantage-tools-30~23.10/.github/actions/create-issue/package.json 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/.github/actions/create-issue/package.json 2024-01-18 17:34:13.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "name": "create-issue", + "version": "1.0.0", + "description": "Looks for \"Breaks:\" in a PR comment and opens an issue when found", + "main": "index.js", + "dependencies": { + "@actions/core": "^1.10.1", + "@actions/github": "^6.0.0" + } +} diff -Nru ubuntu-advantage-tools-30~23.10/.github/actions/release-changelog-bug-refs/index.js ubuntu-advantage-tools-31.2~23.10/.github/actions/release-changelog-bug-refs/index.js --- ubuntu-advantage-tools-30~23.10/.github/actions/release-changelog-bug-refs/index.js 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/.github/actions/release-changelog-bug-refs/index.js 2024-01-18 17:34:13.000000000 +0000 @@ -28,7 +28,7 @@ ghIssues = ghIssues.concat(Array.from(message.matchAll(/CLOSES: #(\d+)/g)).map(m => m[1])); }); const changelog = await fs.readFile("./debian/changelog", { encoding: "utf8" }); - const changelogEntries = changelog.split("ubuntu-advantage-tools"); + const changelogEntries = changelog.split(/^ubuntu-advantage-tools \(/); const newEntry = changelogEntries[1]; const missingLpBugs = lpBugs.filter(bug => !newEntry.includes(`LP: #${bug}`)); const missingGhIssues = ghIssues.filter(issue => !newEntry.includes(`GH: #${issue}`)); diff -Nru ubuntu-advantage-tools-30~23.10/.github/workflows/ci-base.yaml ubuntu-advantage-tools-31.2~23.10/.github/workflows/ci-base.yaml --- ubuntu-advantage-tools-30~23.10/.github/workflows/ci-base.yaml 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/.github/workflows/ci-base.yaml 2024-01-18 17:34:13.000000000 +0000 @@ -18,7 +18,7 @@ - name: Install dependencies run: | sudo DEBIAN_FRONTEND=noninteractive apt-get -qy update - sudo DEBIAN_FRONTEND=noninteractive apt-get -qy install tox libapt-pkg-dev intltool + sudo DEBIAN_FRONTEND=noninteractive apt-get -qy install tox libapt-pkg-dev intltool hunspell - name: Git checkout uses: actions/checkout@v3 - name: Formatting @@ -29,6 +29,11 @@ run: tox -e mypy - name: Version Consistency run: python3 ./tools/check-versions-are-consistent.py + - name: Spellcheck messages + run: | + hunspell -p ./tools/spellcheck-allowed-words.txt -L ./uaclient/messages/__init__.py + hunspell -p ./tools/spellcheck-allowed-words.txt -l ./uaclient/messages/__init__.py + [ $(hunspell -p ./tools/spellcheck-allowed-words.txt -l ./uaclient/messages/__init__.py | wc -c) = "0" ] - name: Translation Template Updated run: | ./tools/update-pos.sh diff -Nru ubuntu-advantage-tools-30~23.10/.github/workflows/ci-integration.yaml ubuntu-advantage-tools-31.2~23.10/.github/workflows/ci-integration.yaml --- ubuntu-advantage-tools-30~23.10/.github/workflows/ci-integration.yaml 2023-11-07 14:23:34.000000000 +0000 +++ 
ubuntu-advantage-tools-31.2~23.10/.github/workflows/ci-integration.yaml 2024-02-29 14:03:11.000000000 +0000 @@ -27,7 +27,7 @@ runs-on: ubuntu-22.04 strategy: matrix: - release: ['xenial', 'bionic', 'focal', 'jammy', 'lunar', 'mantic'] + release: ['xenial', 'bionic', 'focal', 'jammy', 'mantic'] # , 'noble'] TODO flake8 is broken in noble as of Feb 21 2024. Add back once python3-flake8 >6 (currently in -proposed) gets into noble steps: - name: Prepare build tools env: @@ -65,22 +65,22 @@ # as much information as possible from them. fail-fast: false matrix: - release: ['bionic', 'focal', 'jammy', 'lunar'] - platform: ['lxd'] + release: ['bionic', 'focal', 'jammy', 'mantic'] + platform: ['lxd-container'] host_os: ['ubuntu-22.04'] include: # xenial lxd containers dont work on hosts >20.04 - release: xenial - platform: lxd + platform: lxd-container host_os: ubuntu-20.04 - release: bionic - platform: awspro + platform: aws.pro host_os: ubuntu-22.04 - release: bionic - platform: gcppro + platform: gcp.pro host_os: ubuntu-22.04 - release: bionic - platform: awspro-fips + platform: aws.pro-fips host_os: ubuntu-22.04 steps: - name: Prepare test tools @@ -93,14 +93,14 @@ # https://linuxcontainers.org/lxd/docs/master/howto/network_bridge_firewalld/#prevent-issues-with-lxd-and-docker sudo iptables -I DOCKER-USER -j ACCEPT - name: Refresh LXD - if: matrix.platform == 'lxd' || matrix.platform == 'vm' + if: matrix.platform == 'lxd-container' || matrix.platform == 'lxd-vm' run: sudo snap refresh --channel latest/stable lxd - name: Initialize LXD - if: matrix.platform == 'lxd' || matrix.platform == 'vm' + if: matrix.platform == 'lxd-container' || matrix.platform == 'lxd-vm' run: sudo lxd init --auto - name: Git checkout uses: actions/checkout@v3 - - name: Retieve debs + - name: Retrieve debs uses: actions/download-artifact@v3 with: name: 'ci-debs-${{ matrix.release }}' @@ -144,8 +144,7 @@ sh -c 'printf "%s\n" "$SSH_PRIVATE_KEY" > ~/.ssh/cloudinit_id_rsa' sh -c 'printf "%s\n" "$SSH_PUBLIC_KEY" > ~/.ssh/cloudinit_id_rsa.pub' - uversion=$(ubuntu-distro-info --series='${{ matrix.release }}' --release | cut -d' ' -f1) - sg lxd -c "tox -e 'behave-${{ matrix.platform }}-$uversion' -- --tags=-slow" + sg lxd -c "tox -e behave -- -D machine_types=${{ matrix.platform }} -D releases=${{ matrix.release }} --tags=-slow --tags=-upgrade" - name: Archive test artifacts if: always() uses: actions/upload-artifact@v3 diff -Nru ubuntu-advantage-tools-30~23.10/.github/workflows/custom_pr_checks.yaml ubuntu-advantage-tools-31.2~23.10/.github/workflows/custom_pr_checks.yaml --- ubuntu-advantage-tools-30~23.10/.github/workflows/custom_pr_checks.yaml 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/.github/workflows/custom_pr_checks.yaml 2024-02-14 15:37:35.000000000 +0000 @@ -25,3 +25,15 @@ id: bug-refs with: repo-token: ${{ secrets.GITHUB_TOKEN }} + checklists: + runs-on: ubuntu-latest + steps: + - name: Git checkout + uses: actions/checkout@v3 + - name: Install dependencies + run: cd ./.github/actions/checklists && npm install + - name: Add context-specific checklists to the PR + uses: ./.github/actions/checklists + id: checklists + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} diff -Nru ubuntu-advantage-tools-30~23.10/.github/workflows/merge-actions.yaml ubuntu-advantage-tools-31.2~23.10/.github/workflows/merge-actions.yaml --- ubuntu-advantage-tools-30~23.10/.github/workflows/merge-actions.yaml 1970-01-01 00:00:00.000000000 +0000 +++ 
ubuntu-advantage-tools-31.2~23.10/.github/workflows/merge-actions.yaml 2024-01-18 17:34:13.000000000 +0000 @@ -0,0 +1,23 @@ +--- + +name: Post-merge actions + +on: + pull_request: + types: + - closed + +jobs: + create-issue: + if: github.event.pull_request.merged == true + runs-on: ubuntu-latest + steps: + - name: Git checkout + uses: actions/checkout@v3 + - name: Install dependencies + run: cd ./.github/actions/create-issue && npm install + - name: Create issues if needed + uses: ./.github/actions/create-issue + id: create-issue + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} diff -Nru ubuntu-advantage-tools-30~23.10/.pre-commit-config.yaml ubuntu-advantage-tools-31.2~23.10/.pre-commit-config.yaml --- ubuntu-advantage-tools-30~23.10/.pre-commit-config.yaml 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/.pre-commit-config.yaml 2024-01-18 17:34:13.000000000 +0000 @@ -8,7 +8,7 @@ hooks: - id: isort - repo: https://github.com/shellcheck-py/shellcheck-py - rev: v0.8.0.4 # Also stored in dev-requirements.txt; update both together! + rev: v0.9.0.6 # Also stored in dev-requirements.txt; update both together! hooks: - id: shellcheck args: ['--severity=warning'] diff -Nru ubuntu-advantage-tools-30~23.10/CONTRIBUTING.md ubuntu-advantage-tools-31.2~23.10/CONTRIBUTING.md --- ubuntu-advantage-tools-30~23.10/CONTRIBUTING.md 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/CONTRIBUTING.md 2024-01-18 17:34:13.000000000 +0000 @@ -13,6 +13,7 @@ * [How to release a new version](https://github.com/canonical/ubuntu-pro-client/blob/docs/dev-docs/howtoguides/how_to_release_a_new_version_of_ua.md) * [How to use the contract staging environment](https://github.com/canonical/ubuntu-pro-client/blob/docs/dev-docs/howtoguides/use_staging_environment.md) * [How to use the magic attach endpoints](https://github.com/canonical/ubuntu-pro-client/blob/docs/dev-docs/howtoguides/how_to_use_magic_attach_endpoints.md) +* [How to spellcheck messages](https://github.com/canonical/ubuntu-pro-client/blob/docs/dev-docs/howtoguides/spellcheck.md) ### Reference @@ -29,6 +30,8 @@ ### Explanation * [How auto-attach works](https://github.com/canonical/ubuntu-pro-client/blob/docs/dev-docs/explanations/how_auto_attach_works.md) +* [Translations](https://github.com/canonical/ubuntu-pro-client/blob/docs/dev-docs/explanations/translations.md) +* [Policy on string changes](https://github.com/canonical/ubuntu-pro-client/blob/docs/dev-docs/explanations/string_changes_policy.md) ### Documentation diff -Nru ubuntu-advantage-tools-30~23.10/README.md ubuntu-advantage-tools-31.2~23.10/README.md --- ubuntu-advantage-tools-30~23.10/README.md 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/README.md 2024-01-18 17:34:13.000000000 +0000 @@ -17,6 +17,7 @@ [![Released Jammy Version](https://img.shields.io/ubuntu/v/ubuntu-advantage-tools/jammy?label=Jammy&logo=ubuntu&logoColor=white)](https://launchpad.net/ubuntu/jammy/+source/ubuntu-advantage-tools) [![Released Lunar Version](https://img.shields.io/ubuntu/v/ubuntu-advantage-tools/lunar?label=Lunar&logo=ubuntu&logoColor=white)](https://launchpad.net/ubuntu/lunar/+source/ubuntu-advantage-tools) [![Released Mantic Version](https://img.shields.io/ubuntu/v/ubuntu-advantage-tools/mantic?label=Mantic&logo=ubuntu&logoColor=white)](https://launchpad.net/ubuntu/mantic/+source/ubuntu-advantage-tools) +[![Released Noble 
Version](https://img.shields.io/ubuntu/v/ubuntu-advantage-tools/noble?label=Noble&logo=ubuntu&logoColor=white)](https://launchpad.net/ubuntu/mantic/+source/ubuntu-advantage-tools) The Ubuntu Pro Client (`pro`) is the official tool to enable Canonical offerings on your system. diff -Nru ubuntu-advantage-tools-30~23.10/apport/source_ubuntu-advantage-tools.py ubuntu-advantage-tools-31.2~23.10/apport/source_ubuntu-advantage-tools.py --- ubuntu-advantage-tools-30~23.10/apport/source_ubuntu-advantage-tools.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/apport/source_ubuntu-advantage-tools.py 2024-01-18 17:34:13.000000000 +0000 @@ -3,7 +3,7 @@ from apport.hookutils import attach_file_if_exists from uaclient import defaults -from uaclient.actions import collect_logs +from uaclient.actions import collect_logs, APPARMOR_PROFILES from uaclient.config import UAConfig @@ -11,6 +11,7 @@ report["LaunchpadPrivate"] = "1" report["LaunchpadSubscribe"] = "ua-client" cfg = UAConfig() + apparmor_files = [os.path.basename(f) for f in APPARMOR_PROFILES] with tempfile.TemporaryDirectory() as output_dir: collect_logs(cfg, output_dir) auto_include_log_files = { @@ -21,6 +22,8 @@ "livepatch-status.txt", "livepatch-status.txt-error", "pro-journal.txt", + "apparmor_logs.txt", + *apparmor_files, os.path.basename(cfg.cfg_path), os.path.basename(cfg.log_file), os.path.basename(cfg.data_path("jobs-status")), diff -Nru ubuntu-advantage-tools-30~23.10/debian/apparmor/ubuntu_pro_apt_news.jinja2 ubuntu-advantage-tools-31.2~23.10/debian/apparmor/ubuntu_pro_apt_news.jinja2 --- ubuntu-advantage-tools-30~23.10/debian/apparmor/ubuntu_pro_apt_news.jinja2 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/apparmor/ubuntu_pro_apt_news.jinja2 2024-01-18 17:34:13.000000000 +0000 @@ -0,0 +1,64 @@ +{% if ubuntu_codename not in ["xenial", "bionic", "focal"] %} +abi , +{% endif %} +include + +profile ubuntu_pro_apt_news flags=(attach_disconnected) { + include + include + include + include + + # Needed because apt-news calls apt_pkg.init() which tries to + # switch to the _apt system user/group. 
+ capability setgid, + capability setuid, + capability dac_read_search, + + /etc/apt/** r, + /etc/default/apport r, + /etc/ubuntu-advantage/* r, + /usr/bin/python3.{1,}[0-9] mrix, +{% if ubuntu_codename in ["focal"] %} + # "import uuid" in focal triggers an uname call + /usr/bin/uname mrix, +{% endif %} + /usr/lib/apt/methods/http mrix, + /usr/lib/apt/methods/https mrix, + /usr/lib/ubuntu-advantage/apt_news.py r, + /usr/share/dpkg/* r, + /var/log/ubuntu-advantage.log rw, + /var/lib/ubuntu-advantage/** r, + /var/lib/ubuntu-advantage/messages/ rw, + /var/lib/ubuntu-advantage/messages/* rw, + /run/ubuntu-advantage/ rw, + /run/ubuntu-advantage/* rw, + + /tmp/** r, + + owner @{PROC}/@{pid}/fd/ r, + @{PROC}/@{pid}/cgroup r, +{% if ubuntu_codename in ["bionic", "xenial"] %} + # see https://bugs.python.org/issue40501 + /sbin/ldconfig rix, + /sbin/ldconfig.real rix, + @{PROC}/@{pid}/mounts r, + @{PROC}/@{pid}/status r, + /usr/bin/@{multiarch}-gcc-* rix, + /usr/bin/@{multiarch}-ld.bfd rix, + /usr/lib/gcc/@{multiarch}/*/collect2 rix, + /usr/bin/@{multiarch}-objdump rix, +{% endif %} +{% if ubuntu_codename in ["xenial"] %} + # for some reason, these were just needed in xenial + capability chown, + capability fowner, + capability dac_override, + + /etc/apt/auth.conf.d/90ubuntu-advantage rw, + /var/lib/apt/lists/partial/ rw, + /var/lib/apt/lists/partial/* rw, + /var/cache/apt/archives/partial/ rw, + /var/cache/apt/archives/partial/* rw, +{% endif %} +} diff -Nru ubuntu-advantage-tools-30~23.10/debian/changelog ubuntu-advantage-tools-31.2~23.10/debian/changelog --- ubuntu-advantage-tools-30~23.10/debian/changelog 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/changelog 2024-02-29 14:03:11.000000000 +0000 @@ -1,8 +1,90 @@ -ubuntu-advantage-tools (30~23.10) mantic; urgency=medium +ubuntu-advantage-tools (31.2~23.10) mantic; urgency=medium - * Backport new upstream release to mantic (LP: #2038461) + * Backport new upstream release (LP: #2048921) - -- Renan Rodrigo Tue, 07 Nov 2023 16:23:34 +0200 + -- Grant Orndorff Thu, 29 Feb 2024 09:03:11 -0500 + +ubuntu-advantage-tools (31.2) noble; urgency=medium + + * properly rename logrotate conffile to avoid duplicate confiles, keep user changes + and avoid unnecessary prompts (LP: #2055046) + * use mv_conffile on all ubuntu-advantage-tools conffiles to avoid "obsolete" dpkg + conffile statuses + * fix regression in api u.pro.attach.auto.should_auto_attach.v1 so that it works with + the new package names + + -- Grant Orndorff Mon, 26 Feb 2024 11:47:55 -0500 + +ubuntu-advantage-tools (31.1) noble; urgency=medium + + * fix unit test that failed on newer version of python + + -- Grant Orndorff Wed, 14 Feb 2024 13:51:44 -0500 + +ubuntu-advantage-tools (31) noble; urgency=medium + + * d/*: + - rename ubuntu-advantage-tools to ubuntu-pro-client + - rename ubuntu-advantage-pro to ubuntu-pro-image-auto-attach + * d/apparmor: + - introduce new ubuntu_pro_apt_news apparmor policy + * d/control: + - update descriptions and homepages + - update ubuntu-pro-client-l10n to Depend on same binary version + of ubuntu-pro-client + * d/rules: + - install ubuntu_pro_apt_news apparmor policy + * d/ubuntu-pro-client.prerm: + - removed dependency on python3 by reimplementing in sh (LP: #2021988) + * apport: + - collect logs related to ubuntu_pro_apt_news apparmor policy + * release-upgrades.d/ubuntu-advantage-upgrades.cfg: + - convert APT list files to deb822 files when upgrading to noble + * systemd/apt-news.service: + - add apparmor profile and 
capability restrictions + * systemd/ubuntu-advantage.service: + - avoid deadlock when started during cloud-config.service (LP: #2050022) + * New upstream release 31 (LP: #2048921) + - api: + + u.pro.attach.auto.full_auto_attach.v1: new cloud_override param + + u.pro.status.enabled_services.v1: + * include services in "warning" state + * include "usg" + + u.pro.security.fix.*.plan.v1: export common objects from + endpoint modules (GH: #2714) + - cli: + + add autocomplete for api subcommand + + autocomplete multiple services for enable/disable subcommands + + if lock is held, cli will retry over the course of a few seconds + - collect-logs: + + include logs related to ubuntu_pro_apt_news apparmor policy + + include logs of apt-news.service + + include logs of esm-cache.service + - enable: + + use deb822 apt source file format when on noble or later + - fix: + + avoid insinuating that CVEs were found on the machine (GH: #1522) + + ignore LSNs when considering related USNs + + pick CVE description based on what packages are installed + - landscape: + + don't disable landscape on ubuntu releases where it cannot be + enabled (GH: #2743) + + no longer assume landscape-client gets removed on disable (GH: #2840) + + leave client.conf in place instead of renaming + + require service to be running to consider "enabled" + + new explanatory message when disabling + - motd: properly pluralize messages about updates (GH: #1579) + - status: show warning when canonical-livepatch command fails + (LP: #2019997) + - timer jobs: jobs-status.json is now world readable (GH: #2601) + + -- Grant Orndorff Tue, 09 Jan 2024 14:24:33 -0500 + +ubuntu-advantage-tools (30.1) noble; urgency=medium + + * fix UnboundLocalError in update-check error handling (LP: #2043836) + + -- Grant Orndorff Fri, 17 Nov 2023 14:25:22 -0500 ubuntu-advantage-tools (30) noble; urgency=medium diff -Nru ubuntu-advantage-tools-30~23.10/debian/clean ubuntu-advantage-tools-31.2~23.10/debian/clean --- ubuntu-advantage-tools-30~23.10/debian/clean 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/clean 2024-02-14 15:37:46.000000000 +0000 @@ -0,0 +1 @@ +debian/apparmor/ubuntu_pro_apt_news diff -Nru ubuntu-advantage-tools-30~23.10/debian/control ubuntu-advantage-tools-31.2~23.10/debian/control --- ubuntu-advantage-tools-30~23.10/debian/control 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/control 2024-02-14 15:37:46.000000000 +0000 @@ -6,6 +6,8 @@ debhelper (>=9), debianutils (>= 4.7), dh-python, + dh-apparmor, + apparmor, # After debhelper 13.3 we no longer need dh-systemd. # On hirsute and later, dh-systemd doesn't even exist. # On recent releases, the first alternative will be used. @@ -21,6 +23,7 @@ po-debconf, python3 (>= 3.4), python3-flake8, + python3-jinja2, python3-mock, python3-pytest, python3-setuptools, @@ -32,6 +35,16 @@ Rules-Requires-Root: no Package: ubuntu-advantage-tools +Depends: ubuntu-pro-client (>= ${binary:Version}), ${misc:Depends} +Architecture: all +Priority: optional +Section: oldlibs +Description: transitional dummy package for ubuntu-pro-client + This is a transitional dummy package for ubuntu-pro-client. It can safely be + removed. 
+ +Package: ubuntu-pro-client +Homepage: https://canonical-ubuntu-pro-client.readthedocs-hosted.com/ Architecture: any Depends: ${misc:Depends}, ${python3:Depends}, @@ -40,27 +53,55 @@ python3-pkg-resources, ${extra:Depends} Recommends: ubuntu-pro-client-l10n -Description: management tools for Ubuntu Pro - Ubuntu Pro is the professional package of tooling, technology - and expertise from Canonical, helping organisations around the world - manage their Ubuntu deployments. - . - Subscribers to Ubuntu Pro will find helpful tools for accessing - services in this package. +Breaks: ubuntu-advantage-tools (<<31~) +Replaces: ubuntu-advantage-tools (<<31~) +# IMPORTANT: ubuntu-pro-client does not "Provide" ubuntu-advantage-tools +# At the time of the rename, existing releases with ubuntu-advantage-tools +# include xenial, bionic, focal, jammy, and mantic. +# We cannot allow ubuntu-advantage-tools <<31 to be removed on those releases, +# because it could break ESM (remove sources.list entries in prerm). +# On those releases, ubuntu-minimal (and ubuntu-cloud-minimal) Depends on +# ubuntu-advantage-tools, which prevents it from being removed without also +# removing ubuntu-minimal or ubuntu-cloud-minimal. We consider that to be a +# sufficient warning to users that removing ubuntu-advantage-tools is not +# supported. +# If ubuntu-pro-client were to "Provide" ubuntu-advantage-tools, then that +# mechanism would not work, and users could remove ubuntu-advantage-tools +# accidentally. +# ubuntu-pro-client could "Provide" ubuntu-advantage-tools in releases in the +# future, but it would not add any value over having the ubuntu-advantage-tools +# transitional dummy package. +Description: Management tools for Ubuntu Pro + Ubuntu Pro is a suite of additional services provided by Canonical on + top of Ubuntu. Whether you're an enterprise customer deploying systems + at scale or want security patching for your personal Ubuntu LTS + at home, the Ubuntu Pro Client (pro) is the command-line tool that + will help you manage the services you need. Package: ubuntu-pro-client-l10n Architecture: any -Depends: ${misc:Depends}, ubuntu-advantage-tools (>=30~) +Depends: ${misc:Depends}, ubuntu-pro-client (= ${binary:Version}) Description: Translations for Ubuntu Pro Client This package delivers translations of Ubuntu Pro Client for various languages. Package: ubuntu-advantage-pro +Depends: ubuntu-pro-auto-attach, ${misc:Depends} +Architecture: all +Priority: optional +Section: oldlibs +Description: transitional dummy package for ubuntu-pro-auto-attach + This is a transitional dummy package for ubuntu-pro-auto-attach. It can + safely be removed. + +Package: ubuntu-pro-auto-attach +Homepage: https://canonical-ubuntu-pro-client.readthedocs-hosted.com/en/latest/explanations/what_are_ubuntu_pro_cloud_instances/ Architecture: all -Depends: ${misc:Depends}, ubuntu-advantage-tools (>=20.2) -Replaces: ubuntu-advantage-tools (<<20.2) -Breaks: ubuntu-advantage-tools (<<20.2) -Description: Additional services for Ubuntu Pro images +Depends: ${misc:Depends}, ubuntu-pro-client (>=31~) +Breaks: ubuntu-advantage-pro (<<31~) +Replaces: ubuntu-advantage-pro (<<31~) +Provides: ubuntu-advantage-pro +Description: Service to auto-attach Ubuntu Pro cloud images This package delivers an additional service that performs an auto-attach operation for Ubuntu Pro cloud instances. This package should not be manually installed, as it is already present on the cloud instances that require it. 
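The debian/control comment above explains why ubuntu-pro-client deliberately does not "Provide" ubuntu-advantage-tools: on xenial through mantic, the ubuntu-minimal (or ubuntu-cloud-minimal) dependency on ubuntu-advantage-tools is what keeps the old package, and with it the ESM sources.list entries, from being removed casually. Below is a minimal illustrative sketch of how one might verify that protection on an affected release; it is not part of the package, it assumes python3 and apt-cache are on PATH, and the helper name check_rdepends is invented for this example.

#!/usr/bin/python3
# Illustrative sketch only -- not shipped with ubuntu-pro-client.
# Lists the reverse dependencies apt-cache reports for the old and new
# package names, so you can confirm that ubuntu-minimal (or
# ubuntu-cloud-minimal) still pins ubuntu-advantage-tools on pre-31 releases.
import subprocess

def check_rdepends(package):
    """Return the reverse dependencies apt-cache reports for a package."""
    out = subprocess.run(
        ["apt-cache", "rdepends", package],
        capture_output=True, text=True, check=True,
    ).stdout
    # apt-cache prints the package name and a "Reverse Depends:" header
    # before the actual list, so skip the first two lines.
    return [line.strip().lstrip("|") for line in out.splitlines()[2:]]

if __name__ == "__main__":
    for pkg in ("ubuntu-advantage-tools", "ubuntu-pro-client"):
        rdeps = check_rdepends(pkg)
        print("{} <- {}".format(pkg, ", ".join(rdeps) if rdeps else "(none)"))

On the releases named in the comment, ubuntu-minimal is expected to show up in the first list; after the rename lands, only the transitional dummy package should still reference the old name.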
diff -Nru ubuntu-advantage-tools-30~23.10/debian/jinja2_render ubuntu-advantage-tools-31.2~23.10/debian/jinja2_render --- ubuntu-advantage-tools-30~23.10/debian/jinja2_render 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/jinja2_render 2024-01-18 17:34:13.000000000 +0000 @@ -0,0 +1,20 @@ +#!/usr/bin/python3 + +import sys + +from jinja2 import Template + +input_file = sys.argv[1] +output_file = sys.argv[2] +kwargs = {} + +# key=value pairs in the command-line +if len(sys.argv) > 3: + kwargs = {arg.split("=")[0]:arg.split("=")[1] for arg in sys.argv[3:]} + +t = Template(open(input_file, "r").read()) +output = t.render(**kwargs) + +with open(output_file, "w") as f: + f.write(output) + diff -Nru ubuntu-advantage-tools-30~23.10/debian/po/pt_BR.po ubuntu-advantage-tools-31.2~23.10/debian/po/pt_BR.po --- ubuntu-advantage-tools-30~23.10/debian/po/pt_BR.po 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/po/pt_BR.po 2024-02-14 15:37:46.000000000 +0000 @@ -7,7 +7,7 @@ msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2023-10-27 11:01-0400\n" +"POT-Creation-Date: 2024-01-19 16:13-0500\n" "PO-Revision-Date: 2023-09-25 12:29-0400\n" "Last-Translator: Lucas Moura \n" "Language-Team: Brazilian Portuguese for interval must be a positive integer." @@ -3238,17 +3255,17 @@ "Não foi possível associar {key} a {value}: precisa ser um inteiro " "positivo." -#: ../../uaclient/messages/__init__.py:2332 +#: ../../uaclient/messages/__init__.py:2346 #, python-brace-format msgid "Invalid url in config. {key}: {value}" msgstr "url inválida no arquivo de configuração. {key}: {value}" -#: ../../uaclient/messages/__init__.py:2337 +#: ../../uaclient/messages/__init__.py:2351 #, python-brace-format msgid "Could not find yaml file: {filepath}" msgstr "Não foi possível encontrar o arquivo yaml: {filepath}" -#: ../../uaclient/messages/__init__.py:2343 +#: ../../uaclient/messages/__init__.py:2357 msgid "" "Error: Setting global apt proxy and pro scoped apt proxy\n" "at the same time is unsupported.\n" @@ -3259,27 +3276,27 @@ "pro de apt ao mesmo tempo não é suportado.\n" "Cancelando o processo de configuração.\n" -#: ../../uaclient/messages/__init__.py:2353 +#: ../../uaclient/messages/__init__.py:2367 msgid "Can't load the distro-info database." msgstr "Não foi possível carregar o banco de dados do distro-info." -#: ../../uaclient/messages/__init__.py:2358 +#: ../../uaclient/messages/__init__.py:2372 #, python-brace-format msgid "Can't find series {series} in the distro-info database." msgstr "" "Não foi possível encontrar a série {series} na banco de dados do distro-info." -#: ../../uaclient/messages/__init__.py:2363 +#: ../../uaclient/messages/__init__.py:2377 #, python-brace-format msgid "Error: Cannot use {option1} together with {option2}." 
msgstr "Erro: não é possível usar {option1} junto com {option2}" -#: ../../uaclient/messages/__init__.py:2367 +#: ../../uaclient/messages/__init__.py:2381 #, python-brace-format msgid "No help available for '{name}'" msgstr "Ajuda não disponível para '{name}'" -#: ../../uaclient/messages/__init__.py:2373 +#: ../../uaclient/messages/__init__.py:2387 #, python-brace-format msgid "" "Error: issue \"{issue}\" is not recognized.\n" @@ -3288,34 +3305,34 @@ "Erro: problema de segurnça \"{issue}\" não foi reconhecido.\n" "Use: \"pro fix CVE-yyyy-nnnn\" ou \"pro fix USN-nnnn\"" -#: ../../uaclient/messages/__init__.py:2379 +#: ../../uaclient/messages/__init__.py:2393 #, python-brace-format msgid "{arg} must be one of: {choices}" msgstr "{arg} precisa ser um de: {choices}" -#: ../../uaclient/messages/__init__.py:2384 +#: ../../uaclient/messages/__init__.py:2398 #, python-brace-format msgid "Expected {expected} but found: {actual}" msgstr "Esperava {expected} mas encontrou: {actual}" -#: ../../uaclient/messages/__init__.py:2388 +#: ../../uaclient/messages/__init__.py:2402 msgid "Unable to process uaclient.conf" msgstr "Falha ao processar uaclient.conf" -#: ../../uaclient/messages/__init__.py:2393 +#: ../../uaclient/messages/__init__.py:2407 msgid "Unable to refresh your subscription" msgstr "Falha ao atualizar sua assinatura" -#: ../../uaclient/messages/__init__.py:2398 +#: ../../uaclient/messages/__init__.py:2412 msgid "Unable to update Ubuntu Pro related APT and MOTD messages." msgstr "" "Falha ao atualizar as mensagens de APT e MOTD relacioandas ao Ubuntu Pro" -#: ../../uaclient/messages/__init__.py:2404 +#: ../../uaclient/messages/__init__.py:2418 msgid "json formatted response requires --assume-yes flag." msgstr "resposta formatada em json necessita do paramêtro --assume-yes" -#: ../../uaclient/messages/__init__.py:2412 +#: ../../uaclient/messages/__init__.py:2426 msgid "" "Do not pass the TOKEN arg if you are using --attach-config.\n" "Include the token in the attach-config file instead.\n" @@ -3325,50 +3342,50 @@ "Ao invés disso, inclua o token no arquivo de attach-config.\n" " " -#: ../../uaclient/messages/__init__.py:2421 +#: ../../uaclient/messages/__init__.py:2435 msgid "Cannot provide both --args and --data at the same time" msgstr "Não é possível usar --args e --data ao mesmo tempo" -#: ../../uaclient/messages/__init__.py:2427 +#: ../../uaclient/messages/__init__.py:2441 #, python-brace-format msgid "Unable to perform: {lock_request}.\n" msgstr "Falha ao executar: {lock_request}.\n" -#: ../../uaclient/messages/__init__.py:2436 +#: ../../uaclient/messages/__init__.py:2450 msgid "This command must be run as root (try using sudo)." msgstr "Esse comando precisa ser executado como root (tente usando sudo)." -#: ../../uaclient/messages/__init__.py:2441 +#: ../../uaclient/messages/__init__.py:2455 #, python-brace-format msgid "Metadata for {issue} is invalid. Error: {error_msg}." msgstr "Metadados para {issue} não são válidos. Erro: {error_msg}." -#: ../../uaclient/messages/__init__.py:2448 +#: ../../uaclient/messages/__init__.py:2462 #, python-brace-format msgid "Error: {issue_id} not found." msgstr "Erro: {issue_id} não encontrada." -#: ../../uaclient/messages/__init__.py:2452 +#: ../../uaclient/messages/__init__.py:2466 #, python-brace-format msgid "GPG key '{keyfile}' not found." 
msgstr "chave GPG '{keyfile}' não foi encontrada" -#: ../../uaclient/messages/__init__.py:2457 +#: ../../uaclient/messages/__init__.py:2471 #, python-brace-format msgid "'{endpoint}' is not a valid endpoint" msgstr "'{endpoint}' não é um endpoint válido" -#: ../../uaclient/messages/__init__.py:2462 +#: ../../uaclient/messages/__init__.py:2476 #, python-brace-format msgid "Missing argument '{arg}' for endpoint {endpoint}" msgstr "'{arg}' está faltando para endpoint {endpoint}" -#: ../../uaclient/messages/__init__.py:2467 +#: ../../uaclient/messages/__init__.py:2481 #, python-brace-format msgid "{endpoint} accepts no arguments" msgstr "{endpoint} não aceita paramêtros" -#: ../../uaclient/messages/__init__.py:2472 +#: ../../uaclient/messages/__init__.py:2486 #, python-brace-format msgid "" "Error parsing API json data parameter:\n" @@ -3377,32 +3394,32 @@ "Error ao analisar paramêtro data para API json:\n" "{data}" -#: ../../uaclient/messages/__init__.py:2477 +#: ../../uaclient/messages/__init__.py:2491 #, python-brace-format msgid "'{arg}' is not formatted as 'key=value'" msgstr "'{arg}' não está formatado como 'chave=valor'" -#: ../../uaclient/messages/__init__.py:2482 +#: ../../uaclient/messages/__init__.py:2496 #, python-brace-format msgid "Unable to determine version: {error_msg}" msgstr "Não foi possível determinar a versão: {error_msg}" -#: ../../uaclient/messages/__init__.py:2487 +#: ../../uaclient/messages/__init__.py:2501 msgid "features.disable_auto_attach set in config" msgstr "features.disable_auto_attach definida na configuração" -#: ../../uaclient/messages/__init__.py:2492 +#: ../../uaclient/messages/__init__.py:2506 #, python-brace-format msgid "Unable to determine unattended-upgrades status: {error_msg}" msgstr "" "Não foi possível determinar o status do unattended-upgrades: {error_msg}" -#: ../../uaclient/messages/__init__.py:2498 +#: ../../uaclient/messages/__init__.py:2512 #, python-brace-format msgid "Expected value with type {expected_type} but got type: {got_type}" msgstr "Esperava valor com tipo {expected_type}, mas recebeu tipo {got_type}" -#: ../../uaclient/messages/__init__.py:2504 +#: ../../uaclient/messages/__init__.py:2518 #, python-brace-format msgid "" "Got value with incorrect type at index {index}:\n" @@ -3411,7 +3428,7 @@ "Valor com tipo incorreto na posição {index}:\n" "{nested_msg}" -#: ../../uaclient/messages/__init__.py:2510 +#: ../../uaclient/messages/__init__.py:2524 #, python-brace-format msgid "" "Got value with incorrect type for field \"{key}\":\n" @@ -3420,9 +3437,15 @@ "Valor com tipo incorreto para o campo \"{key}\":\n" "{nested_msg}" -#: ../../uaclient/messages/__init__.py:2517 +#: ../../uaclient/messages/__init__.py:2531 #, python-brace-format msgid "Value provided was not found in {enum_class}'s allowed: value: {values}" msgstr "" "Valor fornecido não está presente nos valores permitidos de {enum_class}: " "{values}" + +#~ msgid "Detach this machine from Ubuntu Pro services." 
+#~ msgstr "Desvincule esta máquina de uma assinatura Ubuntu Pro" + +#~ msgid "Found CVEs:" +#~ msgstr "CVEs encontradas:" diff -Nru ubuntu-advantage-tools-30~23.10/debian/po/ubuntu-pro.pot ubuntu-advantage-tools-31.2~23.10/debian/po/ubuntu-pro.pot --- ubuntu-advantage-tools-30~23.10/debian/po/ubuntu-pro.pot 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/po/ubuntu-pro.pot 2024-02-14 15:37:46.000000000 +0000 @@ -8,7 +8,7 @@ msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2023-10-27 11:01-0400\n" +"POT-Creation-Date: 2024-01-19 16:13-0500\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -312,7 +312,7 @@ msgid "" " A new version is available: {version}\n" "Please run:\n" -" sudo apt-get install ubuntu-advantage-tools\n" +" sudo apt install ubuntu-pro-client\n" "to get the latest bug fixes and new features." msgstr "" @@ -416,14 +416,14 @@ #, python-brace-format msgid "" "*Your Ubuntu Pro subscription has EXPIRED*\n" -"{{pkg_num}} additional security update(s) require Ubuntu Pro with " -"'{{service}}' enabled.\n" -"Renew your service at {url}" +"{{pkg_num}} additional security update require Ubuntu Pro with '{{service}}' " +"enabled.\n" +"Renew your subscription at {url}" msgid_plural "" "*Your Ubuntu Pro subscription has EXPIRED*\n" -"{{pkg_num}} additional security update(s) require Ubuntu Pro with " +"{{pkg_num}} additional security updates require Ubuntu Pro with " "'{{service}}' enabled.\n" -"Renew your service at {url}" +"Renew your subscription at {url}" msgstr[0] "" msgstr[1] "" @@ -461,7 +461,7 @@ #, python-brace-format msgid "" "*Your Ubuntu Pro subscription has EXPIRED*\n" -"Renew your service at {url}" +"Renew your subscription at {url}" msgstr "" #. 
############################################################################## @@ -537,8 +537,8 @@ #: ../../uaclient/messages/__init__.py:272 msgid "" -"Currently attempting to automatically attach this machine to Ubuntu Pro " -"services" +"Currently attempting to automatically attach this machine to an Ubuntu Pro " +"subscription" msgstr "" #: ../../uaclient/messages/__init__.py:276 @@ -700,7 +700,7 @@ #: ../../uaclient/messages/__init__.py:386 #, python-brace-format msgid "" -"Failed to automatically attach to Ubuntu Pro services {num_attempts} " +"Failed to automatically attach to an Ubuntu Pro subscription {num_attempts} " "time(s).\n" "The failure was due to: {reason}.\n" "The next attempt is scheduled for {next_run_datestring}.\n" @@ -710,7 +710,7 @@ #: ../../uaclient/messages/__init__.py:394 #, python-brace-format msgid "" -"Failed to automatically attach to Ubuntu Pro services {num_attempts} " +"Failed to automatically attach to an Ubuntu Pro subscription {num_attempts} " "time(s).\n" "The most recent failure was due to: {reason}.\n" "Try re-launching the instance or report this issue by running `ubuntu-bug " @@ -996,7 +996,7 @@ msgstr "" #: ../../uaclient/messages/__init__.py:599 -msgid "Found CVEs:" +msgid "Associated CVEs:" msgstr "" #: ../../uaclient/messages/__init__.py:600 @@ -1039,12 +1039,12 @@ msgstr "" #: ../../uaclient/messages/__init__.py:628 -#: ../../uaclient/messages/__init__.py:1220 +#: ../../uaclient/messages/__init__.py:1222 msgid "Ubuntu Pro: ESM Infra" msgstr "" #: ../../uaclient/messages/__init__.py:629 -#: ../../uaclient/messages/__init__.py:1206 +#: ../../uaclient/messages/__init__.py:1208 msgid "Ubuntu Pro: ESM Apps" msgstr "" @@ -1102,7 +1102,7 @@ #: ../../uaclient/messages/__init__.py:683 msgid "" " Make sure to run\n" -" sudo apt-get update\n" +" sudo apt update\n" "to get the latest package information from apt." msgstr "" @@ -1412,7 +1412,7 @@ msgstr "" #: ../../uaclient/messages/__init__.py:883 -#: ../../uaclient/messages/__init__.py:1101 +#: ../../uaclient/messages/__init__.py:1103 msgid "Calls the Client API endpoints." msgstr "" @@ -1441,7 +1441,7 @@ msgstr "" #: ../../uaclient/messages/__init__.py:901 -msgid "Show customisable configuration settings" +msgid "Show customizable configuration settings" msgstr "" #: ../../uaclient/messages/__init__.py:903 @@ -1475,7 +1475,8 @@ #: ../../uaclient/messages/__init__.py:919 #, python-brace-format msgid "" -"Attach this machine to Ubuntu Pro with a token obtained from:\n" +"Attach this machine to an Ubuntu Pro subscription with a token obtained " +"from:\n" "{url}\n" "\n" "When running this command without a token, it will generate a short code\n" @@ -1564,7 +1565,7 @@ "* messages: Update APT and MOTD messages related to UA.\n" "\n" "You can individually target any of the three specific actions,\n" -"by passing it's target to nome to the command. If no `target`\n" +"by passing the target name to the command. If no `target`\n" "is specified, all targets are refreshed.\n" msgstr "" @@ -1572,68 +1573,68 @@ msgid "Target to refresh." msgstr "" -#: ../../uaclient/messages/__init__.py:1002 -msgid "Detach this machine from Ubuntu Pro services." +#: ../../uaclient/messages/__init__.py:1003 +msgid "Detach this machine from an Ubuntu Pro subscription." msgstr "" -#: ../../uaclient/messages/__init__.py:1005 +#: ../../uaclient/messages/__init__.py:1007 msgid "Provide detailed information about Ubuntu Pro services." 
msgstr "" -#: ../../uaclient/messages/__init__.py:1008 +#: ../../uaclient/messages/__init__.py:1010 #, python-brace-format msgid "a service to view help output for. One of: {options}" msgstr "" -#: ../../uaclient/messages/__init__.py:1010 +#: ../../uaclient/messages/__init__.py:1012 msgid "Include beta services" msgstr "" -#: ../../uaclient/messages/__init__.py:1012 +#: ../../uaclient/messages/__init__.py:1014 msgid "Enable an Ubuntu Pro service." msgstr "" -#: ../../uaclient/messages/__init__.py:1014 +#: ../../uaclient/messages/__init__.py:1016 #, python-brace-format msgid "the name(s) of the Ubuntu Pro services to enable. One of: {options}" msgstr "" -#: ../../uaclient/messages/__init__.py:1017 +#: ../../uaclient/messages/__init__.py:1019 msgid "" "do not auto-install packages. Valid for cc-eal, cis and realtime-kernel." msgstr "" -#: ../../uaclient/messages/__init__.py:1020 +#: ../../uaclient/messages/__init__.py:1022 msgid "allow beta service to be enabled" msgstr "" -#: ../../uaclient/messages/__init__.py:1022 +#: ../../uaclient/messages/__init__.py:1024 msgid "The name of the variant to use when enabling the service" msgstr "" -#: ../../uaclient/messages/__init__.py:1025 +#: ../../uaclient/messages/__init__.py:1027 msgid "Disable an Ubuntu Pro service." msgstr "" -#: ../../uaclient/messages/__init__.py:1027 +#: ../../uaclient/messages/__init__.py:1029 #, python-brace-format msgid "the name(s) of the Ubuntu Pro services to disable. One of: {options}" msgstr "" -#: ../../uaclient/messages/__init__.py:1030 +#: ../../uaclient/messages/__init__.py:1032 msgid "" "disable the service and remove/downgrade related packages (experimental)" msgstr "" -#: ../../uaclient/messages/__init__.py:1034 +#: ../../uaclient/messages/__init__.py:1036 msgid "Output system related information related to Pro services" msgstr "" -#: ../../uaclient/messages/__init__.py:1036 +#: ../../uaclient/messages/__init__.py:1038 msgid "does the system need to be rebooted" msgstr "" -#: ../../uaclient/messages/__init__.py:1038 +#: ../../uaclient/messages/__init__.py:1040 msgid "" "Report the current reboot-required status for the machine.\n" "\n" @@ -1649,7 +1650,7 @@ " nearest maintenance window.\n" msgstr "" -#: ../../uaclient/messages/__init__.py:1055 +#: ../../uaclient/messages/__init__.py:1057 msgid "" "Report current status of Ubuntu Pro services on system.\n" "\n" @@ -1685,80 +1686,80 @@ "listed in the output.\n" msgstr "" -#: ../../uaclient/messages/__init__.py:1090 +#: ../../uaclient/messages/__init__.py:1092 msgid "Block waiting on pro to complete" msgstr "" -#: ../../uaclient/messages/__init__.py:1092 +#: ../../uaclient/messages/__init__.py:1094 msgid "simulate the output status using a provided token" msgstr "" -#: ../../uaclient/messages/__init__.py:1094 +#: ../../uaclient/messages/__init__.py:1096 msgid "Include unavailable and beta services" msgstr "" -#: ../../uaclient/messages/__init__.py:1096 +#: ../../uaclient/messages/__init__.py:1098 msgid "show all debug log messages to console" msgstr "" -#: ../../uaclient/messages/__init__.py:1097 +#: ../../uaclient/messages/__init__.py:1099 #, python-brace-format msgid "show version of {name}" msgstr "" -#: ../../uaclient/messages/__init__.py:1099 +#: ../../uaclient/messages/__init__.py:1101 msgid "attach this machine to an Ubuntu Pro subscription" msgstr "" -#: ../../uaclient/messages/__init__.py:1102 +#: ../../uaclient/messages/__init__.py:1104 msgid "automatically attach on supported platforms" msgstr "" -#: ../../uaclient/messages/__init__.py:1103 +#: 
../../uaclient/messages/__init__.py:1105 msgid "collect Pro logs and debug information" msgstr "" -#: ../../uaclient/messages/__init__.py:1104 +#: ../../uaclient/messages/__init__.py:1106 msgid "manage Ubuntu Pro configuration on this machine" msgstr "" -#: ../../uaclient/messages/__init__.py:1106 +#: ../../uaclient/messages/__init__.py:1108 msgid "remove this machine from an Ubuntu Pro subscription" msgstr "" -#: ../../uaclient/messages/__init__.py:1109 +#: ../../uaclient/messages/__init__.py:1111 msgid "disable a specific Ubuntu Pro service on this machine" msgstr "" -#: ../../uaclient/messages/__init__.py:1112 +#: ../../uaclient/messages/__init__.py:1114 msgid "enable a specific Ubuntu Pro service on this machine" msgstr "" -#: ../../uaclient/messages/__init__.py:1115 +#: ../../uaclient/messages/__init__.py:1117 msgid "check for and mitigate the impact of a CVE/USN on this system" msgstr "" -#: ../../uaclient/messages/__init__.py:1118 +#: ../../uaclient/messages/__init__.py:1120 msgid "list available security updates for the system" msgstr "" -#: ../../uaclient/messages/__init__.py:1121 +#: ../../uaclient/messages/__init__.py:1123 msgid "show detailed information about Ubuntu Pro services" msgstr "" -#: ../../uaclient/messages/__init__.py:1123 +#: ../../uaclient/messages/__init__.py:1125 msgid "refresh Ubuntu Pro services" msgstr "" -#: ../../uaclient/messages/__init__.py:1124 +#: ../../uaclient/messages/__init__.py:1126 msgid "current status of all Ubuntu Pro services" msgstr "" -#: ../../uaclient/messages/__init__.py:1125 +#: ../../uaclient/messages/__init__.py:1127 msgid "show system information related to Pro services" msgstr "" -#: ../../uaclient/messages/__init__.py:1128 +#: ../../uaclient/messages/__init__.py:1130 #, python-brace-format msgid "" "WARNING: this output is intended to be human readable, and subject to " @@ -1770,15 +1771,15 @@ #. ############################################################################## #. SERVICE-SPECIFIC MESSAGES # #. ############################################################################## -#: ../../uaclient/messages/__init__.py:1141 +#: ../../uaclient/messages/__init__.py:1143 msgid "Anbox Cloud" msgstr "" -#: ../../uaclient/messages/__init__.py:1142 +#: ../../uaclient/messages/__init__.py:1144 msgid "Scalable Android in the cloud" msgstr "" -#: ../../uaclient/messages/__init__.py:1144 +#: ../../uaclient/messages/__init__.py:1146 #, python-brace-format msgid "" "Anbox Cloud lets you stream mobile apps securely, at any scale, to any " @@ -1796,7 +1797,7 @@ "{url}" msgstr "" -#: ../../uaclient/messages/__init__.py:1155 +#: ../../uaclient/messages/__init__.py:1157 #, python-brace-format msgid "" "To finish setting up the Anbox Cloud Appliance, run:\n" @@ -1808,15 +1809,15 @@ "For more information, see {url}\n" msgstr "" -#: ../../uaclient/messages/__init__.py:1166 +#: ../../uaclient/messages/__init__.py:1168 msgid "CC EAL2" msgstr "" -#: ../../uaclient/messages/__init__.py:1167 +#: ../../uaclient/messages/__init__.py:1169 msgid "Common Criteria EAL2 Provisioning Packages" msgstr "" -#: ../../uaclient/messages/__init__.py:1169 +#: ../../uaclient/messages/__init__.py:1171 msgid "" "Common Criteria is an Information Technology Security Evaluation standard\n" "(ISO/IEC IS 15408) for computer security certification. Ubuntu 16.04 has " @@ -1826,29 +1827,29 @@ "on Intel x86_64, IBM Power8 and IBM Z hardware platforms." 
msgstr "" -#: ../../uaclient/messages/__init__.py:1176 +#: ../../uaclient/messages/__init__.py:1178 msgid "" "(This will download more than 500MB of packages, so may take some time.)" msgstr "" -#: ../../uaclient/messages/__init__.py:1180 +#: ../../uaclient/messages/__init__.py:1182 #, python-brace-format msgid "Please follow instructions in {filename} to configure EAL2" msgstr "" -#: ../../uaclient/messages/__init__.py:1183 +#: ../../uaclient/messages/__init__.py:1185 msgid "CIS Audit" msgstr "" -#: ../../uaclient/messages/__init__.py:1184 +#: ../../uaclient/messages/__init__.py:1186 msgid "Ubuntu Security Guide" msgstr "" -#: ../../uaclient/messages/__init__.py:1185 +#: ../../uaclient/messages/__init__.py:1187 msgid "Security compliance and audit tools" msgstr "" -#: ../../uaclient/messages/__init__.py:1187 +#: ../../uaclient/messages/__init__.py:1189 #, python-brace-format msgid "" "Ubuntu Security Guide is a tool for hardening and auditing and allows for\n" @@ -1858,29 +1859,29 @@ "{url}" msgstr "" -#: ../../uaclient/messages/__init__.py:1193 +#: ../../uaclient/messages/__init__.py:1195 #, python-brace-format msgid "Visit {url} to learn how to use CIS" msgstr "" -#: ../../uaclient/messages/__init__.py:1196 +#: ../../uaclient/messages/__init__.py:1198 #, python-brace-format msgid "Visit {url} for the next steps" msgstr "" -#: ../../uaclient/messages/__init__.py:1200 +#: ../../uaclient/messages/__init__.py:1202 #, python-brace-format msgid "" -"From Ubuntu 20.04 and onwards 'pro enable cis' has been\n" +"From Ubuntu 20.04 onward 'pro enable cis' has been\n" "replaced by 'pro enable usg'. See more information at:\n" "{url}" msgstr "" -#: ../../uaclient/messages/__init__.py:1208 +#: ../../uaclient/messages/__init__.py:1210 msgid "Expanded Security Maintenance for Applications" msgstr "" -#: ../../uaclient/messages/__init__.py:1211 +#: ../../uaclient/messages/__init__.py:1213 #, python-brace-format msgid "" "Expanded Security Maintenance for Applications is enabled by default on\n" @@ -1892,11 +1893,11 @@ "{url}" msgstr "" -#: ../../uaclient/messages/__init__.py:1222 +#: ../../uaclient/messages/__init__.py:1224 msgid "Expanded Security Maintenance for Infrastructure" msgstr "" -#: ../../uaclient/messages/__init__.py:1225 +#: ../../uaclient/messages/__init__.py:1227 #, python-brace-format msgid "" "Expanded Security Maintenance for Infrastructure provides access to a " @@ -1909,15 +1910,15 @@ "{url}" msgstr "" -#: ../../uaclient/messages/__init__.py:1234 +#: ../../uaclient/messages/__init__.py:1236 msgid "FIPS" msgstr "" -#: ../../uaclient/messages/__init__.py:1235 +#: ../../uaclient/messages/__init__.py:1237 msgid "NIST-certified FIPS crypto packages" msgstr "" -#: ../../uaclient/messages/__init__.py:1237 +#: ../../uaclient/messages/__init__.py:1239 #, python-brace-format msgid "" "Installs FIPS 140 crypto packages for FedRAMP, FISMA and compliance use " @@ -1928,42 +1929,42 @@ "choose \"fips-updates\" for maximum security. Find out more at {url}" msgstr "" -#: ../../uaclient/messages/__init__.py:1244 +#: ../../uaclient/messages/__init__.py:1246 msgid "Could not determine cloud, defaulting to generic FIPS package." 
msgstr "" -#: ../../uaclient/messages/__init__.py:1247 +#: ../../uaclient/messages/__init__.py:1249 #, python-brace-format msgid "" "FIPS kernel is running in a disabled state.\n" " To manually remove fips kernel: {url}\n" msgstr "" -#: ../../uaclient/messages/__init__.py:1253 +#: ../../uaclient/messages/__init__.py:1255 msgid "" "Warning: FIPS kernel is not optimized for your specific cloud.\n" "To fix it, run the following commands:\n" "\n" " 1. sudo pro disable fips\n" -" 2. sudo apt-get remove ubuntu-fips\n" +" 2. sudo apt remove ubuntu-fips\n" " 3. sudo pro enable fips --assume-yes\n" " 4. sudo reboot\n" msgstr "" -#: ../../uaclient/messages/__init__.py:1265 +#: ../../uaclient/messages/__init__.py:1267 msgid "" "This will install the FIPS packages. The Livepatch service will be " "unavailable.\n" "Warning: This action can take some time and cannot be undone.\n" msgstr "" -#: ../../uaclient/messages/__init__.py:1274 +#: ../../uaclient/messages/__init__.py:1276 msgid "" "This will install the FIPS packages including security updates.\n" "Warning: This action can take some time and cannot be undone.\n" msgstr "" -#: ../../uaclient/messages/__init__.py:1283 +#: ../../uaclient/messages/__init__.py:1285 #, python-brace-format msgid "" "Warning: Enabling {title} in a container.\n" @@ -1973,51 +1974,51 @@ "Warning: This action can take some time and cannot be undone.\n" msgstr "" -#: ../../uaclient/messages/__init__.py:1295 +#: ../../uaclient/messages/__init__.py:1297 #, python-brace-format msgid "" "This will disable the {title} entitlement but the {title} packages will " "remain installed.\n" msgstr "" -#: ../../uaclient/messages/__init__.py:1302 +#: ../../uaclient/messages/__init__.py:1304 msgid "FIPS support requires system reboot to complete configuration." msgstr "" -#: ../../uaclient/messages/__init__.py:1304 -#: ../../uaclient/messages/__init__.py:1735 +#: ../../uaclient/messages/__init__.py:1306 +#: ../../uaclient/messages/__init__.py:1755 msgid "Reboot to FIPS kernel required" msgstr "" -#: ../../uaclient/messages/__init__.py:1306 +#: ../../uaclient/messages/__init__.py:1308 msgid "This FIPS install is out of date, run: sudo pro enable fips" msgstr "" -#: ../../uaclient/messages/__init__.py:1309 +#: ../../uaclient/messages/__init__.py:1311 msgid "Disabling FIPS requires system reboot to complete operation." 
msgstr "" -#: ../../uaclient/messages/__init__.py:1312 +#: ../../uaclient/messages/__init__.py:1314 #, python-brace-format msgid "{service} {pkg} package could not be installed" msgstr "" -#: ../../uaclient/messages/__init__.py:1315 +#: ../../uaclient/messages/__init__.py:1317 msgid "" "Please run `apt upgrade` to ensure all FIPS packages are updated to the " "correct\n" "version.\n" msgstr "" -#: ../../uaclient/messages/__init__.py:1321 +#: ../../uaclient/messages/__init__.py:1323 msgid "FIPS Updates" msgstr "" -#: ../../uaclient/messages/__init__.py:1323 +#: ../../uaclient/messages/__init__.py:1325 msgid "FIPS compliant crypto packages with stable security updates" msgstr "" -#: ../../uaclient/messages/__init__.py:1326 +#: ../../uaclient/messages/__init__.py:1328 #, python-brace-format msgid "" "fips-updates installs FIPS 140 crypto packages including all security " @@ -2026,21 +2027,21 @@ "You can find out more at {url}" msgstr "" -#: ../../uaclient/messages/__init__.py:1332 +#: ../../uaclient/messages/__init__.py:1334 msgid "FIPS Preview" msgstr "" -#: ../../uaclient/messages/__init__.py:1334 +#: ../../uaclient/messages/__init__.py:1336 msgid "Preview of FIPS crypto packages undergoing certification with NIST" msgstr "" -#: ../../uaclient/messages/__init__.py:1337 +#: ../../uaclient/messages/__init__.py:1339 msgid "" "Installs FIPS crypto packages that are under certification with NIST,\n" "for FedRAMP, FISMA and compliance use cases." msgstr "" -#: ../../uaclient/messages/__init__.py:1342 +#: ../../uaclient/messages/__init__.py:1344 msgid "" "This will install crypto packages that have been submitted to NIST for " "review\n" @@ -2052,15 +2053,15 @@ "Warning: This action can take some time and cannot be undone.\n" msgstr "" -#: ../../uaclient/messages/__init__.py:1353 +#: ../../uaclient/messages/__init__.py:1355 msgid "Landscape" msgstr "" -#: ../../uaclient/messages/__init__.py:1355 +#: ../../uaclient/messages/__init__.py:1357 msgid "Management and administration tool for Ubuntu" msgstr "" -#: ../../uaclient/messages/__init__.py:1358 +#: ../../uaclient/messages/__init__.py:1360 #, python-brace-format msgid "" "Landscape Client can be installed on this machine and enrolled in " @@ -2073,15 +2074,22 @@ "more. 
Find out more about Landscape at {home_url}" msgstr "" -#: ../../uaclient/messages/__init__.py:1371 +#: ../../uaclient/messages/__init__.py:1373 +msgid "" +"/etc/landscape/client.conf contains your landscape-client configuration.\n" +"To re-enable Landscape with the same configuration, run:\n" +" sudo pro enable landscape --assume-yes\n" +msgstr "" + +#: ../../uaclient/messages/__init__.py:1380 msgid "Livepatch" msgstr "" -#: ../../uaclient/messages/__init__.py:1372 +#: ../../uaclient/messages/__init__.py:1381 msgid "Canonical Livepatch service" msgstr "" -#: ../../uaclient/messages/__init__.py:1374 +#: ../../uaclient/messages/__init__.py:1383 #, python-brace-format msgid "" "Livepatch provides selected high and critical kernel CVE fixes and other\n" @@ -2096,42 +2104,42 @@ "service at {url}" msgstr "" -#: ../../uaclient/messages/__init__.py:1383 +#: ../../uaclient/messages/__init__.py:1392 msgid "Current kernel is not supported" msgstr "" -#: ../../uaclient/messages/__init__.py:1386 +#: ../../uaclient/messages/__init__.py:1395 #, python-brace-format msgid "Supported livepatch kernels are listed here: {url}" msgstr "" -#: ../../uaclient/messages/__init__.py:1389 +#: ../../uaclient/messages/__init__.py:1398 #, python-brace-format msgid "Unable to configure livepatch: {error_msg}" msgstr "" -#: ../../uaclient/messages/__init__.py:1391 +#: ../../uaclient/messages/__init__.py:1400 msgid "Unable to enable Livepatch: " msgstr "" -#: ../../uaclient/messages/__init__.py:1393 +#: ../../uaclient/messages/__init__.py:1402 msgid "Disabling Livepatch prior to re-attach with new token" msgstr "" -#: ../../uaclient/messages/__init__.py:1396 +#: ../../uaclient/messages/__init__.py:1405 msgid "Livepatch support requires a system reboot across LTS upgrade." msgstr "" -#: ../../uaclient/messages/__init__.py:1399 -#: ../../uaclient/messages/__init__.py:1412 +#: ../../uaclient/messages/__init__.py:1408 +#: ../../uaclient/messages/__init__.py:1421 msgid "Real-time kernel" msgstr "" -#: ../../uaclient/messages/__init__.py:1401 +#: ../../uaclient/messages/__init__.py:1410 msgid "Ubuntu kernel with PREEMPT_RT patches integrated" msgstr "" -#: ../../uaclient/messages/__init__.py:1404 +#: ../../uaclient/messages/__init__.py:1413 msgid "" "The Real-time kernel is an Ubuntu kernel with PREEMPT_RT patches integrated. " "It\n" @@ -2144,27 +2152,35 @@ "Livepatch." 
msgstr "" -#: ../../uaclient/messages/__init__.py:1414 +#: ../../uaclient/messages/__init__.py:1423 msgid "Generic version of the RT kernel (default)" msgstr "" -#: ../../uaclient/messages/__init__.py:1416 +#: ../../uaclient/messages/__init__.py:1425 msgid "Real-time NVIDIA Tegra Kernel" msgstr "" -#: ../../uaclient/messages/__init__.py:1418 +#: ../../uaclient/messages/__init__.py:1427 msgid "RT kernel optimized for NVIDIA Tegra platform" msgstr "" -#: ../../uaclient/messages/__init__.py:1420 +#: ../../uaclient/messages/__init__.py:1429 +msgid "Raspberry Pi Real-time for Pi5/Pi4" +msgstr "" + +#: ../../uaclient/messages/__init__.py:1431 +msgid "24.04 Real-time kernel optimised for Raspberry Pi" +msgstr "" + +#: ../../uaclient/messages/__init__.py:1433 msgid "Real-time Intel IOTG Kernel" msgstr "" -#: ../../uaclient/messages/__init__.py:1422 +#: ../../uaclient/messages/__init__.py:1435 msgid "RT kernel optimized for Intel IOTG platform" msgstr "" -#: ../../uaclient/messages/__init__.py:1425 +#: ../../uaclient/messages/__init__.py:1438 #, python-brace-format msgid "" "The Real-time kernel is an Ubuntu kernel with PREEMPT_RT patches " @@ -2177,7 +2193,7 @@ "Do you want to continue? [ default = Yes ]: (Y/n) " msgstr "" -#: ../../uaclient/messages/__init__.py:1436 +#: ../../uaclient/messages/__init__.py:1449 msgid "" "This will remove the boot order preference for the Real-time kernel and\n" "disable updates to the Real-time kernel.\n" @@ -2194,15 +2210,15 @@ "Are you sure? (y/N) " msgstr "" -#: ../../uaclient/messages/__init__.py:1452 +#: ../../uaclient/messages/__init__.py:1465 msgid "ROS ESM Security Updates" msgstr "" -#: ../../uaclient/messages/__init__.py:1453 +#: ../../uaclient/messages/__init__.py:1466 msgid "Security Updates for the Robot Operating System" msgstr "" -#: ../../uaclient/messages/__init__.py:1455 +#: ../../uaclient/messages/__init__.py:1468 #, python-brace-format msgid "" "ros provides access to a private PPA which includes security-related " @@ -2215,15 +2231,15 @@ "{url}" msgstr "" -#: ../../uaclient/messages/__init__.py:1464 +#: ../../uaclient/messages/__init__.py:1477 msgid "ROS ESM All Updates" msgstr "" -#: ../../uaclient/messages/__init__.py:1466 +#: ../../uaclient/messages/__init__.py:1479 msgid "All Updates for the Robot Operating System" msgstr "" -#: ../../uaclient/messages/__init__.py:1469 +#: ../../uaclient/messages/__init__.py:1482 #, python-brace-format msgid "" "ros-updates provides access to a private PPA that includes non-security-" @@ -2236,14 +2252,14 @@ "{url}" msgstr "" -#: ../../uaclient/messages/__init__.py:1540 +#: ../../uaclient/messages/__init__.py:1553 msgid "" "Unexpected error(s) occurred.\n" "For more details, see the log: /var/log/ubuntu-advantage.log\n" "To file a bug run: ubuntu-bug ubuntu-advantage-tools" msgstr "" -#: ../../uaclient/messages/__init__.py:1550 +#: ../../uaclient/messages/__init__.py:1563 #, python-brace-format msgid "" "Failed to access URL: {url}\n" @@ -2251,7 +2267,7 @@ "Please install \"ca-certificates\" and try again." msgstr "" -#: ../../uaclient/messages/__init__.py:1560 +#: ../../uaclient/messages/__init__.py:1573 #, python-brace-format msgid "" "Failed to access URL: {url}\n" @@ -2259,209 +2275,216 @@ "Please check your openssl configuration." 
msgstr "" -#: ../../uaclient/messages/__init__.py:1569 +#: ../../uaclient/messages/__init__.py:1582 #, python-brace-format msgid "Ignoring unknown argument '{arg}'" msgstr "" -#: ../../uaclient/messages/__init__.py:1575 +#: ../../uaclient/messages/__init__.py:1588 #, python-brace-format msgid "" "A new version of the client is available: {version}. Please upgrade to the " "latest version to get the new features and bug fixes." msgstr "" -#: ../../uaclient/messages/__init__.py:1582 +#: ../../uaclient/messages/__init__.py:1595 #, python-brace-format msgid "{title} does not support being enabled with --access-only" msgstr "" -#: ../../uaclient/messages/__init__.py:1587 +#: ../../uaclient/messages/__init__.py:1600 #, python-brace-format msgid "{title} does not support being disabled with --purge" msgstr "" -#: ../../uaclient/messages/__init__.py:1593 +#: ../../uaclient/messages/__init__.py:1606 #, python-brace-format msgid "Cannot disable dependent service: {required_service}{error}" msgstr "" -#: ../../uaclient/messages/__init__.py:1600 +#: ../../uaclient/messages/__init__.py:1613 #, python-brace-format msgid "" "Cannot disable {service_being_disabled} when {dependent_service} is " "enabled.\n" msgstr "" -#: ../../uaclient/messages/__init__.py:1608 +#: ../../uaclient/messages/__init__.py:1621 #, python-brace-format msgid "Cannot disable {entitlement_name} with purge: no origin value defined" msgstr "" -#: ../../uaclient/messages/__init__.py:1615 +#: ../../uaclient/messages/__init__.py:1628 #, python-brace-format msgid "Cannot enable required service: {service}{error}" msgstr "" -#: ../../uaclient/messages/__init__.py:1620 +#: ../../uaclient/messages/__init__.py:1633 #, python-brace-format msgid "" "Cannot enable {service_being_enabled} when {required_service} is disabled.\n" msgstr "" -#: ../../uaclient/messages/__init__.py:1628 +#: ../../uaclient/messages/__init__.py:1641 #, python-brace-format msgid "" "Cannot enable {service_being_enabled} when {incompatible_service} is enabled." msgstr "" -#: ../../uaclient/messages/__init__.py:1636 +#: ../../uaclient/messages/__init__.py:1649 #, python-brace-format msgid "Cannot install {title} on a container." msgstr "" -#: ../../uaclient/messages/__init__.py:1639 +#: ../../uaclient/messages/__init__.py:1652 #, python-brace-format msgid "{title} is not configured" msgstr "" -#: ../../uaclient/messages/__init__.py:1644 +#: ../../uaclient/messages/__init__.py:1657 #, python-brace-format msgid "" "The {service} service is not enabled because the {package} package is\n" "not installed." 
msgstr "" -#: ../../uaclient/messages/__init__.py:1650 +#: ../../uaclient/messages/__init__.py:1663 #, python-brace-format msgid "{title} is active" msgstr "" -#: ../../uaclient/messages/__init__.py:1654 +#: ../../uaclient/messages/__init__.py:1667 #, python-brace-format msgid "{title} does not have an aptURL directive" msgstr "" -#: ../../uaclient/messages/__init__.py:1659 +#: ../../uaclient/messages/__init__.py:1672 #, python-brace-format msgid "" "{title} is not currently enabled\n" "See: sudo pro status" msgstr "" -#: ../../uaclient/messages/__init__.py:1666 +#: ../../uaclient/messages/__init__.py:1679 +#, python-brace-format +msgid "" +"Disabling {title} with pro is not supported.\n" +"See: sudo pro status" +msgstr "" + +#: ../../uaclient/messages/__init__.py:1686 #, python-brace-format msgid "" "{title} is already enabled.\n" "See: sudo pro status" msgstr "" -#: ../../uaclient/messages/__init__.py:1673 +#: ../../uaclient/messages/__init__.py:1693 #, python-brace-format msgid "" "This subscription is not entitled to {{title}}\n" "View your subscription at: {url}" msgstr "" -#: ../../uaclient/messages/__init__.py:1679 +#: ../../uaclient/messages/__init__.py:1699 #, python-brace-format msgid "{title} is not entitled" msgstr "" -#: ../../uaclient/messages/__init__.py:1685 +#: ../../uaclient/messages/__init__.py:1705 #, python-brace-format msgid "" "{title} is not available for kernel {kernel}.\n" "Minimum kernel version required: {min_kernel}." msgstr "" -#: ../../uaclient/messages/__init__.py:1693 +#: ../../uaclient/messages/__init__.py:1713 #, python-brace-format msgid "" "{title} is not available for kernel {kernel}.\n" "Supported flavors are: {supported_kernels}." msgstr "" -#: ../../uaclient/messages/__init__.py:1701 +#: ../../uaclient/messages/__init__.py:1721 #, python-brace-format msgid "{title} is not available for Ubuntu {series}." msgstr "" -#: ../../uaclient/messages/__init__.py:1708 +#: ../../uaclient/messages/__init__.py:1728 #, python-brace-format msgid "" "{title} is not available for platform {arch}.\n" "Supported platforms are: {supported_arches}." msgstr "" -#: ../../uaclient/messages/__init__.py:1716 +#: ../../uaclient/messages/__init__.py:1736 #, python-brace-format msgid "" "{title} is not available for CPU vendor {vendor}.\n" "Supported CPU vendors are: {supported_vendors}." msgstr "" -#: ../../uaclient/messages/__init__.py:1723 +#: ../../uaclient/messages/__init__.py:1743 msgid "no entitlement affordances checked" msgstr "" -#: ../../uaclient/messages/__init__.py:1729 +#: ../../uaclient/messages/__init__.py:1749 #, python-brace-format msgid "" "Ubuntu {{series}} does not provide {{cloud}} optimized FIPS kernel\n" "For help see: {url}" msgstr "" -#: ../../uaclient/messages/__init__.py:1739 +#: ../../uaclient/messages/__init__.py:1759 #, python-brace-format msgid "Cannot enable {fips} when {fips_updates} is enabled." msgstr "" -#: ../../uaclient/messages/__init__.py:1742 +#: ../../uaclient/messages/__init__.py:1762 #, python-brace-format msgid "{file_name} is not set to 1" msgstr "" -#: ../../uaclient/messages/__init__.py:1746 +#: ../../uaclient/messages/__init__.py:1766 #, python-brace-format msgid "Cannot enable {fips} because {fips_updates} was once enabled." msgstr "" -#: ../../uaclient/messages/__init__.py:1751 +#: ../../uaclient/messages/__init__.py:1771 msgid "" "FIPS cannot be enabled if FIPS Updates has ever been enabled because FIPS " "Updates installs security patches that aren't officially certified." 
msgstr "" -#: ../../uaclient/messages/__init__.py:1759 +#: ../../uaclient/messages/__init__.py:1779 msgid "" "FIPS Updates cannot be enabled if FIPS is enabled. FIPS Updates installs " "security patches that aren't officially certified." msgstr "" -#: ../../uaclient/messages/__init__.py:1768 +#: ../../uaclient/messages/__init__.py:1788 msgid "" "Livepatch cannot be enabled while running the official FIPS certified " "kernel. If you would like a FIPS compliant kernel with additional bug fixes " "and security updates, you can use the FIPS Updates service with Livepatch." msgstr "" -#: ../../uaclient/messages/__init__.py:1776 +#: ../../uaclient/messages/__init__.py:1796 msgid "canonical-livepatch snap is not installed." msgstr "" -#: ../../uaclient/messages/__init__.py:1780 +#: ../../uaclient/messages/__init__.py:1800 msgid "Cannot enable Livepatch when FIPS is enabled." msgstr "" -#: ../../uaclient/messages/__init__.py:1785 +#: ../../uaclient/messages/__init__.py:1805 msgid "" "The running kernel has reached the end of its active livepatch window.\n" "Please upgrade the kernel with apt and reboot for continued livepatch " "support." msgstr "" -#: ../../uaclient/messages/__init__.py:1793 +#: ../../uaclient/messages/__init__.py:1813 #, python-brace-format msgid "" "The current kernel ({{version}}, {{arch}}) has reached the end of its " @@ -2471,7 +2494,7 @@ "this warning." msgstr "" -#: ../../uaclient/messages/__init__.py:1802 +#: ../../uaclient/messages/__init__.py:1822 #, python-brace-format msgid "" "The current kernel ({{version}}, {{arch}}) is not supported by livepatch.\n" @@ -2480,80 +2503,75 @@ "this warning." msgstr "" -#: ../../uaclient/messages/__init__.py:1812 +#: ../../uaclient/messages/__init__.py:1832 msgid "canonical-livepatch status didn't finish successfully" msgstr "" -#: ../../uaclient/messages/__init__.py:1818 +#: ../../uaclient/messages/__init__.py:1838 +#, python-brace-format +msgid "" +"Error running canonical-livepatch status:\n" +"{livepatch_error}" +msgstr "" + +#: ../../uaclient/messages/__init__.py:1847 msgid "" "Realtime and FIPS require different kernels, so you cannot enable both at " "the same time." msgstr "" -#: ../../uaclient/messages/__init__.py:1825 +#: ../../uaclient/messages/__init__.py:1854 msgid "" "Realtime and FIPS Updates require different kernels, so you cannot enable " "both at the same time." msgstr "" -#: ../../uaclient/messages/__init__.py:1832 +#: ../../uaclient/messages/__init__.py:1861 msgid "Livepatch is not currently supported for the Real-time kernel." msgstr "" -#: ../../uaclient/messages/__init__.py:1837 +#: ../../uaclient/messages/__init__.py:1866 #, python-brace-format msgid "{service} cannot be enabled together with {variant}" msgstr "" -#: ../../uaclient/messages/__init__.py:1841 +#: ../../uaclient/messages/__init__.py:1870 msgid "Cannot install Real-time kernel on a container." 
msgstr "" -#: ../../uaclient/messages/__init__.py:1846 +#: ../../uaclient/messages/__init__.py:1875 msgid "apt-daily.timer jobs are not running" msgstr "" -#: ../../uaclient/messages/__init__.py:1850 +#: ../../uaclient/messages/__init__.py:1879 #, python-brace-format msgid "{cfg_name} is empty" msgstr "" -#: ../../uaclient/messages/__init__.py:1854 +#: ../../uaclient/messages/__init__.py:1883 #, python-brace-format msgid "{cfg_name} is turned off" msgstr "" -#: ../../uaclient/messages/__init__.py:1858 +#: ../../uaclient/messages/__init__.py:1887 msgid "unattended-upgrades package is not installed" msgstr "" -#: ../../uaclient/messages/__init__.py:1863 -msgid "lanscape-client is not installed" -msgstr "" - -#: ../../uaclient/messages/__init__.py:1868 -msgid "" -"Landscape is installed but not configured.\n" -"Run `sudo landscape-config` to set it up, or run `sudo pro disable landscape`" -msgstr "" - -#: ../../uaclient/messages/__init__.py:1877 +#: ../../uaclient/messages/__init__.py:1893 msgid "" "Landscape is installed and configured but not registered.\n" "Run `sudo landscape-config` to register, or run `sudo pro disable landscape`" msgstr "" -#: ../../uaclient/messages/__init__.py:1886 -msgid "" -"Landscape is installed and configured and registered but not running.\n" -"Run `sudo landscape-config` to start it, or run `sudo pro disable landscape`" +#: ../../uaclient/messages/__init__.py:1902 +msgid "landscape-client is either not installed or installed but disabled." msgstr "" -#: ../../uaclient/messages/__init__.py:1894 +#: ../../uaclient/messages/__init__.py:1907 msgid "landscape-config command failed" msgstr "" -#: ../../uaclient/messages/__init__.py:1900 +#: ../../uaclient/messages/__init__.py:1913 #, python-brace-format msgid "" "Error: issue \"{issue_id}\" is not recognized.\n" @@ -2563,28 +2581,28 @@ "USNs should follow the pattern USN-nnnn." msgstr "" -#: ../../uaclient/messages/__init__.py:1914 +#: ../../uaclient/messages/__init__.py:1927 msgid "Another process is running APT." msgstr "" -#: ../../uaclient/messages/__init__.py:1920 +#: ../../uaclient/messages/__init__.py:1933 #, python-brace-format msgid "" "APT update failed to read APT config for the following:\n" "{failed_repos}" msgstr "" -#: ../../uaclient/messages/__init__.py:1950 +#: ../../uaclient/messages/__init__.py:1963 #, python-brace-format msgid "Invalid APT credentials provided for {repo}" msgstr "" -#: ../../uaclient/messages/__init__.py:1955 +#: ../../uaclient/messages/__init__.py:1968 #, python-brace-format msgid "Timeout trying to access APT repository at {repo}" msgstr "" -#: ../../uaclient/messages/__init__.py:1961 +#: ../../uaclient/messages/__init__.py:1974 #, python-brace-format msgid "" "Unexpected APT error.\n" @@ -2592,106 +2610,107 @@ "See /var/log/ubuntu-advantage.log" msgstr "" -#: ../../uaclient/messages/__init__.py:1971 +#: ../../uaclient/messages/__init__.py:1984 #, python-brace-format msgid "" "Cannot validate credentials for APT repo. Timeout after {seconds} seconds " "trying to reach {repo}." 
msgstr "" -#: ../../uaclient/messages/__init__.py:1978 +#: ../../uaclient/messages/__init__.py:1991 #, python-brace-format msgid "snap {snap} is not installed or doesn't exist" msgstr "" -#: ../../uaclient/messages/__init__.py:1983 +#: ../../uaclient/messages/__init__.py:1996 #, python-brace-format msgid "" "Unexpected SNAPD API error\n" "{error}" msgstr "" -#: ../../uaclient/messages/__init__.py:1987 +#: ../../uaclient/messages/__init__.py:2000 msgid "Could not reach the SNAPD API" msgstr "" -#: ../../uaclient/messages/__init__.py:1991 +#: ../../uaclient/messages/__init__.py:2004 msgid "Failed to install snapd on the system" msgstr "" -#: ../../uaclient/messages/__init__.py:1996 +#: ../../uaclient/messages/__init__.py:2009 #, python-brace-format msgid "Unable to install Livepatch client: {error_msg}" msgstr "" -#: ../../uaclient/messages/__init__.py:2001 +#: ../../uaclient/messages/__init__.py:2014 #, python-brace-format msgid "\"{proxy}\" is not working. Not setting as proxy." msgstr "" -#: ../../uaclient/messages/__init__.py:2006 +#: ../../uaclient/messages/__init__.py:2019 #, python-brace-format msgid "\"{proxy}\" is not a valid url. Not setting as proxy." msgstr "" -#: ../../uaclient/messages/__init__.py:2012 +#: ../../uaclient/messages/__init__.py:2025 msgid "" "To use an HTTPS proxy for HTTPS connections, please install pycurl with `apt " "install python3-pycurl`" msgstr "" -#: ../../uaclient/messages/__init__.py:2018 +#: ../../uaclient/messages/__init__.py:2031 #, python-brace-format msgid "PycURL Error: {e}" msgstr "" -#: ../../uaclient/messages/__init__.py:2022 +#: ../../uaclient/messages/__init__.py:2035 msgid "Proxy authentication failed" msgstr "" -#: ../../uaclient/messages/__init__.py:2028 +#: ../../uaclient/messages/__init__.py:2041 +#, python-brace-format msgid "" -"Failed to connect to authentication server\n" -"Check your Internet connection and try again." +"Failed to connect to {url}\n" +"{cause_error}\n" msgstr "" -#: ../../uaclient/messages/__init__.py:2035 +#: ../../uaclient/messages/__init__.py:2049 #, python-brace-format msgid "Error connecting to {url}: {code} {body}" msgstr "" -#: ../../uaclient/messages/__init__.py:2041 +#: ../../uaclient/messages/__init__.py:2055 #, python-brace-format msgid "" "Cannot {operation} unknown service '{invalid_service}'.\n" "{service_msg}" msgstr "" -#: ../../uaclient/messages/__init__.py:2050 +#: ../../uaclient/messages/__init__.py:2064 #, python-brace-format msgid "" "This machine is already attached to '{account_name}'\n" "To use a different subscription first run: sudo pro detach." msgstr "" -#: ../../uaclient/messages/__init__.py:2057 +#: ../../uaclient/messages/__init__.py:2071 #, python-brace-format msgid "Failed to attach machine. See {url}" msgstr "" -#: ../../uaclient/messages/__init__.py:2064 +#: ../../uaclient/messages/__init__.py:2078 #, python-brace-format msgid "" "Error while reading {config_name}:\n" "{error}" msgstr "" -#: ../../uaclient/messages/__init__.py:2069 +#: ../../uaclient/messages/__init__.py:2083 #, python-brace-format msgid "Invalid token. See {url}" msgstr "" -#: ../../uaclient/messages/__init__.py:2075 +#: ../../uaclient/messages/__init__.py:2089 #, python-brace-format msgid "" "Attach denied:\n" @@ -2699,7 +2718,7 @@ "Visit {url} to manage contract tokens." msgstr "" -#: ../../uaclient/messages/__init__.py:2085 +#: ../../uaclient/messages/__init__.py:2099 #, python-brace-format msgid "" "Attach denied:\n" @@ -2707,7 +2726,7 @@ "Visit {url} to manage contract tokens." 
msgstr "" -#: ../../uaclient/messages/__init__.py:2095 +#: ../../uaclient/messages/__init__.py:2109 #, python-brace-format msgid "" "Attach denied:\n" @@ -2715,41 +2734,41 @@ "Visit {url} to manage contract tokens." msgstr "" -#: ../../uaclient/messages/__init__.py:2105 +#: ../../uaclient/messages/__init__.py:2119 #, python-brace-format msgid "Expired token or contract. To obtain a new token visit: {url}" msgstr "" -#: ../../uaclient/messages/__init__.py:2112 +#: ../../uaclient/messages/__init__.py:2126 msgid "The magic attach token is already activated." msgstr "" -#: ../../uaclient/messages/__init__.py:2118 +#: ../../uaclient/messages/__init__.py:2132 msgid "The magic attach token is invalid, has expired or never existed" msgstr "" -#: ../../uaclient/messages/__init__.py:2124 +#: ../../uaclient/messages/__init__.py:2138 msgid "Service unavailable, please try again later." msgstr "" -#: ../../uaclient/messages/__init__.py:2129 +#: ../../uaclient/messages/__init__.py:2143 #, python-brace-format msgid "This attach flow does not support {param} with value: {value}" msgstr "" -#: ../../uaclient/messages/__init__.py:2135 +#: ../../uaclient/messages/__init__.py:2149 #, python-brace-format msgid "Ubuntu Pro server provided no aptURL directive for {entitlement_name}" msgstr "" -#: ../../uaclient/messages/__init__.py:2143 +#: ../../uaclient/messages/__init__.py:2157 #, python-brace-format msgid "" "This machine is not attached to an Ubuntu Pro subscription.\n" "See {url}" msgstr "" -#: ../../uaclient/messages/__init__.py:2152 +#: ../../uaclient/messages/__init__.py:2166 #, python-brace-format msgid "" "To use '{{valid_service}}' you need an Ubuntu Pro subscription\n" @@ -2757,59 +2776,59 @@ "See {url}" msgstr "" -#: ../../uaclient/messages/__init__.py:2168 +#: ../../uaclient/messages/__init__.py:2182 #, python-brace-format msgid "could not find entitlement named \"{entitlement_name}\"" msgstr "" -#: ../../uaclient/messages/__init__.py:2173 +#: ../../uaclient/messages/__init__.py:2187 msgid "failed to enable some services" msgstr "" -#: ../../uaclient/messages/__init__.py:2179 +#: ../../uaclient/messages/__init__.py:2193 msgid "Failed to enable default services, check: sudo pro status" msgstr "" -#: ../../uaclient/messages/__init__.py:2187 +#: ../../uaclient/messages/__init__.py:2201 msgid "Something went wrong during the attach process. Check the logs." msgstr "" -#: ../../uaclient/messages/__init__.py:2195 +#: ../../uaclient/messages/__init__.py:2209 #, python-brace-format msgid "Ubuntu Pro server provided no aptKey directive for {entitlement_name}" msgstr "" -#: ../../uaclient/messages/__init__.py:2202 +#: ../../uaclient/messages/__init__.py:2216 #, python-brace-format msgid "Ubuntu Pro server provided no suites directive for {entitlement_name}" msgstr "" -#: ../../uaclient/messages/__init__.py:2209 +#: ../../uaclient/messages/__init__.py:2223 #, python-brace-format msgid "" "Cannot setup apt pin. 
Empty apt repo origin value for {entitlement_name}" msgstr "" -#: ../../uaclient/messages/__init__.py:2218 +#: ../../uaclient/messages/__init__.py:2232 #, python-brace-format msgid "Could not determine contract delta service type {orig} {new}" msgstr "" -#: ../../uaclient/messages/__init__.py:2222 +#: ../../uaclient/messages/__init__.py:2236 #, python-brace-format msgid "" "Error on Pro Image:\n" "{error_msg}" msgstr "" -#: ../../uaclient/messages/__init__.py:2228 +#: ../../uaclient/messages/__init__.py:2242 #, python-brace-format msgid "" "An error occurred while talking the the cloud metadata service: {code} - " "{body}" msgstr "" -#: ../../uaclient/messages/__init__.py:2235 +#: ../../uaclient/messages/__init__.py:2249 #, python-brace-format msgid "" "Failed to attach machine\n" @@ -2817,41 +2836,41 @@ "For more information, see {url}" msgstr "" -#: ../../uaclient/messages/__init__.py:2245 +#: ../../uaclient/messages/__init__.py:2259 #, python-brace-format msgid "No valid AWS IMDS endpoint discovered at addresses: {addresses}" msgstr "" -#: ../../uaclient/messages/__init__.py:2252 +#: ../../uaclient/messages/__init__.py:2266 msgid "Unable to determine cloud platform." msgstr "" -#: ../../uaclient/messages/__init__.py:2260 +#: ../../uaclient/messages/__init__.py:2274 #, python-brace-format msgid "" "Auto-attach image support is not available on this image\n" "See: {url}" msgstr "" -#: ../../uaclient/messages/__init__.py:2269 +#: ../../uaclient/messages/__init__.py:2283 #, python-brace-format msgid "" "Auto-attach image support is not available on {{cloud_type}}\n" "See: {url}" msgstr "" -#: ../../uaclient/messages/__init__.py:2277 +#: ../../uaclient/messages/__init__.py:2291 #, python-brace-format msgid "{file_name} is not valid {file_format}" msgstr "" -#: ../../uaclient/messages/__init__.py:2283 +#: ../../uaclient/messages/__init__.py:2297 #, python-brace-format msgid "" "Could not parse /etc/os-release VERSION: {orig_ver} (modified to {mod_ver})" msgstr "" -#: ../../uaclient/messages/__init__.py:2291 +#: ../../uaclient/messages/__init__.py:2305 #, python-brace-format msgid "" "Could not extract series information from /etc/os-release.\n" @@ -2859,7 +2878,7 @@ "and the VERSION_CODENAME information is not present" msgstr "" -#: ../../uaclient/messages/__init__.py:2301 +#: ../../uaclient/messages/__init__.py:2315 #, python-brace-format msgid "" "There is a corrupted lock file in the system. To continue, please remove it\n" @@ -2868,189 +2887,189 @@ "$ sudo rm {lock_file_path}" msgstr "" -#: ../../uaclient/messages/__init__.py:2310 +#: ../../uaclient/messages/__init__.py:2324 #, python-brace-format msgid "{source} returned invalid json: {out}" msgstr "" -#: ../../uaclient/messages/__init__.py:2316 +#: ../../uaclient/messages/__init__.py:2330 #, python-brace-format msgid "" "Invalid value for {path_to_value} in /etc/ubuntu-advantage/uaclient.conf. " "Expected {expected_value}, found {value}." msgstr "" -#: ../../uaclient/messages/__init__.py:2325 +#: ../../uaclient/messages/__init__.py:2339 #, python-brace-format msgid "" "Cannot set {key} to {value}: for interval must be a positive integer." msgstr "" -#: ../../uaclient/messages/__init__.py:2332 +#: ../../uaclient/messages/__init__.py:2346 #, python-brace-format msgid "Invalid url in config. 
{key}: {value}" msgstr "" -#: ../../uaclient/messages/__init__.py:2337 +#: ../../uaclient/messages/__init__.py:2351 #, python-brace-format msgid "Could not find yaml file: {filepath}" msgstr "" -#: ../../uaclient/messages/__init__.py:2343 +#: ../../uaclient/messages/__init__.py:2357 msgid "" "Error: Setting global apt proxy and pro scoped apt proxy\n" "at the same time is unsupported.\n" "Cancelling config process operation.\n" msgstr "" -#: ../../uaclient/messages/__init__.py:2353 +#: ../../uaclient/messages/__init__.py:2367 msgid "Can't load the distro-info database." msgstr "" -#: ../../uaclient/messages/__init__.py:2358 +#: ../../uaclient/messages/__init__.py:2372 #, python-brace-format msgid "Can't find series {series} in the distro-info database." msgstr "" -#: ../../uaclient/messages/__init__.py:2363 +#: ../../uaclient/messages/__init__.py:2377 #, python-brace-format msgid "Error: Cannot use {option1} together with {option2}." msgstr "" -#: ../../uaclient/messages/__init__.py:2367 +#: ../../uaclient/messages/__init__.py:2381 #, python-brace-format msgid "No help available for '{name}'" msgstr "" -#: ../../uaclient/messages/__init__.py:2373 +#: ../../uaclient/messages/__init__.py:2387 #, python-brace-format msgid "" "Error: issue \"{issue}\" is not recognized.\n" "Usage: \"pro fix CVE-yyyy-nnnn\" or \"pro fix USN-nnnn\"" msgstr "" -#: ../../uaclient/messages/__init__.py:2379 +#: ../../uaclient/messages/__init__.py:2393 #, python-brace-format msgid "{arg} must be one of: {choices}" msgstr "" -#: ../../uaclient/messages/__init__.py:2384 +#: ../../uaclient/messages/__init__.py:2398 #, python-brace-format msgid "Expected {expected} but found: {actual}" msgstr "" -#: ../../uaclient/messages/__init__.py:2388 +#: ../../uaclient/messages/__init__.py:2402 msgid "Unable to process uaclient.conf" msgstr "" -#: ../../uaclient/messages/__init__.py:2393 +#: ../../uaclient/messages/__init__.py:2407 msgid "Unable to refresh your subscription" msgstr "" -#: ../../uaclient/messages/__init__.py:2398 +#: ../../uaclient/messages/__init__.py:2412 msgid "Unable to update Ubuntu Pro related APT and MOTD messages." msgstr "" -#: ../../uaclient/messages/__init__.py:2404 +#: ../../uaclient/messages/__init__.py:2418 msgid "json formatted response requires --assume-yes flag." msgstr "" -#: ../../uaclient/messages/__init__.py:2412 +#: ../../uaclient/messages/__init__.py:2426 msgid "" "Do not pass the TOKEN arg if you are using --attach-config.\n" "Include the token in the attach-config file instead.\n" " " msgstr "" -#: ../../uaclient/messages/__init__.py:2421 +#: ../../uaclient/messages/__init__.py:2435 msgid "Cannot provide both --args and --data at the same time" msgstr "" -#: ../../uaclient/messages/__init__.py:2427 +#: ../../uaclient/messages/__init__.py:2441 #, python-brace-format msgid "Unable to perform: {lock_request}.\n" msgstr "" -#: ../../uaclient/messages/__init__.py:2436 +#: ../../uaclient/messages/__init__.py:2450 msgid "This command must be run as root (try using sudo)." msgstr "" -#: ../../uaclient/messages/__init__.py:2441 +#: ../../uaclient/messages/__init__.py:2455 #, python-brace-format msgid "Metadata for {issue} is invalid. Error: {error_msg}." msgstr "" -#: ../../uaclient/messages/__init__.py:2448 +#: ../../uaclient/messages/__init__.py:2462 #, python-brace-format msgid "Error: {issue_id} not found." msgstr "" -#: ../../uaclient/messages/__init__.py:2452 +#: ../../uaclient/messages/__init__.py:2466 #, python-brace-format msgid "GPG key '{keyfile}' not found." 
msgstr "" -#: ../../uaclient/messages/__init__.py:2457 +#: ../../uaclient/messages/__init__.py:2471 #, python-brace-format msgid "'{endpoint}' is not a valid endpoint" msgstr "" -#: ../../uaclient/messages/__init__.py:2462 +#: ../../uaclient/messages/__init__.py:2476 #, python-brace-format msgid "Missing argument '{arg}' for endpoint {endpoint}" msgstr "" -#: ../../uaclient/messages/__init__.py:2467 +#: ../../uaclient/messages/__init__.py:2481 #, python-brace-format msgid "{endpoint} accepts no arguments" msgstr "" -#: ../../uaclient/messages/__init__.py:2472 +#: ../../uaclient/messages/__init__.py:2486 #, python-brace-format msgid "" "Error parsing API json data parameter:\n" "{data}" msgstr "" -#: ../../uaclient/messages/__init__.py:2477 +#: ../../uaclient/messages/__init__.py:2491 #, python-brace-format msgid "'{arg}' is not formatted as 'key=value'" msgstr "" -#: ../../uaclient/messages/__init__.py:2482 +#: ../../uaclient/messages/__init__.py:2496 #, python-brace-format msgid "Unable to determine version: {error_msg}" msgstr "" -#: ../../uaclient/messages/__init__.py:2487 +#: ../../uaclient/messages/__init__.py:2501 msgid "features.disable_auto_attach set in config" msgstr "" -#: ../../uaclient/messages/__init__.py:2492 +#: ../../uaclient/messages/__init__.py:2506 #, python-brace-format msgid "Unable to determine unattended-upgrades status: {error_msg}" msgstr "" -#: ../../uaclient/messages/__init__.py:2498 +#: ../../uaclient/messages/__init__.py:2512 #, python-brace-format msgid "Expected value with type {expected_type} but got type: {got_type}" msgstr "" -#: ../../uaclient/messages/__init__.py:2504 +#: ../../uaclient/messages/__init__.py:2518 #, python-brace-format msgid "" "Got value with incorrect type at index {index}:\n" "{nested_msg}" msgstr "" -#: ../../uaclient/messages/__init__.py:2510 +#: ../../uaclient/messages/__init__.py:2524 #, python-brace-format msgid "" "Got value with incorrect type for field \"{key}\":\n" "{nested_msg}" msgstr "" -#: ../../uaclient/messages/__init__.py:2517 +#: ../../uaclient/messages/__init__.py:2531 #, python-brace-format msgid "Value provided was not found in {enum_class}'s allowed: value: {values}" msgstr "" diff -Nru ubuntu-advantage-tools-30~23.10/debian/rules ubuntu-advantage-tools-31.2~23.10/debian/rules --- ubuntu-advantage-tools-30~23.10/debian/rules 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/rules 2024-02-14 15:37:46.000000000 +0000 @@ -11,17 +11,13 @@ # the flag --supported-esm. Those versions are 0.18 and 0.14build1, # respectively. So we set specific distro-info requirements for bionic and later # versus Xenial to make those contraints applicable on each series. 
-DISTRO_INFO_DEPS="distro-info (>= 0.18ubuntu0.18.04.1)," +DISTRO_INFO_DEPS=distro-info (>= 0.18ubuntu0.18.04.1), ifeq (${VERSION_ID},"16.04") -APT_PKG_DEPS="apt (>= 1.2.32), apt-transport-https (>= 1.2.32), apt-utils (>= 1.2.32), libapt-inst2.0 (>= 1.2.32), libapt-pkg5.0 (>= 1.2.32)," -DISTRO_INFO_DEPS="distro-info (>= 0.14ubuntu0.2)," +APT_PKG_DEPS=apt (>= 1.2.32), apt-transport-https (>= 1.2.32), apt-utils (>= 1.2.32), libapt-inst2.0 (>= 1.2.32), libapt-pkg5.0 (>= 1.2.32), +DISTRO_INFO_DEPS=distro-info (>= 0.14ubuntu0.2), else ifeq (${VERSION_ID},"18.04") -APT_PKG_DEPS="apt (>= 1.6.11), apt-utils (>= 1.6.11), libapt-inst2.0 (>= 1.6.11), libapt-pkg5.0 (>= 1.6.11)," -else ifeq (${VERSION_ID},"19.04") -APT_PKG_DEPS="apt (>= 1.8.1), apt-utils (>= 1.8.1), libapt-inst2.0 (>= 1.8.1), libapt-pkg5.0 (>= 1.8.1)," -else ifeq (${VERSION_ID},"19.10") -APT_PKG_DEPS="apt (>= 1.8.1), apt-utils (>= 1.8.1), libapt-pkg5.90 (>= 1.8.1)," +APT_PKG_DEPS=apt (>= 1.6.11), apt-utils (>= 1.6.11), libapt-inst2.0 (>= 1.6.11), libapt-pkg5.0 (>= 1.6.11), endif %: @@ -43,44 +39,54 @@ endif override_dh_gencontrol: - echo extra:Depends=$(APT_PKG_DEPS) $(DISTRO_INFO_DEPS) >> debian/ubuntu-advantage-tools.substvars + echo "extra:Depends=$(APT_PKG_DEPS) $(DISTRO_INFO_DEPS)" >> debian/ubuntu-pro-client.substvars dh_gencontrol override_dh_systemd_enable: - dh_systemd_enable -pubuntu-advantage-pro ua-auto-attach.service - dh_systemd_enable -pubuntu-advantage-tools ua-reboot-cmds.service - dh_systemd_enable -pubuntu-advantage-tools ua-timer.timer - dh_systemd_enable -pubuntu-advantage-tools ua-timer.service - dh_systemd_enable -pubuntu-advantage-tools ubuntu-advantage.service + dh_systemd_enable -pubuntu-pro-auto-attach ua-auto-attach.service + dh_systemd_enable -pubuntu-pro-client ua-reboot-cmds.service + dh_systemd_enable -pubuntu-pro-client ua-timer.timer + dh_systemd_enable -pubuntu-pro-client ua-timer.service + dh_systemd_enable -pubuntu-pro-client ubuntu-advantage.service ifeq (${VERSION_ID},"16.04") # Only enable cloud-id-shim on Xenial - dh_systemd_enable -pubuntu-advantage-tools ubuntu-advantage-cloud-id-shim.service + dh_systemd_enable -pubuntu-pro-client ubuntu-advantage-cloud-id-shim.service endif override_dh_systemd_start: - dh_systemd_start -pubuntu-advantage-tools ua-timer.timer - dh_systemd_start -pubuntu-advantage-tools ubuntu-advantage.service + dh_systemd_start -pubuntu-pro-client ua-timer.timer + dh_systemd_start -pubuntu-pro-client ubuntu-advantage.service override_dh_auto_install: - dh_auto_install --destdir=debian/ubuntu-advantage-tools + dh_auto_install --destdir=debian/ubuntu-pro-client + debian/jinja2_render debian/apparmor/ubuntu_pro_apt_news.jinja2 debian/apparmor/ubuntu_pro_apt_news ubuntu_codename=${UBUNTU_CODENAME} + # quick syntax check on the generated profile + apparmor_parser -K -T -Q debian/apparmor/ubuntu_pro_apt_news + install -D -m 644 $(CURDIR)/debian/apparmor/ubuntu_pro_apt_news $(CURDIR)/debian/ubuntu-pro-client/etc/apparmor.d/ubuntu_pro_apt_news + dh_apparmor -pubuntu-pro-client --profile-name=ubuntu_pro_apt_news + flist=$$(find $(CURDIR)/debian/ -type f -name version.py) && sed -i 's,@@PACKAGED_VERSION@@,$(DEB_VERSION),' $${flist:-did-not-find-version-py-for-replacement} # We install the conf file even on non-LTS version to avoid issues on upgrade scenarios - make -C apt-hook DESTDIR=$(CURDIR)/debian/ubuntu-advantage-tools install-conf - make -C apt-hook DESTDIR=$(CURDIR)/debian/ubuntu-advantage-tools install + make -C apt-hook DESTDIR=$(CURDIR)/debian/ubuntu-pro-client install-conf 
+ make -C apt-hook DESTDIR=$(CURDIR)/debian/ubuntu-pro-client install # We want to guarantee that we are not shipping any conftest files - find $(CURDIR)/debian/ubuntu-advantage-tools -type f -name conftest.py -delete + find $(CURDIR)/debian/ubuntu-pro-client -type f -name conftest.py -delete ifneq (${VERSION_ID},"16.04") # Only install cloud-id-shim on Xenial - rm $(CURDIR)/debian/ubuntu-advantage-tools/lib/systemd/system/ubuntu-advantage-cloud-id-shim.service + rm $(CURDIR)/debian/ubuntu-pro-client/lib/systemd/system/ubuntu-advantage-cloud-id-shim.service endif - # Move ua-auto-attach.service out to ubuntu-advantage-pro - mkdir -p debian/ubuntu-advantage-pro/lib/systemd/system - mv debian/ubuntu-advantage-tools/lib/systemd/system/ua-auto-attach.* debian/ubuntu-advantage-pro/lib/systemd/system - cd debian/ubuntu-advantage-tools + # Move ua-auto-attach.service out to ubuntu-pro-auto-attach + mkdir -p debian/ubuntu-pro-auto-attach/lib/systemd/system + mv debian/ubuntu-pro-client/lib/systemd/system/ua-auto-attach.* debian/ubuntu-pro-auto-attach/lib/systemd/system + + # move migration scripts from before the package rename into ubuntu-advantage-tools + mkdir -p debian/ubuntu-advantage-tools/usr/lib/ubuntu-advantage + mv debian/ubuntu-pro-client/usr/lib/ubuntu-advantage/migrate_user_config.py debian/ubuntu-advantage-tools/usr/lib/ubuntu-advantage + mv debian/ubuntu-pro-client/usr/lib/ubuntu-advantage/patch_status_json.py debian/ubuntu-advantage-tools/usr/lib/ubuntu-advantage override_dh_auto_clean: diff -Nru ubuntu-advantage-tools-30~23.10/debian/ubuntu-advantage-pro.lintian-overrides ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-advantage-pro.lintian-overrides --- ubuntu-advantage-tools-30~23.10/debian/ubuntu-advantage-pro.lintian-overrides 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-advantage-pro.lintian-overrides 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -# Avoid warning on wanted-by-target - -ubuntu-advantage-pro: systemd-service-file-refers-to-unusual-wantedby-target diff -Nru ubuntu-advantage-tools-30~23.10/debian/ubuntu-advantage-tools.bash-completion ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-advantage-tools.bash-completion --- ubuntu-advantage-tools-30~23.10/debian/ubuntu-advantage-tools.bash-completion 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-advantage-tools.bash-completion 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -tools/ua.bash ua -tools/ua.bash pro diff -Nru ubuntu-advantage-tools-30~23.10/debian/ubuntu-advantage-tools.links ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-advantage-tools.links --- ubuntu-advantage-tools-30~23.10/debian/ubuntu-advantage-tools.links 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-advantage-tools.links 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ -usr/bin/ubuntu-advantage usr/bin/ua -usr/bin/ubuntu-advantage usr/bin/pro -usr/share/man/man1/ubuntu-advantage.1 usr/share/man/man1/ua.1 -usr/share/man/man1/ubuntu-advantage.1 usr/share/man/man1/pro.1 diff -Nru ubuntu-advantage-tools-30~23.10/debian/ubuntu-advantage-tools.lintian-overrides ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-advantage-tools.lintian-overrides --- ubuntu-advantage-tools-30~23.10/debian/ubuntu-advantage-tools.lintian-overrides 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-advantage-tools.lintian-overrides 2024-02-14 15:37:46.000000000 +0000 @@ -1,10 +1,14 @@ # We are silencing 
this issue because of this LP: #1930121 ubuntu-advantage-tools: command-with-path-in-maintainer-script -# Ubuntu doesn't support sysv init.d -ubuntu-advantage-tools: package-supports-alternative-init-but-no-init.d-script +ubuntu-advantage-tools: possible-bashism-in-maintainer-script -# We have made the decision of delivering those files -ubuntu-advantage-tools: package-installs-apt-preferences +# python3 is a transitive dependency via ubuntu-pro-client +ubuntu-advantage-tools: python3-script-but-no-python3-dep -ubuntu-advantage-tools: possible-bashism-in-maintainer-script +# This is only to cleanup an old tmp file +ubuntu-advantage-tools: possibly-insecure-handling-of-tmp-files-in-maintainer-script /tmp [postinst:61] + +# systemctl is the only way to do these calls, and we do the required check before calling it +ubuntu-advantage-tools: maintainer-script-calls-systemctl [postinst:260] +ubuntu-advantage-tools: maintainer-script-calls-systemctl [postinst:263] \ No newline at end of file diff -Nru ubuntu-advantage-tools-30~23.10/debian/ubuntu-advantage-tools.logrotate ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-advantage-tools.logrotate --- ubuntu-advantage-tools-30~23.10/debian/ubuntu-advantage-tools.logrotate 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-advantage-tools.logrotate 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -# use the root group by default, since this is the owning group -# of /var/log/ubuntu-advantage*.log files. -/var/log/ubuntu-advantage*.log { - su root root - create 0644 root root - rotate 6 - monthly - compress - delaycompress - missingok - notifempty -} diff -Nru ubuntu-advantage-tools-30~23.10/debian/ubuntu-advantage-tools.maintscript ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-advantage-tools.maintscript --- ubuntu-advantage-tools-30~23.10/debian/ubuntu-advantage-tools.maintscript 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-advantage-tools.maintscript 2024-02-29 14:03:11.000000000 +0000 @@ -5,3 +5,10 @@ rm_conffile /etc/init/ua-auto-attach.conf 20.2~ ubuntu-advantage-tools rm_conffile /etc/update-motd.d/88-esm-announce 27.14~ ubuntu-advantage-tools rm_conffile /etc/ubuntu-advantage/help_data.yaml 30~ ubuntu-advantage-tools +mv_conffile /etc/logrotate.d/ubuntu-advantage-tools /etc/logrotate.d/ubuntu-pro-client 31.2~ ubuntu-advantage-tools +mv_conffile /etc/apt/apt.conf.d/20apt-esm-hook.conf /etc/apt/apt.conf.d/20apt-esm-hook.conf 31.2~ ubuntu-advantage-tools +mv_conffile /etc/apt/preferences.d/ubuntu-pro-esm-apps /etc/apt/preferences.d/ubuntu-pro-esm-apps 31.2~ ubuntu-advantage-tools +mv_conffile /etc/apt/preferences.d/ubuntu-pro-esm-infra /etc/apt/preferences.d/ubuntu-pro-esm-infra 31.2~ ubuntu-advantage-tools +mv_conffile /etc/ubuntu-advantage/uaclient.conf /etc/ubuntu-advantage/uaclient.conf 31.2~ ubuntu-advantage-tools +mv_conffile /etc/update-manager/release-upgrades.d/ubuntu-advantage-upgrades.cfg /etc/update-manager/release-upgrades.d/ubuntu-advantage-upgrades.cfg 31.2~ ubuntu-advantage-tools +mv_conffile /etc/update-motd.d/91-contract-ua-esm-status /etc/update-motd.d/91-contract-ua-esm-status 31.2~ ubuntu-advantage-tools diff -Nru ubuntu-advantage-tools-30~23.10/debian/ubuntu-advantage-tools.manpages ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-advantage-tools.manpages --- ubuntu-advantage-tools-30~23.10/debian/ubuntu-advantage-tools.manpages 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-advantage-tools.manpages 
1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -ubuntu-advantage.1 diff -Nru ubuntu-advantage-tools-30~23.10/debian/ubuntu-advantage-tools.postinst ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-advantage-tools.postinst --- ubuntu-advantage-tools-30~23.10/debian/ubuntu-advantage-tools.postinst 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-advantage-tools.postinst 2024-02-14 15:37:46.000000000 +0000 @@ -450,7 +450,6 @@ rm_old_license_check_marker disable_new_timer_if_old_timer_already_disabled remove_old_systemd_units - /usr/lib/ubuntu-advantage/cloud-id-shim.sh || true # On old version of ubuntu-advantange-tools, we don't have a public # machine_token.json file on attached machines. Since the non-root @@ -474,15 +473,6 @@ if dpkg --compare-versions "$PREVIOUS_PKG_VER" lt "29~"; then rename_gpg_keys fi - - if grep -q "^ua_config:" /etc/ubuntu-advantage/uaclient.conf; then - echo "Warning: uaclient.conf contains old ua_config field." >&2 - echo " Please do the following:" >&2 - echo " 1. Run 'pro config set field=value' for each field/value pair" >&2 - echo " present under ua_config in /etc/ubuntu-advantage/uaclient.conf" >&2 - echo " 2. Delete ua_config and all sub-fields in" >&2 - echo " /etc/ubuntu-advantage/uaclient.conf" >&2 - fi ;; esac diff -Nru ubuntu-advantage-tools-30~23.10/debian/ubuntu-advantage-tools.postrm ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-advantage-tools.postrm --- ubuntu-advantage-tools-30~23.10/debian/ubuntu-advantage-tools.postrm 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-advantage-tools.postrm 2024-02-14 15:37:46.000000000 +0000 @@ -3,33 +3,7 @@ set -e -remove_apt_auth(){ - # config file created at runtime, needs explicit handling on purge - rm -f /etc/apt/auth.conf.d/90ubuntu-advantage -} - -remove_cache_dir(){ - rm -rf /var/lib/ubuntu-advantage -} - -remove_logs(){ - rm -f /var/log/ubuntu-advantage.log* - rm -f /var/log/ubuntu-advantage-timer.log* - rm -f /var/log/ubuntu-advantage-license-check.log* - rm -f /var/log/ubuntu-advantage-daemon.log* -} - -remove_gpg_files(){ - rm -f /etc/apt/trusted.gpg.d/ubuntu-pro-*.gpg -} - case "$1" in - purge) - remove_apt_auth - remove_cache_dir - remove_logs - remove_gpg_files - ;; abort-install|abort-upgrade) # LP: #2004280 if dpkg --compare-versions "$2" lt "27.14~"; then diff -Nru ubuntu-advantage-tools-30~23.10/debian/ubuntu-advantage-tools.prerm ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-advantage-tools.prerm --- ubuntu-advantage-tools-30~23.10/debian/ubuntu-advantage-tools.prerm 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-advantage-tools.prerm 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ -#!/bin/sh - -set -e - -remove_apt_files() { - /usr/bin/python3 -c ' -from uaclient.apt import clean_apt_files - -clean_apt_files() -' - -} - -case "$1" in - purge|remove) - remove_apt_files - ;; -esac - -#DEBHELPER# - -exit 0 - diff -Nru ubuntu-advantage-tools-30~23.10/debian/ubuntu-pro-auto-attach.lintian-overrides ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-pro-auto-attach.lintian-overrides --- ubuntu-advantage-tools-30~23.10/debian/ubuntu-pro-auto-attach.lintian-overrides 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-pro-auto-attach.lintian-overrides 2024-02-14 15:37:46.000000000 +0000 @@ -0,0 +1,3 @@ +# Avoid warning on wanted-by-target + +ubuntu-pro-auto-attach: systemd-service-file-refers-to-unusual-wantedby-target diff -Nru 
ubuntu-advantage-tools-30~23.10/debian/ubuntu-pro-client.bash-completion ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-pro-client.bash-completion --- ubuntu-advantage-tools-30~23.10/debian/ubuntu-pro-client.bash-completion 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-pro-client.bash-completion 2024-02-14 15:37:46.000000000 +0000 @@ -0,0 +1,2 @@ +tools/ua.bash ua +tools/ua.bash pro diff -Nru ubuntu-advantage-tools-30~23.10/debian/ubuntu-pro-client.links ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-pro-client.links --- ubuntu-advantage-tools-30~23.10/debian/ubuntu-pro-client.links 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-pro-client.links 2024-02-14 15:37:46.000000000 +0000 @@ -0,0 +1,4 @@ +usr/bin/ubuntu-advantage usr/bin/ua +usr/bin/ubuntu-advantage usr/bin/pro +usr/share/man/man1/ubuntu-advantage.1 usr/share/man/man1/ua.1 +usr/share/man/man1/ubuntu-advantage.1 usr/share/man/man1/pro.1 diff -Nru ubuntu-advantage-tools-30~23.10/debian/ubuntu-pro-client.lintian-overrides ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-pro-client.lintian-overrides --- ubuntu-advantage-tools-30~23.10/debian/ubuntu-pro-client.lintian-overrides 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-pro-client.lintian-overrides 2024-02-14 15:37:46.000000000 +0000 @@ -0,0 +1,8 @@ +# Ubuntu doesn't support sysv init.d +ubuntu-pro-client: package-supports-alternative-init-but-no-init.d-script + +# We have made the decision of delivering those files +ubuntu-pro-client: package-installs-apt-preferences + +# This is intentional +ubuntu-pro-client: uses-dpkg-database-directly [usr/lib/ubuntu-advantage/apt-esm-json-hook] \ No newline at end of file diff -Nru ubuntu-advantage-tools-30~23.10/debian/ubuntu-pro-client.logrotate ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-pro-client.logrotate --- ubuntu-advantage-tools-30~23.10/debian/ubuntu-pro-client.logrotate 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-pro-client.logrotate 2024-02-14 15:37:46.000000000 +0000 @@ -0,0 +1,12 @@ +# use the root group by default, since this is the owning group +# of /var/log/ubuntu-advantage*.log files. +/var/log/ubuntu-advantage*.log { + su root root + create 0644 root root + rotate 6 + monthly + compress + delaycompress + missingok + notifempty +} diff -Nru ubuntu-advantage-tools-30~23.10/debian/ubuntu-pro-client.manpages ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-pro-client.manpages --- ubuntu-advantage-tools-30~23.10/debian/ubuntu-pro-client.manpages 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-pro-client.manpages 2024-02-14 15:37:46.000000000 +0000 @@ -0,0 +1 @@ +ubuntu-advantage.1 diff -Nru ubuntu-advantage-tools-30~23.10/debian/ubuntu-pro-client.postinst ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-pro-client.postinst --- ubuntu-advantage-tools-30~23.10/debian/ubuntu-pro-client.postinst 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-pro-client.postinst 2024-02-14 15:37:46.000000000 +0000 @@ -0,0 +1,98 @@ +#!/bin/sh + +set -e + +. /etc/os-release # For VERSION_ID + +# Needed even if this script doesn't call debconf, see: +# https://lintian.debian.org/tags/postinst-does-not-load-confmodule.html +# Note: this may re-exec the postinst script. +. 
/usr/share/debconf/confmodule + +if [ -z "${VERSION_ID}" ]; then + echo "Warning: missing VERSION_ID in /etc/os-release" >&2 + VERSION_ID="NO-VERSION_ID" +fi + + +XENIAL_CLOUD_ID_SHIM_UNIT_LOCATION="/etc/systemd/system/multi-user.target.wants/ubuntu-advantage-cloud-id-shim.service" + +NOTICES_DIR="/var/lib/ubuntu-advantage/notices" +TEMP_NOTICES_DIR="/run/ubuntu-advantage/notices" +REBOOT_CMD_MARKER_FILE="/var/lib/ubuntu-advantage/marker-reboot-cmds-required" + + +# +# Helpers that change state of pro-client +# +add_notice() { + notice=$1 + mkdir -p $NOTICES_DIR + touch $NOTICES_DIR/$notice +} +add_temp_notice() { + notice=$1 + mkdir -p $TEMP_NOTICES_DIR + touch $TEMP_NOTICES_DIR/$notice +} +mark_reboot_cmds_as_needed() { + if [ ! -f "$REBOOT_CMD_MARKER_FILE" ]; then + touch $REBOOT_CMD_MARKER_FILE + fi +} + + +case "$1" in + configure) + PREVIOUS_PKG_VER=$2 + + # + # Migrations from previous ubuntu-pro-client package versions + # + # These should always be version-gated using PREVIOUS_PKG_VER and execute in order from oldest to newest. + # For example: + # if dpkg --compare-versions "$PREVIOUS_PKG_VER" lt "33~"; then + # # do the migrations to version 33 + # fi + # if dpkg --compare-versions "$PREVIOUS_PKG_VER" lt "34~"; then + # # do the migrations to version 34 + # fi + # + + # none yet for ubuntu-pro-client package + + + # + # do-release-upgrade migrations from previous Ubuntu release ubuntu-pro-client package versions + # + + # Xenial -> Bionic: clean up unnecessary cloud-id-shim unit that is only in xenial packages + if [ "$VERSION_ID" = "18.04" ]; then + if echo "$PREVIOUS_PKG_VER" | grep -q "16.04"; then + if [ -L $XENIAL_CLOUD_ID_SHIM_UNIT_LOCATION ]; then + deb-systemd-helper purge ubuntu-advantage-cloud-id-shim.service > /dev/null || true + deb-systemd-helper unmask ubuntu-advantage-cloud-id-shim.service > /dev/null || true + fi + fi + fi + + + # + # Always do these to ensure ubuntu-pro-client is in correct state + # + + /usr/lib/ubuntu-advantage/cloud-id-shim.sh || true + + if grep -q "^ua_config:" /etc/ubuntu-advantage/uaclient.conf; then + echo "Warning: uaclient.conf contains old ua_config field." >&2 + echo " Please do the following:" >&2 + echo " 1. Run 'pro config set field=value' for each field/value pair" >&2 + echo " present under ua_config in /etc/ubuntu-advantage/uaclient.conf" >&2 + echo " 2. 
Delete ua_config and all sub-fields in" >&2 + echo " /etc/ubuntu-advantage/uaclient.conf" >&2 + fi + ;; +esac + +#DEBHELPER# +exit 0 diff -Nru ubuntu-advantage-tools-30~23.10/debian/ubuntu-pro-client.postrm ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-pro-client.postrm --- ubuntu-advantage-tools-30~23.10/debian/ubuntu-pro-client.postrm 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-pro-client.postrm 2024-02-14 15:37:46.000000000 +0000 @@ -0,0 +1,37 @@ +#!/bin/sh + +set -e + + +remove_apt_auth(){ + # config file created at runtime, needs explicit handling on purge + rm -f /etc/apt/auth.conf.d/90ubuntu-advantage +} + +remove_cache_dir(){ + rm -rf /var/lib/ubuntu-advantage +} + +remove_logs(){ + rm -f /var/log/ubuntu-advantage.log* + rm -f /var/log/ubuntu-advantage-timer.log* + rm -f /var/log/ubuntu-advantage-license-check.log* + rm -f /var/log/ubuntu-advantage-daemon.log* +} + +remove_gpg_files(){ + rm -f /etc/apt/trusted.gpg.d/ubuntu-pro-*.gpg +} + +case "$1" in + purge) + remove_apt_auth + remove_cache_dir + remove_logs + remove_gpg_files + ;; +esac + +#DEBHELPER# + +exit 0 diff -Nru ubuntu-advantage-tools-30~23.10/debian/ubuntu-pro-client.prerm ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-pro-client.prerm --- ubuntu-advantage-tools-30~23.10/debian/ubuntu-pro-client.prerm 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/debian/ubuntu-pro-client.prerm 2024-02-14 15:37:46.000000000 +0000 @@ -0,0 +1,26 @@ +#!/bin/sh + +set -e + +remove_apt_files() { + # This list should be kept up to date with the list of available apt-repo-based services + for service in anbox cc-eal cis esm-apps esm-infra fips fips-preview fips-updates realtime-kernel ros ros-updates; do + rm -f /etc/apt/sources.list.d/ubuntu-${service}.list + rm -f /etc/apt/sources.list.d/ubuntu-${service}.sources + done + # preferences are only dynamically created for fips services + for fips_service in fips fips-preview fips-updates; do + rm -f /etc/apt/preferences.d/ubuntu-${fips_service} + done +} + +case "$1" in + purge|remove) + remove_apt_files + ;; +esac + +#DEBHELPER# + +exit 0 + diff -Nru ubuntu-advantage-tools-30~23.10/dev-requirements.txt ubuntu-advantage-tools-31.2~23.10/dev-requirements.txt --- ubuntu-advantage-tools-30~23.10/dev-requirements.txt 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/dev-requirements.txt 2024-01-18 17:34:13.000000000 +0000 @@ -3,4 +3,4 @@ black==22.3.0 isort==5.12.0 pre-commit -shellcheck-py==0.8.0.4 +shellcheck-py==0.9.0.6 diff -Nru ubuntu-advantage-tools-30~23.10/features/_version.feature ubuntu-advantage-tools-31.2~23.10/features/_version.feature --- ubuntu-advantage-tools-30~23.10/features/_version.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/_version.feature 2024-01-18 17:34:13.000000000 +0000 @@ -1,20 +1,8 @@ Feature: Pro is expected version - @series.all @uses.config.check_version - @uses.config.machine_type.lxd-container - @uses.config.machine_type.lxd-vm - @uses.config.machine_type.aws.generic - @uses.config.machine_type.aws.pro - @uses.config.machine_type.aws.pro-fips - @uses.config.machine_type.azure.generic - @uses.config.machine_type.azure.pro - @uses.config.machine_type.azure.pro-fips - @uses.config.machine_type.gcp.generic - @uses.config.machine_type.gcp.pro - @uses.config.machine_type.gcp.pro-fips Scenario Outline: Check pro version - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools 
installed When I run `dpkg-query --showformat='${Version}' --show ubuntu-advantage-tools` with sudo Then I will see the following on stdout """ @@ -34,20 +22,67 @@ THIS GETS REPLACED AT RUNTIME VIA A HACK IN steps/ubuntu_advantage_tools.py """ Examples: version - | release | - | xenial | - | bionic | - | focal | - | jammy | - | lunar | - | mantic | + | release | machine_type | + | xenial | lxd-container | + | xenial | lxd-vm | + | xenial | aws.generic | + | xenial | aws.pro | + | xenial | aws.pro-fips | + | xenial | azure.generic | + | xenial | azure.pro | + | xenial | azure.pro-fips | + | xenial | gcp.generic | + | xenial | gcp.pro | + | xenial | gcp.pro-fips | + | bionic | lxd-container | + | bionic | lxd-vm | + | bionic | aws.generic | + | bionic | aws.pro | + | bionic | aws.pro-fips | + | bionic | azure.generic | + | bionic | azure.pro | + | bionic | azure.pro-fips | + | bionic | gcp.generic | + | bionic | gcp.pro | + | bionic | gcp.pro-fips | + | focal | lxd-container | + | focal | lxd-vm | + | focal | aws.generic | + | focal | aws.pro | + | focal | aws.pro-fips | + | focal | azure.generic | + | focal | azure.pro | + | focal | azure.pro-fips | + | focal | gcp.generic | + | focal | gcp.pro | + | focal | gcp.pro-fips | + | jammy | lxd-container | + | jammy | lxd-vm | + | jammy | aws.generic | + | jammy | aws.pro | + | jammy | aws.pro-fips | + | jammy | azure.generic | + | jammy | azure.pro | + | jammy | azure.pro-fips | + | jammy | gcp.generic | + | jammy | gcp.pro | + | jammy | gcp.pro-fips | + | mantic | lxd-container | + | mantic | lxd-vm | + | mantic | aws.generic | + | mantic | aws.pro | + | mantic | aws.pro-fips | + | mantic | azure.generic | + | mantic | azure.pro | + | mantic | azure.pro-fips | + | mantic | gcp.generic | + | mantic | gcp.pro | + | mantic | gcp.pro-fips | - @series.all @uses.config.check_version - @uses.config.machine_type.lxd-container @upgrade Scenario Outline: Check pro version - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I run `dpkg-query --showformat='${Version}' --show ubuntu-advantage-tools` with sudo Then I will see the following on stdout """ @@ -67,10 +102,9 @@ THIS GETS REPLACED AT RUNTIME VIA A HACK IN steps/ubuntu_advantage_tools.py """ Examples: version - | release | - | xenial | - | bionic | - | focal | - | jammy | - | lunar | - | mantic | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | + | mantic | lxd-container | diff -Nru ubuntu-advantage-tools-30~23.10/features/airgapped.feature ubuntu-advantage-tools-31.2~23.10/features/airgapped.feature --- ubuntu-advantage-tools-30~23.10/features/airgapped.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/airgapped.feature 2024-01-18 17:34:13.000000000 +0000 @@ -1,15 +1,13 @@ @uses.config.contract_token Feature: Performing attach using ua-airgapped - @series.jammy - @uses.config.machine_type.lxd-container - Scenario Outline: Attach in an airgapped scenario - Given a `` machine with ubuntu-advantage-tools installed + Scenario Outline: Pro works with the airgapped contract server + Given a `` `` machine with ubuntu-advantage-tools installed # set up the apt mirror configuration - Given a `jammy` machine named `mirror` + Given a `jammy` `` machine named `mirror` When I run `add-apt-repository ppa:yellow/ua-airgapped -y` `with sudo` on the `mirror` machine - And I run `apt-get update` `with sudo` on 
the `mirror` machine - And I run `apt-get install apt-mirror get-resource-tokens ua-airgapped -yq` `with sudo` on the `mirror` machine + And I apt update on the `mirror` machine + And I apt install `apt-mirror get-resource-tokens ua-airgapped` on the `mirror` machine And I download the service credentials on the `mirror` machine And I extract the `esm-infra` credentials from the `mirror` machine And I extract the `esm-apps` credentials from the `mirror` machine @@ -21,10 +19,10 @@ And I create the contract config overrides file for `esm-infra,esm-apps` on the `mirror` machine And I generate the contracts-airgapped configuration on the `mirror` machine # set up the contracts-airgapped configuration - Given a `jammy` machine named `contracts` + Given a `jammy` `` machine named `contracts` When I run `add-apt-repository ppa:yellow/ua-airgapped -y` `with sudo` on the `contracts` machine - And I run `apt-get update` `with sudo` on the `contracts` machine - And I run `apt-get install contracts-airgapped -yq` `with sudo` on the `contracts` machine + And I apt update on the `contracts` machine + And I apt install `contracts-airgapped` on the `contracts` machine And I run `apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 4067E40313CB4B13` `with sudo` on the `contracts` machine And I disable any internet connection on the `contracts` machine And I send the contracts-airgapped config from the `mirror` machine to the `contracts` machine @@ -33,11 +31,8 @@ And I disable any internet connection on the machine And I change config key `contract_url` to use value `http://$behave_var{machine-ip contracts}:8484` And I attach `contract_token` with sudo - Then stdout matches regexp: - """ - esm-apps +yes +enabled .* - esm-infra +yes +enabled .* - """ + Then I verify that `esm-infra` is enabled + And I verify that `esm-apps` is enabled When I run `apt-cache policy hello` with sudo Then stdout matches regexp: """ @@ -50,5 +45,5 @@ Then I verify that running `pro refresh` `with sudo` exits `0` Examples: ubuntu release - | release | - | jammy | + | release | machine_type | + | jammy | lxd-container | diff -Nru ubuntu-advantage-tools-30~23.10/features/anbox.feature ubuntu-advantage-tools-31.2~23.10/features/anbox.feature --- ubuntu-advantage-tools-30~23.10/features/anbox.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/anbox.feature 2024-01-18 17:34:13.000000000 +0000 @@ -1,16 +1,10 @@ @uses.config.contract_token Feature: Enable anbox on Ubuntu - @series.jammy - @uses.config.machine_type.lxd-container Scenario Outline: Enable Anbox cloud service in a container - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo and options `--no-auto-enable` - When I run `pro status` as non-root - Then stdout matches regexp: - """ - anbox-cloud +yes +disabled - """ + Then I verify that `anbox-cloud` is disabled Then I verify that running `pro enable anbox-cloud` `as non-root` exits `1` And I will see the following on stderr: """ @@ -30,31 +24,21 @@ Updating Anbox Cloud package lists Anbox Cloud access enabled """ - When I run `pro status` as non-root - Then stdout matches regexp: - """ - anbox-cloud +yes +enabled - """ + And I verify that `anbox-cloud` is enabled When I run `apt-cache policy` with sudo Then apt-cache policy for the following url has priority `500` """ https://archive.anbox-cloud.io/stable /main amd64 Packages """ When I run `pro disable anbox-cloud` with 
sudo - And I run `pro status` as non-root - Then stdout matches regexp: - """ - anbox-cloud +yes +disabled - """ + Then I verify that `anbox-cloud` is disabled Examples: ubuntu release - | release | - | jammy | + | release | machine_type | + | jammy | lxd-container | - @series.xenial - @uses.config.machine_type.lxd-vm Scenario Outline: Enable Anbox cloud service in an unsupported release - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo and options `--no-auto-enable` And I verify that running `pro enable anbox-cloud` `with sudo` exits `1` Then I will see the following on stdout: @@ -64,13 +48,11 @@ """ Examples: ubuntu release - | release | - | xenial | + | release | machine_type | + | xenial | lxd-vm | - @series.jammy - @uses.config.machine_type.lxd-vm Scenario Outline: Enable Anbox cloud service in a VM - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo and options `--no-auto-enable` And I run `snap remove lxd` with sudo And I run `pro enable anbox-cloud --access-only --assume-yes` with sudo @@ -80,11 +62,7 @@ Updating Anbox Cloud package lists Anbox Cloud access enabled """ - When I run `pro status` as non-root - Then stdout matches regexp: - """ - anbox-cloud +yes +enabled - """ + And I verify that `anbox-cloud` is enabled When I run `apt-cache policy` with sudo Then apt-cache policy for the following url has priority `500` """ @@ -97,11 +75,7 @@ When I run `cat /var/lib/ubuntu-advantage/private/anbox-cloud-credentials` with sudo Then stdout is a json matching the `anbox_cloud_credentials` schema When I run `pro disable anbox-cloud` with sudo - And I run `pro status` as non-root - Then stdout matches regexp: - """ - anbox-cloud +yes +disabled - """ + Then I verify that `anbox-cloud` is disabled And I verify that no files exist matching `/var/lib/ubuntu-advantage/private/anbox-cloud-credentials` When I run `pro enable anbox-cloud --assume-yes` with sudo Then I will see the following on stdout: @@ -121,11 +95,7 @@ configuration changes. 
For more information, see https://anbox-cloud.io/docs/tut/installing-appliance#initialise """ - When I run `pro status` as non-root - Then stdout matches regexp: - """ - anbox-cloud +yes +enabled - """ + Then I verify that `anbox-cloud` is enabled When I run `apt-cache policy` with sudo Then apt-cache policy for the following url has priority `500` """ @@ -138,13 +108,9 @@ When I run `cat /var/lib/ubuntu-advantage/private/anbox-cloud-credentials` with sudo Then stdout is a json matching the `anbox_cloud_credentials` schema When I run `pro disable anbox-cloud` with sudo - And I run `pro status` as non-root - Then stdout matches regexp: - """ - anbox-cloud +yes +disabled - """ + Then I verify that `anbox-cloud` is disabled And I verify that no files exist matching `/var/lib/ubuntu-advantage/private/anbox-cloud-credentials` Examples: ubuntu release - | release | - | jammy | + | release | machine_type | + | jammy | lxd-vm | diff -Nru ubuntu-advantage-tools-30~23.10/features/api.feature ubuntu-advantage-tools-31.2~23.10/features/api.feature --- ubuntu-advantage-tools-30~23.10/features/api.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/api.feature 2024-01-18 17:34:13.000000000 +0000 @@ -1,8 +1,5 @@ Feature: Client behaviour for the API endpoints - @series.all - @uses.config.machine_type.any - @uses.config.machine_type.lxd-container Scenario Outline: all API endpoints can be imported individually Given a `` `` machine with ubuntu-advantage-tools installed When I run `python3 -c "from uaclient.api.u.pro.attach.auto.configure_retry_service.v1 import configure_retry_service"` as non-root @@ -13,6 +10,8 @@ When I run `python3 -c "from uaclient.api.u.pro.attach.magic.wait.v1 import wait"` as non-root When I run `python3 -c "from uaclient.api.u.pro.packages.summary.v1 import summary"` as non-root When I run `python3 -c "from uaclient.api.u.pro.packages.updates.v1 import updates"` as non-root + When I run `python3 -c "from uaclient.api.u.pro.security.fix.cve.execute.v1 import execute"` as non-root + When I run `python3 -c "from uaclient.api.u.pro.security.fix.usn.execute.v1 import execute"` as non-root When I run `python3 -c "from uaclient.api.u.pro.security.fix.cve.plan.v1 import plan"` as non-root When I run `python3 -c "from uaclient.api.u.pro.security.fix.usn.plan.v1 import plan"` as non-root When I run `python3 -c "from uaclient.api.u.pro.security.status.livepatch_cves.v1 import livepatch_cves"` as non-root @@ -30,12 +29,8 @@ | bionic | lxd-container | | focal | lxd-container | | jammy | lxd-container | - | lunar | lxd-container | | mantic | lxd-container | - @series.all - @uses.config.machine_type.any - @uses.config.machine_type.lxd-container Scenario Outline: API invalid endpoint or args Given a `` `` machine with ubuntu-advantage-tools installed When I verify that running `pro api invalid.endpoint` `with sudo` exits `1` @@ -55,12 +50,8 @@ | bionic | lxd-container | | focal | lxd-container | | jammy | lxd-container | - | lunar | lxd-container | | mantic | lxd-container | - @series.all - @uses.config.machine_type.any - @uses.config.machine_type.lxd-container Scenario Outline: Basic endpoints Given a `` `` machine with ubuntu-advantage-tools installed When I run `pro api u.pro.version.v1` with sudo @@ -95,5 +86,4 @@ | bionic | lxd-container | | focal | lxd-container | | jammy | lxd-container | - | lunar | lxd-container | | mantic | lxd-container | diff -Nru ubuntu-advantage-tools-30~23.10/features/api_configure_retry_service.feature 
ubuntu-advantage-tools-31.2~23.10/features/api_configure_retry_service.feature --- ubuntu-advantage-tools-30~23.10/features/api_configure_retry_service.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/api_configure_retry_service.feature 2024-02-14 15:37:46.000000000 +0000 @@ -1,23 +1,20 @@ Feature: api.u.pro.attach.auto.configure_retry_service - @series.lts - @uses.config.machine_type.lxd-container Scenario Outline: v1 successfully triggers retry service when run during startup - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I change contract to staging with sudo When I create the file `/lib/systemd/system/apitest.service` with the following """ [Unit] Description=test - Before=cloud-config.service - After=cloud-config.target + Before=ubuntu-advantage.service [Service] Type=oneshot ExecStart=/usr/bin/pro api u.pro.attach.auto.configure_retry_service.v1 [Install] - WantedBy=cloud-config.service multi-user.target + WantedBy=multi-user.target """ When I run `systemctl enable apitest.service` with sudo When I reboot the machine @@ -37,7 +34,7 @@ When I run `run-parts /etc/update-motd.d/` with sudo Then stdout matches regexp: """ - Failed to automatically attach to Ubuntu Pro services 1 time\(s\). + Failed to automatically attach to an Ubuntu Pro subscription 1 time\(s\). The failure was due to: an unknown error. The next attempt is scheduled for \d+-\d+-\d+T\d+:\d+:00.*. You can try manually with `sudo pro auto-attach`. @@ -46,14 +43,14 @@ Then stdout matches regexp: """ NOTICES - Failed to automatically attach to Ubuntu Pro services 1 time\(s\). + Failed to automatically attach to an Ubuntu Pro subscription 1 time\(s\). The failure was due to: an unknown error. The next attempt is scheduled for \d+-\d+-\d+T\d+:\d+:00.*. You can try manually with `sudo pro auto-attach`. 
""" Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | diff -Nru ubuntu-advantage-tools-30~23.10/features/api_fix_execute.feature ubuntu-advantage-tools-31.2~23.10/features/api_fix_execute.feature --- ubuntu-advantage-tools-30~23.10/features/api_fix_execute.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/api_fix_execute.feature 2024-01-18 17:34:13.000000000 +0000 @@ -1,9 +1,7 @@ Feature: Fix execute API endpoints - @series.lts - @uses.config.machine_type.lxd-container Scenario Outline: Fix execute command on invalid CVEs/USNs - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I run `pro api u.pro.security.fix.cve.execute.v1 --data '{"cves": ["CVE-1800-123456"]}'` as non-root Then stdout is a json matching the `api_response` schema And the json API response data matches the `cve_fix_execute` schema @@ -34,16 +32,14 @@ """ Examples: ubuntu release details - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | - @series.focal - @uses.config.machine_type.lxd-container Scenario Outline: Fix execute on a Focal machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I run `pro api u.pro.security.fix.cve.execute.v1 --data '{"cves": ["CVE-2020-28196"]}'` as non-root Then stdout is a json matching the `api_response` schema And the json API response data matches the `cve_fix_execute` schema @@ -65,8 +61,8 @@ """ {"_schema_version": "v1", "data": {"attributes": {"cves_data": {"cves": \[{"description": "Kerberos vulnerability", "errors": null, "status": "fixed", "title": "CVE-2020-28196", "upgraded_packages": \[\]}, {"description": "Linux kernel vulnerabilities", "errors": null, "status": "not-affected", "title": "CVE-2022-24959", "upgraded_packages": \[\]}\], "status": "fixed"}}, "meta": {"environment_vars": \[\]}, "type": "CVEFixExecute"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} """ - When I run `apt-get update` with sudo - And I run `apt install -y libawl-php=0.60-1 --allow-downgrades` with sudo + When I apt update + And I apt install `libawl-php=0.60-1` And I run `pro api u.pro.security.fix.usn.execute.v1 --data '{"usns": ["USN-4539-1"]}'` as non-root Then stdout is a json matching the `api_response` schema And stdout matches regexp: @@ -85,8 +81,7 @@ """ {"_schema_version": "v1", "data": {"attributes": {"usns_data": {"status": "fixed", "usns": \[{"related_usns": \[\], "target_usn": {"description": "AWL vulnerability", "errors": null, "status": "fixed", "title": "USN-4539-1", "upgraded_packages": \[\]}}\]}}, "meta": {"environment_vars": \[\]}, "type": "USNFixExecute"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} """ - When I run `apt install -y rsync=3.1.3-8 --allow-downgrades` with sudo - And I run `apt install -y zlib1g=1:1.2.11.dfsg-2ubuntu1 --allow-downgrades` with sudo + When I apt install `rsync=3.1.3-8 zlib1g=1:1.2.11.dfsg-2ubuntu1` And I run `pro api u.pro.security.fix.usn.execute.v1 --data '{"usns": ["USN-5573-1"]}'` with sudo Then stdout is a json matching the `api_response` schema And stdout matches regexp: @@ 
-101,13 +96,11 @@ """ Examples: ubuntu release details - | release | - | focal | + | release | machine_type | + | focal | lxd-container | - @series.xenial - @uses.config.machine_type.lxd-container Scenario Outline: Fix execute API command on a Xenial machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I run `pro api u.pro.security.fix.cve.execute.v1 --data '{"cves": ["CVE-2020-15180"]}'` as non-root Then stdout is a json matching the `api_response` schema And the json API response data matches the `cve_fix_execute` schema @@ -122,8 +115,8 @@ """ {"_schema_version": "v1", "data": {"attributes": {"cves_data": {"cves": \[{"description": ".*", "errors": null, "status": "fixed", "title": "CVE-2020-28196", "upgraded_packages": \[\]}\], "status": "fixed"}}, "meta": {"environment_vars": \[\]}, "type": "CVEFixExecute"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} """ - When I run `apt-get update` with sudo - And I run `DEBIAN_FRONTEND=noninteractive apt-get install -y expat=2.1.0-7 swish-e matanza ghostscript` with sudo + When I apt update + And I apt install `expat=2.1.0-7 swish-e matanza ghostscript` And I run `pro api u.pro.security.fix.cve.execute.v1 --data '{"cves": ["CVE-2017-9233"]}'` as non-root Then stdout is a json matching the `api_response` schema And the json API response data matches the `cve_fix_execute` schema @@ -145,7 +138,7 @@ """ {"_schema_version": "v1", "data": {"attributes": {"cves_data": {"cves": \[{"additional_data": {}, "affected_packages": \["krb5"\], "description": ".*", "error": null, "expected_status": "fixed", "plan": \[{"data": {"pocket": "standard-updates", "source_packages": \["krb5"\], "status": "cve-already-fixed"}, "operation": "no-op", "order": 1}\], "title": "CVE-2020-28196", "warnings": \[\]}, {"additional_data": {}, "affected_packages": \[\], "description": ".*", "error": null, "expected_status": "not-affected", "plan": \[{"data": {"status": "system-not-affected"}, "operation": "no-op", "order": 1}\], "title": "CVE-2020-15180", "warnings": \[\]}, {"additional_data": {}, "affected_packages": \["expat", "matanza", "swish-e"\], "description": ".*", "error": null, "expected_status": "still-affected", "plan": \[{"data": {"pocket": "standard-updates", "source_packages": \["expat"\], "status": "cve-already-fixed"}, "operation": "no-op", "order": 2}\], "title": "CVE-2017-9233", "warnings": \[{"data": {"source_packages": \["matanza", "swish-e"\], "status": "needs-triage"}, "order": 1, "warning_type": "security-issue-not-fixed"}\]}\], "expected_status": "still-affected"}}, "meta": {"environment_vars": \[\]}, "type": "CVEFixPlan"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\] """ - When I run `apt install -y libawl-php` with sudo + When I apt install `libawl-php` And I reboot the machine And I run `pro api u.pro.security.fix.usn.execute.v1 --data '{"usns": ["USN-4539-1"]}'` as non-root Then stdout is a json matching the `api_response` schema @@ -193,8 +186,8 @@ When I run `pro detach --assume-yes` with sudo And I run `sed -i "/xenial-updates/d" /etc/apt/sources.list` with sudo And I run `sed -i "/xenial-security/d" /etc/apt/sources.list` with sudo - And I run `apt-get update` with sudo - And I run `apt-get install squid -y` with sudo + And I apt update + And I apt install `squid` And I run `pro api u.pro.security.fix.cve.execute.v1 --data '{"cves": ["CVE-2020-25097"]}'` as non-root Then stdout is a json matching the `api_response` 
schema And the json API response data matches the `cve_fix_execute` schema @@ -204,13 +197,11 @@ """ Examples: ubuntu release details - | release | - | xenial | + | release | machine_type | + | xenial | lxd-container | - @series.bionic - @uses.config.machine_type.lxd-container Scenario Outline: Fix execute API command on a Bionic machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I run `pro api u.pro.security.fix.cve.execute.v1 --data '{"cves": ["CVE-2020-28196"]}'` as non-root Then stdout is a json matching the `api_response` schema And the json API response data matches the `cve_fix_execute` schema @@ -218,8 +209,8 @@ """ {"_schema_version": "v1", "data": {"attributes": {"cves_data": {"cves": \[{"description": "Kerberos vulnerability", "errors": null, "status": "fixed", "title": "CVE-2020-28196", "upgraded_packages": \[\]}\], "status": "fixed"}}, "meta": {"environment_vars": \[\]}, "type": "CVEFixExecute"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} """ - When I run `apt-get update` with sudo - And I run `apt-get install xterm=330-1ubuntu2 -y` with sudo + When I apt update + And I apt install `xterm=330-1ubuntu2` And I run `pro api u.pro.security.fix.cve.execute.v1 --data '{"cves": ["CVE-2021-27135"]}'` as non-root Then stdout is a json matching the `api_response` schema And the json API response data matches the `cve_fix_execute` schema @@ -234,15 +225,14 @@ """ {"_schema_version": "v1", "data": {"attributes": {"cves_data": {"cves": \[{"description": "xterm vulnerability", "errors": null, "status": "fixed", "title": "CVE-2021-27135", "upgraded_packages": \[{"name": "xterm", "pocket": "standard-updates", "version": ".*"}\]}\], "status": "fixed"}}, "meta": {"environment_vars": \[\]}, "type": "CVEFixExecute"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} """ - When I run `apt install -y libawl-php` with sudo + When I apt install `libawl-php` And I run `pro api u.pro.security.fix.usn.execute.v1 --data '{"usns": ["USN-4539-1"]}'` as non-root Then stdout is a json matching the `api_response` schema And stdout matches regexp: """ {"_schema_version": "v1", "data": {"attributes": {"usns_data": {"status": "not-affected", "usns": \[{"related_usns": \[\], "target_usn": {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-4539-1", "upgraded_packages": \[\]}}\]}}, "meta": {"environment_vars": \[\]}, "type": "USNFixExecute"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} """ - When I run `apt-get install libbz2-1.0=1.0.6-8.1 -y --allow-downgrades` with sudo - And I run `apt-get install bzip2=1.0.6-8.1 -y` with sudo + When I apt install `libbz2-1.0=1.0.6-8.1 bzip2=1.0.6-8.1` And I run `pro api u.pro.security.fix.usn.execute.v1 --data '{"usns": ["USN-4038-3"]}'` as non-root Then stdout is a json matching the `api_response` schema And stdout matches regexp: @@ -259,7 +249,7 @@ Then stdout is a json matching the `api_response` schema And stdout matches regexp: """ - {"_schema_version": "v1", "data": {"attributes": {"usns_data": {"status": "not-affected", "usns": \[{"related_usns": \[{"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6033-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6122-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6123-1", "upgraded_packages": 
\[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6124-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6127-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6131-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6132-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6135-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6149-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6150-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6162-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6173-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6175-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6186-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6222-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6256-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6385-1", "upgraded_packages": \[\]}\], "target_usn": {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6130-1", "upgraded_packages": \[\]}}\]}}, "meta": {"environment_vars": \[\]}, "type": "USNFixExecute"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} + {"_schema_version": "v1", "data": {"attributes": {"usns_data": {"status": "not-affected", "usns": \[{"related_usns": \[{"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6033-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6122-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6123-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6124-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6127-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6131-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6132-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6135-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6149-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6150-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6162-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6173-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6175-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": 
"not-affected", "title": "USN-6186-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6222-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6256-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6385-1", "upgraded_packages": \[\]}, {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6460-1", "upgraded_packages": \[\]}\], "target_usn": {"description": ".*", "errors": null, "status": "not-affected", "title": "USN-6130-1", "upgraded_packages": \[\]}}\]}}, "meta": {"environment_vars": \[\]}, "type": "USNFixExecute"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} """ When I run `pro api u.pro.security.fix.usn.execute.v1 --data '{"usns": ["USN-4539-1", "USN-4038-1"]}'` with sudo Then stdout is a json matching the `api_response` schema @@ -269,5 +259,5 @@ """ Examples: ubuntu release details - | release | - | bionic | + | release | machine_type | + | bionic | lxd-container | diff -Nru ubuntu-advantage-tools-30~23.10/features/api_fix_plan.feature ubuntu-advantage-tools-31.2~23.10/features/api_fix_plan.feature --- ubuntu-advantage-tools-30~23.10/features/api_fix_plan.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/api_fix_plan.feature 2024-01-18 17:34:13.000000000 +0000 @@ -1,9 +1,7 @@ Feature: Fix plan API endpoints - @series.lts - @uses.config.machine_type.lxd-container Scenario Outline: Fix command on an unattached machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I run `pro api u.pro.security.fix.cve.plan.v1 --data '{"cves": ["CVE-1800-123456"]}'` as non-root Then stdout is a json matching the `api_response` schema And the json API response data matches the `cve_fix_plan` schema @@ -34,16 +32,14 @@ """ Examples: ubuntu release details - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | - @series.focal - @uses.config.machine_type.lxd-container Scenario Outline: Fix command on an unattached machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I run `pro api u.pro.security.fix.cve.plan.v1 --data '{"cves": ["CVE-2020-28196"]}'` as non-root Then stdout is a json matching the `api_response` schema And the json API response data matches the `cve_fix_plan` schema @@ -65,8 +61,8 @@ """ {"_schema_version": "v1", "data": {"attributes": {"cves_data": {"cves": \[{"additional_data": {}, "affected_packages": \["krb5"\], "description": ".*", "error": null, "expected_status": "fixed", "plan": \[{"data": {"pocket": "standard-updates", "source_packages": \["krb5"\], "status": "cve-already-fixed"}, "operation": "no-op", "order": 1}\], "title": "CVE-2020-28196", "warnings": \[\]}, {"additional_data": {}, "affected_packages": \[\], "description": ".*", "error": null, "expected_status": "not-affected", "plan": \[{"data": {"status": "system-not-affected"}, "operation": "no-op", "order": 1}], "title": "CVE-2022-24959", "warnings": \[\]}\], "expected_status": "fixed"}}, "meta": {"environment_vars": \[\]}, "type": "CVEFixPlan"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} """ - When I run `apt-get update` with 
sudo - And I run `apt install -y libawl-php=0.60-1 --allow-downgrades` with sudo + When I apt update + And I apt install `libawl-php=0.60-1` And I run `pro api u.pro.security.fix.usn.plan.v1 --data '{"usns": ["USN-4539-1"]}'` as non-root Then stdout is a json matching the `api_response` schema And the json API response data matches the `usn_fix_plan` schema @@ -74,8 +70,7 @@ """ {"_schema_version": "v1", "data": {"attributes": {"usns_data": {"expected_status": "fixed", "usns": \[{"related_usns_plan": \[\], "target_usn_plan": {"additional_data": {"associated_cves": \["CVE-2020-11728"\], "associated_launchpad_bugs": \[\]}, "affected_packages": \["awl"\], "description": ".*", "error": null, "expected_status": "fixed", "plan": \[{"data": {"binary_packages": \["libawl-php"\], "pocket": "standard-updates", "source_packages": \["awl"\]}, "operation": "apt-upgrade", "order": 1}\], "title": "USN-4539-1", "warnings": \[\]}}\]}}, "meta": {"environment_vars": \[\]}, "type": "USNFixPlan"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} """ - When I run `apt install -y rsync=3.1.3-8 --allow-downgrades` with sudo - And I run `apt install -y zlib1g=1:1.2.11.dfsg-2ubuntu1 --allow-downgrades` with sudo + When I apt install `rsync=3.1.3-8 zlib1g=1:1.2.11.dfsg-2ubuntu1` And I run `pro api u.pro.security.fix.usn.plan.v1 --data '{"usns": ["USN-5573-1"]}'` as non-root Then stdout is a json matching the `api_response` schema And the json API response data matches the `usn_fix_plan` schema @@ -92,13 +87,11 @@ """ Examples: ubuntu release details - | release | - | focal | + | release | machine_type | + | focal | lxd-container | - @series.xenial - @uses.config.machine_type.lxd-container Scenario Outline: Fix command on an unattached machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I run `pro api u.pro.security.fix.cve.plan.v1 --data '{"cves": ["CVE-2020-15180"]}'` as non-root Then stdout is a json matching the `api_response` schema And the json API response data matches the `cve_fix_plan` schema @@ -113,8 +106,8 @@ """ {"_schema_version": "v1", "data": {"attributes": {"cves_data": {"cves": \[{"additional_data": {}, "affected_packages": \["krb5"\], "description": ".*", "error": null, "expected_status": "fixed", "plan": \[{"data": {"pocket": "standard-updates", "source_packages": \["krb5"\], "status": "cve-already-fixed"}, "operation": "no-op", "order": 1}\], "title": "CVE-2020-28196", "warnings": \[\]}\], "expected_status": "fixed"}}, "meta": {"environment_vars": \[\]}, "type": "CVEFixPlan"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} """ - When I run `apt-get update` with sudo - And I run `DEBIAN_FRONTEND=noninteractive apt-get install -y expat=2.1.0-7 swish-e matanza ghostscript` with sudo + When I apt update + And I apt install `expat=2.1.0-7 swish-e matanza ghostscript` And I run `pro api u.pro.security.fix.cve.plan.v1 --data '{"cves": ["CVE-2017-9233"]}'` as non-root Then stdout is a json matching the `api_response` schema And the json API response data matches the `cve_fix_plan` schema @@ -129,7 +122,7 @@ """ {"_schema_version": "v1", "data": {"attributes": {"cves_data": {"cves": \[{"additional_data": {}, "affected_packages": \["krb5"\], "description": ".*", "error": null, "expected_status": "fixed", "plan": \[{"data": {"pocket": "standard-updates", "source_packages": \["krb5"\], "status": "cve-already-fixed"}, "operation": "no-op", "order": 1}\], "title": "CVE-2020-28196", 
"warnings": \[\]}, {"additional_data": {}, "affected_packages": \[\], "description": ".*", "error": null, "expected_status": "not-affected", "plan": \[{"data": {"status": "system-not-affected"}, "operation": "no-op", "order": 1}\], "title": "CVE-2020-15180", "warnings": \[\]}, {"additional_data": {}, "affected_packages": \["expat", "matanza", "swish-e"\], "description": ".*", "error": null, "expected_status": "still\-affected", "plan": \[{"data": {"binary_packages": \["expat"\], "pocket": "standard-updates", "source_packages": \["expat"\]}, "operation": "apt-upgrade", "order": 2}\], "title": "CVE-2017-9233", "warnings": \[{"data": {"source_packages": \["matanza", "swish\-e"\], "status": "needs-triage"}, "order": 1, "warning_type": "security\-issue-not\-fixed"}\]}\], "expected_status": "still\-affected"}}, "meta": {"environment_vars": \[\]}, "type": "CVEFixPlan"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} """ - When I run `apt install -y libawl-php` with sudo + When I apt install `libawl-php` And I reboot the machine And I run `pro api u.pro.security.fix.usn.plan.v1 --data '{"usns": ["USN-4539-1"]}'` as non-root Then stdout is a json matching the `api_response` schema @@ -168,8 +161,8 @@ """ When I run `sed -i "/xenial-updates/d" /etc/apt/sources.list` with sudo And I run `sed -i "/xenial-security/d" /etc/apt/sources.list` with sudo - And I run `apt-get update` with sudo - And I run `apt-get install squid -y` with sudo + And I apt update + And I apt install `squid` And I run `pro api u.pro.security.fix.cve.plan.v1 --data '{"cves": ["CVE-2020-25097"]}'` as non-root Then stdout is a json matching the `api_response` schema And the json API response data matches the `cve_fix_plan` schema @@ -179,13 +172,11 @@ """ Examples: ubuntu release details - | release | - | xenial | + | release | machine_type | + | xenial | lxd-container | - @series.bionic - @uses.config.machine_type.lxd-container Scenario Outline: Fix command on an unattached machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I run `pro api u.pro.security.fix.cve.plan.v1 --data '{"cves": ["CVE-2020-28196"]}'` as non-root Then stdout is a json matching the `api_response` schema And the json API response data matches the `cve_fix_plan` schema @@ -193,8 +184,8 @@ """ {"_schema_version": "v1", "data": {"attributes": {"cves_data": {"cves": \[{"additional_data": {}, "affected_packages": \["krb5"\], "description": ".*", "error": null, "expected_status": "fixed", "plan": \[{"data": {"pocket": "standard-updates", "source_packages": \["krb5"\], "status": "cve-already-fixed"}, "operation": "no-op", "order": 1}\], "title": "CVE-2020-28196", "warnings": \[\]}\], "expected_status": "fixed"}}, "meta": {"environment_vars": \[\]}, "type": "CVEFixPlan"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} """ - When I run `apt-get update` with sudo - And I run `apt-get install xterm=330-1ubuntu2 -y` with sudo + When I apt update + And I apt install `xterm=330-1ubuntu2` And I run `pro api u.pro.security.fix.cve.plan.v1 --data '{"cves": ["CVE-2021-27135"]}'` as non-root Then stdout is a json matching the `api_response` schema And the json API response data matches the `cve_fix_plan` schema @@ -202,7 +193,7 @@ """ {"_schema_version": "v1", "data": {"attributes": {"cves_data": {"cves": \[{"additional_data": {}, "affected_packages": \["xterm"\], "description": ".*", "error": null, "expected_status": "fixed", "plan": \[{"data": 
{"binary_packages": \["xterm"\], "pocket": "standard-updates", "source_packages": \["xterm"\]}, "operation": "apt-upgrade", "order": 1}\], "title": "CVE-2021-27135", "warnings": \[\]}\], "expected_status": "fixed"}}, "meta": {"environment_vars": \[\]}, "type": "CVEFixPlan"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} """ - When I run `apt install -y libawl-php` with sudo + When I apt install `libawl-php` And I run `pro api u.pro.security.fix.usn.plan.v1 --data '{"usns": ["USN-4539-1"]}'` as non-root Then stdout is a json matching the `api_response` schema And the json API response data matches the `usn_fix_plan` schema @@ -210,8 +201,7 @@ """ {"_schema_version": "v1", "data": {"attributes": {"usns_data": {"expected_status": "not-affected", "usns": \[{"related_usns_plan": \[\], "target_usn_plan": {"additional_data": {"associated_cves": \["CVE-2020-11728"\], "associated_launchpad_bugs": \[\]}, "affected_packages": \[\], "description": ".*", "error": null, "expected_status": "not-affected", "plan": \[{"data": {"status": "system-not-affected"}, "operation": "no-op", "order": 1}\], "title": "USN-4539-1", "warnings": \[\]}}\]}}, "meta": {"environment_vars": \[\]}, "type": "USNFixPlan"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} """ - When I run `apt-get install libbz2-1.0=1.0.6-8.1 -y --allow-downgrades` with sudo - And I run `apt-get install bzip2=1.0.6-8.1 -y` with sudo + When I apt install `libbz2-1.0=1.0.6-8.1 bzip2=1.0.6-8.1` And I run `pro api u.pro.security.fix.usn.plan.v1 --data '{"usns": ["USN-4038-3"]}'` as non-root Then stdout is a json matching the `api_response` schema And the json API response data matches the `usn_fix_plan` schema @@ -235,21 +225,19 @@ """ Examples: ubuntu release details - | release | - | bionic | + | release | machine_type | + | bionic | lxd-container | - @series.mantic - @uses.config.machine_type.lxd-vm Scenario Outline: Fix command on an unattached machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I run `pro api u.pro.security.fix.cve.plan.v1 --data '{"cves": ["CVE-2022-40982"]}'` as non-root Then stdout is a json matching the `api_response` schema And the json API response data matches the `cve_fix_plan` schema And stdout matches regexp: """ - {"_schema_version": "v1", "data": {"attributes": {"cves_data": {"cves": \[{"additional_data": {}, "affected_packages": \["linux"\], "description": ".*", "error": null, "expected_status": "still-affected", "plan": \[\], "title": "CVE-2022-40982", "warnings": \[{"data": {"source_packages": \["linux"\], "status": "pending"}, "order": 1, "warning_type": "security-issue-not-fixed"}\]}\], "expected_status": "still-affected"}}, "meta": {"environment_vars": \[\]}, "type": "CVEFixPlan"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} + {"_schema_version": "v1", "data": {"attributes": {"cves_data": {"cves": \[{"additional_data": {}, "affected_packages": \[\], "description": ".*", "error": null, "expected_status": "not-affected", "plan": \[{"data": {"status": "system-not-affected"}, "operation": "no-op", "order": 1}\], "title": "CVE-2022-40982", "warnings": \[\]}\], "expected_status": "not-affected"}}, "meta": {"environment_vars": \[\]}, "type": "CVEFixPlan"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} """ Examples: ubuntu release details - | release | - | mantic | + | release | machine_type | + | mantic | lxd-vm | diff -Nru 
ubuntu-advantage-tools-30~23.10/features/api_full_auto_attach.feature ubuntu-advantage-tools-31.2~23.10/features/api_full_auto_attach.feature
--- ubuntu-advantage-tools-30~23.10/features/api_full_auto_attach.feature 2023-11-07 14:23:34.000000000 +0000
+++ ubuntu-advantage-tools-31.2~23.10/features/api_full_auto_attach.feature 2024-01-18 17:34:13.000000000 +0000
@@ -1,11 +1,7 @@
Feature: Full Auto-Attach Endpoint
- @series.lts
- @uses.config.machine_type.aws.pro
- @uses.config.machine_type.azure.pro
- @uses.config.machine_type.gcp.pro
Scenario Outline: Run auto-attach on cloud instance.
- Given a `` machine with ubuntu-advantage-tools installed
+ Given a `` `` machine with ubuntu-advantage-tools installed
When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following:
"""
contract_url: 'https://contracts.canonical.com'
@@ -20,19 +16,21 @@
full_auto_attach(FullAutoAttachOptions(enable=["esm-infra"]))
"""
And I run `python3 /tmp/full_auto_attach.py` with sudo
- And I run `pro status --all` with sudo
- Then stdout matches regexp:
- """
- esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure
- """
- Then stdout matches regexp:
- """
- livepatch +yes +(disabled|n/a) +(Canonical Livepatch service|Current kernel is not supported)
- """
+ Then I verify that `esm-infra` is enabled
+ And I verify that `livepatch` is disabled
+
Examples:
- | release |
- | xenial |
- | bionic |
- | focal |
- | jammy |
+ | release | machine_type |
+ | xenial | aws.pro |
+ | xenial | azure.pro |
+ | xenial | gcp.pro |
+ | bionic | aws.pro |
+ | bionic | azure.pro |
+ | bionic | gcp.pro |
+ | focal | aws.pro |
+ | focal | azure.pro |
+ | focal | gcp.pro |
+ | jammy | aws.pro |
+ | jammy | azure.pro |
+ | jammy | gcp.pro |
diff -Nru ubuntu-advantage-tools-30~23.10/features/api_magic_attach.feature ubuntu-advantage-tools-31.2~23.10/features/api_magic_attach.feature
--- ubuntu-advantage-tools-30~23.10/features/api_magic_attach.feature 2023-11-07 14:23:34.000000000 +0000
+++ ubuntu-advantage-tools-31.2~23.10/features/api_magic_attach.feature 2024-01-18 17:34:13.000000000 +0000
@@ -1,9 +1,7 @@
Feature: Magic Attach endpoints
- @series.lts
- @uses.config.machine_type.lxd-container
Scenario Outline: Call magic attach endpoints
- Given a `` machine with ubuntu-advantage-tools installed
+ Given a `` `` machine with ubuntu-advantage-tools installed
When I change contract to staging with sudo
And I verify that running `pro api u.pro.attach.magic.revoke.v1` `as non-root` exits `1`
Then stdout is a json matching the `api_response` schema
@@ -67,8 +65,8 @@
"""
Examples: ubuntu release
- | release |
- | xenial |
- | bionic |
- | focal |
- | jammy |
+ | release | machine_type |
+ | xenial | lxd-container |
+ | bionic | lxd-container |
+ | focal | lxd-container |
+ | jammy | lxd-container |
diff -Nru ubuntu-advantage-tools-30~23.10/features/api_packages.feature ubuntu-advantage-tools-31.2~23.10/features/api_packages.feature
--- ubuntu-advantage-tools-30~23.10/features/api_packages.feature 2023-11-07 14:23:34.000000000 +0000
+++ ubuntu-advantage-tools-31.2~23.10/features/api_packages.feature 2024-01-18 17:34:13.000000000 +0000
@@ -1,10 +1,8 @@
Feature: Package related API endpoints
- @series.all
- @uses.config.machine_type.lxd-container
@uses.config.contract_token
Scenario Outline: Call packages API endpoints to see information in a Ubuntu machine
- Given a `` machine with ubuntu-advantage-tools installed
+ Given a `` `` machine with ubuntu-advantage-tools installed
When I run `pro api u.pro.packages.summary.v1` as non-root
Then stdout matches regexp:
"""
@@ -17,9 +15,9 @@
"""
# Make sure we have an updated system
When I attach `contract_token` with sudo
- And I run `apt upgrade -y` with sudo
+ And I apt upgrade
# Install some outdated package
- And I run `apt install = -y --allow-downgrades` with sudo
+ And I apt install `=`
# See the update there
When I store candidate version of package ``
And I regexify `candidate` stored var
@@ -30,9 +28,9 @@
"""
Examples: ubuntu release
- | release | package | outdated_version | provided_by |
- | xenial | libcurl3-gnutls | 7.47.0-1ubuntu2 | esm-infra |
- | bionic | libcurl4 | 7.58.0-2ubuntu3 | esm-infra |
- | focal | libcurl4 | 7.68.0-1ubuntu2 | standard-security |
- | jammy | libcurl4 | 7.81.0-1 | standard-security |
- | lunar | libcurl4 | 7.88.1-8ubuntu1 | standard-security |
+ | release | machine_type | package | outdated_version | provided_by |
+ | xenial | lxd-container | libcurl3-gnutls | 7.47.0-1ubuntu2 | esm-infra |
+ | bionic | lxd-container | libcurl4 | 7.58.0-2ubuntu3 | esm-infra |
+ | focal | lxd-container | libcurl4 | 7.68.0-1ubuntu2 | standard-security |
+ | jammy | lxd-container | libcurl4 | 7.81.0-1 | standard-security |
+ | mantic | lxd-container | libcurl4 | 8.2.1-1ubuntu3 | standard-security |
diff -Nru ubuntu-advantage-tools-30~23.10/features/api_security.feature ubuntu-advantage-tools-31.2~23.10/features/api_security.feature
--- ubuntu-advantage-tools-30~23.10/features/api_security.feature 2023-11-07 14:23:34.000000000 +0000
+++ ubuntu-advantage-tools-31.2~23.10/features/api_security.feature 2024-01-18 17:34:13.000000000 +0000
@@ -1,10 +1,8 @@
Feature: API security/security status tests
- @series.xenial
- @uses.config.machine_type.lxd-vm
@uses.config.contract_token
Scenario: Call Livepatched CVEs endpoint
- Given a `xenial` machine with ubuntu-advantage-tools installed
+ Given a `xenial` `lxd-vm` machine with ubuntu-advantage-tools installed
When I attach `contract_token` with sudo
And I run `pro api u.pro.security.status.livepatch_cves.v1` as non-root
Then stdout matches regexp:
@@ -16,22 +14,16 @@
"type": "LivepatchCVEs"
"""
- @series.lts
- @uses.config.machine_type.lxd-container
@uses.config.contract_token
Scenario Outline: Call package manifest endpoint for machine
- Given a `` machine with ubuntu-advantage-tools installed
+ Given a `` `` machine with ubuntu-advantage-tools installed
When I attach `contract_token` with sudo
- And I run `pro status` as non-root
- Then stdout matches regexp:
- """
- esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure
- """
- When I run `apt update` with sudo
- And I run `apt upgrade -y` with sudo
- And I run `apt install jq bzip2 -y` with sudo
+ Then I verify that `esm-infra` is enabled
+ When I apt update
+ And I apt upgrade
+ And I apt install `jq bzip2`
# Install the oscap version 1.3.7 which solved the epoch error message issue
- And I run `apt-get install -y cmake libdbus-1-dev libdbus-glib-1-dev libcurl4-openssl-dev libgcrypt20-dev libselinux1-dev libxslt1-dev libgconf2-dev libacl1-dev libblkid-dev libcap-dev libxml2-dev libldap2-dev libpcre3-dev swig libxml-parser-perl libxml-xpath-perl libperl-dev libbz2-dev g++ libapt-pkg-dev libyaml-dev libxmlsec1-dev libxmlsec1-openssl` with sudo
+ And I apt install `cmake libdbus-1-dev libdbus-glib-1-dev libcurl4-openssl-dev libgcrypt20-dev libselinux1-dev libxslt1-dev libgconf2-dev libacl1-dev libblkid-dev libcap-dev libxml2-dev libldap2-dev libpcre3-dev swig libxml-parser-perl libxml-xpath-perl libperl-dev libbz2-dev g++ libapt-pkg-dev libyaml-dev libxmlsec1-dev libxmlsec1-openssl`
And I run `wget https://github.com/OpenSCAP/openscap/releases/download/1.3.7/openscap-1.3.7.tar.gz` as non-root
And I run `tar xzf openscap-1.3.7.tar.gz` as non-root
And I run shell command `mkdir -p openscap-1.3.7/build` as non-root
@@ -64,18 +56,16 @@
oval:com.ubuntu.:def::\s+false
"""
# Downgrade the package
- When I run shell command `apt install libgnutls30= -y --allow-downgrades` with sudo
+ When I apt install `libgnutls30=`
And I run shell command `pro api u.security.package_manifest.v1 | jq -r '.data.attributes.manifest_data' > manifest` as non-root
And I run shell command `oscap oval eval --report report.html oci.com.ubuntu..usn.oval.xml` as non-root
Then stdout matches regexp:
"""
oval:com.ubuntu.:def::\s+true
"""
-
-
Examples: ubuntu release
- | release | base_version | CVE_ID |
- | xenial | 3.4.10-4ubuntu1 | 39991000000 |
- | bionic | 3.5.18-1ubuntu1 | 55501000000 |
- | focal | 3.6.13-2ubuntu1 | 55501000000 |
- | jammy | 3.7.3-4ubuntu1 | 55501000000 |
+ | release | machine_type | base_version | CVE_ID |
+ | xenial | lxd-container | 3.4.10-4ubuntu1 | 39991000000 |
+ | bionic | lxd-container | 3.5.18-1ubuntu1 | 55501000000 |
+ | focal | lxd-container | 3.6.13-2ubuntu1 | 55501000000 |
+ | jammy | lxd-container | 3.7.3-4ubuntu1 | 55501000000 |
diff -Nru ubuntu-advantage-tools-30~23.10/features/api_unattended_upgrades.feature ubuntu-advantage-tools-31.2~23.10/features/api_unattended_upgrades.feature
--- ubuntu-advantage-tools-30~23.10/features/api_unattended_upgrades.feature 2023-11-07 14:23:34.000000000 +0000
+++ ubuntu-advantage-tools-31.2~23.10/features/api_unattended_upgrades.feature 2024-01-18 17:34:13.000000000 +0000
@@ -1,9 +1,7 @@
Feature: api.u.unattended_upgrades.status.v1
- @series.all
- @uses.config.machine_type.lxd-container
Scenario Outline: v1 unattended upgrades status
- Given a `` machine with ubuntu-advantage-tools installed
+ Given a `` `` machine with ubuntu-advantage-tools installed
When I run `pro api u.unattended_upgrades.status.v1` as non-root
Then stdout matches regexp:
"""
@@ -13,8 +11,8 @@
"""
APT::Periodic::Enable "0";
"""
- And I run `apt-get update` with sudo
- And I run `apt-get install jq -y` with sudo
+ And I apt update
+ And I apt install `jq`
And I run shell command `pro api u.unattended_upgrades.status.v1 | jq .data.attributes.apt_periodic_job_enabled` as non-root
Then I will see the following on stdout:
"""
@@ -196,7 +194,7 @@
"vim"
]
"""
- When I run `apt remove unattended-upgrades -y` with sudo
+ When I apt remove `unattended-upgrades`
And I run `pro api u.unattended_upgrades.status.v1` as non-root
Then stdout matches regexp:
"""
@@ -204,10 +202,9 @@
"""
Examples: ubuntu release
- | release | extra_field |
- | xenial | |
- | bionic | "Unattended-Upgrade::DevRelease": "false" |
- | focal | "Unattended-Upgrade::DevRelease": "auto" |
- | jammy | "Unattended-Upgrade::DevRelease": "auto" |
- | lunar | "Unattended-Upgrade::DevRelease": "auto" |
- | mantic | "Unattended-Upgrade::DevRelease": "auto" |
+ | release | machine_type | extra_field |
+ | xenial | lxd-container | |
+ | bionic | lxd-container | "Unattended-Upgrade::DevRelease": "false" |
+ | focal | lxd-container | "Unattended-Upgrade::DevRelease": "auto" |
+ | jammy | lxd-container | "Unattended-Upgrade::DevRelease": "auto" |
+ | mantic | lxd-container | "Unattended-Upgrade::DevRelease": "auto" |
diff -Nru ubuntu-advantage-tools-30~23.10/features/apt_messages.feature
ubuntu-advantage-tools-31.2~23.10/features/apt_messages.feature --- ubuntu-advantage-tools-30~23.10/features/apt_messages.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/apt_messages.feature 2024-01-18 17:34:13.000000000 +0000 @@ -1,17 +1,13 @@ Feature: APT Messages - @series.xenial - @uses.config.machine_type.any - @uses.config.machine_type.lxd-container @uses.config.contract_token Scenario Outline: APT JSON Hook prints package counts correctly on xenial Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo - When I run `apt-get update` with sudo - When I run `apt-get upgrade -y` with sudo - - When I run `apt-get install -y --allow-downgrades ` with sudo - When I run `apt upgrade -y` with sudo + When I apt update + When I apt upgrade + When I apt install `` + When I apt upgrade Then stdout matches regexp: """ 1 upgraded, 0 newly installed, 0 to remove and 0 not upgraded. @@ -19,8 +15,8 @@ """ - When I run `apt-get install -y --allow-downgrades ` with sudo - When I run `apt upgrade -y` with sudo + When I apt install `` + When I apt upgrade Then stdout matches regexp: """ 2 upgraded, 0 newly installed, 0 to remove and 0 not upgraded. @@ -28,8 +24,8 @@ """ - When I run `apt-get install -y --allow-downgrades ` with sudo - When I run `apt upgrade -y` with sudo + When I apt install `` + When I apt upgrade Then stdout matches regexp: """ 1 upgraded, 0 newly installed, 0 to remove and 0 not upgraded. @@ -37,9 +33,8 @@ """ - When I run `apt-get install -y --allow-downgrades ` with sudo - When I run `apt-get install -y --allow-downgrades ` with sudo - When I run `apt upgrade -y` with sudo + When I apt install ` ` + When I apt upgrade Then stdout matches regexp: """ 3 upgraded, 0 newly installed, 0 to remove and 0 not upgraded. @@ -47,9 +42,8 @@ """ - When I run `apt-get install -y --allow-downgrades ` with sudo - When I run `apt-get install -y --allow-downgrades ` with sudo - When I run `apt upgrade -y` with sudo + When I apt install ` ` + When I apt upgrade Then stdout matches regexp: """ 2 upgraded, 0 newly installed, 0 to remove and 0 not upgraded. @@ -57,9 +51,8 @@ """ - When I run `apt-get install -y --allow-downgrades ` with sudo - When I run `apt-get install -y --allow-downgrades ` with sudo - When I run `apt upgrade -y` with sudo + When I apt install ` ` + When I apt upgrade Then stdout matches regexp: """ 3 upgraded, 0 newly installed, 0 to remove and 0 not upgraded. @@ -67,10 +60,8 @@ """ - When I run `apt-get install -y --allow-downgrades ` with sudo - When I run `apt-get install -y --allow-downgrades ` with sudo - When I run `apt-get install -y --allow-downgrades ` with sudo - When I run `apt upgrade -y` with sudo + When I apt install ` ` + When I apt upgrade Then stdout matches regexp: """ 4 upgraded, 0 newly installed, 0 to remove and 0 not upgraded. @@ -78,7 +69,7 @@ """ - When I run `apt upgrade -y` with sudo + When I apt upgrade Then stdout matches regexp: """ 0 upgraded, 0 newly installed, 0 to remove and 0 not upgraded. 
@@ -100,19 +91,15 @@ | release | machine_type | standard-pkg | infra-pkg | apps-pkg | | xenial | lxd-container | wget=1.17.1-1ubuntu1 | curl=7.47.0-1ubuntu2 libcurl3-gnutls=7.47.0-1ubuntu2 | hello=2.10-1 | - @series.bionic - @series.xenial - @uses.config.machine_type.any - @uses.config.machine_type.lxd-container @uses.config.contract_token Scenario Outline: APT Hook advertises esm-infra on upgrade Given a `` `` machine with ubuntu-advantage-tools installed - When I run `apt-get update` with sudo - When I run `apt-get -y upgrade` with sudo - When I run `apt-get -y autoremove` with sudo + When I apt update + When I apt upgrade + When I apt autoremove When I run `pro config set apt_news=false` with sudo When I run `pro refresh messages` with sudo - When I run `apt upgrade` with sudo + When I apt upgrade Then stdout matches regexp: """ Reading package lists... @@ -124,7 +111,7 @@ Learn more about Ubuntu Pro for \.04 at https:\/\/ubuntu\.com\/-04 0 upgraded, 0 newly installed, 0 to remove and 0 not upgraded\. """ - When I run `apt-get upgrade` with sudo + When I apt-get upgrade Then I will see the following on stdout: """ Reading package lists... @@ -134,7 +121,7 @@ 0 upgraded, 0 newly installed, 0 to remove and 0 not upgraded. """ When I attach `contract_token` with sudo - When I run `apt upgrade --dry-run` with sudo + When I apt upgrade on a dry run Then stdout matches regexp: """ Reading package lists... @@ -143,10 +130,10 @@ Calculating upgrade... The following packages will be upgraded: """ - When I run `apt-get upgrade -y` with sudo + When I apt upgrade When I run `pro detach --assume-yes` with sudo When I run `pro refresh messages` with sudo - When I run `apt upgrade` with sudo + When I apt upgrade Then stdout matches regexp: """ Reading package lists... @@ -160,20 +147,16 @@ | xenial | lxd-container | 16 | | bionic | lxd-container | 18 | - @series.focal - @series.jammy - @uses.config.machine_type.any - @uses.config.machine_type.lxd-container @uses.config.contract_token Scenario Outline: APT Hook advertises esm-apps on upgrade Given a `` `` machine with ubuntu-advantage-tools installed - When I run `apt-get update` with sudo - When I run `apt-get -o APT::Get::Always-Include-Phased-Updates=true upgrade -y` with sudo - When I run `apt-get -y autoremove` with sudo - When I run `apt-get install -y` with sudo + When I apt update + When I apt upgrade including phased updates + When I apt autoremove + When I apt install `` When I run `pro config set apt_news=false` with sudo When I run `pro refresh messages` with sudo - When I run `apt upgrade` with sudo + When I apt upgrade Then stdout matches regexp: """ Reading package lists... @@ -185,7 +168,7 @@ 0 upgraded, 0 newly installed, 0 to remove and \d+ not upgraded. """ - When I run `apt-get upgrade` with sudo + When I apt-get upgrade Then stdout matches regexp: """ Reading package lists... @@ -195,7 +178,7 @@ 0 upgraded, 0 newly installed, 0 to remove and \d+ not upgraded. """ When I attach `contract_token` with sudo - When I run `apt upgrade --dry-run` with sudo + When I apt upgrade on a dry run Then stdout matches regexp: """ Reading package lists... @@ -205,10 +188,10 @@ The following packages will be upgraded: """ - When I run `apt-get upgrade -y` with sudo + When I apt upgrade When I run `pro detach --assume-yes` with sudo When I run `pro refresh messages` with sudo - When I run `apt upgrade` with sudo + When I apt upgrade Then stdout matches regexp: """ Reading package lists... 
@@ -219,26 +202,22 @@ """ Examples: ubuntu release | release | machine_type | package | more_msg | learn_more_msg | - | bionic | lxd-container | ansible | more security updates | Learn more about Ubuntu Pro for 18.04 at https://ubuntu.com/18-04 | | focal | lxd-container | hello | another security update | Learn more about Ubuntu Pro at https://ubuntu.com/pro | | jammy | lxd-container | hello | another security update | Learn more about Ubuntu Pro at https://ubuntu.com/pro | - @series.all - @uses.config.machine_type.any - @uses.config.machine_type.lxd-container @uses.config.contract_token Scenario Outline: APT News Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo - # On interim releases we will not enable any service, so we need a manual apt-get update - When I run `apt-get update` with sudo - When I run `DEBIAN_FRONTEND=noninteractive apt-get -o APT::Get::Always-Include-Phased-Updates=true upgrade -y` with sudo - When I run `apt-get autoremove -y` with sudo + # On interim releases we will not enable any service, so we need a manual apt update + When I apt update + When I apt upgrade including phased updates + When I apt autoremove When I apt install `jq` When I run `pro detach --assume-yes` with sudo Given a `focal` `` machine named `apt-news-server` - When I run `apt-get update` `with sudo` on the `apt-news-server` machine + When I apt update on the `apt-news-server` machine When I apt install `nginx` on the `apt-news-server` machine When I run `sed -i "s/gzip on;/gzip on;\n\tgzip_min_length 1;\n\tgzip_types application\/json;\n/" /etc/nginx/nginx.conf` `with sudo` on the `apt-news-server` machine When I run `systemctl restart nginx` `with sudo` on the `apt-news-server` machine @@ -259,7 +238,7 @@ } """ When I run `pro refresh messages` with sudo - When I run `apt upgrade` with sudo + When I apt upgrade Then I will see the following on stdout """ Reading package lists... @@ -278,7 +257,7 @@ """ # Test that it is not shown in apt-get output - When I run `apt-get upgrade` with sudo + When I apt-get upgrade Then I will see the following on stdout """ Reading package lists... @@ -304,8 +283,8 @@ } """ # apt update stamp will prevent a apt_news refresh - When I run `apt-get update` with sudo - When I run `apt upgrade` with sudo + When I apt update + When I apt upgrade Then I will see the following on stdout """ Reading package lists... @@ -325,7 +304,7 @@ # manual refresh gets new message When I run `pro refresh messages` with sudo - When I run `apt upgrade` with sudo + When I apt upgrade Then I will see the following on stdout """ Reading package lists... @@ -349,8 +328,10 @@ When I run `rm -rf /run/ubuntu-advantage` with sudo When I run `rm -rf /var/lib/ubuntu-advantage/messages` with sudo When I run `rm /var/lib/apt/periodic/update-success-stamp` with sudo - When I run `apt-get update` with sudo - When I run `apt upgrade` with sudo + When I apt update + # the apt-news.service unit runs in the background, give it some time to fetch the json file + When I wait `5` seconds + When I apt upgrade Then I will see the following on stdout """ Reading package lists... @@ -388,7 +369,7 @@ } """ When I run `pro refresh messages` with sudo - When I run `apt upgrade` with sudo + When I apt upgrade Then I will see the following on stdout """ Reading package lists... 
@@ -418,7 +399,7 @@ } """ When I run `pro refresh messages` with sudo - When I run `apt upgrade` with sudo + When I apt upgrade Then I will see the following on stdout """ Reading package lists... @@ -449,7 +430,7 @@ } """ When I run `pro refresh messages` with sudo - When I run `apt upgrade` with sudo + When I apt upgrade Then I will see the following on stdout """ Reading package lists... @@ -478,7 +459,7 @@ } """ When I run `pro refresh messages` with sudo - When I run `apt upgrade` with sudo + When I apt upgrade Then I will see the following on stdout """ Reading package lists... @@ -512,7 +493,7 @@ } """ When I run `pro refresh messages` with sudo - When I run `apt upgrade` with sudo + When I apt upgrade Then I will see the following on stdout """ Reading package lists... @@ -542,7 +523,7 @@ } """ When I run `pro refresh messages` with sudo - When I run `apt upgrade` with sudo + When I apt upgrade Then I will see the following on stdout """ Reading package lists... @@ -572,7 +553,7 @@ } """ When I attach `contract_token` with sudo - When I run `apt upgrade -y` with sudo + When I apt upgrade When I set the machine token overlay to the following yaml """ machineTokenInfo: @@ -581,8 +562,10 @@ """ # test that apt update will trigger hook to update apt_news for local override When I run `rm -f /var/lib/apt/periodic/update-success-stamp` with sudo - When I run `apt-get update` with sudo - When I run `apt upgrade` with sudo + When I apt update + # the apt-news.service unit runs in the background, give it some time to fetch the json file + When I wait `5` seconds + When I apt upgrade Then I will see the following on stdout """ Reading package lists... @@ -608,7 +591,7 @@ effectiveTo: $behave_var{today -3} """ When I run `pro refresh messages` with sudo - When I run `apt upgrade` with sudo + When I apt upgrade Then stdout matches regexp: """ Reading package lists... @@ -635,7 +618,7 @@ effectiveTo: $behave_var{today -20} """ When I run `pro refresh messages` with sudo - When I run `apt upgrade` with sudo + When I apt upgrade Then I will see the following on stdout """ Reading package lists... @@ -644,16 +627,16 @@ Calculating upgrade... # # *Your Ubuntu Pro subscription has EXPIRED* - # Renew your service at https://ubuntu.com/pro/dashboard + # Renew your subscription at https://ubuntu.com/pro/dashboard # 0 upgraded, 0 newly installed, 0 to remove and 0 not upgraded. """ When I run shell command `pro api u.apt_news.current_news.v1 | jq .data.attributes.current_news` as non-root Then I will see the following on stdout """ - "*Your Ubuntu Pro subscription has EXPIRED*\nRenew your service at https://ubuntu.com/pro/dashboard" + "*Your Ubuntu Pro subscription has EXPIRED*\nRenew your subscription at https://ubuntu.com/pro/dashboard" """ - When I create the file `/tmp/machine-token-overlay.json` with the following: + When I create the file `/var/lib/ubuntu-advantage/machine-token-overlay.json` with the following: """ { "machineTokenInfo": { @@ -664,7 +647,7 @@ } """ When I run `pro refresh messages` with sudo - When I run `apt upgrade` with sudo + When I apt upgrade Then I will see the following on stdout """ Reading package lists... @@ -673,14 +656,14 @@ Calculating upgrade... # # *Your Ubuntu Pro subscription has EXPIRED* - # Renew your service at https://ubuntu.com/pro/dashboard + # Renew your subscription at https://ubuntu.com/pro/dashboard # 0 upgraded, 0 newly installed, 0 to remove and 0 not upgraded. 
""" When I run shell command `pro api u.apt_news.current_news.v1 | jq .data.attributes.current_news` as non-root Then I will see the following on stdout """ - "*Your Ubuntu Pro subscription has EXPIRED*\nRenew your service at https://ubuntu.com/pro/dashboard" + "*Your Ubuntu Pro subscription has EXPIRED*\nRenew your subscription at https://ubuntu.com/pro/dashboard" """ Examples: ubuntu release | release | machine_type | @@ -688,21 +671,15 @@ | bionic | lxd-container | | focal | lxd-container | | jammy | lxd-container | - | lunar | lxd-container | + | mantic | lxd-container | - @series.xenial - @series.bionic - @series.focal - @uses.config.machine_type.any - @uses.config.machine_type.aws.generic - @uses.config.machine_type.azure.generic - @uses.config.machine_type.gcp.generic Scenario Outline: Cloud and series-specific URLs Given a `` `` machine with ubuntu-advantage-tools installed - When I run `apt-get update` with sudo - When I run `apt-get install ansible -y` with sudo - When I run `apt-get update` with sudo - When I run `apt upgrade --dry-run` with sudo + When I apt update + When I apt install `ansible` + # Update after installing to make sure messages are there + When I apt update + When I apt upgrade on a dry run Then stdout contains substring: """ @@ -719,24 +696,21 @@ | focal | azure.generic | Learn more about Ubuntu Pro on Azure at https://ubuntu.com/azure/pro | | focal | gcp.generic | Learn more about Ubuntu Pro on GCP at https://ubuntu.com/gcp/pro | - @series.lunar - @uses.config.machine_type.any - @uses.config.machine_type.lxd-container @uses.config.contract_token Scenario Outline: APT Hook do not advertises esm-apps on upgrade for interim releases Given a `` `` machine with ubuntu-advantage-tools installed - When I run `apt-get update` with sudo - When I run `apt-get -o APT::Get::Always-Include-Phased-Updates=true upgrade -y` with sudo - When I run `apt-get -y autoremove` with sudo - When I run `apt-get install hello -y` with sudo + When I apt update + When I apt upgrade including phased updates + When I apt autoremove + When I apt install `hello` When I run `pro config set apt_news=false` with sudo When I run `pro refresh messages` with sudo - When I run `apt upgrade` with sudo + When I apt upgrade Then stdout does not match regexp: """ Get more security updates through Ubuntu Pro with 'esm-apps' enabled: """ - When I run `apt-get upgrade` with sudo + When I apt-get upgrade Then I will see the following on stdout: """ Reading package lists... @@ -746,7 +720,7 @@ 0 upgraded, 0 newly installed, 0 to remove and 0 not upgraded. """ When I attach `contract_token` with sudo - When I run `apt upgrade --dry-run` with sudo + When I apt upgrade on a dry run Then stdout matches regexp: """ Reading package lists... @@ -755,10 +729,10 @@ Calculating upgrade... 0 upgraded, 0 newly installed, 0 to remove and 0 not upgraded\. """ - When I run `apt-get upgrade -y` with sudo + When I apt upgrade When I run `pro detach --assume-yes` with sudo When I run `pro refresh messages` with sudo - When I run `apt upgrade` with sudo + When I apt upgrade Then stdout matches regexp: """ Reading package lists... 
@@ -769,4 +743,4 @@
"""
Examples: ubuntu release
| release | machine_type |
- | lunar | lxd-container |
+ | mantic | lxd-container |
diff -Nru ubuntu-advantage-tools-30~23.10/features/attach_invalidtoken.feature ubuntu-advantage-tools-31.2~23.10/features/attach_invalidtoken.feature
--- ubuntu-advantage-tools-30~23.10/features/attach_invalidtoken.feature 2023-11-07 14:23:34.000000000 +0000
+++ ubuntu-advantage-tools-31.2~23.10/features/attach_invalidtoken.feature 2024-01-18 17:34:13.000000000 +0000
@@ -1,9 +1,6 @@
Feature: Command behaviour when trying to attach a machine to an Ubuntu Pro subscription using an invalid token
- @series.all
- @uses.config.machine_type.any
- @uses.config.machine_type.lxd-container
Scenario Outline: Attach command failure on invalid token
Given a `` `` machine with ubuntu-advantage-tools installed
When I verify that running `pro attach INVALID_TOKEN` `with sudo` exits `1`
@@ -29,12 +26,8 @@
| bionic | lxd-container |
| focal | lxd-container |
| jammy | lxd-container |
- | lunar | lxd-container |
| mantic | lxd-container |
- @series.all
- @uses.config.machine_type.any
- @uses.config.machine_type.lxd-container
@uses.config.contract_token_staging_expired
Scenario Outline: Attach command failure on expired token
Given a `` `` machine with ubuntu-advantage-tools installed
@@ -58,5 +51,4 @@
| bionic | lxd-container |
| focal | lxd-container |
| jammy | lxd-container |
- | lunar | lxd-container |
| mantic | lxd-container |
diff -Nru ubuntu-advantage-tools-30~23.10/features/attach_validtoken.feature ubuntu-advantage-tools-31.2~23.10/features/attach_validtoken.feature
--- ubuntu-advantage-tools-30~23.10/features/attach_validtoken.feature 2023-11-07 14:23:34.000000000 +0000
+++ ubuntu-advantage-tools-31.2~23.10/features/attach_validtoken.feature 2024-01-18 17:34:13.000000000 +0000
@@ -2,16 +2,13 @@
Feature: Command behaviour when attaching a machine to an Ubuntu Pro subscription using a valid token
- @series.lunar
- @series.mantic
- @uses.config.machine_type.lxd-container
Scenario Outline: Attached command in a non-lts ubuntu machine
- Given a `` machine with ubuntu-advantage-tools installed
+ Given a `` `` machine with ubuntu-advantage-tools installed
When I attach `contract_token` with sudo
And I run `pro status` as non-root
Then stdout matches regexp:
"""
- No Ubuntu Pro services are available to this system.
+
"""
And stdout matches regexp:
"""
@@ -38,17 +35,14 @@
"""
Examples: ubuntu release
- | release | landscape |
- | lunar | n/a |
- | mantic | yes |
+ | release | machine_type | landscape | status_string |
+ | mantic | lxd-container | disabled | landscape +yes +disabled +Management and administration tool for Ubuntu |
- @series.lts
- @uses.config.machine_type.lxd-container
Scenario Outline: Attach command in a ubuntu lxd container
- Given a `` machine with ubuntu-advantage-tools installed
- When I run `apt-get update` with sudo, retrying exit [100]
- And I run `apt install update-motd` with sudo, retrying exit [100]
- And I run `DEBIAN_FRONTEND=noninteractive apt-get install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y ` with sudo, retrying exit [100]
+ Given a `` `` machine with ubuntu-advantage-tools installed
+ When I apt update
+ And I apt install `update-motd`
+ And I apt install ``
And I run `pro refresh messages` with sudo
Then stdout matches regexp:
"""
@@ -78,15 +72,12 @@
"""
This machine is now attached to
"""
- And stdout matches regexp:
- """
- esm-apps +yes +enabled +Expanded Security Maintenance for Applications
- esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure
- """
And stderr matches regexp:
"""
Enabling default service esm-infra
"""
+ Then I verify that `esm-infra` is enabled
+ And I verify that `esm-apps` is enabled
When I verify that running `pro attach contract_token` `with sudo` exits `2`
Then stderr matches regexp:
"""
@@ -95,16 +86,14 @@
"""
Examples: ubuntu release packages
- | release | downrev_pkg | cc_status | cis_or_usg | cis | fips | livepatch_desc |
- | xenial | libkrad0=1.13.2+dfsg-5 | disabled | cis | disabled | disabled | Canonical Livepatch service |
- | bionic | libkrad0=1.16-2build1 | disabled | cis | disabled | disabled | Canonical Livepatch service |
- | focal | hello=2.10-2ubuntu2 | n/a | usg | disabled | disabled | Canonical Livepatch service |
- | jammy | hello=2.10-2ubuntu4 | n/a | usg | n/a | n/a | Canonical Livepatch service |
+ | release | machine_type | downrev_pkg | cc_status | cis_or_usg | cis | fips | livepatch_desc |
+ | xenial | lxd-container | libkrad0=1.13.2+dfsg-5 | disabled | cis | disabled | disabled | Canonical Livepatch service |
+ | bionic | lxd-container | libkrad0=1.16-2build1 | disabled | cis | disabled | disabled | Canonical Livepatch service |
+ | focal | lxd-container | hello=2.10-2ubuntu2 | n/a | usg | disabled | disabled | Canonical Livepatch service |
+ | jammy | lxd-container | hello=2.10-2ubuntu4 | n/a | usg | n/a | n/a | Canonical Livepatch service |
- @series.lts
- @uses.config.machine_type.lxd-container
Scenario Outline: Attach command with attach config
- Given a `` machine with ubuntu-advantage-tools installed
+ Given a `` `` machine with ubuntu-advantage-tools installed
# simplest happy path
When I create the file `/tmp/attach.yaml` with the following
"""
@@ -112,18 +101,9 @@
"""
When I replace `` in `/tmp/attach.yaml` with token `contract_token`
When I run `pro attach --attach-config /tmp/attach.yaml` with sudo
- Then stdout matches regexp:
- """
- esm-apps +yes +enabled
- """
- And stdout matches regexp:
- """
- esm-infra +yes +enabled
- """
- And stdout matches regexp:
- """
- +yes +disabled
- """
+ Then I verify that `esm-apps` is enabled
+ And I verify that `esm-infra` is enabled
+ And I verify that `` is disabled
When I run `pro detach --assume-yes` with sudo
# don't allow both token on cli and config
Then I verify that running `pro attach TOKEN
--attach-config /tmp/attach.yaml` `with sudo` exits `1` @@ -142,18 +122,9 @@ """ When I replace `` in `/tmp/attach.yaml` with token `contract_token` When I run `pro attach --attach-config /tmp/attach.yaml` with sudo - Then stdout matches regexp: - """ - esm-apps +yes +enabled - """ - And stdout matches regexp: - """ - esm-infra +yes +disabled - """ - And stdout matches regexp: - """ - +yes +enabled - """ + Then I verify that `esm-apps` is enabled + And I verify that `esm-infra` is disabled + And I verify that `` is enabled When I run `pro detach --assume-yes` with sudo # missing token When I create the file `/tmp/attach.yaml` with the following @@ -194,111 +165,22 @@ """ When I replace `` in `/tmp/attach.yaml` with token `contract_token` Then I verify that running `pro attach --attach-config /tmp/attach.yaml` `with sudo` exits `1` - Then stdout matches regexp: - """ - esm-apps +yes +enabled - """ - And stdout matches regexp: - """ - esm-infra +yes +disabled - """ - Then stderr matches regexp: - """ - Cannot enable unknown service 'nonexistent, nonexistent2'. - """ - Examples: ubuntu - | release | cis_or_usg | - | xenial | cis | - | bionic | cis | - | focal | usg | - - @series.all - @uses.config.machine_type.aws.generic - Scenario Outline: Attach command in an generic AWS Ubuntu VM - Given a `` machine with ubuntu-advantage-tools installed - When I set the machine token overlay to the following yaml - """ - machineTokenInfo: - contractInfo: - resourceEntitlements: - - type: esm-apps - entitled: false - """ - And I attach `contract_token` with sudo - Then stdout matches regexp: - """ - Ubuntu Pro: ESM Infra enabled - """ - And stdout matches regexp: - """ - This machine is now attached to - """ - And stdout matches regexp: - """ - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - """ And stderr matches regexp: """ - Enabling default service esm-infra + Cannot enable unknown service 'nonexistent, nonexistent2'. 
""" + And I verify that `esm-apps` is enabled + And I verify that `esm-infra` is disabled - Examples: ubuntu release livepatch status - | release | fips_status |lp_status | lp_desc | cc_status | cis_or_usg | cis_status | - | xenial | disabled |enabled | Canonical Livepatch service | disabled | cis | disabled | - | bionic | disabled |enabled | Canonical Livepatch service | disabled | cis | disabled | - | focal | disabled |enabled | Canonical Livepatch service | n/a | usg | disabled | - | jammy | n/a |enabled | Canonical Livepatch service | n/a | usg | n/a | - - @series.lts - @uses.config.machine_type.azure.generic - Scenario Outline: Attach command in an generic Azure Ubuntu VM - Given a `` machine with ubuntu-advantage-tools installed - When I set the machine token overlay to the following yaml - """ - machineTokenInfo: - contractInfo: - resourceEntitlements: - - type: esm-apps - entitled: false - """ - And I attach `contract_token` with sudo - Then stdout matches regexp: - """ - Ubuntu Pro: ESM Infra enabled - """ - And stdout matches regexp: - """ - This machine is now attached to - """ - And stdout matches regexp: - """ - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - """ - And stderr matches regexp: - """ - Enabling default service esm-infra - """ + Examples: ubuntu + | release | machine_type | cis_or_usg | + | xenial | lxd-container | cis | + | bionic | lxd-container | cis | + | focal | lxd-container | usg | - Examples: ubuntu release livepatch status - | release | lp_status | fips_status | cc_status | cis_or_usg | cis_status | - | xenial | enabled | disabled | disabled | cis | disabled | - | bionic | enabled | disabled | disabled | cis | disabled | - | focal | enabled | disabled | n/a | usg | disabled | - | jammy | enabled | n/a | n/a | usg | n/a | - - @series.all - @uses.config.machine_type.gcp.generic - Scenario Outline: Attach command in an generic GCP Ubuntu VM - Given a `` machine with ubuntu-advantage-tools installed - When I set the machine token overlay to the following yaml - """ - machineTokenInfo: - contractInfo: - resourceEntitlements: - - type: esm-apps - entitled: false - """ - And I attach `contract_token` with sudo + Scenario Outline: Attach command in an generic cloud images + Given a `` `` machine with ubuntu-advantage-tools installed + When I attach `contract_token` with sudo Then stdout matches regexp: """ Ubuntu Pro: ESM Infra enabled @@ -307,54 +189,51 @@ """ This machine is now attached to """ - And stdout matches regexp: - """ - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - """ And stderr matches regexp: """ Enabling default service esm-infra """ + And I verify that `esm-infra` is enabled Examples: ubuntu release livepatch status - | release | lp_status | fips_status | cc_status | cis_or_usg | cis_status | - | xenial | n/a | n/a | disabled | cis | disabled | - | bionic | enabled | disabled | disabled | cis | disabled | - | focal | enabled | disabled | n/a | usg | disabled | - | jammy | enabled | n/a | n/a | usg | n/a | + | release | machine_type | + | xenial | aws.generic | + | xenial | azure.generic | + | xenial | gcp.generic | + | bionic | aws.generic | + | bionic | azure.generic | + | bionic | gcp.generic | + | focal | aws.generic | + | focal | azure.generic | + | focal | gcp.generic | + | jammy | aws.generic | + | jammy | azure.generic | + | jammy | gcp.generic | - @series.all - @uses.config.machine_type.lxd-container Scenario Outline: Attach command with json output - Given a `` machine with 
ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I verify that running attach `as non-root` with json response exits `1` Then I will see the following on stdout: - """ - {"_schema_version": "0.1", "errors": [{"message": "This command must be run as root (try using sudo).", "message_code": "nonroot-user", "service": null, "type": "system"}], "failed_services": [], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} - """ + """ + {"_schema_version": "0.1", "errors": [{"message": "This command must be run as root (try using sudo).", "message_code": "nonroot-user", "service": null, "type": "system"}], "failed_services": [], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} + """ When I verify that running attach `with sudo` with json response exits `0` Then I will see the following on stdout: - """ - {"_schema_version": "0.1", "errors": [], "failed_services": [], "needs_reboot": false, "processed_services": ["esm-apps", "esm-infra"], "result": "success", "warnings": []} - """ - When I run `pro status` with sudo - Then stdout matches regexp: """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure + {"_schema_version": "0.1", "errors": [], "failed_services": [], "needs_reboot": false, "processed_services": ["esm-apps", "esm-infra"], "result": "success", "warnings": []} """ + And I verify that `esm-infra` is enabled + And I verify that `esm-apps` is enabled Examples: ubuntu release - | release | cc-eal | - | xenial | disabled | - | bionic | disabled | - | focal | n/a | - | jammy | n/a | + | release | machine_type | cc-eal | + | xenial | lxd-container | disabled | + | bionic | lxd-container | disabled | + | focal | lxd-container | n/a | + | jammy | lxd-container | n/a | - @series.all - @uses.config.machine_type.lxd-container Scenario Outline: Attach and Check for contract change in status checking - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo Then stdout matches regexp: """ @@ -364,10 +243,7 @@ """ This machine is now attached to """ - And stdout matches regexp: - """ - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - """ + And I verify that `esm-infra` is enabled When I set the machine token overlay to the following yaml """ machineTokenInfo: @@ -397,8 +273,8 @@ """ Examples: ubuntu release livepatch status - | release | + | release | machine_type | # removing until we add this feature back in a way that doesn't hammer the server - #| xenial | - #| bionic | - #| focal | + #| xenial | lxd-container | + #| bionic | lxd-container | + #| focal | lxd-container | diff -Nru ubuntu-advantage-tools-30~23.10/features/attached_commands.feature ubuntu-advantage-tools-31.2~23.10/features/attached_commands.feature --- ubuntu-advantage-tools-30~23.10/features/attached_commands.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/attached_commands.feature 2024-02-29 14:03:11.000000000 +0000 @@ -1,116 +1,132 @@ @uses.config.contract_token Feature: Command behaviour when attached to an Ubuntu Pro subscription - @series.all - @uses.config.machine_type.lxd-container Scenario Outline: Attached refresh in a ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with 
ubuntu-advantage-tools installed When I attach `contract_token` with sudo + And I verify that `Bearer ` field is redacted in the logs + And I verify that `'attach', '` field is redacted in the logs + And I verify that `'machineToken': '` field is redacted in the logs Then I verify that running `pro refresh` `as non-root` exits `1` And stderr matches regexp: - """ - This command must be run as root \(try using sudo\). - """ + """ + This command must be run as root \(try using sudo\). + """ When I run `pro refresh` with sudo Then I will see the following on stdout: - """ - Successfully processed your pro configuration. - Successfully refreshed your subscription. - Successfully updated Ubuntu Pro related APT and MOTD messages. - """ + """ + Successfully processed your pro configuration. + Successfully refreshed your subscription. + Successfully updated Ubuntu Pro related APT and MOTD messages. + """ When I run `pro refresh config` with sudo Then I will see the following on stdout: - """ - Successfully processed your pro configuration. - """ + """ + Successfully processed your pro configuration. + """ When I run `pro refresh contract` with sudo Then I will see the following on stdout: - """ - Successfully refreshed your subscription. - """ - When I run `pro refresh messages` with sudo - Then I will see the following on stdout: - """ - Successfully updated Ubuntu Pro related APT and MOTD messages. - """ - When I run `python3 /usr/lib/ubuntu-advantage/timer.py` with sudo - And I run `sh -c "ls /var/log/ubuntu-advantage* | sort -d"` as non-root - Then stdout matches regexp: """ - /var/log/ubuntu-advantage.log + Successfully refreshed your subscription. """ - When I run `logrotate --force /etc/logrotate.d/ubuntu-advantage-tools` with sudo - And I run `sh -c "ls /var/log/ubuntu-advantage* | sort -d"` as non-root - Then stdout matches regexp: + When I run `pro refresh messages` with sudo + Then I will see the following on stdout: """ - /var/log/ubuntu-advantage.log - /var/log/ubuntu-advantage.log.1 + Successfully updated Ubuntu Pro related APT and MOTD messages. """ Examples: ubuntu release - | release | - | bionic | - | focal | - | xenial | - | jammy | - | lunar | - | mantic | - - @series.all - @uses.config.machine_type.lxd-container - Scenario Outline: Attached disable of an already disabled service in a ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + | release | machine_type | + | bionic | lxd-container | + | focal | lxd-container | + | xenial | lxd-container | + | jammy | lxd-container | + | mantic | lxd-container | + + Scenario Outline: Disable command on an attached machine + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo Then I verify that running `pro disable livepatch` `as non-root` exits `1` And stderr matches regexp: - """ - This command must be run as root \(try using sudo\). - """ - And I verify that running `pro disable livepatch` `with sudo` exits `1` - And I will see the following on stdout: - """ - Livepatch is not currently enabled - See: sudo pro status - """ + """ + This command must be run as root \(try using sudo\). + """ + When I verify that running `pro disable foobar` `as non-root` exits `1` + Then stderr matches regexp: + """ + This command must be run as root \(try using sudo\). 
+ """ + When I verify that running `pro disable livepatch` `with sudo` exits `1` + Then I will see the following on stdout: + """ + Livepatch is not currently enabled + See: sudo pro status + """ + When I verify that running `pro disable foobar` `with sudo` exits `1` + Then stderr matches regexp: + """ + Cannot disable unknown service 'foobar'. + + """ + When I verify that running `pro disable livepatch foobar` `as non-root` exits `1` + Then stderr matches regexp: + """ + This command must be run as root \(try using sudo\) + """ + When I verify that running `pro disable livepatch foobar` `with sudo` exits `1` + Then I will see the following on stdout: + """ + Livepatch is not currently enabled + See: sudo pro status + """ + And stderr matches regexp: + """ + Cannot disable unknown service 'foobar'. + + """ + When I verify that running `pro disable esm-infra` `as non-root` exits `1` + Then stderr matches regexp: + """ + This command must be run as root \(try using sudo\). + """ + When I run `pro disable esm-infra` with sudo + Then I verify that `esm-infra` is disabled + And I verify that running `apt update` `with sudo` exits `0` Examples: ubuntu release - | release | - | bionic | - | focal | - | xenial | - | jammy | - | lunar | - | mantic | + | release | machine_type | msg | + | xenial | lxd-container | Try anbox-cloud, cc-eal, cis, esm-apps, esm-infra, fips, fips-preview,\nfips-updates, landscape, livepatch, realtime-kernel, ros, ros-updates. | + | bionic | lxd-container | Try anbox-cloud, cc-eal, cis, esm-apps, esm-infra, fips, fips-preview,\nfips-updates, landscape, livepatch, realtime-kernel, ros, ros-updates. | + | focal | lxd-container | Try anbox-cloud, cc-eal, esm-apps, esm-infra, fips, fips-preview, fips-updates,\nlandscape, livepatch, realtime-kernel, ros, ros-updates, usg. | + | jammy | lxd-container | Try anbox-cloud, cc-eal, esm-apps, esm-infra, fips, fips-preview, fips-updates,\nlandscape, livepatch, realtime-kernel, ros, ros-updates, usg. 
| - @series.lts - @uses.config.machine_type.lxd-container Scenario Outline: Attached disable with json format - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo Then I verify that running `pro disable foobar --format json` `as non-root` exits `1` And stdout is a json matching the `ua_operation` schema And I will see the following on stdout: - """ - {"_schema_version": "0.1", "errors": [{"message": "json formatted response requires --assume-yes flag.", "message_code": "json-format-require-assume-yes", "service": null, "type": "system"}], "failed_services": [], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} - """ + """ + {"_schema_version": "0.1", "errors": [{"message": "json formatted response requires --assume-yes flag.", "message_code": "json-format-require-assume-yes", "service": null, "type": "system"}], "failed_services": [], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} + """ Then I verify that running `pro disable foobar --format json` `with sudo` exits `1` And stdout is a json matching the `ua_operation` schema And I will see the following on stdout: - """ - {"_schema_version": "0.1", "errors": [{"message": "json formatted response requires --assume-yes flag.", "message_code": "json-format-require-assume-yes", "service": null, "type": "system"}], "failed_services": [], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} - """ + """ + {"_schema_version": "0.1", "errors": [{"message": "json formatted response requires --assume-yes flag.", "message_code": "json-format-require-assume-yes", "service": null, "type": "system"}], "failed_services": [], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} + """ Then I verify that running `pro disable foobar --format json --assume-yes` `as non-root` exits `1` And stdout is a json matching the `ua_operation` schema And I will see the following on stdout: - """ - {"_schema_version": "0.1", "errors": [{"message": "This command must be run as root (try using sudo).", "message_code": "nonroot-user", "service": null, "type": "system"}], "failed_services": [], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} - """ + """ + {"_schema_version": "0.1", "errors": [{"message": "This command must be run as root (try using sudo).", "message_code": "nonroot-user", "service": null, "type": "system"}], "failed_services": [], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} + """ And I verify that running `pro disable foobar --format json --assume-yes` `with sudo` exits `1` And stdout is a json matching the `ua_operation` schema And I will see the following on stdout: - """ - {"_schema_version": "0.1", "errors": [{"additional_info": {"invalid_service": "foobar", "operation": "disable", "service_msg": "Try "}, "message": "Cannot disable unknown service 'foobar'.\nTry ", "message_code": "invalid-service-or-failure", "service": null, "type": "system"}], "failed_services": [], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} - """ + """ + {"_schema_version": "0.1", "errors": [{"additional_info": {"invalid_service": "foobar", "operation": "disable", "service_msg": "Try "}, "message": "Cannot disable unknown service 'foobar'.\nTry ", "message_code": "invalid-service-or-failure", "service": null, "type": 
"system"}], "failed_services": [], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} + """ And I verify that running `pro disable livepatch --format json --assume-yes` `with sudo` exits `1` And stdout is a json matching the `ua_operation` schema And I will see the following on stdout: @@ -132,57 +148,18 @@ """ Examples: ubuntu release - | release | valid_services | - | xenial | anbox-cloud, cc-eal, cis, esm-apps, esm-infra, fips, fips-preview,\nfips-updates, landscape, livepatch, realtime-kernel, ros, ros-updates. | - | bionic | anbox-cloud, cc-eal, cis, esm-apps, esm-infra, fips, fips-preview,\nfips-updates, landscape, livepatch, realtime-kernel, ros, ros-updates. | - | focal | anbox-cloud, cc-eal, esm-apps, esm-infra, fips, fips-preview, fips-updates,\nlandscape, livepatch, realtime-kernel, ros, ros-updates, usg. | - | jammy | anbox-cloud, cc-eal, esm-apps, esm-infra, fips, fips-preview, fips-updates,\nlandscape, livepatch, realtime-kernel, ros, ros-updates, usg. | - - @series.lts - @uses.config.machine_type.lxd-container - Scenario Outline: Attached disable of a service in a ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed - When I attach `contract_token` with sudo - Then I verify that running `pro disable foobar` `as non-root` exits `1` - And stderr matches regexp: - """ - This command must be run as root \(try using sudo\). - """ - And I verify that running `pro disable foobar` `with sudo` exits `1` - And stderr matches regexp: - """ - Cannot disable unknown service 'foobar'. - - """ - And I verify that running `pro disable esm-infra` `as non-root` exits `1` - And stderr matches regexp: - """ - This command must be run as root \(try using sudo\). - """ - When I run `pro disable esm-infra` with sudo - Then I will see the following on stdout: - """ - Updating package lists - """ - When I run `pro status` with sudo - Then stdout matches regexp: - """ - esm-infra +yes +disabled +Expanded Security Maintenance for Infrastructure - """ - And I verify that running `apt update` `with sudo` exits `0` - - Examples: ubuntu release - | release | msg | - | xenial | Try anbox-cloud, cc-eal, cis, esm-apps, esm-infra, fips, fips-preview,\nfips-updates, landscape, livepatch, realtime-kernel, ros, ros-updates. | - | bionic | Try anbox-cloud, cc-eal, cis, esm-apps, esm-infra, fips, fips-preview,\nfips-updates, landscape, livepatch, realtime-kernel, ros, ros-updates. | - | focal | Try anbox-cloud, cc-eal, esm-apps, esm-infra, fips, fips-preview, fips-updates,\nlandscape, livepatch, realtime-kernel, ros, ros-updates, usg. | - | jammy | Try anbox-cloud, cc-eal, esm-apps, esm-infra, fips, fips-preview, fips-updates,\nlandscape, livepatch, realtime-kernel, ros, ros-updates, usg. | + | release | machine_type | valid_services | + | xenial | lxd-container | anbox-cloud, cc-eal, cis, esm-apps, esm-infra, fips, fips-preview,\nfips-updates, landscape, livepatch, realtime-kernel, ros, ros-updates. | + | bionic | lxd-container | anbox-cloud, cc-eal, cis, esm-apps, esm-infra, fips, fips-preview,\nfips-updates, landscape, livepatch, realtime-kernel, ros, ros-updates. | + | focal | lxd-container | anbox-cloud, cc-eal, esm-apps, esm-infra, fips, fips-preview, fips-updates,\nlandscape, livepatch, realtime-kernel, ros, ros-updates, usg. | + | jammy | lxd-container | anbox-cloud, cc-eal, esm-apps, esm-infra, fips, fips-preview, fips-updates,\nlandscape, livepatch, realtime-kernel, ros, ros-updates, usg. 
| - @series.lts - @uses.config.machine_type.lxd-container Scenario Outline: Attached detach in an ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo + And I verify that `Bearer ` field is redacted in the logs + And I verify that `'attach', '` field is redacted in the logs + And I verify that `'machineToken': '` field is redacted in the logs And I run `pro api u.pro.status.enabled_services.v1` as non-root Then stdout matches regexp: """ @@ -203,98 +180,67 @@ Updating package lists This machine is now detached. """ - When I run `pro status --all` as non-root - Then stdout matches regexp: - """ - SERVICE +AVAILABLE DESCRIPTION - anbox-cloud + .* - cc-eal + +Common Criteria EAL2 Provisioning Packages - """ - Then stdout matches regexp: - """ - esm-apps + +Expanded Security Maintenance for Applications - esm-infra +yes +Expanded Security Maintenance for Infrastructure - fips + +NIST-certified FIPS crypto packages - fips-preview +.* +.* - fips-updates + +FIPS compliant crypto packages with stable security updates - landscape +(yes|no) +Management and administration tool for Ubuntu - livepatch +(yes|no) +(Canonical Livepatch service|Current kernel is not supported) - realtime-kernel + +Ubuntu kernel with PREEMPT_RT patches integrated - ros + +Security Updates for the Robot Operating System - ros-updates + +All Updates for the Robot Operating System - """ - Then stdout matches regexp: - """ - + +Security compliance and audit tools - """ - And stdout matches regexp: - """ - This machine is not attached to an Ubuntu Pro subscription. - """ - And I verify that running `apt update` `with sudo` exits `0` - When I attach `contract_token` with sudo - Then I verify that running `pro enable foobar --format json` `as non-root` exits `1` - And stdout is a json matching the `ua_operation` schema - And I will see the following on stdout: - """ - {"_schema_version": "0.1", "errors": [{"message": "json formatted response requires --assume-yes flag.", "message_code": "json-format-require-assume-yes", "service": null, "type": "system"}], "failed_services": [], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} - """ - Then I verify that running `pro enable foobar --format json` `with sudo` exits `1` - And stdout is a json matching the `ua_operation` schema - And I will see the following on stdout: - """ - {"_schema_version": "0.1", "errors": [{"message": "json formatted response requires --assume-yes flag.", "message_code": "json-format-require-assume-yes", "service": null, "type": "system"}], "failed_services": [], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} - """ - Then I verify that running `pro detach --format json --assume-yes` `as non-root` exits `1` - And stdout is a json matching the `ua_operation` schema - And I will see the following on stdout: - """ - {"_schema_version": "0.1", "errors": [{"message": "This command must be run as root (try using sudo).", "message_code": "nonroot-user", "service": null, "type": "system"}], "failed_services": [], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} - """ - When I run `pro detach --format json --assume-yes` with sudo - Then stdout is a json matching the `ua_operation` schema - And I will see the following on stdout: - """ - {"_schema_version": "0.1", "errors": [], "failed_services": [], "needs_reboot": false, 
"processed_services": ["esm-apps", "esm-infra"], "result": "success", "warnings": []} - """ + And the machine is unattached + And I ensure apt update runs without errors + When I attach `contract_token` with sudo + Then I verify that running `pro enable foobar --format json` `as non-root` exits `1` + And stdout is a json matching the `ua_operation` schema + And I will see the following on stdout: + """ + {"_schema_version": "0.1", "errors": [{"message": "json formatted response requires --assume-yes flag.", "message_code": "json-format-require-assume-yes", "service": null, "type": "system"}], "failed_services": [], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} + """ + Then I verify that running `pro enable foobar --format json` `with sudo` exits `1` + And stdout is a json matching the `ua_operation` schema + And I will see the following on stdout: + """ + {"_schema_version": "0.1", "errors": [{"message": "json formatted response requires --assume-yes flag.", "message_code": "json-format-require-assume-yes", "service": null, "type": "system"}], "failed_services": [], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} + """ + Then I verify that running `pro detach --format json --assume-yes` `as non-root` exits `1` + And stdout is a json matching the `ua_operation` schema + And I will see the following on stdout: + """ + {"_schema_version": "0.1", "errors": [{"message": "This command must be run as root (try using sudo).", "message_code": "nonroot-user", "service": null, "type": "system"}], "failed_services": [], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} + """ + When I run `pro detach --format json --assume-yes` with sudo + Then stdout is a json matching the `ua_operation` schema + And I will see the following on stdout: + """ + {"_schema_version": "0.1", "errors": [], "failed_services": [], "needs_reboot": false, "processed_services": ["esm-apps", "esm-infra"], "result": "success", "warnings": []} + """ + And the machine is unattached Examples: ubuntu release - | release | anbox | esm-apps | cc-eal | cis | fips | fips-update | ros | ros-updates | cis_or_usg | realtime-kernel | - | xenial | no | yes | yes | yes | yes | yes | yes | yes | cis | no | - | bionic | no | yes | yes | yes | yes | yes | yes | yes | cis | no | - | focal | yes | yes | no | yes | yes | yes | yes | no | usg | no | - | jammy | yes | yes | no | yes | no | no | no | no | usg | yes | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | - @series.all - @uses.config.machine_type.lxd-container Scenario Outline: Attached auto-attach in a ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo Then I verify that running `pro auto-attach` `as non-root` exits `1` And stderr matches regexp: - """ - This command must be run as root \(try using sudo\). - """ + """ + This command must be run as root \(try using sudo\). + """ When I verify that running `pro auto-attach` `with sudo` exits `2` Then stderr matches regexp: - """ - This machine is already attached to 'UA Client Test' - To use a different subscription first run: sudo pro detach. - """ + """ + This machine is already attached to '.+' + To use a different subscription first run: sudo pro detach. 
+ """ Examples: ubuntu release - | release | - | bionic | - | focal | - | xenial | - | jammy | - | lunar | - | mantic | + | release | machine_type | + | bionic | lxd-container | + | focal | lxd-container | + | xenial | lxd-container | + | jammy | lxd-container | + | mantic | lxd-container | - @series.all - @uses.config.machine_type.lxd-container Scenario Outline: Attached show version in a ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo And I run `pro version` as non-root Then I will see the uaclient version on stdout @@ -306,19 +252,16 @@ Then I will see the uaclient version on stdout Examples: ubuntu release - | release | - | bionic | - | focal | - | xenial | - | jammy | - | lunar | - | mantic | + | release | machine_type | + | bionic | lxd-container | + | focal | lxd-container | + | xenial | lxd-container | + | jammy | lxd-container | + | mantic | lxd-container | - @series.all - @uses.config.machine_type.lxd-container Scenario Outline: Attached status in a ubuntu machine with feature overrides - Given a `` machine with ubuntu-advantage-tools installed - When I create the file `/tmp/machine-token-overlay.json` with the following: + Given a `` `` machine with ubuntu-advantage-tools installed + When I create the file `/var/lib/ubuntu-advantage/machine-token-overlay.json` with the following: """ { "machineTokenInfo": { @@ -336,7 +279,7 @@ And I append the following on uaclient config: """ features: - machine_token_overlay: "/tmp/machine-token-overlay.json" + machine_token_overlay: "/var/lib/ubuntu-advantage/machine-token-overlay.json" disable_auto_attach: true other: false """ @@ -352,7 +295,7 @@ """ FEATURES disable_auto_attach: True - machine_token_overlay: /tmp/machine-token-overlay.json + machine_token_overlay: /var/lib/ubuntu-advantage/machine-token-overlay.json other: False """ When I run `pro status --all` as non-root @@ -366,7 +309,7 @@ """ FEATURES disable_auto_attach: True - machine_token_overlay: /tmp/machine-token-overlay.json + machine_token_overlay: /var/lib/ubuntu-advantage/machine-token-overlay.json other: False """ When I run `pro detach --assume-yes` with sudo @@ -377,42 +320,18 @@ """ Examples: ubuntu release - | release | - | bionic | - | focal | - | xenial | - | jammy | - | lunar | - | mantic | - - @series.lts - @uses.config.machine_type.lxd-container - Scenario Outline: Attached disable of different services in a ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + | release | machine_type | + | bionic | lxd-container | + | focal | lxd-container | + | xenial | lxd-container | + | jammy | lxd-container | + | mantic | lxd-container | + + Scenario Outline: Attached enable when reboot required + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo - Then I verify that running `pro disable esm-infra livepatch foobar` `as non-root` exits `1` - And stderr matches regexp: - """ - This command must be run as root \(try using sudo\) - """ - And I verify that running `pro disable esm-infra livepatch foobar` `with sudo` exits `1` - And I will see the following on stdout: - """ - Updating package lists - Livepatch is not currently enabled - See: sudo pro status - """ - And stderr matches regexp: - """ - Cannot disable unknown service 'foobar'. 
- - """ - When I run `pro status` with sudo - Then stdout matches regexp: - """ - esm-infra +yes +disabled +Expanded Security Maintenance for Infrastructure - """ - When I run `touch /var/run/reboot-required` with sudo + And I run `pro disable esm-infra` with sudo + And I run `touch /var/run/reboot-required` with sudo And I run `touch /var/run/reboot-required.pkgs` with sudo And I run `pro enable esm-infra` with sudo Then stdout matches regexp: @@ -426,19 +345,14 @@ """ Examples: ubuntu release - | release | msg | - | xenial | Try anbox-cloud, cc-eal, cis, esm-apps, esm-infra, fips, fips-preview,\nfips-updates, landscape, livepatch, realtime-kernel, ros, ros-updates. | - | bionic | Try anbox-cloud, cc-eal, cis, esm-apps, esm-infra, fips, fips-preview,\nfips-updates, landscape, livepatch, realtime-kernel, ros, ros-updates. | - | focal | Try anbox-cloud, cc-eal, esm-apps, esm-infra, fips, fips-preview, fips-updates,\nlandscape, livepatch, realtime-kernel, ros, ros-updates, usg. | - | jammy | Try anbox-cloud, cc-eal, esm-apps, esm-infra, fips, fips-preview, fips-updates,\nlandscape, livepatch, realtime-kernel, ros, ros-updates, usg. | - - @series.xenial - @series.bionic - @series.lunar - @series.mantic - @uses.config.machine_type.lxd-container + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | + Scenario Outline: Help command on an attached machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo And I run `pro help esm-infra` with sudo Then I will see the following on stdout: @@ -547,17 +461,13 @@ """ Examples: ubuntu release - | release | infra-status | - | bionic | enabled | - | xenial | enabled | - | lunar | n/a | - | mantic | n/a | - - @series.jammy - @series.focal - @uses.config.machine_type.lxd-container + | release | machine_type | infra-status | + | bionic | lxd-container | enabled | + | xenial | lxd-container | enabled | + | mantic | lxd-container | n/a | + Scenario Outline: Help command on an attached machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo And I run `pro help esm-infra` with sudo Then I will see the following on stdout: @@ -678,14 +588,12 @@ """ Examples: ubuntu release - | release | - | focal | - | jammy | + | release | machine_type | + | focal | lxd-container | + | jammy | lxd-container | - @series.all - @uses.config.machine_type.lxd-container Scenario Outline: Run timer script on an attached machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I run `systemctl stop ua-timer.timer` with sudo And I attach `contract_token` with sudo Then I verify that running `pro config set update_messaging_timer=-2` `with sudo` exits `1` @@ -752,21 +660,18 @@ """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | - | lunar | - | mantic | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | + | mantic | lxd-container | - @series.lts - @uses.config.machine_type.lxd-container Scenario Outline: Run timer script to valid machine activity endpoint - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed 
When I attach `contract_token` with sudo - And I run `apt update` with sudo - And I run `apt install jq -y` with sudo + And I apt update + And I apt install `jq` And I save the `activityInfo.activityToken` value from the contract And I save the `activityInfo.activityID` value from the contract # normal metering call when activityId is set by attach response above, expect new @@ -782,7 +687,7 @@ Then I verify that `activityInfo.activityToken` value has been updated on the contract And I verify that `activityInfo.activityID` value has been updated on the contract # We are keeping this test to guarantee that the activityPingInterval is also updated - When I create the file `/tmp/machine-token-overlay.json` with the following: + When I create the file `/var/lib/ubuntu-advantage/machine-token-overlay.json` with the following: """ { "machineTokenInfo": { @@ -793,7 +698,7 @@ } } """ - And I create the file `/tmp/response-overlay.json` with the following: + And I create the file `/var/lib/ubuntu-advantage/response-overlay.json` with the following: """ { "https://contracts.canonical.com/v1/contracts/testCID/machine-activity/testMID": [ @@ -810,8 +715,8 @@ And I append the following on uaclient config: """ features: - machine_token_overlay: "/tmp/machine-token-overlay.json" - serviceclient_url_responses: "/tmp/response-overlay.json" + machine_token_overlay: "/var/lib/ubuntu-advantage/machine-token-overlay.json" + serviceclient_url_responses: "/var/lib/ubuntu-advantage/response-overlay.json" """ When I delete the file `/var/lib/ubuntu-advantage/jobs-status.json` And I run `python3 /usr/lib/ubuntu-advantage/timer.py` with sudo @@ -826,42 +731,30 @@ """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | - @series.lts - @uses.config.machine_type.lxd-container Scenario Outline: Run timer script to valid machine activity endpoint - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo And I run `rm /var/lib/ubuntu-advantage/machine-token.json` with sudo - And I run `ua status` as non-root - Then stdout matches regexp: - """ - SERVICE +AVAILABLE +DESCRIPTION - """ + Then the machine is unattached When I run `dpkg-reconfigure ubuntu-advantage-tools` with sudo Then I verify that files exist matching `/var/lib/ubuntu-advantage/machine-token.json` - When I run `ua status` as non-root - Then stdout matches regexp: - """ - SERVICE +ENTITLED +STATUS +DESCRIPTION - """ + Then the machine is attached Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | - @series.lts - @uses.config.machine_type.lxd-container Scenario Outline: Disable with purge does not work with assume-yes - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo And I verify that running `pro disable esm-apps --assume-yes --purge` `with sudo` exits `1` Then stderr contains substring: @@ -869,18 +762,16 @@ Error: Cannot use --purge together with --assume-yes. 
""" Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | - @series.lts - @uses.config.machine_type.lxd-container Scenario Outline: Disable with purge works and purges repo services not involving a kernel - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo - And I run `apt update` with sudo + And I apt update And I apt install `ansible` And I run `pro disable esm-apps --purge` `with sudo` and stdin `y` Then stdout matches regexp: @@ -892,25 +783,19 @@ Do you want to proceed\? \(y/N\) """ - When I run `pro status` with sudo - Then stdout matches regexp: - """ - esm-apps +yes +disabled +Expanded Security Maintenance for Applications - """ + And I verify that `esm-apps` is disabled And I verify that `ansible` is installed from apt source `http://archive.ubuntu.com/ubuntu /universe` Examples: ubuntu release - | release | pocket | + | release | machine_type | pocket | # This ends up in GH #943 but maybe can be improved? - | xenial | xenial-backports | - | bionic | bionic-updates | - | focal | focal | - | jammy | jammy | + | xenial | lxd-container | xenial-backports | + | bionic | lxd-container | bionic-updates | + | focal | lxd-container | focal | + | jammy | lxd-container | jammy | - @series.lts - @uses.config.machine_type.lxd-vm Scenario Outline: Disable with purge unsupported services - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo And I verify that running `pro disable livepatch --purge` `with sudo` exits `1` Then I will see the following on stdout: @@ -919,33 +804,27 @@ """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | lxd-vm | + | bionic | lxd-vm | + | focal | lxd-vm | + | jammy | lxd-vm | @slow - @series.lts - @uses.config.machine_type.lxd-vm Scenario Outline: Disable and purge fips - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo - And I run `apt update` with sudo + And I apt update And I run `pro enable --assume-yes` with sudo And I reboot the machine - And I run `pro status` with sudo - Then stdout matches regexp: - """ - +yes +enabled - """ + Then I verify that `` is enabled When I run `uname -r` as non-root Then stdout matches regexp: """ fips """ And I verify that `openssh-server` is installed from apt source `` - And I verify that `linux-fips` is installed from apt source `` + And I verify that `` is installed from apt source `` When I run `pro disable --purge` `with sudo` and stdin `y\ny` Then stdout matches regexp: """ @@ -969,226 +848,41 @@ Do you want to proceed\? 
\(y/N\) """ When I reboot the machine - And I run `pro status` with sudo - Then stdout matches regexp: - """ - +yes +disabled - """ + Then I verify that `` is disabled When I run `uname -r` as non-root Then stdout does not match regexp: """ fips """ And I verify that `openssh-server` is installed from apt source `` - And I verify that `linux-fips` is not installed - Examples: ubuntu release - | release | fips-service | fips-name | fips-source | archive-source | - | xenial | fips | FIPS | https://esm.ubuntu.com/fips/ubuntu xenial/main | https://esm.ubuntu.com/infra/ubuntu xenial-infra-security/main | - | xenial | fips-updates | FIPS Updates | https://esm.ubuntu.com/fips-updates/ubuntu xenial-updates/main | https://esm.ubuntu.com/infra/ubuntu xenial-infra-security/main | - | bionic | fips | FIPS | https://esm.ubuntu.com/fips/ubuntu bionic/main | https://esm.ubuntu.com/infra/ubuntu bionic-infra-security/main | - | bionic | fips-updates | FIPS Updates | https://esm.ubuntu.com/fips-updates/ubuntu bionic-updates/main | https://esm.ubuntu.com/infra/ubuntu bionic-infra-security/main | - | focal | fips | FIPS | https://esm.ubuntu.com/fips/ubuntu focal/main | http://archive.ubuntu.com/ubuntu focal-updates/main | - | focal | fips-updates | FIPS Updates | https://esm.ubuntu.com/fips-updates/ubuntu focal-updates/main | http://archive.ubuntu.com/ubuntu focal-updates/main | + And I verify that `` is not installed - @slow - @series.bionic - @series.focal - @uses.config.machine_type.gcp.generic - Scenario Outline: Disable and purge fips - Given a `` machine with ubuntu-advantage-tools installed - When I attach `contract_token` with sudo - And I run `apt update` with sudo - And I run `pro enable --assume-yes` with sudo - And I reboot the machine - And I run `pro status` with sudo - Then stdout matches regexp: - """ - +yes +enabled - """ - When I run `uname -r` as non-root - Then stdout matches regexp: - """ - fips - """ - And I verify that `openssh-server` is installed from apt source `` - And I verify that `linux-gcp-fips` is installed from apt source `` - When I run `pro disable --purge` `with sudo` and stdin `y\ny` - Then stdout matches regexp: - """ - \(The --purge flag is still experimental - use with caution\) - - Purging the packages would uninstall the following kernel\(s\): - .* - .* is the current running kernel\. - If you cannot guarantee that other kernels in this system are bootable and - working properly, \*do not proceed\*\. You may end up with an unbootable system\. - Do you want to proceed\? \(y/N\) - """ - And stdout matches regexp: - """ - The following package\(s\) will be REMOVED: - (.|\n)+ - - The following package\(s\) will be reinstalled from the archive: - (.|\n)+ - - Do you want to proceed\? 
\(y/N\) - """ - When I reboot the machine - And I run `pro status` with sudo - Then stdout matches regexp: - """ - +yes +disabled - """ - When I run `uname -r` as non-root - Then stdout does not match regexp: - """ - fips - """ - And I verify that `openssh-server` is installed from apt source `` - And I verify that `linux-gcp-fips` is not installed - Examples: ubuntu release - | release | fips-service | fips-name | fips-source | archive-source | - | bionic | fips | FIPS | https://esm.ubuntu.com/fips/ubuntu bionic/main | https://esm.ubuntu.com/infra/ubuntu bionic-infra-security/main | - | bionic | fips-updates | FIPS Updates | https://esm.ubuntu.com/fips-updates/ubuntu bionic-updates/main | https://esm.ubuntu.com/infra/ubuntu bionic-infra-security/main | - | focal | fips | FIPS | https://esm.ubuntu.com/fips/ubuntu focal/main | http://us-west2.gce.archive.ubuntu.com/ubuntu focal-updates/main | - | focal | fips-updates | FIPS Updates | https://esm.ubuntu.com/fips-updates/ubuntu focal-updates/main | http://us-west2.gce.archive.ubuntu.com/ubuntu focal-updates/main | - - @slow - @series.bionic - @series.focal - @uses.config.machine_type.aws.generic - Scenario Outline: Disable and purge fips - Given a `` machine with ubuntu-advantage-tools installed - When I attach `contract_token` with sudo - And I run `apt update` with sudo - And I run `pro enable --assume-yes` with sudo - And I reboot the machine - And I run `pro status` with sudo - Then stdout matches regexp: - """ - +yes +enabled - """ - When I run `uname -r` as non-root - Then stdout matches regexp: - """ - fips - """ - And I verify that `openssh-server` is installed from apt source `` - And I verify that `linux-aws-fips` is installed from apt source `` - When I run `pro disable --purge` `with sudo` and stdin `y\ny` - Then stdout matches regexp: - """ - \(The --purge flag is still experimental - use with caution\) - - Purging the packages would uninstall the following kernel\(s\): - .* - .* is the current running kernel\. - If you cannot guarantee that other kernels in this system are bootable and - working properly, \*do not proceed\*\. You may end up with an unbootable system\. - Do you want to proceed\? \(y/N\) - """ - And stdout matches regexp: - """ - The following package\(s\) will be REMOVED: - (.|\n)+ - - The following package\(s\) will be reinstalled from the archive: - (.|\n)+ - - Do you want to proceed\? 
\(y/N\) - """ - When I reboot the machine - And I run `pro status` with sudo - Then stdout matches regexp: - """ - +yes +disabled - """ - When I run `uname -r` as non-root - Then stdout does not match regexp: - """ - fips - """ - And I verify that `openssh-server` is installed from apt source `` - And I verify that `linux-aws-fips` is not installed - Examples: ubuntu release - | release | fips-service | fips-name | fips-source | archive-source | - | bionic | fips | FIPS | https://esm.ubuntu.com/fips/ubuntu bionic/main | https://esm.ubuntu.com/infra/ubuntu bionic-infra-security/main | - | bionic | fips-updates | FIPS Updates | https://esm.ubuntu.com/fips-updates/ubuntu bionic-updates/main | https://esm.ubuntu.com/infra/ubuntu bionic-infra-security/main | - | focal | fips | FIPS | https://esm.ubuntu.com/fips/ubuntu focal/main | http://us-east-2.ec2.archive.ubuntu.com/ubuntu focal-updates/main | - | focal | fips-updates | FIPS Updates | https://esm.ubuntu.com/fips-updates/ubuntu focal-updates/main | http://us-east-2.ec2.archive.ubuntu.com/ubuntu focal-updates/main | - - @slow - @series.bionic - @series.focal - @uses.config.machine_type.azure.generic - Scenario Outline: Disable and purge fips - Given a `` machine with ubuntu-advantage-tools installed - When I attach `contract_token` with sudo - And I run `apt update` with sudo - And I run `pro enable --assume-yes` with sudo - And I reboot the machine - And I run `pro status` with sudo - Then stdout matches regexp: - """ - +yes +enabled - """ - When I run `uname -r` as non-root - Then stdout matches regexp: - """ - fips - """ - And I verify that `openssh-server` is installed from apt source `` - And I verify that `linux-azure-fips` is installed from apt source `` - When I run `pro disable --purge` `with sudo` and stdin `y\ny` - Then stdout matches regexp: - """ - \(The --purge flag is still experimental - use with caution\) - - Purging the packages would uninstall the following kernel\(s\): - .* - .* is the current running kernel\. - If you cannot guarantee that other kernels in this system are bootable and - working properly, \*do not proceed\*\. You may end up with an unbootable system\. - Do you want to proceed\? \(y/N\) - """ - And stdout matches regexp: - """ - The following package\(s\) will be REMOVED: - (.|\n)+ - - The following package\(s\) will be reinstalled from the archive: - (.|\n)+ - - Do you want to proceed\? 
\(y/N\) - """ - When I reboot the machine - And I run `pro status` with sudo - Then stdout matches regexp: - """ - +yes +disabled - """ - When I run `uname -r` as non-root - Then stdout does not match regexp: - """ - fips - """ - And I verify that `openssh-server` is installed from apt source `` - And I verify that `linux-azure-fips` is not installed Examples: ubuntu release - | release | fips-service | fips-name | fips-source | archive-source | - | bionic | fips | FIPS | https://esm.ubuntu.com/fips/ubuntu bionic/main | https://esm.ubuntu.com/infra/ubuntu bionic-infra-security/main | - | bionic | fips-updates | FIPS Updates | https://esm.ubuntu.com/fips-updates/ubuntu bionic-updates/main | https://esm.ubuntu.com/infra/ubuntu bionic-infra-security/main | - | focal | fips | FIPS | https://esm.ubuntu.com/fips/ubuntu focal/main | http://azure.archive.ubuntu.com/ubuntu focal-updates/main | - | focal | fips-updates | FIPS Updates | https://esm.ubuntu.com/fips-updates/ubuntu focal-updates/main | http://azure.archive.ubuntu.com/ubuntu focal-updates/main | + | release | machine_type | fips-service | fips-name | kernel-package | fips-source | archive-source | + | xenial | lxd-vm | fips | FIPS | linux-fips | https://esm.ubuntu.com/fips/ubuntu xenial/main | https://esm.ubuntu.com/infra/ubuntu xenial-infra-security/main | + | xenial | lxd-vm | fips-updates | FIPS Updates | linux-fips | https://esm.ubuntu.com/fips-updates/ubuntu xenial-updates/main | https://esm.ubuntu.com/infra/ubuntu xenial-infra-security/main | + | bionic | lxd-vm | fips | FIPS | linux-fips | https://esm.ubuntu.com/fips/ubuntu bionic/main | https://esm.ubuntu.com/infra/ubuntu bionic-infra-security/main | + | bionic | lxd-vm | fips-updates | FIPS Updates | linux-fips | https://esm.ubuntu.com/fips-updates/ubuntu bionic-updates/main | https://esm.ubuntu.com/infra/ubuntu bionic-infra-security/main | + | bionic | aws.generic | fips | FIPS | linux-aws-fips | https://esm.ubuntu.com/fips/ubuntu bionic/main | https://esm.ubuntu.com/infra/ubuntu bionic-infra-security/main | + | bionic | aws.generic | fips-updates | FIPS Updates | linux-aws-fips | https://esm.ubuntu.com/fips-updates/ubuntu bionic-updates/main | https://esm.ubuntu.com/infra/ubuntu bionic-infra-security/main | + | bionic | azure.generic | fips | FIPS | linux-azure-fips | https://esm.ubuntu.com/fips/ubuntu bionic/main | https://esm.ubuntu.com/infra/ubuntu bionic-infra-security/main | + | bionic | azure.generic | fips-updates | FIPS Updates | linux-azure-fips | https://esm.ubuntu.com/fips-updates/ubuntu bionic-updates/main | https://esm.ubuntu.com/infra/ubuntu bionic-infra-security/main | + | bionic | gcp.generic | fips | FIPS | linux-gcp-fips | https://esm.ubuntu.com/fips/ubuntu bionic/main | https://esm.ubuntu.com/infra/ubuntu bionic-infra-security/main | + | bionic | gcp.generic | fips-updates | FIPS Updates | linux-gcp-fips | https://esm.ubuntu.com/fips-updates/ubuntu bionic-updates/main | https://esm.ubuntu.com/infra/ubuntu bionic-infra-security/main | + | focal | lxd-vm | fips | FIPS | linux-fips | https://esm.ubuntu.com/fips/ubuntu focal/main | http://archive.ubuntu.com/ubuntu focal-updates/main | + | focal | lxd-vm | fips-updates | FIPS Updates | linux-fips | https://esm.ubuntu.com/fips-updates/ubuntu focal-updates/main | http://archive.ubuntu.com/ubuntu focal-updates/main | + | focal | aws.generic | fips | FIPS | linux-aws-fips | https://esm.ubuntu.com/fips/ubuntu focal/main | http://us-east-2.ec2.archive.ubuntu.com/ubuntu focal-updates/main | + | focal | aws.generic | 
fips-updates | FIPS Updates | linux-aws-fips | https://esm.ubuntu.com/fips-updates/ubuntu focal-updates/main | http://us-east-2.ec2.archive.ubuntu.com/ubuntu focal-updates/main | + | focal | azure.generic | fips | FIPS | linux-azure-fips | https://esm.ubuntu.com/fips/ubuntu focal/main | http://azure.archive.ubuntu.com/ubuntu focal-updates/main | + | focal | azure.generic | fips-updates | FIPS Updates | linux-azure-fips | https://esm.ubuntu.com/fips-updates/ubuntu focal-updates/main | http://azure.archive.ubuntu.com/ubuntu focal-updates/main | + | focal | gcp.generic | fips | FIPS | linux-gcp-fips | https://esm.ubuntu.com/fips/ubuntu focal/main | http://us-west2.gce.archive.ubuntu.com/ubuntu focal-updates/main | + | focal | gcp.generic | fips-updates | FIPS Updates | linux-gcp-fips | https://esm.ubuntu.com/fips-updates/ubuntu focal-updates/main | http://us-west2.gce.archive.ubuntu.com/ubuntu focal-updates/main | @slow - @series.lts - @uses.config.machine_type.lxd-vm Scenario Outline: Disable does not purge if no other kernel found - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo - And I run `apt update` with sudo + And I apt update And I run `pro enable fips --assume-yes` with sudo And I reboot the machine And I run shell command `rm -rf $(find /boot -name 'vmlinuz*[^fips]')` with sudo @@ -1206,7 +900,7 @@ """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | + | release | machine_type | + | xenial | lxd-vm | + | bionic | lxd-vm | + | focal | lxd-vm | diff -Nru ubuntu-advantage-tools-30~23.10/features/attached_enable.feature ubuntu-advantage-tools-31.2~23.10/features/attached_enable.feature --- ubuntu-advantage-tools-30~23.10/features/attached_enable.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/attached_enable.feature 2024-02-14 15:37:46.000000000 +0000 @@ -1,12 +1,8 @@ @uses.config.contract_token Feature: Enable command behaviour when attached to an Ubuntu Pro subscription - @slow - @series.xenial - @series.bionic - @uses.config.machine_type.lxd-container Scenario Outline: Attached enable Common Criteria service in an ubuntu lxd container - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo Then I verify that running `pro enable cc-eal` `as non-root` exits `1` And I will see the following on stderr: @@ -25,15 +21,12 @@ Please follow instructions in /usr/share/doc/ubuntu-commoncriteria/README to configure EAL2 """ Examples: ubuntu release - | release | - | xenial | - | bionic | - - @series.xenial - @series.bionic - @uses.config.machine_type.lxd-container + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + Scenario Outline: Enable cc-eal with --access-only - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo When I run `pro enable cc-eal --access-only` with sudo Then I will see the following on stdout: @@ -45,17 +38,12 @@ """ Then I verify that running `apt-get install ubuntu-commoncriteria` `with sudo` exits `0` Examples: ubuntu release - | release | - | xenial | - | bionic | - - @series.focal - @series.jammy - @series.lunar - @series.mantic - @uses.config.machine_type.lxd-container + | release | machine_type | + | xenial | lxd-container | + | bionic | 
lxd-container | + Scenario Outline: Attached enable Common Criteria service in an ubuntu lxd container - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo Then I verify that running `pro enable cc-eal` `as non-root` exits `1` And I will see the following on stderr: @@ -69,16 +57,13 @@ CC EAL2 is not available for Ubuntu (). """ Examples: ubuntu release - | release | version | full_name | - | focal | 20.04 LTS | Focal Fossa | - | jammy | 22.04 LTS | Jammy Jellyfish | - | lunar | 23.04 | Lunar Lobster | - | mantic | 23.10 | Mantic Minotaur | + | release | machine_type | version | full_name | + | focal | lxd-container | 20.04 LTS | Focal Fossa | + | jammy | lxd-container | 22.04 LTS | Jammy Jellyfish | + | mantic | lxd-container | 23.10 | Mantic Minotaur | - @series.lts - @uses.config.machine_type.lxd-container Scenario Outline: Empty series affordance means no series, null means all series - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo and options `--no-auto-enable` When I set the machine token overlay to the following yaml """ @@ -95,7 +80,7 @@ One moment, checking your subscription first Ubuntu Pro: ESM Infra is not available for Ubuntu .* """ - When I create the file `/tmp/machine-token-overlay.json` with the following: + When I create the file `/var/lib/ubuntu-advantage/machine-token-overlay.json` with the following: """ { "machineTokenInfo": { @@ -120,16 +105,14 @@ Ubuntu Pro: ESM Infra enabled """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | - @series.lts - @uses.config.machine_type.lxd-container Scenario Outline: Attached enable of different services using json format - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo Then I verify that running `pro enable foobar --format json` `as non-root` exits `1` And stdout is a json matching the `ua_operation` schema @@ -190,16 +173,14 @@ """ Examples: ubuntu release - | release | valid_services | - | xenial | anbox-cloud, cc-eal, cis, esm-apps, esm-infra, fips, fips-preview,\nfips-updates, landscape, livepatch, realtime-kernel, ros, ros-updates. | - | bionic | anbox-cloud, cc-eal, cis, esm-apps, esm-infra, fips, fips-preview,\nfips-updates, landscape, livepatch, realtime-kernel, ros, ros-updates. | - | focal | anbox-cloud, cc-eal, esm-apps, esm-infra, fips, fips-preview, fips-updates,\nlandscape, livepatch, realtime-kernel, ros, ros-updates, usg. | - | jammy | anbox-cloud, cc-eal, esm-apps, esm-infra, fips, fips-preview, fips-updates,\nlandscape, livepatch, realtime-kernel, ros, ros-updates, usg. | + | release | machine_type | valid_services | + | xenial | lxd-container | anbox-cloud, cc-eal, cis, esm-apps, esm-infra, fips, fips-preview,\nfips-updates, landscape, livepatch, realtime-kernel, ros, ros-updates. | + | bionic | lxd-container | anbox-cloud, cc-eal, cis, esm-apps, esm-infra, fips, fips-preview,\nfips-updates, landscape, livepatch, realtime-kernel, ros, ros-updates. 
| + | focal | lxd-container | anbox-cloud, cc-eal, esm-apps, esm-infra, fips, fips-preview, fips-updates,\nlandscape, livepatch, realtime-kernel, ros, ros-updates, usg. | + | jammy | lxd-container | anbox-cloud, cc-eal, esm-apps, esm-infra, fips, fips-preview, fips-updates,\nlandscape, livepatch, realtime-kernel, ros, ros-updates, usg. | - @series.lts - @uses.config.machine_type.lxd-container Scenario Outline: Attached enable of a service in a ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo Then I verify that running `pro enable foobar` `as non-root` exits `1` And I will see the following on stderr: @@ -242,8 +223,8 @@ """ -infra-security/main amd64 Packages """ - And I verify that running `apt update` `with sudo` exits `0` - When I run `apt install -y ` with sudo, retrying exit [100] + And I ensure apt update runs without errors + When I apt install `` And I run `apt-cache policy ` as non-root Then stdout matches regexp: """ @@ -251,15 +232,13 @@ """ Examples: ubuntu release - | release | infra-pkg | esm-infra-url | msg | - | xenial | libkrad0 | https://esm.ubuntu.com/infra/ubuntu | Try anbox-cloud, cc-eal, cis, esm-apps, esm-infra, fips, fips-preview,\nfips-updates, landscape, livepatch, realtime-kernel, ros, ros-updates. | - | bionic | libkrad0 | https://esm.ubuntu.com/infra/ubuntu | Try anbox-cloud, cc-eal, cis, esm-apps, esm-infra, fips, fips-preview,\nfips-updates, landscape, livepatch, realtime-kernel, ros, ros-updates. | - | focal | hello | https://esm.ubuntu.com/infra/ubuntu | Try anbox-cloud, cc-eal, esm-apps, esm-infra, fips, fips-preview, fips-updates,\nlandscape, livepatch, realtime-kernel, ros, ros-updates, usg. | + | release | machine_type | infra-pkg | esm-infra-url | msg | + | xenial | lxd-container | libkrad0 | https://esm.ubuntu.com/infra/ubuntu | Try anbox-cloud, cc-eal, cis, esm-apps, esm-infra, fips, fips-preview,\nfips-updates, landscape, livepatch, realtime-kernel, ros, ros-updates. | + | bionic | lxd-container | libkrad0 | https://esm.ubuntu.com/infra/ubuntu | Try anbox-cloud, cc-eal, cis, esm-apps, esm-infra, fips, fips-preview,\nfips-updates, landscape, livepatch, realtime-kernel, ros, ros-updates. | + | focal | lxd-container | hello | https://esm.ubuntu.com/infra/ubuntu | Try anbox-cloud, cc-eal, esm-apps, esm-infra, fips, fips-preview, fips-updates,\nlandscape, livepatch, realtime-kernel, ros, ros-updates, usg. 
| - @series.all - @uses.config.machine_type.lxd-container Scenario Outline: Attached enable of non-container services in a ubuntu lxd container - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo Then I verify that running `pro enable livepatch` `as non-root` exits `1` And I will see the following on stderr: @@ -274,18 +253,15 @@ """ Examples: Un-supported services in containers - | release | - | bionic | - | focal | - | xenial | - | jammy | - | lunar | - | mantic | + | release | machine_type | + | bionic | lxd-container | + | focal | lxd-container | + | xenial | lxd-container | + | jammy | lxd-container | + | mantic | lxd-container | - @series.lts - @uses.config.machine_type.lxd-container Scenario Outline: Attached enable not entitled service in a ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I set the machine token overlay to the following yaml """ machineTokenInfo: @@ -309,17 +285,14 @@ """ Examples: not entitled services - | release | - | xenial | - | bionic | - | focal | - | jammy | - - @series.xenial - @series.bionic - @uses.config.machine_type.lxd-container + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | + Scenario Outline: Attached enable of cis service in a ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo And I verify that running `pro enable cis --access-only` `with sudo` exits `0` Then I will see the following on stdout: @@ -397,20 +370,18 @@ """ Examples: cis script - | release | cis_script | - | bionic | Canonical_Ubuntu_18.04_CIS-harden.sh | - | xenial | Canonical_Ubuntu_16.04_CIS_v1.1.0-harden.sh | + | release | machine_type | cis_script | + | bionic | lxd-container | Canonical_Ubuntu_18.04_CIS-harden.sh | + | xenial | lxd-container | Canonical_Ubuntu_16.04_CIS_v1.1.0-harden.sh | - @series.focal - @uses.config.machine_type.lxd-container Scenario Outline: Attached enable of cis service in a ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo And I verify that running `pro enable cis` `with sudo` exits `0` Then I will see the following on stdout: """ One moment, checking your subscription first - From Ubuntu 20.04 and onwards 'pro enable cis' has been + From Ubuntu 20.04 onward 'pro enable cis' has been replaced by 'pro enable usg'. See more information at: https://ubuntu.com/security/certifications/docs/usg Updating CIS Audit package lists @@ -441,7 +412,7 @@ Then stdout matches regexp """ One moment, checking your subscription first - From Ubuntu 20.04 and onwards 'pro enable cis' has been + From Ubuntu 20.04 onward 'pro enable cis' has been replaced by 'pro enable usg'. See more information at: https://ubuntu.com/security/certifications/docs/usg CIS Audit is already enabled. 
@@ -478,14 +449,11 @@ """ Examples: cis script - | release | cis_script | - | focal | Canonical_Ubuntu_20.04_CIS-harden.sh | + | release | machine_type | cis_script | + | focal | lxd-container | Canonical_Ubuntu_20.04_CIS-harden.sh | - @series.bionic - @series.xenial - @uses.config.machine_type.lxd-container Scenario Outline: Attached enable of usg service in a ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo And I verify that running `pro enable usg` `with sudo` exits `1` Then I will see the following on stdout: @@ -499,86 +467,59 @@ """ Examples: cis service - | release | - | bionic | - | xenial | + | release | machine_type | + | bionic | lxd-container | + | xenial | lxd-container | - @series.focal - @uses.config.machine_type.lxd-container Scenario Outline: Attached enable of usg service in a focal machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo And I run `pro enable usg` with sudo Then I will see the following on stdout: - """ - One moment, checking your subscription first - Updating Ubuntu Security Guide package lists - Ubuntu Security Guide enabled - Visit https://ubuntu.com/security/certifications/docs/usg for the next steps - """ - When I run `pro status` with sudo - Then stdout matches regexp: - """ - usg +yes +enabled +Security compliance and audit tools - """ + """ + One moment, checking your subscription first + Updating Ubuntu Security Guide package lists + Ubuntu Security Guide enabled + Visit https://ubuntu.com/security/certifications/docs/usg for the next steps + """ + And I verify that `usg` is enabled When I run `pro disable usg` with sudo Then stdout matches regexp: """ Updating package lists """ - When I run `pro status` with sudo - Then stdout matches regexp: - """ - usg +yes +disabled +Security compliance and audit tools - """ + And I verify that `usg` is disabled When I run `pro enable cis` with sudo Then I will see the following on stdout: - """ - One moment, checking your subscription first - From Ubuntu 20.04 and onwards 'pro enable cis' has been - replaced by 'pro enable usg'. See more information at: - https://ubuntu.com/security/certifications/docs/usg - Updating CIS Audit package lists - Updating standard Ubuntu package lists - Installing CIS Audit packages - CIS Audit enabled - Visit https://ubuntu.com/security/cis to learn how to use CIS - """ - When I run `pro status` with sudo - Then stdout matches regexp: - """ - usg +yes +enabled +Security compliance and audit tools - """ + """ + One moment, checking your subscription first + From Ubuntu 20.04 onward 'pro enable cis' has been + replaced by 'pro enable usg'. 
See more information at: + https://ubuntu.com/security/certifications/docs/usg + Updating CIS Audit package lists + Updating standard Ubuntu package lists + Installing CIS Audit packages + CIS Audit enabled + Visit https://ubuntu.com/security/cis to learn how to use CIS + """ + And I verify that `usg` is enabled When I run `pro disable usg` with sudo Then stdout matches regexp: - """ - Updating package lists - """ - When I run `pro status` with sudo - Then stdout matches regexp: - """ - usg +yes +disabled +Security compliance and audit tools - """ + """ + Updating package lists + """ + And I verify that `usg` is disabled Examples: cis service - | release | - | focal | + | release | machine_type | + | focal | lxd-container | - @series.bionic - @series.xenial - @uses.config.machine_type.lxd-vm Scenario Outline: Attached disable of livepatch in a lxd vm - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo - And I run `pro status` with sudo - Then stdout matches regexp: - """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - fips +yes +disabled +NIST-certified FIPS crypto packages - fips-updates +yes +disabled +FIPS compliant crypto packages with stable security updates - livepatch +yes + +Canonical Livepatch service - """ + Then I verify that `esm-apps` is enabled + And I verify that `esm-infra` is enabled + And I verify that `livepatch` status is `` When I run `pro disable livepatch` with sudo Then I verify that running `canonical-livepatch status` `with sudo` exits `1` And stderr matches regexp: @@ -586,15 +527,9 @@ Machine is not enabled. Please run 'sudo canonical-livepatch enable' with the token obtained from https://ubuntu.com/livepatch. 
""" - When I run `pro status` with sudo - Then stdout matches regexp: - """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - fips +yes +disabled +NIST-certified FIPS crypto packages - fips-updates +yes +disabled +FIPS compliant crypto packages with stable security updates - livepatch +yes +disabled +Canonical Livepatch service - """ + And I verify that `esm-apps` is enabled + And I verify that `esm-infra` is enabled + And I verify that `livepatch` is disabled When I verify that running `pro enable livepatch --access-only` `with sudo` exits `1` Then I will see the following on stdout: """ @@ -603,23 +538,20 @@ """ Examples: ubuntu release - | release | livepatch_status | - | xenial | warning | - | bionic | enabled | - - @series.xenial - @series.bionic - @uses.config.machine_type.lxd-vm + | release | machine_type | livepatch_status | + | xenial | lxd-vm | warning | + | bionic | lxd-vm | enabled | + Scenario Outline: Attach works when snapd cannot be installed - Given a `` machine with ubuntu-advantage-tools installed - When I run `apt-get remove -y snapd` with sudo + Given a `` `` machine with ubuntu-advantage-tools installed + When I apt remove `snapd` And I create the file `/etc/apt/preferences.d/no-snapd` with the following """ Package: snapd Pin: release o=* Pin-Priority: -10 """ - And I run `apt-get update` with sudo + And I apt update When I attempt to attach `contract_token` with sudo Then I will see the following on stderr: """ @@ -628,13 +560,9 @@ Enabling default service livepatch Failed to enable default services, check: sudo pro status """ - When I run `pro status` with sudo - Then stdout matches regexp: - """ - livepatch +yes +disabled - """ - Then I verify that running `pro enable livepatch` `with sudo` exits `1` - Then I will see the following on stdout: + And I verify that `livepatch` is disabled + And I verify that running `pro enable livepatch` `with sudo` exits `1` + And I will see the following on stdout: """ One moment, checking your subscription first Installing snapd @@ -642,57 +570,44 @@ Failed to install snapd on the system """ Examples: ubuntu release - | release | - | xenial | - | bionic | - - @series.bionic - @series.xenial - @uses.config.machine_type.lxd-vm + | release | machine_type | + | xenial | lxd-vm | + | bionic | lxd-vm | + Scenario Outline: Attached enable livepatch - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I verify that running `canonical-livepatch status` `with sudo` exits `1` Then I will see the following on stderr: - """ - sudo: canonical-livepatch: command not found - """ + """ + sudo: canonical-livepatch: command not found + """ When I attach `contract_token` with sudo Then stdout matches regexp: - """ - Installing canonical-livepatch snap - Canonical Livepatch enabled - """ - When I run `pro status` with sudo - Then stdout matches regexp: - """ - livepatch +yes + - """ + """ + Installing canonical-livepatch snap + Canonical Livepatch enabled + """ + And I verify that `livepatch` status is `` When I run `canonical-livepatch status` with sudo Then stdout matches regexp: - """ - running: true - """ + """ + running: true + """ Examples: ubuntu release - | release | livepatch_status | - | xenial | warning | - | bionic | enabled | + | release | machine_type | livepatch_status | + | xenial | lxd-vm | warning | + | bionic | lxd-vm | enabled | - @series.xenial - 
@uses.config.machine_type.lxd-vm Scenario Outline: Attached enable livepatch - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo Then stdout matches regexp: """ Installing canonical-livepatch snap Canonical Livepatch enabled """ - When I run `pro status` with sudo - Then stdout matches regexp: - """ - livepatch +yes +warning - """ + And I verify that `livepatch` status is warning When I run `pro api u.pro.security.status.reboot_required.v1` with sudo Then stdout matches regexp: """ @@ -703,7 +618,7 @@ """ no """ - When I run `apt-get install libc6 -y` with sudo + When I apt install `libc6` And I run `pro api u.pro.security.status.reboot_required.v1` as non-root Then stdout matches regexp: """ @@ -720,7 +635,7 @@ """ no """ - When I run `apt-get install linux-image-generic -y` with sudo + When I apt install `linux-image-generic` And I run `pro api u.pro.security.status.reboot_required.v1` as non-root Then stdout matches regexp: """ @@ -731,7 +646,7 @@ """ yes """ - When I run `apt-get install dbus -y` with sudo + When I apt install `dbus` And I run `pro api u.pro.security.status.reboot_required.v1` with sudo Then stdout matches regexp: """ @@ -744,14 +659,12 @@ """ Examples: ubuntu release - | release | - | xenial | + | release | machine_type | + | xenial | lxd-vm | @slow - @series.bionic - @uses.config.machine_type.lxd-vm Scenario: Attached enable livepatch on a machine with fips active - Given a `bionic` machine with ubuntu-advantage-tools installed + Given a `bionic` `lxd-vm` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo Then stdout matches regexp: """ @@ -789,10 +702,8 @@ {"_schema_version": "0.1", "errors": [{"message": "Cannot enable Livepatch when FIPS is enabled.", "message_code": "livepatch-error-when-fips-enabled", "service": "livepatch", "type": "service"}], "failed_services": ["livepatch"], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} """ - @series.bionic - @uses.config.machine_type.lxd-vm Scenario: Attached enable fips on a machine with livepatch active - Given a `bionic` machine with ubuntu-advantage-tools installed + Given a `bionic` `lxd-vm` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo Then stdout matches regexp: """ @@ -821,11 +732,8 @@ """ @slow - @series.xenial - @series.bionic - @uses.config.machine_type.lxd-vm Scenario Outline: Attached enable fips on a machine with livepatch active - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo Then stdout matches regexp: """ @@ -859,16 +767,13 @@ """ Examples: ubuntu release - | release | - | bionic | - | xenial | + | release | machine_type | + | bionic | lxd-vm | + | xenial | lxd-vm | @slow - @series.xenial - @series.bionic - @uses.config.machine_type.lxd-vm Scenario Outline: Attached enable fips on a machine with fips-updates active - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo Then stdout matches regexp: """ @@ -898,35 +803,18 @@ """ Examples: ubuntu release - | release | - | bionic | - | xenial | - - @series.xenial - @series.bionic - @uses.config.machine_type.lxd-container + | release | machine_type | + | bionic | lxd-vm | + | xenial | lxd-vm | + Scenario Outline: Attached 
enable ros on a machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo - And I run `pro status --all` as non-root - Then stdout matches regexp - """ - ros +yes +disabled +Security Updates for the Robot Operating System - """ + Then I verify that `ros` is disabled When I run `pro enable ros --assume-yes` with sudo - And I run `pro status --all` as non-root - Then stdout matches regexp - """ - ros +yes +enabled +Security Updates for the Robot Operating System - """ - And stdout matches regexp - """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - """ - And stdout matches regexp - """ - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - """ + Then I verify that `ros` is enabled + And I verify that `esm-apps` is enabled + And I verify that `esm-infra` is enabled When I verify that running `pro disable esm-apps` `with sudo` and stdin `N` exits `1` Then stdout matches regexp """ @@ -940,15 +828,8 @@ Disable ROS ESM Security Updates and proceed to disable Ubuntu Pro: ESM Apps\? \(y\/N\) Disabling dependent service: ROS ESM Security Updates Updating package lists """ - When I run `pro status --all` as non-root - Then stdout matches regexp - """ - ros +yes +disabled +Security Updates for the Robot Operating System - """ - And stdout matches regexp - """ - esm-apps +yes +disabled +Expanded Security Maintenance for Applications - """ + And I verify that `ros` is disabled + And I verify that `esm-apps` is disabled When I verify that running `pro enable ros` `with sudo` and stdin `N` exits `1` Then stdout matches regexp """ @@ -965,39 +846,25 @@ Updating ROS ESM Security Updates package lists ROS ESM Security Updates enabled """ - When I run `pro status --all` as non-root - Then stdout matches regexp - """ - ros +yes +enabled +Security Updates for the Robot Operating System - """ - And stdout matches regexp - """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - """ - And stdout matches regexp - """ - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - """ + And I verify that `ros` is enabled + And I verify that `esm-apps` is enabled + And I verify that `esm-infra` is enabled When I run `apt-cache policy` as non-root Then apt-cache policy for the following url has priority `500` """ amd64 Packages """ - When I run `apt install python3-catkin-pkg -y` with sudo + When I apt install `python3-catkin-pkg` Then I verify that `python3-catkin-pkg` is installed from apt source `` When I run `pro enable ros-updates --assume-yes` with sudo - And I run `pro status --all` as non-root - Then stdout matches regexp - """ - ros-updates +yes +enabled +All Updates for the Robot Operating System - """ + Then I verify that `ros-updates` is enabled When I run `apt-cache policy` as non-root Then apt-cache policy for the following url has priority `500` """ amd64 Packages """ - When I run `apt install python3-catkin-pkg -y` with sudo + When I apt install `python3-catkin-pkg` Then I verify that `python3-catkin-pkg` is installed from apt source `` When I run `pro disable ros` `with sudo` and stdin `y` Then stdout matches regexp @@ -1006,6 +873,7 @@ Disable ROS ESM All Updates and proceed to disable ROS ESM Security Updates\? 
\(y\/N\) Disabling dependent service: ROS ESM All Updates Updating package lists """ + And I verify that `ros-updates` is disabled When I run `pro enable ros-updates` `with sudo` and stdin `y` Then stdout matches regexp """ @@ -1016,37 +884,17 @@ Updating ROS ESM All Updates package lists ROS ESM All Updates enabled """ - When I run `pro status --all` as non-root - Then stdout matches regexp - """ - ros-updates +yes +enabled +All Updates for the Robot Operating System - """ - And stdout matches regexp - """ - ros +yes +enabled +Security Updates for the Robot Operating System - """ + And I verify that `ros-updates` is enabled + And I verify that `ros` is enabled When I run `pro disable ros-updates --assume-yes` with sudo - When I run `pro disable ros --assume-yes` with sudo - When I run `pro disable esm-apps --assume-yes` with sudo - When I run `pro disable esm-infra --assume-yes` with sudo - When I run `pro enable ros-updates --assume-yes` with sudo - When I run `pro status --all` as non-root - Then stdout matches regexp - """ - ros-updates +yes +enabled +All Updates for the Robot Operating System - """ - And stdout matches regexp - """ - ros +yes +enabled +Security Updates for the Robot Operating System - """ - And stdout matches regexp - """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - """ - And stdout matches regexp - """ - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - """ + And I run `pro disable ros --assume-yes` with sudo + And I run `pro disable esm-apps --assume-yes` with sudo + And I run `pro disable esm-infra --assume-yes` with sudo + And I run `pro enable ros-updates --assume-yes` with sudo + Then I verify that `ros-updates` is enabled + And I verify that `ros` is enabled + And I verify that `esm-apps` is enabled + And I verify that `esm-infra` is enabled When I run `pro detach` `with sudo` and stdin `y` Then stdout matches regexp: """ @@ -1056,20 +904,19 @@ Updating package lists This machine is now detached. 
""" + And the machine is unattached Examples: ubuntu release - | release | ros-security-source | ros-updates-source | - | xenial | https://esm.ubuntu.com/ros/ubuntu xenial-security/main | https://esm.ubuntu.com/ros-updates/ubuntu xenial-updates/main | - | bionic | https://esm.ubuntu.com/ros/ubuntu bionic-security/main | https://esm.ubuntu.com/ros-updates/ubuntu bionic-updates/main | + | release | machine_type | ros-security-source | ros-updates-source | + | xenial | lxd-container | https://esm.ubuntu.com/ros/ubuntu xenial-security/main | https://esm.ubuntu.com/ros-updates/ubuntu xenial-updates/main | + | bionic | lxd-container | https://esm.ubuntu.com/ros/ubuntu bionic-security/main | https://esm.ubuntu.com/ros-updates/ubuntu bionic-updates/main | # Overall test for overrides; in the future, when many services # have overrides, we can consider removing this # esm-infra is a good choice because it doesn't already have # other overrides that would interfere with the test - @series.focal - @uses.config.machine_type.aws.generic Scenario: Cloud overrides for a generic aws Focal instance - Given a `focal` machine with ubuntu-advantage-tools installed + Given a `focal` `aws.generic` machine with ubuntu-advantage-tools installed When I set the machine token overlay to the following yaml """ machineTokenInfo: @@ -1090,15 +937,13 @@ """ And I attach `contract_token` with sudo and options `--no-auto-enable` And I verify that running `pro enable esm-infra` `with sudo` exits `1` - Then stderr matches regexp: + Then stdout matches regexp: """ - Stderr: E: Unable to locate package some-package-aws + E: Unable to locate package some-package-aws """ - @series.xenial - @uses.config.machine_type.lxd-container Scenario Outline: APT auth file is edited correctly on enable - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo When I run `wc -l /etc/apt/auth.conf.d/90ubuntu-advantage` with sudo Then I will see the following on stdout: @@ -1122,20 +967,14 @@ 3 /etc/apt/auth.conf.d/90ubuntu-advantage """ Examples: ubuntu release - | release | - | xenial | + | release | machine_type | + | xenial | lxd-container | - @series.lts - @uses.config.machine_type.lxd-container Scenario Outline: Attached enable esm-apps on a machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo - And I run `pro status --all` as non-root - Then stdout matches regexp - """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - """ - And I verify that running `apt update` `with sudo` exits `0` + Then I verify that `esm-apps` is enabled + And I ensure apt update runs without errors When I run `apt-cache policy` as non-root Then apt-cache policy for the following url has priority `510` """ @@ -1145,8 +984,8 @@ """ https://esm.ubuntu.com/apps/ubuntu -apps-security/main amd64 Packages """ - And I verify that running `apt update` `with sudo` exits `0` - When I run `apt install -y ` with sudo, retrying exit [100] + And I ensure apt update runs without errors + When I apt install `` And I run `apt-cache policy ` as non-root Then stdout matches regexp: """ @@ -1163,15 +1002,13 @@ """ Examples: ubuntu release - | release | apps-pkg | - | xenial | jq | - | bionic | bundler | - | focal | ant | + | release | machine_type | apps-pkg | + | xenial | lxd-container | jq | + | bionic | lxd-container | bundler | + | focal | 
lxd-container | ant | - @series.lts - @uses.config.machine_type.lxd-container Scenario Outline: Attached enable with corrupt lock - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo And I run `pro disable esm-infra --assume-yes` with sudo And I create the file `/var/lib/ubuntu-advantage/lock` with the following: @@ -1188,8 +1025,8 @@ """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | diff -Nru ubuntu-advantage-tools-30~23.10/features/attached_status.feature ubuntu-advantage-tools-31.2~23.10/features/attached_status.feature --- ubuntu-advantage-tools-30~23.10/features/attached_status.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/attached_status.feature 2024-02-14 15:37:46.000000000 +0000 @@ -1,17 +1,15 @@ -@uses.config.contract_token Feature: Attached status - @series.all - @uses.config.machine_type.lxd-container + @uses.config.contract_token Scenario Outline: Attached status in a ubuntu machine - formatted - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo And I run `pro status --format json` as non-root Then stdout is a json matching the `ua_status` schema When I run `pro status --format yaml` as non-root Then stdout is a yaml matching the `ua_status` schema - When I create the file `/tmp/machine-token-overlay.json` with the following: + When I create the file `/var/lib/ubuntu-advantage/machine-token-overlay.json` with the following: """ { "machineTokenInfo": { @@ -24,7 +22,7 @@ And I append the following on uaclient config: """ features: - machine_token_overlay: "/tmp/machine-token-overlay.json" + machine_token_overlay: "/var/lib/ubuntu-advantage/machine-token-overlay.json" """ And I run `pro status` with sudo Then stdout contains substring: @@ -33,18 +31,16 @@ """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | - | lunar | - | mantic | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | + | mantic | lxd-container | - @series.xenial - @uses.config.machine_type.lxd-container + @uses.config.contract_token Scenario Outline: Non-root status can see in-progress operations - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo When I run shell command `sudo pro enable cis >/dev/null & pro status` as non-root Then stdout matches regexp: @@ -72,21 +68,227 @@ Operation in progress: pro enable """ When I run `pro disable cis --assume-yes` with sudo - When I run `apt-get install jq -y` with sudo + When I apt install `jq` When I run shell command `sudo pro enable cis >/dev/null & pro status --format json | jq -r .execution_status` as non-root Then I will see the following on stdout: """ active """ Examples: ubuntu release - | release | - | xenial | + | release | machine_type | + | xenial | lxd-container | - @series.xenial - @series.bionic - @uses.config.machine_type.lxd-container + Scenario Outline: Attached status in a ubuntu Pro machine + Given a `` `` machine with ubuntu-advantage-tools installed + When I create the 
file `/etc/ubuntu-advantage/uaclient.conf` with the following: + """ + contract_url: 'https://contracts.canonical.com' + log_level: debug + """ + And I run `pro auto-attach` with sudo + When I run `pro status` as non-root + Then stdout matches regexp: + """ + SERVICE +ENTITLED +STATUS +DESCRIPTION + cc-eal +yes +disabled +Common Criteria EAL2 Provisioning Packages + cis +yes +disabled +Security compliance and audit tools + esm-apps +yes +enabled +Expanded Security Maintenance for Applications + esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure + fips +yes +disabled +NIST-certified FIPS crypto packages + fips-updates +yes +disabled +FIPS compliant crypto packages with stable security updates + livepatch +yes +enabled +Canonical Livepatch service + """ + When I run `pro status --all` as non-root + Then stdout matches regexp: + """ + SERVICE +ENTITLED +STATUS +DESCRIPTION + anbox-cloud +yes +n/a +Scalable Android in the cloud + cc-eal +yes +disabled +Common Criteria EAL2 Provisioning Packages + cis +yes +disabled +Security compliance and audit tools + esm-apps +yes +enabled +Expanded Security Maintenance for Applications + esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure + fips +yes +disabled +NIST-certified FIPS crypto packages + fips-preview +yes +n/a +Preview of FIPS crypto packages undergoing certification with NIST + fips-updates +yes +disabled +FIPS compliant crypto packages with stable security updates + livepatch +yes +enabled +Canonical Livepatch service + """ + + Examples: ubuntu release + | release | machine_type | + | xenial | aws.pro | + | xenial | azure.pro | + + Scenario Outline: Attached status in a ubuntu Pro machine + Given a `` `` machine with ubuntu-advantage-tools installed + When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: + """ + contract_url: 'https://contracts.canonical.com' + log_level: debug + """ + And I run `pro auto-attach` with sudo + And I verify root and non-root `pro status` calls have the same output + And I run `pro status` as non-root + Then stdout matches regexp: + """ + SERVICE +ENTITLED +STATUS +DESCRIPTION + cc-eal +yes +disabled +Common Criteria EAL2 Provisioning Packages + cis +yes +disabled +Security compliance and audit tools + esm-apps +yes +enabled +Expanded Security Maintenance for Applications + esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure + livepatch +yes +warning +Current kernel is not supported + """ + When I verify root and non-root `pro status --all` calls have the same output + And I run `pro status --all` as non-root + Then stdout matches regexp: + """ + SERVICE +ENTITLED +STATUS +DESCRIPTION + anbox-cloud +yes +n/a +Scalable Android in the cloud + cc-eal +yes +disabled +Common Criteria EAL2 Provisioning Packages + cis +yes +disabled +Security compliance and audit tools + esm-apps +yes +enabled +Expanded Security Maintenance for Applications + esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure + fips +yes +n/a +NIST-certified FIPS crypto packages + fips-preview +yes +n/a +Preview of FIPS crypto packages undergoing certification with NIST + fips-updates +yes +n/a +FIPS compliant crypto packages with stable security updates + livepatch +yes +warning +Current kernel is not supported + """ + + Examples: ubuntu release + | release | machine_type | + | xenial | gcp.pro | + + Scenario Outline: Attached status in a ubuntu Pro machine + Given a `` `` machine with ubuntu-advantage-tools installed + When I create the 
file `/etc/ubuntu-advantage/uaclient.conf` with the following: + """ + contract_url: 'https://contracts.canonical.com' + log_level: debug + """ + And I run `pro auto-attach` with sudo + And I verify root and non-root `pro status` calls have the same output + And I run `pro status` as non-root + Then stdout matches regexp: + """ + SERVICE +ENTITLED +STATUS +DESCRIPTION + cc-eal +yes +disabled +Common Criteria EAL2 Provisioning Packages + cis +yes +disabled +Security compliance and audit tools + esm-apps +yes +enabled +Expanded Security Maintenance for Applications + esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure + fips +yes +disabled +NIST-certified FIPS crypto packages + fips-updates +yes +disabled +FIPS compliant crypto packages with stable security updates + livepatch +yes +enabled +Canonical Livepatch service + """ + When I verify root and non-root `pro status --all` calls have the same output + And I run `pro status --all` as non-root + Then stdout matches regexp: + """ + SERVICE +ENTITLED +STATUS +DESCRIPTION + anbox-cloud +yes +n/a +Scalable Android in the cloud + cc-eal +yes +disabled +Common Criteria EAL2 Provisioning Packages + cis +yes +disabled +Security compliance and audit tools + esm-apps +yes +enabled +Expanded Security Maintenance for Applications + esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure + fips +yes +disabled +NIST-certified FIPS crypto packages + fips-preview +yes +n/a +Preview of FIPS crypto packages undergoing certification with NIST + fips-updates +yes +disabled +FIPS compliant crypto packages with stable security updates + livepatch +yes +enabled +Canonical Livepatch service + """ + + Examples: ubuntu release + | release | machine_type | + | bionic | aws.pro | + | bionic | azure.pro | + | bionic | gcp.pro | + + Scenario Outline: Attached status in a ubuntu Pro machine + Given a `` `` machine with ubuntu-advantage-tools installed + When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: + """ + contract_url: 'https://contracts.canonical.com' + log_level: debug + """ + And I run `pro auto-attach` with sudo + And I verify root and non-root `pro status` calls have the same output + And I run `pro status` as non-root + Then stdout matches regexp: + """ + SERVICE +ENTITLED +STATUS +DESCRIPTION + anbox-cloud +yes +disabled +Scalable Android in the cloud + esm-apps +yes +enabled +Expanded Security Maintenance for Applications + esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure + fips +yes +disabled +NIST-certified FIPS crypto packages + fips-updates +yes +disabled +FIPS compliant crypto packages with stable security updates + livepatch +yes +enabled +Canonical Livepatch service + usg +yes +disabled +Security compliance and audit tools + """ + When I verify root and non-root `pro status --all` calls have the same output + And I run `pro status --all` as non-root + Then stdout matches regexp: + """ + SERVICE +ENTITLED +STATUS +DESCRIPTION + anbox-cloud +yes +disabled +Scalable Android in the cloud + cc-eal +yes +n/a +Common Criteria EAL2 Provisioning Packages + esm-apps +yes +enabled +Expanded Security Maintenance for Applications + esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure + fips +yes +disabled +NIST-certified FIPS crypto packages + fips-preview +yes +n/a +Preview of FIPS crypto packages undergoing certification with NIST + fips-updates +yes +disabled +FIPS compliant crypto packages with stable security updates + livepatch +yes +enabled 
+Canonical Livepatch service + usg +yes +disabled +Security compliance and audit tools + """ + + Examples: ubuntu release + | release | machine_type | + | focal | aws.pro | + | focal | azure.pro | + | focal | gcp.pro | + + Scenario Outline: Attached status in a ubuntu Pro machine + Given a `` `` machine with ubuntu-advantage-tools installed + When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: + """ + contract_url: 'https://contracts.canonical.com' + log_level: debug + """ + And I run `pro auto-attach` with sudo + And I verify root and non-root `pro status` calls have the same output + And I run `pro status` as non-root + Then stdout matches regexp: + """ + SERVICE +ENTITLED +STATUS +DESCRIPTION + anbox-cloud +yes +disabled +Scalable Android in the cloud + esm-apps +yes +enabled +Expanded Security Maintenance for Applications + esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure + fips-preview +yes +disabled +Preview of FIPS crypto packages undergoing certification with NIST + fips-updates +yes +disabled +FIPS compliant crypto packages with stable security updates + livepatch +yes +enabled +Canonical Livepatch service + usg +yes +disabled +Security compliance and audit tools + """ + When I verify root and non-root `pro status --all` calls have the same output + And I run `pro status --all` as non-root + Then stdout matches regexp: + """ + SERVICE +ENTITLED +STATUS +DESCRIPTION + anbox-cloud +yes +disabled +Scalable Android in the cloud + cc-eal +yes +n/a +Common Criteria EAL2 Provisioning Packages + esm-apps +yes +enabled +Expanded Security Maintenance for Applications + esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure + fips +yes +n/a +NIST-certified FIPS crypto packages + fips-preview +yes +disabled +Preview of FIPS crypto packages undergoing certification with NIST + fips-updates +yes +disabled +FIPS compliant crypto packages with stable security updates + livepatch +yes +enabled +Canonical Livepatch service + usg +yes +disabled +Security compliance and audit tools + """ + + Examples: ubuntu release + | release | machine_type | + | jammy | aws.pro | + | jammy | azure.pro | + | jammy | gcp.pro | + + @uses.config.contract_token Scenario Outline: Attached status in a ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo And I verify root and non-root `pro status` calls have the same output And I run `pro status` as non-root @@ -128,14 +330,13 @@ """ Examples: ubuntu release - | release | - | xenial | - | bionic | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | - @series.focal - @uses.config.machine_type.lxd-container + @uses.config.contract_token Scenario Outline: Attached status in a ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo And I verify root and non-root `pro status` calls have the same output And I run `pro status` as non-root @@ -176,13 +377,12 @@ """ Examples: ubuntu release - | release | - | focal | + | release | machine_type | + | focal | lxd-container | - @series.jammy - @uses.config.machine_type.lxd-container + @uses.config.contract_token Scenario Outline: Attached status in the latest LTS ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools 
installed When I attach `contract_token` with sudo And I verify root and non-root `pro status` calls have the same output And I run `pro status` as non-root @@ -193,6 +393,7 @@ esm-apps +yes +enabled +Expanded Security Maintenance for Applications esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure fips-preview +yes +disabled +Preview of FIPS crypto packages undergoing certification with NIST + fips-updates +yes +disabled +FIPS compliant crypto packages with stable security updates usg +yes +disabled +Security compliance and audit tools For a list of all Ubuntu Pro services, run 'pro status --all' @@ -209,7 +410,7 @@ esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure fips +yes +n/a +NIST-certified FIPS crypto packages fips-preview +yes +disabled +Preview of FIPS crypto packages undergoing certification with NIST - fips-updates +yes +n/a +FIPS compliant crypto packages with stable security updates + fips-updates +yes +disabled +FIPS compliant crypto packages with stable security updates landscape +yes +n/a +Management and administration tool for Ubuntu livepatch +yes +n/a +Canonical Livepatch service realtime-kernel +yes +n/a +Ubuntu kernel with PREEMPT_RT patches integrated @@ -223,5 +424,5 @@ """ Examples: ubuntu release - | release | - | jammy | + | release | machine_type | + | jammy | lxd-container | diff -Nru ubuntu-advantage-tools-30~23.10/features/cloud.py ubuntu-advantage-tools-31.2~23.10/features/cloud.py --- ubuntu-advantage-tools-30~23.10/features/cloud.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/cloud.py 2024-01-18 17:34:13.000000000 +0000 @@ -10,6 +10,49 @@ DEFAULT_CONFIG_PATH = "~/.config/pycloudlib.toml" +def cloud_factory(pro_config, cloud_name): + if cloud_name == "aws": + return EC2( + cloud_credentials_path=pro_config.cloud_credentials_path, + tag=pro_config.timed_job_tag, + timestamp_suffix=False, + ) + if cloud_name == "azure": + return Azure( + cloud_credentials_path=pro_config.cloud_credentials_path, + tag=pro_config.timed_job_tag, + timestamp_suffix=False, + ) + if cloud_name == "gcp": + return GCP( + cloud_credentials_path=pro_config.cloud_credentials_path, + tag=pro_config.timed_job_tag, + timestamp_suffix=False, + ) + if cloud_name == "lxd-vm": + return LXDVirtualMachine( + cloud_credentials_path=pro_config.cloud_credentials_path, + ) + if cloud_name == "lxd-container": + return LXDContainer( + cloud_credentials_path=pro_config.cloud_credentials_path, + ) + raise RuntimeError("Invalid cloud name") + + +class CloudManager: + def __init__(self, pro_config): + self.pro_config = pro_config + self.clouds = {} + + def get(self, cloud_name): + cloud = self.clouds.get(cloud_name) + if cloud is None: + cloud = cloud_factory(self.pro_config, cloud_name) + self.clouds[cloud_name] = cloud + return cloud + + class Cloud: """Base class for cloud providers that should be tested through behave. @@ -174,7 +217,11 @@ return instance.id def locate_image_name( - self, series: str, machine_type: str, daily: bool = True + self, + series: str, + machine_type: str, + daily: bool = True, + include_deprecated: bool = False, ) -> str: """Locate and return the image name to use for vm provision. 
@@ -193,10 +240,8 @@ ) image_type = ImageType.GENERIC - include_deprecated = False if "pro-fips" in machine_type: image_type = ImageType.PRO_FIPS - include_deprecated = True elif "pro" in machine_type: image_type = ImageType.PRO @@ -343,8 +388,19 @@ daily = False else: daily = True + + include_deprecated = False + if series == "xenial": + logging.debug( + "including deprecated images when locating xenial on aws" + ) + include_deprecated = True + image_name = self.locate_image_name( - series, machine_type, daily=daily + series, + machine_type, + daily=daily, + include_deprecated=include_deprecated, ) logging.info( @@ -659,7 +715,11 @@ return instance.name def locate_image_name( - self, series: str, machine_type: str, daily: bool = True + self, + series: str, + machine_type: str, + daily: bool = True, + include_deprecated: bool = False, ) -> str: """Locate and return the image name to use for vm provision. diff -Nru ubuntu-advantage-tools-30~23.10/features/cloud_pro_clone.feature ubuntu-advantage-tools-31.2~23.10/features/cloud_pro_clone.feature --- ubuntu-advantage-tools-30~23.10/features/cloud_pro_clone.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/cloud_pro_clone.feature 2024-01-18 17:34:13.000000000 +0000 @@ -1,10 +1,7 @@ Feature: Creating golden images based on Cloud Ubuntu Pro instances - @series.lts - @uses.config.machine_type.aws.pro - @uses.config.machine_type.gcp.pro Scenario Outline: Create a Pro fips-updates image and launch - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: """ contract_url: 'https://contracts.canonical.com' @@ -14,8 +11,8 @@ """ When I run `pro auto-attach` with sudo Then the machine is attached - When I run `apt update` with sudo - When I run `apt install -y jq` with sudo + When I apt update + When I apt install `jq` When I save the `activityInfo.activityToken` value from the contract When I save the `activityInfo.activityID` value from the contract When I run `pro enable fips-updates --assume-yes` with sudo @@ -31,7 +28,7 @@ When I run `python3 /usr/lib/ubuntu-advantage/timer.py` with sudo Then I verify that `activityInfo.activityToken` value has been updated on the contract Then I verify that `activityInfo.activityID` value has not been updated on the contract - When I launch a `` machine named `clone` from the snapshot of `system-under-test` + When I launch a `` `` machine named `clone` from the snapshot of `system-under-test` # The clone will run auto-attach on boot When I run `pro status --wait` `with sudo` on the `clone` machine Then the machine is attached @@ -52,6 +49,8 @@ status: enabled """ Examples: ubuntu release - | release | - | bionic | - | focal | + | release | machine_type | + | bionic | aws.pro | + | bionic | gcp.pro | + | focal | aws.pro | + | focal | gcp.pro | diff -Nru ubuntu-advantage-tools-30~23.10/features/collect_logs.feature ubuntu-advantage-tools-31.2~23.10/features/collect_logs.feature --- ubuntu-advantage-tools-30~23.10/features/collect_logs.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/collect_logs.feature 2024-01-18 17:34:13.000000000 +0000 @@ -1,25 +1,25 @@ Feature: Command behaviour when attached to an Ubuntu Pro subscription - @series.all - @uses.config.machine_type.lxd-container Scenario Outline: Run collect-logs on an unattached machine - Given a `` machine with ubuntu-advantage-tools 
installed + Given a `` `` machine with ubuntu-advantage-tools installed When I run `python3 /usr/lib/ubuntu-advantage/timer.py` with sudo # simulate logrotate When I run `touch /var/log/ubuntu-advantage.log.1` with sudo When I run `touch /var/log/ubuntu-advantage.log.2.gz` with sudo - When I run `pro collect-logs` with sudo + When I run `pro collect-logs` as non-root Then I verify that files exist matching `ua_logs.tar.gz` - When I run `tar zxf ua_logs.tar.gz` as non-root + When I run `tar zxf ua_logs.tar.gz` with sudo Then I verify that files exist matching `logs/` When I run `sh -c "ls -1 logs/ | sort -d"` as non-root # On Xenial, the return value for inexistent services is the same as for dead ones (3). # So the -error suffix does not appear there. Then stdout matches regexp: """ + apt-news.service.txt build.info cloud-id.txt cloud-init-journal.txt + esm-cache.service.txt jobs-status.json livepatch-status.txt-error pro-journal.txt @@ -37,25 +37,22 @@ ubuntu-advantage.service.txt """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | - | lunar | - | mantic | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | + | mantic | lxd-container | - @series.lts - @uses.config.machine_type.lxd-container @uses.config.contract_token Scenario Outline: Run collect-logs on an attached machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo And I run `python3 /usr/lib/ubuntu-advantage/timer.py` with sudo # simulate logrotate When I run `touch /var/log/ubuntu-advantage.log.1` with sudo When I run `touch /var/log/ubuntu-advantage.log.2.gz` with sudo - When I run `pro collect-logs` with sudo + When I run `pro collect-logs` as non-root Then I verify that files exist matching `ua_logs.tar.gz` When I run `tar zxf ua_logs.tar.gz` as non-root Then I verify that files exist matching `logs/` @@ -64,9 +61,11 @@ # So the -error suffix does not appear there. 
Then stdout matches regexp: """ + apt-news.service.txt build.info cloud-id.txt cloud-init-journal.txt + esm-cache.service.txt jobs-status.json livepatch-status.txt-error pro-journal.txt @@ -86,8 +85,8 @@ ubuntu-esm-infra.list """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | diff -Nru ubuntu-advantage-tools-30~23.10/features/config.feature ubuntu-advantage-tools-31.2~23.10/features/config.feature --- ubuntu-advantage-tools-30~23.10/features/config.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/config.feature 2024-01-18 17:34:13.000000000 +0000 @@ -1,12 +1,8 @@ Feature: pro config sub-command # earliest, latest lts[, latest stable] - @series.xenial - @series.jammy - @series.lunar - @uses.config.machine_type.lxd-container Scenario Outline: old ua_config in uaclient.conf is still supported - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I run `pro config show` with sudo Then I will see the following on stdout: """ @@ -50,7 +46,7 @@ """ """ Examples: ubuntu release - | release | - | xenial | - | jammy | - | lunar | + | release | machine_type | + | xenial | lxd-container | + | jammy | lxd-container | + | mantic | lxd-container | diff -Nru ubuntu-advantage-tools-30~23.10/features/daemon.feature ubuntu-advantage-tools-31.2~23.10/features/daemon.feature --- ubuntu-advantage-tools-30~23.10/features/daemon.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/daemon.feature 2024-02-14 15:37:46.000000000 +0000 @@ -1,28 +1,23 @@ Feature: Pro Upgrade Daemon only runs in environments where necessary - @series.all @uses.config.contract_token - @uses.config.machine_type.lxd-container Scenario Outline: cloud-id-shim service is not installed on anything other than xenial - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed Then I verify that running `systemctl status ubuntu-advantage-cloud-id-shim.service` `with sudo` exits `4` Then stderr matches regexp: """ Unit ubuntu-advantage-cloud-id-shim.service could not be found. 
""" Examples: version - | release | - | bionic | - | focal | - | jammy | - | lunar | - | mantic | + | release | machine_type | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | + | mantic | lxd-container | - @series.lts @uses.config.contract_token - @uses.config.machine_type.lxd-container Scenario Outline: cloud-id-shim should run in postinst and on boot - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed # verify installing pro created the cloud-id file When I run `cat /run/cloud-init/cloud-id` with sudo Then I will see the following on stdout @@ -52,14 +47,12 @@ lxd """ Examples: version - | release | - | xenial | + | release | machine_type | + | xenial | lxd-container | - @series.lts @uses.config.contract_token - @uses.config.machine_type.gcp.generic Scenario Outline: daemon should run when appropriate on gcp generic lts - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed # verify its enabled, but stops itself when not configured to poll When I run `journalctl -o cat -u ubuntu-advantage.service` with sudo Then stdout contains substring: @@ -110,7 +103,7 @@ # TODO find out what caused memory to go up, try to lower it again Then on `xenial`, systemd status output says memory usage is less than `17` MB Then on `bionic`, systemd status output says memory usage is less than `15` MB - Then on `focal`, systemd status output says memory usage is less than `13` MB + Then on `focal`, systemd status output says memory usage is less than `14` MB Then on `jammy`, systemd status output says memory usage is less than `14` MB When I run `journalctl -o cat -u ubuntu-advantage.service` with sudo @@ -206,17 +199,15 @@ Active: inactive \(dead\) """ Examples: version - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | gcp.generic | + | bionic | gcp.generic | + | focal | gcp.generic | + | jammy | gcp.generic | - @series.lts @uses.config.contract_token - @uses.config.machine_type.azure.generic Scenario Outline: daemon should run when appropriate on azure generic lts - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed # verify its enabled, but stops itself when not configured to poll When I run `journalctl -o cat -u ubuntu-advantage.service` with sudo Then stdout contains substring: @@ -274,18 +265,15 @@ inactive """ Examples: version - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | azure.generic | + | bionic | azure.generic | + | focal | azure.generic | + | jammy | azure.generic | - @series.lunar @uses.config.contract_token - @uses.config.machine_type.azure.generic - @uses.config.machine_type.gcp.generic Scenario Outline: daemon does not start on gcp,azure generic non lts - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I wait `1` seconds When I run `journalctl -o cat -u ubuntu-advantage.service` with sudo Then stdout contains substring: @@ -301,16 +289,13 @@ daemon ending """ Examples: version - | release | - | lunar | + | release | machine_type | + | mantic | azure.generic | + | mantic | gcp.generic | - @series.all @uses.config.contract_token - @uses.config.machine_type.lxd-container - @uses.config.machine_type.lxd-vm - @uses.config.machine_type.aws.generic Scenario Outline: daemon 
does not start when not on gcpgeneric or azuregeneric - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed Then I verify that running `systemctl status ubuntu-advantage.service` `with sudo` exits `3` Then stdout matches regexp: """ @@ -327,17 +312,25 @@ \s*Condition: start condition failed.* """ Examples: version - | release | - | xenial | - | bionic | - | focal | - | jammy | - | lunar | + | release | machine_type | + | xenial | lxd-container | + | xenial | lxd-vm | + | xenial | aws.generic | + | bionic | lxd-container | + | bionic | lxd-vm | + | bionic | aws.generic | + | focal | lxd-container | + | focal | lxd-vm | + | focal | aws.generic | + | jammy | lxd-container | + | jammy | lxd-vm | + | jammy | aws.generic | + | mantic | lxd-container | + | mantic | lxd-vm | + | mantic | aws.generic | - @series.lts - @uses.config.machine_type.aws.pro Scenario Outline: daemon does not start when not on gcpgeneric or azuregeneric - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: """ contract_url: 'https://contracts.canonical.com' @@ -361,16 +354,13 @@ \s*Condition: start condition failed.* """ Examples: version - | release | - | xenial | - | bionic | - | focal | - - @series.lts - @uses.config.machine_type.gcp.pro - @uses.config.machine_type.azure.pro + | release | machine_type | + | xenial | aws.pro | + | bionic | aws.pro | + | focal | aws.pro | + Scenario Outline: daemon does not start when not on gcpgeneric or azuregeneric - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: """ contract_url: 'https://contracts.canonical.com' @@ -387,7 +377,6 @@ """ Active: inactive \(dead\).* \s*Condition: start condition failed.* - .*ConditionPathExists=!/var/lib/ubuntu-advantage/private/machine-token.json was not met """ When I run `journalctl -o cat -u ubuntu-advantage.service` with sudo Then stdout does not contain substring: @@ -400,7 +389,6 @@ """ Active: inactive \(dead\) \s*Condition: start condition failed.* - .*ConditionPathExists=!/var/lib/ubuntu-advantage/private/machine-token.json was not met """ When I run `journalctl -o cat -u ubuntu-advantage.service` with sudo Then stdout does not contain substring: @@ -408,7 +396,50 @@ daemon starting """ Examples: version - | release | - | xenial | - | bionic | - | focal | + | release | machine_type | + | xenial | azure.pro | + | xenial | gcp.pro | + | bionic | azure.pro | + | bionic | gcp.pro | + | focal | azure.pro | + | focal | gcp.pro | + + @skip_local_environment + @skip_prebuilt_environment + @uses.config.contract_token + Scenario Outline: daemon should wait for cloud-config.service to finish + Given a `` `` machine with ubuntu-advantage-tools installed adding this cloud-init user_data + """ + ubuntu_advantage: {} + """ + When I apt remove `ubuntu-advantage-tools ubuntu-pro-client` + When I run `cloud-init clean --logs` with sudo + When I reboot the machine + When I run `journalctl -b -o cat -u ubuntu-advantage.service` with sudo + Then stdout contains substring: + """ + daemon starting + """ + Then stdout contains substring: + """ + cloud-config.service is activating. waiting to check again + """ + Then stdout does not contain substring: + """ + cloud-config.service is not activating. 
continuing + """ + When I wait `20` seconds + When I run `journalctl -b -o cat -u ubuntu-advantage.service` with sudo + Then stdout contains substring: + """ + cloud-config.service is not activating. continuing + """ + Then stdout contains substring: + """ + checking for condition files + """ + Examples: version + | release | machine_type | + | bionic | gcp.generic | + | focal | gcp.generic | + | jammy | gcp.generic | diff -Nru ubuntu-advantage-tools-30~23.10/features/detached_auto_attach.feature ubuntu-advantage-tools-31.2~23.10/features/detached_auto_attach.feature --- ubuntu-advantage-tools-30~23.10/features/detached_auto_attach.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/detached_auto_attach.feature 2024-01-18 17:34:13.000000000 +0000 @@ -1,12 +1,8 @@ @uses.config.contract_token Feature: Attached cloud does not detach when auto-attaching after manually attaching - @series.lts - @uses.config.machine_type.aws.generic - @uses.config.machine_type.azure.generic - @uses.config.machine_type.gcp.generic Scenario Outline: No detaching on manually attached machine on all clouds - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo And I run `pro refresh` with sudo Then I will see the following on stdout: @@ -18,17 +14,19 @@ When I verify that running `pro auto-attach` `with sudo` exits `2` Then stderr matches regexp: """ - This machine is already attached to 'UA Client Test' + This machine is already attached to '.+' To use a different subscription first run: sudo pro detach. """ - When I run `pro status` with sudo - Then stdout matches regexp: - """ - esm-infra +yes + +Expanded Security Maintenance for Infrastructure - """ + And I verify that `esm-infra` is enabled Examples: ubuntu release - | release | esm-service | - | bionic | enabled | - | focal | enabled | - | xenial | enabled | + | release | machine_type | + | xenial | aws.generic | + | xenial | azure.generic | + | xenial | gcp.generic | + | bionic | aws.generic | + | bionic | azure.generic | + | bionic | gcp.generic | + | focal | aws.generic | + | focal | azure.generic | + | focal | gcp.generic | diff -Nru ubuntu-advantage-tools-30~23.10/features/docker.feature ubuntu-advantage-tools-31.2~23.10/features/docker.feature --- ubuntu-advantage-tools-30~23.10/features/docker.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/docker.feature 2024-02-14 15:37:46.000000000 +0000 @@ -1,25 +1,23 @@ -@uses.config.contract_token Feature: Build docker images with pro services @slow - @docker - @series.mantic - @uses.config.machine_type.lxd-vm + @uses.config.contract_token Scenario Outline: Build docker images with pro services - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I have the `` debs under test in `/home/ubuntu` - When I run `apt-get install -y docker.io docker-buildx jq` with sudo + When I apt install `docker.io docker-buildx jq` When I create the file `/home/ubuntu/Dockerfile` with the following: """ FROM ubuntu: COPY ./ubuntu-advantage-tools.deb /ua.deb + COPY ./ubuntu-pro-client.deb /pro.deb RUN --mount=type=secret,id=ua-attach-config \ apt-get update \ && apt-get install --no-install-recommends -y ubuntu-advantage-tools ca-certificates \ - && ((dpkg -i /ua.deb || true)) \ + && ((apt install /ua.deb /pro.deb -y || true)) \ && apt-get install -f \ @@ -73,7 +71,98 @@ Then I 
verify that running `DOCKER_BUILDKIT=1 docker build . --no-cache --secret id=ua-attach-config,src=ua-attach-config.yaml -t ua-test` `with sudo` exits `1` Examples: ubuntu release - | release | container_release |enable_services | test_package_name | test_package_version | - | mantic | xenial | [ esm-infra ] | curl | esm | - | mantic | bionic | [ fips ] | openssl | fips | - | mantic | focal | [ esm-apps ] | hello | esm | + | release | machine_type | container_release |enable_services | test_package_name | test_package_version | + | mantic | lxd-vm | xenial | [ esm-infra ] | curl | esm | + | mantic | lxd-vm | bionic | [ fips ] | openssl | fips | + | mantic | lxd-vm | focal | [ esm-apps ] | hello | esm | + + Scenario Outline: Build pro docker images auto-attached instances - settings_overrides method + Given a `` `` machine with ubuntu-advantage-tools installed + When I have the `` debs under test in `/home/ubuntu` + When I run `apt-get update` with sudo + When I apt install `docker.io docker-buildx` + When I create the file `/home/ubuntu/Dockerfile` with the following: + """ + FROM ubuntu: + ARG PRO_CLOUD_OVERRIDE= + + COPY ./ubuntu-advantage-tools.deb /ua.deb + COPY ./ubuntu-pro-client.deb /pro.deb + + RUN --mount=type=secret,id=ua-attach-config \ + apt-get update \ + && apt-get install --no-install-recommends -y ubuntu-advantage-tools ca-certificates \ + + && ((apt install /ua.deb /pro.deb -y || true)) \ + + && apt-get install -f \ + + && echo "settings_overrides: { cloud_type: $PRO_CLOUD_OVERRIDE }" >> /etc/ubuntu-advantage/uaclient.conf \ + && pro api u.pro.attach.auto.full_auto_attach.v1 --data '{"enable": }' \ + + && apt-get install -y \ + + # If you need ca-certificates, remove it from this line + && apt-get purge --auto-remove -y ubuntu-advantage-tools ca-certificates \ + + && rm -rf /var/lib/apt/lists/* + """ + # Build succeeds + When I run shell command `DOCKER_BUILDKIT=1 docker build . 
-t test --build-arg PRO_CLOUD_OVERRIDE= ` with sudo + + # Service successfully enabled (Correct version of package installed) + When I run `docker run test dpkg-query --showformat='${Version}' --show ` with sudo + Then stdout matches regexp: + """ + + """ + Examples: ubuntu release + | release | machine_type | cloud_override | container_release | enable_services | test_package_name | test_package_version | extra_build_args | + | jammy | aws.pro | aws | xenial | [ "esm-infra" ] | curl | esm | --network=host | + | jammy | azure.pro | azure | bionic | [ "fips" ] | openssl | fips | | + | jammy | gcp.pro | gce | focal | [ "esm-apps" ] | hello | esm | | + + Scenario Outline: Build pro docker images auto-attached instances - API arg method + Given a `` `` machine with ubuntu-advantage-tools installed + When I have the `` debs under test in `/home/ubuntu` + When I run `apt-get update` with sudo + When I apt install `docker.io docker-buildx` + When I create the file `/home/ubuntu/Dockerfile` with the following: + """ + FROM ubuntu: + ARG PRO_CLOUD_OVERRIDE= + + COPY ./ubuntu-advantage-tools.deb /ua.deb + COPY ./ubuntu-pro-client.deb /pro.deb + + RUN --mount=type=secret,id=ua-attach-config \ + apt-get update \ + && apt-get install --no-install-recommends -y ubuntu-advantage-tools ca-certificates \ + + && ((apt install /ua.deb /pro.deb -y || true)) \ + + && apt-get install -f \ + + && pro --debug api u.pro.attach.auto.full_auto_attach.v1 --data "{\"cloud_override\": \"$PRO_CLOUD_OVERRIDE\", \"enable\": }" \ + + && apt-get install -y \ + + # If you need ca-certificates, remove it from this line + && apt-get purge --auto-remove -y ubuntu-advantage-tools ca-certificates \ + + && rm -rf /var/lib/apt/lists/* + """ + # Build succeeds + When I run shell command `DOCKER_BUILDKIT=1 docker build . 
-t test --build-arg PRO_CLOUD_OVERRIDE= ` with sudo + + # Service successfully enabled (Correct version of package installed) + When I run `docker run test dpkg-query --showformat='${Version}' --show ` with sudo + Then stdout matches regexp: + """ + + """ + Examples: ubuntu release + | release | machine_type | cloud_override | container_release | enable_services | test_package_name | test_package_version | extra_build_args | + | jammy | aws.pro | aws | xenial | [ \"esm-infra\" ] | curl | esm | --network=host | + | jammy | azure.pro | azure | bionic | [ \"fips\" ] | openssl | fips | | + | jammy | gcp.pro | gce | focal | [ \"esm-apps\" ] | hello | esm | | diff -Nru ubuntu-advantage-tools-30~23.10/features/enable_fips_cloud.feature ubuntu-advantage-tools-31.2~23.10/features/enable_fips_cloud.feature --- ubuntu-advantage-tools-30~23.10/features/enable_fips_cloud.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/enable_fips_cloud.feature 2024-02-14 15:37:46.000000000 +0000 @@ -1,10 +1,8 @@ @uses.config.contract_token Feature: FIPS enablement in cloud based machines - @series.lts - @uses.config.machine_type.gcp.generic Scenario Outline: Attached enable of FIPS services in an ubuntu gcp vm - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo Then I verify that running `pro enable --assume-yes` `with sudo` exits `1` And stdout matches regexp: @@ -13,17 +11,14 @@ """ Examples: fips - | release | release_title | fips_service | - | xenial | Xenial | fips | - | xenial | Xenial | fips-updates | - - @series.xenial - @uses.config.machine_type.aws.generic - @uses.config.machine_type.azure.generic + | release | machine_type | release_title | fips_service | + | xenial | gcp.generic | Xenial | fips | + | xenial | gcp.generic | Xenial | fips-updates | + Scenario Outline: FIPS unholds packages - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo - And I run `DEBIAN_FRONTEND=noninteractive apt-get install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y openssh-client openssh-server strongswan` with sudo + And I apt install `openssh-client openssh-server strongswan` And I run `apt-mark hold openssh-client openssh-server strongswan` with sudo And I run `pro enable fips --assume-yes` with sudo Then I verify that `openssh-server` is installed from apt source `` @@ -49,18 +44,15 @@ And I verify that `strongswan-hmac` installed version matches regexp `fips` Examples: ubuntu release - | release | fips-apt-source | - | xenial | https://esm.ubuntu.com/fips/ubuntu xenial/main | + | release | machine_type | fips-apt-source | + | xenial | aws.generic | https://esm.ubuntu.com/fips/ubuntu xenial/main | + | xenial | azure.generic | https://esm.ubuntu.com/fips/ubuntu xenial/main | - @series.bionic - @uses.config.machine_type.aws.generic - @uses.config.machine_type.azure.generic - @uses.config.machine_type.gcp.generic Scenario Outline: FIPS unholds packages - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo - And I run `DEBIAN_FRONTEND=noninteractive apt-get install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y openssh-client openssh-server strongswan` with sudo + And I apt install `openssh-client 
openssh-server strongswan` And I run `apt-mark hold openssh-client openssh-server strongswan` with sudo And I run `pro enable fips --assume-yes` with sudo Then I verify that `openssh-server` is installed from apt source `` @@ -86,17 +78,15 @@ And I verify that `strongswan-hmac` installed version matches regexp `fips` Examples: ubuntu release - | release | fips-apt-source | - | bionic | https://esm.ubuntu.com/fips/ubuntu bionic/main | + | release | machine_type | fips-apt-source | + | bionic | aws.generic | https://esm.ubuntu.com/fips/ubuntu bionic/main | + | bionic | azure.generic | https://esm.ubuntu.com/fips/ubuntu bionic/main | + | bionic | gcp.generic | https://esm.ubuntu.com/fips/ubuntu bionic/main | - @series.focal - @uses.config.machine_type.aws.generic - @uses.config.machine_type.azure.generic - @uses.config.machine_type.gcp.generic Scenario Outline: FIPS unholds packages - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo - And I run `DEBIAN_FRONTEND=noninteractive apt-get install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y openssh-client openssh-server strongswan` with sudo + And I apt install `openssh-client openssh-server strongswan` And I run `apt-mark hold openssh-client openssh-server strongswan` with sudo And I run `pro enable fips --assume-yes` with sudo Then I verify that `openssh-server` is installed from apt source `` @@ -118,31 +108,26 @@ And I verify that `strongswan-hmac` installed version matches regexp `fips` Examples: ubuntu release - | release | fips-apt-source | - | focal | https://esm.ubuntu.com/fips/ubuntu focal/main | + | release | machine_type | fips-apt-source | + | focal | aws.generic | https://esm.ubuntu.com/fips/ubuntu focal/main | + | focal | azure.generic | https://esm.ubuntu.com/fips/ubuntu focal/main | + | focal | gcp.generic | https://esm.ubuntu.com/fips/ubuntu focal/main | @slow - @series.xenial - @series.bionic - @series.focal - @uses.config.machine_type.azure.generic - Scenario Outline: Enable FIPS in an ubuntu Azure vm - Given a `` machine with ubuntu-advantage-tools installed + Scenario Outline: Enable FIPS in a cloud VM + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo And I run `pro enable --assume-yes` with sudo - Then stdout matches regexp: - """ - Updating package lists - Installing packages - enabled - A reboot is required to complete install - """ - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - +yes enabled - """ - And I verify that running `apt update` `with sudo` exits `0` + Then stdout contains substring: + """ + Updating package lists + Installing packages + Updating standard Ubuntu package lists + enabled + A reboot is required to complete install + """ + And I verify that `` is enabled + And I ensure apt update runs without errors And I verify that running `grep Traceback /var/log/ubuntu-advantage.log` `with sudo` exits `1` When I run `apt-cache policy ` as non-root Then stdout does not match regexp: @@ -152,14 +137,15 @@ When I reboot the machine And I run `uname -r` as non-root Then stdout matches regexp: - """ - - """ + """ + + """ When I run `cat /proc/sys/crypto/fips_enabled` with sudo Then I will see the following on stdout: """ 1 """ + And I verify that `` is installed from apt source `` When I run `pro disable --assume-yes` with sudo Then stdout matches regexp: """ @@ -171,247 +157,98 @@ .*Installed: 
\(none\) """ When I reboot the machine - And I run `pro status --all` with sudo - Then stdout matches regexp: - """ - +yes disabled - """ + Then I verify that `` is disabled Examples: ubuntu release - | release | fips-name | fips-service | fips-package | fips-kernel | fips-apt-source | - | xenial | FIPS | fips | ubuntu-fips | fips | https://esm.ubuntu.com/fips/ubuntu xenial/main | - | xenial | FIPS Updates | fips-updates | ubuntu-fips | fips | https://esm.ubuntu.com/fips/ubuntu xenial/main | - | bionic | FIPS | fips | ubuntu-azure-fips | azure-fips | https://esm.ubuntu.com/fips/ubuntu bionic/main | - | bionic | FIPS Updates | fips-updates | ubuntu-azure-fips | azure-fips | https://esm.ubuntu.com/fips/ubuntu bionic/main | - | focal | FIPS | fips | ubuntu-azure-fips | azure-fips | https://esm.ubuntu.com/fips/ubuntu focal/main | - | focal | FIPS Updates | fips-updates | ubuntu-azure-fips | azure-fips | https://esm.ubuntu.com/fips/ubuntu focal/main | + | release | machine_type | fips-name | fips-service | fips-package | fips-kernel | fips-apt-source | + | xenial | azure.generic | FIPS | fips | ubuntu-fips | fips | https://esm.ubuntu.com/fips/ubuntu xenial/main | + | xenial | azure.generic | FIPS Updates | fips-updates | ubuntu-fips | fips | https://esm.ubuntu.com/fips-updates/ubuntu xenial-updates/main | + | xenial | aws.generic | FIPS | fips | ubuntu-fips | fips | https://esm.ubuntu.com/fips/ubuntu xenial/main | + | bionic | azure.generic | FIPS | fips | ubuntu-azure-fips | azure-fips | https://esm.ubuntu.com/fips/ubuntu bionic/main | + | bionic | azure.generic | FIPS Updates | fips-updates | ubuntu-azure-fips | azure-fips | https://esm.ubuntu.com/fips-updates/ubuntu bionic-updates/main | + | bionic | aws.generic | FIPS | fips | ubuntu-aws-fips | aws-fips | https://esm.ubuntu.com/fips/ubuntu bionic/main | + | bionic | aws.generic | FIPS Updates | fips-updates | ubuntu-aws-fips | aws-fips | https://esm.ubuntu.com/fips-updates/ubuntu bionic-updates/main | + | bionic | gcp.generic | FIPS | fips | ubuntu-gcp-fips | gcp-fips | https://esm.ubuntu.com/fips/ubuntu bionic/main | + | bionic | gcp.generic | FIPS Updates | fips-updates | ubuntu-gcp-fips | gcp-fips | https://esm.ubuntu.com/fips-updates/ubuntu bionic-updates/main | + | focal | azure.generic | FIPS | fips | ubuntu-azure-fips | azure-fips | https://esm.ubuntu.com/fips/ubuntu focal/main | + | focal | azure.generic | FIPS Updates | fips-updates | ubuntu-azure-fips | azure-fips | https://esm.ubuntu.com/fips-updates/ubuntu focal-updates/main | + | focal | aws.generic | FIPS | fips | ubuntu-aws-fips | aws-fips | https://esm.ubuntu.com/fips/ubuntu focal/main | + | focal | aws.generic | FIPS Updates | fips-updates | ubuntu-aws-fips | aws-fips | https://esm.ubuntu.com/fips-updates/ubuntu focal-updates/main | + | focal | gcp.generic | FIPS | fips | ubuntu-gcp-fips | gcp-fips | https://esm.ubuntu.com/fips/ubuntu focal/main | + | focal | gcp.generic | FIPS Updates | fips-updates | ubuntu-gcp-fips | gcp-fips | https://esm.ubuntu.com/fips-updates/ubuntu focal-updates/main | + | jammy | azure.generic | FIPS Preview | fips-preview | ubuntu-azure-fips | azure-fips | https://esm.ubuntu.com/fips-preview/ubuntu jammy/main | + | jammy | azure.generic | FIPS Updates | fips-updates | ubuntu-azure-fips | azure-fips | https://esm.ubuntu.com/fips-updates/ubuntu jammy-updates/main | + | jammy | aws.generic | FIPS Preview | fips-preview | ubuntu-aws-fips | aws-fips | https://esm.ubuntu.com/fips-preview/ubuntu jammy/main | + | jammy | aws.generic | FIPS Updates | 
fips-updates | ubuntu-aws-fips | aws-fips | https://esm.ubuntu.com/fips-updates/ubuntu jammy-updates/main | + | jammy | gcp.generic | FIPS Preview | fips-preview | ubuntu-gcp-fips | gcp-fips | https://esm.ubuntu.com/fips-preview/ubuntu jammy/main | + | jammy | gcp.generic | FIPS Updates | fips-updates | ubuntu-gcp-fips | gcp-fips | https://esm.ubuntu.com/fips-updates/ubuntu jammy-updates/main | @slow - @series.xenial - @uses.config.machine_type.aws.generic - Scenario Outline: Attached FIPS in an ubuntu Xenial AWS vm - Given a `` machine with ubuntu-advantage-tools installed - When I attach `contract_token` with sudo - And I run `pro disable livepatch` with sudo - And I run `DEBIAN_FRONTEND=noninteractive apt-get install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y openssh-client openssh-server strongswan` with sudo - And I run `pro enable --assume-yes` with sudo - Then stdout matches regexp: - """ - Updating package lists - Installing packages - enabled - A reboot is required to complete install - """ - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - +yes enabled - """ - And I verify that running `apt update` `with sudo` exits `0` - And I verify that running `grep Traceback /var/log/ubuntu-advantage.log` `with sudo` exits `1` - And I verify that `openssh-server` is installed from apt source `` - And I verify that `openssh-client` is installed from apt source `` - And I verify that `strongswan` is installed from apt source `` - And I verify that `openssh-server-hmac` is installed from apt source `` - And I verify that `openssh-client-hmac` is installed from apt source `` - And I verify that `strongswan-hmac` is installed from apt source `` - When I run `apt-cache policy ubuntu-fips` as non-root - Then stdout does not match regexp: - """ - .*Installed: \(none\) - """ - When I reboot the machine - And I run `uname -r` as non-root - Then stdout matches regexp: - """ - fips - """ - When I run `cat /proc/sys/crypto/fips_enabled` with sudo - Then I will see the following on stdout: + Scenario Outline: Attached enable of FIPS in an ubuntu image with cloud-init disabled + Given a `` `` machine with ubuntu-advantage-tools installed + When I run `touch /etc/cloud/cloud-init.disabled` with sudo + And I reboot the machine + And I verify that running `cloud-id` `with sudo` exits `1` + Then stderr matches regexp: """ - 1 + File not found '/run/cloud-init/instance-data.json'. Provide a path to instance data json file using --instance-data """ - When I run `pro disable --assume-yes` with sudo - Then stdout matches regexp: + When I attach `contract_token` with sudo + And I run `pro enable fips --assume-yes` with sudo + Then stdout contains substring: """ - Updating package lists + Could not determine cloud, defaulting to generic FIPS package. + Updating FIPS package lists + Installing FIPS packages + Updating standard Ubuntu package lists + FIPS enabled + A reboot is required to complete install. 
""" When I run `apt-cache policy ubuntu-fips` as non-root - Then stdout matches regexp: - """ - .*Installed: \(none\) - """ - When I reboot the machine - Then I verify that `openssh-server` installed version matches regexp `fips` - And I verify that `openssh-client` installed version matches regexp `fips` - And I verify that `strongswan` installed version matches regexp `fips` - And I verify that `openssh-server-hmac` installed version matches regexp `fips` - And I verify that `openssh-client-hmac` installed version matches regexp `fips` - And I verify that `strongswan-hmac` installed version matches regexp `fips` - When I run `apt-mark unhold openssh-client openssh-server strongswan` with sudo - Then I will see the following on stdout: - """ - openssh-client was already not hold. - openssh-server was already not hold. - strongswan was already not hold. - """ - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - +yes disabled - """ - - Examples: ubuntu release - | release | fips-name | fips-service |fips-apt-source | - | xenial | FIPS | fips |https://esm.ubuntu.com/fips/ubuntu xenial/main | - - @slow - @series.bionic - @series.focal - @uses.config.machine_type.aws.generic - Scenario Outline: Attached enable of FIPS in an ubuntu AWS vm - Given a `` machine with ubuntu-advantage-tools installed - When I attach `contract_token` with sudo - And I run `pro disable livepatch` with sudo - And I run `pro enable --assume-yes` with sudo - Then stdout matches regexp: - """ - Updating package lists - Installing packages - enabled - A reboot is required to complete install - """ - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - +yes enabled - """ - And I verify that running `apt update` `with sudo` exits `0` - And I verify that running `grep Traceback /var/log/ubuntu-advantage.log` `with sudo` exits `1` - When I run `apt-cache policy ubuntu-aws-fips` as non-root Then stdout does not match regexp: """ .*Installed: \(none\) """ When I reboot the machine And I run `uname -r` as non-root - Then stdout matches regexp: - """ - aws-fips - """ - When I run `cat /proc/sys/crypto/fips_enabled` with sudo - Then I will see the following on stdout: - """ - 1 - """ - When I run `pro disable --assume-yes` with sudo - Then stdout matches regexp: - """ - Updating package lists - """ - When I run `apt-cache policy ubuntu-aws-fips` as non-root - Then stdout matches regexp: + Then stdout does not match regexp: """ - .*Installed: \(none\) + aws-fips """ - When I reboot the machine - And I run `pro status --all` with sudo - Then stdout matches regexp: - """ - +yes disabled - """ - - Examples: ubuntu release - | release | fips-name | fips-service |fips-apt-source | - | bionic | FIPS | fips |https://esm.ubuntu.com/fips/ubuntu bionic/main | - | bionic | FIPS Updates | fips-updates |https://esm.ubuntu.com/fips/ubuntu bionic/main | - | focal | FIPS | fips |https://esm.ubuntu.com/fips/ubuntu focal/main | - | focal | FIPS Updates | fips-updates |https://esm.ubuntu.com/fips/ubuntu focal/main | - - @slow - @series.bionic - @series.focal - @uses.config.machine_type.gcp.generic - Scenario Outline: Attached enable of FIPS in an ubuntu GCP vm - Given a `` machine with ubuntu-advantage-tools installed - When I attach `contract_token` with sudo - And I run `pro enable --assume-yes` with sudo - Then stdout matches regexp: - """ - Updating package lists - Installing packages - enabled - A reboot is required to complete install - """ - When I run `pro status --all` with sudo - Then 
stdout matches regexp: - """ - +yes enabled - """ - And I verify that running `apt update` `with sudo` exits `0` - And I verify that running `grep Traceback /var/log/ubuntu-advantage.log` `with sudo` exits `1` - When I run `apt-cache policy ubuntu-gcp-fips` as non-root - Then stdout does not match regexp: + And stdout matches regexp: """ - .*Installed: \(none\) + fips """ - When I reboot the machine - And I run `uname -r` as non-root - Then stdout matches regexp: - """ - gcp-fips - """ When I run `cat /proc/sys/crypto/fips_enabled` with sudo Then I will see the following on stdout: """ 1 """ - When I run `pro disable --assume-yes` with sudo - Then stdout matches regexp: - """ - Updating package lists - """ - When I run `apt-cache policy ubuntu-gcp-fips` as non-root - Then stdout matches regexp: - """ - .*Installed: \(none\) - """ - When I reboot the machine - And I run `pro status --all` with sudo - Then stdout matches regexp: - """ - +yes disabled - """ Examples: ubuntu release - | release | fips-name | fips-service |fips-apt-source | - | bionic | FIPS | fips |https://esm.ubuntu.com/fips/ubuntu bionic/main | - | bionic | FIPS Updates | fips-updates |https://esm.ubuntu.com/fips/ubuntu bionic/main | - | focal | FIPS | fips |https://esm.ubuntu.com/fips/ubuntu focal/main | - | focal | FIPS Updates | fips-updates |https://esm.ubuntu.com/fips/ubuntu focal/main | + | release | machine_type | + | xenial | aws.generic | @slow - @series.lts - @uses.config.machine_type.any - @uses.config.machine_type.aws.generic Scenario Outline: Attached enable of FIPS in an ubuntu image with cloud-init disabled - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I run `touch /etc/cloud/cloud-init.disabled` with sudo And I reboot the machine - And I verify that running `cloud-id` `with sudo` exits `1` - Then stderr matches regexp: + And I verify that running `cloud-id` `with sudo` exits `2` + Then I will see the following on stdout: """ - File not found '/run/cloud-init/instance-data.json'. Provide a path to instance data json file using --instance-data + disabled """ When I attach `contract_token` with sudo And I run `pro enable fips --assume-yes` with sudo Then stdout matches regexp: """ - Updating package lists + Could not determine cloud, defaulting to generic FIPS package. + Updating FIPS package lists Installing FIPS packages Updating standard Ubuntu package lists FIPS enabled - A reboot is required to complete install + A reboot is required to complete install. 
""" When I run `apt-cache policy ubuntu-fips` as non-root Then stdout does not match regexp: @@ -435,8 +272,6 @@ """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | bionic | aws.generic | + | focal | aws.generic | diff -Nru ubuntu-advantage-tools-30~23.10/features/enable_fips_container.feature ubuntu-advantage-tools-31.2~23.10/features/enable_fips_container.feature --- ubuntu-advantage-tools-30~23.10/features/enable_fips_container.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/enable_fips_container.feature 2024-01-18 17:34:13.000000000 +0000 @@ -1,44 +1,36 @@ - @uses.config.contract_token Feature: FIPS enablement in lxd containers - @series.xenial - @series.bionic - @series.focal - @uses.config.machine_type.lxd-container Scenario Outline: Attached enable of FIPS in an ubuntu lxd container - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo - And I run `DEBIAN_FRONTEND=noninteractive apt-get install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y openssh-client openssh-server strongswan openssl libgcrypt20` with sudo, retrying exit [100] + And I apt install `openssh-client openssh-server strongswan openssl libgcrypt20` And I run `pro enable fips` `with sudo` and stdin `y` Then stdout matches regexp: - """ - Warning: Enabling in a container. - This will install the FIPS packages but not the kernel. - This container must run on a host with enabled to be - compliant. - Warning: This action can take some time and cannot be undone. - """ - And stdout matches regexp: - """ - Updating package lists - Installing packages - Updating standard Ubuntu package lists - enabled - A reboot is required to complete install. - Please run `apt upgrade` to ensure all FIPS packages are updated to the correct - version. - """ - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - fips +yes enabled - """ - And stdout matches regexp: - """ - FIPS support requires system reboot to complete configuration - """ - And I verify that running `apt update` `with sudo` exits `0` + """ + Warning: Enabling in a container. + This will install the FIPS packages but not the kernel. + This container must run on a host with enabled to be + compliant. + Warning: This action can take some time and cannot be undone. + """ + And stdout contains substring: + """ + Updating package lists + Installing packages + Updating standard Ubuntu package lists + enabled + A reboot is required to complete install. + Please run `apt upgrade` to ensure all FIPS packages are updated to the correct + version. 
+ """ + And I verify that `fips` is enabled + When I run `pro status --all` with sudo + Then stdout matches regexp: + """ + FIPS support requires system reboot to complete configuration + """ + And I ensure apt update runs without errors And I verify that `openssh-server` is installed from apt source `https://esm.ubuntu.com/fips/ubuntu /main` And I verify that `openssh-client` is installed from apt source `https://esm.ubuntu.com/fips/ubuntu /main` And I verify that `strongswan` is installed from apt source `https://esm.ubuntu.com/fips/ubuntu /main` @@ -50,36 +42,33 @@ When I reboot the machine When I run `pro status --all` with sudo Then stdout does not match regexp: - """ - FIPS support requires system reboot to complete configuration - """ + """ + FIPS support requires system reboot to complete configuration + """ When I run `pro disable fips` `with sudo` and stdin `y` Then stdout matches regexp: - """ - This will disable the entitlement but the packages will remain installed. - """ - And stdout matches regexp: - """ - Updating package lists - """ + """ + This will disable the entitlement but the packages will remain installed. + """ + And stdout matches regexp: + """ + Updating package lists + """ And stdout does not match regexp: - """ - A reboot is required to complete disable operation - """ + """ + A reboot is required to complete disable operation + """ + And I verify that `fips` is disabled When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - fips +yes disabled - """ Then stdout does not match regexp: - """ - Disabling requires system reboot to complete operation - """ + """ + Disabling requires system reboot to complete operation + """ When I run `apt-cache policy ubuntu-fips` as non-root Then stdout does not match regexp: - """ - .*Installed: \(none\) - """ + """ + .*Installed: \(none\) + """ Then I verify that `openssh-server` installed version matches regexp `fips` And I verify that `openssh-client` installed version matches regexp `fips` And I verify that `strongswan` installed version matches regexp `fips` @@ -90,56 +79,40 @@ And I verify that packages `` installed versions match regexp `fips` Examples: ubuntu release - | release | fips-name | updates | libssl | additional-fips-packages | - | xenial | FIPS | | libssl1.0.0 | openssh-server-hmac openssh-client-hmac | - | xenial | FIPS Updates | -updates | libssl1.0.0 | openssh-server-hmac openssh-client-hmac | - | bionic | FIPS | | libssl1.1 | openssh-server-hmac openssh-client-hmac libgcrypt20 libgcrypt20-hmac | - | bionic | FIPS Updates | -updates | libssl1.1 | openssh-server-hmac openssh-client-hmac libgcrypt20 libgcrypt20-hmac | - | focal | FIPS | | libssl1.1 | libgcrypt20 libgcrypt20-hmac | - | focal | FIPS Updates | -updates | libssl1.1 | libgcrypt20 libgcrypt20-hmac | + | release | machine_type | fips-name | updates | libssl | additional-fips-packages | + | xenial | lxd-container | FIPS | | libssl1.0.0 | openssh-server-hmac openssh-client-hmac | + | xenial | lxd-container | FIPS Updates | -updates | libssl1.0.0 | openssh-server-hmac openssh-client-hmac | + | bionic | lxd-container | FIPS | | libssl1.1 | openssh-server-hmac openssh-client-hmac libgcrypt20 libgcrypt20-hmac | + | bionic | lxd-container | FIPS Updates | -updates | libssl1.1 | openssh-server-hmac openssh-client-hmac libgcrypt20 libgcrypt20-hmac | + | focal | lxd-container | FIPS | | libssl1.1 | libgcrypt20 libgcrypt20-hmac | + | focal | lxd-container | FIPS Updates | -updates | libssl1.1 | libgcrypt20 libgcrypt20-hmac | - 
@series.xenial - @series.bionic - @series.focal - @uses.config.machine_type.lxd-container Scenario Outline: Try to enable FIPS after FIPS Updates in a lxd container - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - fips-updates +yes +disabled - """ - And stdout matches regexp: - """ - fips +yes +disabled - """ + Then I verify that `fips-updates` is disabled + And I verify that `fips` is disabled When I run `pro enable fips-updates --assume-yes` with sudo + Then I verify that `fips-updates` is enabled When I run `pro status --all` with sudo Then stdout matches regexp: - """ - fips-updates +yes +enabled - """ - And stdout matches regexp: - """ - fips +yes +n/a - """ + """ + fips +yes +n/a + """ When I verify that running `pro enable fips --assume-yes` `with sudo` exits `1` Then stdout matches regexp: - """ - Cannot enable FIPS when FIPS Updates is enabled. - """ + """ + Cannot enable FIPS when FIPS Updates is enabled. + """ When I run `pro status --all` with sudo Then stdout matches regexp: - """ - fips-updates +yes +enabled - """ - And stdout matches regexp: - """ - fips +yes +n/a - """ + """ + fips +yes +n/a + """ + And I verify that `fips-updates` is enabled + Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | diff -Nru ubuntu-advantage-tools-30~23.10/features/enable_fips_pro.feature ubuntu-advantage-tools-31.2~23.10/features/enable_fips_pro.feature --- ubuntu-advantage-tools-30~23.10/features/enable_fips_pro.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/enable_fips_pro.feature 2024-01-18 17:34:13.000000000 +0000 @@ -1,179 +1,56 @@ Feature: FIPS enablement in PRO cloud based machines @slow - @series.bionic - @series.focal - @uses.config.machine_type.aws.pro - Scenario Outline: Attached enable of FIPS in an ubuntu Azure PRO vm - Given a `` machine with ubuntu-advantage-tools installed + Scenario Outline: Attached enable of FIPS in an ubuntu Aws PRO vm + Given a `` `` machine with ubuntu-advantage-tools installed When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: """ contract_url: 'https://contracts.canonical.com' - data_dir: /var/lib/ubuntu-advantage log_level: debug - log_file: /var/log/ubuntu-advantage.log """ And I run `pro auto-attach` with sudo - And I run `pro status --wait` with sudo - Then stdout matches regexp: - """ - fips +yes +disabled +NIST-certified FIPS crypto packages - fips-updates +yes +disabled +FIPS compliant crypto packages with stable security updates - """ - When I run `pro enable --assume-yes` with sudo - Then stdout matches regexp: - """ - Updating package lists - Installing packages - enabled - A reboot is required to complete install - """ - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - +yes enabled - """ - And I verify that running `apt update` `with sudo` exits `0` - And I verify that running `grep Traceback /var/log/ubuntu-advantage.log` `with sudo` exits `1` - When I run `apt-cache policy ubuntu-aws-fips` as non-root - Then stdout does not match regexp: - """ - .*Installed: \(none\) - """ - When I reboot the machine - And I run `uname -r` as non-root - Then stdout matches regexp: - """ - aws-fips - """ - When I run `cat 
/proc/sys/crypto/fips_enabled` with sudo - Then I will see the following on stdout: - """ - 1 - """ - - Examples: ubuntu release - | release | fips-name | fips-service |fips-apt-source | - | bionic | FIPS | fips |https://esm.ubuntu.com/fips/ubuntu bionic/main | - | bionic | FIPS Updates | fips-updates |https://esm.ubuntu.com/fips/ubuntu bionic/main | - | focal | FIPS | fips |https://esm.ubuntu.com/fips/ubuntu focal/main | - | focal | FIPS Updates | fips-updates |https://esm.ubuntu.com/fips/ubuntu focal/main | - - @slow - @series.bionic - @series.focal - @uses.config.machine_type.azure.pro - Scenario Outline: Attached enable of FIPS in an ubuntu Azure PRO vm - Given a `` machine with ubuntu-advantage-tools installed - When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: - """ - contract_url: 'https://contracts.canonical.com' - data_dir: /var/lib/ubuntu-advantage - log_level: debug - log_file: /var/log/ubuntu-advantage.log - """ - And I run `pro auto-attach` with sudo - And I run `pro status --wait` with sudo - Then stdout matches regexp: - """ - fips +yes +disabled +NIST-certified FIPS crypto packages - fips-updates +yes +disabled +FIPS compliant crypto packages with stable security updates - """ - When I run `pro enable --assume-yes` with sudo - Then stdout matches regexp: - """ - Updating package lists - Installing packages - enabled - A reboot is required to complete install - """ - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - +yes enabled - """ - And I verify that running `apt update` `with sudo` exits `0` - And I verify that running `grep Traceback /var/log/ubuntu-advantage.log` `with sudo` exits `1` - When I run `apt-cache policy ubuntu-azure-fips` as non-root - Then stdout does not match regexp: - """ - .*Installed: \(none\) - """ - When I reboot the machine - And I run `uname -r` as non-root - Then stdout matches regexp: - """ - azure-fips - """ - When I run `cat /proc/sys/crypto/fips_enabled` with sudo - Then I will see the following on stdout: - """ - 1 - """ - - Examples: ubuntu release - | release | fips-name | fips-service |fips-apt-source | - | bionic | FIPS | fips |https://esm.ubuntu.com/fips/ubuntu bionic/main | - | bionic | FIPS Updates | fips-updates |https://esm.ubuntu.com/fips/ubuntu bionic/main | - | focal | FIPS | fips |https://esm.ubuntu.com/fips/ubuntu focal/main | - | focal | FIPS Updates | fips-updates |https://esm.ubuntu.com/fips/ubuntu focal/main | - - - @slow - @series.bionic - @series.focal - @uses.config.machine_type.gcp.pro - Scenario Outline: Attached enable of FIPS in an ubuntu GCP PRO vm - Given a `` machine with ubuntu-advantage-tools installed - When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: + Then I verify that `fips` is disabled + And I verify that `fips-updates` is disabled + When I run `pro enable --assume-yes` with sudo + Then stdout contains substring: """ - contract_url: 'https://contracts.canonical.com' - data_dir: /var/lib/ubuntu-advantage - log_level: debug - log_file: /var/log/ubuntu-advantage.log + Updating package lists + Installing packages + Updating standard Ubuntu package lists + enabled + A reboot is required to complete install """ - And I run `pro auto-attach` with sudo - And I run `pro status --wait` with sudo - Then stdout matches regexp: - """ - fips +yes +disabled +NIST-certified FIPS crypto packages - fips-updates +yes +disabled +FIPS compliant crypto packages with stable security updates - """ - When I run `pro enable --assume-yes` 
with sudo - Then stdout matches regexp: - """ - Updating package lists - Installing packages - enabled - A reboot is required to complete install - """ - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - +yes enabled - """ - And I verify that running `apt update` `with sudo` exits `0` + And I verify that `` is enabled + And I ensure apt update runs without errors And I verify that running `grep Traceback /var/log/ubuntu-advantage.log` `with sudo` exits `1` - When I run `apt-cache policy ubuntu-gcp-fips` as non-root + When I run `apt-cache policy ` as non-root Then stdout does not match regexp: - """ - .*Installed: \(none\) - """ + """ + .*Installed: \(none\) + """ When I reboot the machine And I run `uname -r` as non-root Then stdout matches regexp: - """ - gcp-fips - """ + """ + + """ When I run `cat /proc/sys/crypto/fips_enabled` with sudo Then I will see the following on stdout: - """ - 1 - """ + """ + 1 + """ Examples: ubuntu release - | release | fips-name | fips-service |fips-apt-source | - | bionic | FIPS | fips |https://esm.ubuntu.com/fips/ubuntu bionic/main | - | bionic | FIPS Updates | fips-updates |https://esm.ubuntu.com/fips/ubuntu bionic/main | - | focal | FIPS | fips |https://esm.ubuntu.com/fips/ubuntu focal/main | - | focal | FIPS Updates | fips-updates |https://esm.ubuntu.com/fips/ubuntu focal/main | + | release | machine_type | fips-name | fips-service | package-name | kernel-name | fips-apt-source | + | bionic | aws.pro | FIPS | fips | ubuntu-aws-fips | aws-fips | https://esm.ubuntu.com/fips/ubuntu bionic/main | + | bionic | aws.pro | FIPS Updates | fips-updates | ubuntu-aws-fips | aws-fips | https://esm.ubuntu.com/fips/ubuntu bionic/main | + | bionic | azure.pro | FIPS | fips | ubuntu-azure-fips | azure-fips | https://esm.ubuntu.com/fips/ubuntu bionic/main | + | bionic | azure.pro | FIPS Updates | fips-updates | ubuntu-azure-fips | azure-fips | https://esm.ubuntu.com/fips/ubuntu bionic/main | + | bionic | gcp.pro | FIPS | fips | ubuntu-gcp-fips | gcp-fips | https://esm.ubuntu.com/fips/ubuntu bionic/main | + | bionic | gcp.pro | FIPS Updates | fips-updates | ubuntu-gcp-fips | gcp-fips | https://esm.ubuntu.com/fips/ubuntu bionic/main | + | focal | aws.pro | FIPS | fips | ubuntu-aws-fips | aws-fips | https://esm.ubuntu.com/fips/ubuntu focal/main | + | focal | aws.pro | FIPS Updates | fips-updates | ubuntu-aws-fips | aws-fips | https://esm.ubuntu.com/fips/ubuntu focal/main | + | focal | azure.pro | FIPS | fips | ubuntu-azure-fips | azure-fips | https://esm.ubuntu.com/fips/ubuntu focal/main | + | focal | azure.pro | FIPS Updates | fips-updates | ubuntu-azure-fips | azure-fips | https://esm.ubuntu.com/fips/ubuntu focal/main | + | focal | gcp.pro | FIPS | fips | ubuntu-gcp-fips | gcp-fips | https://esm.ubuntu.com/fips/ubuntu focal/main | + | focal | gcp.pro | FIPS Updates | fips-updates | ubuntu-gcp-fips | gcp-fips | https://esm.ubuntu.com/fips/ubuntu focal/main | diff -Nru ubuntu-advantage-tools-30~23.10/features/enable_fips_vm.feature ubuntu-advantage-tools-31.2~23.10/features/enable_fips_vm.feature --- ubuntu-advantage-tools-30~23.10/features/enable_fips_vm.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/enable_fips_vm.feature 2024-01-18 17:34:13.000000000 +0000 @@ -2,11 +2,8 @@ Feature: FIPS enablement in lxd VMs @slow - @series.xenial - @series.bionic - @uses.config.machine_type.lxd-vm Scenario Outline: Attached enable of FIPS in an ubuntu lxd vm - Given a `` machine with ubuntu-advantage-tools 
installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo When I run `pro status --format json` with sudo Then stdout contains substring @@ -14,32 +11,28 @@ {"available": "yes", "blocked_by": [{"name": "livepatch", "reason": "Livepatch cannot be enabled while running the official FIPS certified kernel. If you would like a FIPS compliant kernel with additional bug fixes and security updates, you can use the FIPS Updates service with Livepatch.", "reason_code": "livepatch-invalidates-fips"}], "description": "NIST-certified FIPS crypto packages", "description_override": null, "entitled": "yes", "name": "fips", "status": "disabled", "status_details": "FIPS is not configured", "warning": null} """ When I run `pro disable livepatch` with sudo - And I run `DEBIAN_FRONTEND=noninteractive apt-get install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y openssh-client openssh-server strongswan` with sudo, retrying exit [100] + And I apt install `openssh-client openssh-server strongswan` And I run `apt-mark hold openssh-client openssh-server strongswan` with sudo And I run `pro enable ` `with sudo` and stdin `y` Then stdout matches regexp: - """ - This will install the FIPS packages. The Livepatch service will be unavailable. - Warning: This action can take some time and cannot be undone. - """ - And stdout matches regexp: - """ - Updating package lists - Installing packages - Updating standard Ubuntu package lists - enabled - A reboot is required to complete install - """ + """ + This will install the FIPS packages. The Livepatch service will be unavailable. + Warning: This action can take some time and cannot be undone. + """ + And stdout contains substring: + """ + Updating package lists + Installing packages + Updating standard Ubuntu package lists + enabled + A reboot is required to complete install. + """ When I run `pro status --all` with sudo Then stdout matches regexp: - """ - +yes enabled - """ - And stdout matches regexp: - """ - FIPS support requires system reboot to complete configuration - """ - And I verify that running `apt update` `with sudo` exits `0` + """ + FIPS support requires system reboot to complete configuration + """ + And I ensure apt update runs without errors And I verify that `openssh-server` is installed from apt source `` And I verify that `openssh-client` is installed from apt source `` And I verify that `strongswan` is installed from apt source `` @@ -101,11 +94,8 @@ openssh-server was already not hold. strongswan was already not hold. 
""" + And I verify that `` is disabled When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - +yes disabled - """ Then stdout does not match regexp: """ Disabling FIPS requires system reboot to complete operation @@ -123,46 +113,35 @@ """ {"_schema_version": "0.1", "errors": [], "failed_services": [], "needs_reboot": true, "processed_services": [""], "result": "success", "warnings": []} """ - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - +yes disabled - """ + And I verify that `` is disabled Examples: ubuntu release - | release | fips-name | fips-service |fips-apt-source | - | xenial | FIPS | fips |https://esm.ubuntu.com/fips/ubuntu xenial/main | - | bionic | FIPS | fips |https://esm.ubuntu.com/fips/ubuntu bionic/main | + | release | machine_type | fips-name | fips-service |fips-apt-source | + | xenial | lxd-vm | FIPS | fips |https://esm.ubuntu.com/fips/ubuntu xenial/main | + | bionic | lxd-vm | FIPS | fips |https://esm.ubuntu.com/fips/ubuntu bionic/main | @slow - @series.xenial - @series.bionic - @uses.config.machine_type.lxd-vm Scenario Outline: Attached enable of FIPS-updates in an ubuntu lxd vm - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo And I run `pro disable livepatch` with sudo - And I run `DEBIAN_FRONTEND=noninteractive apt-get install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y openssh-client openssh-server strongswan` with sudo, retrying exit [100] + And I apt install `openssh-client openssh-server strongswan` When I run `pro enable ` `with sudo` and stdin `y` Then stdout matches regexp: - """ - This will install the FIPS packages including security updates. - Warning: This action can take some time and cannot be undone. - """ - And stdout matches regexp: - """ - Updating package lists - Installing packages - Updating standard Ubuntu package lists - enabled - A reboot is required to complete install - """ - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - +yes enabled - """ - And I verify that running `apt update` `with sudo` exits `0` + """ + This will install the FIPS packages including security updates. + Warning: This action can take some time and cannot be undone. + """ + And stdout contains substring: + """ + Updating package lists + Installing packages + Updating standard Ubuntu package lists + enabled + A reboot is required to complete install. 
+ """ + And I verify that `` is enabled + And I ensure apt update runs without errors And I verify that `openssh-server` is installed from apt source `` And I verify that `openssh-client` is installed from apt source `` And I verify that `strongswan` is installed from apt source `` @@ -174,13 +153,12 @@ """ {"available": "no", "blocked_by": [{"name": "fips-updates", "reason": "FIPS cannot be enabled if FIPS Updates has ever been enabled because FIPS Updates installs security patches that aren't officially certified.", "reason_code": "fips-updates-invalidates-fips"}], "description": "NIST-certified FIPS crypto packages", "description_override": null, "entitled": "yes", "name": "fips", "status": "n/a", "status_details": "Cannot enable FIPS when FIPS Updates is enabled.", "warning": null} """ - When I reboot the machine And I run `uname -r` as non-root Then stdout matches regexp: - """ - fips - """ + """ + fips + """ When I run `cat /proc/sys/crypto/fips_enabled` with sudo Then I will see the following on stdout: """ @@ -188,14 +166,14 @@ """ When I run `pro disable ` `with sudo` and stdin `y` Then stdout matches regexp: - """ - This will disable the FIPS Updates entitlement but the FIPS Updates packages will remain installed. - """ + """ + This will disable the FIPS Updates entitlement but the FIPS Updates packages will remain installed. + """ And stdout matches regexp: - """ - Updating package lists - A reboot is required to complete disable operation - """ + """ + Updating package lists + A reboot is required to complete disable operation + """ When I reboot the machine Then I verify that `openssh-server` installed version matches regexp `fips` And I verify that `openssh-client` installed version matches regexp `fips` @@ -210,39 +188,20 @@ openssh-server was already not hold. strongswan was already not hold. """ - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - +yes disabled - """ + And I verify that `` is disabled When I verify that running `pro enable fips --assume-yes` `with sudo` exits `1` Then stdout matches regexp: - """ - Cannot enable FIPS because FIPS Updates was once enabled. - """ + """ + Cannot enable FIPS because FIPS Updates was once enabled. 
+ """ And I verify that files exist matching `/var/lib/ubuntu-advantage/services-once-enabled` - When I run `pro enable --assume-yes` with sudo - When I reboot the machine - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - +yes +enabled - """ - Then stdout matches regexp: - """ - livepatch +yes +disabled - """ + And I reboot the machine + Then I verify that `` is enabled + And I verify that `livepatch` is disabled When I run `pro enable livepatch --assume-yes` with sudo - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - +yes +enabled - """ - Then stdout matches regexp: - """ - livepatch +yes +enabled - """ + Then I verify that `` is enabled + And I verify that `livepatch` is enabled When I run `pro status --all --format json` with sudo Then stdout contains substring: """ @@ -262,98 +221,65 @@ """ {"_schema_version": "0.1", "errors": [], "failed_services": [], "needs_reboot": true, "processed_services": [""], "result": "success", "warnings": []} """ - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - +yes disabled - """ + And I verify that `` is disabled Examples: ubuntu release - | release | fips-name | fips-service |fips-apt-source | - | xenial | FIPS Updates | fips-updates |https://esm.ubuntu.com/fips-updates/ubuntu xenial-updates/main | - | bionic | FIPS Updates | fips-updates |https://esm.ubuntu.com/fips-updates/ubuntu bionic-updates/main | + | release | machine_type | fips-name | fips-service |fips-apt-source | + | xenial | lxd-vm | FIPS Updates | fips-updates |https://esm.ubuntu.com/fips-updates/ubuntu xenial-updates/main | + | bionic | lxd-vm | FIPS Updates | fips-updates |https://esm.ubuntu.com/fips-updates/ubuntu bionic-updates/main | @slow - @series.xenial - @series.bionic - @uses.config.machine_type.lxd-vm Scenario Outline: Attached enable FIPS-updates while livepatch is enabled - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - fips-updates +yes disabled - """ - Then stdout matches regexp: - """ - livepatch +yes + - """ + Then I verify that `fips-updates` is disabled + And I verify that `livepatch` is enabled When I run `pro enable fips-updates --assume-yes` with sudo - Then stdout matches regexp: - """ - Updating FIPS Updates package lists - Installing FIPS Updates packages - Updating standard Ubuntu package lists - FIPS Updates enabled - A reboot is required to complete install - """ - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - fips-updates +yes enabled - """ - Then stdout matches regexp: - """ - livepatch +yes + - """ + Then stdout contains substring: + """ + Updating FIPS Updates package lists + Installing FIPS Updates packages + Updating standard Ubuntu package lists + FIPS Updates enabled + A reboot is required to complete install. 
+ """ + And I verify that `fips-updates` is enabled + And I verify that `livepatch` is enabled When I reboot the machine And I run `uname -r` as non-root Then stdout matches regexp: - """ - fips - """ + """ + fips + """ When I run `cat /proc/sys/crypto/fips_enabled` with sudo Then I will see the following on stdout: """ 1 """ - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - fips-updates +yes enabled - """ - Then stdout matches regexp: - """ - livepatch +yes +enabled - """ + And I verify that `fips-updates` is enabled + And I verify that `livepatch` is enabled + Examples: ubuntu release - | release | livepatch_status | - | xenial | warning | - | bionic | enabled | + | release | machine_type | + | xenial | lxd-vm | + | bionic | lxd-vm | @slow - @series.focal - @uses.config.machine_type.lxd-vm Scenario Outline: Attached enable of FIPS in an ubuntu lxd vm - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo - And I run `DEBIAN_FRONTEND=noninteractive apt-get install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y openssh-client openssh-server strongswan` with sudo, retrying exit [100] + And I apt install `openssh-client openssh-server strongswan` When I run `pro enable --assume-yes` with sudo - Then stdout matches regexp: - """ - Updating package lists - Installing packages - Updating standard Ubuntu package lists - enabled - A reboot is required to complete install - """ - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - +yes enabled - """ - And I verify that running `apt update` `with sudo` exits `0` + Then stdout contains substring: + """ + Updating package lists + Installing packages + Updating standard Ubuntu package lists + enabled + A reboot is required to complete install. + """ + And I verify that `` is enabled + And I ensure apt update runs without errors And I verify that `openssh-server` is installed from apt source `` And I verify that `openssh-client` is installed from apt source `` And I verify that `strongswan` is installed from apt source `` @@ -361,9 +287,9 @@ When I reboot the machine And I run `uname -r` as non-root Then stdout matches regexp: - """ - fips - """ + """ + fips + """ When I run `cat /proc/sys/crypto/fips_enabled` with sudo Then I will see the following on stdout: """ @@ -371,10 +297,10 @@ """ When I run `pro disable --assume-yes` with sudo Then stdout matches regexp: - """ - Updating package lists - A reboot is required to complete disable operation - """ + """ + Updating package lists + A reboot is required to complete disable operation + """ When I reboot the machine Then I verify that `openssh-server` installed version matches regexp `fips` And I verify that `openssh-client` installed version matches regexp `fips` @@ -387,38 +313,28 @@ openssh-server was already not hold. strongswan was already not hold. 
""" - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - +yes disabled - """ + And I verify that `` is disabled Examples: ubuntu release - | release | fips-name | fips-service |fips-apt-source | - | focal | FIPS | fips |https://esm.ubuntu.com/fips/ubuntu focal/main | + | release | machine_type | fips-name | fips-service |fips-apt-source | + | focal | lxd-vm | FIPS | fips |https://esm.ubuntu.com/fips/ubuntu focal/main | @slow - @series.focal - @uses.config.machine_type.lxd-vm Scenario Outline: Attached enable of FIPS-updates in an ubuntu lxd vm - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo - And I run `DEBIAN_FRONTEND=noninteractive apt-get install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y openssh-client openssh-server strongswan` with sudo, retrying exit [100] + And I apt install `openssh-client openssh-server strongswan` When I run `pro enable --assume-yes` with sudo - Then stdout matches regexp: - """ - Updating package lists - Installing packages - Updating standard Ubuntu package lists - enabled - A reboot is required to complete install - """ - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - +yes enabled - """ - And I verify that running `apt update` `with sudo` exits `0` + Then stdout contains substring: + """ + Updating package lists + Installing packages + Updating standard Ubuntu package lists + enabled + A reboot is required to complete install. + """ + And I verify that `` is enabled + And I ensure apt update runs without errors And I verify that `openssh-server` is installed from apt source `` And I verify that `openssh-client` is installed from apt source `` And I verify that `strongswan` is installed from apt source `` @@ -426,9 +342,9 @@ When I reboot the machine And I run `uname -r` as non-root Then stdout matches regexp: - """ - fips - """ + """ + fips + """ When I run `cat /proc/sys/crypto/fips_enabled` with sudo Then I will see the following on stdout: """ @@ -436,146 +352,99 @@ """ When I run `pro disable --assume-yes` with sudo Then stdout matches regexp: - """ - Updating package lists - A reboot is required to complete disable operation - """ + """ + Updating package lists + A reboot is required to complete disable operation + """ When I reboot the machine - Then I verify that `openssh-server` installed version matches regexp `fips` - And I verify that `openssh-client` installed version matches regexp `fips` - And I verify that `strongswan` installed version matches regexp `fips` - And I verify that `strongswan-hmac` installed version matches regexp `fips` + Then I verify that `openssh-server` installed version matches regexp `` + And I verify that `openssh-client` installed version matches regexp `` + And I verify that `strongswan` installed version matches regexp `` + And I verify that `strongswan-hmac` installed version matches regexp `` When I run `apt-mark unhold openssh-client openssh-server strongswan` with sudo - Then I will see the following on stdout: + Then stdout matches regexp: """ - openssh-client was already not hold. - openssh-server was already not hold. - strongswan was already not hold. + openssh-client was already (not|not on) hold. + openssh-server was already (not|not on) hold. + strongswan was already (not|not on) hold. 
""" - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - +yes disabled - """ + And I verify that `` is disabled When I verify that running `pro enable fips --assume-yes` `with sudo` exits `1` Then stdout matches regexp: - """ - Cannot enable FIPS because FIPS Updates was once enabled. - """ + """ + Cannot enable FIPS because FIPS Updates was once enabled. + """ And I verify that files exist matching `/var/lib/ubuntu-advantage/services-once-enabled` Examples: ubuntu release - | release | fips-name | fips-service |fips-apt-source | - | focal | FIPS Updates | fips-updates |https://esm.ubuntu.com/fips-updates/ubuntu focal-updates/main | + | release | machine_type | fips-name | fips-service | fips-package-str | fips-apt-source | + | focal | lxd-vm | FIPS Updates | fips-updates | fips | https://esm.ubuntu.com/fips-updates/ubuntu focal-updates/main | + | jammy | lxd-vm | FIPS Updates | fips-updates | Fips | https://esm.ubuntu.com/fips-updates/ubuntu jammy-updates/main | @slow - @series.lts - @uses.config.machine_type.lxd-vm Scenario Outline: Attached enable fips-updates on fips enabled vm - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo And I run `pro enable fips --assume-yes` with sudo - Then stdout matches regexp: - """ - Updating FIPS package lists - Installing FIPS packages - Updating standard Ubuntu package lists - FIPS enabled - A reboot is required to complete install - """ - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - fips +yes enabled - """ + Then stdout contains substring: + """ + Updating FIPS package lists + Installing FIPS packages + Updating standard Ubuntu package lists + FIPS enabled + A reboot is required to complete install. + """ + And I verify that `fips` is enabled + And I verify that `livepatch` is disabled When I reboot the machine And I run `uname -r` as non-root Then stdout matches regexp: - """ - fips - """ + """ + fips + """ When I verify that running `pro enable fips-updates --assume-yes` `with sudo` exits `0` - Then stdout matches regexp: - """ - One moment, checking your subscription first - Disabling incompatible service: FIPS - Updating FIPS Updates package lists - Installing FIPS Updates packages - Updating standard Ubuntu package lists - FIPS Updates enabled - A reboot is required to complete install. 
- """ - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - fips-updates +yes enabled - """ - And stdout matches regexp: - """ - fips +yes n/a - """ - When I reboot the machine - And I run `pro enable livepatch` with sudo - And I run `pro status --all` with sudo - Then stdout matches regexp: - """ - fips-updates +yes enabled - """ - And stdout matches regexp: - """ - fips +yes n/a - """ - And stdout matches regexp: - """ - livepatch +yes (enabled|warning) - """ - When I run `uname -r` as non-root - Then stdout matches regexp: - """ - fips - """ - When I run `cat /proc/sys/crypto/fips_enabled` with sudo - Then I will see the following on stdout: - """ - 1 - """ - - Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - - @slow - @series.xenial - @series.bionic - @uses.config.machine_type.lxd-vm - Scenario Outline: FIPS enablement message when cloud init didn't run properly - Given a `` machine with ubuntu-advantage-tools installed - When I delete the file `/run/cloud-init/instance-data.json` - And I attach `contract_token` with sudo - And I run `pro disable livepatch` with sudo - And I run `pro enable fips --assume-yes` with sudo - Then stdout matches regexp: + Then stdout contains substring: """ - Could not determine cloud, defaulting to generic FIPS package. + One moment, checking your subscription first + Disabling incompatible service: FIPS + Updating FIPS Updates package lists + Installing FIPS Updates packages + Updating standard Ubuntu package lists + FIPS Updates enabled + A reboot is required to complete install. """ + And I verify that `fips-updates` is enabled + And I verify that `fips` is disabled + When I reboot the machine + And I run `pro enable livepatch` with sudo + Then I verify that `fips-updates` is enabled + And I verify that `fips` is disabled + And I verify that `livepatch` is enabled When I run `pro status --all` with sudo Then stdout matches regexp: """ - fips +yes enabled + fips +yes +n/a + """ + When I run `uname -r` as non-root + Then stdout matches regexp: + """ + fips + """ + When I run `cat /proc/sys/crypto/fips_enabled` with sudo + Then I will see the following on stdout: + """ + 1 """ Examples: ubuntu release - | release | - | xenial | - | bionic | + | release | machine_type | + | xenial | lxd-vm | + | bionic | lxd-vm | + | focal | lxd-vm | @slow - @series.focal - @uses.config.machine_type.lxd-vm Scenario Outline: FIPS enablement message when cloud init didn't run properly - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I delete the file `/run/cloud-init/instance-data.json` And I attach `contract_token` with sudo And I run `pro enable fips --assume-yes` with sudo @@ -583,39 +452,19 @@ """ Could not determine cloud, defaulting to generic FIPS package. 
""" - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - fips +yes enabled - """ + And I verify that `fips` is enabled Examples: ubuntu release - | release | - | focal | - + | release | machine_type | + | xenial | lxd-vm | + | bionic | lxd-vm | + | focal | lxd-vm | @slow - @series.jammy - @uses.config.machine_type.lxd-vm Scenario Outline: Attached enable fips-preview - Given a `` machine with ubuntu-advantage-tools installed - When I set the machine token overlay to the following yaml - """ - availableResources: - - available: true - name: fips-preview - machineTokenInfo: - contractInfo: - resourceEntitlements: - - type: fips-preview - entitled: true - """ - And I attach `contract_token` with sudo - And I run `pro status` with sudo - Then stdout matches regexp: - """ - fips-preview +yes +disabled +.* - """ + Given a `` `` machine with ubuntu-advantage-tools installed + When I attach `contract_token` with sudo + Then I verify that `fips-preview` is disabled When I verify that running `pro enable fips-preview` `with sudo` and stdin `N` exits `1` Then stdout matches regexp: """ @@ -640,5 +489,5 @@ """ Examples: ubuntu release - | release | - | jammy | + | release | machine_type | + | jammy | lxd-vm | diff -Nru ubuntu-advantage-tools-30~23.10/features/environment.py ubuntu-advantage-tools-31.2~23.10/features/environment.py --- ubuntu-advantage-tools-30~23.10/features/environment.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/environment.py 2024-02-14 15:37:46.000000000 +0000 @@ -1,5 +1,4 @@ import datetime -import itertools import logging import os import random @@ -15,10 +14,10 @@ import features.cloud as cloud from features.util import ( + BUILDER_NAME_PREFIX, SUT, InstallationSource, landscape_reject_all_pending_computers, - lxc_get_property, process_template_vars, ) @@ -44,22 +43,16 @@ A valid contract token to use during attach scenarios :param contract_token_staging: A valid staging contract token to use during attach scenarios - :param image_clean: - This indicates whether the image created for this test run should be - cleaned up when all tests are complete. - :param machine_type: - The default machine_type to test: lxd-container, lxd-vm, azure.pro, - azure.generic, aws.pro or aws.generic + :param machine_types: + A comma-separated string of machine_types to test: lxd-container, + lxd-vm, azure.pro, azure.pro-fips, azure.generic, aws.pro, + aws.pro-fips, aws.generic, gcp.pro, gcp.pro-fips, gcp.generic :param private_key_file: Optional path to pre-existing private key file to use when connecting launched VMs via ssh. :param private_key_name: Optional name of the cloud's named private key object to use when connecting to launched VMs via ssh. Default: uaclient-integration. - :param reuse_image: - A string with an image name that should be used instead of building a - fresh image for this test run. If specified, this image will not be - deleted. :param destroy_instances: This boolean indicates that test containers should be destroyed after the completion. Set to False to leave instances running. 
@@ -76,7 +69,6 @@ # environment variable input to the appropriate Python types for use within # the test framework boolean_options = [ - "image_clean", "destroy_instances", "ephemeral_instance", "snapshot_strategy", @@ -90,10 +82,10 @@ "landscape_registration_key", "landscape_api_access_key", "landscape_api_secret_key", - "machine_type", + "machine_types", + "releases", "private_key_file", "private_key_name", - "reuse_image", "artifact_dir", "install_from", "custom_ppa", @@ -119,15 +111,14 @@ self, *, cloud_credentials_path: Optional[str] = None, - image_clean: bool = True, destroy_instances: bool = True, ephemeral_instance: bool = False, snapshot_strategy: bool = False, sbuild_output_to_terminal: bool = False, - machine_type: str = "lxd-container", + machine_types: Optional[str] = None, + releases: Optional[str] = None, private_key_file: Optional[str] = None, private_key_name: str = "uaclient-integration", - reuse_image: Optional[str] = None, contract_token: Optional[str] = None, contract_token_staging: Optional[str] = None, contract_token_staging_expired: Optional[str] = None, @@ -141,7 +132,6 @@ userdata_file: Optional[str] = None, check_version: Optional[str] = None, sbuild_chroot: Optional[str] = None, - cmdline_tags: List = [] ) -> None: # First, store the values we've detected self.cloud_credentials_path = cloud_credentials_path @@ -154,13 +144,9 @@ self.landscape_registration_key = landscape_registration_key self.landscape_api_access_key = landscape_api_access_key self.landscape_api_secret_key = landscape_api_secret_key - self.image_clean = image_clean self.destroy_instances = destroy_instances - self.machine_type = machine_type self.private_key_file = private_key_file self.private_key_name = private_key_name - self.reuse_image = reuse_image - self.cmdline_tags = cmdline_tags self.artifact_dir = artifact_dir self.install_from = install_from self.custom_ppa = custom_ppa @@ -168,13 +154,10 @@ self.userdata_file = userdata_file self.check_version = check_version self.sbuild_chroot = sbuild_chroot - self.filter_series = set( - [ - tag.split(".")[1] - for tag in cmdline_tags - if tag.startswith("series.") and "series.all" not in tag - ] + self.machine_types = ( + machine_types.split(",") if machine_types else None ) + self.releases = releases.split(",") if releases else None # Next, perform any required validation if install_from == InstallationSource.CUSTOM and not custom_ppa: logging.error( @@ -189,12 +172,12 @@ ) sys.exit(1) - if self.reuse_image is not None: - if self.image_clean: - print(" Reuse_image specified, it will not be deleted.") - ignore_vars = () # type: Tuple[str, ...] 
- if "pro" in self.machine_type: + if ( + self.machine_types + and len(self.machine_types) == 1 + and "pro" in self.machine_types[0] + ): ignore_vars += ( "UACLIENT_BEHAVE_CONTRACT_TOKEN", "UACLIENT_BEHAVE_CONTRACT_TOKEN_STAGING", @@ -204,8 +187,8 @@ attr_name = env_name.replace("UACLIENT_BEHAVE_", "").lower() if getattr(self, attr_name): print( - " --- Ignoring {} because machine_type is {}".format( - env_name, self.machine_type + " --- Ignoring {} because machine_types is {}".format( + env_name, self.machine_types ) ) setattr(self, attr_name, None) @@ -228,40 +211,9 @@ random.choices(string.ascii_lowercase + string.digits, k=8) ) timed_job_tag += "-" + random_suffix + self.timed_job_tag = timed_job_tag - self.clouds = { - "aws": cloud.EC2( - cloud_credentials_path=self.cloud_credentials_path, - tag=timed_job_tag, - timestamp_suffix=False, - ), - "azure": cloud.Azure( - cloud_credentials_path=self.cloud_credentials_path, - tag=timed_job_tag, - timestamp_suffix=False, - ), - "gcp": cloud.GCP( - cloud_credentials_path=self.cloud_credentials_path, - tag=timed_job_tag, - timestamp_suffix=False, - ), - "lxd-vm": cloud.LXDVirtualMachine( - cloud_credentials_path=self.cloud_credentials_path, - ), - "lxd-container": cloud.LXDContainer( - cloud_credentials_path=self.cloud_credentials_path, - ), - } - if "aws" in self.machine_type: - self.default_cloud = self.clouds["aws"] - elif "azure" in self.machine_type: - self.default_cloud = self.clouds["azure"] - elif "gcp" in self.machine_type: - self.default_cloud = self.clouds["gcp"] - elif "lxd-vm" in self.machine_type: - self.default_cloud = self.clouds["lxd-vm"] - else: - self.default_cloud = self.clouds["lxd-container"] + self.clouds = cloud.CloudManager(self) # Finally, print the config options. This helps users debug the use of # config options, and means they'll be included in test logs in CI. @@ -282,13 +234,7 @@ kwargs = ( {} ) # type: Dict[str, Union[str, bool, List, InstallationSource]] - # Preserve cmdline_tags for reference - if not config.tags.ands: - kwargs["cmdline_tags"] = [] - else: - kwargs["cmdline_tags"] = list( - itertools.chain.from_iterable(config.tags.ands) - ) + for key, value in os.environ.items(): if not key.startswith(cls.prefix): continue @@ -310,7 +256,13 @@ kwargs[key] = bool_value if "install_from" in kwargs: - kwargs["install_from"] = InstallationSource(kwargs["install_from"]) + if str(kwargs["install_from"]).startswith("ppa:"): + kwargs["custom_ppa"] = kwargs["install_from"] + kwargs["install_from"] = InstallationSource.CUSTOM + else: + kwargs["install_from"] = InstallationSource( + kwargs["install_from"] + ) return cls(**kwargs) # type: ignore @@ -337,35 +289,16 @@ for key, value in userdata.items(): logging.debug(" - {} = {}".format(key, value)) print(" - {} = {}".format(key, value)) - context.series_image_name = {} - context.series_reuse_image = "" context.pro_config = UAClientBehaveConfig.from_environ(context.config) context.snapshots = {} context.machines = {} - if context.pro_config.reuse_image: - series = lxc_get_property( - context.pro_config.reuse_image, property_name="series", image=True - ) - machine_type = lxc_get_property( - context.pro_config.reuse_image, - property_name="machine_type", - image=True, - ) - if machine_type: - print("Found machine_type: {vm_type}".format(vm_type=machine_type)) - if series is not None: - context.series_reuse_image = series - context.series_image_name[series] = context.pro_config.reuse_image - else: - print(" Could not check image series. It will not be used. 
") - context.pro_config.reuse_image = None - -def _should_skip_tags(context: Context, tags: List) -> str: - """Return a reason if a feature or scenario should be skipped""" - machine_type = getattr(context.pro_config, "machine_type", "") - machine_types = [] +def _should_skip_config_tags(context: Context, tags: List) -> str: + """ + Return a reason if a feature or scenario should be skipped based on + missing but required config tags + """ for tag in tags: parts = tag.split(".") @@ -374,26 +307,14 @@ val = context.pro_config for idx, attr in enumerate(parts[2:], 1): val = getattr(val, attr, None) - if attr == "machine_type": - curr_machine_type = ".".join(parts[idx + 2 :]) - machine_types.append(curr_machine_type) - if curr_machine_type == machine_type: - return "" - - break if val is None: return "Skipped: tag value was None: {}".format(tag) - if machine_types: - return "Skipped: machine type {} was not found in tags:\n {}".format( - machine_type, ", ".join(machine_types) - ) - return "" def before_feature(context: Context, feature: Feature): - reason = _should_skip_tags(context, feature.tags) + reason = _should_skip_config_tags(context, feature.tags) if reason: feature.skip(reason=reason) @@ -401,58 +322,67 @@ def before_scenario(context: Context, scenario: Scenario): context.stored_vars = {} - reason = _should_skip_tags(context, scenario.effective_tags) + reason = _should_skip_config_tags(context, scenario.effective_tags) if reason: scenario.skip(reason=reason) return - filter_series = context.pro_config.filter_series - given_a_series_match = re.match( - "a `([a-z]*)` machine with ubuntu-advantage-tools installed", + # Determine release and machine_type of this scenario. + # First check outline example table row. + # Then override with what is passed directly to the "Give a machine" step + # if applicable. + scenario_release = None + scenario_machine_type = None + + if hasattr(scenario, "_row") and scenario._row is not None: + scenario_release = scenario._row.get("release") + scenario_machine_type = scenario._row.get("machine_type") + + given_a_series_machine_type_match = re.match( + "a `(.*)` `(.*)` machine with ubuntu-advantage-tools installed", scenario.steps[0].name, ) - if filter_series and given_a_series_match: - series = given_a_series_match.group(1) - if series and series not in filter_series: - scenario.skip( - reason=( - "Skipping scenario outline series `{series}`." 
- " Cmdline provided @series tags: {cmdline_series}".format( - series=series, cmdline_series=filter_series - ) + if given_a_series_machine_type_match: + step_release = given_a_series_machine_type_match.group(1) + if step_release != "": + scenario_release = step_release + step_machine_type = given_a_series_machine_type_match.group(2) + if step_machine_type != "": + scenario_machine_type = step_machine_type + + releases = context.pro_config.releases + if releases and scenario_release not in releases: + scenario.skip( + reason=( + "Scenario release is `{}`, but releases filter set" + " to {} - skipping.".format(scenario_release, releases) + ) + ) + return + machine_types = context.pro_config.machine_types + if machine_types and scenario_machine_type not in machine_types: + scenario.skip( + reason=( + "Scenario machine_type is `{}`, but machine_types filter set" + " to {} - skipping.".format( + scenario_machine_type, context.pro_config.machine_types ) ) - return + ) + return - if hasattr(scenario, "_row") and scenario._row is not None: - row_release = scenario._row.get("release") - if ( - row_release - and len(filter_series) > 0 - and row_release not in filter_series - ): + install_from = context.pro_config.install_from + if install_from == InstallationSource.LOCAL: + if "skip_local_environment" in scenario.effective_tags: scenario.skip( - reason=( - "Skipping scenario outline series `{series}`." - " Cmdline provided @series tags: {cmdline_series}".format( - series=row_release, cmdline_series=filter_series - ) - ) + reason="Scenario does not support install_from local" ) return - row_machine_type = scenario._row.get("machine_type") - if ( - row_machine_type - and context.pro_config.machine_type != "any" - and row_machine_type != context.pro_config.machine_type - ): + + if install_from == InstallationSource.PREBUILT: + if "skip_prebuilt_environment" in scenario.effective_tags: scenario.skip( - reason=( - "Skipping scenario outline machine_type `{}`." 
- " Cmdline provided machine_type: {}".format( - row_machine_type, context.pro_config.machine_type - ) - ) + reason="Scenario does not support install_from prebuilt" ) return @@ -528,37 +458,35 @@ def after_all(context): - if context.pro_config.image_clean: - for key, image in context.series_image_name.items(): - if key == context.series_reuse_image: - logging.info( - " Not deleting this image: ", - context.series_image_name[key], - ) - else: - context.pro_config.default_cloud.api.delete_image(image) - if context.pro_config.destroy_instances: try: - if context.pro_config.default_cloud._ssh_key_managed: - key_pair = context.pro_config.default_cloud.api.key_pair - os.remove(key_pair.private_key_path) - os.remove(key_pair.public_key_path) + for cloud_instance in context.pro_config.clouds.clouds.values(): + if cloud_instance._ssh_key_managed: + key_pair = cloud_instance.api.key_pair + os.remove(key_pair.private_key_path) + os.remove(key_pair.public_key_path) except Exception as e: logging.error( "Failed to delete instance ssh keys:\n{}".format(str(e)) ) - if "builder" in context.snapshots: + # Builder snapshots don't get an auto-cleanup function, so clean them here + builder_snapshots = [ + name + for name in context.snapshots + if name.startswith(BUILDER_NAME_PREFIX) + ] + for snapshot in builder_snapshots: + cloud = context.snapshots[snapshot].cloud try: - context.pro_config.default_cloud.api.delete_image( - context.snapshots["builder"] + context.pro_config.clouds.get(cloud).api.delete_image( + context.snapshots[snapshot].name ) except RuntimeError as e: logging.error( "Failed to delete image: {}\n{}".format( - context.snapshots["builder"], str(e) + context.snapshots[snapshot].name, str(e) ) ) diff -Nru ubuntu-advantage-tools-30~23.10/features/fix.feature ubuntu-advantage-tools-31.2~23.10/features/fix.feature --- ubuntu-advantage-tools-30~23.10/features/fix.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/fix.feature 2024-01-18 17:34:13.000000000 +0000 @@ -1,11 +1,9 @@ Feature: Ua fix command behaviour - @series.all - @uses.config.machine_type.lxd-container Scenario Outline: Useful SSL failure message when there aren't any ca-certs - Given a `` machine with ubuntu-advantage-tools installed - When I run `apt-get update` with sudo - When I run `apt remove ca-certificates -y` with sudo + Given a `` `` machine with ubuntu-advantage-tools installed + When I apt update + When I apt remove `ca-certificates` When I run `rm -f /etc/ssl/certs/ca-certificates.crt` with sudo When I verify that running `ua fix CVE-1800-123456` `as non-root` exits `1` Then stderr matches regexp: @@ -14,7 +12,7 @@ Cannot verify certificate of server Please install "ca-certificates" and try again. """ - When I run `apt install ca-certificates -y` with sudo + When I apt install `ca-certificates` When I run `mv /etc/ssl/certs /etc/ssl/wronglocation` with sudo When I verify that running `pro fix CVE-1800-123456` `as non-root` exits `1` Then stderr matches regexp: @@ -24,19 +22,16 @@ Please check your openssl configuration. 
""" Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | - | lunar | - | mantic | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | + | mantic | lxd-container | - @series.focal - @uses.config.machine_type.lxd-container Scenario Outline: Fix command on an unattached machine - Given a `` machine with ubuntu-advantage-tools installed - When I run `apt-get update` with sudo + Given a `` `` machine with ubuntu-advantage-tools installed + When I apt update When I verify that running `pro fix CVE-1800-123456` `as non-root` exits `1` Then I will see the following on stderr: """ @@ -59,12 +54,12 @@ Error: issue "USN-12345678-12" is not recognized. Usage: "pro fix CVE-yyyy-nnnn" or "pro fix USN-nnnn" """ - When I run `apt install -y libawl-php=0.60-1 --allow-downgrades` with sudo + When I apt install `libawl-php=0.60-1` And I run `pro fix USN-4539-1` with sudo Then stdout matches regexp: """ USN-4539-1: AWL vulnerability - Found CVEs: + Associated CVEs: - https://ubuntu.com/security/CVE-2020-11728 Fixing requested USN-4539-1 @@ -98,13 +93,12 @@ .*✔.* CVE-2022-24959 does not affect your system. """ - When I run `apt install -y rsync=3.1.3-8 --allow-downgrades` with sudo - And I run `apt install -y zlib1g=1:1.2.11.dfsg-2ubuntu1 --allow-downgrades` with sudo + When I apt install `rsync=3.1.3-8 zlib1g=1:1.2.11.dfsg-2ubuntu1` And I run `pro fix USN-5573-1` with sudo Then stdout matches regexp: """ USN-5573-1: rsync vulnerability - Found CVEs: + Associated CVEs: - https://ubuntu.com/security/CVE-2022-37434 Fixing requested USN-5573-1 @@ -142,7 +136,7 @@ Then stdout matches regexp: """ USN-5573-1: rsync vulnerability - Found CVEs: + Associated CVEs: - https://ubuntu.com/security/CVE-2022-37434 Fixing requested USN-5573-1 @@ -180,7 +174,7 @@ Then stdout matches regexp: """ USN-5573-1: rsync vulnerability - Found CVEs: + Associated CVEs: - https://ubuntu.com/security/CVE-2022-37434 Fixing requested USN-5573-1 @@ -193,14 +187,12 @@ """ Examples: ubuntu release details - | release | - | focal | + | release | machine_type | + | focal | lxd-container | - @series.xenial @uses.config.contract_token - @uses.config.machine_type.lxd-container Scenario Outline: Fix command on an unattached machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I verify that running `pro fix CVE-1800-123456` `as non-root` exits `1` Then I will see the following on stderr: """ @@ -211,14 +203,14 @@ """ Error: USN-12345-12 not found. """ - When I run `apt-get update` with sudo - When I run `apt install -y libawl-php` with sudo + When I apt update + When I apt install `libawl-php` And I reboot the machine And I run `pro fix USN-4539-1` as non-root Then stdout matches regexp: """ USN-4539-1: AWL vulnerability - Found CVEs: + Associated CVEs: - https://ubuntu.com/security/CVE-2020-11728 Fixing requested USN-4539-1 @@ -249,13 +241,13 @@ .*✔.* CVE-2020-28196 is resolved. """ - When I run `DEBIAN_FRONTEND=noninteractive apt-get install -y expat=2.1.0-7 swish-e matanza ghostscript` with sudo + When I apt install `expat=2.1.0-7 swish-e matanza ghostscript` And I verify that running `pro fix CVE-2017-9233 --dry-run` `as non-root` exits `1` Then stdout matches regexp: """ .*WARNING: The option --dry-run is being used. 
No packages will be installed when running this command..* - CVE-2017-9233: Coin3D vulnerability + CVE-2017-9233: Expat vulnerability - https://ubuntu.com/security/CVE-2017-9233 3 affected source packages are installed: expat, matanza, swish-e @@ -271,7 +263,7 @@ When I verify that running `pro fix CVE-2017-9233` `with sudo` exits `1` Then stdout matches regexp: """ - CVE-2017-9233: Coin3D vulnerability + CVE-2017-9233: Expat vulnerability - https://ubuntu.com/security/CVE-2017-9233 3 affected source packages are installed: expat, matanza, swish-e @@ -290,7 +282,7 @@ .*WARNING: The option --dry-run is being used. No packages will be installed when running this command..* USN-5079-2: curl vulnerabilities - Found CVEs: + Associated CVEs: - https://ubuntu.com/security/CVE-2021-22946 - https://ubuntu.com/security/CVE-2021-22947 @@ -328,7 +320,7 @@ Then stdout matches regexp """ USN-5079-2: curl vulnerabilities - Found CVEs: + Associated CVEs: - https://ubuntu.com/security/CVE-2021-22946 - https://ubuntu.com/security/CVE-2021-22947 @@ -353,7 +345,7 @@ Then stdout matches regexp: """ USN-5079-2: curl vulnerabilities - Found CVEs: + Associated CVEs: - https://ubuntu.com/security/CVE-2021-22946 - https://ubuntu.com/security/CVE-2021-22947 @@ -395,7 +387,7 @@ Then stdout matches regexp: """ USN-5051-2: OpenSSL vulnerability - Found CVEs: + Associated CVEs: - https://ubuntu.com/security/CVE-2021-3712 Fixing requested USN-5051-2 @@ -410,14 +402,14 @@ When I run `pro disable esm-infra` with sudo # Allow esm-cache to be populated And I run `sleep 5` as non-root - And I run `apt-get install gzip -y` with sudo + And I apt install `gzip` And I run `pro fix USN-5378-4 --dry-run` as non-root Then stdout matches regexp: """ .*WARNING: The option --dry-run is being used. No packages will be installed when running this command..* USN-5378-4: Gzip vulnerability - Found CVEs: + Associated CVEs: - https://ubuntu.com/security/CVE-2022-1271 Fixing requested USN-5378-4 @@ -472,7 +464,7 @@ Then stdout matches regexp: """ USN-5378-4: Gzip vulnerability - Found CVEs: + Associated CVEs: - https://ubuntu.com/security/CVE-2022-1271 Fixing requested USN-5378-4 @@ -523,8 +515,8 @@ When I run `pro detach --assume-yes` with sudo And I run `sed -i "/xenial-updates/d" /etc/apt/sources.list` with sudo And I run `sed -i "/xenial-security/d" /etc/apt/sources.list` with sudo - And I run `apt-get update` with sudo - And I run `apt-get install squid -y` with sudo + And I apt update + And I apt install `squid` And I verify that running `pro fix CVE-2020-25097` `as non-root` exits `1` Then stdout matches regexp: """ @@ -542,14 +534,12 @@ """ Examples: ubuntu release details - | release | - | xenial | + | release | machine_type | + | xenial | lxd-container | - @series.bionic - @uses.config.machine_type.lxd-container Scenario: Fix command on an unattached machine - Given a `bionic` machine with ubuntu-advantage-tools installed - When I run `apt-get update` with sudo + Given a `bionic` `lxd-container` machine with ubuntu-advantage-tools installed + When I apt update When I verify that running `pro fix CVE-1800-123456` `as non-root` exits `1` Then I will see the following on stderr: """ @@ -572,14 +562,14 @@ Error: issue "USN-12345678-12" is not recognized. Usage: "pro fix CVE-yyyy-nnnn" or "pro fix USN-nnnn" """ - When I run `apt install -y libawl-php` with sudo + When I apt install `libawl-php` And I run `pro fix USN-4539-1 --dry-run` as non-root Then stdout matches regexp: """ .*WARNING: The option --dry-run is being used. 
No packages will be installed when running this command..* USN-4539-1: AWL vulnerability - Found CVEs: + Associated CVEs: - https://ubuntu.com/security/CVE-2020-11728 Fixing requested USN-4539-1 @@ -591,7 +581,7 @@ Then stdout matches regexp: """ USN-4539-1: AWL vulnerability - Found CVEs: + Associated CVEs: - https://ubuntu.com/security/CVE-2020-11728 Fixing requested USN-4539-1 @@ -612,7 +602,7 @@ .*✔.* CVE-2020-28196 is resolved. """ - When I run `apt-get install xterm=330-1ubuntu2 -y` with sudo + When I apt install `xterm=330-1ubuntu2` And I verify that running `pro fix CVE-2021-27135` `as non-root` exits `1` Then stdout matches regexp: """ @@ -669,8 +659,7 @@ .*✔.* CVE-2021-27135 is resolved. """ - When I run `apt-get install libbz2-1.0=1.0.6-8.1 -y --allow-downgrades` with sudo - And I run `apt-get install bzip2=1.0.6-8.1 -y` with sudo + When I apt install `libbz2-1.0=1.0.6-8.1 bzip2=1.0.6-8.1` And I run `pro fix USN-4038-3` with sudo Then stdout matches regexp: """ @@ -690,7 +679,7 @@ Then stdout matches regexp: """ USN-6130-1: Linux kernel vulnerabilities - Found CVEs: + Associated CVEs: - https://ubuntu.com/security/CVE-2023-30456 - https://ubuntu.com/security/CVE-2023-1380 - https://ubuntu.com/security/CVE-2023-32233 @@ -719,6 +708,7 @@ - USN-6222-1 - USN-6256-1 - USN-6385-1 + - USN-6460-1 Fixing related USNs: - USN-6033-1 @@ -806,6 +796,11 @@ .*✔.* USN-6385-1 does not affect your system. + - USN-6460-1 + No affected source packages are installed. + + .*✔.* USN-6460-1 does not affect your system. + Summary: .*✔.* USN-6130-1 \[requested\] does not affect your system. .*✔.* USN-6033-1 \[related\] does not affect your system. @@ -825,15 +820,24 @@ .*✔.* USN-6222-1 \[related\] does not affect your system. .*✔.* USN-6256-1 \[related\] does not affect your system. .*✔.* USN-6385-1 \[related\] does not affect your system. + .*✔.* USN-6460-1 \[related\] does not affect your system. + """ + When I run `pro fix CVE-2023-42752` with sudo + Then stdout matches regexp: + """ + CVE-2023-42752: Linux kernel \(NVIDIA\) vulnerabilities + - https://ubuntu.com/security/CVE-2023-42752 + + No affected source packages are installed. + + .*✔.* CVE-2023-42752 does not affect your system. 
""" - @series.bionic - @uses.config.machine_type.lxd-container Scenario: Fix command on a machine without security/updates source lists - Given a `bionic` machine with ubuntu-advantage-tools installed + Given a `bionic` `lxd-container` machine with ubuntu-advantage-tools installed When I run `sed -i "/bionic-updates/d" /etc/apt/sources.list` with sudo And I run `sed -i "/bionic-security/d" /etc/apt/sources.list` with sudo - And I run `apt-get update` with sudo + And I apt update And I run `wget -O pkg.deb https://launchpad.net/ubuntu/+source/openssl/1.1.1-1ubuntu2.1~18.04.14/+build/22454675/+files/openssl_1.1.1-1ubuntu2.1~18.04.14_amd64.deb` as non-root And I run `dpkg -i pkg.deb` with sudo And I verify that running `pro fix CVE-2023-0286` `as non-root` exits `1` diff -Nru ubuntu-advantage-tools-30~23.10/features/i18n.feature ubuntu-advantage-tools-31.2~23.10/features/i18n.feature --- ubuntu-advantage-tools-30~23.10/features/i18n.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/i18n.feature 2024-01-18 17:34:13.000000000 +0000 @@ -1,8 +1,5 @@ Feature: Pro supports multiple languages - @series.lts - @uses.config.machine_type.any - @uses.config.machine_type.lxd-container Scenario Outline: Translation works Given a `` `` machine with ubuntu-advantage-tools installed When I run shell command `LANGUAGE=pt_BR.UTF-8 pro security-status` as non-root @@ -19,14 +16,14 @@ """ não """ - When I run `apt update` with sudo + When I apt update And I apt install `jq` And I run shell command `LANGUAGE=pt_BR.UTF-8 pro status --format json | jq .services[0].available` as non-root Then I will see the following on stdout: """ "yes" """ - When I run `apt-get remove -y ubuntu-pro-client-l10n` with sudo + When I apt remove `ubuntu-pro-client-l10n` When I run shell command `LANGUAGE=pt_BR.UTF-8 pro security-status` as non-root Then stdout contains substring: """ @@ -38,9 +35,35 @@ | focal | lxd-container | | jammy | lxd-container | - @series.xenial - @uses.config.machine_type.any - @uses.config.machine_type.lxd-container + Scenario Outline: Translation works + Given a `` `` machine with ubuntu-advantage-tools installed + When I run shell command `LANGUAGE=pt_BR.UTF-8 pro security-status` as non-root + Then stdout contains substring: + """ + Ubuntu Pro não está disponível para versões do Ubuntu não LTS. + """ + When I run shell command `LANGUAGE=pt_BR.UTF-8 pro status --all` as non-root + Then stdout contains substring: + """ + não + """ + When I apt update + And I apt install `jq` + And I run shell command `LANGUAGE=pt_BR.UTF-8 pro status --format json | jq .result` as non-root + Then I will see the following on stdout: + """ + "success" + """ + When I apt remove `ubuntu-pro-client-l10n` + When I run shell command `LANGUAGE=pt_BR.UTF-8 pro security-status` as non-root + Then stdout contains substring: + """ + Ubuntu Pro is not available for non-LTS releases. 
+ """ + Examples: ubuntu release + | release | machine_type | + | mantic | lxd-container | + # Note: Translations do work on xenial, but our test environment triggers a bug in python that # causes it to think we're in an ascii-only environment Scenario Outline: Translation doesn't error when python thinks it's ascii only @@ -59,18 +82,16 @@ | release | machine_type | | xenial | lxd-container | - @series.focal - @uses.config.machine_type.any - @uses.config.machine_type.lxd-container Scenario Outline: apt-hook translations work Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo - When I run `apt-get update` with sudo - When I run `apt-get upgrade -y` with sudo + When I apt update + When I apt upgrade When I run `pro detach --assume-yes` with sudo - When I run `apt-get update` with sudo - When I run `apt-get install hello` with sudo + When I apt update + When I apt install `hello` When I attach `contract_token` with sudo + # Didn't call the step specifically because of the language environment When I run shell command `LANGUAGE=pt_BR.UTF-8 apt upgrade -y` with sudo Then stdout matches regexp: """ @@ -80,13 +101,11 @@ | release | machine_type | | focal | lxd-container | - @series.all - @uses.config.machine_type.lxd-container @uses.config.contract_token Scenario Outline: Pro client's commands run successfully in a different locale - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed ## Change the locale - When I run `apt install language-pack-fr -y` with sudo + When I apt install `language-pack-fr` And I run `update-locale LANG=fr_FR.UTF-8` with sudo And I reboot the machine And I run `cat /etc/default/locale` as non-root @@ -105,7 +124,7 @@ """ Architecture: """ - When I run `apt update` with sudo + When I apt update Then stdout does not match regexp: """ Hit @@ -134,7 +153,7 @@ When I verify that running `pro auto-attach` `with sudo` exits `2` Then stderr matches regexp: """ - This machine is already attached to 'UA Client Test' + This machine is already attached to '.+' To use a different subscription first run: sudo pro detach. 
""" # status command @@ -142,7 +161,7 @@ Then stdout is a json matching the `ua_status` schema When I run `pro status --format yaml` as non-root Then stdout is a yaml matching the `ua_status` schema - When I create the file `/tmp/machine-token-overlay.json` with the following: + When I create the file `/var/lib/ubuntu-advantage/machine-token-overlay.json` with the following: """ { "machineTokenInfo": { @@ -155,7 +174,7 @@ And I append the following on uaclient config: """ features: - machine_token_overlay: "/tmp/machine-token-overlay.json" + machine_token_overlay: "/var/lib/ubuntu-advantage/machine-token-overlay.json" """ And I run `pro status` with sudo Then stdout contains substring: @@ -199,10 +218,9 @@ When I run `pro --version` with sudo Then I will see the uaclient version on stdout Examples: ubuntu release - | release | - | bionic | - | focal | - | xenial | - | jammy | - | lunar | - | mantic | + | release | machine_type | + | bionic | lxd-container | + | focal | lxd-container | + | xenial | lxd-container | + | jammy | lxd-container | + | mantic | lxd-container | diff -Nru ubuntu-advantage-tools-30~23.10/features/install_uninstall.feature ubuntu-advantage-tools-31.2~23.10/features/install_uninstall.feature --- ubuntu-advantage-tools-30~23.10/features/install_uninstall.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/install_uninstall.feature 2024-02-14 15:37:46.000000000 +0000 @@ -1,26 +1,21 @@ Feature: Pro Install and Uninstall related tests - @series.all - @uses.config.machine_type.lxd-container Scenario Outline: Do not fail on postinst when cloud-id returns error - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I delete the file `/run/cloud-init/instance-data.json` Then I verify that running `dpkg-reconfigure ubuntu-advantage-tools` `with sudo` exits `0` Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | - | lunar | - | mantic | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | + | mantic | lxd-container | - @series.lts @uses.config.contract_token - @uses.config.machine_type.lxd-container Scenario Outline: Purge package after attaching it to a machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo And I run `touch /etc/apt/preferences.d/ubuntu-esm-infra` with sudo Then I verify that files exist matching `/var/log/ubuntu-advantage.log` @@ -29,7 +24,7 @@ And I verify that files exist matching `/etc/apt/trusted.gpg.d/ubuntu-pro-esm-infra.gpg` And I verify that files exist matching `/etc/apt/sources.list.d/ubuntu-esm-infra.list` And I verify that files exist matching `/etc/apt/preferences.d/ubuntu-esm-infra` - When I run `apt-get purge ubuntu-advantage-tools -y` with sudo, retrying exit [100] + When I run `apt purge ubuntu-pro-client -y` with sudo, retrying exit [100] Then stdout matches regexp: """ Purging configuration files for ubuntu-advantage-tools @@ -42,20 +37,18 @@ And I verify that no files exist matching `/etc/apt/preferences.d/ubuntu-*` Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | @slow - @series.lts - 
@uses.config.machine_type.lxd-container Scenario Outline: Do not fail during postinst with nonstandard python setup - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed # Works when in a python virtualenv - When I run `apt update` with sudo - And I run `apt install python3-venv -y` with sudo + When I apt update + And I apt install `python3-venv` And I run `python3 -m venv env` with sudo Then I verify that running `bash -c ". env/bin/activate && python3 -c 'import uaclient'"` `with sudo` exits `1` Then stderr matches regexp: @@ -67,7 +60,7 @@ # Works with python built/installed from source When I run `wget https://www.python.org/ftp/python/3.10.0/Python-3.10.0.tgz` with sudo When I run `tar -xvf Python-3.10.0.tgz` with sudo - When I run `apt install build-essential zlib1g-dev -y` with sudo + When I apt install `build-essential zlib1g-dev` When I run `sh -c "cd Python-3.10.0 && ./configure"` with sudo When I run `make -C Python-3.10.0` with sudo When I run `make -C Python-3.10.0 install` with sudo @@ -84,8 +77,57 @@ Then I verify that running `dpkg-reconfigure ubuntu-advantage-tools` `with sudo` exits `0` Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | + + @skip_local_environment + @skip_prebuilt_environment + Scenario Outline: Package ubuntu-advantage-tools now install + Given a `` `` machine + When I install transition package ubuntu-advantage-tools + Then I verify that `ubuntu-pro-client` is installed + + Examples: ubuntu release + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + + @skip_local_environment + @skip_prebuilt_environment + Scenario Outline: Package ubuntu-advantage-tools now install + Given a `` `` machine + When I install transition package ubuntu-advantage-tools + Then I verify that `ubuntu-pro-auto-attach` is installed + + Examples: ubuntu release + | release | machine_type | + | xenial | aws.pro | + | bionic | aws.pro | + | focal | aws.pro | + | jammy | aws.pro | + | jammy | aws.pro | + + @skip_local_environment + @skip_prebuilt_environment + Scenario Outline: Does not cause deadlock when cloud-init installs ubuntu-advantage-tools + Given a `` `` machine with ubuntu-advantage-tools installed adding this cloud-init user_data + """ + : {} + """ + When I apt remove `ubuntu-advantage-tools ubuntu-pro-client` + When I run `cloud-init clean --logs` with sudo + When I reboot the machine + When I run `cloud-init status --wait` with sudo + Then I verify that `ubuntu-advantage-tools` is installed + + Examples: ubuntu release + | release | machine_type | user_data_field | + | xenial | lxd-container | ubuntu-advantage | + | bionic | lxd-container | ubuntu_advantage | + | focal | lxd-container | ubuntu_advantage | + | jammy | lxd-container | ubuntu_advantage | + | mantic | lxd-container | ubuntu_advantage | diff -Nru ubuntu-advantage-tools-30~23.10/features/landscape.feature ubuntu-advantage-tools-31.2~23.10/features/landscape.feature --- ubuntu-advantage-tools-30~23.10/features/landscape.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/landscape.feature 2024-01-18 17:34:13.000000000 +0000 @@ -4,12 +4,9 @@ @uses.config.landscape_api_secret_key Feature: Enable landscape on Ubuntu - @series.mantic - @uses.config.machine_type.any - 
@uses.config.machine_type.lxd-container Scenario Outline: Enable Landscape non-interactively Given a `` `` machine with ubuntu-advantage-tools installed - When I attach `contract_token_staging` with sudo and options `--no-auto-enable` + When I attach `contract_token` with sudo and options `--no-auto-enable` Then I verify that running `pro enable landscape` `as non-root` exits `1` And I will see the following on stderr: @@ -21,94 +18,55 @@ Then stdout contains substring: """ One moment, checking your subscription first - Updating Landscape package lists + Updating standard Ubuntu package lists Installing landscape-client Executing `landscape-config --computer-title $behave_var{machine-name system-under-test} --account-name pro-client-qa --registration-key --silent` """ Then stdout contains substring """ - System successfully registered. - """ - When I run `pro status` as non-root - Then stdout matches regexp: - """ - landscape +yes +enabled - """ - When I run `pro status` with sudo - Then stdout matches regexp: - """ - landscape +yes +enabled - """ - - When I run `systemctl stop landscape-client` with sudo - When I run `pro status` with sudo - Then stdout matches regexp: - """ - landscape +yes +warning - """ - Then stdout contains substring: - """ - Landscape is installed and configured and registered but not running. - Run `sudo landscape-config` to start it, or run `sudo pro disable landscape` - """ - - When I run `rm /etc/landscape/client.conf` with sudo - When I run `pro status` with sudo - Then stdout matches regexp: - """ - landscape +yes +warning - """ - Then stdout contains substring: - """ - Landscape is installed but not configured. - Run `sudo landscape-config` to set it up, or run `sudo pro disable landscape` + Registration request sent successfully. """ + And I verify that `landscape` is enabled When I run `sudo pro disable landscape` with sudo - Then I will see the following on stdout: - """ - Executing `landscape-config --disable` - Failed running command 'landscape-config --disable' [exit(1)]. 
Message: error: config file /etc/landscape/client.conf can't be read - Backing up /etc/landscape/client.conf as /etc/landscape/client.conf.pro-disable-backup - [Errno 2] No such file or directory: '/etc/landscape/client.conf' -> '/etc/landscape/client.conf.pro-disable-backup' - Uninstalling landscape-client - """ - When I run `pro status` with sudo - Then stdout matches regexp: - """ - landscape +yes +disabled - """ + Then I verify that `landscape` is disabled # Enable with assume-yes When I run `pro enable landscape --assume-yes -- --computer-title $behave_var{machine-name system-under-test} --account-name pro-client-qa --registration-key $behave_var{config landscape_registration_key}` with sudo Then I will see the following on stdout: """ One moment, checking your subscription first - Updating Landscape package lists + Updating standard Ubuntu package lists Installing landscape-client Executing `landscape-config --computer-title $behave_var{machine-name system-under-test} --account-name pro-client-qa --registration-key --silent` Landscape enabled """ - When I run `pro status` with sudo - Then stdout matches regexp: + And I verify that `landscape` is enabled + + # stopping the service effectively disables it + When I run `systemctl stop landscape-client` with sudo + Then I verify that `landscape` is disabled + When I verify that running `sudo pro disable landscape` `with sudo` exits `1` + Then I will see the following on stdout: """ - landscape +yes +enabled + Landscape is not currently enabled + See: sudo pro status """ - When I run `sudo pro disable landscape` with sudo # Fail to enable with assume-yes - When I verify that running `pro enable landscape --assume-yes -- --computer-title $behave_var{machine-name system-under-test} --account-name pro-client-qa` `with sudo` exits `1` + When I verify that running `pro enable landscape --assume-yes -- --computer-title $behave_var{machine-name system-under-test} --account-name pro-client-qa --registration-key wrong` `with sudo` exits `1` Then I will see the following on stdout: """ One moment, checking your subscription first - Updating Landscape package lists + Updating standard Ubuntu package lists Installing landscape-client - Executing `landscape-config --computer-title $behave_var{machine-name system-under-test} --account-name pro-client-qa --silent` - Created symlink /etc/systemd/system/multi-user.target.wants/landscape-client.service → /lib/systemd/system/landscape-client.service. + Executing `landscape-config --computer-title $behave_var{machine-name system-under-test} --account-name pro-client-qa --registration-key --silent` Invalid account name or registration key. Could not enable Landscape. 
""" + # This will become obsolete soon: #2864 When I run `pro status` with sudo + # I am keeping this check until the non-root landscape-config check works as expected Then stdout matches regexp: """ landscape +yes +warning @@ -119,6 +77,11 @@ Run `sudo landscape-config` to register, or run `sudo pro disable landscape` """ When I run `sudo pro disable landscape` with sudo + When I run `pro status` with sudo + Then stdout matches regexp: + """ + landscape +yes +disabled + """ # Enable with assume-yes and format json When I run `pro enable landscape --assume-yes --format=json -- --computer-title $behave_var{machine-name system-under-test} --account-name pro-client-qa --registration-key $behave_var{config landscape_registration_key}` with sudo @@ -126,20 +89,18 @@ """ {"_schema_version": "0.1", "errors": [], "failed_services": [], "needs_reboot": false, "processed_services": ["landscape"], "result": "success", "warnings": []} """ - When I run `pro status` with sudo - Then stdout matches regexp: - """ - landscape +yes +enabled - """ + And I verify that `landscape` is enabled When I run `sudo pro disable landscape` with sudo # Fail to enable with assume-yes and format json - When I verify that running `pro enable landscape --assume-yes --format=json -- --computer-title $behave_var{machine-name system-under-test} --account-name pro-client-qa` `with sudo` exits `1` + When I verify that running `pro enable landscape --assume-yes --format=json -- --computer-title $behave_var{machine-name system-under-test} --account-name pro-client-qa --registration-key wrong` `with sudo` exits `1` Then I will see the following on stdout: """ - {"_schema_version": "0.1", "errors": [{"additional_info": {"stderr": "Created symlink /etc/systemd/system/multi-user.target.wants/landscape-client.service \u2192 /lib/systemd/system/landscape-client.service.\nInvalid account name or registration key.", "stdout": "Please wait..."}, "message": "landscape-config command failed", "message_code": "landscape-config-failed", "service": "landscape", "type": "service"}], "failed_services": ["landscape"], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} + {"_schema_version": "0.1", "errors": [{"additional_info": {"stderr": "Invalid account name or registration key.", "stdout": ""}, "message": "landscape-config command failed", "message_code": "landscape-config-failed", "service": "landscape", "type": "service"}], "failed_services": ["landscape"], "needs_reboot": false, "processed_services": [], "result": "failure", "warnings": []} """ + # This will become obsolete soon: #2864 When I run `pro status` with sudo + # I am keeping this check until the non-root landscape-config check works as expected Then stdout matches regexp: """ landscape +yes +warning @@ -149,17 +110,13 @@ Landscape is installed and configured but not registered. 
Run `sudo landscape-config` to register, or run `sudo pro disable landscape` """ - When I run `sudo pro disable landscape` with sudo Examples: ubuntu release | release | machine_type | | mantic | lxd-container | - @series.mantic - @uses.config.machine_type.any - @uses.config.machine_type.lxd-container Scenario Outline: Enable Landscape interactively Given a `` `` machine with ubuntu-advantage-tools installed - When I attach `contract_token_staging` with sudo and options `--no-auto-enable` + When I attach `contract_token` with sudo and options `--no-auto-enable` Then I verify that running `pro enable landscape` `as non-root` exits `1` And I will see the following on stderr: @@ -169,83 +126,230 @@ When I run `pro enable landscape` `with sudo` and the following stdin # This will change in the future, but right now the lines are: - # allow starting on boot + # use self-hosted? # computer title # account name # registration key # confirm registration key # http proxy # https proxy - # enable script execution - # access group - # tags # request registration """ - y + n $behave_var{machine-name system-under-test} pro-client-qa $behave_var{config landscape_registration_key} $behave_var{config landscape_registration_key} - n - - y """ Then stdout contains substring: """ One moment, checking your subscription first - Updating Landscape package lists + Updating standard Ubuntu package lists Installing landscape-client Executing `landscape-config` """ Then stdout contains substring: """ - System successfully registered. - """ - When I run `pro status` with sudo - Then stdout matches regexp: - """ - landscape +yes +enabled + Registration request sent successfully. """ + And I verify that `landscape` is enabled When I run `pro disable landscape` with sudo When I verify that running `pro enable landscape` `with sudo` and the following stdin exits `1` """ - y + n $behave_var{machine-name system-under-test} pro-client-qa wrong wrong - n - - y """ Then stdout contains substring: """ One moment, checking your subscription first - Updating Landscape package lists + Updating standard Ubuntu package lists Installing landscape-client Executing `landscape-config` """ - Then stderr contains substring: + And stderr contains substring: """ Invalid account name or registration key. """ When I run `pro status` with sudo + Then stdout contains substring: + """ + Landscape is installed and configured but not registered. 
+ Run `sudo landscape-config` to register, or run `sudo pro disable landscape` + """ + Examples: ubuntu release + | release | machine_type | + | mantic | lxd-container | + + Scenario Outline: Easily re-enable Landscape non-interactively after a disable + Given a `` `` machine with ubuntu-advantage-tools installed + When I attach `contract_token` with sudo and options `--no-auto-enable` + + When I run `pro enable landscape --assume-yes -- --computer-title $behave_var{machine-name system-under-test} --account-name pro-client-qa --registration-key $behave_var{config landscape_registration_key}` with sudo + Then I will see the following on stdout: + """ + One moment, checking your subscription first + Updating standard Ubuntu package lists + Installing landscape-client + Executing `landscape-config --computer-title $behave_var{machine-name system-under-test} --account-name pro-client-qa --registration-key --silent` + Landscape enabled + """ + When I run `pro status` with sudo Then stdout matches regexp: """ - landscape +yes +warning + landscape +yes +enabled + """ + + When I run `pro disable landscape` with sudo + Then I will see the following on stdout: """ + Executing `landscape-config --disable` + /etc/landscape/client.conf contains your landscape-client configuration. + To re-enable Landscape with the same configuration, run: + sudo pro enable landscape --assume-yes + """ + When I run `pro status` with sudo + Then stdout matches regexp: + """ + landscape +yes +disabled + """ + + When I run `pro enable landscape --assume-yes` with sudo + When I run `pro status` with sudo + Then stdout matches regexp: + """ + landscape +yes +enabled + """ + When I run shell command `cat /etc/landscape/client.conf | grep computer_title` with sudo + Then I will see the following on stdout: + """ + computer_title = $behave_var{machine-name system-under-test} + """ + When I run shell command `cat /etc/landscape/client.conf | grep account_name` with sudo + Then I will see the following on stdout: + """ + account_name = pro-client-qa + """ + + # Now do the same test but with a full detach + When I run `pro detach --assume-yes` with sudo + Then I will see the following on stdout: + """ + Detach will disable the following service: + landscape + Executing `landscape-config --disable` + /etc/landscape/client.conf contains your landscape-client configuration. + To re-enable Landscape with the same configuration, run: + sudo pro enable landscape --assume-yes + + This machine is now detached. + """ + When I run `pro api u.pro.status.is_attached.v1` with sudo Then stdout contains substring: """ - Landscape is installed and configured but not registered. 
- Run `sudo landscape-config` to register, or run `sudo pro disable landscape` + "is_attached": false + """ + + When I attach `contract_token` with sudo and options `--no-auto-enable` + When I run `pro enable landscape --assume-yes` with sudo + When I run `pro status` with sudo + Then stdout matches regexp: + """ + landscape +yes +enabled + """ + When I run shell command `cat /etc/landscape/client.conf | grep computer_title` with sudo + Then I will see the following on stdout: + """ + computer_title = $behave_var{machine-name system-under-test} + """ + When I run shell command `cat /etc/landscape/client.conf | grep account_name` with sudo + Then I will see the following on stdout: + """ + account_name = pro-client-qa """ Examples: ubuntu release | release | machine_type | | mantic | lxd-container | + + Scenario Outline: Detaching/reattaching on an unsupported release does not affect landscape + Given a `` `` machine with ubuntu-advantage-tools installed + When I attach `contract_token` with sudo and options `--no-auto-enable` + When I run `pro status` with sudo + Then stdout does not contain substring: + """ + landscape + """ + + When I apt install `landscape-client` + + # assert pre-enabled state + When I verify that running `systemctl is-active landscape-client` `with sudo` exits `3` + Then I will see the following on stdout: + """ + inactive + """ + + # enable with landscape-config directly + When I run `landscape-config --computer-title $behave_var{machine-name system-under-test} --account-name pro-client-qa --registration-key $behave_var{config landscape_registration_key} --silent` with sudo + Then I will see the following on stdout: + """ + Please wait... + System successfully registered. + """ + + # assert that landscape is running, but pro doesn't care + When I verify that running `systemctl is-active landscape-client` `with sudo` exits `0` + Then I will see the following on stdout: + """ + active + """ + When I run `pro status` with sudo + Then stdout does not contain substring: + """ + landscape + """ + + # disable refuses + When I verify that running `pro disable landscape` `with sudo` exits `1` + Then I will see the following on stdout: + """ + Disabling Landscape with pro is not supported. + See: sudo pro status + """ + + # detach doesn't touch it + When I run `pro detach --assume-yes` with sudo + Then I will see the following on stdout: + """ + This machine is now detached. 
+ """ + + # still running + When I verify that running `systemctl is-active landscape-client` `with sudo` exits `0` + Then I will see the following on stdout: + """ + active + """ + + # re-attaching doesn't affect it either + When I attach `contract_token` with sudo and options `--no-auto-enable` + + # still running + When I verify that running `systemctl is-active landscape-client` `with sudo` exits `0` + Then I will see the following on stdout: + """ + active + """ + + Examples: ubuntu release + | release | machine_type | + | jammy | lxd-container | diff -Nru ubuntu-advantage-tools-30~23.10/features/livepatch.feature ubuntu-advantage-tools-31.2~23.10/features/livepatch.feature --- ubuntu-advantage-tools-30~23.10/features/livepatch.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/livepatch.feature 2024-01-18 17:34:13.000000000 +0000 @@ -1,9 +1,6 @@ @uses.config.contract_token Feature: Livepatch - @series.focal - @uses.config.machine_type.any - @uses.config.machine_type.lxd-vm Scenario Outline: Unattached livepatch status shows warning when on unsupported kernel Given a `` `` machine with ubuntu-advantage-tools installed When I change config key `livepatch_url` to use value `` @@ -23,8 +20,8 @@ Supported livepatch kernels are listed here: https://ubuntu.com/security/livepatch/docs/kernels """ Then I verify that files exist matching `/run/ubuntu-advantage/livepatch-kernel-support-cache.json` - When I run `apt-get install linux-generic -y` with sudo - When I run `DEBIAN_FRONTEND=noninteractive apt-get remove linux-image*-kvm -y` with sudo + When I apt install `linux-generic` + When I apt remove `linux-image*-kvm` When I run `update-grub` with sudo When I reboot the machine When I run `pro status` with sudo @@ -41,9 +38,6 @@ | focal | lxd-vm | https://livepatch.canonical.com | | focal | lxd-vm | https://livepatch.staging.canonical.com | - @series.focal - @uses.config.machine_type.any - @uses.config.machine_type.lxd-vm Scenario Outline: Attached livepatch status shows warning when on unsupported kernel Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo @@ -74,8 +68,8 @@ Either switch to a supported kernel or `pro disable livepatch` to dismiss this warning. 
""" - When I run `apt-get install linux-generic -y` with sudo - When I run `DEBIAN_FRONTEND=noninteractive apt-get remove linux-image*-kvm -y` with sudo + When I apt install `linux-generic` + When I apt remove `linux-image*-kvm` When I run `update-grub` with sudo When I reboot the machine When I run `pro status` with sudo @@ -93,14 +87,11 @@ | release | machine_type | | focal | lxd-vm | - @series.focal - @uses.config.machine_type.any - @uses.config.machine_type.gcp.generic Scenario Outline: Attached livepatch status shows upgrade required when on an old kernel Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token_staging` with sudo - When I run `apt-get install linux-headers- linux-image- -y` with sudo - When I run `DEBIAN_FRONTEND=noninteractive apt-get remove linux-image*-gcp -y` with sudo + When I apt install `linux-headers- linux-image-` + When I apt remove `linux-image*-gcp` When I run `update-grub` with sudo When I reboot the machine When I run `uname -r` with sudo @@ -108,11 +99,8 @@ """ """ + And I verify that `livepatch` status is warning When I run `pro status` with sudo - Then stdout matches regexp: - """ - livepatch +yes +warning +Canonical Livepatch service - """ Then stdout contains substring: """ NOTICES @@ -120,18 +108,14 @@ Please upgrade the kernel with apt and reboot for continued livepatch support. """ - When I run `apt-get install linux-headers-generic linux-image-generic -y` with sudo + When I apt install `linux-headers-generic linux-image-generic` When I reboot the machine When I run `uname -r` with sudo Then stdout does not contain substring: """ """ - When I run `pro status` with sudo - Then stdout matches regexp: - """ - livepatch +yes +enabled +Canonical Livepatch service - """ + And I verify that `livepatch` is enabled Then stdout does not contain substring: """ NOTICES @@ -143,10 +127,6 @@ | release | machine_type | old_kernel_version | | focal | gcp.generic | 5.4.0-28-generic | - @series.lunar - @series.mantic - @uses.config.machine_type.any - @uses.config.machine_type.lxd-vm Scenario Outline: Livepatch is not enabled by default and can't be enabled on interim releases Given a `` `` machine with ubuntu-advantage-tools installed When I run `pro status --all` with sudo @@ -172,35 +152,26 @@ """ Examples: ubuntu release | release | machine_type | pretty_name | - | lunar | lxd-vm | 23.04 (Lunar Lobster) | | mantic | lxd-vm | 23.10 (Mantic Minotaur) | - @series.jammy - @uses.config.machine_type.any - @uses.config.machine_type.lxd.vm Scenario Outline: Livepatch is supported on interim HWE kernel # This test is intended to ensure that an interim HWE kernel has the correct support status # It should be kept up to date so that it runs on the latest LTS and installs the latest # HWE kernel for that release. 
Given a `` `` machine with ubuntu-advantage-tools installed - When I run `apt-get update` with sudo - When I run `apt-get install linux-generic-hwe- -y` with sudo - When I run `DEBIAN_FRONTEND=noninteractive apt-get remove linux-image*-kvm -y` with sudo + When I apt update + When I apt install `linux-generic-hwe-` + When I apt remove `linux-image*-kvm` When I run `update-grub` with sudo When I reboot the machine When I attach `contract_token` with sudo When I run `pro status` with sudo - Then stdout matches regexp: - """ - livepatch +yes +enabled +Canonical Livepatch service - """ + Then I verify that `livepatch` is enabled + Examples: ubuntu release | release | machine_type | release_num | | jammy | lxd-vm | 22.04 | - @series.xenial - @uses.config.machine_type.any - @uses.config.machine_type.lxd.vm Scenario Outline: snapd installed as a snap if necessary Given a `` `` machine with ubuntu-advantage-tools installed When I run `snap list` with sudo diff -Nru ubuntu-advantage-tools-30~23.10/features/logs.feature ubuntu-advantage-tools-31.2~23.10/features/logs.feature --- ubuntu-advantage-tools-30~23.10/features/logs.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/logs.feature 2024-02-29 14:03:11.000000000 +0000 @@ -1,12 +1,10 @@ Feature: Logs in Json Array Formatter - @series.all - @uses.config.machine_type.lxd-container @uses.config.contract_token Scenario Outline: The log file can be successfully parsed as json array - Given a `` machine with ubuntu-advantage-tools installed - When I run `apt update` with sudo - And I run `apt install jq -y` with sudo + Given a `` `` machine with ubuntu-advantage-tools installed + When I apt update + And I apt install `jq` And I verify that running `pro status` `with sudo` exits `0` And I verify that running `pro enable test_entitlement` `with sudo` exits `1` And I run shell command `tail /var/log/ubuntu-advantage.log | jq -r .` as non-root @@ -22,20 +20,18 @@ """ """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | - | lunar | - | mantic | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | + | mantic | lxd-container | - @series.all - @uses.config.machine_type.lxd.container Scenario Outline: Non-root user and root user log files are different - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed # Confirm user log file does not exist - When I verify `/var/log/ubuntu-advantage.log` is empty + When I run `truncate -s 0 /var/log/ubuntu-advantage.log` with sudo + And I verify `/var/log/ubuntu-advantage.log` is empty Then I verify that no files exist matching `/home/ubuntu/.cache/ubuntu-pro/ubuntu-pro.log` When I verify that running `pro status` `as non-root` exits `0` Then I verify that files exist matching `/home/ubuntu/.cache/ubuntu-pro/ubuntu-pro.log` @@ -54,18 +50,15 @@ Executed with sys.argv: ['/usr/bin/pro', 'attach' """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | - | lunar | - | mantic | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | + | mantic | lxd-container | - @series.all - @uses.config.machine_type.lxd.container Scenario Outline: Non-root user log files included in collect logs - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with 
ubuntu-advantage-tools installed When i verify that running `pro status` `with sudo` exits `0` And I verify that running `pro collect-logs` `with sudo` exits `0` And I run `tar -tf ua_logs.tar.gz` as non-root @@ -81,10 +74,57 @@ user0.log """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | - | lunar | - | mantic | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | + | mantic | lxd-container | + + Scenario Outline: logrotate configuration works + Given a `` `` machine with ubuntu-advantage-tools installed + When I run `pro status` with sudo + And I run `sh -c "ls /var/log/ubuntu-advantage* | sort -d"` as non-root + Then stdout contains substring: + """ + /var/log/ubuntu-advantage.log + """ + Then stdout does not contain substring: + """ + /var/log/ubuntu-advantage.log.1 + """ + When I run `logrotate --force /etc/logrotate.d/ubuntu-pro-client` with sudo + And I run `sh -c "ls /var/log/ubuntu-advantage* | sort -d"` as non-root + Then stdout contains substring: + """ + /var/log/ubuntu-advantage.log + /var/log/ubuntu-advantage.log.1 + """ + # reset and run logrotate with full config + When I run `rm /var/log/ubuntu-advantage.log.1` with sudo + When I run `pro status` with sudo + And I run `sh -c "ls /var/log/ubuntu-advantage* | sort -d"` as non-root + Then stdout contains substring: + """ + /var/log/ubuntu-advantage.log + """ + Then stdout does not contain substring: + """ + /var/log/ubuntu-advantage.log.1 + """ + # This uses all logrotate config files on the system + When I run `logrotate --force /etc/logrotate.conf` with sudo + And I run `sh -c "ls /var/log/ubuntu-advantage* | sort -d"` as non-root + Then stdout contains substring: + """ + /var/log/ubuntu-advantage.log + /var/log/ubuntu-advantage.log.1 + """ + + Examples: ubuntu release + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | + | mantic | lxd-container | diff -Nru ubuntu-advantage-tools-30~23.10/features/magic_attach.feature ubuntu-advantage-tools-31.2~23.10/features/magic_attach.feature --- ubuntu-advantage-tools-30~23.10/features/magic_attach.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/magic_attach.feature 2024-01-18 17:34:13.000000000 +0000 @@ -1,9 +1,7 @@ Feature: Magic attach flow related tests - @series.lts - @uses.config.machine_type.lxd-container Scenario Outline: Attach using the magic attach flow - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I create the file `/tmp/response-overlay.json` with the following: """ { @@ -49,8 +47,8 @@ And the machine is attached Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | diff -Nru ubuntu-advantage-tools-30~23.10/features/motd_messages.feature ubuntu-advantage-tools-31.2~23.10/features/motd_messages.feature --- ubuntu-advantage-tools-30~23.10/features/motd_messages.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/motd_messages.feature 2024-01-18 17:34:13.000000000 +0000 @@ -1,12 +1,9 @@ Feature: MOTD Messages - @series.xenial - @series.bionic - @uses.config.machine_type.lxd-container @uses.config.contract_token Scenario 
Outline: Contract update prevents contract expiration messages - Given a `` machine with ubuntu-advantage-tools installed - When I run `apt-get update` with sudo + Given a `` `` machine with ubuntu-advantage-tools installed + When I apt update When I attach `contract_token` with sudo When I update contract to use `effectiveTo` as `$behave_var{today +2}` When I run `pro refresh messages` with sudo @@ -43,24 +40,21 @@ [\w\d.]+ \*Your Ubuntu Pro subscription has EXPIRED\* - \d+ additional security update\(s\) require Ubuntu Pro with '' enabled. - Renew your service at https:\/\/ubuntu.com\/pro\/dashboard + \d+ additional security updates require Ubuntu Pro with '' enabled. + Renew your subscription at https:\/\/ubuntu.com\/pro\/dashboard [\w\d.]+ """ Examples: ubuntu release - | release | service | - | xenial | esm-infra | - | bionic | esm-apps | + | release | machine_type | service | + | xenial | lxd-container | esm-infra | + | bionic | lxd-container | esm-apps | - @series.xenial - @series.bionic - @uses.config.machine_type.lxd-container Scenario Outline: Contract Expiration Messages - Given a `` machine with ubuntu-advantage-tools installed - When I run `apt-get update` with sudo - And I run `apt-get install ansible -y` with sudo + Given a `` `` machine with ubuntu-advantage-tools installed + When I apt update + And I apt install `ansible` And I attach `contract_token` with sudo And I set the machine token overlay to the following yaml """ @@ -110,11 +104,11 @@ [\w\d.]+ \*Your Ubuntu Pro subscription has EXPIRED\* - \d+ additional security update\(s\) require Ubuntu Pro with '' enabled. - Renew your service at https:\/\/ubuntu.com\/pro\/dashboard + \d+ additional security updates require Ubuntu Pro with '' enabled. + Renew your subscription at https:\/\/ubuntu.com\/pro\/dashboard """ - When I run `apt-get upgrade -y` with sudo + When I apt upgrade When I run `pro refresh messages` with sudo And I run `run-parts /etc/update-motd.d/` with sudo Then stdout matches regexp: @@ -122,10 +116,10 @@ [\w\d.]+ \*Your Ubuntu Pro subscription has EXPIRED\* - Renew your service at https:\/\/ubuntu.com\/pro\/dashboard + Renew your subscription at https:\/\/ubuntu.com\/pro\/dashboard """ - When I create the file `/tmp/machine-token-overlay.json` with the following: + When I create the file `/var/lib/ubuntu-advantage/machine-token-overlay.json` with the following: """ { "machineTokenInfo": { @@ -143,10 +137,10 @@ [\w\d.]+ \*Your Ubuntu Pro subscription has EXPIRED\* - Renew your service at https:\/\/ubuntu.com\/pro\/dashboard + Renew your subscription at https:\/\/ubuntu.com\/pro\/dashboard """ Examples: ubuntu release - | release | service | - | xenial | esm-infra | - | bionic | esm-infra | + | release | machine_type | service | + | xenial | lxd-container | esm-infra | + | bionic | lxd-container | esm-infra | diff -Nru ubuntu-advantage-tools-30~23.10/features/proxy_config.feature ubuntu-advantage-tools-31.2~23.10/features/proxy_config.feature --- ubuntu-advantage-tools-30~23.10/features/proxy_config.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/proxy_config.feature 2024-01-18 17:34:13.000000000 +0000 @@ -2,16 +2,14 @@ Feature: Proxy configuration @slow - @series.lts - @uses.config.machine_type.lxd-container Scenario Outline: Attach command when proxy is configured for uaclient - Given a `` machine with ubuntu-advantage-tools installed - Given a `focal` machine named `proxy` - When I run `apt install squid -y` `with sudo` on the `proxy` machine + Given a `` `` 
machine with ubuntu-advantage-tools installed + Given a `focal` `lxd-container` machine named `proxy` + When I apt install `squid` on the `proxy` machine And I add this text on `/etc/squid/squid.conf` on `proxy` above `http_access deny all`: - """ - dns_v4_first on\nacl all src 0.0.0.0\/0\nhttp_access allow all - """ + """ + dns_v4_first on\nacl all src 0.0.0.0\/0\nhttp_access allow all + """ And I run `systemctl restart squid.service` `with sudo` on the `proxy` machine And I run `truncate -s 0 /var/log/squid/access.log` `with sudo` on the `proxy` machine And I verify `/var/log/squid/access.log` is empty on `proxy` machine @@ -41,8 +39,7 @@ """ .*CONNECT contracts.canonical.com.* """ - When I run `pro status` with sudo - Then the machine is attached + And the machine is attached When I run `truncate -s 0 /var/log/squid/access.log` `with sudo` on the `proxy` machine And I verify `/var/log/squid/access.log` is empty on `proxy` machine And I run `pro config set ua_apt_http_proxy=http://$behave_var{machine-ip proxy}:3128` with sudo @@ -84,7 +81,7 @@ Acquire::http::Proxy::esm.ubuntu.com \".*\"; Acquire::https::Proxy::esm.ubuntu.com \".*\"; """ - When I run `apt-get update` with sudo + When I apt update And I run `cat /var/log/squid/access.log` `with sudo` on the `proxy` machine Then stdout matches regexp: """ @@ -123,7 +120,7 @@ """ \"http://host:port\" is not a valid url. Not setting as proxy """ - When I run `apt install python3-pycurl -y` with sudo + When I apt install `python3-pycurl` And I verify that running `pro config set ua_apt_https_proxy=https://localhost:12345` `with sudo` exits `1` Then stderr matches regexp: """ @@ -151,24 +148,21 @@ """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | @slow - @series.xenial - @series.bionic - @uses.config.machine_type.lxd-vm Scenario Outline: Attach command when proxy is configured - Given a `` machine with ubuntu-advantage-tools installed - Given a `focal` machine named `proxy` - When I run `apt install squid -y` `with sudo` on the `proxy` machine + Given a `` `` machine with ubuntu-advantage-tools installed + Given a `focal` `lxd-container` machine named `proxy` + When I apt install `squid` on the `proxy` machine And I add this text on `/etc/squid/squid.conf` on `proxy` above `http_access deny all`: - """ - dns_v4_first on\nacl all src 0.0.0.0\/0\nhttp_access allow all - """ + """ + dns_v4_first on\nacl all src 0.0.0.0\/0\nhttp_access allow all + """ And I run `systemctl restart squid.service` `with sudo` on the `proxy` machine And I run `pro config set http_proxy=http://$behave_var{machine-ip proxy}:3128` with sudo And I run `pro config set https_proxy=http://$behave_var{machine-ip proxy}:3128` with sudo @@ -205,11 +199,11 @@ """ When I run `pro refresh config` with sudo Then I will see the following on stdout: - """ - Setting snap proxy - Setting Livepatch proxy - Successfully processed your pro configuration. - """ + """ + Setting snap proxy + Setting Livepatch proxy + Successfully processed your pro configuration. 
+ """ When I create the file `/var/lib/ubuntu-advantage/user-config.json` with the following: """ { @@ -232,7 +226,7 @@ "https_proxy": "invalidurls" } """ - And I run `apt install python3-pycurl -y` with sudo + And I apt install `python3-pycurl` And I verify that running `pro refresh config` `with sudo` exits `1` Then stderr matches regexp: """ @@ -251,23 +245,21 @@ """ Examples: ubuntu release - | release | - | xenial | - | bionic | + | release | machine_type | + | xenial | lxd-vm | + | bionic | lxd-vm | @slow - @series.lts - @uses.config.machine_type.lxd-container Scenario Outline: Attach command when authenticated proxy is configured for uaclient - Given a `` machine with ubuntu-advantage-tools installed - Given a `focal` machine named `proxy` - When I run `apt update` `with sudo` on the `proxy` machine - And I run `apt install squid apache2-utils -y` `with sudo` on the `proxy` machine + Given a `` `` machine with ubuntu-advantage-tools installed + Given a `focal` `lxd-container` machine named `proxy` + When I apt update on the `proxy` machine + And I apt install `squid apache2-utils` on the `proxy` machine And I run `htpasswd -bc /etc/squid/passwordfile someuser somepassword` `with sudo` on the `proxy` machine And I add this text on `/etc/squid/squid.conf` on `proxy` above `http_access deny all`: - """ - dns_v4_first on\nauth_param basic program \/usr\/lib\/squid\/basic_ncsa_auth \/etc\/squid\/passwordfile\nacl topsecret proxy_auth REQUIRED\nhttp_access allow topsecret - """ + """ + dns_v4_first on\nauth_param basic program \/usr\/lib\/squid\/basic_ncsa_auth \/etc\/squid\/passwordfile\nacl topsecret proxy_auth REQUIRED\nhttp_access allow topsecret + """ And I run `systemctl restart squid.service` `with sudo` on the `proxy` machine And I run `truncate -s 0 /var/log/squid/access.log` `with sudo` on the `proxy` machine And I verify `/var/log/squid/access.log` is empty on `proxy` machine @@ -320,7 +312,7 @@ .*CONNECT esm.ubuntu.com.* """ When I run `pro refresh config` with sudo - And I run `apt-get update` with sudo + And I apt update And I run `cat /var/log/squid/access.log` `with sudo` on the `proxy` machine Then stdout matches regexp: """ @@ -345,26 +337,23 @@ """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | @slow - @series.xenial - @series.bionic - @uses.config.machine_type.lxd-vm Scenario Outline: Attach command when authenticated proxy is configured - Given a `` machine with ubuntu-advantage-tools installed - Given a `focal` machine named `proxy` - When I run `apt update` `with sudo` on the `proxy` machine - And I run `apt install squid apache2-utils -y` `with sudo` on the `proxy` machine + Given a `` `` machine with ubuntu-advantage-tools installed + Given a `focal` `lxd-container` machine named `proxy` + When I apt update on the `proxy` machine + And I apt install `squid apache2-utils` on the `proxy` machine And I run `htpasswd -bc /etc/squid/passwordfile someuser somepassword` `with sudo` on the `proxy` machine And I add this text on `/etc/squid/squid.conf` on `proxy` above `http_access deny all`: - """ - dns_v4_first on\nauth_param basic program \/usr\/lib\/squid\/basic_ncsa_auth \/etc\/squid\/passwordfile\nacl topsecret proxy_auth REQUIRED\nhttp_access allow topsecret - """ + """ + dns_v4_first on\nauth_param basic program \/usr\/lib\/squid\/basic_ncsa_auth \/etc\/squid\/passwordfile\nacl 
topsecret proxy_auth REQUIRED\nhttp_access allow topsecret + """ And I run `systemctl restart squid.service` `with sudo` on the `proxy` machine And I run `truncate -s 0 /var/log/squid/access.log` `with sudo` on the `proxy` machine And I verify `/var/log/squid/access.log` is empty on `proxy` machine @@ -399,21 +388,19 @@ """ Examples: ubuntu release - | release | - | xenial | - | bionic | + | release | machine_type | + | xenial | lxd-vm | + | bionic | lxd-vm | @slow - @series.lts - @uses.config.machine_type.lxd-container Scenario Outline: Attach command when proxy is configured manually via conf file for uaclient - Given a `` machine with ubuntu-advantage-tools installed - Given a `focal` machine named `proxy` - When I run `apt install squid -y` `with sudo` on the `proxy` machine + Given a `` `` machine with ubuntu-advantage-tools installed + Given a `focal` `lxd-container` machine named `proxy` + When I apt install `squid` on the `proxy` machine And I add this text on `/etc/squid/squid.conf` on `proxy` above `http_access deny all`: - """ - dns_v4_first on\nacl all src 0.0.0.0\/0\nhttp_access allow all - """ + """ + dns_v4_first on\nacl all src 0.0.0.0\/0\nhttp_access allow all + """ And I run `systemctl restart squid.service` `with sudo` on the `proxy` machine When I create the file `/var/lib/ubuntu-advantage/user-config.json` with the following: """ @@ -430,8 +417,7 @@ """ .*CONNECT contracts.canonical.com.* """ - When I run `pro status` with sudo - Then the machine is attached + And the machine is attached When I run `truncate -s 0 /var/log/squid/access.log` `with sudo` on the `proxy` machine When I create the file `/var/lib/ubuntu-advantage/user-config.json` with the following: """ @@ -466,7 +452,7 @@ Acquire::http::Proxy::esm.ubuntu.com \".*\"; Acquire::https::Proxy::esm.ubuntu.com \".*\"; """ - When I run `apt-get update` with sudo + When I apt update And I run `cat /var/log/squid/access.log` `with sudo` on the `proxy` machine Then stdout matches regexp: """ @@ -511,7 +497,7 @@ "ua_apt_https_proxy": "https://localhost:12345" } """ - And I run `apt install python3-pycurl -y` with sudo + And I apt install `python3-pycurl` And I verify that running `pro refresh config` `with sudo` exits `1` Then stderr matches regexp: """ @@ -539,25 +525,23 @@ """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | @slow - @series.lts - @uses.config.machine_type.lxd-container Scenario Outline: Attach command when authenticated proxy is configured manually for uaclient - Given a `` machine with ubuntu-advantage-tools installed - Given a `focal` machine named `proxy` - When I run `apt update` `with sudo` on the `proxy` machine - And I run `apt install squid apache2-utils -y` `with sudo` on the `proxy` machine + Given a `` `` machine with ubuntu-advantage-tools installed + Given a `focal` `lxd-container` machine named `proxy` + When I apt update on the `proxy` machine + And I apt install `squid apache2-utils` on the `proxy` machine And I run `htpasswd -bc /etc/squid/passwordfile someuser somepassword` `with sudo` on the `proxy` machine And I add this text on `/etc/squid/squid.conf` on `proxy` above `http_access deny all`: - """ - dns_v4_first on\nauth_param basic program \/usr\/lib\/squid\/basic_ncsa_auth \/etc\/squid\/passwordfile\nacl topsecret proxy_auth REQUIRED\nhttp_access allow topsecret - """ + """ + dns_v4_first 
on\nauth_param basic program \/usr\/lib\/squid\/basic_ncsa_auth \/etc\/squid\/passwordfile\nacl topsecret proxy_auth REQUIRED\nhttp_access allow topsecret + """ And I run `systemctl restart squid.service` `with sudo` on the `proxy` machine When I create the file `/var/lib/ubuntu-advantage/user-config.json` with the following: """ @@ -583,7 +567,7 @@ """ And I verify `/var/log/squid/access.log` is empty on `proxy` machine And I run `pro refresh config` with sudo - And I run `apt-get update` with sudo + And I apt update And I run `cat /var/log/squid/access.log` `with sudo` on the `proxy` machine Then stdout matches regexp: """ @@ -607,7 +591,7 @@ "ua_apt_https_proxy": "http://wronguser:wrongpassword@$behave_var{machine-ip proxy}:3128" } """ - And I run `apt install python3-pycurl -y` with sudo + And I apt install `python3-pycurl` And I verify that running `pro refresh config` `with sudo` exits `1` Then stderr matches regexp: """ @@ -615,23 +599,21 @@ """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | @slow - @series.lts - @uses.config.machine_type.lxd-container Scenario Outline: Attach command when proxy is configured globally - Given a `` machine with ubuntu-advantage-tools installed - Given a `focal` machine named `proxy` - When I run `apt install squid -y` `with sudo` on the `proxy` machine + Given a `` `` machine with ubuntu-advantage-tools installed + Given a `focal` `lxd-container` machine named `proxy` + When I apt install `squid` on the `proxy` machine And I add this text on `/etc/squid/squid.conf` on `proxy` above `http_access deny all`: - """ - dns_v4_first on\nacl all src 0.0.0.0\/0\nhttp_access allow all - """ + """ + dns_v4_first on\nacl all src 0.0.0.0\/0\nhttp_access allow all + """ And I run `systemctl restart squid.service` `with sudo` on the `proxy` machine And I run `truncate -s 0 /var/log/squid/access.log` `with sudo` on the `proxy` machine And I verify `/var/log/squid/access.log` is empty on `proxy` machine @@ -656,7 +638,7 @@ .*HEAD http://api.snapcraft.io.* """ # We need this for the route command - When I run `apt-get install net-tools` with sudo + When I apt install `net-tools` # We will guarantee that the machine will only use the proxy when # running the pro commands And I run `route del default` with sudo @@ -666,8 +648,7 @@ """ .*CONNECT contracts.canonical.com.* """ - When I run `pro status` with sudo - Then the machine is attached + And the machine is attached When I run `truncate -s 0 /var/log/squid/access.log` `with sudo` on the `proxy` machine And I verify `/var/log/squid/access.log` is empty on `proxy` machine And I run `pro config set global_apt_http_proxy=http://$behave_var{machine-ip proxy}:3128` with sudo @@ -710,7 +691,7 @@ Acquire::http::Proxy \".*\"; Acquire::https::Proxy \".*\"; """ - When I run `apt-get update` with sudo + When I apt update And I run `cat /var/log/squid/access.log` `with sudo` on the `proxy` machine Then stdout matches regexp: """ @@ -749,7 +730,7 @@ """ \"http://host:port\" is not a valid url. 
Not setting as proxy """ - When I run `apt install python3-pycurl -y` with sudo + When I apt install `python3-pycurl` And I verify that running `pro config set global_apt_https_proxy=https://localhost:12345` `with sudo` exits `1` Then stderr matches regexp: """ @@ -777,25 +758,23 @@ """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | @slow - @series.lts - @uses.config.machine_type.lxd-container Scenario Outline: Attach command when authenticated proxy is configured globally - Given a `` machine with ubuntu-advantage-tools installed - Given a `focal` machine named `proxy` - When I run `apt update` `with sudo` on the `proxy` machine - And I run `apt install squid apache2-utils -y` `with sudo` on the `proxy` machine + Given a `` `` machine with ubuntu-advantage-tools installed + Given a `focal` `lxd-container` machine named `proxy` + When I apt update on the `proxy` machine + And I apt install `squid apache2-utils` on the `proxy` machine And I run `htpasswd -bc /etc/squid/passwordfile someuser somepassword` `with sudo` on the `proxy` machine And I add this text on `/etc/squid/squid.conf` on `proxy` above `http_access deny all`: - """ - dns_v4_first on\nauth_param basic program \/usr\/lib\/squid\/basic_ncsa_auth \/etc\/squid\/passwordfile\nacl topsecret proxy_auth REQUIRED\nhttp_access allow topsecret - """ + """ + dns_v4_first on\nauth_param basic program \/usr\/lib\/squid\/basic_ncsa_auth \/etc\/squid\/passwordfile\nacl topsecret proxy_auth REQUIRED\nhttp_access allow topsecret + """ And I run `systemctl restart squid.service` `with sudo` on the `proxy` machine And I run `truncate -s 0 /var/log/squid/access.log` `with sudo` on the `proxy` machine And I verify `/var/log/squid/access.log` is empty on `proxy` machine @@ -819,7 +798,7 @@ """ .*HEAD http://api.snapcraft.io.* """ - When I run `apt-get install net-tools` with sudo + When I apt install `net-tools` # We will guarantee that the machine will only use the proxy when # running the pro commands And I run `route del default` with sudo @@ -852,7 +831,7 @@ .*CONNECT esm.ubuntu.com.* """ When I run `pro refresh config` with sudo - And I run `apt-get update` with sudo + And I apt update And I run `cat /var/log/squid/access.log` `with sudo` on the `proxy` machine Then stdout matches regexp: """ @@ -877,23 +856,21 @@ """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | @slow - @series.lts - @uses.config.machine_type.lxd-container Scenario Outline: Get warning when configuring global or uaclient proxy - Given a `` machine with ubuntu-advantage-tools installed - Given a `focal` machine named `proxy` - When I run `apt install squid -y` `with sudo` on the `proxy` machine + Given a `` `` machine with ubuntu-advantage-tools installed + Given a `focal` `lxd-container` machine named `proxy` + When I apt install `squid` on the `proxy` machine And I add this text on `/etc/squid/squid.conf` on `proxy` above `http_access deny all`: - """ - dns_v4_first on\nacl all src 0.0.0.0\/0\nhttp_access allow all - """ + """ + dns_v4_first on\nacl all src 0.0.0.0\/0\nhttp_access allow all + """ And I run `systemctl restart squid.service` `with sudo` on the `proxy` machine And I run `truncate -s 0 
/var/log/squid/access.log` `with sudo` on the `proxy` machine And I verify `/var/log/squid/access.log` is empty on `proxy` machine @@ -918,7 +895,7 @@ Acquire::http::Proxy \".*\"; Acquire::https::Proxy \".*\"; """ - When I run `apt-get update` with sudo + When I apt update And I run `cat /var/log/squid/access.log` `with sudo` on the `proxy` machine Then stdout matches regexp: """ @@ -1030,19 +1007,17 @@ """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | @slow - @series.lts - @uses.config.machine_type.lxd-container Scenario Outline: apt_http(s)_proxy still works - Given a `` machine with ubuntu-advantage-tools installed - Given a `focal` machine named `proxy` - When I run `apt install squid -y` `with sudo` on the `proxy` machine + Given a `` `` machine with ubuntu-advantage-tools installed + Given a `focal` `lxd-container` machine named `proxy` + When I apt install `squid` on the `proxy` machine And I add this text on `/etc/squid/squid.conf` on `proxy` above `http_access deny all`: """ dns_v4_first on\nacl all src 0.0.0.0\/0\nhttp_access allow all @@ -1097,7 +1072,7 @@ """ When I run `truncate -s 0 /var/log/squid/access.log` `with sudo` on the `proxy` machine And I verify `/var/log/squid/access.log` is empty on `proxy` machine - When I run `apt-get update` with sudo + When I apt update And I run `cat /var/log/squid/access.log` `with sudo` on the `proxy` machine Then stdout matches regexp: """ @@ -1156,7 +1131,7 @@ Acquire::http::Proxy \".*:3128\"; Acquire::https::Proxy \".*:3128\"; """ - When I run `apt install python3-pycurl -y` with sudo + When I apt install `python3-pycurl` And I verify that running `pro config set apt_https_proxy=https://localhost:12345` `with sudo` exits `1` Then stdout matches regexp: """ @@ -1167,53 +1142,45 @@ \"https://localhost:12345\" is not working. Not setting as proxy. 
""" Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | @slow - @series.jammy - @uses.config.machine_type.lxd-vm Scenario: Enable realtime kernel through proxy on a machine with no internet - Given a `jammy` machine with ubuntu-advantage-tools installed + Given a `jammy` `lxd-vm` machine with ubuntu-advantage-tools installed When I disable any internet connection on the machine - Given a `focal` machine named `proxy` - When I run `apt install squid -y` `with sudo` on the `proxy` machine + Given a `focal` `lxd-container` machine named `proxy` + When I apt install `squid` on the `proxy` machine And I add this text on `/etc/squid/squid.conf` on `proxy` above `http_access deny all`: - """ - dns_v4_first on\nacl all src 0.0.0.0\/0\nhttp_access allow all - """ + """ + dns_v4_first on\nacl all src 0.0.0.0\/0\nhttp_access allow all + """ And I run `systemctl restart squid.service` `with sudo` on the `proxy` machine And I run `pro config set https_proxy=http://$behave_var{machine-ip proxy}:3128` with sudo And I run `pro config set http_proxy=http://$behave_var{machine-ip proxy}:3128` with sudo And I run `pro config set global_apt_http_proxy=http://$behave_var{machine-ip proxy}:3128` with sudo And I run `pro config set global_apt_https_proxy=http://$behave_var{machine-ip proxy}:3128` with sudo And I attach `contract_token` with sudo - Then stdout matches regexp: - """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - """ + Then I verify that `esm-apps` is enabled + And I verify that `esm-infra` is enabled When I run `pro disable livepatch --assume-yes` with sudo When I run `pro enable realtime-kernel` `with sudo` and stdin `y` - Then stdout matches regexp: + Then stdout contains substring: """ Installing Real-time kernel packages Real-time kernel enabled A reboot is required to complete install. """ - @series.lts - @uses.config.machine_type.any - @uses.config.machine_type.lxd-vm Scenario Outline: Support HTTPS-in-HTTPS proxies Given a `` `` machine with ubuntu-advantage-tools installed # set up a HTTPS proxy Given a `jammy` `` machine named `proxy` - When I run `apt update` `with sudo` on the `proxy` machine + When I apt update on the `proxy` machine And I apt install `openssl libssl-dev ssl-cert squid-openssl apache2-utils` on the `proxy` machine And I run `openssl req -newkey rsa:4096 -x509 -sha256 -days 3650 -nodes -out ca.crt -keyout ca.key -subj "/C=CN/ST=BJ/O=STS/CN=CA"` `with sudo` on the `proxy` machine And I run `openssl genrsa -out $behave_var{machine-name proxy}.lxd.key` `with sudo` on the `proxy` machine @@ -1263,7 +1230,7 @@ Proxy authentication failed """ - When I run `apt remove ca-certificates -y` with sudo + When I apt remove `ca-certificates` And I run `rm -f /etc/ssl/certs/ca-certificates.crt` with sudo And I verify that running `pro config set https_proxy=https://someuser:somepassword@$behave_var{machine-name proxy}.lxd:3129` `with sudo` exits `1` Then stderr matches regexp: @@ -1273,7 +1240,7 @@ Please install "ca-certificates" and try again. 
""" - When I run `apt install ca-certificates -y` with sudo + When I apt install `ca-certificates` And I run `update-ca-certificates` with sudo And I run `pro config set https_proxy=https://someuser:somepassword@$behave_var{machine-name proxy}.lxd:3129` with sudo And I run `pro config set ua_apt_https_proxy=https://someuser:somepassword@$behave_var{machine-name proxy}.lxd:3129` with sudo @@ -1303,7 +1270,7 @@ """ # Pre-install canonical-livepatch to tell it to trust the cert - When I run `apt install snapd -y` with sudo + When I apt install `snapd` And I run `snap install canonical-livepatch` with sudo And I run shell command `canonical-livepatch config ca-certs=@stdin < /usr/local/share/ca-certificates/ca.crt` with sudo @@ -1320,7 +1287,7 @@ """ When I run `truncate -s 0 /var/log/squid/access.log` `with sudo` on the `proxy` machine - And I run `apt-get install hello` with sudo + And I apt install `hello` And I run `cat /var/log/squid/access.log` `with sudo` on the `proxy` machine Then stdout contains substring """ diff -Nru ubuntu-advantage-tools-30~23.10/features/realtime_kernel.feature ubuntu-advantage-tools-31.2~23.10/features/realtime_kernel.feature --- ubuntu-advantage-tools-30~23.10/features/realtime_kernel.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/realtime_kernel.feature 2024-02-14 15:37:46.000000000 +0000 @@ -1,10 +1,8 @@ @uses.config.contract_token Feature: Enable command behaviour when attached to an Ubuntu Pro subscription - @series.jammy - @uses.config.machine_type.lxd-container Scenario Outline: Enable Real-time kernel service in a container - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo and options `--no-auto-enable` Then I verify that running `pro enable realtime-kernel` `as non-root` exits `1` And I will see the following on stderr: @@ -18,13 +16,11 @@ Cannot install Real-time kernel on a container. """ Examples: ubuntu release - | release | - | jammy | + | release | machine_type | + | jammy | lxd-container | - @series.lts - @uses.config.machine_type.lxd-vm Scenario Outline: Enable Real-time kernel service on unsupported release - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo and options `--no-auto-enable` Then I verify that running `pro enable realtime-kernel` `as non-root` exits `1` And I will see the following on stderr: @@ -38,15 +34,13 @@ Real-time kernel is not available for Ubuntu (). 
""" Examples: ubuntu release - | release | version | full_name | - | xenial | 16.04 LTS | Xenial Xerus | - | bionic | 18.04 LTS | Bionic Beaver | - | focal | 20.04 LTS | Focal Fossa | + | release | machine_type | version | full_name | + | xenial | lxd-vm | 16.04 LTS | Xenial Xerus | + | bionic | lxd-vm | 18.04 LTS | Bionic Beaver | + | focal | lxd-vm | 20.04 LTS | Focal Fossa | - @series.jammy - @uses.config.machine_type.lxd-vm Scenario Outline: Enable Real-time kernel service - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo and options `--no-auto-enable` Then I verify that running `pro enable realtime-kernel` `as non-root` exits `1` And I will see the following on stderr: @@ -66,7 +60,7 @@ Updating standard Ubuntu package lists Installing Real-time kernel packages Real-time kernel enabled - A reboot is required to complete install. + A reboot is required to complete install\. """ When I run `apt-cache policy ubuntu-realtime` as non-root Then stdout does not match regexp: @@ -203,11 +197,16 @@ resourceEntitlements: - type: realtime-kernel overrides: - - directives: + - selector: + variant: nvidia-tegra + directives: additionalPackages: - nvidia-prime - selector: - variant: nvidia-tegra + - selector: + variant: rpi + directives: + additionalPackages: + - raspi-config """ When I run `pro enable realtime-kernel --variant nvidia-tegra` `with sudo` and stdin `y` Then stdout matches regexp: @@ -241,7 +240,8 @@ realtime-kernel yes +enabled +Ubuntu kernel with PREEMPT_RT patches integrated ├ generic yes +disabled +Generic version of the RT kernel \(default\) ├ intel-iotg yes +disabled +RT kernel optimized for Intel IOTG platform - └ nvidia-tegra yes +enabled +RT kernel optimized for NVIDIA Tegra platform + ├ nvidia-tegra yes +enabled +RT kernel optimized for NVIDIA Tegra platform + └ rpi yes +disabled +24.04 Real-time kernel optimised for Raspberry Pi """ When I verify that running `pro enable realtime-kernel --variant intel-iotg` `with sudo` and stdin `N` exits `1` Then stdout matches regexp: @@ -253,6 +253,16 @@ """ Cannot enable Real-time Intel IOTG Kernel when Real-time NVIDIA Tegra Kernel is enabled. 
""" + When I run `pro enable realtime-kernel --variant rpi --assume-yes` with sudo + When I run `pro status --all` as non-root + Then stdout matches regexp: + """ + realtime-kernel yes +enabled +Ubuntu kernel with PREEMPT_RT patches integrated + ├ generic yes +disabled +Generic version of the RT kernel \(default\) + ├ intel-iotg yes +disabled +RT kernel optimized for Intel IOTG platform + ├ nvidia-tegra yes +disabled +RT kernel optimized for NVIDIA Tegra platform + └ rpi yes +enabled +24.04 Real-time kernel optimised for Raspberry Pi + """ When I run `pro help realtime-kernel` as non-root Then I will see the following on stdout: """ @@ -278,6 +288,7 @@ * generic: Generic version of the RT kernel (default) * intel-iotg: RT kernel optimized for Intel IOTG platform * nvidia-tegra: RT kernel optimized for NVIDIA Tegra platform + * rpi: 24.04 Real-time kernel optimised for Raspberry Pi """ When I run `pro disable realtime-kernel` `with sudo` and stdin `y` Then stdout matches regexp: @@ -305,7 +316,8 @@ realtime-kernel yes +disabled +Ubuntu kernel with PREEMPT_RT patches integrated ├ generic yes +disabled +Generic version of the RT kernel \(default\) ├ intel-iotg yes +disabled +RT kernel optimized for Intel IOTG platform - └ nvidia-tegra yes +disabled +RT kernel optimized for NVIDIA Tegra platform + ├ nvidia-tegra yes +disabled +RT kernel optimized for NVIDIA Tegra platform + └ rpi yes +disabled +24.04 Real-time kernel optimised for Raspberry Pi """ When I verify that running `pro enable realtime-kernel --variant nonexistent` `with sudo` exits `1` Then I will see the following on stdout: @@ -334,13 +346,11 @@ """ Examples: ubuntu release - | release | - | jammy | + | release | machine_type | + | jammy | lxd-vm | - @series.jammy - @uses.config.machine_type.lxd-vm Scenario Outline: Enable Real-time kernel service access-only - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo and options `--no-auto-enable` When I run `pro enable realtime-kernel --access-only` with sudo Then stdout matches regexp: @@ -363,7 +373,7 @@ """ \s* 500 https://esm.ubuntu.com/realtime/ubuntu /main amd64 Packages """ - When I run `apt-get install -y ubuntu-realtime` with sudo + When I apt install `ubuntu-realtime` When I reboot the machine When I run `uname -r` as non-root Then stdout matches regexp: @@ -371,5 +381,5 @@ realtime """ Examples: ubuntu release - | release | - | jammy | + | release | machine_type | + | jammy | lxd-vm | diff -Nru ubuntu-advantage-tools-30~23.10/features/reboot_cmds.feature ubuntu-advantage-tools-31.2~23.10/features/reboot_cmds.feature --- ubuntu-advantage-tools-30~23.10/features/reboot_cmds.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/reboot_cmds.feature 2024-01-18 17:34:13.000000000 +0000 @@ -1,20 +1,14 @@ @uses.config.contract_token Feature: Reboot Commands - @series.focal - @uses.config.machine_type.lxd-container Scenario Outline: reboot-cmds removes fips package holds and updates packages - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo - When I run `apt install -y strongswan` with sudo + When I apt install `strongswan` When I run `pro enable fips --assume-yes` with sudo When I reboot the machine - When I run `pro status` with sudo - Then stdout matches regexp: - """ - fips +yes +enabled - """ - When I run `apt install -y 
--allow-downgrades strongswan=` with sudo + Then I verify that `fips` is enabled + When I apt install `strongswan=` When I run `apt-mark hold strongswan` with sudo When I run `dpkg-reconfigure ubuntu-advantage-tools` with sudo When I run `pro status` with sudo @@ -44,5 +38,5 @@ *** 1001 """ Examples: ubuntu release - | release | old_version | new_version | - | focal | 5.8.2-1ubuntu3.5 | 5.8.2-1ubuntu3.fips.3.1.2 | + | release | machine_type | old_version | new_version | + | focal | lxd-container | 5.8.2-1ubuntu3 | 5.8.2-1ubuntu3.fips.3.1.2 | diff -Nru ubuntu-advantage-tools-30~23.10/features/retry_auto_attach.feature ubuntu-advantage-tools-31.2~23.10/features/retry_auto_attach.feature --- ubuntu-advantage-tools-30~23.10/features/retry_auto_attach.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/retry_auto_attach.feature 2024-02-14 15:37:46.000000000 +0000 @@ -1,11 +1,7 @@ Feature: auto-attach retries periodically on failures - @series.lts - @uses.config.machine_type.aws.generic - @uses.config.machine_type.azure.generic - @uses.config.machine_type.gcp.generic Scenario Outline: auto-attach retries for a month and updates status - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I change contract to staging with sudo When I install ubuntu-advantage-pro When I reboot the machine @@ -18,6 +14,7 @@ """ creating flag file to trigger retries """ + When I wait `20` seconds Then I verify that running `systemctl status ubuntu-advantage.service` `with sudo` exits `0` Then stdout matches regexp: """ @@ -34,7 +31,7 @@ When I run `run-parts /etc/update-motd.d/` with sudo Then stdout matches regexp: """ - Failed to automatically attach to Ubuntu Pro services 1 time\(s\). + Failed to automatically attach to an Ubuntu Pro subscription 1 time\(s\). The failure was due to: Canonical servers did not recognize this machine as Ubuntu Pro: ".*". The next attempt is scheduled for \d+-\d+-\d+T\d+:\d+:00.*. You can try manually with `sudo pro auto-attach`. @@ -43,7 +40,7 @@ Then stdout matches regexp: """ NOTICES - Failed to automatically attach to Ubuntu Pro services 1 time\(s\). + Failed to automatically attach to an Ubuntu Pro subscription 1 time\(s\). The failure was due to: Canonical servers did not recognize this machine as Ubuntu Pro: ".*". The next attempt is scheduled for \d+-\d+-\d+T\d+:\d+:00.*. You can try manually with `sudo pro auto-attach`. @@ -53,6 +50,7 @@ When I set `interval_index` = `10` in json file `/var/lib/ubuntu-advantage/retry-auto-attach-state.json` When I set `failure_reason` = `"an unknown error"` in json file `/var/lib/ubuntu-advantage/retry-auto-attach-state.json` When I run `systemctl restart ubuntu-advantage.service` with sudo + And I wait `5` seconds Then I verify that running `systemctl status ubuntu-advantage.service` `with sudo` exits `0` Then stdout matches regexp: """ @@ -69,7 +67,7 @@ When I run `run-parts /etc/update-motd.d/` with sudo Then stdout matches regexp: """ - Failed to automatically attach to Ubuntu Pro services 11 time\(s\). + Failed to automatically attach to an Ubuntu Pro subscription 11 time\(s\). The failure was due to: an unknown error. The next attempt is scheduled for \d+-\d+-\d+T\d+:\d+:00.*. You can try manually with `sudo pro auto-attach`. @@ -78,7 +76,7 @@ Then stdout matches regexp: """ NOTICES - Failed to automatically attach to Ubuntu Pro services 11 time\(s\). + Failed to automatically attach to an Ubuntu Pro subscription 11 time\(s\). 
The failure was due to: an unknown error. The next attempt is scheduled for \d+-\d+-\d+T\d+:\d+:00.*. You can try manually with `sudo pro auto-attach`. @@ -87,6 +85,7 @@ # simulate all attempts failing When I set `interval_index` = `18` in json file `/var/lib/ubuntu-advantage/retry-auto-attach-state.json` When I run `systemctl restart ubuntu-advantage.service` with sudo + And I wait `5` seconds Then I verify that running `systemctl status ubuntu-advantage.service` `with sudo` exits `3` Then stdout contains substring """ @@ -103,7 +102,7 @@ When I run `run-parts /etc/update-motd.d/` with sudo Then stdout matches regexp: """ - Failed to automatically attach to Ubuntu Pro services 19 time\(s\). + Failed to automatically attach to an Ubuntu Pro subscription 19 time\(s\). The most recent failure was due to: an unknown error. Try re-launching the instance or report this issue by running `ubuntu-bug ubuntu-advantage-tools` You can try manually with `sudo pro auto-attach`. @@ -112,25 +111,29 @@ Then stdout matches regexp: """ NOTICES - Failed to automatically attach to Ubuntu Pro services 19 time\(s\). + Failed to automatically attach to an Ubuntu Pro subscription 19 time\(s\). The most recent failure was due to: an unknown error. Try re-launching the instance or report this issue by running `ubuntu-bug ubuntu-advantage-tools` You can try manually with `sudo pro auto-attach`. """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | aws.generic | + | xenial | azure.generic | + | xenial | gcp.generic | + | bionic | aws.generic | + | bionic | azure.generic | + | bionic | gcp.generic | + | focal | aws.generic | + | focal | azure.generic | + | focal | gcp.generic | + | jammy | aws.generic | + | jammy | azure.generic | + | jammy | gcp.generic | - @series.lts - @uses.config.machine_type.aws.pro - @uses.config.machine_type.azure.pro - @uses.config.machine_type.gcp.pro Scenario Outline: auto-attach retries stop if manual auto-attach succeeds - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: """ contract_url: 'https://contracts.canonical.com' @@ -142,7 +145,7 @@ When I create the file `/var/lib/ubuntu-advantage/response-overlay.json` with the following: """ { - "https://contracts.canonical.com/v1/clouds/$behave_var{cloud}/token": [{ + "https://contracts.canonical.com/v1/clouds/$behave_var{cloud system-under-test}/token": [{ "type": "contract", "code": 400, "response": { @@ -167,16 +170,17 @@ """ Active: active \(running\) """ + When I wait `20` seconds When I run `run-parts /etc/update-motd.d/` with sudo Then stdout matches regexp: """ - Failed to automatically attach to Ubuntu Pro services + Failed to automatically attach to an Ubuntu Pro subscription """ When I run `pro status` with sudo Then stdout matches regexp: """ NOTICES - Failed to automatically attach to Ubuntu Pro services + Failed to automatically attach to an Ubuntu Pro subscription """ When I append the following on uaclient config: """ @@ -194,25 +198,31 @@ Then I verify that running `run-parts /etc/update-motd.d/` `with sudo` exits `0,1` Then stdout does not match regexp: """ - Failed to automatically attach to Ubuntu Pro services + Failed to automatically attach to an Ubuntu Pro subscription """ When I run `pro status` with sudo Then stdout does not match regexp: """ NOTICES - Failed to automatically attach to 
Ubuntu Pro services + Failed to automatically attach to an Ubuntu Pro subscription """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | aws.pro | + | xenial | azure.pro | + | xenial | gcp.pro | + | bionic | aws.pro | + | bionic | azure.pro | + | bionic | gcp.pro | + | focal | aws.pro | + | focal | azure.pro | + | focal | gcp.pro | + | jammy | aws.pro | + | jammy | azure.pro | + | jammy | gcp.pro | - @series.lts - @uses.config.machine_type.gcp.pro Scenario Outline: gcp auto-detect triggers retries on fail - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: """ contract_url: 'https://contracts.canonical.com' @@ -260,28 +270,24 @@ When I run `run-parts /etc/update-motd.d/` with sudo Then stdout matches regexp: """ - Failed to automatically attach to Ubuntu Pro services + Failed to automatically attach to an Ubuntu Pro subscription """ When I run `pro status` with sudo Then stdout matches regexp: """ NOTICES - Failed to automatically attach to Ubuntu Pro services + Failed to automatically attach to an Ubuntu Pro subscription """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | gcp.pro | + | bionic | gcp.pro | + | focal | gcp.pro | + | jammy | gcp.pro | - @series.lts - @uses.config.machine_type.aws.pro - @uses.config.machine_type.azure.pro - @uses.config.machine_type.gcp.pro Scenario Outline: auto-attach retries eventually succeed and clean up - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed # modify the wait time to be shorter so we don't have to wait 15m When I replace `900, # 15m (T+15m)` in `/usr/lib/python3/dist-packages/uaclient/daemon/retry_auto_attach.py` with `60,` When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: @@ -295,7 +301,7 @@ When I create the file `/var/lib/ubuntu-advantage/response-overlay.json` with the following: """ { - "https://contracts.canonical.com/v1/clouds/$behave_var{cloud}/token": [{ + "https://contracts.canonical.com/v1/clouds/$behave_var{cloud system-under-test}/token": [{ "type": "contract", "code": 400, "response": { @@ -320,16 +326,17 @@ """ Active: active \(running\) """ + When I wait `20` seconds When I run `run-parts /etc/update-motd.d/` with sudo Then stdout matches regexp: """ - Failed to automatically attach to Ubuntu Pro services + Failed to automatically attach to an Ubuntu Pro subscription """ When I run `pro status` with sudo Then stdout matches regexp: """ NOTICES - Failed to automatically attach to Ubuntu Pro services + Failed to automatically attach to an Ubuntu Pro subscription """ When I append the following on uaclient config: """ @@ -347,17 +354,25 @@ Then I verify that running `run-parts /etc/update-motd.d/` `with sudo` exits `0,1` Then stdout does not match regexp: """ - Failed to automatically attach to Ubuntu Pro services + Failed to automatically attach to an Ubuntu Pro subscription """ When I run `pro status` with sudo Then stdout does not match regexp: """ NOTICES - Failed to automatically attach to Ubuntu Pro services + Failed to automatically attach to an Ubuntu Pro subscription """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | aws.pro | + | 
xenial | azure.pro | + | xenial | gcp.pro | + | bionic | aws.pro | + | bionic | azure.pro | + | bionic | gcp.pro | + | focal | aws.pro | + | focal | azure.pro | + | focal | gcp.pro | + | jammy | aws.pro | + | jammy | azure.pro | + | jammy | gcp.pro | diff -Nru ubuntu-advantage-tools-30~23.10/features/security_status.feature ubuntu-advantage-tools-31.2~23.10/features/security_status.feature --- ubuntu-advantage-tools-30~23.10/features/security_status.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/security_status.feature 2024-01-18 17:34:13.000000000 +0000 @@ -1,13 +1,10 @@ Feature: Security status command behavior - @series.xenial - @series.bionic - @uses.config.machine_type.lxd-container @uses.config.contract_token Scenario Outline: Run security status with JSON/YAML format - Given a `` machine with ubuntu-advantage-tools installed - When I run `apt-get update` with sudo - And I run `apt-get install ansible -y` with sudo + Given a `` `` machine with ubuntu-advantage-tools installed + When I apt update + And I apt install `ansible` And I run `pro security-status --format json` as non-root Then stdout is a json matching the `ua_security_status` schema And stdout matches regexp: @@ -87,15 +84,13 @@ argument --format: invalid choice: 'unsupported' (choose from 'json', 'yaml', 'text') """ Examples: ubuntu release - | release | package | service | - | xenial | apport | esm-infra | - | bionic | ansible | esm-apps | + | release | machine_type | package | service | + | xenial | lxd-container | apport | esm-infra | + | bionic | lxd-container | ansible | esm-apps | - @series.xenial - @uses.config.machine_type.lxd-vm @uses.config.contract_token Scenario: Check for livepatch CVEs in security-status on an Ubuntu machine - Given a `xenial` machine with ubuntu-advantage-tools installed + Given a `xenial` `lxd-vm` machine with ubuntu-advantage-tools installed When I attach `contract_token` with sudo And I run `pro security-status --format json` as non-root Then stdout is a json matching the `ua_security_status` schema @@ -111,14 +106,12 @@ patched: true """ - @series.xenial - @uses.config.machine_type.lxd-container @uses.config.contract_token Scenario: Run security status in an Ubuntu machine - Given a `xenial` machine with ubuntu-advantage-tools installed + Given a `xenial` `lxd-container` machine with ubuntu-advantage-tools installed When I install third-party / unknown packages in the machine # Ansible is in esm-apps - And I run `apt-get install -y ansible` with sudo + And I apt install `ansible` And I verify root and non-root `pro security-status` calls have the same output And I run `pro security-status` as non-root Then stdout matches regexp: @@ -260,7 +253,7 @@ apt-cache show .+ to learn more about that package\. """ - When I run `apt upgrade -y` with sudo + When I apt upgrade And I verify root and non-root `pro security-status` calls have the same output And I run `pro security-status` as non-root Then stdout matches regexp: @@ -404,7 +397,7 @@ for a list of available options\. The system apt cache may be outdated\. Make sure to run - sudo apt-get update + sudo apt update to get the latest package information from apt\. This machine is NOT receiving security patches because the LTS period has ended @@ -436,7 +429,7 @@ for a list of available options\. The system apt information was updated 2 day\(s\) ago\. Make sure to run - sudo apt-get update + sudo apt update to get the latest package information from apt\. 
This machine is NOT receiving security patches because the LTS period has ended @@ -454,14 +447,12 @@ Enable esm-apps with: pro enable esm-apps """ - @series.focal - @uses.config.machine_type.lxd-container @uses.config.contract_token Scenario: Run security status in an Ubuntu machine - Given a `focal` machine with ubuntu-advantage-tools installed + Given a `focal` `lxd-container` machine with ubuntu-advantage-tools installed When I install third-party / unknown packages in the machine # Ansible is in esm-apps - And I run `apt-get install -y ansible` with sudo + And I apt install `ansible` And I verify root and non-root `pro security-status` calls have the same output And I run `pro security-status` as non-root Then stdout matches regexp: @@ -583,7 +574,7 @@ apt-cache show .+ to learn more about that package\. """ - When I run `apt upgrade -y` with sudo + When I apt upgrade And I verify root and non-root `pro security-status` calls have the same output And I run `pro security-status` as non-root Then stdout matches regexp: @@ -723,7 +714,7 @@ for a list of available options\. The system apt cache may be outdated\. Make sure to run - sudo apt-get update + sudo apt update to get the latest package information from apt\. This machine is receiving security patching for Ubuntu Main/Restricted @@ -755,7 +746,7 @@ for a list of available options\. The system apt information was updated 2 day\(s\) ago\. Make sure to run - sudo apt-get update + sudo apt update to get the latest package information from apt\. This machine is receiving security patching for Ubuntu Main/Restricted @@ -774,13 +765,11 @@ """ # Latest released non-LTS - @series.lunar - @uses.config.machine_type.lxd-container Scenario: Run security status in an Ubuntu machine - Given a `lunar` machine with ubuntu-advantage-tools installed + Given a `mantic` `lxd-container` machine with ubuntu-advantage-tools installed When I install third-party / unknown packages in the machine # Ansible is in esm-apps - And I run `apt-get install -y ansible` with sudo + And I apt install `ansible` And I verify root and non-root `pro security-status` calls have the same output And I run `pro security-status` as non-root Then stdout matches regexp: @@ -795,7 +784,7 @@ pro security-status --help for a list of available options\. - Main/Restricted packages receive updates until 1/2024\. + Main/Restricted packages receive updates until 7/2024\. Ubuntu Pro is not available for non-LTS releases\. """ @@ -806,7 +795,7 @@ \d+ packages installed: +\d+ packages from Ubuntu Main/Restricted repository - Main/Restricted packages receive updates until 1/2024\. + Main/Restricted packages receive updates until 7/2024\. Ubuntu Pro is not available for non-LTS releases\. """ @@ -834,10 +823,10 @@ for a list of available options\. The system apt cache may be outdated\. Make sure to run - sudo apt-get update + sudo apt update to get the latest package information from apt\. - Main/Restricted packages receive updates until 1/2024\. + Main/Restricted packages receive updates until 7/2024\. Ubuntu Pro is not available for non-LTS releases\. """ @@ -856,10 +845,10 @@ for a list of available options\. The system apt information was updated 2 day\(s\) ago\. Make sure to run - sudo apt-get update + sudo apt update to get the latest package information from apt\. - Main/Restricted packages receive updates until 1/2024\. + Main/Restricted packages receive updates until 7/2024\. Ubuntu Pro is not available for non-LTS releases\. 
""" diff -Nru ubuntu-advantage-tools-30~23.10/features/steps/attach.py ubuntu-advantage-tools-31.2~23.10/features/steps/attach.py --- ubuntu-advantage-tools-30~23.10/features/steps/attach.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/steps/attach.py 2024-01-18 17:34:13.000000000 +0000 @@ -1,6 +1,7 @@ import json from behave import then, when +from hamcrest import assert_that, equal_to from features.steps.contract import change_contract_endpoint_to_staging from features.steps.shell import ( @@ -8,6 +9,7 @@ when_i_retry_run_command, when_i_run_command, ) +from features.util import SUT ERROR_CODE = "1" @@ -70,13 +72,22 @@ ) -@then("the machine is attached") -def then_the_machine_is_attached(context): +def is_machine_attached(context, machine_name=SUT): when_i_run_command( context, - command="pro api u.pro.status.is_attached.v1", - user_spec="as non-root", + "pro api u.pro.status.is_attached.v1", + "as non-root", + machine_name=machine_name, ) + data = json.loads(context.process.stdout.strip()) + return data["data"]["attributes"]["is_attached"] + + +@then("the machine is attached") +def then_the_machine_is_attached(context): + assert_that(is_machine_attached(context), equal_to(True)) + - is_attached_resp = json.loads(context.process.stdout.strip()) - assert is_attached_resp["data"]["attributes"]["is_attached"] +@then("the machine is unattached") +def then_the_machine_is_unattached(context): + assert_that(is_machine_attached(context), equal_to(False)) diff -Nru ubuntu-advantage-tools-30~23.10/features/steps/contract.py ubuntu-advantage-tools-31.2~23.10/features/steps/contract.py --- ubuntu-advantage-tools-30~23.10/features/steps/contract.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/steps/contract.py 2024-01-18 17:34:13.000000000 +0000 @@ -124,10 +124,13 @@ yaml.safe_load(context.text), cls=util.DatetimeAwareJSONEncoder ) when_i_create_file_with_content( - context, "/tmp/machine-token-overlay.json", text=json_text + context, + "/var/lib/ubuntu-advantage/machine-token-overlay.json", + text=json_text, ) change_config_key_to_use_value( context, "features", - "{ machine_token_overlay: /tmp/machine-token-overlay.json}", + "{ machine_token_overlay: " + "/var/lib/ubuntu-advantage/machine-token-overlay.json}", ) diff -Nru ubuntu-advantage-tools-30~23.10/features/steps/machines.py ubuntu-advantage-tools-31.2~23.10/features/steps/machines.py --- ubuntu-advantage-tools-30~23.10/features/steps/machines.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/steps/machines.py 2024-01-18 17:34:13.000000000 +0000 @@ -1,7 +1,7 @@ import datetime import logging import sys -from typing import Dict, NamedTuple +from typing import NamedTuple from behave import given, when from pycloudlib.instance import BaseInstance # type: ignore @@ -9,6 +9,7 @@ from features.steps.shell import when_i_run_command from features.steps.ubuntu_advantage_tools import when_i_install_uat from features.util import ( + BUILDER_NAME_PREFIX, SUT, InstallationSource, build_debs, @@ -16,42 +17,40 @@ ) MachineTuple = NamedTuple( - "MachineTuple", [("series", str), ("instance", BaseInstance)] + "MachineTuple", + [ + ("series", str), + ("instance", BaseInstance), + ("machine_type", str), + ("cloud", str), + ], +) +SnapshotTuple = NamedTuple( + "SnapshotTuple", + [("series", str), ("name", str), ("machine_type", str), ("cloud", str)], ) -MachinesDict = Dict[str, MachineTuple] @when( - "I launch a `{series}` machine named `{machine_name}` 
from the snapshot of `{snapshot_name}`" # noqa: E501 -) -@when( "I launch a `{series}` `{machine_type}` machine named `{machine_name}` from the snapshot of `{snapshot_name}`" # noqa: E501 ) -@given("a `{series}` machine") @given("a `{series}` `{machine_type}` machine") -@given("a `{series}` machine named `{machine_name}`") @given("a `{series}` `{machine_type}` machine named `{machine_name}`") @given( - "a `{series}` machine named `{machine_name}` with ingress ports `{ports}`" -) -@given( "a `{series}` `{machine_type}` machine named `{machine_name}` with ingress ports `{ports}`" # noqa: E501 ) def given_a_machine( context, series, - machine_type=None, + machine_type, machine_name=SUT, snapshot_name=None, user_data=None, ports=None, cleanup=True, ): - if machine_type is None: - machine_type = context.pro_config.machine_type - cloud = machine_type.split(".")[0] - context.pro_config.clouds[cloud].manage_ssh_key() + context.pro_config.clouds.get(cloud).manage_ssh_key() time_suffix = datetime.datetime.now().strftime("%m%d-%H%M%S%f") instance_name = "upro-behave-{series}-{machine_name}-{time_suffix}".format( @@ -75,18 +74,26 @@ if user_data is not None: user_data_to_use += user_data - instance = context.pro_config.clouds[cloud].launch( + if snapshot_name and snapshot_name in context.snapshots: + image_name = context.snapshots[snapshot_name].name + else: + image_name = None + + instance = context.pro_config.clouds.get(cloud).launch( series=series, machine_type=machine_type, instance_name=instance_name, ephemeral=context.pro_config.ephemeral_instance, - image_name=context.snapshots.get(snapshot_name, None), + image_name=image_name, inbound_ports=inbound_ports, user_data=user_data_to_use, ) context.machines[machine_name] = MachineTuple( - series=series, instance=instance + series=series, + instance=instance, + machine_type=machine_type, + cloud=cloud, ) if series == "xenial": @@ -123,40 +130,34 @@ @when("I take a snapshot of the machine") -def when_i_take_a_snapshot( - context, machine_type=None, machine_name=SUT, cleanup=True -): - if machine_type is None: - machine_type = context.pro_config.machine_type - - cloud = machine_type.split(".")[0] +def when_i_take_a_snapshot(context, machine_name=SUT, cleanup=True): + machine_type = context.machines[machine_name].machine_type + series = context.machines[machine_name].series + cloud = context.machines[machine_name].cloud inst = context.machines[machine_name].instance - snapshot = context.pro_config.clouds[cloud].api.snapshot(inst) + snapshot = context.pro_config.clouds.get(cloud).api.snapshot(inst) - context.snapshots[machine_name] = snapshot + context.snapshots[machine_name] = SnapshotTuple( + series=series, name=snapshot, machine_type=machine_type, cloud=cloud + ) if cleanup: def cleanup_snapshot() -> None: try: - context.pro_config.clouds[cloud].api.delete_image( - context.snapshots[machine_name] - ) + context.pro_config.clouds.get(cloud).api.delete_image(snapshot) except RuntimeError as e: logging.error( - "Failed to delete image: {}\n{}".format( - context.snapshots[machine_name], str(e) - ) + "Failed to delete image: {}\n{}".format(snapshot, str(e)) ) context.add_cleanup(cleanup_snapshot) -@given("a `{series}` machine with ubuntu-advantage-tools installed") @given( "a `{series}` `{machine_type}` machine with ubuntu-advantage-tools installed" # noqa: E501 ) -def given_a_sut_machine(context, series, machine_type=None): +def given_a_sut_machine(context, series, machine_type): if context.pro_config.install_from == InstallationSource.LOCAL: # build right 
away, this will cache the built debs for later use # building early means we catch build errors before investing in @@ -178,24 +179,30 @@ ) sys.exit(1) + builder_name = BUILDER_NAME_PREFIX + machine_type + if context.pro_config.snapshot_strategy: - if "builder" not in context.snapshots: + if builder_name not in context.snapshots: given_a_machine( context, series, machine_type=machine_type, - machine_name="builder", + machine_name=builder_name, cleanup=False, ) - when_i_install_uat(context, machine_name="builder") + when_i_install_uat(context, machine_name=builder_name) when_i_take_a_snapshot( context, - machine_type=machine_type, - machine_name="builder", + machine_name=builder_name, cleanup=False, ) - context.machines["builder"].instance.delete(wait=False) - given_a_machine(context, series, snapshot_name="builder") + context.machines[builder_name].instance.delete(wait=False) + given_a_machine( + context, + series, + machine_type=machine_type, + snapshot_name=builder_name, + ) else: given_a_machine(context, series, machine_type=machine_type) when_i_install_uat(context) @@ -206,12 +213,12 @@ @given( - "a `{series}` machine with ubuntu-advantage-tools installed adding this cloud-init user_data" # noqa: E501 + "a `{series}` `{machine_type}` machine with ubuntu-advantage-tools installed adding this cloud-init user_data" # noqa: E501 ) -def given_a_sut_machine_with_user_data(context, series): +def given_a_sut_machine_with_user_data(context, series, machine_type): # doesn't support snapshot strategy because the test depends on # custom user data - given_a_machine(context, series, user_data=context.text) + given_a_machine(context, series, machine_type, user_data=context.text) when_i_install_uat(context) diff -Nru ubuntu-advantage-tools-30~23.10/features/steps/output.py ubuntu-advantage-tools-31.2~23.10/features/steps/output.py --- ubuntu-advantage-tools-30~23.10/features/steps/output.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/steps/output.py 2024-02-14 15:37:46.000000000 +0000 @@ -1,4 +1,5 @@ import json +import logging import re import textwrap @@ -15,6 +16,8 @@ def then_i_will_see_on_stream(context, stream): content = getattr(context.process, stream).strip() text = process_template_vars(context, context.text) + logging.debug("repr(expected): %r", text) + logging.debug("repr(actual): %r", content) if not text == content: raise AssertionError( "Expected to find exactly:\n{}\nBut got:\n{}".format( @@ -153,3 +156,12 @@ assert_that(root_status_stdout, nonroot_status_stdout) assert root_status_stderr == nonroot_status_stderr + + +@when("I verify that `{field}` field is redacted in the logs") +def i_verify_field_is_redacted_in_the_logs(context, field): + when_i_run_command( + context, "cat /var/log/ubuntu-advantage.log", "with sudo" + ) + context.text = field + "" + then_stream_contains_substring(context, "stdout") diff -Nru ubuntu-advantage-tools-30~23.10/features/steps/packages.py ubuntu-advantage-tools-31.2~23.10/features/steps/packages.py --- ubuntu-advantage-tools-30~23.10/features/steps/packages.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/steps/packages.py 2024-02-14 15:37:46.000000000 +0000 @@ -1,21 +1,92 @@ import re from behave import then, when -from hamcrest import assert_that, contains_string, matches_regexp +from hamcrest import assert_that, contains_string, matches_regexp, not_ -from features.steps.shell import when_i_run_command +from features.steps.shell import when_i_retry_run_command, 
when_i_run_command from features.util import SUT +@when("I apt autoremove") +def when_i_autoremove(context): + when_i_run_command( + context, + "apt -y autoremove", + "with sudo", + ) + + +@when("I apt dist-upgrade") +def when_i_dist_update(context): + when_i_run_command( + context, + "apt dist-upgrade --assume-yes --allow-downgrades", + "with sudo", + ) + + +@then("I ensure apt update runs without errors") +def then_i_check_apt_update(context): + when_i_run_command( + context, + "apt update", + "with sudo", + ) + + +@when("I apt update") +@when("I apt update on the `{machine_name}` machine") +def when_i_apt_update(context, machine_name=SUT): + when_i_retry_run_command( + context, + "apt update", + "with sudo", + machine_name=machine_name, + exit_codes="100", + ) + + +@when("I {command} upgrade") +@when("I {command} upgrade including {phased} updates") +@when("I {command} upgrade on a {dry} run") +def when_i_apt_upgrade(context, command="apt", dry="", phased=""): + cmd_list = [ + command, + "upgrade", + ] + + if dry == "dry": + cmd_list.append("--dry-run") + else: + cmd_list.insert(0, "DEBIAN_FRONTEND=noninteractive") + cmd_list.extend( + [ + "-y", + "--allow-downgrades", + '-o Dpkg::Options::="--force-confdef"', + '-o Dpkg::Options::="--force-confold"', + ] + ) + # It will be either phased or dry, never both + if phased == "phased": + cmd_list.append("-o APT::Get::Always-Include-Phased-Updates=true") + + when_i_run_command( + context, + " ".join(cmd_list), + "with sudo", + ) + + @when("I apt install `{package_names}`") @when("I apt install `{package_names}` on the `{machine_name}` machine") def when_i_apt_install(context, package_names, machine_name=SUT): - when_i_run_command( + when_i_retry_run_command( context, " ".join( [ "DEBIAN_FRONTEND=noninteractive", - "apt-get", + "apt", "install", "-y", "--allow-downgrades", @@ -26,6 +97,24 @@ ), "with sudo", machine_name=machine_name, + exit_codes="100", + ) + + +@when("I apt remove `{package_names}`") +def when_i_apt_remove(context, package_names): + when_i_run_command( + context, + " ".join( + [ + "DEBIAN_FRONTEND=noninteractive", + "apt", + "remove", + "-y", + *package_names.split(" "), + ] + ), + "with sudo", ) @@ -72,6 +161,15 @@ # then the package is neither installed nor known +@then("I verify that `{package}` is installed") +def verify_package_installed(context, package): + when_i_run_command(context, "dpkg -l {}".format(package), "as non-root") + assert_that( + context.process.stdout.strip(), + not_(contains_string("no packages found matching {}".format(package))), + ) + + @then("I verify that `{package}` is installed from apt source `{apt_source}`") def verify_package_is_installed_from_apt_source(context, package, apt_source): when_i_run_command( diff -Nru ubuntu-advantage-tools-30~23.10/features/steps/shell.py ubuntu-advantage-tools-31.2~23.10/features/steps/shell.py --- ubuntu-advantage-tools-30~23.10/features/steps/shell.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/steps/shell.py 2024-01-18 17:34:13.000000000 +0000 @@ -10,8 +10,16 @@ @when("I run `{command}` {user_spec}, retrying exit [{exit_codes}]") -def when_i_retry_run_command(context, command, user_spec, exit_codes): - when_i_run_command(context, command, user_spec, verify_return=False) +def when_i_retry_run_command( + context, command, user_spec, exit_codes, machine_name=SUT +): + when_i_run_command( + context, + command, + user_spec, + verify_return=False, + machine_name=machine_name, + ) retries = [5, 5, 10] # Sleep times to wait 
between retries while len(retries) > 0 and str( context.process.returncode @@ -22,7 +30,13 @@ exit_code=context.process.returncode, command=command ) ) - when_i_run_command(context, command, user_spec, verify_return=False) + when_i_run_command( + context, + command, + user_spec, + verify_return=False, + machine_name=machine_name, + ) logging.warning( "Exhausted retries waiting for exit codes: %s. Final exit code: %d", exit_codes, @@ -119,7 +133,12 @@ ) expected_codes = exit_codes.split(",") - assert str(context.process.returncode) in expected_codes + if str(context.process.returncode) not in expected_codes: + raise AssertionError( + "Expected exit code in: {} but got {}".format( + expected_codes, context.process.returncode + ) + ) @step("I verify that running `{cmd_name}` `{spec}` exits `{exit_codes}`") @@ -129,7 +148,12 @@ when_i_run_command(context, cmd_name, spec, verify_return=False) logging.debug("got return code: %d", context.process.returncode) expected_codes = exit_codes.split(",") - assert str(context.process.returncode) in expected_codes + if str(context.process.returncode) not in expected_codes: + raise AssertionError( + "Expected exit code in: {} but got {}".format( + expected_codes, context.process.returncode + ) + ) def get_command_prefix_for_user_spec(user_spec): diff -Nru ubuntu-advantage-tools-30~23.10/features/steps/status.py ubuntu-advantage-tools-31.2~23.10/features/steps/status.py --- ubuntu-advantage-tools-30~23.10/features/steps/status.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/steps/status.py 2024-01-18 17:34:13.000000000 +0000 @@ -1,6 +1,9 @@ -from behave import when +import json + +from behave import then, when from features.steps.shell import when_i_run_command +from features.util import SUT @when("I do a preflight check for `{contract_token}` {user_spec}") @@ -28,3 +31,81 @@ expected_codes = exit_codes.split(",") assert str(context.process.returncode) in expected_codes + + +def get_enabled_services(context, machine_name=SUT): + when_i_run_command( + context, + "pro api u.pro.status.enabled_services.v1", + "as non-root", + machine_name=machine_name, + ) + + data = json.loads(context.process.stdout.strip()) + + enabled_services = [] + warning_services = [] + for enabled_service in data["data"]["attributes"]["enabled_services"]: + if enabled_service["variant_enabled"]: + enabled_services.append(enabled_service["variant_name"]) + else: + enabled_services.append(enabled_service["name"]) + + for warning in data["warnings"]: + warning_services.append(warning["meta"]["service"]) + + return enabled_services, warning_services + + +@then("I verify that `{service}` is disabled") +def i_verify_that_service_is_disabled(context, service): + enabled_services, _ = get_enabled_services(context) + + if service in enabled_services: + raise AssertionError( + "Expected {} to not be enabled\nEnabled services: {}".format( + service, ", ".join(enabled_services) + ) + ) + + +@then("I verify that `{service}` is enabled") +def i_verify_that_service_is_enabled(context, service): + enabled_services, _ = get_enabled_services(context) + + if service not in enabled_services: + raise AssertionError( + "Expected {} to be enabled\nEnabled services: {}".format( + service, ", ".join(enabled_services) + ) + ) + + +@then("I verify that `{service}` status is warning") +def i_verify_that_service_status_is_warning(context, service): + enabled_services, warning_services = get_enabled_services(context) + + if service not in enabled_services: + msg = ( + "Expected 
{} status to be warning, but the service is disabled\n" + "Enabled services: {}" + ) + raise AssertionError(msg.format(service, ", ".join(enabled_services))) + + if service not in warning_services: + msg = "Expected {} status to be warning, but the status is enabled" + raise AssertionError(msg.format(service)) + + +@then("I verify that `{service}` status is `{status}`") +def i_verify_service_status(context, service, status): + if status == "enabled": + i_verify_that_service_is_enabled(context, service) + elif status == "disabled": + i_verify_that_service_is_disabled(context, service) + elif status == "warning": + i_verify_that_service_status_is_warning(context, service) + else: + raise AssertionError( + "Service status {} is not supported".format(status) + ) diff -Nru ubuntu-advantage-tools-30~23.10/features/steps/ubuntu_advantage_tools.py ubuntu-advantage-tools-31.2~23.10/features/steps/ubuntu_advantage_tools.py --- ubuntu-advantage-tools-30~23.10/features/steps/ubuntu_advantage_tools.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/steps/ubuntu_advantage_tools.py 2024-02-14 15:37:46.000000000 +0000 @@ -7,6 +7,7 @@ from features.steps.packages import when_i_apt_install from features.steps.shell import when_i_run_command, when_i_run_shell_command from features.util import ( + ALL_BINARY_PACKAGE_NAMES, SUT, InstallationSource, build_debs, @@ -14,84 +15,22 @@ ) -@when("I install ubuntu-advantage-tools") -def when_i_install_uat(context, machine_name=SUT): +def setup_pro_package_sources(context, machine_name=SUT): instance = context.machines[machine_name].instance - series = context.machines[machine_name].series - is_pro = "pro" in context.pro_config.machine_type + if context.pro_config.install_from is InstallationSource.ARCHIVE: instance.execute("sudo apt update") - when_i_apt_install( - context, "ubuntu-advantage-tools", machine_name=machine_name - ) - if is_pro: - when_i_apt_install( - context, "ubuntu-advantage-pro", machine_name=machine_name - ) - elif context.pro_config.install_from is InstallationSource.PREBUILT: - deb_paths = sorted( - get_debs_for_series(context.pro_config.debs_path, series) - ) - logging.info("using debs: {}".format(deb_paths)) - for deb_path in deb_paths: - if "advantage-pro" not in deb_path or is_pro: - instance.push_file(deb_path, "/tmp/behave_ua.deb") - when_i_apt_install( - context, "/tmp/behave_ua.deb", machine_name=machine_name - ) - instance.execute("sudo rm /tmp/behave_ua.deb") - elif context.pro_config.install_from is InstallationSource.LOCAL: - ua_deb_path, pro_deb_path, l10n_deb_path = build_debs( - series, - sbuild_output_to_terminal=context.pro_config.sbuild_output_to_terminal, # noqa: E501 - ) - instance.push_file(ua_deb_path, "/tmp/behave_ua.deb") - when_i_apt_install( - context, "/tmp/behave_ua.deb", machine_name=machine_name - ) - instance.execute("sudo rm /tmp/behave_ua.deb") - instance.push_file(l10n_deb_path, "/tmp/behave_ua.deb") - when_i_apt_install( - context, "/tmp/behave_ua.deb", machine_name=machine_name - ) - instance.execute("sudo rm /tmp/behave_ua.deb") - if is_pro: - instance.push_file(pro_deb_path, "/tmp/behave_ua.deb") - when_i_apt_install( - context, "/tmp/behave_ua.deb", machine_name=machine_name - ) - instance.execute("sudo rm /tmp/behave_ua.deb") elif context.pro_config.install_from is InstallationSource.DAILY: instance.execute("sudo add-apt-repository ppa:ua-client/daily") instance.execute("sudo apt update") - when_i_apt_install( - context, "ubuntu-advantage-tools", machine_name=machine_name - 
) - if is_pro: - when_i_apt_install( - context, "ubuntu-advantage-pro", machine_name=machine_name - ) elif context.pro_config.install_from is InstallationSource.STAGING: instance.execute("sudo add-apt-repository ppa:ua-client/staging") instance.execute("sudo apt update") - when_i_apt_install( - context, "ubuntu-advantage-tools", machine_name=machine_name - ) - if is_pro: - when_i_apt_install( - context, "ubuntu-advantage-pro", machine_name=machine_name - ) elif context.pro_config.install_from is InstallationSource.STABLE: instance.execute("sudo add-apt-repository ppa:ua-client/stable") instance.execute("sudo apt update") - when_i_apt_install( - context, "ubuntu-advantage-tools", machine_name=machine_name - ) - if is_pro: - when_i_apt_install( - context, "ubuntu-advantage-pro", machine_name=machine_name - ) elif context.pro_config.install_from is InstallationSource.PROPOSED: + series = context.machines[machine_name].series context.text = "deb http://archive.ubuntu.com/ubuntu/ {series}-proposed main\n".format( # noqa: E501 series=series ) @@ -110,58 +49,90 @@ machine_name=machine_name, ) - context.text = "Package: ubuntu-advantage-tools\nPin: release a={series}-proposed\nPin-Priority: 1001\n".format( # noqa: E501 - series=series - ) - when_i_create_file_with_content( - context, - "/etc/apt/preferences.d/uatools-proposed", - machine_name=machine_name, - ) - - context.text = "Package: ubuntu-advantage-pro\nPin: release a={series}-proposed\nPin-Priority: 1001\n".format( # noqa: E501 - series=series - ) - when_i_create_file_with_content( - context, - "/etc/apt/preferences.d/uapro-proposed", - machine_name=machine_name, - ) + for package in ALL_BINARY_PACKAGE_NAMES: + context.text = "Package: {package}\nPin: release a={series}-proposed\nPin-Priority: 1001\n".format( # noqa: E501 + package=package, + series=series, + ) + when_i_create_file_with_content( + context, + "/etc/apt/preferences.d/{}-proposed".format(package), + machine_name=machine_name, + ) instance.execute("sudo apt update") - when_i_apt_install( - context, "ubuntu-advantage-tools", machine_name=machine_name - ) - if is_pro: - when_i_apt_install( - context, "ubuntu-advantage-pro", machine_name=machine_name - ) elif context.pro_config.install_from is InstallationSource.CUSTOM: instance.execute( "sudo add-apt-repository {}".format(context.pro_config.custom_ppa) ) instance.execute("sudo apt update") + + +@when("I install ubuntu-advantage-tools") +def when_i_install_uat(context, machine_name=SUT): + instance = context.machines[machine_name].instance + series = context.machines[machine_name].series + is_pro = "pro" in context.machines[machine_name].machine_type + setup_pro_package_sources(context, machine_name) + + if context.pro_config.install_from is InstallationSource.PREBUILT: + debs = get_debs_for_series(context.pro_config.debs_path, series) + logging.info("using debs: {}".format(debs)) + to_install = [] + for deb_name, deb_path in debs.non_cloud_pro_image_debs(): + instance_tmp_path = "/tmp/behave_{}.deb".format(deb_name) + instance.push_file(deb_path, instance_tmp_path) + to_install.append(instance_tmp_path) + if is_pro: + for deb_name, deb_path in debs.cloud_pro_image_debs(): + instance_tmp_path = "/tmp/behave_{}.deb".format(deb_name) + instance.push_file(deb_path, instance_tmp_path) + to_install.append(instance_tmp_path) when_i_apt_install( - context, "ubuntu-advantage-tools", machine_name=machine_name + context, " ".join(to_install), machine_name=machine_name + ) + elif context.pro_config.install_from is InstallationSource.LOCAL: 
+ debs = build_debs( + series, + sbuild_output_to_terminal=context.pro_config.sbuild_output_to_terminal, # noqa: E501 + ) + to_install = [] + for deb_name, deb_path in debs.non_cloud_pro_image_debs(): + instance_tmp_path = "/tmp/behave_{}.deb".format(deb_name) + instance.push_file(deb_path, instance_tmp_path) + to_install.append(instance_tmp_path) + if is_pro: + for deb_name, deb_path in debs.cloud_pro_image_debs(): + instance_tmp_path = "/tmp/behave_{}.deb".format(deb_name) + instance.push_file(deb_path, instance_tmp_path) + to_install.append(instance_tmp_path) + when_i_apt_install( + context, " ".join(to_install), machine_name=machine_name + ) + else: + when_i_apt_install( + context, "ubuntu-pro-client", machine_name=machine_name ) if is_pro: when_i_apt_install( - context, "ubuntu-advantage-pro", machine_name=machine_name + context, + "ubuntu-pro-auto-attach", + machine_name=machine_name, ) @when("I have the `{series}` debs under test in `{dest}`") def when_i_have_the_debs_under_test(context, series, dest): if context.pro_config.install_from is InstallationSource.LOCAL: - deb_paths = build_debs( + debs = build_debs( series, sbuild_output_to_terminal=context.pro_config.sbuild_output_to_terminal, # noqa: E501 ) - for deb_path in deb_paths: - tools_or_pro = "tools" if "tools" in deb_path else "pro" - dest_path = "{}/ubuntu-advantage-{}.deb".format(dest, tools_or_pro) - context.machines[SUT].instance.push_file(deb_path, dest_path) + for deb_name, deb_path in debs.all_debs(): + context.machines[SUT].instance.push_file( + deb_path, "{}/{}.deb".format(dest, deb_name) + ) else: if context.pro_config.install_from is InstallationSource.PROPOSED: ppa_opts = "" @@ -193,16 +164,12 @@ logging.info("Download command `{}`".format(download_cmd)) logging.info("stdout: {}".format(context.process.stdout)) logging.info("stderr: {}".format(context.process.stderr)) - when_i_run_shell_command( - context, - "cp ubuntu-advantage-tools*.deb ubuntu-advantage-tools.deb", - "with sudo", - ) - when_i_run_shell_command( - context, - "cp ubuntu-advantage-pro*.deb ubuntu-advantage-pro.deb", - "with sudo", - ) + for package in ALL_BINARY_PACKAGE_NAMES: + when_i_run_shell_command( + context, + "cp {package}_*.deb {package}.deb".format(package=package), + "with sudo", + ) @when( @@ -215,7 +182,7 @@ from features.steps.machines import given_a_machine # We need Kinetic or greater to support zstd when creating the PPAs - given_a_machine(context, "lunar", machine_name="ppa") + given_a_machine(context, "mantic", "lxd-container", machine_name="ppa") when_i_run_command( context, "apt-get update", "with sudo", machine_name="ppa" ) @@ -250,9 +217,9 @@ release, sbuild_output_to_terminal=context.pro_config.sbuild_output_to_terminal, ) - for deb in debs: - deb_destination = "/tmp/" + deb.split("/")[-1] - context.machines["ppa"].instance.push_file(deb, deb_destination) + for deb_name, deb_path in debs.all_debs(): + deb_destination = "/tmp/{}.deb".format(deb_name) + context.machines["ppa"].instance.push_file(deb_path, deb_destination) when_i_run_command( context, "aptly repo add repo-{} {}".format(release, deb_destination), @@ -271,22 +238,24 @@ def when_i_install_pro(context, machine_name=SUT): if context.pro_config.install_from is InstallationSource.LOCAL: series = context.machines[machine_name].series - deb_paths = build_debs( + debs = build_debs( series, sbuild_output_to_terminal=context.pro_config.sbuild_output_to_terminal, # noqa: E501 ) - for deb_path in deb_paths: - if "advantage-pro" in deb_path: - 
context.machines[machine_name].instance.push_file( - deb_path, "/tmp/pro.deb" - ) - when_i_run_command( - context, "dpkg -i /tmp/pro.deb", "with sudo" - ) + to_install = [] + for deb_name, deb_path in debs.cloud_pro_image_debs(): + instance_tmp_path = "/tmp/behave_{}.deb".format(deb_name) + context.machines[machine_name].instance.push_file( + deb_path, instance_tmp_path + ) + to_install.append(instance_tmp_path) + when_i_apt_install( + context, " ".join(to_install), machine_name=machine_name + ) else: - when_i_run_command( - context, "apt-get install ubuntu-advantage-pro", "with sudo" + when_i_apt_install( + context, "ubuntu-pro-auto-attach", machine_name=machine_name ) @@ -307,3 +276,19 @@ for step in context.scenario.steps: if step.name == APT_POLICY_IS: step.text = context.process.stdout + + +@when("I install transition package ubuntu-advantage-tools") +def when_i_install_transition_uat(context, machine_name=SUT): + is_pro = "pro" in context.machines[machine_name].machine_type + setup_pro_package_sources(context, machine_name) + + when_i_apt_install( + context, "ubuntu-advantage-tools", machine_name=machine_name + ) + if is_pro: + when_i_apt_install( + context, + "ubuntu-advantage-pro", + machine_name=machine_name, + ) diff -Nru ubuntu-advantage-tools-30~23.10/features/timer.feature ubuntu-advantage-tools-31.2~23.10/features/timer.feature --- ubuntu-advantage-tools-30~23.10/features/timer.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/timer.feature 2024-01-18 17:34:13.000000000 +0000 @@ -2,12 +2,8 @@ Feature: Timer for regular background jobs while attached # earlies, latest lts, devel - @series.xenial - @series.jammy - @series.mantic - @uses.config.machine_type.lxd.container Scenario Outline: Timer is stopped when detached, started when attached - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed Then I verify the `ua-timer` systemd timer is disabled When I attach `contract_token` with sudo # 6 hour timer with 1 hour randomized delay -> potentially 7 hours @@ -15,7 +11,7 @@ When I run `pro detach --assume-yes` with sudo Then I verify the `ua-timer` systemd timer is disabled Examples: ubuntu release - | release | - | xenial | - | jammy | - | mantic | + | release | machine_type | + | xenial | lxd-container | + | jammy | lxd-container | + | mantic | lxd-container | diff -Nru ubuntu-advantage-tools-30~23.10/features/ubuntu_pro.feature ubuntu-advantage-tools-31.2~23.10/features/ubuntu_pro.feature --- ubuntu-advantage-tools-30~23.10/features/ubuntu_pro.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/ubuntu_pro.feature 2024-01-18 17:34:13.000000000 +0000 @@ -1,139 +1,36 @@ Feature: Command behaviour when auto-attached in an ubuntu PRO image - @series.lts - @uses.config.machine_type.aws.pro - Scenario Outline: Proxy auto-attach in an Ubuntu pro AWS machine - Given a `` machine with ubuntu-advantage-tools installed - Given a `focal` machine named `proxy` - When I run `apt install squid -y` `with sudo` on the `proxy` machine + Scenario Outline: Proxy auto-attach on a cloud Ubuntu Pro machine + Given a `` `` machine with ubuntu-advantage-tools installed + Given a `focal` `` machine named `proxy` with ingress ports `3389` + When I apt install `squid` on the `proxy` machine And I add this text on `/etc/squid/squid.conf` on `proxy` above `http_access deny all`: - """ - dns_v4_first on\nacl all src 0.0.0.0\/0\nhttp_access allow all - """ + """ + 
dns_v4_first on\nacl all src 0.0.0.0\/0\nhttp_port 3389\nhttp_access allow all + """ And I run `systemctl restart squid.service` `with sudo` on the `proxy` machine # This also tests that legacy `ua_config` settings still work When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: """ contract_url: 'https://contracts.canonical.com' - data_dir: /var/lib/ubuntu-advantage log_level: debug - log_file: /var/log/ubuntu-advantage.log ua_config: - http_proxy: http://$behave_var{machine-ip proxy}:3128 - https_proxy: http://$behave_var{machine-ip proxy}:3128 + http_proxy: http://$behave_var{machine-ip proxy}:3389 + https_proxy: http://$behave_var{machine-ip proxy}:3389 """ And I verify `/var/log/squid/access.log` is empty on `proxy` machine When I run `pro auto-attach` with sudo - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - SERVICE +ENTITLED +STATUS +DESCRIPTION - anbox-cloud +(yes|no) .* - cc-eal +yes + +Common Criteria EAL2 Provisioning Packages - """ - Then stdout matches regexp: - """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - fips +yes + +NIST-certified FIPS crypto packages - fips-preview +yes +n/a +.* - fips-updates +yes + +FIPS compliant crypto packages with stable security updates - livepatch +yes +enabled +Canonical Livepatch service - """ - Then stdout matches regexp: - """ - +yes + +Security compliance and audit tools - """ + Then I verify that `esm-apps` is enabled + And I verify that `esm-infra` is enabled + And I verify that `livepatch` is enabled When I run `pro enable ` with sudo - And I run `pro status` with sudo - Then stdout matches regexp: - """ - +yes +enabled +Security compliance and audit tools - """ + Then I verify that `` is enabled When I run `pro disable ` with sudo Then stdout matches regexp: - """ - Updating package lists - """ - When I run `pro status` with sudo - Then stdout matches regexp: - """ - +yes +disabled +Security compliance and audit tools - """ - When I run `cat /var/log/squid/access.log` `with sudo` on the `proxy` machine - Then stdout matches regexp: - """ - .*CONNECT contracts.canonical.com.* - """ - And stdout does not match regexp: - """ - .*CONNECT 169.254.169.254.* - """ - Examples: ubuntu release - | release | fips-s | cc-eal-s | cis-s | cis_or_usg | - | xenial | disabled | disabled | disabled | cis | - | bionic | disabled | disabled | disabled | cis | - | focal | disabled | n/a | disabled | usg | - - @series.lts - @uses.config.machine_type.azure.pro - Scenario Outline: Proxy auto-attach in an Ubuntu pro Azure machine - Given a `` machine with ubuntu-advantage-tools installed - Given a `focal` machine named `proxy` with ingress ports `3128` - When I run `apt install squid -y` `with sudo` on the `proxy` machine - And I add this text on `/etc/squid/squid.conf` on `proxy` above `http_access deny all`: - """ - dns_v4_first on\nacl all src 0.0.0.0\/0\nhttp_access allow all - """ - And I run `systemctl restart squid.service` `with sudo` on the `proxy` machine - When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: """ - contract_url: 'https://contracts.canonical.com' - data_dir: /var/lib/ubuntu-advantage - log_level: debug - log_file: /var/log/ubuntu-advantage.log - ua_config: - http_proxy: http://$behave_var{machine-ip proxy}:3128 - https_proxy: http://$behave_var{machine-ip proxy}:3128 + Updating package lists """ - And I verify `/var/log/squid/access.log` is 
empty on `proxy` machine - When I run `pro auto-attach` with sudo - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - SERVICE +ENTITLED +STATUS +DESCRIPTION - anbox-cloud +(yes|no) .* - cc-eal +yes + +Common Criteria EAL2 Provisioning Packages - """ - Then stdout matches regexp: - """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - fips +yes + +NIST-certified FIPS crypto packages - fips-preview +yes +n/a +.* - fips-updates +yes + +FIPS compliant crypto packages with stable security updates - livepatch +yes + +Canonical Livepatch service - """ - Then stdout matches regexp: - """ - +yes + +Security compliance and audit tools - """ - When I run `pro enable ` with sudo - And I run `pro status` with sudo - Then stdout matches regexp: - """ - +yes +enabled +Security compliance and audit tools - """ - When I run `pro disable ` with sudo - Then stdout matches regexp: - """ - Updating package lists - """ - When I run `pro status` with sudo - Then stdout matches regexp: - """ - +yes +disabled +Security compliance and audit tools - """ + And I verify that `` is disabled When I run `cat /var/log/squid/access.log` `with sudo` on the `proxy` machine Then stdout matches regexp: """ @@ -143,395 +40,34 @@ """ .*CONNECT 169.254.169.254.* """ - Examples: ubuntu release - | release | fips-s | cc-eal-s | cis-s | livepatch-s | cis_or_usg | - | xenial | disabled | disabled | disabled | enabled | cis | - | bionic | disabled | disabled | disabled | enabled | cis | - | focal | disabled | n/a | disabled | enabled | usg | - - @series.lts - @uses.config.machine_type.gcp.pro - Scenario Outline: Proxy auto-attach in an Ubuntu Pro GCP machine - Given a `` machine with ubuntu-advantage-tools installed - Given a `focal` machine named `proxy` - When I run `apt install squid -y` `with sudo` on the `proxy` machine - And I add this text on `/etc/squid/squid.conf` on `proxy` above `http_access deny all`: - """ - dns_v4_first on\nacl all src 0.0.0.0\/0\nhttp_port 3389\nhttp_access allow all - """ - And I run `systemctl restart squid.service` `with sudo` on the `proxy` machine - When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: - """ - contract_url: 'https://contracts.canonical.com' - data_dir: /var/lib/ubuntu-advantage - log_level: debug - log_file: /var/log/ubuntu-advantage.log - ua_config: - http_proxy: http://$behave_var{machine-ip proxy}:3389 - https_proxy: http://$behave_var{machine-ip proxy}:3389 - """ - And I verify `/var/log/squid/access.log` is empty on `proxy` machine - When I run `pro auto-attach` with sudo - When I run `pro status --all` with sudo - Then stdout matches regexp: - """ - SERVICE +ENTITLED +STATUS +DESCRIPTION - anbox-cloud +(yes|no) .* - cc-eal +yes + +Common Criteria EAL2 Provisioning Packages - """ - Then stdout matches regexp: - """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - fips +yes + +NIST-certified FIPS crypto packages - fips-preview +yes +n/a +.* - fips-updates +yes + +FIPS compliant crypto packages with stable security updates - livepatch +yes + + - """ - Then stdout matches regexp: - """ - +yes + +Security compliance and audit tools - """ - When I run `pro enable ` with sudo - And I run `pro status` with sudo - Then stdout matches regexp: - """ - +yes +enabled +Security compliance and audit tools - """ - When I run `pro disable 
` with sudo - Then stdout matches regexp: - """ - Updating package lists - """ - When I run `pro status` with sudo - Then stdout matches regexp: - """ - +yes +disabled +Security compliance and audit tools - """ - When I run `cat /var/log/squid/access.log` `with sudo` on the `proxy` machine - Then stdout matches regexp: - """ - .*CONNECT contracts.canonical.com.* - """ And stdout does not match regexp: """ .*CONNECT metadata.* """ - Examples: ubuntu release - | release | fips-s | cc-eal-s | cis-s | livepatch-s | lp-desc | cis_or_usg | - | xenial | n/a | disabled | disabled | warning | Current kernel is not supported | cis | - | bionic | disabled | disabled | disabled | enabled | Canonical Livepatch service | cis | - | focal | disabled | n/a | disabled | enabled | Canonical Livepatch service | usg | - - @series.lts - @uses.config.machine_type.aws.pro - Scenario Outline: Attached refresh in an Ubuntu pro AWS machine - Given a `` machine with ubuntu-advantage-tools installed - When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: - """ - contract_url: 'https://contracts.canonical.com' - data_dir: /var/lib/ubuntu-advantage - log_level: debug - log_file: /var/log/ubuntu-advantage.log - """ - And I run `pro auto-attach` with sudo - And I run `pro status --all --wait` as non-root - Then stdout matches regexp: - """ - SERVICE +ENTITLED +STATUS +DESCRIPTION - anbox-cloud +(yes|no) .* - cc-eal +yes + +Common Criteria EAL2 Provisioning Packages - """ - Then stdout matches regexp: - """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - fips +yes + +NIST-certified FIPS crypto packages - fips-preview +yes + +.* - fips-updates +yes + +FIPS compliant crypto packages with stable security updates - livepatch +yes + +(Canonical Livepatch service|Current kernel is not supported) - """ - Then stdout matches regexp: - """ - +yes + +Security compliance and audit tools - """ - When I run `pro status --all` as non-root - Then stdout matches regexp: - """ - SERVICE +ENTITLED +STATUS +DESCRIPTION - anbox-cloud +(yes|no) .* - cc-eal +yes + +Common Criteria EAL2 Provisioning Packages - """ - Then stdout matches regexp: - """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - fips +yes + +NIST-certified FIPS crypto packages - fips-preview +yes + +.* - fips-updates +yes + +FIPS compliant crypto packages with stable security updates - livepatch +yes + +(Canonical Livepatch service|Current kernel is not supported) - """ - Then stdout matches regexp: - """ - +yes + +Security compliance and audit tools - """ - When I run `systemctl start ua-auto-attach.service` with sudo - And I verify that running `systemctl status ua-auto-attach.service` `as non-root` exits `0,3` - Then stdout matches regexp: - """ - .*status=0\/SUCCESS.* - """ - And stdout matches regexp: - """ - Active: inactive \(dead\).* - \s*Condition: start condition failed.* - .*ConditionPathExists=!/var/lib/ubuntu-advantage/private/machine-token.json was not met - """ - When I verify that running `pro auto-attach` `with sudo` exits `2` - Then stderr matches regexp: - """ - This machine is already attached to '.*' - To use a different subscription first run: sudo pro detach. 
- """ - When I run `apt-cache policy` with sudo - Then apt-cache policy for the following url has priority `510` - """ - https://esm.ubuntu.com/infra/ubuntu -infra-updates/main amd64 Packages - """ - And apt-cache policy for the following url has priority `510` - """ - https://esm.ubuntu.com/infra/ubuntu -infra-security/main amd64 Packages - """ - And apt-cache policy for the following url has priority `510` - """ - https://esm.ubuntu.com/apps/ubuntu -apps-updates/main amd64 Packages - """ - And apt-cache policy for the following url has priority `510` - """ - https://esm.ubuntu.com/apps/ubuntu -apps-security/main amd64 Packages - """ - And I verify that running `apt update` `with sudo` exits `0` - When I run `apt install -y /-infra-security` with sudo, retrying exit [100] - And I run `apt-cache policy ` as non-root - Then stdout matches regexp: - """ - \s*510 https://esm.ubuntu.com/infra/ubuntu -infra-updates/main amd64 Packages - """ - And stdout matches regexp: - """ - Installed: .*[~+]esm - """ - When I run `apt install -y /-apps-security` with sudo, retrying exit [100] - And I run `apt-cache policy ` as non-root - Then stdout matches regexp: - """ - Version table: - \s*\*\*\* .* 510 - \s*510 https://esm.ubuntu.com/apps/ubuntu -apps-security/main amd64 Packages - """ - When I create the file `/var/lib/ubuntu-advantage/marker-reboot-cmds-required` with the following: - """ - """ - And I reboot the machine - And I verify that running `systemctl status ua-reboot-cmds.service` `as non-root` exits `0,3` - Then stdout matches regexp: - """ - .*status=0\/SUCCESS.* - """ - When I run `ua api u.pro.attach.auto.should_auto_attach.v1` with sudo - Then stdout matches regexp: - """ - {"_schema_version": "v1", "data": {"attributes": {"should_auto_attach": true}, "meta": {"environment_vars": \[\]}, "type": "ShouldAutoAttach"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} - """ - - Examples: ubuntu release - | release | fips-s | cc-eal-s | cis-s | infra-pkg | apps-pkg | cis_or_usg | livepatch-s | fips-p | - | xenial | disabled | disabled | disabled | libkrad0 | jq | cis | enabled | n/a | - | bionic | disabled | disabled | disabled | libkrad0 | bundler | cis | enabled | n/a | - | focal | disabled | n/a | disabled | hello | ant | usg | enabled | n/a | - | jammy | n/a | n/a | disabled | hello | hello | usg | enabled | disabled | - - - @series.lts - @uses.config.machine_type.azure.pro - Scenario Outline: Attached refresh in an Ubuntu pro Azure machine - Given a `` machine with ubuntu-advantage-tools installed - When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: - """ - contract_url: 'https://contracts.canonical.com' - data_dir: /var/lib/ubuntu-advantage - log_level: debug - log_file: /var/log/ubuntu-advantage.log - """ - And I run `pro auto-attach` with sudo - And I run `pro status --all --wait` as non-root - Then stdout matches regexp: - """ - SERVICE +ENTITLED +STATUS +DESCRIPTION - anbox-cloud +(yes|no) .* - cc-eal +yes + +Common Criteria EAL2 Provisioning Packages - """ - Then stdout matches regexp: - """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - fips +yes + +NIST-certified FIPS crypto packages - fips-preview +yes + +.* - fips-updates +yes + +FIPS compliant crypto packages with stable security updates - livepatch +yes + +Canonical Livepatch service - """ - Then stdout matches regexp: - """ - +yes + +Security compliance and audit tools - 
""" - When I run `pro status --all` as non-root - Then stdout matches regexp: - """ - SERVICE +ENTITLED +STATUS +DESCRIPTION - anbox-cloud +(yes|no) .* - cc-eal +yes + +Common Criteria EAL2 Provisioning Packages - """ - Then stdout matches regexp: - """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - fips +yes + +NIST-certified FIPS crypto packages - fips-preview +yes + +.* - fips-updates +yes + +FIPS compliant crypto packages with stable security updates - livepatch +yes + +Canonical Livepatch service - """ - Then stdout matches regexp: - """ - +yes + +Security compliance and audit tools - """ - When I run `systemctl start ua-auto-attach.service` with sudo - And I verify that running `systemctl status ua-auto-attach.service` `as non-root` exits `0,3` - Then stdout matches regexp: - """ - .*status=0\/SUCCESS.* - """ - And stdout matches regexp: - """ - Active: inactive \(dead\).* - \s*Condition: start condition failed.* - .*ConditionPathExists=!/var/lib/ubuntu-advantage/private/machine-token.json was not met - """ - When I verify that running `pro auto-attach` `with sudo` exits `2` - Then stderr matches regexp: - """ - This machine is already attached to '.*' - To use a different subscription first run: sudo pro detach. - """ - When I run `apt-cache policy` with sudo - Then apt-cache policy for the following url has priority `510` - """ - https://esm.ubuntu.com/infra/ubuntu -infra-updates/main amd64 Packages - """ - And apt-cache policy for the following url has priority `510` - """ - https://esm.ubuntu.com/infra/ubuntu -infra-security/main amd64 Packages - """ - And apt-cache policy for the following url has priority `510` - """ - https://esm.ubuntu.com/apps/ubuntu -apps-updates/main amd64 Packages - """ - And apt-cache policy for the following url has priority `510` - """ - https://esm.ubuntu.com/apps/ubuntu -apps-security/main amd64 Packages - """ - And I verify that running `apt update` `with sudo` exits `0` - When I run `apt install -y /-infra-security` with sudo, retrying exit [100] - And I run `apt-cache policy ` as non-root - Then stdout matches regexp: - """ - \s*510 https://esm.ubuntu.com/infra/ubuntu -infra-updates/main amd64 Packages - """ - And stdout matches regexp: - """ - Installed: .*[~+]esm - """ - When I run `apt install -y /-apps-security` with sudo, retrying exit [100] - And I run `apt-cache policy ` as non-root - Then stdout matches regexp: - """ - Version table: - \s*\*\*\* .* 510 - \s*510 https://esm.ubuntu.com/apps/ubuntu -apps-security/main amd64 Packages - """ - When I create the file `/var/lib/ubuntu-advantage/marker-reboot-cmds-required` with the following: - """ - """ - And I reboot the machine - And I verify that running `systemctl status ua-reboot-cmds.service` `as non-root` exits `0,3` - Then stdout matches regexp: - """ - .*status=0\/SUCCESS.* - """ - When I run `ua api u.pro.attach.auto.should_auto_attach.v1` with sudo - Then stdout matches regexp: - """ - {"_schema_version": "v1", "data": {"attributes": {"should_auto_attach": true}, "meta": {"environment_vars": \[\]}, "type": "ShouldAutoAttach"}, "errors": \[\], "result": "success", "version": ".*", "warnings": \[\]} - """ Examples: ubuntu release - | release | fips-s | cc-eal-s | cis-s | infra-pkg | apps-pkg | livepatch | cis_or_usg | fips-p | - | xenial | disabled | disabled | disabled | libkrad0 | jq | enabled | cis | n/a | - | bionic | disabled | disabled | disabled | libkrad0 | bundler | enabled | 
cis | n/a | - | focal | disabled | n/a | disabled | hello | ant | enabled | usg | n/a | - | jammy | n/a | n/a | disabled | hello | hello | enabled | usg | disabled | + | release | machine_type | fips-s | cc-eal-s | cis-s | livepatch-s | lp-desc | cis_or_usg | + | xenial | aws.pro | disabled | disabled | disabled | enabled | Canonical Livepatch service | cis | + | xenial | azure.pro | disabled | disabled | disabled | enabled | Canonical Livepatch service | cis | + | xenial | gcp.pro | n/a | disabled | disabled | warning | Current kernel is not supported | cis | + | bionic | aws.pro | disabled | disabled | disabled | enabled | Canonical Livepatch service | cis | + | bionic | azure.pro | disabled | disabled | disabled | enabled | Canonical Livepatch service | cis | + | bionic | gcp.pro | disabled | disabled | disabled | enabled | Canonical Livepatch service | cis | + | focal | aws.pro | disabled | n/a | disabled | enabled | Canonical Livepatch service | usg | + | focal | azure.pro | disabled | n/a | disabled | enabled | Canonical Livepatch service | usg | + | focal | gcp.pro | disabled | n/a | disabled | enabled | Canonical Livepatch service | usg | - @series.lts - @uses.config.machine_type.gcp.pro - Scenario Outline: Attached refresh in an Ubuntu pro GCP machine - Given a `` machine with ubuntu-advantage-tools installed + Scenario Outline: Attached refresh in an Ubuntu pro cloud machine + Given a `` `` machine with ubuntu-advantage-tools installed When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: """ contract_url: 'https://contracts.canonical.com' - data_dir: /var/lib/ubuntu-advantage log_level: debug - log_file: /var/log/ubuntu-advantage.log """ And I run `pro auto-attach` with sudo - And I run `pro status --all --wait` as non-root - Then stdout matches regexp: - """ - SERVICE +ENTITLED +STATUS +DESCRIPTION - anbox-cloud +(yes|no) .* - cc-eal +yes + +Common Criteria EAL2 Provisioning Packages - """ - Then stdout matches regexp: - """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - fips +yes + +NIST-certified FIPS crypto packages - fips-preview +yes + +.* - fips-updates +yes + +FIPS compliant crypto packages with stable security updates - livepatch +yes + + - """ - Then stdout matches regexp: - """ - +yes + +Security compliance and audit tools - """ - When I run `pro status --all` as non-root - Then stdout matches regexp: - """ - SERVICE +ENTITLED +STATUS +DESCRIPTION - anbox-cloud +(yes|no) .* - cc-eal +yes + +Common Criteria EAL2 Provisioning Packages - """ - Then stdout matches regexp: - """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - fips +yes + +NIST-certified FIPS crypto packages - fips-preview +yes + +.* - fips-updates +yes + +FIPS compliant crypto packages with stable security updates - livepatch +yes + + - """ - Then stdout matches regexp: - """ - +yes + +Security compliance and audit tools - """ + Then I verify that `esm-apps` is enabled + And I verify that `esm-infra` is enabled + And I verify that `livepatch` is enabled When I run `systemctl start ua-auto-attach.service` with sudo And I verify that running `systemctl status ua-auto-attach.service` `as non-root` exits `0,3` Then stdout matches regexp: @@ -567,8 +103,8 @@ """ https://esm.ubuntu.com/apps/ubuntu -apps-security/main amd64 Packages """ - And I verify that running `apt update` `with 
sudo` exits `0` - When I run `apt install -y /-infra-security` with sudo, retrying exit [100] + And I ensure apt update runs without errors + When I apt install `/-infra-security` And I run `apt-cache policy ` as non-root Then stdout matches regexp: """ @@ -578,7 +114,7 @@ """ Installed: .*[~+]esm """ - When I run `apt install -y /-apps-security` with sudo, retrying exit [100] + When I apt install `/-apps-security` And I run `apt-cache policy ` as non-root Then stdout matches regexp: """ @@ -602,18 +138,22 @@ """ Examples: ubuntu release - | release | fips-s | cc-eal-s | cis-s | infra-pkg | apps-pkg | livepatch | lp-desc | cis_or_usg | fips-p | - | xenial | n/a | disabled | disabled | libkrad0 | jq | warning | Current kernel is not supported | cis | n/a | - | bionic | disabled | disabled | disabled | libkrad0 | bundler | enabled | Canonical Livepatch service | cis | n/a | - | focal | disabled | n/a | disabled | hello | ant | enabled | Canonical Livepatch service | usg | n/a | - | jammy | n/a | n/a | disabled | hello | hello | enabled | Canonical Livepatch service | usg | disabled | + | release | machine_type | infra-pkg | apps-pkg | + | xenial | aws.pro | libkrad0 | jq | + | xenial | azure.pro | libkrad0 | jq | + | xenial | gcp.pro | libkrad0 | jq | + | bionic | aws.pro | libkrad0 | bundler | + | bionic | azure.pro | libkrad0 | bundler | + | bionic | gcp.pro | libkrad0 | bundler | + | focal | aws.pro | hello | ant | + | focal | azure.pro | hello | ant | + | focal | gcp.pro | hello | ant | + | jammy | aws.pro | hello | hello | + | jammy | azure.pro | hello | hello | + | jammy | gcp.pro | hello | hello | - @series.lts - @uses.config.machine_type.gcp.pro - @uses.config.machine_type.aws.pro - @uses.config.machine_type.azure.pro Scenario Outline: Auto-attach service works on Pro Machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I run `systemctl start ua-auto-attach.service` with sudo And I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: """ @@ -631,18 +171,22 @@ """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | aws.pro | + | xenial | azure.pro | + | xenial | gcp.pro | + | bionic | aws.pro | + | bionic | azure.pro | + | bionic | gcp.pro | + | focal | aws.pro | + | focal | azure.pro | + | focal | gcp.pro | + | jammy | aws.pro | + | jammy | azure.pro | + | jammy | gcp.pro | - @series.lts - @uses.config.machine_type.gcp.pro - @uses.config.machine_type.aws.pro - @uses.config.machine_type.azure.pro Scenario Outline: Auto-attach no-op when cloud-init has ubuntu_advantage on userdata - Given a `` machine with ubuntu-advantage-tools installed adding this cloud-init user_data: + Given a `` `` machine with ubuntu-advantage-tools installed adding this cloud-init user_data: # This user_data should not do anything, just guarantee that the ua-auto-attach service # does nothing """ @@ -691,16 +235,22 @@ """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | aws.pro | + | xenial | azure.pro | + | xenial | gcp.pro | + | bionic | aws.pro | + | bionic | azure.pro | + | bionic | gcp.pro | + | focal | aws.pro | + | focal | azure.pro | + | focal | gcp.pro | + | jammy | aws.pro | + | jammy | azure.pro | + | jammy | gcp.pro | - @series.lts - @uses.config.machine_type.aws.generic Scenario Outline: Unregistered Pro machine - Given a `` machine with 
ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I verify that running `pro auto-attach` `with sudo` exits `1` Then stderr matches regexp: """ @@ -709,8 +259,8 @@ """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | + | release | machine_type | + | xenial | aws.generic | + | bionic | aws.generic | + | focal | aws.generic | + | jammy | aws.generic | diff -Nru ubuntu-advantage-tools-30~23.10/features/ubuntu_pro_fips.feature ubuntu-advantage-tools-31.2~23.10/features/ubuntu_pro_fips.feature --- ubuntu-advantage-tools-30~23.10/features/ubuntu_pro_fips.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/ubuntu_pro_fips.feature 2024-01-18 17:34:13.000000000 +0000 @@ -1,36 +1,28 @@ Feature: Command behaviour when auto-attached in an ubuntu PRO fips image - @series.lts - @uses.config.machine_type.azure.pro-fips Scenario Outline: Check fips is enabled correctly on Ubuntu pro fips Azure machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: """ contract_url: 'https://contracts.canonical.com' - data_dir: /var/lib/ubuntu-advantage log_level: debug - log_file: /var/log/ubuntu-advantage.log features: allow_xenial_fips_on_cloud: true """ And I run `pro auto-attach` with sudo And I run `pro status --wait` as non-root - And I run `pro status` as non-root - Then stdout matches regexp: - """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - fips +yes +enabled +NIST-certified FIPS crypto packages - fips-updates +yes +disabled +FIPS compliant crypto packages with stable security updates - """ - And I verify that running `apt update` `with sudo` exits `0` + Then I verify that `esm-apps` is enabled + And I verify that `esm-infra` is enabled + And I verify that `fips` is enabled + And I verify that `fips-updates` is disabled + And I ensure apt update runs without errors And I verify that running `grep Traceback /var/log/ubuntu-advantage.log` `with sudo` exits `1` When I run `uname -r` as non-root Then stdout matches regexp: """ """ - When I run `apt-cache policy ubuntu-azure-fips` as non-root + When I run `apt-cache policy ` as non-root Then stdout does not match regexp: """ .*Installed: \(none\) @@ -80,8 +72,8 @@ """ amd64 Packages """ - And I verify that running `apt update` `with sudo` exits `0` - When I run `apt install -y /-infra-security` with sudo, retrying exit [100] + And I ensure apt update runs without errors + When I apt install `/-infra-security` And I run `apt-cache policy ` as non-root Then stdout matches regexp: """ @@ -95,7 +87,7 @@ """ Installed: .*[~+]esm """ - When I run `apt install -y /-apps-security` with sudo, retrying exit [100] + When I apt install `/-apps-security` And I run `apt-cache policy ` as non-root Then stdout matches regexp: """ @@ -114,13 +106,10 @@ FIPS Updates enabled A reboot is required to complete install. """ + Then I verify that `fips-updates` is enabled When I run `pro status` with sudo Then stdout matches regexp: """ - fips-updates +yes +enabled +FIPS compliant crypto packages with stable security updates - """ - And stdout matches regexp: - """ NOTICES FIPS support requires system reboot to complete configuration. 
""" @@ -130,7 +119,7 @@ """ """ - When I run `apt-cache policy ubuntu-azure-fips` as non-root + When I run `apt-cache policy ` as non-root Then stdout does not match regexp: """ .*Installed: \(none\) @@ -148,33 +137,30 @@ """ Examples: ubuntu release - | release | infra-pkg | apps-pkg | fips-apt-source | fips-kernel-version | - | xenial | libkrad0 | jq | https://esm.ubuntu.com/fips/ubuntu xenial/main | fips | - | bionic | libkrad0 | bundler | https://esm.ubuntu.com/fips/ubuntu bionic/main | azure-fips | - | focal | hello | 389-ds | https://esm.ubuntu.com/fips/ubuntu focal/main | azure-fips | + | release | machine_type | infra-pkg | apps-pkg | fips-apt-source | fips-kernel-version | fips-package | + | xenial | azure.pro-fips | libkrad0 | jq | https://esm.ubuntu.com/fips/ubuntu xenial/main | fips | ubuntu-fips | + | xenial | aws.pro-fips | libkrad0 | jq | https://esm.ubuntu.com/fips/ubuntu xenial/main | fips | ubuntu-fips | + | bionic | azure.pro-fips | libkrad0 | bundler | https://esm.ubuntu.com/fips/ubuntu bionic/main | azure-fips | ubuntu-azure-fips | + | bionic | aws.pro-fips | libkrad0 | bundler | https://esm.ubuntu.com/fips/ubuntu bionic/main | aws-fips | ubuntu-aws-fips | + | bionic | gcp.pro-fips | libkrad0 | bundler | https://esm.ubuntu.com/fips/ubuntu bionic/main | gcp-fips | ubuntu-gcp-fips | + | focal | azure.pro-fips | hello | 389-ds | https://esm.ubuntu.com/fips/ubuntu focal/main | azure-fips | ubuntu-azure-fips | + | focal | aws.pro-fips | hello | 389-ds | https://esm.ubuntu.com/fips/ubuntu focal/main | aws-fips | ubuntu-aws-fips | + | focal | gcp.pro-fips | hello | 389-ds | https://esm.ubuntu.com/fips/ubuntu focal/main | gcp-fips | ubuntu-gcp-fips | - @series.focal - @uses.config.machine_type.azure.pro-fips Scenario Outline: Check fips packages are correctly installed on Azure Focal machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: """ contract_url: 'https://contracts.canonical.com' - data_dir: /var/lib/ubuntu-advantage log_level: debug - log_file: /var/log/ubuntu-advantage.log """ And I run `pro auto-attach` with sudo And I run `pro status --wait` as non-root - And I run `pro status` as non-root - Then stdout matches regexp: - """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - fips +yes +enabled +NIST-certified FIPS crypto packages - fips-updates +yes +disabled +FIPS compliant crypto packages with stable security updates - """ - And I verify that running `apt update` `with sudo` exits `0` + Then I verify that `esm-apps` is enabled + And I verify that `esm-infra` is enabled + And I verify that `fips` is enabled + And I verify that `fips-updates` is disabled + And I ensure apt update runs without errors And I verify that running `grep Traceback /var/log/ubuntu-advantage.log` `with sudo` exits `1` And I verify that `openssh-server` is installed from apt source `` And I verify that `openssh-client` is installed from apt source `` @@ -182,235 +168,13 @@ And I verify that `strongswan-hmac` is installed from apt source `` Examples: ubuntu release - | release | fips-apt-source | - | focal | https://esm.ubuntu.com/fips/ubuntu focal/main | + | release | machine_type | fips-apt-source | + | focal | azure.pro-fips | https://esm.ubuntu.com/fips/ubuntu focal/main | + | focal | aws.pro-fips | https://esm.ubuntu.com/fips/ubuntu 
focal/main | + | focal | gcp.pro-fips | https://esm.ubuntu.com/fips/ubuntu focal/main | - @series.xenial - @series.bionic - @uses.config.machine_type.azure.pro-fips Scenario Outline: Check fips packages are correctly installed on Azure Bionic & Xenial machines - Given a `` machine with ubuntu-advantage-tools installed - When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: - """ - contract_url: 'https://contracts.canonical.com' - data_dir: /var/lib/ubuntu-advantage - log_level: debug - log_file: /var/log/ubuntu-advantage.log - features: - allow_xenial_fips_on_cloud: true - """ - And I run `pro auto-attach` with sudo - And I run `pro status --wait` as non-root - And I run `pro status` as non-root - Then stdout matches regexp: - """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - fips +yes +enabled +NIST-certified FIPS crypto packages - fips-updates +yes +disabled +FIPS compliant crypto packages with stable security updates - """ - And I verify that running `apt update` `with sudo` exits `0` - And I verify that running `grep Traceback /var/log/ubuntu-advantage.log` `with sudo` exits `1` - And I verify that `openssh-server` is installed from apt source `` - And I verify that `openssh-client` is installed from apt source `` - And I verify that `strongswan` is installed from apt source `` - And I verify that `openssh-server-hmac` is installed from apt source `` - And I verify that `openssh-client-hmac` is installed from apt source `` - And I verify that `strongswan-hmac` is installed from apt source `` - - Examples: ubuntu release - | release | fips-apt-source | - | xenial | https://esm.ubuntu.com/fips/ubuntu xenial/main | - | bionic | https://esm.ubuntu.com/fips/ubuntu bionic/main | - - @series.lts - @uses.config.machine_type.aws.pro-fips - Scenario Outline: Check fips is enabled correctly on Ubuntu pro fips AWS machine - Given a `` machine with ubuntu-advantage-tools installed - When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: - """ - contract_url: 'https://contracts.canonical.com' - data_dir: /var/lib/ubuntu-advantage - log_level: debug - log_file: /var/log/ubuntu-advantage.log - """ - And I run `pro auto-attach` with sudo - And I run `pro status --wait` as non-root - And I run `pro status` as non-root - Then stdout matches regexp: - """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - fips +yes +enabled +NIST-certified FIPS crypto packages - fips-updates +yes +disabled +FIPS compliant crypto packages with stable security updates - """ - And I verify that running `apt update` `with sudo` exits `0` - And I verify that running `grep Traceback /var/log/ubuntu-advantage.log` `with sudo` exits `1` - When I run `uname -r` as non-root - Then stdout matches regexp: - """ - - """ - When I run `apt-cache policy ubuntu-aws-fips` as non-root - Then stdout does not match regexp: - """ - .*Installed: \(none\) - """ - When I run `cat /proc/sys/crypto/fips_enabled` with sudo - Then I will see the following on stdout: - """ - 1 - """ - When I run `systemctl daemon-reload` with sudo - When I run `systemctl start ua-auto-attach.service` with sudo - And I verify that running `systemctl status ua-auto-attach.service` `as non-root` exits `0,3` - Then stdout matches regexp: - """ - .*status=0\/SUCCESS.* - """ - And stdout matches regexp: - """ - Active: 
inactive \(dead\).* - \s*Condition: start condition failed.* - .*ConditionPathExists=!/var/lib/ubuntu-advantage/private/machine-token.json was not met - """ - When I verify that running `pro auto-attach` `with sudo` exits `2` - Then stderr matches regexp: - """ - This machine is already attached to '.*' - To use a different subscription first run: sudo pro detach. - """ - When I run `apt-cache policy` with sudo - Then apt-cache policy for the following url has priority `510` - """ - https://esm.ubuntu.com/infra/ubuntu -infra-updates/main amd64 Packages - """ - And apt-cache policy for the following url has priority `510` - """ - https://esm.ubuntu.com/infra/ubuntu -infra-security/main amd64 Packages - """ - And apt-cache policy for the following url has priority `510` - """ - https://esm.ubuntu.com/apps/ubuntu -apps-updates/main amd64 Packages - """ - And apt-cache policy for the following url has priority `510` - """ - https://esm.ubuntu.com/apps/ubuntu -apps-security/main amd64 Packages - """ - And apt-cache policy for the following url has priority `1001` - """ - amd64 Packages - """ - And I verify that running `apt update` `with sudo` exits `0` - When I run `apt install -y /-infra-security` with sudo, retrying exit [100] - And I run `apt-cache policy ` as non-root - Then stdout matches regexp: - """ - \s*510 https://esm.ubuntu.com/infra/ubuntu -infra-security/main amd64 Packages - """ - Then stdout matches regexp: - """ - \s*510 https://esm.ubuntu.com/infra/ubuntu -infra-updates/main amd64 Packages - """ - And stdout matches regexp: - """ - Installed: .*[~+]esm - """ - When I run `apt install -y /-apps-security` with sudo, retrying exit [100] - And I run `apt-cache policy ` as non-root - Then stdout matches regexp: - """ - Version table: - \s*\*\*\* .* 510 - \s*510 https://esm.ubuntu.com/apps/ubuntu -apps-security/main amd64 Packages - """ - When I run `pro enable fips-updates --assume-yes` with sudo - Then I will see the following on stdout: - """ - One moment, checking your subscription first - Disabling incompatible service: FIPS - Updating FIPS Updates package lists - Installing FIPS Updates packages - Updating standard Ubuntu package lists - FIPS Updates enabled - A reboot is required to complete install. - """ - When I run `pro status` with sudo - Then stdout matches regexp: - """ - fips-updates +yes +enabled +FIPS compliant crypto packages with stable security updates - """ - And stdout matches regexp: - """ - NOTICES - FIPS support requires system reboot to complete configuration. - """ - When I reboot the machine - And I run `uname -r` as non-root - Then stdout matches regexp: - """ - - """ - When I run `apt-cache policy ubuntu-aws-fips` as non-root - Then stdout does not match regexp: - """ - .*Installed: \(none\) - """ - When I run `cat /proc/sys/crypto/fips_enabled` with sudo - Then I will see the following on stdout: - """ - 1 - """ - When I run `pro status` with sudo - Then stdout does not match regexp: - """ - NOTICES - FIPS support requires system reboot to complete configuration. 
- """ - - Examples: ubuntu release - | release | infra-pkg | apps-pkg | fips-apt-source | fips-kernel-version | - | xenial | libkrad0 | jq | https://esm.ubuntu.com/fips/ubuntu xenial/main | fips | - | bionic | libkrad0 | bundler | https://esm.ubuntu.com/fips/ubuntu bionic/main | aws-fips | - | focal | hello | 389-ds | https://esm.ubuntu.com/fips/ubuntu focal/main | aws-fips | - - @series.focal - @uses.config.machine_type.aws.pro-fips - Scenario Outline: Check fips packages are correctly installed on AWS Focal machine - Given a `` machine with ubuntu-advantage-tools installed - When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: - """ - contract_url: 'https://contracts.canonical.com' - data_dir: /var/lib/ubuntu-advantage - log_level: debug - log_file: /var/log/ubuntu-advantage.log - """ - And I run `pro auto-attach` with sudo - And I run `pro status --wait` as non-root - And I run `pro status` as non-root - Then stdout matches regexp: - """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - fips +yes +enabled +NIST-certified FIPS crypto packages - fips-updates +yes +disabled +FIPS compliant crypto packages with stable security updates - """ - And I verify that running `apt update` `with sudo` exits `0` - And I verify that running `grep Traceback /var/log/ubuntu-advantage.log` `with sudo` exits `1` - And I verify that `openssh-server` is installed from apt source `` - And I verify that `openssh-client` is installed from apt source `` - And I verify that `strongswan` is installed from apt source `` - And I verify that `strongswan-hmac` is installed from apt source `` - - Examples: ubuntu release - | release | fips-apt-source | - | focal | https://esm.ubuntu.com/fips/ubuntu focal/main | - - @series.xenial - @series.bionic - @uses.config.machine_type.aws.pro-fips - Scenario Outline: Check fips packages are correctly installed on AWS Bionic & Xenial machines - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: """ contract_url: 'https://contracts.canonical.com' @@ -422,15 +186,11 @@ """ And I run `pro auto-attach` with sudo And I run `pro status --wait` as non-root - And I run `pro status` as non-root - Then stdout matches regexp: - """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - fips +yes +enabled +NIST-certified FIPS crypto packages - fips-updates +yes +disabled +FIPS compliant crypto packages with stable security updates - """ - And I verify that running `apt update` `with sudo` exits `0` + Then I verify that `esm-apps` is enabled + And I verify that `esm-infra` is enabled + And I verify that `fips` is enabled + And I verify that `fips-updates` is disabled + And I ensure apt update runs without errors And I verify that running `grep Traceback /var/log/ubuntu-advantage.log` `with sudo` exits `1` And I verify that `openssh-server` is installed from apt source `` And I verify that `openssh-client` is installed from apt source `` @@ -440,33 +200,26 @@ And I verify that `strongswan-hmac` is installed from apt source `` Examples: ubuntu release - | release | fips-apt-source | - | xenial | https://esm.ubuntu.com/fips/ubuntu xenial/main | - | bionic | https://esm.ubuntu.com/fips/ubuntu bionic/main | + | release | 
machine_type | fips-apt-source | + | xenial | azure.pro-fips | https://esm.ubuntu.com/fips/ubuntu xenial/main | + | xenial | aws.pro-fips | https://esm.ubuntu.com/fips/ubuntu xenial/main | + | bionic | azure.pro-fips | https://esm.ubuntu.com/fips/ubuntu bionic/main | + | bionic | aws.pro-fips | https://esm.ubuntu.com/fips/ubuntu bionic/main | + | bionic | gcp.pro-fips | https://esm.ubuntu.com/fips/ubuntu bionic/main | - @series.focal - @uses.config.machine_type.azure.pro-fips - @uses.config.machine_type.aws.pro-fips - @uses.config.machine_type.gcp.pro-fips Scenario Outline: Check fips-updates can be enabled in a focal PRO FIPS machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: """ contract_url: 'https://contracts.canonical.com' - data_dir: /var/lib/ubuntu-advantage log_level: debug - log_file: /var/log/ubuntu-advantage.log """ And I run `pro auto-attach` with sudo And I run `pro status --wait` as non-root - And I run `pro status` as non-root - Then stdout matches regexp: - """ - fips +yes +enabled +NIST-certified FIPS crypto packages - fips-updates +yes +disabled +FIPS compliant crypto packages with stable security updates - """ + Then I verify that `fips` is enabled + And I verify that `fips-updates` is disabled When I run `pro enable fips-updates --assume-yes` with sudo - Then stdout matches regexp: + Then stdout contains substring: """ One moment, checking your subscription first Disabling incompatible service: FIPS @@ -476,11 +229,8 @@ FIPS Updates enabled A reboot is required to complete install. """ - When I run `pro status` with sudo - Then stdout matches regexp: - """ - fips-updates +yes +enabled +FIPS compliant crypto packages with stable security updates - """ + And I verify that `fips` is disabled + And I verify that `fips-updates` is enabled When I reboot the machine And I run `uname -r` as non-root Then stdout matches regexp: @@ -494,222 +244,7 @@ """ Examples: ubuntu release - | release | - | focal | - - @series.focal - @series.bionic - @uses.config.machine_type.gcp.pro-fips - Scenario Outline: Check fips is enabled correctly on Ubuntu pro fips GCP machine - Given a `` machine with ubuntu-advantage-tools installed - When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: - """ - contract_url: 'https://contracts.canonical.com' - data_dir: /var/lib/ubuntu-advantage - log_level: debug - log_file: /var/log/ubuntu-advantage.log - """ - And I run `pro auto-attach` with sudo - And I run `pro status --wait` as non-root - And I run `pro status` as non-root - Then stdout matches regexp: - """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - fips +yes +enabled +NIST-certified FIPS crypto packages - fips-updates +yes +disabled +FIPS compliant crypto packages with stable security updates - """ - And I verify that running `apt update` `with sudo` exits `0` - And I verify that running `grep Traceback /var/log/ubuntu-advantage.log` `with sudo` exits `1` - When I run `uname -r` as non-root - Then stdout matches regexp: - """ - - """ - When I run `apt-cache policy ubuntu-gcp-fips` as non-root - Then stdout does not match regexp: - """ - .*Installed: \(none\) - """ - When I run `cat /proc/sys/crypto/fips_enabled` with sudo - Then I will see the following on stdout: - """ - 1 - """ - When I run `systemctl 
daemon-reload` with sudo - When I run `systemctl start ua-auto-attach.service` with sudo - And I verify that running `systemctl status ua-auto-attach.service` `as non-root` exits `0,3` - Then stdout matches regexp: - """ - .*status=0\/SUCCESS.* - """ - And stdout matches regexp: - """ - Active: inactive \(dead\).* - \s*Condition: start condition failed.* - .*ConditionPathExists=!/var/lib/ubuntu-advantage/private/machine-token.json was not met - """ - When I verify that running `pro auto-attach` `with sudo` exits `2` - Then stderr matches regexp: - """ - This machine is already attached to '.*' - To use a different subscription first run: sudo pro detach. - """ - When I run `apt-cache policy` with sudo - Then apt-cache policy for the following url has priority `510` - """ - https://esm.ubuntu.com/infra/ubuntu -infra-updates/main amd64 Packages - """ - And apt-cache policy for the following url has priority `510` - """ - https://esm.ubuntu.com/infra/ubuntu -infra-security/main amd64 Packages - """ - And apt-cache policy for the following url has priority `510` - """ - https://esm.ubuntu.com/apps/ubuntu -apps-updates/main amd64 Packages - """ - And apt-cache policy for the following url has priority `510` - """ - https://esm.ubuntu.com/apps/ubuntu -apps-security/main amd64 Packages - """ - And apt-cache policy for the following url has priority `1001` - """ - amd64 Packages - """ - And I verify that running `apt update` `with sudo` exits `0` - When I run `apt install -y /-infra-security` with sudo, retrying exit [100] - And I run `apt-cache policy ` as non-root - Then stdout matches regexp: - """ - \s*510 https://esm.ubuntu.com/infra/ubuntu -infra-security/main amd64 Packages - """ - Then stdout matches regexp: - """ - \s*510 https://esm.ubuntu.com/infra/ubuntu -infra-updates/main amd64 Packages - """ - And stdout matches regexp: - """ - Installed: .*[~+]esm - """ - When I run `apt install -y /-apps-security` with sudo, retrying exit [100] - And I run `apt-cache policy ` as non-root - Then stdout matches regexp: - """ - Version table: - \s*\*\*\* .* 510 - \s*510 https://esm.ubuntu.com/apps/ubuntu -apps-security/main amd64 Packages - """ - When I run `pro enable fips-updates --assume-yes` with sudo - Then I will see the following on stdout: - """ - One moment, checking your subscription first - Disabling incompatible service: FIPS - Updating FIPS Updates package lists - Installing FIPS Updates packages - Updating standard Ubuntu package lists - FIPS Updates enabled - A reboot is required to complete install. - """ - When I run `pro status` with sudo - Then stdout matches regexp: - """ - fips-updates +yes +enabled +FIPS compliant crypto packages with stable security updates - """ - And stdout matches regexp: - """ - NOTICES - FIPS support requires system reboot to complete configuration. - """ - When I reboot the machine - And I run `uname -r` as non-root - Then stdout matches regexp: - """ - - """ - When I run `apt-cache policy ubuntu-gcp-fips` as non-root - Then stdout does not match regexp: - """ - .*Installed: \(none\) - """ - When I run `cat /proc/sys/crypto/fips_enabled` with sudo - Then I will see the following on stdout: - """ - 1 - """ - When I run `pro status` with sudo - Then stdout does not match regexp: - """ - NOTICES - FIPS support requires system reboot to complete configuration. 
- """ - - Examples: ubuntu release - | release | infra-pkg | apps-pkg | fips-apt-source | fips-kernel-version | - | bionic | libkrad0 | bundler | https://esm.ubuntu.com/fips/ubuntu bionic/main | gcp-fips | - | focal | hello | 389-ds | https://esm.ubuntu.com/fips/ubuntu focal/main | gcp-fips | - - @series.focal - @uses.config.machine_type.gcp.pro-fips - Scenario Outline: Check fips packages are correctly installed on GCP Pro Focal machine - Given a `` machine with ubuntu-advantage-tools installed - When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: - """ - contract_url: 'https://contracts.canonical.com' - data_dir: /var/lib/ubuntu-advantage - log_level: debug - log_file: /var/log/ubuntu-advantage.log - """ - And I run `pro auto-attach` with sudo - And I run `pro status --wait` as non-root - And I run `pro status` as non-root - Then stdout matches regexp: - """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - fips +yes +enabled +NIST-certified FIPS crypto packages - fips-updates +yes +disabled +FIPS compliant crypto packages with stable security updates - """ - And I verify that running `apt update` `with sudo` exits `0` - And I verify that running `grep Traceback /var/log/ubuntu-advantage.log` `with sudo` exits `1` - And I verify that `openssh-server` is installed from apt source `` - And I verify that `openssh-client` is installed from apt source `` - And I verify that `strongswan` is installed from apt source `` - And I verify that `strongswan-hmac` is installed from apt source `` - - Examples: ubuntu release - | release | fips-apt-source | - | focal | https://esm.ubuntu.com/fips/ubuntu focal/main | - - @series.bionic - @uses.config.machine_type.gcp.pro-fips - Scenario Outline: Check fips packages are correctly installed on GCP Pro Bionic machines - Given a `` machine with ubuntu-advantage-tools installed - When I create the file `/etc/ubuntu-advantage/uaclient.conf` with the following: - """ - contract_url: 'https://contracts.canonical.com' - data_dir: /var/lib/ubuntu-advantage - log_level: debug - log_file: /var/log/ubuntu-advantage.log - """ - And I run `pro auto-attach` with sudo - And I run `pro status --wait` as non-root - And I run `pro status` as non-root - Then stdout matches regexp: - """ - esm-apps +yes +enabled +Expanded Security Maintenance for Applications - esm-infra +yes +enabled +Expanded Security Maintenance for Infrastructure - fips +yes +enabled +NIST-certified FIPS crypto packages - fips-updates +yes +disabled +FIPS compliant crypto packages with stable security updates - """ - And I verify that running `apt update` `with sudo` exits `0` - And I verify that running `grep Traceback /var/log/ubuntu-advantage.log` `with sudo` exits `1` - And I verify that `openssh-server` is installed from apt source `` - And I verify that `openssh-client` is installed from apt source `` - And I verify that `strongswan` is installed from apt source `` - And I verify that `openssh-server-hmac` is installed from apt source `` - And I verify that `openssh-client-hmac` is installed from apt source `` - And I verify that `strongswan-hmac` is installed from apt source `` - - Examples: ubuntu release - | release | fips-apt-source | - | bionic | https://esm.ubuntu.com/fips/ubuntu bionic/main | + | release | machine_type | + | focal | aws.pro-fips | + | focal | azure.pro-fips | + | focal | gcp.pro-fips | diff -Nru 
ubuntu-advantage-tools-30~23.10/features/ubuntu_upgrade.feature ubuntu-advantage-tools-31.2~23.10/features/ubuntu_upgrade.feature
--- ubuntu-advantage-tools-30~23.10/features/ubuntu_upgrade.feature 2023-11-07 14:23:34.000000000 +0000
+++ ubuntu-advantage-tools-31.2~23.10/features/ubuntu_upgrade.feature 2024-01-18 17:34:13.000000000 +0000
@@ -2,16 +2,14 @@
 Feature: Upgrade between releases when uaclient is attached
 @slow
- @series.all
- @uses.config.machine_type.lxd-container
 @upgrade
 Scenario Outline: Attached upgrade
- Given a `<release>` machine with ubuntu-advantage-tools installed
+ Given a `<release>` `<machine_type>` machine with ubuntu-advantage-tools installed
 When I attach `contract_token` with sudo
 And I run `<before_cmd>` with sudo
 # Local PPAs are prepared and served only when testing with local debs
 And I prepare the local PPAs to upgrade from `<release>` to `<next_release>`
- And I run `apt-get dist-upgrade --assume-yes` with sudo
+ And I run `DEBIAN_FRONTEND=noninteractive apt-get dist-upgrade --assume-yes` with sudo
 # Some packages upgrade may require a reboot
 And I reboot the machine
 And I create the file `/etc/update-manager/release-upgrades.d/ua-test.cfg` with the following
@@ -48,37 +46,33 @@
 """
 Examples: ubuntu release
- | release | next_release | prompt | devel_release | service1 | service1_status | service2 | service2_status | before_cmd |
- | xenial | bionic | lts | | esm-infra | enabled | esm-apps | enabled | true |
- | bionic | focal | lts | | esm-infra | enabled | esm-apps | enabled | true |
- | bionic | focal | lts | | usg | enabled | usg | enabled | pro enable cis |
- | focal | jammy | lts | | esm-infra | enabled | esm-apps | enabled | true |
- | jammy | lunar | normal | | esm-infra | n/a | esm-apps | n/a | true |
- | lunar | mantic | normal | | esm-infra | n/a | esm-apps | n/a | true |
+ | release | machine_type | next_release | prompt | devel_release | service1 | service1_status | service2 | service2_status | before_cmd |
+ | xenial | lxd-container | bionic | lts | | esm-infra | enabled | esm-apps | enabled | true |
+ | bionic | lxd-container | focal | lts | | esm-infra | enabled | esm-apps | enabled | true |
+ | bionic | lxd-container | focal | lts | | usg | enabled | usg | enabled | pro enable cis |
+ | focal | lxd-container | jammy | lts | | esm-infra | enabled | esm-apps | enabled | true |
+ | jammy | lxd-container | mantic | normal | | esm-infra | n/a | esm-apps | n/a | true |
+ | mantic | lxd-container | noble | normal | --devel-release | esm-infra | n/a | esm-apps | n/a | true |
 @slow
- @series.xenial
- @uses.config.machine_type.lxd-vm
 @upgrade
 Scenario Outline: Attached FIPS upgrade across LTS releases
- Given a `<release>` machine with ubuntu-advantage-tools installed
+ Given a `<release>` `<machine_type>` machine with ubuntu-advantage-tools installed
 When I attach `contract_token` with sudo
- And I run `apt-get install lsof` with sudo, retrying exit [100]
+ And I apt install `lsof`
 And I run `pro disable livepatch` with sudo
 And I run `pro enable <fips-service> --assume-yes` with sudo
- Then stdout matches regexp:
+ Then stdout contains substring:
 """
 Updating <fips-name> package lists
 Installing <fips-name> packages
+ Updating standard Ubuntu package lists
 <fips-name> enabled
- A reboot is required to complete install
+ A reboot is required to complete install.
 """
 When I run `pro status --all` with sudo
- Then stdout matches regexp:
- """
- <fips-service> +yes enabled
- """
- And I verify that running `apt update` `with sudo` exits `0`
+ Then I verify that `<fips-service>` is enabled
+ And I ensure apt update runs without errors
 When I reboot the machine
 And I run `uname -r` as non-root
 Then stdout matches regexp:
@@ -92,7 +86,7 @@
 """
 # Local PPAs are prepared and served only when testing with local debs
 When I prepare the local PPAs to upgrade from `<release>` to `<next_release>`
- And I run `apt-get dist-upgrade -y --allow-downgrades` with sudo
+ And I run `DEBIAN_FRONTEND=noninteractive apt-get dist-upgrade -y --allow-downgrades` with sudo
 # A package may need a reboot after running dist-upgrade
 And I reboot the machine
 And I create the file `/etc/update-manager/release-upgrades.d/ua-test.cfg` with the following
@@ -112,10 +106,7 @@
 """
 """
 When I run `pro status --all` with sudo
- Then stdout matches regexp:
- """
- <fips-service> +yes enabled
- """
+ Then I verify that `<fips-service>` is enabled
 When I run `uname -r` as non-root
 Then stdout matches regexp:
 """
@@ -128,6 +119,6 @@
 """
 Examples: ubuntu release
- | release | next_release | fips-service | fips-name | source-file |
- | xenial | bionic | fips | FIPS | ubuntu-fips |
- | xenial | bionic | fips-updates | FIPS Updates | ubuntu-fips-updates |
+ | release | machine_type | next_release | fips-service | fips-name | source-file |
+ | xenial | lxd-vm | bionic | fips | FIPS | ubuntu-fips |
+ | xenial | lxd-vm | bionic | fips-updates | FIPS Updates | ubuntu-fips-updates |
diff -Nru ubuntu-advantage-tools-30~23.10/features/ubuntu_upgrade_unattached.feature ubuntu-advantage-tools-31.2~23.10/features/ubuntu_upgrade_unattached.feature
--- ubuntu-advantage-tools-30~23.10/features/ubuntu_upgrade_unattached.feature 2023-11-07 14:23:34.000000000 +0000
+++ ubuntu-advantage-tools-31.2~23.10/features/ubuntu_upgrade_unattached.feature 2024-01-18 17:34:13.000000000 +0000
@@ -1,26 +1,26 @@
 Feature: Upgrade between releases when uaclient is unattached
 @slow
- @series.all
- @uses.config.machine_type.lxd-container
 @upgrade
 @uses.config.contract_token
 Scenario Outline: Unattached upgrade
- Given a `<release>` machine with ubuntu-advantage-tools installed
+ Given a `<release>` `<machine_type>` machine with ubuntu-advantage-tools installed
 # Local PPAs are prepared and served only when testing with local debs
 When I prepare the local PPAs to upgrade from `<release>` to `<next_release>`
- And I run `apt update` with sudo
+ # in case this still exists
+ And I delete the file `/var/lib/ubuntu-advantage/apt-esm/etc/apt/sources.list.d/ubuntu-esm-infra.list`
+ And I apt update
 And I run `sleep 30` as non-root
- And I run shell command `cat /var/lib/ubuntu-advantage/apt-esm/etc/apt/sources.list.d/ubuntu-esm-infra.list || true` with sudo
- Then if `<release>` in `xenial` and stdout matches regexp:
+ And I run shell command `cat /var/lib/ubuntu-advantage/apt-esm/etc/apt/sources.list.d/ubuntu-esm-infra.sources || true` with sudo
+ Then if `<release>` in `xenial or bionic` and stdout matches regexp:
 """
- deb https://esm.ubuntu.com/infra/ubuntu <release>-infra-security main
+ Types: deb
+ URIs: https://esm.ubuntu.com/infra/ubuntu
+ Suites: <release>-infra-security <release>-infra-updates
+ Components: main
+ Signed-By: /usr/share/keyrings/ubuntu-pro-esm-infra.gpg
 """
- And if `<release>` in `xenial` and stdout matches regexp:
- """
- deb https://esm.ubuntu.com/infra/ubuntu <release>-infra-updates main
- """
- When I run `apt-get dist-upgrade --assume-yes` with sudo
+ When I apt dist-upgrade
 # Some packages upgrade may require a reboot
 And I reboot the machine
 And I create the file
`/etc/update-manager/release-upgrades.d/ua-test.cfg` with the following @@ -41,15 +41,15 @@ """ """ And I verify that the folder `/var/lib/ubuntu-advantage/apt-esm` does not exist - When I run `apt update` with sudo - And I run shell command `cat /var/lib/ubuntu-advantage/apt-esm/etc/apt/sources.list.d/ubuntu-esm-apps.list || true` with sudo - Then if `` not in `lunar or mantic` and stdout matches regexp: - """ - deb https://esm.ubuntu.com/apps/ubuntu -apps-security main - """ - And if `` not in `lunar or mantic` and stdout matches regexp: - """ - deb https://esm.ubuntu.com/apps/ubuntu -apps-updates main + When I apt update + And I run shell command `cat /var/lib/ubuntu-advantage/apt-esm/etc/apt/sources.list.d/ubuntu-esm-apps.sources || true` with sudo + Then if `` not in `mantic or noble` and stdout matches regexp: + """ + Types: deb + URIs: https://esm.ubuntu.com/apps/ubuntu + Suites: -apps-security -apps-updates + Components: main + Signed-By: /usr/share/keyrings/ubuntu-pro-esm-apps.gpg """ When I attach `contract_token` with sudo And I run `pro status --all` with sudo @@ -59,9 +59,9 @@ """ Examples: ubuntu release - | release | next_release | prompt | devel_release | service_status | - | xenial | bionic | lts | | enabled | - | bionic | focal | lts | | enabled | - | focal | jammy | lts | | enabled | - | jammy | lunar | normal | | n/a | - | lunar | mantic | normal | | n/a | + | release | machine_type | next_release | prompt | devel_release | service_status | + | xenial | lxd-container | bionic | lts | | enabled | + | bionic | lxd-container | focal | lts | | enabled | + | focal | lxd-container | jammy | lts | | enabled | + | jammy | lxd-container | mantic | normal | | n/a | + | mantic | lxd-container | noble | normal | --devel-release | n/a | diff -Nru ubuntu-advantage-tools-30~23.10/features/unattached_commands.feature ubuntu-advantage-tools-31.2~23.10/features/unattached_commands.feature --- ubuntu-advantage-tools-30~23.10/features/unattached_commands.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/unattached_commands.feature 2024-02-14 15:37:46.000000000 +0000 @@ -1,9 +1,7 @@ Feature: Command behaviour when unattached - @series.all - @uses.config.machine_type.lxd-container Scenario Outline: Unattached auto-attach does nothing in a ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed # Validate systemd unit/timer syntax When I run `systemd-analyze verify /lib/systemd/system/ua-timer.timer` with sudo Then stderr does not match regexp: @@ -23,18 +21,15 @@ """ Examples: ubuntu release - | release | - | bionic | - | focal | - | xenial | - | jammy | - | lunar | - | mantic | + | release | machine_type | + | bionic | lxd-container | + | focal | lxd-container | + | xenial | lxd-container | + | jammy | lxd-container | + | mantic | lxd-container | - @series.all - @uses.config.machine_type.lxd-container Scenario Outline: Unattached commands that requires enabled user in a ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I verify that running `pro ` `as non-root` exits `1` Then I will see the following on stderr: """ @@ -48,24 +43,20 @@ """ Examples: pro commands - | release | command | - | bionic | detach | - | bionic | refresh | - | focal | detach | - | focal | refresh | - | xenial | detach | - | xenial | refresh | - | jammy | detach | - | jammy | refresh | - | lunar | detach | - | 
lunar | refresh | - | mantic | detach | - | mantic | refresh | + | release | machine_type | command | + | bionic | lxd-container | detach | + | bionic | lxd-container | refresh | + | focal | lxd-container | detach | + | focal | lxd-container | refresh | + | xenial | lxd-container | detach | + | xenial | lxd-container | refresh | + | jammy | lxd-container | detach | + | jammy | lxd-container | refresh | + | mantic | lxd-container | detach | + | mantic | lxd-container | refresh | - @series.all - @uses.config.machine_type.lxd-container Scenario Outline: Help command on an unattached machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I run `pro help esm-infra` as non-root Then I will see the following on stdout: """ @@ -101,18 +92,15 @@ """ Examples: ubuntu release - | release | infra-available | - | xenial | yes | - | bionic | yes | - | focal | yes | - | jammy | yes | - | lunar | no | - | mantic | no | + | release | machine_type | infra-available | + | xenial | lxd-container | yes | + | bionic | lxd-container | yes | + | focal | lxd-container | yes | + | jammy | lxd-container | yes | + | mantic | lxd-container | no | - @series.all - @uses.config.machine_type.lxd-container Scenario Outline: Unattached enable/disable fails in a ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I verify that running `pro esm-infra` `as non-root` exits `1` Then I will see the following on stderr: """ @@ -169,24 +157,20 @@ """ Examples: ubuntu release - | release | command | - | xenial | enable | - | xenial | disable | - | bionic | enable | - | bionic | disable | - | focal | enable | - | focal | disable | - | jammy | enable | - | jammy | disable | - | lunar | enable | - | lunar | disable | - | mantic | enable | - | mantic | disable | + | release | machine_type | command | + | xenial | lxd-container | enable | + | xenial | lxd-container | disable | + | bionic | lxd-container | enable | + | bionic | lxd-container | disable | + | focal | lxd-container | enable | + | focal | lxd-container | disable | + | jammy | lxd-container | enable | + | jammy | lxd-container | disable | + | mantic | lxd-container | enable | + | mantic | lxd-container | disable | - @series.all - @uses.config.machine_type.lxd-container Scenario Outline: Check for newer versions of the client in an ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed # Make sure we have a fresh, just rebooted, environment When I reboot the machine Then I verify that no files exist matching `/run/ubuntu-advantage/candidate-version` @@ -195,7 +179,7 @@ """ .*\[info\].* A new version is available: 2:99.9.9 Please run: - sudo apt-get install ubuntu-advantage-tools + sudo apt install ubuntu-pro-client to get the latest bug fixes and new features. """ And I verify that files exist matching `/run/ubuntu-advantage/candidate-version` @@ -210,7 +194,7 @@ """ .*\[info\].* A new version is available: 2:99.9.9 Please run: - sudo apt-get install ubuntu-advantage-tools + sudo apt install ubuntu-pro-client to get the latest bug fixes and new features. """ When I run `pro status --format json` as non-root @@ -218,7 +202,7 @@ """ .*\[info\].* A new version is available: 2:99.9.9 Please run: - sudo apt-get install ubuntu-advantage-tools + sudo apt install ubuntu-pro-client to get the latest bug fixes and new features. 
""" When I run `pro config show` as non-root @@ -226,7 +210,7 @@ """ .*\[info\].* A new version is available: 2:99.9.9 Please run: - sudo apt-get install ubuntu-advantage-tools + sudo apt install ubuntu-pro-client to get the latest bug fixes and new features. """ When I run `pro api u.pro.version.v1` as non-root @@ -244,96 +228,31 @@ """ .*\[info\].* A new version is available: 2:99.9.9 Please run: - sudo apt-get install ubuntu-advantage-tools + sudo apt install ubuntu-pro-client to get the latest bug fixes and new features. """ - When I run `apt-get update` with sudo - # apt-get update will bring a new candidate, which is the current installed version + When I apt update + # The update will bring a new candidate, which is the current installed version And I run `pro status` as non-root Then stderr does not match regexp: """ .*\[info\].* A new version is available: 2:99.9.9 Please run: - sudo apt-get install ubuntu-advantage-tools + sudo apt install ubuntu-pro-client to get the latest bug fixes and new features. """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | - | lunar | - | mantic | - - @series.xenial - @series.bionic - @uses.config.machine_type.lxd-container - # Side effect: this verifies that `ua` still works as a command - Scenario Outline: Verify autocomplete options - Given a `` machine with ubuntu-advantage-tools installed - When I prepare the autocomplete test - And I press tab twice to autocomplete the `ua` command - Then stdout matches regexp: - """ - --debug +auto-attach +enable +status\r - --help +collect-logs +fix +system\r - --version +config +help +version\r - api +detach +refresh +\r - attach +disable +security-status - """ - When I press tab twice to autocomplete the `pro` command - Then stdout matches regexp: - """ - --debug +auto-attach +enable +status\r - --help +collect-logs +fix +system\r - --version +config +help +version\r - api +detach +refresh +\r - attach +disable +security-status - """ - When I press tab twice to autocomplete the `ua enable` command - Then stdout matches regexp: - """ - anbox-cloud +esm-apps +fips-updates +realtime-kernel\r - cc-eal +esm-infra +landscape +ros\r - cis +fips +livepatch +ros-updates - """ - When I press tab twice to autocomplete the `pro enable` command - Then stdout matches regexp: - """ - anbox-cloud +esm-apps +fips-updates +realtime-kernel\r - cc-eal +esm-infra +landscape +ros\r - cis +fips +livepatch +ros-updates - """ - When I press tab twice to autocomplete the `ua disable` command - Then stdout matches regexp: - """ - anbox-cloud +esm-apps +fips-updates +realtime-kernel\r - cc-eal +esm-infra +landscape +ros\r - cis +fips +livepatch +ros-updates - """ - When I press tab twice to autocomplete the `pro disable` command - Then stdout matches regexp: - """ - anbox-cloud +esm-apps +fips-updates +realtime-kernel\r - cc-eal +esm-infra +landscape +ros\r - cis +fips +livepatch +ros-updates - """ + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | + | mantic | lxd-container | - Examples: ubuntu release - | release | - # | xenial | Can't rely on Xenial because of bash sorting things weirdly - | bionic | - - @series.focal - @series.jammy - @series.lunar - @series.mantic - @uses.config.machine_type.lxd-container # Side effect: this verifies that `ua` still works as a command Scenario Outline: Verify autocomplete options - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine 
with ubuntu-advantage-tools installed When I prepare the autocomplete test And I press tab twice to autocomplete the `ua` command Then stdout matches regexp: @@ -356,45 +275,48 @@ When I press tab twice to autocomplete the `ua enable` command Then stdout matches regexp: """ - anbox-cloud +esm-infra +landscape +ros\r - cc-eal +fips +livepatch +ros-updates\r - esm-apps +fips-updates +realtime-kernel +usg + anbox-cloud +esm-infra +livepatch +usg\s* + cc-eal +fips +realtime-kernel\s* + cis +fips-updates +ros\s* + esm-apps +landscape +ros-updates\s* """ When I press tab twice to autocomplete the `pro enable` command Then stdout matches regexp: """ - anbox-cloud +esm-infra +landscape +ros\r - cc-eal +fips +livepatch +ros-updates\r - esm-apps +fips-updates +realtime-kernel +usg + anbox-cloud +esm-infra +livepatch +usg\s* + cc-eal +fips +realtime-kernel\s* + cis +fips-updates +ros\s* + esm-apps +landscape +ros-updates\s* """ When I press tab twice to autocomplete the `ua disable` command Then stdout matches regexp: """ - anbox-cloud +esm-infra +landscape +ros\r - cc-eal +fips +livepatch +ros-updates\r - esm-apps +fips-updates +realtime-kernel +usg + anbox-cloud +esm-infra +livepatch +usg\s* + cc-eal +fips +realtime-kernel\s* + cis +fips-updates +ros\s* + esm-apps +landscape +ros-updates\s* """ When I press tab twice to autocomplete the `pro disable` command Then stdout matches regexp: """ - anbox-cloud +esm-infra +landscape +ros\r - cc-eal +fips +livepatch +ros-updates\r - esm-apps +fips-updates +realtime-kernel +usg + anbox-cloud +esm-infra +livepatch +usg\s* + cc-eal +fips +realtime-kernel\s* + cis +fips-updates +ros\s* + esm-apps +landscape +ros-updates\s* """ Examples: ubuntu release - | release | - | focal | - | jammy | - | lunar | - | mantic | + | release | machine_type | + # | xenial | lxd-container | Can't rely on Xenial because of bash sorting things weirdly + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | + | mantic | lxd-container | - @series.lts - @uses.config.machine_type.lxd-container Scenario Outline: esm cache failures don't generate errors - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I disable access to esm.ubuntu.com - And I run `apt update` with sudo + And I apt update # Wait for the hook to fail When I wait `5` seconds And I run `systemctl --failed` with sudo @@ -419,20 +341,16 @@ """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | - - @series.jammy - @series.lunar - @series.mantic - @uses.config.machine_type.lxd-container + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | + # Services fail, degraded systemctl, but no crashes. 
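Throughout these feature files, raw shell steps such as `apt-get install ... with sudo, retrying exit [100]` and regexp checks against the full `pro status` table are replaced by dedicated steps like `When I apt install `<package>``, `When I apt update` and `Then I verify that `<service>` is enabled`. The behave step definitions backing those phrases live under features/steps/ and are not included in this diff; the following is only a rough sketch of what such steps could look like, and the `context.machines[SUT].execute()` helper and its `retry_exit_codes` parameter are assumptions, not the project's real helpers.

# Illustrative sketch only; helper names below are hypothetical.
from behave import then, when

SUT = "system-under-test"

@when("I apt install `{packages}`")
def when_i_apt_install(context, packages):
    # Non-interactive install that retries on apt's transient exit code 100,
    # mirroring the old "with sudo, retrying exit [100]" step.
    context.machines[SUT].execute(
        "sudo DEBIAN_FRONTEND=noninteractive apt-get install -y " + packages,
        retry_exit_codes=[100],
    )

@then("I verify that `{service}` is enabled")
def then_i_verify_service_enabled(context, service):
    # Reads `pro status --all` once instead of matching the whole status
    # table with a regexp in every scenario.
    output = context.machines[SUT].execute("sudo pro status --all")
    for line in output.splitlines():
        parts = line.split()
        if parts and parts[0] == service:
            assert "enabled" in parts, "{} is not enabled".format(service)
            return
    raise AssertionError("{} not found in pro status output".format(service))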
Scenario Outline: services fail gracefully when yaml is broken/absent - Given a `` machine with ubuntu-advantage-tools installed - When I run `apt update` with sudo + Given a `` `` machine with ubuntu-advantage-tools installed + When I apt update And I run `rm -rf /usr/lib/python3/dist-packages/yaml` with sudo And I verify that running `pro status` `with sudo` exits `1` Then stderr matches regexp: @@ -459,7 +377,7 @@ """ esm-cache.service """ - When I run `apt install python3-pip -y` with sudo + When I apt install `python3-pip` And I run `pip3 install pyyaml==3.10 ` with sudo And I run `ls /usr/local/lib//dist-packages/` with sudo Then stdout matches regexp: @@ -494,17 +412,14 @@ """ Examples: ubuntu release - | release | python_version | suffix | - | jammy | python3.10 | | - # Lunar+ has a BIG error message explaining why this is a clear user error... - | lunar | python3.11 | --break-system-packages | - | mantic | python3.11 | --break-system-packages | + | release | machine_type | python_version | suffix | + | jammy | lxd-container | python3.10 | | + # mantic+ has a BIG error message explaining why this is a clear user error... + | mantic | lxd-container | python3.11 | --break-system-packages | - @series.all - @uses.config.machine_type.lxd-container Scenario Outline: Warn users not to redirect/pipe human readable output - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I run shell command `pro version | cat` as non-root Then I will see the following on stderr """ @@ -587,10 +502,9 @@ """ Examples: ubuntu release - | release | - | xenial | - | bionic | - | focal | - | jammy | - | lunar | - | mantic | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | + | focal | lxd-container | + | jammy | lxd-container | + | mantic | lxd-container | diff -Nru ubuntu-advantage-tools-30~23.10/features/unattached_status.feature ubuntu-advantage-tools-31.2~23.10/features/unattached_status.feature --- ubuntu-advantage-tools-30~23.10/features/unattached_status.feature 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/unattached_status.feature 2024-01-18 17:34:13.000000000 +0000 @@ -1,9 +1,7 @@ Feature: Unattached status - @series.all - @uses.config.machine_type.lxd-container Scenario Outline: Unattached status in a ubuntu machine - formatted - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I run `pro status --format json` as non-root Then stdout is a json matching the `ua_status` schema When I run `pro status --format yaml` as non-root @@ -11,41 +9,39 @@ When I run `sed -i 's/contracts.can/invalidurl.notcan/' /etc/ubuntu-advantage/uaclient.conf` with sudo And I verify that running `pro status --format json` `as non-root` exits `1` Then stdout is a json matching the `ua_status` schema - And I will see the following on stdout: - """ - {"environment_vars": [], "errors": [{"message": "Failed to connect to authentication server\nCheck your Internet connection and try again.", "message_code": "connectivity-error", "service": null, "type": "system"}], "result": "failure", "services": [], "warnings": []} - """ + And stdout matches regexp: + """ + {"environment_vars": \[\], "errors": \[{"message": "Failed to connect to .*\\n\[Errno -2\] Name or service not known\\n", "message_code": "connectivity-error", "service": null, "type": "system"}\], "result": "failure", "services": \[\], "warnings": \[\]} + 
""" And I verify that running `pro status --format yaml` `as non-root` exits `1` Then stdout is a yaml matching the `ua_status` schema - And I will see the following on stdout: - """ - environment_vars: [] - errors: - - message: 'Failed to connect to authentication server + And stdout matches regexp: + """ + environment_vars: \[\] + errors: + - message: 'Failed to connect to https://invalidurl.notcanonical.com/v1/resources(.*) - Check your Internet connection and try again.' - message_code: connectivity-error - service: null - type: system - result: failure - services: [] - warnings: [] - """ + \[Errno -2\] Name or service not known + + ' + message_code: connectivity-error + service: null + type: system + result: failure + services: \[\] + warnings: \[\] + """ Examples: ubuntu release - | release | - | bionic | - | focal | - | xenial | - | jammy | - | lunar | - | mantic | - - @series.xenial - @series.bionic - @uses.config.machine_type.lxd-container + | release | machine_type | + | bionic | lxd-container | + | focal | lxd-container | + | xenial | lxd-container | + | jammy | lxd-container | + | mantic | lxd-container | + Scenario Outline: Unattached status in a ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I verify root and non-root `pro status` calls have the same output And I run `pro status` as non-root Then stdout matches regexp: @@ -120,14 +116,12 @@ """ Examples: ubuntu release - | release | - | xenial | - | bionic | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | - @series.focal - @uses.config.machine_type.lxd-container Scenario Outline: Unattached status in a ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I verify root and non-root `pro status` calls have the same output When I run `pro status` as non-root Then stdout matches regexp: @@ -198,13 +192,11 @@ """ Examples: ubuntu release - | release | - | focal | + | release | machine_type | + | focal | lxd-container | - @series.jammy - @uses.config.machine_type.lxd-container Scenario Outline: Unattached status in a ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I verify root and non-root `pro status` calls have the same output And I run `pro status` as non-root Then stdout matches regexp: @@ -214,6 +206,7 @@ esm-apps +yes +Expanded Security Maintenance for Applications esm-infra +yes +Expanded Security Maintenance for Infrastructure fips-preview +yes +.* + fips-updates +yes +FIPS compliant crypto packages with stable security updates livepatch +yes +Canonical Livepatch service realtime-kernel +yes +Ubuntu kernel with PREEMPT_RT patches integrated usg +yes +Security compliance and audit tools @@ -234,7 +227,7 @@ esm-infra +yes +Expanded Security Maintenance for Infrastructure fips +no +NIST-certified FIPS crypto packages fips-preview +yes +.* - fips-updates +no +FIPS compliant crypto packages with stable security updates + fips-updates +yes +FIPS compliant crypto packages with stable security updates landscape +no +Management and administration tool for Ubuntu livepatch +yes +Canonical Livepatch service realtime-kernel +yes +Ubuntu kernel with PREEMPT_RT patches integrated @@ -259,6 +252,7 @@ esm-apps +yes +Expanded Security Maintenance for Applications esm-infra +yes +Expanded Security Maintenance for Infrastructure 
fips-preview +yes +.* + fips-updates +yes +FIPS compliant crypto packages with stable security updates livepatch +yes +Canonical Livepatch service realtime-kernel +yes +Ubuntu kernel with PREEMPT_RT patches integrated usg +yes +Security compliance and audit tools @@ -273,15 +267,12 @@ """ Examples: ubuntu release - | release | - | jammy | + | release | machine_type | + | jammy | lxd-container | - @series.xenial - @series.bionic - @uses.config.machine_type.lxd-container @uses.config.contract_token Scenario Outline: Simulate status in a ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I do a preflight check for `contract_token` without the all flag Then stdout matches regexp: """ @@ -338,15 +329,13 @@ warnings: [] """ Examples: ubuntu release - | release | - | xenial | - | bionic | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | - @series.focal - @uses.config.machine_type.lxd-container @uses.config.contract_token Scenario Outline: Simulate status in a ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I do a preflight check for `contract_token` without the all flag Then stdout matches regexp: """ @@ -404,14 +393,12 @@ """ Examples: ubuntu release - | release | - | focal | + | release | machine_type | + | focal | lxd-container | - @series.jammy - @uses.config.machine_type.lxd-container @uses.config.contract_token Scenario Outline: Simulate status in a ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I do a preflight check for `contract_token` without the all flag Then stdout matches regexp: """ @@ -420,6 +407,7 @@ esm-apps +yes +yes +yes +Expanded Security Maintenance for Applications esm-infra +yes +yes +yes +Expanded Security Maintenance for Infrastructure fips-preview +yes +yes +no +.* + fips-updates +yes +yes +no +FIPS compliant crypto packages with stable security updates livepatch +yes +yes +yes +Canonical Livepatch service realtime-kernel +yes +yes +no +Ubuntu kernel with PREEMPT_RT patches integrated usg +yes +yes +no +Security compliance and audit tools @@ -434,7 +422,7 @@ esm-infra +yes +yes +yes +Expanded Security Maintenance for Infrastructure fips +no +yes +no +NIST-certified FIPS crypto packages fips-preview +yes +yes +no +.* - fips-updates +no +yes +no +FIPS compliant crypto packages with stable security updates + fips-updates +yes +yes +no +FIPS compliant crypto packages with stable security updates landscape +no +yes +no +Management and administration tool for Ubuntu livepatch +yes +yes +yes +Canonical Livepatch service realtime-kernel +yes +yes +no +Ubuntu kernel with PREEMPT_RT patches integrated @@ -468,16 +456,13 @@ """ Examples: ubuntu release - | release | - | jammy | + | release | machine_type | + | jammy | lxd-container | - @series.xenial - @series.bionic - @uses.config.machine_type.lxd-container @uses.config.contract_token_staging_expired Scenario Outline: Simulate status with expired token in a ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I run `sed -i 's/contracts.can/contracts.staging.can/' /etc/ubuntu-advantage/uaclient.conf` with sudo And I verify that a preflight check for `contract_token_staging_expired` formatted as json exits 1 Then stdout is a json 
matching the `ua_status` schema @@ -520,15 +505,13 @@ """ Examples: ubuntu release - | release | - | xenial | - | bionic | + | release | machine_type | + | xenial | lxd-container | + | bionic | lxd-container | - @series.focal - @uses.config.machine_type.lxd-container @uses.config.contract_token_staging_expired Scenario Outline: Simulate status with expired token in a ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I run `sed -i 's/contracts.can/contracts.staging.can/' /etc/ubuntu-advantage/uaclient.conf` with sudo And I verify that a preflight check for `contract_token_staging_expired` formatted as json exits 1 Then stdout is a json matching the `ua_status` schema @@ -569,14 +552,12 @@ """ Examples: ubuntu release - | release | - | focal | - - @series.jammy - @uses.config.machine_type.lxd-container + | release | machine_type | + | focal | lxd-container | + @uses.config.contract_token_staging_expired Scenario Outline: Simulate status with expired token in a ubuntu machine - Given a `` machine with ubuntu-advantage-tools installed + Given a `` `` machine with ubuntu-advantage-tools installed When I run `sed -i 's/contracts.can/contracts.staging.can/' /etc/ubuntu-advantage/uaclient.conf` with sudo And I verify that a preflight check for `contract_token_staging_expired` formatted as json exits 1 Then stdout is a json matching the `ua_status` schema @@ -610,10 +591,11 @@ esm-apps +yes +no +no +Expanded Security Maintenance for Applications esm-infra +yes +yes +yes +Expanded Security Maintenance for Infrastructure fips +yes +yes +no +NIST-certified FIPS crypto packages - fips-preview +yes +yes +no +.* + fips-preview +yes +yes +no +Preview of FIPS crypto packages undergoing certification with NIST + fips-updates +yes +yes +no +.* livepatch +yes +yes +yes +Canonical Livepatch service """ Examples: ubuntu release - | release | - | jammy | + | release | machine_type | + | jammy | lxd-container | diff -Nru ubuntu-advantage-tools-30~23.10/features/util.py ubuntu-advantage-tools-31.2~23.10/features/util.py --- ubuntu-advantage-tools-30~23.10/features/util.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/features/util.py 2024-02-14 15:37:46.000000000 +0000 @@ -11,8 +11,9 @@ import tempfile import time from base64 import b64encode +from dataclasses import dataclass from enum import Enum -from typing import Callable, Iterable, List, Optional +from typing import Callable, Iterable, List, Optional, Tuple from urllib.parse import quote from urllib.request import Request, urlopen @@ -21,6 +22,7 @@ from uaclient.system import get_dpkg_arch SUT = "system-under-test" +BUILDER_NAME_PREFIX = "builder-" LXC_PROPERTY_MAP = { "image": {"series": "properties.release", "machine_type": "Type"}, "container": {"series": "image.release", "machine_type": "image.type"}, @@ -32,6 +34,43 @@ UA_DEB_BUILD_CACHE = os.path.join(UA_TMP_DIR, "deb-cache") +ALL_BINARY_PACKAGE_NAMES = [ + "ubuntu-pro-client", + "ubuntu-pro-client-l10n", + "ubuntu-pro-auto-attach", + "ubuntu-advantage-tools", + "ubuntu-advantage-pro", +] + + +@dataclass +class ProDebPaths: + ubuntu_pro_client: str + ubuntu_pro_image_auto_attach: str + ubuntu_pro_client_l10n: str + ubuntu_advantage_tools: str + ubuntu_advantage_pro: str + + def non_cloud_pro_image_debs(self) -> List[Tuple[str, str]]: + return [ + ("ubuntu-pro-client", self.ubuntu_pro_client), + ("ubuntu-advantage-tools", self.ubuntu_advantage_tools), + ("ubuntu-pro-client-l10n", 
self.ubuntu_pro_client_l10n), + ] + + def cloud_pro_image_debs(self) -> List[Tuple[str, str]]: + return [ + ( + "ubuntu-pro-auto-attach", + self.ubuntu_pro_image_auto_attach, + ), + ("ubuntu-advantage-pro", self.ubuntu_advantage_pro), + ] + + def all_debs(self) -> List[Tuple[str, str]]: + return self.non_cloud_pro_image_debs() + self.cloud_pro_image_debs() + + class InstallationSource(Enum): ARCHIVE = "archive" PREBUILT = "prebuilt" @@ -43,51 +82,6 @@ CUSTOM = "custom" -def lxc_get_property(name: str, property_name: str, image: bool = False): - """Check series name of either an image or a container. - - :param name: - The name of the container or the image to check its series. - :param property_name: - The name of the property to return. - :param image: - If image==True will check image properties - If image==False it will check container configuration to get - properties. - - :return: - The value of the container or image property. - `None` if it could not detect it ( - some images don't have this field in properties). - """ - if not image: - property_name = LXC_PROPERTY_MAP["container"][property_name] - output = subprocess.check_output( - ["lxc", "config", "get", name, property_name], - universal_newlines=True, - ) - return output.rstrip() - else: - property_keys = LXC_PROPERTY_MAP["image"][property_name].split(".") - output = subprocess.check_output( - ["lxc", "image", "show", name], universal_newlines=True - ) - image_config = yaml.safe_load(output) - logging.info("--- `lxc image show` output: ", image_config) - value = image_config - for key_name in property_keys: - value = image_config.get(value, {}) - if not value: - logging.info( - "--- Could not detect image property {name}." - " Add it via `lxc image edit`".format( - name=".".join(property_keys) - ) - ) - return None - return value - - def repo_state_hash( exclude_dirs: Iterable[str] = ( ".github", @@ -158,12 +152,36 @@ return hashlib.md5(output_to_hash).hexdigest() -def get_debs_for_series(debs_path: str, series: str) -> List[str]: - return [ - os.path.join(debs_path, deb_file) - for deb_file in os.listdir(debs_path) - if series in deb_file - ] +def get_debs_for_series(debs_path: str, series: str) -> ProDebPaths: + ubuntu_pro_client = "" + ubuntu_pro_client_l10n = "" + ubuntu_pro_image_auto_attach = "" + ubuntu_advantage_tools = "" + ubuntu_advantage_pro = "" + for deb_file in os.listdir(debs_path): + if series in deb_file: + full_path = os.path.join(debs_path, deb_file) + if "ubuntu-pro-client-l10n" in deb_file: + ubuntu_pro_client_l10n = full_path + elif "ubuntu-pro-client" in deb_file: + ubuntu_pro_client = full_path + elif "ubuntu-pro-auto-attach" in deb_file: + ubuntu_pro_image_auto_attach = full_path + elif "ubuntu-advantage-tools" in deb_file: + ubuntu_advantage_tools = full_path + elif "ubuntu-advantage-pro" in deb_file: + ubuntu_advantage_pro = full_path + return ProDebPaths( + ubuntu_pro_client=ubuntu_pro_client, + ubuntu_pro_client_l10n=ubuntu_pro_client_l10n, + ubuntu_pro_image_auto_attach=ubuntu_pro_image_auto_attach, + ubuntu_advantage_tools=ubuntu_advantage_tools, + ubuntu_advantage_pro=ubuntu_advantage_pro, + ) + + +def _create_deb_path(prefix: str, name: str): + return os.path.join(UA_DEB_BUILD_CACHE, "{}{}.deb".format(prefix, name)) def build_debs( @@ -171,7 +189,7 @@ architecture: Optional[str] = None, chroot: Optional[str] = None, sbuild_output_to_terminal: bool = False, -) -> List[str]: +) -> ProDebPaths: """ Build the package through sbuild and store the debs into output_deb_dir @@ -185,25 +203,28 @@ 
architecture = get_dpkg_arch() deb_prefix = "{}-{}-{}-".format(series, architecture, repo_state_hash()) - tools_deb_name = "{}ubuntu-advantage-tools.deb".format(deb_prefix) - pro_deb_name = "{}ubuntu-advantage-pro.deb".format(deb_prefix) - l10n_deb_name = "{}ubuntu-pro-client-l10n.deb".format(deb_prefix) - tools_deb_cache_path = os.path.join(UA_DEB_BUILD_CACHE, tools_deb_name) - pro_deb_cache_path = os.path.join(UA_DEB_BUILD_CACHE, pro_deb_name) - l10n_deb_cache_path = os.path.join(UA_DEB_BUILD_CACHE, l10n_deb_name) + deb_paths = ProDebPaths( + ubuntu_pro_client=_create_deb_path(deb_prefix, "ubuntu-pro-client"), + ubuntu_pro_image_auto_attach=_create_deb_path( + deb_prefix, "ubuntu-pro-auto-attach" + ), + ubuntu_pro_client_l10n=_create_deb_path( + deb_prefix, "ubuntu-pro-client-l10n" + ), + ubuntu_advantage_tools=_create_deb_path( + deb_prefix, "ubuntu-advantage-tools" + ), + ubuntu_advantage_pro=_create_deb_path( + deb_prefix, "ubuntu-advantage-pro" + ), + ) if not os.path.exists(UA_DEB_BUILD_CACHE): os.makedirs(UA_DEB_BUILD_CACHE) - if os.path.exists(tools_deb_cache_path) and os.path.exists( - pro_deb_cache_path - ): - logging.info( - "--- Using debs in cache: {} and {} and {}".format( - tools_deb_cache_path, pro_deb_cache_path, l10n_deb_cache_path - ) - ) - return [tools_deb_cache_path, pro_deb_cache_path, l10n_deb_cache_path] + if os.path.exists(deb_paths.ubuntu_pro_client): + logging.info("--- Using debs in cache") + return deb_paths logging.info("--- Creating: {}".format(SOURCE_PR_TGZ)) @@ -292,17 +313,21 @@ for f in os.listdir(SBUILD_DIR): if f.endswith(".deb"): - if "l10n" in f: - dest = l10n_deb_cache_path - elif "pro" in f: - dest = pro_deb_cache_path - elif "tools" in f: - dest = tools_deb_cache_path + if "ubuntu-pro-client-l10n" in f: + dest = deb_paths.ubuntu_pro_client_l10n + elif "ubuntu-pro-client" in f: + dest = deb_paths.ubuntu_pro_client + elif "ubuntu-pro-auto-attach" in f: + dest = deb_paths.ubuntu_pro_image_auto_attach + elif "ubuntu-advantage-tools" in f: + dest = deb_paths.ubuntu_advantage_tools + elif "ubuntu-advantage-pro" in f: + dest = deb_paths.ubuntu_advantage_pro else: continue shutil.copy(os.path.join(SBUILD_DIR, f), dest) - return [tools_deb_cache_path, pro_deb_cache_path, l10n_deb_cache_path] + return deb_paths class SafeLoaderWithoutDatetime(yaml.SafeLoader): @@ -353,12 +378,13 @@ logger_fn, ) elif function_name == "cloud": - processed_template = _replace_and_log( - processed_template, - match.group(0), - context.pro_config.default_cloud.name, - logger_fn, - ) + if args[1] in context.machines: + processed_template = _replace_and_log( + processed_template, + match.group(0), + context.machines[args[1]].cloud, + logger_fn, + ) elif function_name == "today": dt = datetime.datetime.utcnow() if len(args) == 2: @@ -417,8 +443,7 @@ "version": "2011-08-01", **action_params, } - method = "POST" - uri = "https://landscape.canonical.com/api/" + method = "GET" host = "landscape.canonical.com" path = "/api/" @@ -439,11 +464,13 @@ signature = b64encode(digest) formatted_params += "&signature=" + quote(signature) + uri = "https://{host}{path}?{params}".format( + host=host, path=path, params=formatted_params + ) request = Request( uri, headers={"Host": host}, method=method, - data=formatted_params.encode(), ) response = urlopen(request) diff -Nru ubuntu-advantage-tools-30~23.10/lib/convert_list_to_deb822.py ubuntu-advantage-tools-31.2~23.10/lib/convert_list_to_deb822.py --- ubuntu-advantage-tools-30~23.10/lib/convert_list_to_deb822.py 1970-01-01 00:00:00.000000000 +0000 
+++ ubuntu-advantage-tools-31.2~23.10/lib/convert_list_to_deb822.py 2024-01-18 17:34:13.000000000 +0000 @@ -0,0 +1,71 @@ +#!/usr/bin/env python3 + +""" +This script is called after running do-release-upgrade in a machine. +This converts list files to deb822 files when upgrading to Noble. +""" + +import logging +import os +import sys + +from aptsources.sourceslist import SourceEntry # type: ignore + +from uaclient import entitlements +from uaclient.apt import _get_sources_file_content +from uaclient.cli import setup_logging +from uaclient.config import UAConfig +from uaclient.system import ( + ensure_file_absent, + get_release_info, + load_file, + write_file, +) +from uaclient.util import set_filename_extension + +if __name__ == "__main__": + series = get_release_info().series + if series != "noble": + sys.exit(0) + + setup_logging(logging.DEBUG) + cfg = UAConfig() + + for entitlement_class in entitlements.ENTITLEMENT_CLASSES: + if not issubclass( + entitlement_class, entitlements.repo.RepoEntitlement + ): + continue + + entitlement = entitlement_class(cfg) + + filename = set_filename_extension(entitlement.repo_file, "list") + if os.path.exists(filename): + # If do-release-upgrade commented out the file, whether the + # repository is not reachable or is considered a third party, then + # it will be handled in upgrade_lts_contract. This script only + # changes services which are enabled, active and reachable. + valid_sources = [ + SourceEntry(line) + for line in load_file(filename).strip().split("\n") + if line.strip().startswith("deb") + ] + if valid_sources: + # get this information from the file, to avoid interacting with + # the entitlement_config + suites = list(set(source.dist for source in valid_sources)) + repo_url = valid_sources[0].uri + include_deb_src = any( + source.type == "deb-src" for source in valid_sources + ) + content = _get_sources_file_content( + suites, + series, + True, + repo_url, + entitlement.repo_key_file, + include_deb_src, + ) + write_file(entitlement.repo_file, content) + + ensure_file_absent(filename) diff -Nru ubuntu-advantage-tools-30~23.10/lib/daemon.py ubuntu-advantage-tools-31.2~23.10/lib/daemon.py --- ubuntu-advantage-tools-30~23.10/lib/daemon.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/lib/daemon.py 2024-02-14 15:37:46.000000000 +0000 @@ -1,8 +1,9 @@ import logging import os import sys +import time -from uaclient import http +from uaclient import http, system from uaclient.config import UAConfig from uaclient.daemon import poll_for_pro_license, retry_auto_attach from uaclient.log import setup_journald_logging @@ -10,6 +11,34 @@ LOG = logging.getLogger("ubuntupro.daemon") +# 10 seconds times 120 = 20 minutes +WAIT_FOR_CLOUD_CONFIG_SLEEP_TIME = 10 +WAIT_FOR_CLOUD_CONFIG_POLL_TIMES = 120 + + +def _wait_for_cloud_config(): + LOG.debug("waiting for cloud-config.service to finish") + for i in range(WAIT_FOR_CLOUD_CONFIG_POLL_TIMES + 1): + state = system.get_systemd_unit_active_state("cloud-config.service") + LOG.debug("cloud-config.service state: %r", state) + if state is not None and state == "activating": + if i < WAIT_FOR_CLOUD_CONFIG_POLL_TIMES: + LOG.debug( + "cloud-config.service is activating. " + "waiting to check again." + ) + time.sleep(WAIT_FOR_CLOUD_CONFIG_SLEEP_TIME) + else: + LOG.warning( + "cloud-config.service is still activating after " + "20 minutes. continuing anyway" + ) + return + else: + LOG.debug("cloud-config.service is not activating. 
continuing") + return + + def main() -> int: setup_journald_logging(logging.DEBUG, LOG) # Make sure the ubuntupro.daemon logger does not generate double logging @@ -22,6 +51,9 @@ LOG.debug("daemon starting") + _wait_for_cloud_config() + + LOG.debug("checking for condition files") is_correct_cloud = any( os.path.exists("/run/cloud-init/cloud-id-{}".format(cloud)) for cloud in ("gce", "azure") diff -Nru ubuntu-advantage-tools-30~23.10/lib/reboot_cmds.py ubuntu-advantage-tools-31.2~23.10/lib/reboot_cmds.py --- ubuntu-advantage-tools-30~23.10/lib/reboot_cmds.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/lib/reboot_cmds.py 2024-02-14 15:37:46.000000000 +0000 @@ -66,7 +66,7 @@ def refresh_contract(cfg: config.UAConfig): try: contract.refresh(cfg) - except exceptions.UrlError: + except exceptions.ConnectivityError: LOG.warning("Failed to refresh contract") raise @@ -85,7 +85,7 @@ LOG.debug("Running reboot commands...") try: - with lock.SpinLock(cfg=cfg, lock_holder="pro-reboot-cmds"): + with lock.RetryLock(cfg=cfg, lock_holder="pro-reboot-cmds"): fix_pro_pkg_holds(cfg) refresh_contract(cfg) upgrade_lts_contract.process_contract_delta_after_apt_lock(cfg) diff -Nru ubuntu-advantage-tools-30~23.10/preferences.d/ubuntu-pro-esm-apps ubuntu-advantage-tools-31.2~23.10/preferences.d/ubuntu-pro-esm-apps --- ubuntu-advantage-tools-30~23.10/preferences.d/ubuntu-pro-esm-apps 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/preferences.d/ubuntu-pro-esm-apps 2024-02-14 15:37:46.000000000 +0000 @@ -1,4 +1,4 @@ -# This file is used by Ubuntu Pro and supplied by the ubuntu-advantage-tools +# This file is used by Ubuntu Pro and supplied by the ubuntu-pro-client # package. It has no effect if Ubuntu Pro services are not in use since no # other apt repositories are expected to match o=UbuntuESMApps. # diff -Nru ubuntu-advantage-tools-30~23.10/preferences.d/ubuntu-pro-esm-infra ubuntu-advantage-tools-31.2~23.10/preferences.d/ubuntu-pro-esm-infra --- ubuntu-advantage-tools-30~23.10/preferences.d/ubuntu-pro-esm-infra 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/preferences.d/ubuntu-pro-esm-infra 2024-02-14 15:37:46.000000000 +0000 @@ -1,4 +1,4 @@ -# This file is used by Ubuntu Pro and supplied by the ubuntu-advantage-tools +# This file is used by Ubuntu Pro and supplied by the ubuntu-pro-client # package. It has no effect if Ubuntu Pro services are not in use since no # other apt repositories are expected to match o=UbuntuESM. 
# diff -Nru ubuntu-advantage-tools-30~23.10/release-upgrades.d/ubuntu-advantage-upgrades.cfg ubuntu-advantage-tools-31.2~23.10/release-upgrades.d/ubuntu-advantage-upgrades.cfg --- ubuntu-advantage-tools-30~23.10/release-upgrades.d/ubuntu-advantage-upgrades.cfg 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/release-upgrades.d/ubuntu-advantage-upgrades.cfg 2024-01-18 17:34:13.000000000 +0000 @@ -1,4 +1,4 @@ [Sources] Pockets=security,updates,proposed,backports,infra-security,infra-updates,apps-security,apps-updates [Distro] -PostInstallScripts=./xorg_fix_proprietary.py, /usr/lib/ubuntu-advantage/upgrade_lts_contract.py +PostInstallScripts=./xorg_fix_proprietary.py, /usr/lib/ubuntu-advantage/convert_list_to_deb822.py, /usr/lib/ubuntu-advantage/upgrade_lts_contract.py diff -Nru ubuntu-advantage-tools-30~23.10/setup.py ubuntu-advantage-tools-31.2~23.10/setup.py --- ubuntu-advantage-tools-30~23.10/setup.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/setup.py 2024-02-14 15:37:46.000000000 +0000 @@ -7,7 +7,7 @@ from uaclient import defaults -NAME = "ubuntu-advantage-tools" +NAME = "ubuntu-pro-client" INSTALL_REQUIRES = open("requirements.txt").read().rstrip("\n").split("\n") diff -Nru ubuntu-advantage-tools-30~23.10/sru/release-31/test-jobs-status-world-readable.sh ubuntu-advantage-tools-31.2~23.10/sru/release-31/test-jobs-status-world-readable.sh --- ubuntu-advantage-tools-30~23.10/sru/release-31/test-jobs-status-world-readable.sh 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/sru/release-31/test-jobs-status-world-readable.sh 2024-01-18 17:34:13.000000000 +0000 @@ -0,0 +1,61 @@ +#!/bin/bash +set -e + +series=$1 +install_from=$2 # either path to a .deb, or 'staging', or 'proposed' + +name=$series-dev + +function cleanup { + lxc delete $name --force +} + +function on_err { + echo -e "Test Failed" + cleanup + exit 1 +} +trap on_err ERR + + +lxc launch ubuntu-daily:$series $name +sleep 5 + +# Install latest ubuntu-advantage-tools +lxc exec $name -- apt-get update > /dev/null +lxc exec $name -- apt-get install -y ubuntu-advantage-tools > /dev/null +echo -e "\n* Latest u-a-t is installed" +echo "###########################################" +lxc exec $name -- apt-cache policy ubuntu-advantage-tools +echo -e "###########################################\n" + +echo -e "\n* Create jobs-status file" +echo "###########################################" +lxc exec $name -- python3 /usr/lib/ubuntu-advantage/timer.py +lxc exec $name -- ls -la /var/lib/ubuntu-advantage/jobs-status.json +echo -e "###########################################\n" + + +# Upgrade u-a-t to new version +# ---------------------------------------------------------------- +if [ $install_from == 'staging' ]; then + lxc exec $name -- sudo add-apt-repository ppa:ua-client/staging -y > /dev/null + lxc exec $name -- apt-get update > /dev/null + lxc exec $name -- apt-get install ubuntu-advantage-tools -y > /dev/null +elif [ $install_from == 'proposed' ]; then + lxc exec $name -- sh -c "echo \"deb http://archive.ubuntu.com/ubuntu $series-proposed main\" | tee /etc/apt/sources.list.d/proposed.list" + lxc exec $name -- apt-get update > /dev/null + lxc exec $name -- apt-get install ubuntu-advantage-tools -y > /dev/null +else + lxc file push $install_from $name/new-ua.deb + lxc exec $name -- dpkg -i /new-ua.deb > /dev/null +fi +# ---------------------------------------------------------------- + +echo -e "\n* re-create jobs-status file and notice that is now 
world-readable" +echo "###########################################" +lxc exec $name -- python3 /usr/lib/ubuntu-advantage/timer.py +lxc exec $name -- ls -la /var/lib/ubuntu-advantage/jobs-status.json +echo -e "###########################################\n" + +cleanup diff -Nru ubuntu-advantage-tools-30~23.10/systemd/apt-news.service ubuntu-advantage-tools-31.2~23.10/systemd/apt-news.service --- ubuntu-advantage-tools-30~23.10/systemd/apt-news.service 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/systemd/apt-news.service 2024-01-18 17:34:13.000000000 +0000 @@ -14,3 +14,29 @@ [Service] Type=oneshot ExecStart=/usr/bin/python3 /usr/lib/ubuntu-advantage/apt_news.py +AppArmorProfile=ubuntu_pro_apt_news +CapabilityBoundingSet=~CAP_SYS_ADMIN +CapabilityBoundingSet=~CAP_NET_ADMIN +CapabilityBoundingSet=~CAP_NET_BIND_SERVICE +CapabilityBoundingSet=~CAP_SYS_PTRACE +CapabilityBoundingSet=~CAP_NET_RAW +PrivateTmp=true +RestrictAddressFamilies=~AF_NETLINK +RestrictAddressFamilies=~AF_PACKET +# These may break some tests, and should be enabled carefully +#NoNewPrivileges=true +#PrivateDevices=true +#ProtectControlGroups=true +# ProtectHome=true seems to reliably break the GH integration test with a lunar lxd on jammy host +#ProtectHome=true +#ProtectKernelModules=true +#ProtectKernelTunables=true +#ProtectSystem=full +#RestrictSUIDSGID=true +# Unsupported in bionic +# Suggestion from systemd.exec(5) manpage on SystemCallFilter +#SystemCallFilter=@system-service +#SystemCallFilter=~@mount +#SystemCallErrorNumber=EPERM +#ProtectClock=true +#ProtectKernelLogs=true diff -Nru ubuntu-advantage-tools-30~23.10/systemd/ubuntu-advantage.service ubuntu-advantage-tools-31.2~23.10/systemd/ubuntu-advantage.service --- ubuntu-advantage-tools-30~23.10/systemd/ubuntu-advantage.service 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/systemd/ubuntu-advantage.service 2024-02-14 15:37:46.000000000 +0000 @@ -10,7 +10,11 @@ [Unit] Description=Ubuntu Pro Background Auto Attach Documentation=man:ubuntu-advantage https://ubuntu.com/advantage -After=network.target network-online.target systemd-networkd.service ua-auto-attach.service cloud-config.service ubuntu-advantage-cloud-id-shim.service +# Note: This is NOT After=cloud-config.service to avoid deadlock when +# cloud-init installs this package. +# The python script will wait until cloud-config.service is done +# before doing anything. +After=network.target network-online.target systemd-networkd.service ua-auto-attach.service ubuntu-advantage-cloud-id-shim.service # Only run if not already attached ConditionPathExists=!/var/lib/ubuntu-advantage/private/machine-token.json diff -Nru ubuntu-advantage-tools-30~23.10/test-requirements.txt ubuntu-advantage-tools-31.2~23.10/test-requirements.txt --- ubuntu-advantage-tools-30~23.10/test-requirements.txt 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/test-requirements.txt 2024-02-29 14:03:11.000000000 +0000 @@ -10,4 +10,12 @@ # python3-apt # We know it is in the distro, but for testing in venvs we need to install it # And well, there is no pypi package -git+https://salsa.debian.org/apt-team/python-apt@main +# We are using git-ubuntu to get the proper version by python version. +# We try to support the latest LTS, the latest released version, +# and the devel version. +# fixme: This may cause weird behavior in the future, or even break, if +# two releases have the same python_version. 
+git+https://git.launchpad.net/ubuntu/+source/python-apt@ubuntu/jammy-updates ; python_version == '3.10' +git+https://git.launchpad.net/ubuntu/+source/python-apt@ubuntu/mantic ; python_version == '3.11' +# need to keep an aye to bump this when python-apt is in noble-updates +git+https://git.launchpad.net/ubuntu/+source/python-apt@ubuntu/noble ; python_version == '3.12' diff -Nru ubuntu-advantage-tools-30~23.10/tools/build.py ubuntu-advantage-tools-31.2~23.10/tools/build.py --- ubuntu-advantage-tools-30~23.10/tools/build.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/tools/build.py 2024-02-14 15:37:46.000000000 +0000 @@ -39,7 +39,7 @@ chroot=chroot, sbuild_output_to_terminal=not quiet, architecture=arch, - ), + ).__dict__, } ) ) diff -Nru ubuntu-advantage-tools-30~23.10/tools/create-lp-release-branches.sh ubuntu-advantage-tools-31.2~23.10/tools/create-lp-release-branches.sh --- ubuntu-advantage-tools-30~23.10/tools/create-lp-release-branches.sh 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/tools/create-lp-release-branches.sh 2024-01-18 17:34:13.000000000 +0000 @@ -54,7 +54,8 @@ bionic) version=${UA_VERSION}~18.04;; focal) version=${UA_VERSION}~20.04;; jammy) version=${UA_VERSION}~22.04;; - lunar) version=${UA_VERSION}~23.04;; + mantic) version=${UA_VERSION}~23.10;; + noble) version=${UA_VERSION}~24.04;; esac dch_cmd=(dch -m -v "${version}" -D "${release}" -b "Backport new upstream release to $release (LP: #${SRU_BUG})") if [ -z "$DO_IT" ]; then diff -Nru ubuntu-advantage-tools-30~23.10/tools/run-integration-tests.py ubuntu-advantage-tools-31.2~23.10/tools/run-integration-tests.py --- ubuntu-advantage-tools-30~23.10/tools/run-integration-tests.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/tools/run-integration-tests.py 2024-01-18 17:34:13.000000000 +0000 @@ -13,7 +13,6 @@ "bionic": "18.04", "focal": "20.04", "jammy": "22.04", - "lunar": "23.04", "mantic": "23.10", } @@ -24,19 +23,35 @@ } PLATFORM_SERIES_TESTS = { - "azuregeneric": ["xenial", "bionic", "focal", "jammy", "lunar"], - "azurepro": ["xenial", "bionic", "focal", "jammy"], - "azurepro-fips": ["xenial", "bionic", "focal"], - "awsgeneric": ["xenial", "bionic", "focal", "jammy"], - "awspro": ["xenial", "bionic", "focal", "jammy"], - "awspro-fips": ["xenial", "bionic", "focal"], + "aws.generic": ["xenial", "bionic", "focal", "jammy"], + "aws.pro": ["xenial", "bionic", "focal", "jammy"], + "aws.pro-fips": ["xenial", "bionic", "focal"], + "azure.generic": ["xenial", "bionic", "focal", "jammy", "mantic"], + "azure.pro": ["xenial", "bionic", "focal", "jammy"], + "azure.pro-fips": ["xenial", "bionic", "focal"], + "gcp.generic": ["xenial", "bionic", "focal", "jammy", "mantic"], + "gcp.pro": ["xenial", "bionic", "focal", "jammy"], + "gcp.pro-fips": ["bionic", "focal"], + "lxd-container": ["xenial", "bionic", "focal", "jammy", "mantic"], + "lxd-vm": ["xenial", "bionic", "focal", "jammy", "mantic"], "docker": ["focal"], - "gcpgeneric": ["xenial", "bionic", "focal", "jammy", "lunar"], - "gcppro": ["xenial", "bionic", "focal", "jammy"], - "gcppro-fips": ["bionic", "focal"], - "lxd": ["xenial", "bionic", "focal", "jammy", "lunar", "mantic"], - "vm": ["xenial", "bionic", "focal", "jammy", "lunar", "mantic"], - "upgrade": ["xenial", "bionic", "focal", "jammy", "lunar"], + "upgrade": ["xenial", "bionic", "focal", "jammy", "mantic"], +} + +PLATFORM_ARGS = { + "aws.generic": ["-D", "machine_types=aws.generic"], + "aws.pro": ["-D", "machine_types=aws.pro"], + 
"aws.pro-fips": ["-D", "machine_types=aws.pro-fips"], + "azure.generic": ["-D", "machine_types=azure.generic"], + "azure.pro": ["-D", "machine_types=azure.pro"], + "azure.pro-fips": ["-D", "machine_types=azure.pro-fips"], + "gcp.generic": ["-D", "machine_types=gcp.generic"], + "gcp.pro": ["-D", "machine_types=gcp.pro"], + "gcp.pro-fips": ["-D", "machine_types=gcp.pro-fips"], + "lxd-container": ["-D", "machine_types=lxd-container", "--tags=-upgrade"], + "lxd-vm": ["-D", "machine_types=lxd-vm", "--tags=-docker"], + "docker": ["--tags=docker", "features/docker.feature"], + "upgrade": ["--tags=upgrade"], } @@ -66,10 +81,13 @@ command = [ "tox", "-e", - "behave-{}-{}".format(p, series_version), + "behave", "--", "-D", "install_from={}".format(install_from), + "-D", + "releases={}".format(s), + *PLATFORM_ARGS[p], ] if check_version: @@ -87,7 +105,13 @@ if wip: command.extend(["--tags=wip", "--stop"]) - commands.append((command, env)) + commands.append( + ( + "behave-{}-{}".format(p.replace(".", "-"), s), + command, + env, + ) + ) return commands @@ -170,31 +194,29 @@ os.makedirs(output_dir, exist_ok=True) error = False processes = [] - for command, env in commands: + for name, command, env in commands: print("Running {}".format(command)) - with open( - "{}/{}.txt".format(output_dir, command[2]), "wb" - ) as result_file: + with open("{}/{}.txt".format(output_dir, name), "wb") as result_file: process = subprocess.Popen( command, env=env, stdout=result_file, stderr=stdout ) - processes.append(process) + processes.append((name, process)) while processes: - for process in processes: + for name, process in processes: result = process.poll() if result is not None: if result == 0: print("{} finished sucessfully".format(process.args)) else: print("Failing tests for {}".format(process.args)) - result_filename = "{}.txt".format(process.args[2]) + result_filename = "{}.txt".format(name) os.rename( "{}/{}".format(output_dir, result_filename), "{}/failed-{}".format(output_dir, result_filename), ) error = True - processes.remove(process) + processes.remove((name, process)) time.sleep(5) if install_from == "proposed" and not error: diff -Nru ubuntu-advantage-tools-30~23.10/tools/spellcheck-allowed-words.txt ubuntu-advantage-tools-31.2~23.10/tools/spellcheck-allowed-words.txt --- ubuntu-advantage-tools-30~23.10/tools/spellcheck-allowed-words.txt 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/tools/spellcheck-allowed-words.txt 2024-02-14 15:37:46.000000000 +0000 @@ -0,0 +1,190 @@ +0m +1m +37m +500MB +91m +92m +93m +94m +affordances +AFFORDANCES +AMS +anbox +Anbox +ANBOX +ancel +api +aptKey +APTNEWS +aptURL +arg +args +ARGS +aws +bootable +Cancelling +Canonical's +cfg +CFG +cli +CLI +cmd +CMD +cmds +config +CONFIG +crypto +CSEC +customizable +CVE +CVEs +CVES +datestring +depedent +DISA +DISABLEGREY +distro +DISTRO +DNE +E501 +eal +EAL2 +ENDC +enew +enum +ENUM +eol +EOL +EPILOG +esm +ESM +experied +F401 +FedRAMP +filepath +fips +FIPS +FISMA +fn +FormattedNamedMessage +gcp +GCP +gettext +gpg +GPG +HIPAA +HTTPS +IEC +imds +IMDS +INFOBLUE +IOTG +json +JSON +keyfile +linux +livepatch +Livepatch +LIVEPATCH +livepatches +Livepatches +LTS +METAPACKAGE +MOTD +msg +nable +NamedMessage +nError +ngettext +NIST +nnn +nnnn +nnnnnnn +nonroot +NONROOT +noqa +nSee +NullTranslations +num +OKGREEN +openssl +OPENSSL +optimised +option1 +option2 +param +PARAM +params +PCI +Pi4 +Pi5 +pid +pkgs +PKGS +PluralizableString +posint +Power8 +PPA +PRE +pycurl +PycURL +PYCURL +python3 +PYTHONPATH +RASPI +realtime 
+Realtime +REALTIME +repo +REPO +repos +repr +ros +ROS +saas +SaaS +SAAS +SECURITYSTATUS +snapd +SNAPD +ssl +SSL +stderr +stdout +STIG +SUBCOMMAND +subcommands +SUBP +sudo +sys +systemd +SYSTEMD +Tegra +telco +THIRDPARTY +tmpl +TMPL +ttach +TxtColor +UA +uaclient +UACLIENT +ubscribe +ubuntu +unbootable +UNENTITLED +url +urls +usg +USG +usns +USNs +USNS +UTF +ver +VER +WARNINGYELLOW +x86 +yaml +YAML +yyyy diff -Nru ubuntu-advantage-tools-30~23.10/tools/test-in-lxd.sh ubuntu-advantage-tools-31.2~23.10/tools/test-in-lxd.sh --- ubuntu-advantage-tools-30~23.10/tools/test-in-lxd.sh 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/tools/test-in-lxd.sh 2024-02-14 15:37:46.000000000 +0000 @@ -13,8 +13,9 @@ series=${1:-jammy} build_out=$(./tools/build.sh "$series") hash=$(echo "$build_out" | jq -r .state_hash) -tools_deb=$(echo "$build_out" | jq -r '.debs[]' | grep tools) -l10n_deb=$(echo "$build_out" | jq -r '.debs[]' | grep l10n) +ubuntu_advantage_tools_deb=$(echo "$build_out" | jq -r '.debs.ubuntu_advantage_tools') +ubuntu_pro_client_deb=$(echo "$build_out" | jq -r '.debs.ubuntu_pro_client') +ubuntu_pro_client_l10n_deb=$(echo "$build_out" | jq -r '.debs.ubuntu_pro_client_l10n') name=ua-$series-$hash flags= @@ -29,8 +30,9 @@ echo "vms take a while before the agent is ready" sleep 30 fi -lxc file push "$tools_deb" "${name}/tmp/ua_tools.deb" -lxc file push "$l10n_deb" "${name}/tmp/ua_l10n.deb" +lxc file push "$ubuntu_advantage_tools_deb" "${name}/tmp/ua_tools.deb" +lxc file push "$ubuntu_pro_client_deb" "${name}/tmp/pro.deb" +lxc file push "$ubuntu_pro_client_l10n_deb" "${name}/tmp/pro_l10n.deb" if [[ "$SHELL_BEFORE" -ne 0 ]]; then set +x @@ -43,6 +45,5 @@ lxc exec "$name" bash fi -lxc exec "$name" -- dpkg -i /tmp/ua_tools.deb -lxc exec "$name" -- dpkg -i /tmp/ua_l10n.deb +lxc exec "$name" -- apt install /tmp/ua_tools.deb /tmp/pro.deb /tmp/pro_l10n.deb lxc shell "$name" diff -Nru ubuntu-advantage-tools-30~23.10/tools/test-in-multipass.sh ubuntu-advantage-tools-31.2~23.10/tools/test-in-multipass.sh --- ubuntu-advantage-tools-30~23.10/tools/test-in-multipass.sh 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/tools/test-in-multipass.sh 2024-02-14 15:37:46.000000000 +0000 @@ -11,18 +11,20 @@ series=${1:-jammy} build_out=$(./tools/build.sh "$series") hash=$(echo "$build_out" | jq -r .state_hash) -tools_deb=$(echo "$build_out" | jq -r '.debs[]' | grep tools) -l10n_deb=$(echo "$build_out" | jq -r '.debs[]' | grep l10n) +ubuntu_advantage_tools_deb=$(echo "$build_out" | jq -r '.debs.ubuntu_advantage_tools') +ubuntu_pro_client_deb=$(echo "$build_out" | jq -r '.debs.ubuntu_pro_client') +ubuntu_pro_client_l10n_deb=$(echo "$build_out" | jq -r '.debs.ubuntu_pro_client_l10n') name=ua-$series-$hash multipass delete "$name" --purge || true multipass launch "$series" --name "$name" -sleep 30 # Snaps won't access /tmp -cp "$tools_deb" ~/ua_tools.deb -cp "$l10n_deb" ~/ua_l10n.deb +cp "$ubuntu_advantage_tools_deb" ~/ua_tools.deb +cp "$ubuntu_pro_client_l10n_deb" ~/pro_l10n.deb +cp "$ubuntu_pro_client_deb" ~/pro.deb multipass transfer ~/ua_tools.deb "${name}:/tmp/ua_tools.deb" -multipass transfer ~/ua_l10n.deb "${name}:/tmp/ua_l10n.deb" +multipass transfer ~/pro_l10n.deb "${name}:/tmp/pro_l10n.deb" +multipass transfer ~/pro.deb "${name}:/tmp/pro.deb" rm -f ~/ua_tools.deb rm -f ~/ua_l10n.deb @@ -37,6 +39,5 @@ multipass exec "$name" bash fi -multipass exec "$name" -- sudo dpkg -i /tmp/ua_tools.deb -multipass exec "$name" -- sudo dpkg -i /tmp/ua_l10n.deb +multipass exec 
"$name" -- sudo apt install /tmp/ua_tools.deb /tmp/pro.deb /tmp/pro_l10n.deb multipass shell "$name" diff -Nru ubuntu-advantage-tools-30~23.10/tools/ua.bash ubuntu-advantage-tools-31.2~23.10/tools/ua.bash --- ubuntu-advantage-tools-30~23.10/tools/ua.bash 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/tools/ua.bash 2024-02-14 15:37:46.000000000 +0000 @@ -1,41 +1,55 @@ -# bash completion for ubuntu-advantage-tools +# bash completion for ubuntu-pro-client . /etc/os-release # For VERSION_ID +API_ENDPOINTS=$(/usr/bin/python3 -c 'from uaclient.api.api import VALID_ENDPOINTS; print(" ".join(VALID_ENDPOINTS))') +SERVICES="anbox-cloud cc-eal cis esm-apps esm-infra fips fips-updates landscape livepatch realtime-kernel ros ros-updates usg" +SUBCMDS="--debug --help --version api attach auto-attach collect-logs config detach disable enable fix help refresh security-status status system version" + _ua_complete() { - local cur_word prev_word services subcmds base_params + local cur_word prev_word cur_word="${COMP_WORDS[COMP_CWORD]}" prev_word="${COMP_WORDS[COMP_CWORD-1]}" - if [ "$VERSION_ID" = "16.04" ] || [ "$VERSION_ID" == "18.04" ]; then - services="anbox-cloud cc-eal cis esm-apps esm-infra fips fips-updates landscape livepatch realtime-kernel ros ros-updates" - else - services="anbox-cloud cc-eal esm-apps esm-infra fips fips-updates landscape livepatch realtime-kernel ros ros-updates usg" - fi - - subcmds="--debug --help --version api attach auto-attach collect-logs config detach disable enable fix help refresh security-status status system version" - base_params="" - case ${COMP_CWORD} in 1) # shellcheck disable=SC2207 - COMPREPLY=($(compgen -W "$base_params $subcmds" -- $cur_word)) + COMPREPLY=($(compgen -W "$SUBCMDS" -- $cur_word)) ;; 2) case ${prev_word} in disable) # shellcheck disable=SC2207 - COMPREPLY=($(compgen -W "$services" -- $cur_word)) + COMPREPLY=($(compgen -W "$SERVICES" -- $cur_word)) ;; enable) # shellcheck disable=SC2207 - COMPREPLY=($(compgen -W "$services" -- $cur_word)) + COMPREPLY=($(compgen -W "$SERVICES" -- $cur_word)) + ;; + api) + # shellcheck disable=SC2207 + COMPREPLY=($(compgen -W "$API_ENDPOINTS" -- $cur_word)) ;; esac ;; *) - COMPREPLY=() + local subcmd + subcmd="${COMP_WORDS[1]}" + case ${subcmd} in + disable) + # shellcheck disable=SC2207 + COMPREPLY=($(compgen -W "$SERVICES" -- $cur_word)) + ;; + enable) + # shellcheck disable=SC2207 + COMPREPLY=($(compgen -W "$SERVICES" -- $cur_word)) + ;; + *) + COMPREPLY=() + ;; + + esac ;; esac } diff -Nru ubuntu-advantage-tools-30~23.10/tox.ini ubuntu-advantage-tools-31.2~23.10/tox.ini --- ubuntu-advantage-tools-30~23.10/tox.ini 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/tox.ini 2024-01-18 17:34:13.000000000 +0000 @@ -17,18 +17,6 @@ AZURE_CONFIG_DIR UACLIENT_BEHAVE_* https_proxy -setenv = - awsgeneric: UACLIENT_BEHAVE_MACHINE_TYPE = aws.generic - awspro: UACLIENT_BEHAVE_MACHINE_TYPE = aws.pro - awspro-fips: UACLIENT_BEHAVE_MACHINE_TYPE = aws.pro-fips - azuregeneric: UACLIENT_BEHAVE_MACHINE_TYPE = azure.generic - azurepro: UACLIENT_BEHAVE_MACHINE_TYPE = azure.pro - azurepro-fips: UACLIENT_BEHAVE_MACHINE_TYPE = azure.pro-fips - gcpgeneric: UACLIENT_BEHAVE_MACHINE_TYPE = gcp.generic - gcppro: UACLIENT_BEHAVE_MACHINE_TYPE = gcp.pro - gcppro-fips: UACLIENT_BEHAVE_MACHINE_TYPE = gcp.pro-fips - vm: UACLIENT_BEHAVE_MACHINE_TYPE = lxd-vm - docker: UACLIENT_BEHAVE_MACHINE_TYPE = lxd-vm commands = test: py.test --junitxml=pytest_results.xml {posargs:--cov uaclient uaclient} 
flake8: flake8 uaclient lib setup.py features @@ -36,72 +24,7 @@ black: black --check --diff uaclient/ features/ lib/ setup.py isort: isort --check --diff uaclient/ features/ lib/ setup.py shellcheck: bash -O extglob -O nullglob -c "shellcheck -S warning tools/*.sh debian/*.{config,postinst,postrm,prerm} lib/*.sh sru/*.sh update-motd.d/*" - behave-any: behave -v -D machine_type=any {posargs} - - behave-lxd-16.04: behave -v {posargs} --tags="uses.config.machine_type.lxd-container" --tags="series.xenial,series.lts,series.all" --tags="~upgrade" - behave-lxd-18.04: behave -v {posargs} --tags="uses.config.machine_type.lxd-container" --tags="series.bionic,series.lts,series.all" --tags="~upgrade" - behave-lxd-20.04: behave -v {posargs} --tags="uses.config.machine_type.lxd-container" --tags="series.focal,series.lts,series.all" --tags="~upgrade" - behave-lxd-22.04: behave -v {posargs} --tags="uses.config.machine_type.lxd-container" --tags="series.jammy,series.lts,series.all" --tags="~upgrade" - behave-lxd-23.04: behave -v {posargs} --tags="uses.config.machine_type.lxd-container" --tags="series.lunar,series.all" --tags="~upgrade" - behave-lxd-23.10: behave -v {posargs} --tags="uses.config.machine_type.lxd-container" --tags="series.mantic,series.all" --tags="~upgrade" - - behave-vm-16.04: behave -v {posargs} --tags="uses.config.machine_type.lxd-vm" --tags="series.xenial,series.all,series.lts" --tags="~upgrade" - behave-vm-18.04: behave -v {posargs} --tags="uses.config.machine_type.lxd-vm" --tags="series.bionic,series.all,series.lts" --tags="~upgrade" - behave-vm-20.04: behave -v {posargs} --tags="uses.config.machine_type.lxd-vm" --tags="series.focal,series.all,series.lts" --tags="~upgrade" --tags="~docker" - behave-vm-22.04: behave -v {posargs} --tags="uses.config.machine_type.lxd-vm" --tags="series.jammy,series.all,series.lts" --tags="~upgrade" - behave-vm-23.04: behave -v {posargs} --tags="uses.config.machine_type.lxd-vm" --tags="series.lunar,series.all" --tags="~upgrade" - behave-vm-23.10: behave -v {posargs} --tags="uses.config.machine_type.lxd-vm" --tags="series.mantic,series.all" --tags="~upgrade" - - behave-upgrade-16.04: behave -v {posargs} --tags="upgrade" --tags="series.xenial,series.all" - behave-upgrade-18.04: behave -v {posargs} --tags="upgrade" --tags="series.bionic,series.all" - behave-upgrade-20.04: behave -v {posargs} --tags="upgrade" --tags="series.focal,series.all" - behave-upgrade-22.04: behave -v {posargs} --tags="upgrade" --tags="series.jammy,series.all" - behave-upgrade-23.04: behave -v {posargs} --tags="upgrade" --tags="series.lunar,series.all" - - behave-docker-20.04: behave -v {posargs} --tags="uses.config.machine_type.lxd-vm" --tags="series.focal" features/docker.feature - - behave-awsgeneric-16.04: behave -v {posargs} --tags="uses.config.machine_type.aws.generic" --tags="series.xenial,series.lts,series.all" --tags="~upgrade" - behave-awsgeneric-18.04: behave -v {posargs} --tags="uses.config.machine_type.aws.generic" --tags="series.bionic,series.lts,series.all" --tags="~upgrade" - behave-awsgeneric-20.04: behave -v {posargs} --tags="uses.config.machine_type.aws.generic" --tags="series.focal,series.lts,series.all" --tags="~upgrade" - behave-awsgeneric-22.04: behave -v {posargs} --tags="uses.config.machine_type.aws.generic" --tags="series.jammy,series.lts,series.all" --tags="~upgrade" - - behave-awspro-16.04: behave -v {posargs} --tags="uses.config.machine_type.aws.pro" --tags="series.xenial,series.lts,series.all" - behave-awspro-18.04: behave -v {posargs} 
--tags="uses.config.machine_type.aws.pro" --tags="series.bionic,series.lts,series.all" - behave-awspro-20.04: behave -v {posargs} --tags="uses.config.machine_type.aws.pro" --tags="series.focal,series.lts,series.all" - behave-awspro-22.04: behave -v {posargs} --tags="uses.config.machine_type.aws.pro" --tags="series.jammy,series.lts,series.all" - - behave-awspro-fips-16.04: behave -v {posargs} --tags="uses.config.machine_type.aws.pro-fips" --tags="series.xenial,series.lts,series.all" - behave-awspro-fips-18.04: behave -v {posargs} --tags="uses.config.machine_type.aws.pro-fips" --tags="series.bionic,series.lts,series.all" - behave-awspro-fips-20.04: behave -v {posargs} --tags="uses.config.machine_type.aws.pro-fips" --tags="series.focal,series.lts,series.all" - - behave-azuregeneric-16.04: behave -v {posargs} --tags="uses.config.machine_type.azure.generic" --tags="series.xenial,series.lts,series.all" --tags="~upgrade" - behave-azuregeneric-18.04: behave -v {posargs} --tags="uses.config.machine_type.azure.generic" --tags="series.bionic,series.lts,series.all" --tags="~upgrade" - behave-azuregeneric-20.04: behave -v {posargs} --tags="uses.config.machine_type.azure.generic" --tags="series.focal,series.lts,series.all" --tags="~upgrade" - behave-azuregeneric-22.04: behave -v {posargs} --tags="uses.config.machine_type.azure.generic" --tags="series.jammy,series.lts,series.all" --tags="~upgrade" - behave-azuregeneric-23.04: behave -v {posargs} --tags="uses.config.machine_type.azure.generic" --tags="series.lunar,series.all" --tags="~upgrade" - - behave-azurepro-16.04: behave -v {posargs} --tags="uses.config.machine_type.azure.pro" --tags="series.xenial,series.lts,series.all" - behave-azurepro-18.04: behave -v {posargs} --tags="uses.config.machine_type.azure.pro" --tags="series.bionic,series.lts,series.all" - behave-azurepro-20.04: behave -v {posargs} --tags="uses.config.machine_type.azure.pro" --tags="series.focal,series.lts,series.all" - behave-azurepro-22.04: behave -v {posargs} --tags="uses.config.machine_type.azure.pro" --tags="series.jammy,series.lts,series.all" - - behave-azurepro-fips-16.04: behave -v {posargs} --tags="uses.config.machine_type.azure.pro-fips" --tags="series.xenial,series.lts,series.all" - behave-azurepro-fips-18.04: behave -v {posargs} --tags="uses.config.machine_type.azure.pro-fips" --tags="series.bionic,series.lts,series.all" - behave-azurepro-fips-20.04: behave -v {posargs} --tags="uses.config.machine_type.azure.pro-fips" --tags="series.focal,series.lts,series.all" - - behave-gcpgeneric-16.04: behave -v {posargs} --tags="uses.config.machine_type.gcp.generic" --tags="series.xenial,series.lts,series.all" --tags="~upgrade" - behave-gcpgeneric-18.04: behave -v {posargs} --tags="uses.config.machine_type.gcp.generic" --tags="series.bionic,series.lts,series.all" --tags="~upgrade" - behave-gcpgeneric-20.04: behave -v {posargs} --tags="uses.config.machine_type.gcp.generic" --tags="series.focal,series.lts,series.all" --tags="~upgrade" - behave-gcpgeneric-22.04: behave -v {posargs} --tags="uses.config.machine_type.gcp.generic" --tags="series.jammy,series.lts,series.all" --tags="~upgrade" - behave-gcpgeneric-23.04: behave -v {posargs} --tags="uses.config.machine_type.gcp.generic" --tags="series.lunar,series.all" --tags="~upgrade" - - behave-gcppro-16.04: behave -v {posargs} --tags="uses.config.machine_type.gcp.pro" --tags="series.xenial,series.lts,series.all" --tags="~upgrade" - behave-gcppro-18.04: behave -v {posargs} --tags="uses.config.machine_type.gcp.pro" 
--tags="series.bionic,series.lts,series.all" --tags="~upgrade" - behave-gcppro-20.04: behave -v {posargs} --tags="uses.config.machine_type.gcp.pro" --tags="series.focal,series.lts,series.all" --tags="~upgrade" - behave-gcppro-22.04: behave -v {posargs} --tags="uses.config.machine_type.gcp.pro" --tags="series.jammy,series.lts,series.all" --tags="~upgrade" - - behave-gcppro-fips-18.04: behave -v {posargs} --tags="uses.config.machine_type.gcp.pro-fips" --tags="series.bionic,series.lts,series.all" --tags="~upgrade" - behave-gcppro-fips-20.04: behave -v {posargs} --tags="uses.config.machine_type.gcp.pro-fips" --tags="series.focal,series.lts,series.all" --tags="~upgrade" + behave: behave -v {posargs} [flake8] # E251: Older versions of flake8 et al don't permit the diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/actions.py ubuntu-advantage-tools-31.2~23.10/uaclient/actions.py --- ubuntu-advantage-tools-30~23.10/uaclient/actions.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/actions.py 2024-01-18 17:34:13.000000000 +0000 @@ -2,6 +2,8 @@ import glob import logging import os +import re +import shutil from typing import List, Optional # noqa: F401 from uaclient import ( @@ -18,6 +20,7 @@ from uaclient.clouds import AutoAttachCloudInstance # noqa: F401 from uaclient.clouds import identity from uaclient.defaults import ( + APPARMOR_PROFILES, CLOUD_BUILD_INFO, DEFAULT_CONFIG_FILE, DEFAULT_LOG_PREFIX, @@ -32,6 +35,8 @@ UA_SERVICES = ( + "apt-news.service", + "esm-cache.service", "ua-timer.service", "ua-timer.timer", "ua-auto-attach.path", @@ -48,7 +53,7 @@ ) -> None: """ Common functionality to take a token and attach via contract backend - :raise UrlError: On unexpected connectivity issues to contract + :raise ConnectivityError: On unexpected connectivity issues to contract server or inability to access identity doc from metadata service. :raise ContractAPIError: On unexpected errors when talking to the contract server. @@ -57,14 +62,9 @@ contract_client = contract.UAContractClient(cfg) attached_at = datetime.datetime.now(tz=datetime.timezone.utc) - - try: - new_machine_token = contract_client.add_contract_machine( - contract_token=token, attachment_dt=attached_at - ) - except exceptions.UrlError as e: - LOG.exception(str(e)) - raise exceptions.ConnectivityError() + new_machine_token = contract_client.add_contract_machine( + contract_token=token, attachment_dt=attached_at + ) cfg.machine_token_file.write(new_machine_token) @@ -84,7 +84,7 @@ cfg.machine_token_file.entitlements, allow_enable, ) - except (exceptions.UrlError, exceptions.UbuntuProError) as exc: + except (exceptions.ConnectivityError, exceptions.UbuntuProError) as exc: # Persist updated status in the event of partial attach attachment_data_file.write(AttachmentData(attached_at=attached_at)) ua_status.status(cfg=cfg) @@ -107,7 +107,7 @@ allow_enable=True, ) -> None: """ - :raise UrlError: On unexpected connectivity issues to contract + :raise ConnectivityError: On unexpected connectivity issues to contract server or inability to access identity doc from metadata service. :raise ContractAPIError: On unexpected errors when talking to the contract server. @@ -176,6 +176,30 @@ return status, ret +def _write_apparmor_logs_to_file(filename: str) -> None: + """ + Helper which gets ubuntu_pro apparmor logs from the kernel from the last + day and writes them to the specified filename. 
+ """ + # can't use journalctl's --grep, because xenial doesn't support it :/ + cmd = ["journalctl", "-b", "-k", "--since=1 day ago"] + apparmor_re = r"apparmor=\".*(profile=\"ubuntu_pro_|name=\"ubuntu_pro_)" + kernel_logs = None + try: + kernel_logs, _ = system.subp(cmd) + except exceptions.ProcessExecutionError as e: + LOG.warning("Failed to collect kernel logs:\n%s", str(e)) + system.write_file("{}-error".format(filename), str(e)) + else: + if kernel_logs: # some unit tests mock subp to return (None,None) + apparmor_logs = [] + # filter out only what interests us + for kernel_line in kernel_logs.split("\n"): + if re.search(apparmor_re, kernel_line): + apparmor_logs.append(kernel_line) + system.write_file(filename, "\n".join(apparmor_logs)) + + def _write_command_output_to_file( cmd, filename: str, return_codes: Optional[List[int]] = None ) -> None: @@ -196,7 +220,7 @@ timer_jobs_state_file.ua_file.path, CLOUD_BUILD_INFO, *( - entitlement.repo_list_file_tmpl.format(name=entitlement.name) + entitlement(cfg).repo_file for entitlement in entitlements.ENTITLEMENT_CLASSES if issubclass(entitlement, entitlements.repo.RepoEntitlement) ), @@ -283,3 +307,15 @@ system.write_file( os.path.join(output_dir, os.path.basename(f)), content ) + + # get apparmor logs + _write_apparmor_logs_to_file("{}/apparmor_logs.txt".format(output_dir)) + + # include apparmor profiles + for f in APPARMOR_PROFILES: + if os.path.isfile(f): + try: + shutil.copy(f, output_dir) + except Exception as e: + LOG.warning("Failed to copy file: %s\n%s", f, str(e)) + continue diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/api/tests/test_api_u_pro_attach_auto_full_auto_attach_v1.py ubuntu-advantage-tools-31.2~23.10/uaclient/api/tests/test_api_u_pro_attach_auto_full_auto_attach_v1.py --- ubuntu-advantage-tools-30~23.10/uaclient/api/tests/test_api_u_pro_attach_auto_full_auto_attach_v1.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/api/tests/test_api_u_pro_attach_auto_full_auto_attach_v1.py 2024-02-14 15:37:46.000000000 +0000 @@ -234,7 +234,7 @@ assert 4 == enable_ent_by_name.call_count @mock.patch( - "uaclient.lock.SpinLock.__enter__", + "uaclient.lock.RetryLock.__enter__", side_effect=[ exceptions.LockHeldError( lock_request="request", lock_holder="holder", pid=10 @@ -242,7 +242,7 @@ ], ) def test_lock_held( - self, _m_spinlock_enter, _notice_remove, _notice_read, FakeConfig + self, _m_retrylock_enter, _notice_remove, _notice_read, FakeConfig ): with pytest.raises(exceptions.LockHeldError): _full_auto_attach(FullAutoAttachOptions, FakeConfig()) diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/api/tests/test_api_u_pro_attach_magic_wait_v1.py ubuntu-advantage-tools-31.2~23.10/uaclient/api/tests/test_api_u_pro_attach_magic_wait_v1.py --- ubuntu-advantage-tools-30~23.10/uaclient/api/tests/test_api_u_pro_attach_magic_wait_v1.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/api/tests/test_api_u_pro_attach_magic_wait_v1.py 2024-01-18 17:34:13.000000000 +0000 @@ -67,10 +67,10 @@ ): magic_token = "test-id" m_attach_token_info.side_effect = [ - exceptions.ConnectivityError(), - exceptions.ConnectivityError(), - exceptions.ConnectivityError(), - exceptions.ConnectivityError(), + exceptions.ConnectivityError(url="url", cause="cause"), + exceptions.ConnectivityError(url="url", cause="cause"), + exceptions.ConnectivityError(url="url", cause="cause"), + exceptions.ConnectivityError(url="url", cause="cause"), ] options = MagicAttachWaitOptions(magic_token=magic_token) 
@@ -86,9 +86,9 @@ ): magic_token = "test-id" m_attach_token_info.side_effect = [ - exceptions.ConnectivityError(), - exceptions.ConnectivityError(), - exceptions.ConnectivityError(), + exceptions.ConnectivityError(url="url", cause="cause"), + exceptions.ConnectivityError(url="url", cause="cause"), + exceptions.ConnectivityError(url="url", cause="cause"), { "token": magic_token, "expires": "2100-06-09T18:14:55.323733Z", @@ -113,7 +113,7 @@ ): magic_token = "test-id" m_attach_token_info.side_effect = [ - exceptions.ConnectivityError(), + exceptions.ConnectivityError(url="url", cause="cause"), exceptions.MagicAttachUnavailable(), exceptions.MagicAttachUnavailable(), { diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/api/tests/test_api_u_pro_security_fix.py ubuntu-advantage-tools-31.2~23.10/uaclient/api/tests/test_api_u_pro_security_fix.py --- ubuntu-advantage-tools-30~23.10/uaclient/api/tests/test_api_u_pro_security_fix.py 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/api/tests/test_api_u_pro_security_fix.py 2024-01-18 17:34:13.000000000 +0000 @@ -0,0 +1,1110 @@ +import copy +from collections import defaultdict + +import mock +import pytest + +from uaclient import exceptions, http +from uaclient.api.u.pro.security.fix._common import ( + API_V1_CVE_TMPL, + API_V1_CVES, + API_V1_NOTICE_TMPL, + API_V1_NOTICES, + CVE, + USN, + CVEPackageStatus, + UASecurityClient, + get_cve_affected_source_packages_status, + get_related_usns, + get_usn_affected_packages_status, + merge_usn_released_binary_package_versions, + override_usn_release_package_status, + query_installed_source_pkg_versions, +) + +M_PATH = "uaclient.api.u.pro.security.fix._common." + +SAMPLE_GET_CVES_QUERY_PARAMS = { + "query": "vq", + "priority": "vpr", + "package": "vpa", + "limit": 1, + "offset": 2, + "component": "vc", + "version": "vv", + "status": "vs", +} + +SAMPLE_GET_NOTICES_QUERY_PARAMS = { + "details": "cve", + "release": "vq", + "limit": 1, + "offset": 2, + "order": "vo", +} + + +CVE_ESM_PACKAGE_STATUS_RESPONSE = { + "component": None, + "description": "1.17-6ubuntu4.1+esm1", + "pocket": "esm-infra", + "release_codename": "focal", + "status": "released", +} + + +SAMBA_CVE_STATUS_BIONIC = { + "component": None, + "description": "2:4.7.6+dfsg~ubuntu-0ubuntu2.19", + "pocket": None, + "release_codename": "bionic", + "status": "released", +} +SAMBA_CVE_STATUS_FOCAL = { + "component": None, + "description": "2:4.11.6+dfsg-0ubuntu1.4", + "pocket": None, + "release_codename": "focal", + "status": "not-affected", +} +SAMBA_CVE_STATUS_UPSTREAM = { + "component": None, + "description": "", + "pocket": None, + "release_codename": "upstream", + "status": "needs-triage", +} + +SAMPLE_CVE_RESPONSE = { + "bugs": ["https://bugzilla.samba.org/show_bug.cgi?id=14497"], + "description": "\nAn elevation of privilege vulnerability exists ...", + "id": "CVE-2020-1472", + "notes": [{"author": "..", "note": "..."}], + "notices_ids": ["USN-4510-1", "USN-4510-2", "USN-4559-1"], + "packages": [ + { + "debian": "https://tracker.debian.org/pkg/samba", + "name": "samba", + "source": "https://ubuntu.com/security/cve?package=samba", + "statuses": [ + SAMBA_CVE_STATUS_BIONIC, + SAMBA_CVE_STATUS_FOCAL, + SAMBA_CVE_STATUS_UPSTREAM, + ], + } + ], + "status": "active", +} + +SAMPLE_USN_RESPONSE = { + "cves_ids": ["CVE-2020-1473", "CVE-2020-1472"], + "id": "USN-4510-2", + "instructions": "In general, a standard system update will make all ...\n", + "references": [], + "release_packages": { + "series-example-1": [ + { + 
"description": "SMB/CIFS file, print, and login ... Unix", + "is_source": True, + "name": "samba", + "version": "2:4.3.11+dfsg-0ubuntu0.14.04.20+esm9", + }, + { + "is_source": False, + "name": "samba", + "source_link": "https://launchpad.net/ubuntu/+source/samba", + "version": "2~14.04.1+esm9", + "version_link": "https://....11+dfsg-0ubuntu0.14.04.20+esm9", + }, + ], + "series-example-2": [ + { + "description": "high-level 3D graphics kit implementing ...", + "is_source": True, + "name": "coin3", + "version": "3.1.4~abc9f50-4ubuntu2+esm1", + }, + { + "is_source": False, + "name": "libcoin80-runtime", + "source_link": "https://launchpad.net/ubuntu/+source/coin3", + "version": "3~18.04.1+esm2", + "version_link": "https://coin3...18.04.1+esm2", + }, + ], + }, + "summary": "Samba would allow unintended access to files over the ....\n", + "title": "Samba vulnerability", + "type": "USN", +} + + +SAMPLE_USN_RESPONSE_NO_CVES = { + "cves_ids": [], + "id": "USN-4038-3", + "instructions": "In general, a standard system update will make all ...\n", + "references": ["https://launchpad.net/bugs/1834494"], + "release_packages": { + "bionic": [ + { + "description": "high-level 3D graphics kit implementing ...", + "is_source": True, + "name": "coin3", + "version": "3.1.4~abc9f50-4ubuntu2+esm1", + }, + { + "is_source": False, + "name": "libcoin80-runtime", + "source_link": "https://launchpad.net/ubuntu/+source/coin3", + "version": "3~18.04.1+esm2", + "version_link": "https://coin3...18.04.1+esm2", + "pocket": "security", + }, + ] + }, + "summary": "", + "title": "USN vulnerability", + "type": "USN", +} + +CVE_PKG_STATUS_NEEDED = { + "description": "2.1", + "pocket": None, + "status": "needed", +} +CVE_PKG_STATUS_IGNORED = { + "description": "2.1", + "pocket": None, + "status": "ignored", +} +CVE_PKG_STATUS_DEFERRED = { + "description": "2.1", + "pocket": None, + "status": "deferred", +} +CVE_PKG_STATUS_NEEDS_TRIAGE = { + "description": "2.1", + "pocket": None, + "status": "needs-triage", +} +CVE_PKG_STATUS_PENDING = { + "description": "2.1", + "pocket": None, + "status": "pending", +} +CVE_PKG_STATUS_RELEASED = { + "description": "2.1", + "pocket": "updates", + "status": "released", +} +CVE_PKG_STATUS_RELEASED_ESM_INFRA = { + "description": "2.1", + "pocket": "esm-infra", + "status": "released", +} +CVE_PKG_STATUS_RELEASED_ESM_APPS = { + "description": "2.1", + "pocket": "esm-apps", + "status": "released", +} +CVE_PKG_STATUS_NEEDED = {"description": "", "pocket": None, "status": "needed"} + + +def shallow_merge_dicts(a, b): + c = a.copy() + c.update(b) + return c + + +class TestCVE: + def test_cve_init_attributes(self, FakeConfig): + """CVE.__init__ saves client and response on instance.""" + client = UASecurityClient(FakeConfig()) + cve = CVE(client, {"some": "response"}) + assert client == cve.client + assert {"some": "response"} == cve.response + + @pytest.mark.parametrize( + "cve1,cve2,are_equal", + ( + (CVE(None, {"1": "2"}), CVE(None, {"1": "2"}), True), + (CVE("A", {"1": "2"}), CVE("B", {"1": "2"}), True), + (CVE(None, {}), CVE("B", {"1": "2"}), False), + (CVE(None, {"1": "2"}), USN(None, {"1": "2"}), False), + ), + ) + def test_equality(self, cve1, cve2, are_equal): + """Equality is based instance type and CVE.response value""" + if are_equal: + assert cve1.response == cve2.response + assert cve1 == cve2 + else: + if isinstance(cve1, CVE) and isinstance(cve2, CVE): + assert cve1.response != cve2.response + assert cve1 != cve2 + + @pytest.mark.parametrize( + "attr_name,expected,response", + ( + 
("description", None, {}), + ("description", "descr", {"description": "descr"}), + ("id", "UNKNOWN_CVE_ID", {}), + ( + "id", + "CVE-123", + {"id": "cve-123"}, + ), # Uppercase of id value is used + ("notices_ids", [], {}), + ("notices_ids", [], {"notices_ids": []}), + ("notices_ids", ["1", "2"], {"notices_ids": ["1", "2"]}), + ), + ) + def test_cve_basic_properties_from_response( + self, attr_name, expected, response, FakeConfig + ): + """CVE instance properties are set from Security API CVE response.""" + client = UASecurityClient(FakeConfig()) + cve = CVE(client, response) + assert expected == getattr(cve, attr_name) + + @pytest.mark.parametrize( + "usns_response,expected", + ( + (None, []), + ([], []), + ( # USNs are properly sorted by id + [{"id": "USN-1"}, {"id": "USN-2"}, {"id": "LSN-3"}], + [USN(None, {"id": "USN-2"}), USN(None, {"id": "USN-1"})], + ), + ), + ) + def test_notices_cached_from_usns_response( + self, usns_response, expected, FakeConfig + ): + """List of USNs returned from CVE 'usns' response if present.""" + client = UASecurityClient(FakeConfig()) + cve_response = copy.deepcopy(SAMPLE_CVE_RESPONSE) + if usns_response is not None: + cve_response["notices"] = usns_response + cve = CVE(client, cve_response) + assert expected == cve.notices + # clear box test caching in effect + cve.response = "junk" + assert expected == cve.notices + + +class TestUSN: + def test_usn_init_attributes(self, FakeConfig): + """USN.__init__ saves client and response on instance.""" + client = UASecurityClient(FakeConfig()) + cve = USN(client, {"some": "response"}) + assert client == cve.client + assert {"some": "response"} == cve.response + + @pytest.mark.parametrize( + "usn1,usn2,are_equal", + ( + (USN(None, {"1": "2"}), USN(None, {"1": "2"}), True), + (USN("A", {"1": "2"}), USN("B", {"1": "2"}), True), + (USN(None, {}), USN("B", {"1": "2"}), False), + (USN(None, {"1": "2"}), CVE(None, {"1": "2"}), False), + ), + ) + def test_equality(self, usn1, usn2, are_equal): + """Equality is based instance type and USN.response value""" + if are_equal: + assert usn1.response == usn2.response + assert usn1 == usn2 + else: + if isinstance(usn1, USN) and isinstance(usn2, USN): + assert usn1.response != usn2.response + assert usn1 != usn2 + + @pytest.mark.parametrize( + "attr_name,expected,response", + ( + ("title", None, {}), + ("title", "my title", {"title": "my title"}), + ("id", "UNKNOWN_USN_ID", {}), + ( + "id", + "USN-123", + {"id": "usn-123"}, + ), # Uppercase of id value is used + ("cves_ids", [], {}), + ("cves_ids", [], {"cves_ids": []}), + ("cves_ids", ["1", "2"], {"cves_ids": ["1", "2"]}), + ("cves", [], {}), + ("cves", [], {"cves": []}), + ), + ) + def test_usn_basic_properties_from_response( + self, attr_name, expected, response, FakeConfig + ): + """USN instance properties are set from Security API USN response.""" + client = UASecurityClient(FakeConfig()) + usn = USN(client, response) + assert expected == getattr(usn, attr_name) + + @pytest.mark.parametrize( + "series,expected", + ( + ( + "series-example-1", + { + "samba": { + "source": { + "description": ( + "SMB/CIFS file, print, and login ... 
Unix" + ), + "is_source": True, + "name": "samba", + "version": "2:4.3.11+dfsg-0ubuntu0.14.04.20+esm9", + }, + "samba": { + "is_source": False, + "name": "samba", + "source_link": ( + "https://launchpad.net/ubuntu/+source/samba" + ), + "version": "2~14.04.1+esm9", + "version_link": ( + "https://....11+dfsg-0ubuntu0.14.04.20+esm9" + ), + }, + } + }, + ), + ( + "series-example-2", + { + "coin3": { + "source": { + "description": ( + "high-level 3D graphics kit implementing ..." + ), + "is_source": True, + "name": "coin3", + "version": "3.1.4~abc9f50-4ubuntu2+esm1", + }, + "libcoin80-runtime": { + "is_source": False, + "name": "libcoin80-runtime", + "source_link": ( + "https://launchpad.net/ubuntu/+source/coin3" + ), + "version": "3~18.04.1+esm2", + "version_link": "https://coin3...18.04.1+esm2", + }, + } + }, + ), + ("series-example-3", {}), + ), + ) + @mock.patch("uaclient.system.get_release_info") + def test_release_packages_returns_source_and_binary_pkgs_for_series( + self, m_get_release_info, series, expected, FakeConfig + ): + m_get_release_info.return_value = mock.MagicMock(series=series) + client = UASecurityClient(FakeConfig()) + usn = USN(client, SAMPLE_USN_RESPONSE) + + assert expected == usn.release_packages + usn._release_packages = {"sl": "1.0"} + assert {"sl": "1.0"} == usn.release_packages + + @pytest.mark.parametrize( + "source_link,error_msg", + ( + ( + None, + ( + "Metadata for USN-4510-2 is invalid. " + "Error: USN-4510-2 metadata does not define " + "release_packages source_link for samba2." + ), + ), + ( + "unknown format", + ( + "Metadata for USN-4510-2 is invalid. " + "Error: USN-4510-2 metadata has unexpected " + "release_packages source_link value for samba2: " + "unknown format." + ), + ), + ), + ) + @mock.patch("uaclient.system.get_release_info") + def test_release_packages_errors_on_sparse_source_url( + self, m_get_release_info, source_link, error_msg, FakeConfig + ): + """Raise errors when USN metadata contains no valid source_link.""" + m_get_release_info.return_value = mock.MagicMock( + series="series-example-1" + ) + client = UASecurityClient(FakeConfig()) + sparse_md = copy.deepcopy(SAMPLE_USN_RESPONSE) + sparse_md["release_packages"]["series-example-1"].append( + { + "is_source": False, + "name": "samba2", + "source_link": source_link, + "version": "2~14.04.1+esm9", + "version_link": "https://....11+dfsg-0ubuntu0.14.04.20+esm9", + } + ) + usn = USN(client, sparse_md) + with pytest.raises(exceptions.SecurityAPIMetadataError) as exc: + usn.release_packages + assert error_msg in str(exc.value) + + @pytest.mark.parametrize( + "cves_response,expected", + ( + (None, []), + ([], []), + ( # CVEs are properly sorted by id + [{"id": "1"}, {"id": "2"}], + [CVE(None, {"id": "2"}), CVE(None, {"id": "1"})], + ), + ), + ) + def test_cves_cached_and_sorted_from_cves_response( + self, cves_response, expected, FakeConfig + ): + """List of USNs returned from CVE 'usns' response if present.""" + client = UASecurityClient(FakeConfig()) + usn_response = copy.deepcopy(SAMPLE_USN_RESPONSE) + if cves_response is not None: + usn_response["cves"] = cves_response + usn = USN(client, usn_response) + assert expected == usn.cves + # clear box test caching in effect + usn.response = "junk" + assert expected == usn.cves + + +class TestCVEPackageStatus: + def test_simple_properties_from_response(self): + pkg_status = CVEPackageStatus( + cve_response=CVE_ESM_PACKAGE_STATUS_RESPONSE + ) + assert CVE_ESM_PACKAGE_STATUS_RESPONSE == pkg_status.response + assert pkg_status.response["description"] 
== pkg_status.description + assert pkg_status.description == pkg_status.fixed_version + assert pkg_status.response["pocket"] == pkg_status.pocket + assert ( + pkg_status.response["release_codename"] + == pkg_status.release_codename + ) + assert pkg_status.response["status"] == pkg_status.status + + @pytest.mark.parametrize( + "pocket,description,expected", + ( + ("esm-infra", "1.2", "Ubuntu Pro: ESM Infra"), + ("esm-apps", "1.2", "Ubuntu Pro: ESM Apps"), + ("updates", "1.2esm", "Ubuntu standard updates"), + ("security", "1.2esm", "Ubuntu standard updates"), + (None, "1.2", "Ubuntu standard updates"), + (None, "1.2esm", "Ubuntu Pro: ESM Infra"), + ), + ) + def test_pocket_source_from_response(self, pocket, description, expected): + cve_response = {"pocket": pocket, "description": description} + pkg_status = CVEPackageStatus(cve_response=cve_response) + assert expected == pkg_status.pocket_source + + @pytest.mark.parametrize( + "pocket,description,expected", + ( + ("esm-infra", "1.2", True), + ("esm-apps", "1.2", True), + ("updates", "1.2esm", False), + ("security", "1.2esm", False), + (None, "1.2", False), + (None, "1.2esm", True), + ), + ) + def test_requires_pro_from_response(self, pocket, description, expected): + """requires_pro is derived from response pocket and description.""" + cve_response = {"pocket": pocket, "description": description} + pkg_status = CVEPackageStatus(cve_response=cve_response) + assert expected is pkg_status.requires_ua + + @pytest.mark.parametrize( + "status,pocket,expected", + ( + ( + "not-affected", + "", + "Source package is not affected on this release.", + ), + ("DNE", "", "Source package does not exist on this release."), + ( + "needs-triage", + "esm-infra", + "Ubuntu security engineers are investigating this issue.", + ), + ("needed", "esm-infra", "Sorry, no fix is available yet."), + ( + "pending", + "esm-infra", + "A fix is coming soon. 
Try again tomorrow.", + ), + ("ignored", "esm-infra", "Sorry, no fix is available."), + ( + "released", + "esm-infra", + "A fix is available in Ubuntu Pro: ESM Infra.", + ), + ( + "released", + "security", + "A fix is available in Ubuntu standard updates.", + ), + ("bogus", "1.2", "UNKNOWN: bogus"), + ), + ) + def test_status_message_from_response(self, status, pocket, expected): + cve_response = {"pocket": pocket, "status": status} + pkg_status = CVEPackageStatus(cve_response=cve_response) + assert expected == pkg_status.status_message + + +@mock.patch(M_PATH + "UASecurityClient.request_url") +class TestUASecurityClient: + @pytest.mark.parametrize( + "m_kwargs,expected_error, extra_security_params", + ( + ({}, None, None), + ({"query": "vq"}, None, {"test": "blah"}), + (SAMPLE_GET_CVES_QUERY_PARAMS, None, None), + ({"invalidparam": "vv"}, TypeError, None), + ), + ) + def test_get_cves_sets_query_params_on_get_cves_route( + self, + request_url, + m_kwargs, + expected_error, + extra_security_params, + FakeConfig, + ): + """GET CVE instances from API_V1_CVES route with querystrings""" + cfg = FakeConfig() + if extra_security_params: + cfg.override_features( + {"extra_security_params": extra_security_params} + ) + + client = UASecurityClient(cfg) + if expected_error: + with pytest.raises(expected_error) as exc: + client.get_cves(**m_kwargs) + assert ( + "get_cves() got an unexpected keyword argument 'invalidparam'" + ) in str(exc.value) + assert 0 == request_url.call_count + else: + for key in SAMPLE_GET_CVES_QUERY_PARAMS: + if key not in m_kwargs: + m_kwargs[key] = None + request_url.return_value = http.HTTPResponse( + code=200, + headers={}, + body="", + json_dict={}, + json_list=["body1", "body2"], + ) + [cve1, cve2] = client.get_cves(**m_kwargs) + assert isinstance(cve1, CVE) + assert isinstance(cve2, CVE) + assert "body1" == cve1.response + assert "body2" == cve2.response + # get_cves transposes "query" to "q" + m_kwargs["q"] = m_kwargs.pop("query") + + assert [ + mock.call(API_V1_CVES, query_params=m_kwargs) + ] == request_url.call_args_list + + @pytest.mark.parametrize( + "m_kwargs,expected_error, extra_security_params", + ( + ({}, None, None), + ({"details": "cve"}, None, None), + (SAMPLE_GET_NOTICES_QUERY_PARAMS, None, {"test": "blah"}), + ({"invalidparam": "vv"}, TypeError, None), + ), + ) + def test_get_notices_sets_query_params_on_get_cves_route( + self, + request_url, + m_kwargs, + expected_error, + extra_security_params, + FakeConfig, + ): + """GET body from API_V1_NOTICES route with appropriate querystring""" + cfg = FakeConfig() + if extra_security_params: + cfg.override_features( + {"extra_security_params": extra_security_params} + ) + + client = UASecurityClient(cfg) + if expected_error: + with pytest.raises(expected_error) as exc: + client.get_notices(**m_kwargs) + assert ( + "get_notices() got an unexpected keyword argument" + " 'invalidparam'" + ) in str(exc.value) + assert 0 == request_url.call_count + else: + for key in SAMPLE_GET_NOTICES_QUERY_PARAMS: + if key not in m_kwargs: + m_kwargs[key] = None + request_url.return_value = http.HTTPResponse( + code=200, + headers={}, + body="", + json_dict={ + "notices": [ + {"id": "USN-2", "cves_ids": ["cve"]}, + {"id": "USN-1", "cves_ids": ["cve"]}, + {"id": "LSN-3", "cves_ids": ["cve"]}, + ] + }, + json_list=[], + ) + [usn1, usn2] = client.get_notices(**m_kwargs) + assert isinstance(usn1, USN) + assert isinstance(usn2, USN) + assert "USN-1" == usn1.id + assert "USN-2" == usn2.id + assert [ + mock.call(API_V1_NOTICES, 
query_params=m_kwargs) + ] == request_url.call_args_list + + @pytest.mark.parametrize("details", (("cve1"), (None))) + def test_get_notices_filter_usns_when_setting_details_param( + self, request_url, details, FakeConfig + ): + """Test if details are used to filter the returned USNs.""" + cfg = FakeConfig() + client = UASecurityClient(cfg) + request_url.return_value = http.HTTPResponse( + code=200, + headers={}, + body="", + json_dict={ + "notices": [ + {"id": "USN-2", "cves_ids": ["cve2"]}, + {"id": "USN-1", "cves_ids": ["cve1"]}, + {"id": "LSN-3", "cves_ids": ["cve3"]}, + ] + }, + json_list=[], + ) + usns = client.get_notices(details=details) + + if details: + assert len(usns) == 1 + assert usns[0].id == "USN-1" + else: + assert len(usns) == 2 + assert usns[0].id == "USN-1" + assert usns[1].id == "USN-2" + + @pytest.mark.parametrize( + "m_kwargs,expected_error, extra_security_params", + (({}, TypeError, None), ({"cve_id": "CVE-1"}, None, {"test": "blah"})), + ) + def test_get_cve_provides_response_from_cve_json_route( + self, + request_url, + m_kwargs, + expected_error, + extra_security_params, + FakeConfig, + ): + """GET body from API_V1_CVE_TMPL route with required cve_id.""" + cfg = FakeConfig() + if extra_security_params: + cfg.override_features( + {"extra_security_params": extra_security_params} + ) + client = UASecurityClient(cfg) + if expected_error: + with pytest.raises(expected_error) as exc: + client.get_cve(**m_kwargs) + assert ( + "get_cve() missing 1 required positional argument: 'cve_id'" + ) in str(exc.value) + assert 0 == request_url.call_count + else: + request_url.return_value = http.HTTPResponse( + code=200, + headers={}, + body="", + json_dict={"body": "body"}, + json_list=[], + ) + cve = client.get_cve(**m_kwargs) + assert isinstance(cve, CVE) + assert {"body": "body"} == cve.response + assert [ + mock.call(API_V1_CVE_TMPL.format(cve=m_kwargs["cve_id"])) + ] == request_url.call_args_list + + @pytest.mark.parametrize( + "m_kwargs,expected_error, extra_security_params", + ( + ({}, TypeError, None), + ({"notice_id": "USN-1"}, None, {"test": "blah"}), + ), + ) + def test_get_notice_provides_response_from_notice_json_route( + self, + request_url, + m_kwargs, + expected_error, + extra_security_params, + FakeConfig, + ): + """GET body from API_V1_NOTICE_TMPL route with required notice_id.""" + cfg = FakeConfig() + if extra_security_params: + cfg.override_features( + {"extra_security_params": extra_security_params} + ) + + client = UASecurityClient(cfg) + if expected_error: + with pytest.raises(expected_error) as exc: + client.get_notice(**m_kwargs) + assert ( + "get_notice() missing 1 required positional argument:" + " 'notice_id'" + ) in str(exc.value) + assert 0 == request_url.call_count + else: + request_url.return_value = http.HTTPResponse( + code=200, + headers={}, + body="", + json_dict={"body": "body"}, + json_list=[], + ) + assert {"body": "body"} == client.get_notice(**m_kwargs).response + assert [ + mock.call( + API_V1_NOTICE_TMPL.format(notice=m_kwargs["notice_id"]) + ) + ] == request_url.call_args_list + + +class TestGetCVEAffectedPackageStatus: + @pytest.mark.parametrize( + "series,installed_packages,expected_status", + ( + ("bionic", {}, {}), + # installed package version has no bearing on status filtering + ("bionic", {"samba": "1000"}, SAMBA_CVE_STATUS_BIONIC), + # active series has a bearing on status filtering + ("upstream", {"samba": "1000"}, SAMBA_CVE_STATUS_UPSTREAM), + # package status not-affected gets filtered from affected_pkgs + ("focal", {"samba": 
"1000"}, {}), + ), + ) + @mock.patch("uaclient.system.get_release_info") + def test_affected_packages_status_filters_by_installed_pkgs_and_series( + self, + m_get_release_info, + series, + installed_packages, + expected_status, + FakeConfig, + ): + """Package statuses are filtered if not installed""" + m_get_release_info.return_value = mock.MagicMock(series=series) + client = UASecurityClient(FakeConfig()) + cve = CVE(client, SAMPLE_CVE_RESPONSE) + affected_packages = get_cve_affected_source_packages_status( + cve, installed_packages=installed_packages + ) + if expected_status: + package_status = affected_packages["samba"] + assert expected_status == package_status.response + else: + assert expected_status == affected_packages + + +class TestQueryInstalledPkgSources: + @pytest.mark.parametrize( + "dpkg_out,results", + ( + # Ignore b non-installed status + ("a,,1.2,installed\nb,b,1.2,config-files", {"a": {"a": "1.2"}}), + # Handle cases where no Source is defined for the pkg + ( + "a,,1.2,installed\nzip,zip,3.0,installed", + {"a": {"a": "1.2"}, "zip": {"zip": "3.0"}}, + ), + # Prefer Source package name to binary package name + ( + "b,bsrc,1.2,installed\nzip,zip,3.0,installed", + {"bsrc": {"b": "1.2"}, "zip": {"zip": "3.0"}}, + ), + ), + ) + @mock.patch(M_PATH + "system.subp") + @mock.patch("uaclient.system.get_release_info") + def test_result_keyed_by_source_package_name( + self, m_get_release_info, subp, dpkg_out, results + ): + m_get_release_info.return_value = mock.MagicMock(series="bionic") + subp.return_value = dpkg_out, "" + assert results == query_installed_source_pkg_versions() + _format = "-f=${Package},${Source},${Version},${db:Status-Status}\n" + assert [ + mock.call(["dpkg-query", _format, "-W"]) + ] == subp.call_args_list + + +class TestGetRelatedUSNs: + def test_no_usns_returned_when_no_cves_are_found(self, FakeConfig): + cfg = FakeConfig() + client = UASecurityClient(cfg=cfg) + usn = USN(client, SAMPLE_USN_RESPONSE_NO_CVES) + + assert [] == get_related_usns(usn, client) + + def test_usns_ignore_non_usns_items(self, FakeConfig): + expected_value = mock.MagicMock(id="USN-1235-1") + + def fake_get_notice(notice_id): + return expected_value + + m_client = mock.MagicMock() + m_client.get_notice.side_effect = fake_get_notice + + m_usn = mock.MagicMock( + cves=[ + mock.MagicMock( + notices_ids=["USN-1235-1", "LSN-0088-1"], + ) + ], + id="USN-8796-1", + ) + + assert [expected_value] == get_related_usns(m_usn, m_client) + + +class TestGetUSNAffectedPackagesStatus: + @pytest.mark.parametrize( + "installed_packages, affected_packages", + ( + ( + {"coin3": {"libcoin80-runtime", "1.0"}}, + { + "coin3": CVEPackageStatus( + defaultdict( + str, {"status": "released", "pocket": "security"} + ) + ) + }, + ), + ), + ) + @mock.patch("uaclient.system.get_release_info") + def test_pkgs_come_from_release_packages_if_usn_has_no_cves( + self, + m_get_release_info, + installed_packages, + affected_packages, + FakeConfig, + ): + m_get_release_info.return_value = mock.MagicMock(series="bionic") + + cfg = FakeConfig() + client = UASecurityClient(cfg=cfg) + usn = USN(client, SAMPLE_USN_RESPONSE_NO_CVES) + actual_value = get_usn_affected_packages_status( + usn, installed_packages + ) + + if not affected_packages: + assert actual_value is {} + else: + assert "coin3" in actual_value + assert ( + affected_packages["coin3"].status + == actual_value["coin3"].status + ) + assert ( + affected_packages["coin3"].pocket_source + == actual_value["coin3"].pocket_source + ) + + +class 
TestOverrideUSNReleasePackageStatus: + @pytest.mark.parametrize( + "pkg_status", + ( + CVE_PKG_STATUS_IGNORED, + CVE_PKG_STATUS_PENDING, + CVE_PKG_STATUS_NEEDS_TRIAGE, + CVE_PKG_STATUS_NEEDED, + CVE_PKG_STATUS_DEFERRED, + CVE_PKG_STATUS_RELEASED, + CVE_PKG_STATUS_RELEASED_ESM_INFRA, + ), + ) + @pytest.mark.parametrize( + "usn_src_released_pkgs,expected", + ( + ({}, None), + ( # No "source" key, so ignore all binaries + {"somebinary": {"pocket": "my-pocket", "version": "usn-ver"}}, + None, + ), + ( + { + "source": { + "name": "srcpkg", + "version": "usn-source-pkg-ver", + }, + "somebinary": { + "pocket": "my-pocket", + "version": "usn-bin-ver", + }, + }, + { + "pocket": "my-pocket", + "description": "usn-source-pkg-ver", + "status": "released", + }, + ), + ), + ) + def test_override_cve_src_info_with_pocket_and_ver_from_usn( + self, usn_src_released_pkgs, expected, pkg_status + ): + """Override CVEPackageStatus with released/pocket from USN.""" + orig_cve = CVEPackageStatus(pkg_status) + override = override_usn_release_package_status( + orig_cve, usn_src_released_pkgs + ) + if expected is None: # Expect CVEPackageStatus unaltered + assert override.response == orig_cve.response + else: + assert expected == override.response + + +class TestMergeUSNReleasedBinaryPackageVersions: + @pytest.mark.parametrize( + "usns_released_packages, expected_pkgs_dict", + ( + ([{}], {}), + ( + [{"pkg1": {"libpkg1": {"version": "1.0", "name": "libpkg1"}}}], + {"pkg1": {"libpkg1": {"version": "1.0", "name": "libpkg1"}}}, + ), + ( + [ + { + "pkg1": { + "libpkg1": {"version": "1.0", "name": "libpkg1"} + }, + "pkg2": { + "libpkg2": {"version": "2.0", "name": "libpkg2"}, + "libpkg3": {"version": "3.0", "name": "libpkg3"}, + "libpkg4": {"version": "3.0", "name": "libpkg4"}, + }, + }, + { + "pkg2": { + "libpkg2": {"version": "1.8", "name": "libpkg2"}, + "libpkg4": {"version": "3.2", "name": "libpkg4"}, + } + }, + ], + { + "pkg1": {"libpkg1": {"version": "1.0", "name": "libpkg1"}}, + "pkg2": { + "libpkg2": {"version": "2.0", "name": "libpkg2"}, + "libpkg3": {"version": "3.0", "name": "libpkg3"}, + "libpkg4": {"version": "3.2", "name": "libpkg4"}, + }, + }, + ), + ( + [ + { + "pkg1": { + "libpkg1": {"version": "1.0", "name": "libpkg1"}, + "source": {"version": "2.0", "name": "pkg1"}, + } + }, + {"pkg1": {"source": {"version": "2.5", "name": "pkg1"}}}, + ], + { + "pkg1": { + "libpkg1": {"version": "1.0", "name": "libpkg1"}, + "source": {"version": "2.5", "name": "pkg1"}, + } + }, + ), + ( + [ + { + "pkg1": { + "libpkg1": {"version": "1.0", "name": "libpkg1"}, + "source": {"version": "2.0", "name": "pkg1"}, + }, + "pkg2": { + "libpkg2": { + "version": "2.0", + "name": "libpkg2", + "pocket": "esm-apps", + }, + "source": { + "version": "2.0", + "name": "pkg2", + "pocket": "esm-apps", + }, + }, + } + ], + { + "pkg1": { + "libpkg1": {"version": "1.0", "name": "libpkg1"}, + "source": {"version": "2.0", "name": "pkg1"}, + } + }, + ), + ), + ) + def test_merge_usn_released_binary_package_versions( + self, usns_released_packages, expected_pkgs_dict, _subp + ): + usns = [] + beta_packages = {"esm-infra": False, "esm-apps": True} + + for usn_released_pkgs in usns_released_packages: + usn = mock.MagicMock() + type(usn).release_packages = mock.PropertyMock( + return_value=usn_released_pkgs + ) + usns.append(usn) + + with mock.patch("uaclient.system._subp", side_effect=_subp): + usn_pkgs_dict = merge_usn_released_binary_package_versions( + usns, beta_packages + ) + assert expected_pkgs_dict == usn_pkgs_dict diff -Nru 
ubuntu-advantage-tools-30~23.10/uaclient/api/tests/test_api_u_pro_security_fix_execute.py ubuntu-advantage-tools-31.2~23.10/uaclient/api/tests/test_api_u_pro_security_fix_execute.py --- ubuntu-advantage-tools-30~23.10/uaclient/api/tests/test_api_u_pro_security_fix_execute.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/api/tests/test_api_u_pro_security_fix_execute.py 2024-01-18 17:34:13.000000000 +0000 @@ -2,7 +2,15 @@ import pytest from uaclient import messages -from uaclient.api.u.pro.security.fix import ( +from uaclient.api.u.pro.security.fix._common import FixStatus +from uaclient.api.u.pro.security.fix._common.execute.v1 import ( + FailedUpgrade, + FixExecuteError, + FixExecuteResult, + UpgradedPackage, + _execute_fix, +) +from uaclient.api.u.pro.security.fix._common.plan.v1 import ( ESM_INFRA_POCKET, STANDARD_UPDATES_POCKET, AptUpgradeData, @@ -22,14 +30,6 @@ PackageCannotBeInstalledData, SecurityIssueNotFixedData, ) -from uaclient.api.u.pro.security.fix._common.execute.v1 import ( - FailedUpgrade, - FixExecuteError, - FixExecuteResult, - UpgradedPackage, - _execute_fix, -) -from uaclient.security import FixStatus class TestExecute: diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/api/tests/test_api_u_pro_security_fix_plan.py ubuntu-advantage-tools-31.2~23.10/uaclient/api/tests/test_api_u_pro_security_fix_plan.py --- ubuntu-advantage-tools-30~23.10/uaclient/api/tests/test_api_u_pro_security_fix_plan.py 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/api/tests/test_api_u_pro_security_fix_plan.py 2024-01-18 17:34:13.000000000 +0000 @@ -0,0 +1,1104 @@ +import json + +import mock +import pytest + +from uaclient import exceptions, messages +from uaclient.api.u.pro.security.fix._common import ( + CVEPackageStatus, + FixStatus, + UASecurityClient, +) +from uaclient.api.u.pro.security.fix._common.plan.v1 import ( + AdditionalData, + AptUpgradeData, + AttachData, + EnableData, + FixPlanAptUpgradeStep, + FixPlanAttachStep, + FixPlanEnableStep, + FixPlanError, + FixPlanNoOpAlreadyFixedStep, + FixPlanNoOpLivepatchFixStep, + FixPlanNoOpStep, + FixPlanResult, + FixPlanUSNResult, + FixPlanWarningPackageCannotBeInstalled, + FixPlanWarningSecurityIssueNotFixed, + NoOpAlreadyFixedData, + NoOpData, + NoOpLivepatchFixData, + PackageCannotBeInstalledData, + SecurityIssueNotFixedData, + USNAdditionalData, + _get_cve_description, + fix_plan_cve, + fix_plan_usn, +) +from uaclient.api.u.pro.status.enabled_services.v1 import EnabledService +from uaclient.contract import ContractExpiryStatus +from uaclient.messages import INVALID_SECURITY_ISSUE + +M_PATH = "uaclient.api.u.pro.security.fix._common.plan.v1." 
+ + +class TestFixPlan: + @pytest.mark.parametrize( + "issue_id", (("CVE-sdsa"), ("test"), (""), (None)) + ) + def test_fix_plan_cve_invalid_security_issue(self, issue_id): + expected_plan = FixPlanResult( + title=issue_id, + description=None, + expected_status="error", + affected_packages=None, + plan=[], + warnings=[], + error=FixPlanError( + msg=INVALID_SECURITY_ISSUE.format(issue_id=issue_id).msg, + code=INVALID_SECURITY_ISSUE.name, + ), + additional_data=AdditionalData(), + ) + assert expected_plan == fix_plan_cve(issue_id, cfg=mock.MagicMock()) + + @pytest.mark.parametrize( + "issue_id", (("USN-sadsa"), ("test"), (""), (None)) + ) + def test_fix_plan_usn_invalid_security_issue(self, issue_id): + expected_plan = FixPlanUSNResult( + target_usn_plan=FixPlanResult( + title=issue_id, + description=None, + expected_status="error", + affected_packages=None, + plan=[], + warnings=[], + error=FixPlanError( + msg=INVALID_SECURITY_ISSUE.format(issue_id=issue_id).msg, + code=INVALID_SECURITY_ISSUE.name, + ), + additional_data=AdditionalData(), + ), + related_usns_plan=[], + ) + assert expected_plan == fix_plan_usn(issue_id, cfg=mock.MagicMock()) + + @mock.patch(M_PATH + "_check_cve_fixed_by_livepatch") + def test_fix_plan_cve_fixed_by_livepatch( + self, + m_check_cve_fixed_by_livepatch, + ): + m_check_cve_fixed_by_livepatch.return_value = ( + FixStatus.SYSTEM_NON_VULNERABLE, + "1.0", + ) + expected_plan = FixPlanResult( + title="CVE-1234-1235", + description=None, + expected_status=str(FixStatus.SYSTEM_NON_VULNERABLE), + affected_packages=None, + plan=[ + FixPlanNoOpLivepatchFixStep( + data=NoOpLivepatchFixData( + status="cve-fixed-by-livepatch", + patch_version="1.0", + ), + order=1, + ) + ], + warnings=[], + error=None, + additional_data=AdditionalData(), + ) + + assert expected_plan == fix_plan_cve( + issue_id="cve-1234-1235", cfg=mock.MagicMock() + ) + + @mock.patch(M_PATH + "merge_usn_released_binary_package_versions") + @mock.patch(M_PATH + "get_cve_affected_source_packages_status") + @mock.patch(M_PATH + "_get_cve_data") + @mock.patch(M_PATH + "query_installed_source_pkg_versions") + @mock.patch(M_PATH + "_check_cve_fixed_by_livepatch") + def test_fix_plan_for_no_affected_packages( + self, + m_check_cve_fixed_by_livepatch, + m_query_installed_pkgs, + m_get_cve_data, + m_get_cve_affected_pkgs, + m_merge_usn_pkgs, + ): + m_check_cve_fixed_by_livepatch.return_value = (None, None) + m_query_installed_pkgs.return_value = { + "pkg1": { + "bin1": "1.0", + "bin2": "1.1", + } + } + m_get_cve_data.return_value = ( + mock.MagicMock( + description="descr", + notices=[mock.MagicMock(title="test")], + ), + [], + ) + m_get_cve_affected_pkgs.return_value = {} + m_merge_usn_pkgs.return_value = {} + expected_plan = FixPlanResult( + title="CVE-1234-1235", + description="test", + expected_status=str(FixStatus.SYSTEM_NOT_AFFECTED), + affected_packages=[], + plan=[ + FixPlanNoOpStep( + data=NoOpData( + status="system-not-affected", + ), + order=1, + ) + ], + warnings=[], + error=None, + additional_data=AdditionalData(), + ) + + assert expected_plan == fix_plan_cve( + issue_id="cve-1234-1235", cfg=mock.MagicMock() + ) + + @mock.patch(M_PATH + "merge_usn_released_binary_package_versions") + @mock.patch(M_PATH + "get_cve_affected_source_packages_status") + @mock.patch("uaclient.apt.get_pkg_candidate_version") + @mock.patch(M_PATH + "_get_cve_data") + @mock.patch(M_PATH + "query_installed_source_pkg_versions") + @mock.patch(M_PATH + "_check_cve_fixed_by_livepatch") + def test_fix_plan_for_cve( + self, + 
m_check_cve_fixed_by_livepatch, + m_query_installed_pkgs, + m_get_cve_data, + m_get_pkg_candidate_version, + m_get_cve_affected_pkgs, + m_merge_usn_pkgs, + ): + m_check_cve_fixed_by_livepatch.return_value = (None, None) + m_query_installed_pkgs.return_value = { + "pkg1": { + "bin1": "1.0", + "bin2": "1.1", + } + } + m_get_cve_data.return_value = ( + mock.MagicMock( + description="descr", + notices=[mock.MagicMock(title="test")], + ), + [], + ) + m_get_cve_affected_pkgs.return_value = { + "pkg1": CVEPackageStatus( + cve_response={ + "status": "released", + "pocket": "security", + } + ), + } + m_merge_usn_pkgs.return_value = { + "pkg1": { + "source": { + "description": "description", + "name": "pkg1", + "is_source": True, + "version": "1.1", + }, + "bin1": { + "is_source": False, + "name": "bin1", + "version": "1.1", + }, + "bin2": { + "is_source": False, + "name": "bin2", + "version": "1.2", + }, + } + } + m_get_pkg_candidate_version.side_effect = ["1.1", "1.2"] + + expected_plan = FixPlanResult( + title="CVE-1234-1235", + description="test", + expected_status=str(FixStatus.SYSTEM_NON_VULNERABLE), + affected_packages=["pkg1"], + plan=[ + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["bin1", "bin2"], + source_packages=["pkg1"], + pocket="standard-updates", + ), + order=1, + ) + ], + warnings=[], + error=None, + additional_data=AdditionalData(), + ) + + assert expected_plan == fix_plan_cve( + issue_id="cve-1234-1235", cfg=mock.MagicMock() + ) + + @mock.patch(M_PATH + "merge_usn_released_binary_package_versions") + @mock.patch(M_PATH + "get_cve_affected_source_packages_status") + @mock.patch(M_PATH + "_enabled_services") + @mock.patch(M_PATH + "_is_attached") + @mock.patch(M_PATH + "get_contract_expiry_status") + @mock.patch("uaclient.apt.get_pkg_candidate_version") + @mock.patch(M_PATH + "_get_cve_data") + @mock.patch(M_PATH + "query_installed_source_pkg_versions") + @mock.patch(M_PATH + "_check_cve_fixed_by_livepatch") + def test_fix_plan_for_cve_that_requires_pro_services( + self, + m_check_cve_fixed_by_livepatch, + m_query_installed_pkgs, + m_get_cve_data, + m_get_pkg_candidate_version, + m_get_contract_expiry_status, + m_is_attached, + m_enabled_services, + m_get_cve_affected_pkgs, + m_merge_usn_pkgs, + ): + m_check_cve_fixed_by_livepatch.return_value = (None, None) + m_query_installed_pkgs.return_value = { + "pkg1": { + "bin1": "1.0", + "bin2": "1.1", + }, + "pkg2": { + "bin3": "1.5", + }, + "pkg3": { + "bin4": "1.8", + }, + } + m_get_cve_data.return_value = ( + mock.MagicMock( + description="descr", + notices=[], + ), + [], + ) + m_get_cve_affected_pkgs.return_value = { + "pkg1": CVEPackageStatus( + cve_response={ + "status": "released", + "pocket": "security", + } + ), + "pkg2": CVEPackageStatus( + cve_response={ + "status": "released", + "pocket": "esm-infra", + } + ), + "pkg3": CVEPackageStatus( + cve_response={ + "status": "released", + "pocket": "esm-apps", + } + ), + } + m_merge_usn_pkgs.return_value = { + "pkg1": { + "source": { + "description": "description", + "name": "pkg1", + "is_source": True, + "version": "1.1", + }, + "bin1": { + "is_source": False, + "name": "bin1", + "version": "1.1", + }, + "bin2": { + "is_source": False, + "name": "bin2", + "version": "1.2", + }, + }, + "pkg2": { + "source": { + "description": "description", + "name": "pkg2", + "is_source": True, + "version": "1.5", + }, + "bin3": { + "is_source": False, + "name": "bin3", + "version": "1.6~esm1", + }, + }, + "pkg3": { + "source": { + "description": "description", + "name": "pkg3", + 
"is_source": True, + "version": "1.8", + }, + "bin4": { + "is_source": False, + "name": "bin4", + "version": "1.8.1~esm1", + }, + }, + } + m_get_pkg_candidate_version.side_effect = [ + "1.1", + "1.2", + "1.6~esm1", + "1.8.1~esm1", + ] + m_get_contract_expiry_status.return_value = ( + ContractExpiryStatus.ACTIVE, + None, + ) + m_is_attached.side_effect = [ + mock.MagicMock(is_attached=False), + mock.MagicMock(is_attached=True), + ] + m_enabled_services.side_effect = [ + mock.MagicMock(enabled_services=None), + mock.MagicMock( + enabled_services=[ + EnabledService(name="esm-infra", variant_enabled=False) + ] + ), + ] + + expected_plan = FixPlanResult( + title="CVE-1234-1235", + description="descr", + expected_status=str(FixStatus.SYSTEM_NON_VULNERABLE), + affected_packages=["pkg1", "pkg2", "pkg3"], + plan=[ + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["bin1", "bin2"], + source_packages=["pkg1"], + pocket="standard-updates", + ), + order=1, + ), + FixPlanAttachStep( + data=AttachData( + reason="required-pro-service", + required_service="esm-infra", + source_packages=["pkg2"], + ), + order=2, + ), + FixPlanEnableStep( + data=EnableData( + service="esm-infra", + source_packages=["pkg2"], + ), + order=3, + ), + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["bin3"], + source_packages=["pkg2"], + pocket="esm-infra", + ), + order=4, + ), + FixPlanEnableStep( + data=EnableData( + service="esm-apps", + source_packages=["pkg3"], + ), + order=5, + ), + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["bin4"], + source_packages=["pkg3"], + pocket="esm-apps", + ), + order=6, + ), + ], + warnings=[], + error=None, + additional_data=AdditionalData(), + ) + + assert expected_plan == fix_plan_cve( + issue_id="cve-1234-1235", cfg=mock.MagicMock() + ) + + @mock.patch(M_PATH + "merge_usn_released_binary_package_versions") + @mock.patch(M_PATH + "get_cve_affected_source_packages_status") + @mock.patch("uaclient.apt.get_pkg_candidate_version") + @mock.patch(M_PATH + "_get_cve_data") + @mock.patch(M_PATH + "query_installed_source_pkg_versions") + @mock.patch(M_PATH + "_check_cve_fixed_by_livepatch") + def test_fix_plan_for_cve_when_package_cannot_be_installed( + self, + m_check_cve_fixed_by_livepatch, + m_query_installed_pkgs, + m_get_cve_data, + m_get_pkg_candidate_version, + m_get_cve_affected_pkgs, + m_merge_usn_pkgs, + ): + m_check_cve_fixed_by_livepatch.return_value = (None, None) + m_query_installed_pkgs.return_value = { + "pkg1": { + "bin1": "1.0", + "bin2": "1.1", + }, + } + m_get_cve_data.return_value = ( + mock.MagicMock( + description="descr", + notices=[mock.MagicMock(title="test")], + ), + [], + ) + m_get_cve_affected_pkgs.return_value = { + "pkg1": CVEPackageStatus( + cve_response={ + "status": "released", + "pocket": "security", + } + ), + } + m_merge_usn_pkgs.return_value = { + "pkg1": { + "source": { + "description": "description", + "name": "pkg1", + "is_source": True, + "version": "1.1", + }, + "bin1": { + "is_source": False, + "name": "bin1", + "version": "1.1", + }, + "bin2": { + "is_source": False, + "name": "bin2", + "version": "1.2", + }, + }, + } + m_get_pkg_candidate_version.side_effect = ["1.1", "1.1"] + + expected_plan = FixPlanResult( + title="CVE-1234-1235", + description="test", + expected_status=str(FixStatus.SYSTEM_STILL_VULNERABLE), + affected_packages=["pkg1"], + plan=[ + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["bin1"], + source_packages=["pkg1"], + pocket="standard-updates", + ), + order=2, + ), + 
], + warnings=[ + FixPlanWarningPackageCannotBeInstalled( + data=PackageCannotBeInstalledData( + binary_package="bin2", + source_package="pkg1", + binary_package_version="1.2", + pocket="standard-updates", + related_source_packages=["pkg1"], + ), + order=1, + ) + ], + error=None, + additional_data=AdditionalData(), + ) + assert expected_plan == fix_plan_cve( + issue_id="cve-1234-1235", cfg=mock.MagicMock() + ) + + @mock.patch(M_PATH + "merge_usn_released_binary_package_versions") + @mock.patch(M_PATH + "get_cve_affected_source_packages_status") + @mock.patch("uaclient.apt.get_pkg_candidate_version") + @mock.patch(M_PATH + "_get_cve_data") + @mock.patch(M_PATH + "query_installed_source_pkg_versions") + @mock.patch(M_PATH + "_check_cve_fixed_by_livepatch") + def test_fix_plan_for_cve_with_not_released_status( + self, + m_check_cve_fixed_by_livepatch, + m_query_installed_pkgs, + m_get_cve_data, + m_get_pkg_candidate_version, + m_get_cve_affected_pkgs, + m_merge_usn_pkgs, + ): + m_check_cve_fixed_by_livepatch.return_value = (None, None) + m_query_installed_pkgs.return_value = { + "pkg1": { + "bin1": "1.0", + "bin2": "1.1", + }, + "pkg2": { + "bin3": "1.2", + }, + } + m_get_cve_data.return_value = ( + mock.MagicMock( + description="descr", + notices=[mock.MagicMock(title="test")], + ), + [], + ) + m_get_cve_affected_pkgs.return_value = { + "pkg1": CVEPackageStatus( + cve_response={ + "status": "released", + "pocket": "security", + } + ), + "pkg2": CVEPackageStatus( + cve_response={ + "status": "needed", + "pocket": "updates", + } + ), + } + m_merge_usn_pkgs.return_value = { + "pkg1": { + "source": { + "description": "description", + "name": "pkg1", + "is_source": True, + "version": "1.1", + }, + "bin1": { + "is_source": False, + "name": "bin1", + "version": "1.1", + }, + "bin2": { + "is_source": False, + "name": "bin2", + "version": "1.2", + }, + } + } + m_get_pkg_candidate_version.side_effect = ["1.1", "1.2"] + + expected_plan = FixPlanResult( + title="CVE-1234-1235", + description="test", + expected_status=str(FixStatus.SYSTEM_STILL_VULNERABLE), + affected_packages=["pkg1", "pkg2"], + plan=[ + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["bin1", "bin2"], + source_packages=["pkg1"], + pocket="standard-updates", + ), + order=2, + ), + ], + warnings=[ + FixPlanWarningSecurityIssueNotFixed( + data=SecurityIssueNotFixedData( + source_packages=["pkg2"], + status="needed", + ), + order=1, + ) + ], + error=None, + additional_data=AdditionalData(), + ) + + assert expected_plan == fix_plan_cve( + issue_id="cve-1234-1235", cfg=mock.MagicMock() + ) + + @mock.patch("uaclient.apt.get_pkg_candidate_version") + @mock.patch(M_PATH + "merge_usn_released_binary_package_versions") + @mock.patch(M_PATH + "get_affected_packages_from_usn") + @mock.patch(M_PATH + "_get_usn_data") + @mock.patch(M_PATH + "query_installed_source_pkg_versions") + def test_fix_plan_for_usn( + self, + m_query_installed_pkgs, + m_get_usn_data, + m_get_affected_packages_from_usn, + m_merge_usn_released_binary_package, + m_get_pkg_candidate_version, + ): + m_query_installed_pkgs.return_value = { + "pkg1": { + "bin1": "1.0", + "bin2": "1.1", + }, + "pkg2": { + "bin3": "1.2", + }, + "pkg3": { + "bin4": "1.3", + }, + } + m_get_usn_data.return_value = ( + mock.MagicMock(cves_ids=[], references=[], title="test"), + [ + mock.MagicMock( + id="USN-2345-1", + cves_ids=["CVE-1234-12345"], + references=[], + title="test2", + ), + mock.MagicMock( + id="USN-3456-8", + cves_ids=[], + references=["https://launchpad.net/bugs/BUG"], + 
title="test3", + ), + ], + ) + + m_get_affected_packages_from_usn.side_effect = [ + { + "pkg1": CVEPackageStatus( + cve_response={ + "status": "released", + "pocket": "security", + } + ), + }, + { + "pkg2": CVEPackageStatus( + cve_response={ + "status": "released", + "pocket": "security", + } + ), + }, + { + "pkg3": CVEPackageStatus( + cve_response={ + "status": "released", + "pocket": "security", + } + ), + }, + ] + m_merge_usn_released_binary_package.side_effect = [ + { + "pkg1": { + "source": { + "description": "description", + "name": "pkg1", + "is_source": True, + "version": "1.1", + }, + "bin1": { + "is_source": False, + "name": "bin1", + "version": "1.1", + }, + "bin2": { + "is_source": False, + "name": "bin2", + "version": "1.2", + }, + }, + }, + { + "pkg2": { + "source": { + "description": "description", + "name": "pkg2", + "is_source": True, + "version": "1.3", + }, + "bin3": { + "is_source": False, + "name": "bin3", + "version": "1.3", + }, + }, + }, + { + "pkg3": { + "source": { + "description": "description", + "name": "pkg3", + "is_source": True, + "version": "1.4", + }, + "bin4": { + "is_source": False, + "name": "bin4", + "version": "1.4", + }, + } + }, + ] + m_get_pkg_candidate_version.side_effect = ["1.1", "1.2", "1.3", "1.4"] + + expected_plan = FixPlanUSNResult( + target_usn_plan=FixPlanResult( + title="USN-1234-1", + description="test", + affected_packages=["pkg1"], + expected_status=str(FixStatus.SYSTEM_NON_VULNERABLE), + plan=[ + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["bin1", "bin2"], + source_packages=["pkg1"], + pocket="standard-updates", + ), + order=1, + ), + ], + warnings=[], + error=None, + additional_data=USNAdditionalData( + associated_cves=[], + associated_launchpad_bugs=[], + ), + ), + related_usns_plan=[ + FixPlanResult( + title="USN-2345-1", + description="test2", + expected_status=str(FixStatus.SYSTEM_NON_VULNERABLE), + affected_packages=["pkg2"], + plan=[ + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["bin3"], + source_packages=["pkg2"], + pocket="standard-updates", + ), + order=1, + ), + ], + warnings=[], + error=None, + additional_data=USNAdditionalData( + associated_cves=["CVE-1234-12345"], + associated_launchpad_bugs=[], + ), + ), + FixPlanResult( + title="USN-3456-8", + description="test3", + expected_status=str(FixStatus.SYSTEM_NON_VULNERABLE), + affected_packages=["pkg3"], + plan=[ + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["bin4"], + source_packages=["pkg3"], + pocket="standard-updates", + ), + order=1, + ), + ], + warnings=[], + error=None, + additional_data=USNAdditionalData( + associated_cves=[], + associated_launchpad_bugs=[ + "https://launchpad.net/bugs/BUG" + ], + ), + ), + ], + ) + + assert expected_plan == fix_plan_usn( + issue_id="usn-1234-1", cfg=mock.MagicMock() + ) + + @mock.patch(M_PATH + "merge_usn_released_binary_package_versions") + @mock.patch(M_PATH + "get_cve_affected_source_packages_status") + @mock.patch("uaclient.apt.get_pkg_candidate_version") + @mock.patch(M_PATH + "_get_cve_data") + @mock.patch(M_PATH + "query_installed_source_pkg_versions") + @mock.patch(M_PATH + "_check_cve_fixed_by_livepatch") + def test_fix_plan_for_cve_when_package_already_installed( + self, + m_check_cve_fixed_by_livepatch, + m_query_installed_pkgs, + m_get_cve_data, + m_get_pkg_candidate_version, + m_get_cve_affected_pkgs, + m_merge_usn_pkgs, + ): + m_check_cve_fixed_by_livepatch.return_value = (None, None) + m_query_installed_pkgs.return_value = { + "pkg1": { + "bin1": "1.0", + 
"bin2": "1.1", + }, + } + m_get_cve_data.return_value = ( + mock.MagicMock( + description="descr", + notices=[mock.MagicMock(title="test")], + ), + [], + ) + m_get_cve_affected_pkgs.return_value = { + "pkg1": CVEPackageStatus( + cve_response={ + "status": "released", + "pocket": "security", + } + ), + } + m_merge_usn_pkgs.return_valuev = { + "pkg1": { + "source": { + "description": "description", + "name": "pkg1", + "is_source": True, + "version": "1.0", + }, + "bin1": { + "is_source": False, + "name": "bin1", + "version": "1.0", + }, + "bin2": { + "is_source": False, + "name": "bin2", + "version": "1.1", + }, + }, + } + m_get_pkg_candidate_version.side_effect = ["1.1", "1.1"] + + expected_plan = FixPlanResult( + title="CVE-1234-1235", + description="test", + expected_status=str(FixStatus.SYSTEM_NON_VULNERABLE), + affected_packages=["pkg1"], + plan=[ + FixPlanNoOpAlreadyFixedStep( + data=NoOpAlreadyFixedData( + status="cve-already-fixed", + source_packages=["pkg1"], + pocket="standard-updates", + ), + order=1, + ), + ], + warnings=[], + error=None, + additional_data=AdditionalData(), + ) + assert expected_plan == fix_plan_cve( + issue_id="cve-1234-1235", cfg=mock.MagicMock() + ) + + +class TestGetUsnData: + @mock.patch(M_PATH + "query_installed_source_pkg_versions") + @mock.patch( + "uaclient.api.u.pro.security.fix._common.get_usn_affected_packages_status" # noqa + ) + @mock.patch(M_PATH + "merge_usn_released_binary_package_versions") + def test_error_msg_when_usn_does_not_have_any_related_usns( + self, + m_merge_usn, + m_usn_affected_pkgs, + m_query_installed_pkgs, + FakeConfig, + ): + m_query_installed_pkgs.return_value = {} + m_usn_affected_pkgs.return_value = {} + m_merge_usn.return_value = {} + with mock.patch.object(UASecurityClient, "get_notice") as m_notice: + with mock.patch.object( + UASecurityClient, "get_notices" + ) as m_notices: + usn_mock = mock.MagicMock() + cve_mock = mock.MagicMock() + + type(cve_mock).notices_ids = mock.PropertyMock( + return_value=["USN-123"] + ) + type(usn_mock).cves = mock.PropertyMock( + return_value=[cve_mock] + ) + type(usn_mock).response = mock.PropertyMock( + return_value={"release_packages": {}} + ) + type(usn_mock).cves_ids = mock.PropertyMock( + return_value=["cve-123"] + ) + type(usn_mock).id = mock.PropertyMock(return_value="id") + + m_notice.return_value = usn_mock + m_notices.return_value = [usn_mock] + + with pytest.raises(exceptions.SecurityAPIMetadataError) as exc: + fix_plan_usn("USN-1235-1", FakeConfig()) + + expected_msg = messages.E_SECURITY_API_INVALID_METADATA.format( + error_msg="metadata defines no fixed package versions.", + issue="USN-1235-1", + extra_info="", + ).msg + assert expected_msg in exc.value.msg + + +class TestSecurityIssueData: + @pytest.mark.parametrize("error_code", ((404), (400))) + @pytest.mark.parametrize("issue_id", (("CVE-1800-123456"), ("USN-1235-1"))) + @mock.patch(M_PATH + "query_installed_source_pkg_versions") + def test_error_msg_when_issue_id_is_not_found( + self, _m_query_versions, issue_id, error_code, FakeConfig + ): + expected_message = "Error: {} not found.".format(issue_id) + if "CVE" in issue_id: + mock_func = "get_cve" + issue_type = "CVE" + call_func = fix_plan_cve + else: + mock_func = "get_notice" + issue_type = "USN" + call_func = fix_plan_usn + + with mock.patch.object(UASecurityClient, mock_func) as m_func: + msg = "{} with id 'ID' does not exist".format(issue_type) + + m_func.side_effect = exceptions.SecurityAPIError( + url="URL", code=error_code, body=json.dumps({"message": msg}) + ) + 
+ cfg = FakeConfig() + fix_plan = call_func(issue_id, cfg) + + if error_code == 404: + expected_message = "Error: {} not found.".format(issue_id) + else: + expected_message = ( + "Error connecting to URL: " + + str(error_code) + + " " + + json.dumps({"message": msg}) + ) + + if "CVE" in issue_id: + assert fix_plan.error.msg == expected_message + else: + assert fix_plan.target_usn_plan.error.msg == expected_message + + +class TestGetCVEDescription: + @pytest.mark.parametrize( + "installed_pkgs,notices,cve_description,expected_description", + ( + ({}, [], "cve_description", "cve_description"), + ( + { + "pkg1": { + "bin1": "1.0", + "bin2": "1.1", + }, + }, + [ + mock.MagicMock( + title="usn2", + release_packages={ + "pkg2": { + "libpkg2": { + "version": "1.0", + "name": "libpkg2", + }, + "source": {"version": "2.0", "name": "pkg2"}, + } + }, + ), + mock.MagicMock( + title="usn1", + release_packages={ + "pkg1": { + "libpkg1": { + "version": "1.0", + "name": "libpkg1", + }, + "source": {"version": "2.0", "name": "pkg1"}, + } + }, + ), + ], + "cve_description", + "usn1", + ), + ( + { + "pkg3": { + "bin1": "1.0", + "bin2": "1.1", + }, + }, + [ + mock.MagicMock( + title="usn2", + release_packages={ + "pkg2": { + "libpkg2": { + "version": "1.0", + "name": "libpkg2", + }, + "source": {"version": "2.0", "name": "pkg2"}, + } + }, + ), + mock.MagicMock( + title="usn1", + release_packages={ + "pkg1": { + "libpkg1": { + "version": "1.0", + "name": "libpkg1", + }, + "source": {"version": "2.0", "name": "pkg1"}, + } + }, + ), + ], + "cve_description", + "usn2", + ), + ), + ) + def test_get_cve_description( + self, + installed_pkgs, + notices, + cve_description, + expected_description, + ): + cve = mock.MagicMock(notices=notices, description=cve_description) + assert expected_description == _get_cve_description( + cve, installed_pkgs + ) diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/api/tests/test_api_u_pro_status_enabled_services_v1.py ubuntu-advantage-tools-31.2~23.10/uaclient/api/tests/test_api_u_pro_status_enabled_services_v1.py --- ubuntu-advantage-tools-30~23.10/uaclient/api/tests/test_api_u_pro_status_enabled_services_v1.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/api/tests/test_api_u_pro_status_enabled_services_v1.py 2024-01-18 17:34:13.000000000 +0000 @@ -1,11 +1,13 @@ import mock from uaclient import entitlements +from uaclient.api.data_types import ErrorWarningObject from uaclient.api.u.pro.status.enabled_services.v1 import ( EnabledService, _enabled_services, ) from uaclient.entitlements.entitlement_status import UserFacingStatus +from uaclient.messages import NamedMessage class TestEnabledServicesV1: @@ -15,7 +17,9 @@ m_cls_1 = mock.MagicMock() m_inst_1 = mock.MagicMock(variants={}) - type(m_inst_1).name = mock.PropertyMock(return_value="ent1") + type(m_inst_1).presentation_name = mock.PropertyMock( + return_value="ent1" + ) m_inst_1.user_facing_status.return_value = ( UserFacingStatus.ACTIVE, "", @@ -32,7 +36,9 @@ m_cls_2 = mock.MagicMock() m_inst_2 = mock.MagicMock(variants={"variant": m_variant_cls}) - type(m_inst_2).name = mock.PropertyMock(return_value="ent2") + type(m_inst_2).presentation_name = mock.PropertyMock( + return_value="ent2" + ) m_inst_2.user_facing_status.return_value = ( UserFacingStatus.ACTIVE, "", @@ -41,13 +47,38 @@ m_cls_3 = mock.MagicMock() m_inst_3 = mock.MagicMock() - type(m_inst_3).name = mock.PropertyMock(return_value="ent3") + type(m_inst_3).presentation_name = mock.PropertyMock( + return_value="ent3" + ) 
m_inst_3.user_facing_status.return_value = ( UserFacingStatus.INACTIVE, "", ) + m_cls_3.return_value = m_inst_3 + + m_cls_4 = mock.MagicMock() + m_inst_4 = mock.MagicMock() + type(m_inst_4).presentation_name = mock.PropertyMock( + return_value="ent4" + ) + m_inst_4.user_facing_status.return_value = ( + UserFacingStatus.INAPPLICABLE, + "", + ) + m_cls_4.return_value = m_inst_4 - ents = [m_cls_1, m_cls_2, m_cls_3] + m_cls_5 = mock.MagicMock() + m_inst_5 = mock.MagicMock() + type(m_inst_5).presentation_name = mock.PropertyMock( + return_value="ent5" + ) + m_inst_5.user_facing_status.return_value = ( + UserFacingStatus.WARNING, + NamedMessage(name="warning_code", msg="warning_msg"), + ) + m_cls_5.return_value = m_inst_5 + + ents = [m_cls_1, m_cls_2, m_cls_3, m_cls_4, m_cls_5] expected_enabled_services = [ EnabledService(name="ent1"), EnabledService( @@ -55,15 +86,25 @@ variant_enabled=True, variant_name="variant", ), + EnabledService(name="ent5"), + ] + + expected_warnings = [ + ErrorWarningObject( + title="warning_msg", + code="warning_code", + meta={"service": "ent5"}, + ) ] with mock.patch.object(entitlements, "ENTITLEMENT_CLASSES", ents): - actual_enabled_services = _enabled_services( - cfg=mock.MagicMock() - ).enabled_services + enabled_services_ret = _enabled_services(cfg=mock.MagicMock()) assert 1 == m_is_attached.call_count - assert expected_enabled_services == actual_enabled_services + assert ( + expected_enabled_services == enabled_services_ret.enabled_services + ) + assert expected_warnings == enabled_services_ret.warnings @mock.patch("uaclient.api.u.pro.status.enabled_services.v1._is_attached") def test_enabled_services_when_unattached(self, m_is_attached): diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/api/tests/test_fix.py ubuntu-advantage-tools-31.2~23.10/uaclient/api/tests/test_fix.py --- ubuntu-advantage-tools-30~23.10/uaclient/api/tests/test_fix.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/api/tests/test_fix.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,912 +0,0 @@ -import mock -import pytest - -from uaclient.api.u.pro.security.fix import ( - AdditionalData, - AptUpgradeData, - AttachData, - EnableData, - FixPlanAptUpgradeStep, - FixPlanAttachStep, - FixPlanEnableStep, - FixPlanError, - FixPlanNoOpAlreadyFixedStep, - FixPlanNoOpLivepatchFixStep, - FixPlanNoOpStep, - FixPlanResult, - FixPlanUSNResult, - FixPlanWarningPackageCannotBeInstalled, - FixPlanWarningSecurityIssueNotFixed, - NoOpAlreadyFixedData, - NoOpData, - NoOpLivepatchFixData, - PackageCannotBeInstalledData, - SecurityIssueNotFixedData, - USNAdditionalData, - fix_plan_cve, - fix_plan_usn, -) -from uaclient.api.u.pro.status.enabled_services.v1 import EnabledService -from uaclient.contract import ContractExpiryStatus -from uaclient.messages import INVALID_SECURITY_ISSUE -from uaclient.security import CVEPackageStatus, FixStatus - -M_PATH = "uaclient.api.u.pro.security.fix." 
- - -class TestFixPlan: - @pytest.mark.parametrize( - "issue_id", (("CVE-sdsa"), ("test"), (""), (None)) - ) - def test_fix_plan_cve_invalid_security_issue(self, issue_id): - expected_plan = FixPlanResult( - title=issue_id, - description=None, - expected_status="error", - affected_packages=None, - plan=[], - warnings=[], - error=FixPlanError( - msg=INVALID_SECURITY_ISSUE.format(issue_id=issue_id).msg, - code=INVALID_SECURITY_ISSUE.name, - ), - additional_data=AdditionalData(), - ) - assert expected_plan == fix_plan_cve(issue_id, cfg=mock.MagicMock()) - - @pytest.mark.parametrize( - "issue_id", (("USN-sadsa"), ("test"), (""), (None)) - ) - def test_fix_plan_usn_invalid_security_issue(self, issue_id): - expected_plan = FixPlanUSNResult( - target_usn_plan=FixPlanResult( - title=issue_id, - description=None, - expected_status="error", - affected_packages=None, - plan=[], - warnings=[], - error=FixPlanError( - msg=INVALID_SECURITY_ISSUE.format(issue_id=issue_id).msg, - code=INVALID_SECURITY_ISSUE.name, - ), - additional_data=AdditionalData(), - ), - related_usns_plan=[], - ) - assert expected_plan == fix_plan_usn(issue_id, cfg=mock.MagicMock()) - - @mock.patch( - "uaclient.api.u.pro.security.fix._check_cve_fixed_by_livepatch" - ) - def test_fix_plan_cve_fixed_by_livepatch( - self, - m_check_cve_fixed_by_livepatch, - ): - m_check_cve_fixed_by_livepatch.return_value = ( - FixStatus.SYSTEM_NON_VULNERABLE, - "1.0", - ) - expected_plan = FixPlanResult( - title="CVE-1234-1235", - description=None, - expected_status=str(FixStatus.SYSTEM_NON_VULNERABLE), - affected_packages=None, - plan=[ - FixPlanNoOpLivepatchFixStep( - data=NoOpLivepatchFixData( - status="cve-fixed-by-livepatch", - patch_version="1.0", - ), - order=1, - ) - ], - warnings=[], - error=None, - additional_data=AdditionalData(), - ) - - assert expected_plan == fix_plan_cve( - issue_id="cve-1234-1235", cfg=mock.MagicMock() - ) - - @mock.patch(M_PATH + "merge_usn_released_binary_package_versions") - @mock.patch(M_PATH + "get_cve_affected_source_packages_status") - @mock.patch(M_PATH + "_get_cve_data") - @mock.patch(M_PATH + "query_installed_source_pkg_versions") - @mock.patch(M_PATH + "_check_cve_fixed_by_livepatch") - def test_fix_plan_for_no_affected_packages( - self, - m_check_cve_fixed_by_livepatch, - m_query_installed_pkgs, - m_get_cve_data, - m_get_cve_affected_pkgs, - m_merge_usn_pkgs, - ): - m_check_cve_fixed_by_livepatch.return_value = (None, None) - m_query_installed_pkgs.return_value = { - "pkg1": { - "bin1": "1.0", - "bin2": "1.1", - } - } - m_get_cve_data.return_value = ( - mock.MagicMock( - description="descr", - notices=[mock.MagicMock(title="test")], - ), - [], - ) - m_get_cve_affected_pkgs.return_value = {} - m_merge_usn_pkgs.return_value = {} - expected_plan = FixPlanResult( - title="CVE-1234-1235", - description="test", - expected_status=str(FixStatus.SYSTEM_NOT_AFFECTED), - affected_packages=[], - plan=[ - FixPlanNoOpStep( - data=NoOpData( - status="system-not-affected", - ), - order=1, - ) - ], - warnings=[], - error=None, - additional_data=AdditionalData(), - ) - - assert expected_plan == fix_plan_cve( - issue_id="cve-1234-1235", cfg=mock.MagicMock() - ) - - @mock.patch(M_PATH + "merge_usn_released_binary_package_versions") - @mock.patch(M_PATH + "get_cve_affected_source_packages_status") - @mock.patch("uaclient.apt.get_pkg_candidate_version") - @mock.patch(M_PATH + "_get_cve_data") - @mock.patch(M_PATH + "query_installed_source_pkg_versions") - @mock.patch(M_PATH + "_check_cve_fixed_by_livepatch") - def 
test_fix_plan_for_cve( - self, - m_check_cve_fixed_by_livepatch, - m_query_installed_pkgs, - m_get_cve_data, - m_get_pkg_candidate_version, - m_get_cve_affected_pkgs, - m_merge_usn_pkgs, - ): - m_check_cve_fixed_by_livepatch.return_value = (None, None) - m_query_installed_pkgs.return_value = { - "pkg1": { - "bin1": "1.0", - "bin2": "1.1", - } - } - m_get_cve_data.return_value = ( - mock.MagicMock( - description="descr", - notices=[mock.MagicMock(title="test")], - ), - [], - ) - m_get_cve_affected_pkgs.return_value = { - "pkg1": CVEPackageStatus( - cve_response={ - "status": "released", - "pocket": "security", - } - ), - } - m_merge_usn_pkgs.return_value = { - "pkg1": { - "source": { - "description": "description", - "name": "pkg1", - "is_source": True, - "version": "1.1", - }, - "bin1": { - "is_source": False, - "name": "bin1", - "version": "1.1", - }, - "bin2": { - "is_source": False, - "name": "bin2", - "version": "1.2", - }, - } - } - m_get_pkg_candidate_version.side_effect = ["1.1", "1.2"] - - expected_plan = FixPlanResult( - title="CVE-1234-1235", - description="test", - expected_status=str(FixStatus.SYSTEM_NON_VULNERABLE), - affected_packages=["pkg1"], - plan=[ - FixPlanAptUpgradeStep( - data=AptUpgradeData( - binary_packages=["bin1", "bin2"], - source_packages=["pkg1"], - pocket="standard-updates", - ), - order=1, - ) - ], - warnings=[], - error=None, - additional_data=AdditionalData(), - ) - - assert expected_plan == fix_plan_cve( - issue_id="cve-1234-1235", cfg=mock.MagicMock() - ) - - @mock.patch(M_PATH + "merge_usn_released_binary_package_versions") - @mock.patch(M_PATH + "get_cve_affected_source_packages_status") - @mock.patch(M_PATH + "_enabled_services") - @mock.patch(M_PATH + "_is_attached") - @mock.patch(M_PATH + "get_contract_expiry_status") - @mock.patch("uaclient.apt.get_pkg_candidate_version") - @mock.patch(M_PATH + "_get_cve_data") - @mock.patch(M_PATH + "query_installed_source_pkg_versions") - @mock.patch(M_PATH + "_check_cve_fixed_by_livepatch") - def test_fix_plan_for_cve_that_requires_pro_services( - self, - m_check_cve_fixed_by_livepatch, - m_query_installed_pkgs, - m_get_cve_data, - m_get_pkg_candidate_version, - m_get_contract_expiry_status, - m_is_attached, - m_enabled_services, - m_get_cve_affected_pkgs, - m_merge_usn_pkgs, - ): - m_check_cve_fixed_by_livepatch.return_value = (None, None) - m_query_installed_pkgs.return_value = { - "pkg1": { - "bin1": "1.0", - "bin2": "1.1", - }, - "pkg2": { - "bin3": "1.5", - }, - "pkg3": { - "bin4": "1.8", - }, - } - m_get_cve_data.return_value = ( - mock.MagicMock( - description="descr", - notices=[], - ), - [], - ) - m_get_cve_affected_pkgs.return_value = { - "pkg1": CVEPackageStatus( - cve_response={ - "status": "released", - "pocket": "security", - } - ), - "pkg2": CVEPackageStatus( - cve_response={ - "status": "released", - "pocket": "esm-infra", - } - ), - "pkg3": CVEPackageStatus( - cve_response={ - "status": "released", - "pocket": "esm-apps", - } - ), - } - m_merge_usn_pkgs.return_value = { - "pkg1": { - "source": { - "description": "description", - "name": "pkg1", - "is_source": True, - "version": "1.1", - }, - "bin1": { - "is_source": False, - "name": "bin1", - "version": "1.1", - }, - "bin2": { - "is_source": False, - "name": "bin2", - "version": "1.2", - }, - }, - "pkg2": { - "source": { - "description": "description", - "name": "pkg2", - "is_source": True, - "version": "1.5", - }, - "bin3": { - "is_source": False, - "name": "bin3", - "version": "1.6~esm1", - }, - }, - "pkg3": { - "source": { - "description": 
"description", - "name": "pkg3", - "is_source": True, - "version": "1.8", - }, - "bin4": { - "is_source": False, - "name": "bin4", - "version": "1.8.1~esm1", - }, - }, - } - m_get_pkg_candidate_version.side_effect = [ - "1.1", - "1.2", - "1.6~esm1", - "1.8.1~esm1", - ] - m_get_contract_expiry_status.return_value = ( - ContractExpiryStatus.ACTIVE, - None, - ) - m_is_attached.side_effect = [ - mock.MagicMock(is_attached=False), - mock.MagicMock(is_attached=True), - ] - m_enabled_services.side_effect = [ - mock.MagicMock(enabled_services=None), - mock.MagicMock( - enabled_services=[ - EnabledService(name="esm-infra", variant_enabled=False) - ] - ), - ] - - expected_plan = FixPlanResult( - title="CVE-1234-1235", - description="descr", - expected_status=str(FixStatus.SYSTEM_NON_VULNERABLE), - affected_packages=["pkg1", "pkg2", "pkg3"], - plan=[ - FixPlanAptUpgradeStep( - data=AptUpgradeData( - binary_packages=["bin1", "bin2"], - source_packages=["pkg1"], - pocket="standard-updates", - ), - order=1, - ), - FixPlanAttachStep( - data=AttachData( - reason="required-pro-service", - required_service="esm-infra", - source_packages=["pkg2"], - ), - order=2, - ), - FixPlanEnableStep( - data=EnableData( - service="esm-infra", - source_packages=["pkg2"], - ), - order=3, - ), - FixPlanAptUpgradeStep( - data=AptUpgradeData( - binary_packages=["bin3"], - source_packages=["pkg2"], - pocket="esm-infra", - ), - order=4, - ), - FixPlanEnableStep( - data=EnableData( - service="esm-apps", - source_packages=["pkg3"], - ), - order=5, - ), - FixPlanAptUpgradeStep( - data=AptUpgradeData( - binary_packages=["bin4"], - source_packages=["pkg3"], - pocket="esm-apps", - ), - order=6, - ), - ], - warnings=[], - error=None, - additional_data=AdditionalData(), - ) - - assert expected_plan == fix_plan_cve( - issue_id="cve-1234-1235", cfg=mock.MagicMock() - ) - - @mock.patch(M_PATH + "merge_usn_released_binary_package_versions") - @mock.patch(M_PATH + "get_cve_affected_source_packages_status") - @mock.patch("uaclient.apt.get_pkg_candidate_version") - @mock.patch("uaclient.api.u.pro.security.fix._get_cve_data") - @mock.patch(M_PATH + "query_installed_source_pkg_versions") - @mock.patch(M_PATH + "_check_cve_fixed_by_livepatch") - def test_fix_plan_for_cve_when_package_cannot_be_installed( - self, - m_check_cve_fixed_by_livepatch, - m_query_installed_pkgs, - m_get_cve_data, - m_get_pkg_candidate_version, - m_get_cve_affected_pkgs, - m_merge_usn_pkgs, - ): - m_check_cve_fixed_by_livepatch.return_value = (None, None) - m_query_installed_pkgs.return_value = { - "pkg1": { - "bin1": "1.0", - "bin2": "1.1", - }, - } - m_get_cve_data.return_value = ( - mock.MagicMock( - description="descr", - notices=[mock.MagicMock(title="test")], - ), - [], - ) - m_get_cve_affected_pkgs.return_value = { - "pkg1": CVEPackageStatus( - cve_response={ - "status": "released", - "pocket": "security", - } - ), - } - m_merge_usn_pkgs.return_value = { - "pkg1": { - "source": { - "description": "description", - "name": "pkg1", - "is_source": True, - "version": "1.1", - }, - "bin1": { - "is_source": False, - "name": "bin1", - "version": "1.1", - }, - "bin2": { - "is_source": False, - "name": "bin2", - "version": "1.2", - }, - }, - } - m_get_pkg_candidate_version.side_effect = ["1.1", "1.1"] - - expected_plan = FixPlanResult( - title="CVE-1234-1235", - description="test", - expected_status=str(FixStatus.SYSTEM_STILL_VULNERABLE), - affected_packages=["pkg1"], - plan=[ - FixPlanAptUpgradeStep( - data=AptUpgradeData( - binary_packages=["bin1"], - 
source_packages=["pkg1"], - pocket="standard-updates", - ), - order=2, - ), - ], - warnings=[ - FixPlanWarningPackageCannotBeInstalled( - data=PackageCannotBeInstalledData( - binary_package="bin2", - source_package="pkg1", - binary_package_version="1.2", - pocket="standard-updates", - related_source_packages=["pkg1"], - ), - order=1, - ) - ], - error=None, - additional_data=AdditionalData(), - ) - assert expected_plan == fix_plan_cve( - issue_id="cve-1234-1235", cfg=mock.MagicMock() - ) - - @mock.patch(M_PATH + "merge_usn_released_binary_package_versions") - @mock.patch(M_PATH + "get_cve_affected_source_packages_status") - @mock.patch("uaclient.apt.get_pkg_candidate_version") - @mock.patch("uaclient.api.u.pro.security.fix._get_cve_data") - @mock.patch(M_PATH + "query_installed_source_pkg_versions") - @mock.patch(M_PATH + "_check_cve_fixed_by_livepatch") - def test_fix_plan_for_cve_with_not_released_status( - self, - m_check_cve_fixed_by_livepatch, - m_query_installed_pkgs, - m_get_cve_data, - m_get_pkg_candidate_version, - m_get_cve_affected_pkgs, - m_merge_usn_pkgs, - ): - m_check_cve_fixed_by_livepatch.return_value = (None, None) - m_query_installed_pkgs.return_value = { - "pkg1": { - "bin1": "1.0", - "bin2": "1.1", - }, - "pkg2": { - "bin3": "1.2", - }, - } - m_get_cve_data.return_value = ( - mock.MagicMock( - description="descr", - notices=[mock.MagicMock(title="test")], - ), - [], - ) - m_get_cve_affected_pkgs.return_value = { - "pkg1": CVEPackageStatus( - cve_response={ - "status": "released", - "pocket": "security", - } - ), - "pkg2": CVEPackageStatus( - cve_response={ - "status": "needed", - "pocket": "updates", - } - ), - } - m_merge_usn_pkgs.return_value = { - "pkg1": { - "source": { - "description": "description", - "name": "pkg1", - "is_source": True, - "version": "1.1", - }, - "bin1": { - "is_source": False, - "name": "bin1", - "version": "1.1", - }, - "bin2": { - "is_source": False, - "name": "bin2", - "version": "1.2", - }, - } - } - m_get_pkg_candidate_version.side_effect = ["1.1", "1.2"] - - expected_plan = FixPlanResult( - title="CVE-1234-1235", - description="test", - expected_status=str(FixStatus.SYSTEM_STILL_VULNERABLE), - affected_packages=["pkg1", "pkg2"], - plan=[ - FixPlanAptUpgradeStep( - data=AptUpgradeData( - binary_packages=["bin1", "bin2"], - source_packages=["pkg1"], - pocket="standard-updates", - ), - order=2, - ), - ], - warnings=[ - FixPlanWarningSecurityIssueNotFixed( - data=SecurityIssueNotFixedData( - source_packages=["pkg2"], - status="needed", - ), - order=1, - ) - ], - error=None, - additional_data=AdditionalData(), - ) - - assert expected_plan == fix_plan_cve( - issue_id="cve-1234-1235", cfg=mock.MagicMock() - ) - - @mock.patch("uaclient.apt.get_pkg_candidate_version") - @mock.patch(M_PATH + "merge_usn_released_binary_package_versions") - @mock.patch(M_PATH + "get_affected_packages_from_usn") - @mock.patch(M_PATH + "_get_usn_data") - @mock.patch(M_PATH + "query_installed_source_pkg_versions") - def test_fix_plan_for_usn( - self, - m_query_installed_pkgs, - m_get_usn_data, - m_get_affected_packages_from_usn, - m_merge_usn_released_binary_package, - m_get_pkg_candidate_version, - ): - m_query_installed_pkgs.return_value = { - "pkg1": { - "bin1": "1.0", - "bin2": "1.1", - }, - "pkg2": { - "bin3": "1.2", - }, - "pkg3": { - "bin4": "1.3", - }, - } - m_get_usn_data.return_value = ( - mock.MagicMock(cves_ids=[], references=[], title="test"), - [ - mock.MagicMock( - id="USN-2345-1", - cves_ids=["CVE-1234-12345"], - references=[], - title="test2", - ), - 
mock.MagicMock( - id="USN-3456-8", - cves_ids=[], - references=["https://launchpad.net/bugs/BUG"], - title="test3", - ), - ], - ) - - m_get_affected_packages_from_usn.side_effect = [ - { - "pkg1": CVEPackageStatus( - cve_response={ - "status": "released", - "pocket": "security", - } - ), - }, - { - "pkg2": CVEPackageStatus( - cve_response={ - "status": "released", - "pocket": "security", - } - ), - }, - { - "pkg3": CVEPackageStatus( - cve_response={ - "status": "released", - "pocket": "security", - } - ), - }, - ] - m_merge_usn_released_binary_package.side_effect = [ - { - "pkg1": { - "source": { - "description": "description", - "name": "pkg1", - "is_source": True, - "version": "1.1", - }, - "bin1": { - "is_source": False, - "name": "bin1", - "version": "1.1", - }, - "bin2": { - "is_source": False, - "name": "bin2", - "version": "1.2", - }, - }, - }, - { - "pkg2": { - "source": { - "description": "description", - "name": "pkg2", - "is_source": True, - "version": "1.3", - }, - "bin3": { - "is_source": False, - "name": "bin3", - "version": "1.3", - }, - }, - }, - { - "pkg3": { - "source": { - "description": "description", - "name": "pkg3", - "is_source": True, - "version": "1.4", - }, - "bin4": { - "is_source": False, - "name": "bin4", - "version": "1.4", - }, - } - }, - ] - m_get_pkg_candidate_version.side_effect = ["1.1", "1.2", "1.3", "1.4"] - - expected_plan = FixPlanUSNResult( - target_usn_plan=FixPlanResult( - title="USN-1234-1", - description="test", - affected_packages=["pkg1"], - expected_status=str(FixStatus.SYSTEM_NON_VULNERABLE), - plan=[ - FixPlanAptUpgradeStep( - data=AptUpgradeData( - binary_packages=["bin1", "bin2"], - source_packages=["pkg1"], - pocket="standard-updates", - ), - order=1, - ), - ], - warnings=[], - error=None, - additional_data=USNAdditionalData( - associated_cves=[], - associated_launchpad_bugs=[], - ), - ), - related_usns_plan=[ - FixPlanResult( - title="USN-2345-1", - description="test2", - expected_status=str(FixStatus.SYSTEM_NON_VULNERABLE), - affected_packages=["pkg2"], - plan=[ - FixPlanAptUpgradeStep( - data=AptUpgradeData( - binary_packages=["bin3"], - source_packages=["pkg2"], - pocket="standard-updates", - ), - order=1, - ), - ], - warnings=[], - error=None, - additional_data=USNAdditionalData( - associated_cves=["CVE-1234-12345"], - associated_launchpad_bugs=[], - ), - ), - FixPlanResult( - title="USN-3456-8", - description="test3", - expected_status=str(FixStatus.SYSTEM_NON_VULNERABLE), - affected_packages=["pkg3"], - plan=[ - FixPlanAptUpgradeStep( - data=AptUpgradeData( - binary_packages=["bin4"], - source_packages=["pkg3"], - pocket="standard-updates", - ), - order=1, - ), - ], - warnings=[], - error=None, - additional_data=USNAdditionalData( - associated_cves=[], - associated_launchpad_bugs=[ - "https://launchpad.net/bugs/BUG" - ], - ), - ), - ], - ) - - assert expected_plan == fix_plan_usn( - issue_id="usn-1234-1", cfg=mock.MagicMock() - ) - - @mock.patch(M_PATH + "merge_usn_released_binary_package_versions") - @mock.patch(M_PATH + "get_cve_affected_source_packages_status") - @mock.patch("uaclient.apt.get_pkg_candidate_version") - @mock.patch(M_PATH + "_get_cve_data") - @mock.patch(M_PATH + "query_installed_source_pkg_versions") - @mock.patch(M_PATH + "_check_cve_fixed_by_livepatch") - def test_fix_plan_for_cve_when_package_already_installed( - self, - m_check_cve_fixed_by_livepatch, - m_query_installed_pkgs, - m_get_cve_data, - m_get_pkg_candidate_version, - m_get_cve_affected_pkgs, - m_merge_usn_pkgs, - ): - 
m_check_cve_fixed_by_livepatch.return_value = (None, None) - m_query_installed_pkgs.return_value = { - "pkg1": { - "bin1": "1.0", - "bin2": "1.1", - }, - } - m_get_cve_data.return_value = ( - mock.MagicMock( - description="descr", - notices=[mock.MagicMock(title="test")], - ), - [], - ) - m_get_cve_affected_pkgs.return_value = { - "pkg1": CVEPackageStatus( - cve_response={ - "status": "released", - "pocket": "security", - } - ), - } - m_merge_usn_pkgs.return_value = { - "pkg1": { - "source": { - "description": "description", - "name": "pkg1", - "is_source": True, - "version": "1.0", - }, - "bin1": { - "is_source": False, - "name": "bin1", - "version": "1.0", - }, - "bin2": { - "is_source": False, - "name": "bin2", - "version": "1.1", - }, - }, - } - m_get_pkg_candidate_version.side_effect = ["1.1", "1.1"] - - expected_plan = FixPlanResult( - title="CVE-1234-1235", - description="test", - expected_status=str(FixStatus.SYSTEM_NON_VULNERABLE), - affected_packages=["pkg1"], - plan=[ - FixPlanNoOpAlreadyFixedStep( - data=NoOpAlreadyFixedData( - status="cve-already-fixed", - source_packages=["pkg1"], - pocket="standard-updates", - ), - order=1, - ), - ], - warnings=[], - error=None, - additional_data=AdditionalData(), - ) - assert expected_plan == fix_plan_cve( - issue_id="cve-1234-1235", cfg=mock.MagicMock() - ) diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/api/u/pro/attach/auto/full_auto_attach/v1.py ubuntu-advantage-tools-31.2~23.10/uaclient/api/u/pro/attach/auto/full_auto_attach/v1.py --- ubuntu-advantage-tools-30~23.10/uaclient/api/u/pro/attach/auto/full_auto_attach/v1.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/api/u/pro/attach/auto/full_auto_attach/v1.py 2024-02-14 15:37:46.000000000 +0000 @@ -18,15 +18,18 @@ fields = [ Field("enable", data_list(StringDataValue), False), Field("enable_beta", data_list(StringDataValue), False), + Field("cloud_override", StringDataValue, False), ] def __init__( self, enable: Optional[List[str]] = None, enable_beta: Optional[List[str]] = None, + cloud_override: Optional[str] = None, ): self.enable = enable self.enable_beta = enable_beta + self.cloud_override = cloud_override class FullAutoAttachResult(DataObject, AdditionalInfo): @@ -75,7 +78,7 @@ mode: event_logger.EventLoggerMode = event_logger.EventLoggerMode.JSON ) -> FullAutoAttachResult: try: - with lock.SpinLock( + with lock.RetryLock( cfg=cfg, lock_holder="pro.api.u.pro.attach.auto.full_auto_attach.v1", ): @@ -103,7 +106,9 @@ ): raise exceptions.AutoAttachDisabledError() - instance = identity.cloud_instance_factory() + instance = identity.cloud_instance_factory( + cloud_override=options.cloud_override + ) enable_default_services = ( options.enable is None and options.enable_beta is None ) diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/api/u/pro/attach/auto/should_auto_attach/v1.py ubuntu-advantage-tools-31.2~23.10/uaclient/api/u/pro/attach/auto/should_auto_attach/v1.py --- ubuntu-advantage-tools-30~23.10/uaclient/api/u/pro/attach/auto/should_auto_attach/v1.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/api/u/pro/attach/auto/should_auto_attach/v1.py 2024-02-29 14:03:11.000000000 +0000 @@ -29,7 +29,8 @@ ) return ShouldAutoAttachResult( - should_auto_attach=is_installed("ubuntu-advantage-pro"), + should_auto_attach=is_installed("ubuntu-advantage-pro") + or is_installed("ubuntu-pro-auto-attach"), ) diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/api/u/pro/security/fix/__init__.py
ubuntu-advantage-tools-31.2~23.10/uaclient/api/u/pro/security/fix/__init__.py --- ubuntu-advantage-tools-30~23.10/uaclient/api/u/pro/security/fix/__init__.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/api/u/pro/security/fix/__init__.py 2024-01-18 17:34:13.000000000 +0000 @@ -1,928 +1,25 @@ -import enum -import re -from collections import defaultdict -from typing import Any, Dict, List, NamedTuple, Optional, Tuple - -from uaclient import apt, exceptions, messages -from uaclient.api.u.pro.status.enabled_services.v1 import _enabled_services -from uaclient.api.u.pro.status.is_attached.v1 import _is_attached -from uaclient.config import UAConfig -from uaclient.contract import ContractExpiryStatus, get_contract_expiry_status -from uaclient.data_types import ( - DataObject, - Field, - IntDataValue, - StringDataValue, - data_list, -) -from uaclient.security import ( - CVE, - CVE_OR_USN_REGEX, - USN, - BinaryPackageFix, - CVEPackageStatus, - FixStatus, - UASecurityClient, - _check_cve_fixed_by_livepatch, - get_affected_packages_from_usn, - get_cve_affected_source_packages_status, - get_related_usns, - group_by_usn_package_status, - merge_usn_released_binary_package_versions, - query_installed_source_pkg_versions, -) - -STANDARD_UPDATES_POCKET = "standard-updates" -ESM_INFRA_POCKET = "esm-infra" -ESM_APPS_POCKET = "esm-apps" - -UnfixedPackage = NamedTuple( - "UnfixedPackage", - [ - ("source_package", str), - ("binary_package", str), - ("version", Optional[str]), - ], +from uaclient.api.u.pro.security.fix._common.plan.v1 import ( + AptUpgradeData, + AttachData, + EnableData, + FixPlanError, + FixPlanResult, + FixPlanStep, + FixPlanWarning, + NoOpData, + PackageCannotBeInstalledData, + SecurityIssueNotFixedData, ) - -@enum.unique -class FixStepType(enum.Enum): - ATTACH = "attach" - ENABLE = "enable" - NOOP = "no-op" - APT_UPGRADE = "apt-upgrade" - - -@enum.unique -class FixPlanNoOpStatus(enum.Enum): - ALREADY_FIXED = "cve-already-fixed" - NOT_AFFECTED = "system-not-affected" - FIXED_BY_LIVEPATCH = "cve-fixed-by-livepatch" - - -@enum.unique -class FixPlanAttachReason(enum.Enum): - EXPIRED_CONTRACT = "expired-contract-token" - REQUIRED_PRO_SERVICE = "required-pro-service" - - -@enum.unique -class FixWarningType(enum.Enum): - PACKAGE_CANNOT_BE_INSTALLED = "package-cannot-be-installed" - SECURITY_ISSUE_NOT_FIXED = "security-issue-not-fixed" - - -class FixPlanStep(DataObject): - fields = [ - Field("operation", StringDataValue), - Field("order", IntDataValue), - ] - - def __init__(self, *, operation: str, order: int): - self.operation = operation - self.order = order - - -class AptUpgradeData(DataObject): - fields = [ - Field("binary_packages", data_list(StringDataValue)), - Field("source_packages", data_list(StringDataValue)), - Field("pocket", StringDataValue), - ] - - def __init__( - self, - *, - binary_packages: List[str], - source_packages: List[str], - pocket: str - ): - self.binary_packages = binary_packages - self.source_packages = source_packages - self.pocket = pocket - - -class FixPlanAptUpgradeStep(FixPlanStep): - fields = [ - Field("operation", StringDataValue), - Field("data", AptUpgradeData), - Field("order", IntDataValue), - ] - - def __init__(self, *, data: AptUpgradeData, order: int): - super().__init__(operation=FixStepType.APT_UPGRADE.value, order=order) - self.data = data - - -class AttachData(DataObject): - fields = [ - Field("reason", StringDataValue), - Field("required_service", StringDataValue), - Field("source_packages", 
data_list(StringDataValue)), - ] - - def __init__( - self, *, reason: str, source_packages: List[str], required_service: str - ): - self.reason = reason - self.source_packages = source_packages - self.required_service = required_service - - -class FixPlanAttachStep(FixPlanStep): - fields = [ - Field("operation", StringDataValue), - Field("data", AttachData), - Field("order", IntDataValue), - ] - - def __init__(self, *, data: AttachData, order: int): - super().__init__(operation=FixStepType.ATTACH.value, order=order) - self.data = data - - -class EnableData(DataObject): - fields = [ - Field("service", StringDataValue), - Field("source_packages", data_list(StringDataValue)), - ] - - def __init__(self, *, service: str, source_packages: List[str]): - self.service = service - self.source_packages = source_packages - - -class FixPlanEnableStep(FixPlanStep): - fields = [ - Field("operation", StringDataValue), - Field("data", EnableData), - Field("order", IntDataValue), - ] - - def __init__(self, *, data: EnableData, order: int): - super().__init__(operation=FixStepType.ENABLE.value, order=order) - self.data = data - - -class NoOpData(DataObject): - fields = [ - Field("status", StringDataValue), - ] - - def __init__(self, *, status: str): - self.status = status - - -class FixPlanNoOpStep(FixPlanStep): - fields = [ - Field("operation", StringDataValue), - Field("data", NoOpData), - Field("order", IntDataValue), - ] - - def __init__(self, *, data: NoOpData, order: int): - super().__init__(operation=FixStepType.NOOP.value, order=order) - self.data = data - - -class NoOpLivepatchFixData(NoOpData): - fields = [ - Field("status", StringDataValue), - Field("patch_version", StringDataValue), - ] - - def __init__(self, *, status: str, patch_version: str): - super().__init__(status=status) - self.patch_version = patch_version - - -class FixPlanNoOpLivepatchFixStep(FixPlanNoOpStep): - fields = [ - Field("operation", StringDataValue), - Field("data", NoOpLivepatchFixData), - Field("order", IntDataValue), - ] - - def __init__(self, *, data: NoOpLivepatchFixData, order: int): - super().__init__(data=data, order=order) - - -class NoOpAlreadyFixedData(NoOpData): - fields = [ - Field("status", StringDataValue), - Field("source_packages", data_list(StringDataValue)), - Field("pocket", StringDataValue), - ] - - def __init__( - self, *, status: str, source_packages: List[str], pocket: str - ): - super().__init__(status=status) - self.source_packages = source_packages - self.pocket = pocket - - -class FixPlanNoOpAlreadyFixedStep(FixPlanNoOpStep): - fields = [ - Field("operation", StringDataValue), - Field("data", NoOpLivepatchFixData), - Field("order", IntDataValue), - ] - - def __init__(self, *, data: NoOpAlreadyFixedData, order: int): - super().__init__(data=data, order=order) - - -class FixPlanWarning(DataObject): - fields = [ - Field("warning_type", StringDataValue), - Field("order", IntDataValue), - ] - - def __init__(self, *, warning_type: str, order: int): - self.warning_type = warning_type - self.order = order - - -class SecurityIssueNotFixedData(DataObject): - fields = [ - Field("source_packages", data_list(StringDataValue)), - Field("status", StringDataValue), - ] - - def __init__(self, *, source_packages: List[str], status: str): - self.source_packages = source_packages - self.status = status - - -class FixPlanWarningSecurityIssueNotFixed(FixPlanWarning): - fields = [ - Field("warning_type", StringDataValue), - Field("order", IntDataValue), - Field("data", SecurityIssueNotFixedData), - ] - - def 
__init__(self, *, order: int, data: SecurityIssueNotFixedData): - super().__init__( - warning_type=FixWarningType.SECURITY_ISSUE_NOT_FIXED.value, - order=order, - ) - self.data = data - - -class PackageCannotBeInstalledData(DataObject): - fields = [ - Field("binary_package", StringDataValue), - Field("binary_package_version", StringDataValue), - Field("source_package", StringDataValue), - Field("related_source_packages", data_list(StringDataValue)), - Field("pocket", StringDataValue), - ] - - def __init__( - self, - *, - binary_package: str, - binary_package_version: str, - source_package: str, - pocket: str, - related_source_packages: List[str] - ): - self.source_package = source_package - self.binary_package = binary_package - self.binary_package_version = binary_package_version - self.pocket = pocket - self.related_source_packages = related_source_packages - - -class FixPlanWarningPackageCannotBeInstalled(FixPlanWarning): - fields = [ - Field("warning_type", StringDataValue), - Field("order", IntDataValue), - Field("data", SecurityIssueNotFixedData), - ] - - def __init__(self, *, order: int, data: PackageCannotBeInstalledData): - super().__init__( - warning_type=FixWarningType.PACKAGE_CANNOT_BE_INSTALLED.value, - order=order, - ) - self.data = data - - -class FixPlanError(DataObject): - fields = [ - Field("msg", StringDataValue), - Field("code", StringDataValue, required=False), - ] - - def __init__(self, *, msg: str, code: Optional[str]): - self.msg = msg - self.code = code - - -class AdditionalData(DataObject): - pass - - -class USNAdditionalData(AdditionalData): - - fields = [ - Field("associated_cves", data_list(StringDataValue)), - Field("associated_launchpad_bugs", data_list(StringDataValue)), - ] - - def __init__( - self, - *, - associated_cves: List[str], - associated_launchpad_bugs: List[str] - ): - self.associated_cves = associated_cves - self.associated_launchpad_bugs = associated_launchpad_bugs - - -class FixPlanResult(DataObject): - fields = [ - Field("title", StringDataValue), - Field("description", StringDataValue, required=False), - Field("expected_status", StringDataValue), - Field("affected_packages", data_list(StringDataValue), required=False), - Field("plan", data_list(FixPlanStep)), - Field("warnings", data_list(FixPlanWarning), required=False), - Field("error", FixPlanError, required=False), - Field("additional_data", AdditionalData, required=False), - ] - - def __init__( - self, - *, - title: str, - expected_status: str, - plan: List[FixPlanStep], - warnings: List[FixPlanWarning], - error: Optional[FixPlanError], - additional_data: AdditionalData, - description: Optional[str] = None, - affected_packages: Optional[List[str]] = None - ): - self.title = title - self.description = description - self.expected_status = expected_status - self.affected_packages = affected_packages - self.plan = plan - self.warnings = warnings - self.error = error - self.additional_data = additional_data - - -class FixPlanUSNResult(DataObject): - fields = [ - Field("target_usn_plan", FixPlanResult), - Field("related_usns_plan", data_list(FixPlanResult), required=False), - ] - - def __init__( - self, - *, - target_usn_plan: FixPlanResult, - related_usns_plan: List[FixPlanResult] - ): - self.target_usn_plan = target_usn_plan - self.related_usns_plan = related_usns_plan - - -class FixPlan: - def __init__( - self, - title: str, - description: Optional[str], - affected_packages: Optional[List[str]] = None, - ): - self.order = 1 - self.title = title - self.description = description - 
self.affected_packages = affected_packages - self.fix_steps = [] # type: List[FixPlanStep] - self.fix_warnings = [] # type: List[FixPlanWarning] - self.error = None # type: Optional[FixPlanError] - self.additional_data = AdditionalData() - - def register_step( - self, - operation: FixStepType, - data: Dict[str, Any], - ): - # just to make mypy happy - fix_step = None # type: Optional[FixPlanStep] - - if operation == FixStepType.ATTACH: - fix_step = FixPlanAttachStep( - order=self.order, data=AttachData.from_dict(data) - ) - elif operation == FixStepType.ENABLE: - fix_step = FixPlanEnableStep( - order=self.order, data=EnableData.from_dict(data) - ) - elif operation == FixStepType.NOOP: - if "patch_version" in data: - fix_step = FixPlanNoOpLivepatchFixStep( - order=self.order, data=NoOpLivepatchFixData.from_dict(data) - ) - elif "source_packages" in data: - fix_step = FixPlanNoOpAlreadyFixedStep( - order=self.order, data=NoOpAlreadyFixedData.from_dict(data) - ) - else: - fix_step = FixPlanNoOpStep( - order=self.order, data=NoOpData.from_dict(data) - ) - else: - fix_step = FixPlanAptUpgradeStep( - order=self.order, data=AptUpgradeData.from_dict(data) - ) - - self.fix_steps.append(fix_step) - self.order += 1 - - def register_warning( - self, warning_type: FixWarningType, data: Dict[str, Any] - ): - fix_warning = None # type: Optional[FixPlanWarning] - - if warning_type == FixWarningType.SECURITY_ISSUE_NOT_FIXED: - fix_warning = FixPlanWarningSecurityIssueNotFixed( - order=self.order, - data=SecurityIssueNotFixedData.from_dict(data), - ) - else: - fix_warning = FixPlanWarningPackageCannotBeInstalled( - order=self.order, - data=PackageCannotBeInstalledData.from_dict(data), - ) - - self.fix_warnings.append(fix_warning) - self.order += 1 - - def register_error(self, error_msg: str, error_code: Optional[str]): - self.error = FixPlanError(msg=error_msg, code=error_code) - - def register_additional_data(self, additional_data: Dict[str, Any]): - self.additional_data = AdditionalData(**additional_data) - - def _get_status(self) -> str: - if self.error: - return "error" - - if ( - len(self.fix_steps) == 1 - and isinstance(self.fix_steps[0], FixPlanNoOpStep) - and self.fix_steps[0].data.status == "system-not-affected" - ): - return str(FixStatus.SYSTEM_NOT_AFFECTED) - elif self.fix_warnings: - return str(FixStatus.SYSTEM_STILL_VULNERABLE) - else: - return str(FixStatus.SYSTEM_NON_VULNERABLE) - - @property - def fix_plan(self): - return FixPlanResult( - title=self.title, - description=self.description, - expected_status=self._get_status(), - affected_packages=self.affected_packages, - plan=self.fix_steps, - warnings=self.fix_warnings, - error=self.error, - additional_data=self.additional_data, - ) - - -class USNFixPlan(FixPlan): - def register_additional_data(self, additional_data: Dict[str, Any]): - self.additional_data = USNAdditionalData(**additional_data) - - -def get_fix_plan( - title: str, - description: Optional[str] = None, - affected_packages: Optional[List[str]] = None, -): - if not title or "cve" in title.lower(): - return FixPlan( - title=title, - description=description, - affected_packages=affected_packages, - ) - - return USNFixPlan( - title=title, - description=description, - affected_packages=affected_packages, - ) - - -def _get_cve_data( - issue_id: str, - client: UASecurityClient, -) -> Tuple[CVE, List[USN]]: - try: - cve = client.get_cve(cve_id=issue_id) - usns = client.get_notices(details=issue_id) - except exceptions.SecurityAPIError as e: - if e.code == 404: - raise 
exceptions.SecurityIssueNotFound(issue_id=issue_id) - raise e - - return cve, usns - - -def _get_usn_data( - issue_id: str, client: UASecurityClient -) -> Tuple[USN, List[USN]]: - try: - usn = client.get_notice(notice_id=issue_id) - usns = get_related_usns(usn, client) - except exceptions.SecurityAPIError as e: - if e.code == 404: - raise exceptions.SecurityIssueNotFound(issue_id=issue_id) - raise e - - if not usn.response["release_packages"]: - # Since usn.release_packages filters to our current release only - # check overall metadata and error if empty. - raise exceptions.SecurityAPIMetadataError( - error_msg="metadata defines no fixed package versions.", - issue=issue_id, - extra_info="", - ) - - return usn, usns - - -def _get_upgradable_pkgs( - binary_pkgs: List[BinaryPackageFix], - pocket: str, -) -> Tuple[List[str], List[UnfixedPackage]]: - upgrade_pkgs = [] - unfixed_pkgs = [] - - for binary_pkg in sorted(binary_pkgs): - check_esm_cache = ( - pocket != messages.SECURITY_UBUNTU_STANDARD_UPDATES_POCKET - ) - candidate_version = apt.get_pkg_candidate_version( - binary_pkg.binary_pkg, check_esm_cache=check_esm_cache - ) - if ( - candidate_version - and apt.version_compare( - binary_pkg.fixed_version, candidate_version - ) - <= 0 - ): - upgrade_pkgs.append(binary_pkg.binary_pkg) - else: - unfixed_pkgs.append( - UnfixedPackage( - source_package=binary_pkg.source_pkg, - binary_package=binary_pkg.binary_pkg, - version=binary_pkg.fixed_version, - ) - ) - - return upgrade_pkgs, unfixed_pkgs - - -def _get_upgradable_package_candidates_by_pocket( - pkg_status_group: List[Tuple[str, CVEPackageStatus]], - usn_released_pkgs: Dict[str, Dict[str, Dict[str, str]]], - installed_pkgs: Dict[str, Dict[str, str]], -): - binary_pocket_pkgs = defaultdict(list) - src_pocket_pkgs = defaultdict(list) - - for src_pkg, pkg_status in pkg_status_group: - src_pocket_pkgs[pkg_status.pocket_source].append((src_pkg, pkg_status)) - for binary_pkg, version in installed_pkgs[src_pkg].items(): - usn_released_src = usn_released_pkgs.get(src_pkg, {}) - if binary_pkg not in usn_released_src: - continue - fixed_version = usn_released_src.get(binary_pkg, {}).get( - "version", "" - ) - - if apt.version_compare(fixed_version, version) > 0: - binary_pocket_pkgs[pkg_status.pocket_source].append( - BinaryPackageFix( - source_pkg=src_pkg, - binary_pkg=binary_pkg, - fixed_version=fixed_version, - ) - ) - - return src_pocket_pkgs, binary_pocket_pkgs - - -def _fix_plan_cve(issue_id: str, cfg: UAConfig) -> FixPlanResult: - livepatch_cve_status, patch_version = _check_cve_fixed_by_livepatch( - issue_id - ) - - if livepatch_cve_status: - fix_plan = get_fix_plan(title=issue_id) - fix_plan.register_step( - operation=FixStepType.NOOP, - data={ - "status": FixPlanNoOpStatus.FIXED_BY_LIVEPATCH.value, - "patch_version": patch_version, - }, - ) - return fix_plan.fix_plan - - client = UASecurityClient(cfg=cfg) - installed_pkgs = query_installed_source_pkg_versions() - - try: - cve, usns = _get_cve_data(issue_id=issue_id, client=client) - except exceptions.SecurityIssueNotFound as e: - fix_plan = get_fix_plan(title=issue_id) - fix_plan.register_error(error_msg=e.msg, error_code=e.msg_code) - return fix_plan.fix_plan - - affected_pkg_status = get_cve_affected_source_packages_status( - cve=cve, installed_packages=installed_pkgs - ) - usn_released_pkgs = merge_usn_released_binary_package_versions( - usns, beta_pockets={} - ) - - cve_description = cve.description - for notice in cve.notices: - # Only look at the most recent USN title - cve_description 
= notice.title - break - - return _generate_fix_plan( - issue_id=issue_id, - issue_description=cve_description, - affected_pkg_status=affected_pkg_status, - usn_released_pkgs=usn_released_pkgs, - installed_pkgs=installed_pkgs, - cfg=cfg, - ) - - -def _fix_plan_usn(issue_id: str, cfg: UAConfig) -> FixPlanUSNResult: - client = UASecurityClient(cfg=cfg) - installed_pkgs = query_installed_source_pkg_versions() - - try: - usn, related_usns = _get_usn_data(issue_id=issue_id, client=client) - except exceptions.SecurityIssueNotFound as e: - fix_plan = get_fix_plan(title=issue_id) - fix_plan.register_error(error_msg=e.msg, error_code=e.msg_code) - return FixPlanUSNResult( - target_usn_plan=fix_plan.fix_plan, - related_usns_plan=[], - ) - - affected_pkg_status = get_affected_packages_from_usn( - usn=usn, installed_packages=installed_pkgs - ) - usn_released_pkgs = merge_usn_released_binary_package_versions( - [usn], beta_pockets={} - ) - additional_data = { - "associated_cves": [] if not usn.cves_ids else usn.cves_ids, - "associated_launchpad_bugs": [] - if not usn.references - else usn.references, - } - - target_usn_plan = _generate_fix_plan( - issue_id=issue_id, - issue_description=usn.title, - affected_pkg_status=affected_pkg_status, - usn_released_pkgs=usn_released_pkgs, - installed_pkgs=installed_pkgs, - cfg=cfg, - additional_data=additional_data, - ) - - related_usns_plan = [] # type: List[FixPlanResult] - for usn in related_usns: - affected_pkg_status = get_affected_packages_from_usn( - usn=usn, installed_packages=installed_pkgs - ) - usn_released_pkgs = merge_usn_released_binary_package_versions( - [usn], beta_pockets={} - ) - additional_data = { - "associated_cves": [] if not usn.cves_ids else usn.cves_ids, - "associated_launchpad_bugs": [] - if not usn.references - else usn.references, - } - - related_usns_plan.append( - _generate_fix_plan( - issue_id=usn.id, - issue_description=usn.title, - affected_pkg_status=affected_pkg_status, - usn_released_pkgs=usn_released_pkgs, - installed_pkgs=installed_pkgs, - cfg=cfg, - additional_data=additional_data, - ) - ) - - return FixPlanUSNResult( - target_usn_plan=target_usn_plan, - related_usns_plan=related_usns_plan, - ) - - -def fix_plan_cve(issue_id: str, cfg: UAConfig) -> FixPlanResult: - if not issue_id or not re.match(CVE_OR_USN_REGEX, issue_id): - fix_plan = get_fix_plan(title=issue_id) - msg = messages.INVALID_SECURITY_ISSUE.format(issue_id=issue_id) - fix_plan.register_error(error_msg=msg.msg, error_code=msg.name) - return fix_plan.fix_plan - - issue_id = issue_id.upper() - return _fix_plan_cve(issue_id, cfg) - - -def fix_plan_usn(issue_id: str, cfg: UAConfig) -> FixPlanUSNResult: - if not issue_id or not re.match(CVE_OR_USN_REGEX, issue_id): - fix_plan = get_fix_plan(title=issue_id) - msg = messages.INVALID_SECURITY_ISSUE.format(issue_id=issue_id) - fix_plan.register_error(error_msg=msg.msg, error_code=msg.name) - return FixPlanUSNResult( - target_usn_plan=fix_plan.fix_plan, - related_usns_plan=[], - ) - - issue_id = issue_id.upper() - return _fix_plan_usn(issue_id, cfg) - - -def get_pocket_short_name(pocket: str): - if pocket == messages.SECURITY_UBUNTU_STANDARD_UPDATES_POCKET: - return STANDARD_UPDATES_POCKET - elif pocket == messages.SECURITY_UA_INFRA_POCKET: - return ESM_INFRA_POCKET - elif pocket == messages.SECURITY_UA_APPS_POCKET: - return ESM_APPS_POCKET - else: - return pocket - - -def _generate_fix_plan( - *, - issue_id: str, - issue_description: str, - affected_pkg_status: Dict[str, CVEPackageStatus], - usn_released_pkgs: Dict[str, 
Dict[str, Dict[str, str]]], - installed_pkgs: Dict[str, Dict[str, str]], - cfg: UAConfig, - additional_data=None -) -> FixPlanResult: - count = len(affected_pkg_status) - src_pocket_pkgs = defaultdict(list) - - fix_plan = get_fix_plan( - title=issue_id, - description=issue_description, - affected_packages=sorted(list(affected_pkg_status.keys())), - ) - - if additional_data: - fix_plan.register_additional_data(additional_data) - - if count == 0: - fix_plan.register_step( - operation=FixStepType.NOOP, - data={"status": FixPlanNoOpStatus.NOT_AFFECTED.value}, - ) - return fix_plan.fix_plan - - pkg_status_groups = group_by_usn_package_status( - affected_pkg_status, usn_released_pkgs - ) - - for status_value, pkg_status_group in sorted(pkg_status_groups.items()): - if status_value != "released": - fix_plan.register_warning( - warning_type=FixWarningType.SECURITY_ISSUE_NOT_FIXED, - data={ - "source_packages": [ - src_pkg for src_pkg, _ in pkg_status_group - ], - "status": status_value, - }, - ) - else: - ( - src_pocket_pkgs, - binary_pocket_pkgs, - ) = _get_upgradable_package_candidates_by_pocket( - pkg_status_group, - usn_released_pkgs, - installed_pkgs, - ) - - if not src_pocket_pkgs: - return fix_plan.fix_plan - - for pocket in [ - messages.SECURITY_UBUNTU_STANDARD_UPDATES_POCKET, - messages.SECURITY_UA_INFRA_POCKET, - messages.SECURITY_UA_APPS_POCKET, - ]: - pkg_src_group = src_pocket_pkgs[pocket] - binary_pkgs = binary_pocket_pkgs[pocket] - source_pkgs = [src_pkg for src_pkg, _ in pkg_src_group] - pocket_name = get_pocket_short_name(pocket) - - if not binary_pkgs: - if source_pkgs: - fix_plan.register_step( - operation=FixStepType.NOOP, - data={ - "status": FixPlanNoOpStatus.ALREADY_FIXED.value, - "source_packages": source_pkgs, - "pocket": pocket_name, - }, - ) - continue - - upgrade_pkgs, unfixed_pkgs = _get_upgradable_pkgs(binary_pkgs, pocket) - - if unfixed_pkgs: - for unfixed_pkg in unfixed_pkgs: - fix_plan.register_warning( - warning_type=FixWarningType.PACKAGE_CANNOT_BE_INSTALLED, - data={ - "binary_package": unfixed_pkg.binary_package, - "binary_package_version": unfixed_pkg.version, - "source_package": unfixed_pkg.source_package, - "related_source_packages": source_pkgs, - "pocket": pocket_name, - }, - ) - - if pocket != messages.SECURITY_UBUNTU_STANDARD_UPDATES_POCKET: - if pocket == messages.SECURITY_UA_INFRA_POCKET: - service_to_check = "esm-infra" - else: - service_to_check = "esm-apps" - - if not _is_attached(cfg).is_attached: - fix_plan.register_step( - operation=FixStepType.ATTACH, - data={ - "reason": "required-pro-service", - "source_packages": source_pkgs, - "required_service": service_to_check, - }, - ) - else: - contract_expiry_status, _ = get_contract_expiry_status(cfg) - if contract_expiry_status != ContractExpiryStatus.ACTIVE: - fix_plan.register_step( - operation=FixStepType.ATTACH, - data={ - "reason": FixPlanAttachReason.EXPIRED_CONTRACT.value, # noqa - "source_packages": source_pkgs, - }, - ) - - enabled_services = _enabled_services(cfg).enabled_services or [] - enabled_services_names = ( - [service.name for service in enabled_services] - if enabled_services - else [] - ) - if service_to_check not in enabled_services_names: - fix_plan.register_step( - operation=FixStepType.ENABLE, - data={ - "service": service_to_check, - "source_packages": source_pkgs, - }, - ) - - fix_plan.register_step( - operation=FixStepType.APT_UPGRADE, - data={ - "binary_packages": upgrade_pkgs, - "source_packages": source_pkgs, - "pocket": pocket_name, - }, - ) - - return fix_plan.fix_plan 
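The hunk above removes the old in-place fix-plan implementation; the lines that follow replace it with a thin re-export of the data objects now living under _common/plan/v1. As a rough illustration of how a plan produced by that relocated code is consumed, here is a minimal sketch that walks a FixPlanResult and dispatches on each step's operation value. It is a sketch only: the import path for fix_plan_cve and FixStepType, the no-argument UAConfig() construction, and the CVE id are assumptions made for illustration, not taken from this diff.

# Illustrative only -- assumes fix_plan_cve and FixStepType are importable
# from the relocated plan module created by this diff.
from uaclient.api.u.pro.security.fix._common.plan.v1 import (
    FixStepType,
    fix_plan_cve,
)
from uaclient.config import UAConfig

# cfg construction is assumed; the unit tests above pass a MagicMock instead.
plan = fix_plan_cve(issue_id="CVE-2023-1234", cfg=UAConfig())

print(plan.title, plan.expected_status)
# Steps are ordered; each step carries an operation name and a typed payload.
for step in sorted(plan.plan, key=lambda s: s.order):
    if step.operation == FixStepType.APT_UPGRADE.value:
        # AptUpgradeData: binary_packages, source_packages, pocket
        print("apt upgrade", step.data.binary_packages, "from", step.data.pocket)
    elif step.operation == FixStepType.ATTACH.value:
        print("attach needed for", step.data.required_service)
    elif step.operation == FixStepType.ENABLE.value:
        print("enable", step.data.service)
    else:  # FixStepType.NOOP: already fixed, not affected, or livepatched
        print("no action:", step.data.status)

# Warnings explain why the plan may leave the system still vulnerable.
for warning in plan.warnings:
    print("warning:", warning.warning_type)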
+__all__ = [ + "AptUpgradeData", + "AttachData", + "EnableData", + "FixPlanError", + "FixPlanResult", + "FixPlanStep", + "FixPlanWarning", + "NoOpData", + "PackageCannotBeInstalledData", + "SecurityIssueNotFixedData", +] diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/api/u/pro/security/fix/_common/__init__.py ubuntu-advantage-tools-31.2~23.10/uaclient/api/u/pro/security/fix/_common/__init__.py --- ubuntu-advantage-tools-30~23.10/uaclient/api/u/pro/security/fix/_common/__init__.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/api/u/pro/security/fix/_common/__init__.py 2024-01-18 17:34:13.000000000 +0000 @@ -1,7 +1,703 @@ -from typing import Optional +import copy +import enum +import socket +from collections import defaultdict +from typing import Any, Dict, List, NamedTuple, Optional, Tuple  # noqa: F401 -from uaclient import messages -from uaclient.security import FixStatus +from uaclient import apt, exceptions, livepatch, messages, system, util +from uaclient.http import serviceclient + +CVE_OR_USN_REGEX = ( + r"((CVE|cve)-\d{4}-\d{4,7}$|(USN|usn|LSN|lsn)-\d{1,5}-\d{1,2}$)" +) + +API_V1_CVES = "cves.json" +API_V1_CVE_TMPL = "cves/{cve}.json" +API_V1_NOTICES = "notices.json" +API_V1_NOTICE_TMPL = "notices/{notice}.json" + +STANDARD_UPDATES_POCKET = "standard-updates" +ESM_INFRA_POCKET = "esm-infra" +ESM_APPS_POCKET = "esm-apps" + +BinaryPackageFix = NamedTuple( + "BinaryPackageFix", + [ + ("source_pkg", str), + ("binary_pkg", str), + ("fixed_version", str), + ], +) + +UnfixedPackage = NamedTuple( + "UnfixedPackage", + [ + ("pkg", str), + ("unfixed_reason", str), + ], +) + + +class FixStatus(enum.Enum): + """ + An enum to represent the system status after fix operation + """ + + class _Value: + def __init__(self, value: int, msg: str): + self.value = value + self.msg = msg + + SYSTEM_NON_VULNERABLE = _Value(0, "fixed") + SYSTEM_NOT_AFFECTED = _Value(0, "not-affected") + SYSTEM_STILL_VULNERABLE = _Value(1, "still-affected") + SYSTEM_VULNERABLE_UNTIL_REBOOT = _Value(2, "affected-until-reboot") + + @property + def exit_code(self): + return self.value.value + + def __str__(self): + return self.value.msg + + +class UASecurityClient(serviceclient.UAServiceClient): + + url_timeout = 20 + cfg_url_base_attr = "security_url" + + def _get_query_params( + self, query_params: Dict[str, Any] + ) -> Dict[str, Any]: + """ + Update query params with data from feature config. + """ + extra_security_params = self.cfg.cfg.get("features", {}).get( + "extra_security_params", {} + ) + + if query_params: + query_params.update(extra_security_params) + return query_params + + return extra_security_params + + @util.retry(socket.timeout, retry_sleeps=[1, 3, 5]) + def request_url( + self, path, data=None, headers=None, method=None, query_params=None + ): + query_params = self._get_query_params(query_params) + return super().request_url( + path=path, + data=data, + headers=headers, + method=method, + query_params=query_params, + log_response_body=False, + ) + + def get_cves( + self, + query: Optional[str] = None, + priority: Optional[str] = None, + package: Optional[str] = None, + limit: Optional[int] = None, + offset: Optional[int] = None, + component: Optional[str] = None, + version: Optional[str] = None, + status: Optional[List[str]] = None, + ) -> List["CVE"]: + """Query to match multiple-CVEs. + + @return: List of CVE instances based on the JSON response.
+ """ + query_params = { + "q": query, + "priority": priority, + "package": package, + "limit": limit, + "offset": offset, + "component": component, + "version": version, + "status": status, + } + response = self.request_url(API_V1_CVES, query_params=query_params) + if response.code != 200: + raise exceptions.SecurityAPIError( + url=API_V1_CVES, code=response.code, body=response.body + ) + return [ + CVE(client=self, response=cve_md) for cve_md in response.json_list + ] + + def get_cve(self, cve_id: str) -> "CVE": + """Query to match single-CVE. + + @return: CVE instance for JSON response from the Security API. + """ + url = API_V1_CVE_TMPL.format(cve=cve_id) + response = self.request_url(url) + if response.code != 200: + raise exceptions.SecurityAPIError( + url=url, code=response.code, body=response.body + ) + return CVE(client=self, response=response.json_dict) + + def get_notices( + self, + details: Optional[str] = None, + release: Optional[str] = None, + limit: Optional[int] = None, + offset: Optional[int] = None, + order: Optional[str] = None, + ) -> List["USN"]: + """Query to match multiple-USNs. + + @return: Sorted list of USN instances based on the the JSON response. + """ + query_params = { + "details": details, + "release": release, + "limit": limit, + "offset": offset, + "order": order, + } + response = self.request_url(API_V1_NOTICES, query_params=query_params) + if response.code != 200: + raise exceptions.SecurityAPIError( + url=API_V1_NOTICES, code=response.code, body=response.body + ) + + return sorted( + [ + USN(client=self, response=usn_md) + for usn_md in response.json_dict.get("notices", []) + if (details is None or details in usn_md.get("cves_ids", [])) + and usn_md.get("id", "").startswith("USN-") + ], + key=lambda x: x.id, + ) + + def get_notice(self, notice_id: str) -> "USN": + """Query to match single-USN. + + @return: USN instance representing the JSON response. 
+ """ + url = API_V1_NOTICE_TMPL.format(notice=notice_id) + response = self.request_url(url) + if response.code != 200: + raise exceptions.SecurityAPIError( + url=url, code=response.code, body=response.body + ) + return USN(client=self, response=response.json_dict) + + +# Model for Security API responses +class CVEPackageStatus: + """Class representing specific CVE PackageStatus on an Ubuntu series""" + + def __init__(self, cve_response: Dict[str, Any]): + self.response = cve_response + + @property + def description(self): + return self.response["description"] + + @property + def fixed_version(self): + return self.description + + @property + def pocket(self): + return self.response["pocket"] + + @property + def release_codename(self): + return self.response["release_codename"] + + @property + def status(self): + return self.response["status"] + + @property + def status_message(self): + if self.status == "needed": + return messages.SECURITY_CVE_STATUS_NEEDED + elif self.status == "needs-triage": + return messages.SECURITY_CVE_STATUS_TRIAGE + elif self.status == "pending": + return messages.SECURITY_CVE_STATUS_PENDING + elif self.status in ("ignored", "deferred"): + return messages.SECURITY_CVE_STATUS_IGNORED + elif self.status == "DNE": + return messages.SECURITY_CVE_STATUS_DNE + elif self.status == "not-affected": + return messages.SECURITY_CVE_STATUS_NOT_AFFECTED + elif self.status == "released": + return messages.SECURITY_FIX_RELEASE_STREAM.format( + fix_stream=self.pocket_source + ) + return messages.SECURITY_CVE_STATUS_UNKNOWN.format(status=self.status) + + @property + def requires_ua(self) -> bool: + """Return True if the package requires an active Pro subscription.""" + return bool( + self.pocket_source + != messages.SECURITY_UBUNTU_STANDARD_UPDATES_POCKET + ) + + @property + def pocket_source(self): + """Human-readable string representing where the fix is published.""" + if self.pocket == "esm-infra": + fix_source = messages.SECURITY_UA_INFRA_POCKET + elif self.pocket == "esm-apps": + fix_source = messages.SECURITY_UA_APPS_POCKET + elif self.pocket in ("updates", "security"): + fix_source = messages.SECURITY_UBUNTU_STANDARD_UPDATES_POCKET + else: + # TODO(GH: #1376 drop this when esm* pockets supplied by API) + if "esm" in self.fixed_version: + fix_source = messages.SECURITY_UA_INFRA_POCKET + else: + fix_source = messages.SECURITY_UBUNTU_STANDARD_UPDATES_POCKET + return fix_source + + +class CVE: + """Class representing CVE response from the SecurityClient""" + + def __init__(self, client: UASecurityClient, response: Dict[str, Any]): + self.response = response + self.client = client + + def __eq__(self, other) -> bool: + if not isinstance(other, CVE): + return False + return self.response == other.response + + @property + def id(self): + return self.response.get("id", "UNKNOWN_CVE_ID").upper() + + @property + def notices_ids(self) -> List[str]: + return self.response.get("notices_ids", []) + + @property + def notices(self) -> List["USN"]: + """Return a list of USN instances from API response 'notices'. + + Cache the value to avoid extra work on multiple calls. 
+ """ + if not hasattr(self, "_notices"): + self._notices = sorted( + [ + USN(self.client, notice) + for notice in self.response.get("notices", []) + if notice and notice.get("id", "").startswith("USN-") + ], + key=lambda n: n.id, + reverse=True, + ) + return self._notices + + @property + def description(self): + return self.response.get("description") + + @property + def packages_status(self) -> Dict[str, CVEPackageStatus]: + """Dict of package status dicts for the current Ubuntu series. + + Top-level keys are source packages names and each value is a + CVEPackageStatus object + """ + if hasattr(self, "_packages_status"): + return self._packages_status # type: ignore + self._packages_status = {} + series = system.get_release_info().series + for package in self.response["packages"]: + for pkg_status in package["statuses"]: + if pkg_status["release_codename"] == series: + self._packages_status[package["name"]] = CVEPackageStatus( + pkg_status + ) + return self._packages_status + + +class USN: + """Class representing USN response from the SecurityClient""" + + def __init__(self, client: UASecurityClient, response: Dict[str, Any]): + self.response = response + self.client = client + + def __eq__(self, other) -> bool: + if not isinstance(other, USN): + return False + return self.response == other.response + + @property + def id(self) -> str: + return self.response.get("id", "UNKNOWN_USN_ID").upper() + + @property + def cves_ids(self) -> List[str]: + """List of CVE IDs related to this USN.""" + return self.response.get("cves_ids", []) + + @property + def cves(self) -> List[CVE]: + """List of CVE instances based on API response 'cves' key. + + Cache the values to avoid extra work for multiple call-sites. + """ + if not hasattr(self, "_cves"): + self._cves = sorted( + [ + CVE(self.client, cve) + for cve in self.response.get("cves", []) + ], + key=lambda n: n.id, + reverse=True, + ) # type: List[CVE] + return self._cves + + @property + def title(self): + return self.response.get("title") + + @property + def references(self): + return self.response.get("references") + + @property + def release_packages(self) -> Dict[str, Dict[str, Dict[str, str]]]: + """Binary package information available for this release. + + + Reformat the USN.release_packages response to key it based on source + package name and related binary package names. + + :return: Dict keyed by source package name. The second-level key will + be binary package names generated from that source package and the + values will be the dict response from USN.release_packages for + that binary package. The binary metadata contains the following + keys: name, version. + Optional additional keys: pocket and component. 
+ """ + if hasattr(self, "_release_packages"): + return self._release_packages + series = system.get_release_info().series + self._release_packages = {} # type: Dict[str, Dict[str, Any]] + # Organize source and binary packages under a common source package key + for pkg in self.response.get("release_packages", {}).get(series, []): + if pkg.get("is_source"): + # Create a "source" key under src_pkg_name with API response + if pkg["name"] in self._release_packages: + if "source" in self._release_packages[pkg["name"]]: + raise exceptions.SecurityAPIMetadataError( + error_msg=( + "{usn} metadata defines duplicate source" + " packages {pkg}" + ).format(usn=self.id, pkg=pkg["name"]), + issue=self.id, + extra_info="", + ) + self._release_packages[pkg["name"]]["source"] = pkg + else: + self._release_packages[pkg["name"]] = {"source": pkg} + else: + # is_source == False or None, then this is a binary package. + # If processed before a source item, the top-level key will + # not exist yet. + # TODO(GH: 1465: determine if this is expected on kern pkgs) + if not pkg.get("source_link"): + raise exceptions.SecurityAPIMetadataError( + error_msg=( + "{issue} metadata does not define release_packages" + " source_link for {bin_pkg}." + ).format(issue=self.id, bin_pkg=pkg["name"]), + issue=self.id, + extra_info="", + ) + elif "/" not in pkg["source_link"]: + raise exceptions.SecurityAPIMetadataError( + error_msg=( + "{issue} metadata has unexpected release_packages" + " source_link value for {bin_pkg}: {link}" + ).format( + issue=self.id, + bin_pkg=pkg["name"], + link=pkg["source_link"], + ), + issue=self.id, + extra_info="", + ) + source_pkg_name = pkg["source_link"].split("/")[-1] + if source_pkg_name not in self._release_packages: + self._release_packages[source_pkg_name] = {} + self._release_packages[source_pkg_name][pkg["name"]] = pkg + return self._release_packages + + +def get_cve_affected_source_packages_status( + cve: CVE, installed_packages: Dict[str, Dict[str, str]] +) -> Dict[str, CVEPackageStatus]: + """Get a dict of any CVEPackageStatuses affecting this Ubuntu release. + + :return: Dict of active CVEPackageStatus keyed by source package names. + """ + affected_pkg_versions = {} + for source_pkg, package_status in cve.packages_status.items(): + if package_status.status == "not-affected": + continue + if source_pkg in installed_packages: + affected_pkg_versions[source_pkg] = package_status + return affected_pkg_versions + + +def query_installed_source_pkg_versions() -> Dict[str, Dict[str, str]]: + """Return a dict of all source packages installed on the system. + + The dict keys will be source package name: "krb5". The value will be a dict + with keys binary_pkg and version. + """ + status_field = "${db:Status-Status}" + out, _err = system.subp( + [ + "dpkg-query", + "-f=${Package},${Source},${Version}," + status_field + "\n", + "-W", + ] + ) + installed_packages = {} # type: Dict[str, Dict[str, str]] + for pkg_line in out.splitlines(): + pkg_name, source_pkg_name, pkg_version, status = pkg_line.split(",") + if not source_pkg_name: + # some package don't define the Source + source_pkg_name = pkg_name + if "installed" not in status: + continue + if source_pkg_name in installed_packages: + installed_packages[source_pkg_name][pkg_name] = pkg_version + else: + installed_packages[source_pkg_name] = {pkg_name: pkg_version} + return installed_packages + + +def get_related_usns(usn, client): + """For a give usn, get the related USNs for it. 
+ + For each CVE associated with the given USN, we capture + other USNs that are related to the CVE. We consider those + USNs related to the original USN. + """ + + # If the usn does not have any associated cves on it, + # we cannot establish a relation between USNs + if not usn.cves: + return [] + + related_usns = {} + for cve in usn.cves: + for related_usn_id in cve.notices_ids: + # We should ignore any other item that is not a USN + # For example, LSNs + if not related_usn_id.startswith("USN-"): + continue + if related_usn_id == usn.id: + continue + if related_usn_id not in related_usns: + related_usns[related_usn_id] = client.get_notice( + notice_id=related_usn_id + ) + + return list(sorted(related_usns.values(), key=lambda x: x.id)) + + +def get_affected_packages_from_cves(cves, installed_packages): + affected_pkgs = {} # type: Dict[str, CVEPackageStatus] + + for cve in cves: + for pkg_name, pkg_status in get_cve_affected_source_packages_status( + cve, installed_packages + ).items(): + if pkg_name not in affected_pkgs: + affected_pkgs[pkg_name] = pkg_status + else: + current_ver = affected_pkgs[pkg_name].fixed_version + if ( + apt.version_compare(current_ver, pkg_status.fixed_version) + > 0 + ): + affected_pkgs[pkg_name] = pkg_status + + return affected_pkgs + + +def get_affected_packages_from_usn(usn, installed_packages): + affected_pkgs = {} # type: Dict[str, CVEPackageStatus] + for pkg_name, pkg_info in usn.release_packages.items(): + if pkg_name not in installed_packages: + continue + + cve_response = defaultdict(str) + cve_response["status"] = "released" + # Here we are assuming that the pocket will be the same one across + # all the different binary packages. + all_pockets = { + pkg_bin_info["pocket"] + for _, pkg_bin_info in pkg_info.items() + if pkg_bin_info.get("pocket") + } + if not all_pockets: + raise exceptions.SecurityAPIMetadataError( + error_msg=( + "{} metadata defines no pocket information for " + "any release packages." + ).format(usn.id), + issue=usn.id, + extra_info="", + ) + cve_response["pocket"] = all_pockets.pop() + + affected_pkgs[pkg_name] = CVEPackageStatus(cve_response=cve_response) + + return affected_pkgs + + +def get_usn_affected_packages_status( + usn: USN, installed_packages: Dict[str, Dict[str, str]] +) -> Dict[str, CVEPackageStatus]: + """Walk CVEs related to a USN and return a dict of all affected packages. + + :return: Dict keyed on source package name, with active CVEPackageStatus + for the current Ubuntu release. + """ + if usn.cves: + return get_affected_packages_from_cves(usn.cves, installed_packages) + else: + return get_affected_packages_from_usn(usn, installed_packages) + + +def override_usn_release_package_status( + pkg_status: CVEPackageStatus, + usn_src_released_pkgs: Dict[str, Dict[str, str]], +) -> CVEPackageStatus: + """Parse release status based on both pkg_status and USN.release_packages. + + Since some source packages in universe are not represented in + CVEPackageStatus, rely on presence of such source packages in + usn_src_released_pkgs to represent package as a "released" status. + + :param pkg_status: the CVEPackageStatus for this source package. + :param usn_src_released_pkgs: The USN.release_packages representing only + this source package. Normally, release_packages would have data on + multiple source packages. 
+ + :return: A copy of pkg_status with its status, description and pocket + overridden from the "source" entry in usn_src_released_pkgs when one + is present. + """ + + usn_pkg_status = copy.deepcopy(pkg_status) + if usn_src_released_pkgs and usn_src_released_pkgs.get("source"): + usn_pkg_status.response["status"] = "released" + usn_pkg_status.response["description"] = usn_src_released_pkgs[ + "source" + ]["version"] + for pkg_name, usn_released_pkg in usn_src_released_pkgs.items(): + # Copy the pocket from any valid binary package + pocket = usn_released_pkg.get("pocket") + if pocket: + usn_pkg_status.response["pocket"] = pocket + break + return usn_pkg_status + + +def group_by_usn_package_status(affected_pkg_status, usn_released_pkgs): + status_groups = {}  # type: Dict[str, List[Tuple[str, CVEPackageStatus]]] + for src_pkg, pkg_status in sorted(affected_pkg_status.items()): + usn_released_src = usn_released_pkgs.get(src_pkg, {}) + usn_pkg_status = override_usn_release_package_status( + pkg_status, usn_released_src + ) + status_group = usn_pkg_status.status.replace("ignored", "deferred") + if status_group not in status_groups: + status_groups[status_group] = [] + status_groups[status_group].append((src_pkg, usn_pkg_status)) + return status_groups + + +def merge_usn_released_binary_package_versions( + usns: List[USN], beta_pockets: Dict[str, bool] +) -> Dict[str, Dict[str, Dict[str, str]]]: + """Walk related USNs, merging the released binary package versions. + + For each USN, iterate over release_packages to collect released binary + package names and required fix version. If multiple related USNs + require different version fixes to the same binary package, track the + maximum version required across all USNs. + + :param usns: List of USN response instances from which to calculate merge. + :param beta_pockets: Dict keyed on service name (esm-infra, esm-apps), + the values of which are True when that pocket is still in beta; + binary packages from beta pockets are excluded from the merge. + + :return: Dict keyed by source package name. Under each source package will + be a dict with binary package names as keys and binary package metadata + as the value. + """ + usn_pkg_versions = {} + for usn in usns: + # Aggregate USN.release_package binary versions into usn_pkg_versions + for src_pkg, binary_pkg_versions in usn.release_packages.items(): + public_bin_pkg_versions = { + bin_pkg_name: bin_pkg_md + for bin_pkg_name, bin_pkg_md in binary_pkg_versions.items() + if False + is beta_pockets.get(bin_pkg_md.get("pocket", "None"), False) + } + if src_pkg not in usn_pkg_versions and public_bin_pkg_versions: + usn_pkg_versions[src_pkg] = public_bin_pkg_versions + elif src_pkg in usn_pkg_versions: + # Since src_pkg exists, only record this USN's binary version + # when it is greater than the previous version in usn_src_pkg.
+ usn_src_pkg = usn_pkg_versions[src_pkg] + for bin_pkg, binary_pkg_md in public_bin_pkg_versions.items(): + if bin_pkg not in usn_src_pkg: + usn_src_pkg[bin_pkg] = binary_pkg_md + else: + prev_version = usn_src_pkg[bin_pkg]["version"] + current_version = binary_pkg_md["version"] + if ( + apt.version_compare(current_version, prev_version) + > 0 + ): + # binary_version is greater than prev_version + usn_src_pkg[bin_pkg] = binary_pkg_md + return usn_pkg_versions + + +def _check_cve_fixed_by_livepatch( + issue_id: str, +) -> Tuple[Optional[FixStatus], Optional[str]]: + # Check livepatch status for CVE in fixes before checking CVE api + lp_status = livepatch.status() + if ( + lp_status is not None + and lp_status.livepatch is not None + and lp_status.livepatch.fixes is not None + ): + for fix in lp_status.livepatch.fixes: + if fix.name == issue_id.lower() and fix.patched: + version = lp_status.livepatch.version or "N/A" + return (FixStatus.SYSTEM_NON_VULNERABLE, version) + + return (None, None) def status_message(status, pocket_source: Optional[str] = None): diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/api/u/pro/security/fix/_common/execute/v1.py ubuntu-advantage-tools-31.2~23.10/uaclient/api/u/pro/security/fix/_common/execute/v1.py --- ubuntu-advantage-tools-30~23.10/uaclient/api/u/pro/security/fix/_common/execute/v1.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/api/u/pro/security/fix/_common/execute/v1.py 2024-01-18 17:34:13.000000000 +0000 @@ -1,7 +1,8 @@ from typing import List, Optional from uaclient import apt, messages, util -from uaclient.api.u.pro.security.fix import ( +from uaclient.api.u.pro.security.fix._common import FixStatus, status_message +from uaclient.api.u.pro.security.fix._common.plan.v1 import ( FixPlanAptUpgradeStep, FixPlanAttachStep, FixPlanEnableStep, @@ -11,9 +12,7 @@ FixPlanWarningPackageCannotBeInstalled, FixPlanWarningSecurityIssueNotFixed, ) -from uaclient.api.u.pro.security.fix._common import status_message from uaclient.data_types import DataObject, Field, StringDataValue, data_list -from uaclient.security import FixStatus class UpgradedPackage(DataObject): diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/api/u/pro/security/fix/_common/plan/v1.py ubuntu-advantage-tools-31.2~23.10/uaclient/api/u/pro/security/fix/_common/plan/v1.py --- ubuntu-advantage-tools-30~23.10/uaclient/api/u/pro/security/fix/_common/plan/v1.py 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/api/u/pro/security/fix/_common/plan/v1.py 2024-01-18 17:34:13.000000000 +0000 @@ -0,0 +1,946 @@ +import enum +import re +from collections import defaultdict +from typing import Any, Dict, List, NamedTuple, Optional, Tuple + +from uaclient import apt, exceptions, messages +from uaclient.api.u.pro.security.fix._common import ( + CVE, + CVE_OR_USN_REGEX, + USN, + BinaryPackageFix, + CVEPackageStatus, + FixStatus, + UASecurityClient, + _check_cve_fixed_by_livepatch, + get_affected_packages_from_usn, + get_cve_affected_source_packages_status, + get_related_usns, + group_by_usn_package_status, + merge_usn_released_binary_package_versions, + query_installed_source_pkg_versions, +) +from uaclient.api.u.pro.status.enabled_services.v1 import _enabled_services +from uaclient.api.u.pro.status.is_attached.v1 import _is_attached +from uaclient.config import UAConfig +from uaclient.contract import ContractExpiryStatus, get_contract_expiry_status +from uaclient.data_types import ( + DataObject, + Field, + IntDataValue, + StringDataValue, 
+ data_list, +) + +STANDARD_UPDATES_POCKET = "standard-updates" +ESM_INFRA_POCKET = "esm-infra" +ESM_APPS_POCKET = "esm-apps" + +UnfixedPackage = NamedTuple( + "UnfixedPackage", + [ + ("source_package", str), + ("binary_package", str), + ("version", Optional[str]), + ], +) + + +@enum.unique +class FixStepType(enum.Enum): + ATTACH = "attach" + ENABLE = "enable" + NOOP = "no-op" + APT_UPGRADE = "apt-upgrade" + + +@enum.unique +class FixPlanNoOpStatus(enum.Enum): + ALREADY_FIXED = "cve-already-fixed" + NOT_AFFECTED = "system-not-affected" + FIXED_BY_LIVEPATCH = "cve-fixed-by-livepatch" + + +@enum.unique +class FixPlanAttachReason(enum.Enum): + EXPIRED_CONTRACT = "expired-contract-token" + REQUIRED_PRO_SERVICE = "required-pro-service" + + +@enum.unique +class FixWarningType(enum.Enum): + PACKAGE_CANNOT_BE_INSTALLED = "package-cannot-be-installed" + SECURITY_ISSUE_NOT_FIXED = "security-issue-not-fixed" + + +class FixPlanStep(DataObject): + fields = [ + Field("operation", StringDataValue), + Field("order", IntDataValue), + ] + + def __init__(self, *, operation: str, order: int): + self.operation = operation + self.order = order + + +class AptUpgradeData(DataObject): + fields = [ + Field("binary_packages", data_list(StringDataValue)), + Field("source_packages", data_list(StringDataValue)), + Field("pocket", StringDataValue), + ] + + def __init__( + self, + *, + binary_packages: List[str], + source_packages: List[str], + pocket: str + ): + self.binary_packages = binary_packages + self.source_packages = source_packages + self.pocket = pocket + + +class FixPlanAptUpgradeStep(FixPlanStep): + fields = [ + Field("operation", StringDataValue), + Field("data", AptUpgradeData), + Field("order", IntDataValue), + ] + + def __init__(self, *, data: AptUpgradeData, order: int): + super().__init__(operation=FixStepType.APT_UPGRADE.value, order=order) + self.data = data + + +class AttachData(DataObject): + fields = [ + Field("reason", StringDataValue), + Field("required_service", StringDataValue), + Field("source_packages", data_list(StringDataValue)), + ] + + def __init__( + self, *, reason: str, source_packages: List[str], required_service: str + ): + self.reason = reason + self.source_packages = source_packages + self.required_service = required_service + + +class FixPlanAttachStep(FixPlanStep): + fields = [ + Field("operation", StringDataValue), + Field("data", AttachData), + Field("order", IntDataValue), + ] + + def __init__(self, *, data: AttachData, order: int): + super().__init__(operation=FixStepType.ATTACH.value, order=order) + self.data = data + + +class EnableData(DataObject): + fields = [ + Field("service", StringDataValue), + Field("source_packages", data_list(StringDataValue)), + ] + + def __init__(self, *, service: str, source_packages: List[str]): + self.service = service + self.source_packages = source_packages + + +class FixPlanEnableStep(FixPlanStep): + fields = [ + Field("operation", StringDataValue), + Field("data", EnableData), + Field("order", IntDataValue), + ] + + def __init__(self, *, data: EnableData, order: int): + super().__init__(operation=FixStepType.ENABLE.value, order=order) + self.data = data + + +class NoOpData(DataObject): + fields = [ + Field("status", StringDataValue), + ] + + def __init__(self, *, status: str): + self.status = status + + +class FixPlanNoOpStep(FixPlanStep): + fields = [ + Field("operation", StringDataValue), + Field("data", NoOpData), + Field("order", IntDataValue), + ] + + def __init__(self, *, data: NoOpData, order: int): + 
super().__init__(operation=FixStepType.NOOP.value, order=order) + self.data = data + + +class NoOpLivepatchFixData(NoOpData): + fields = [ + Field("status", StringDataValue), + Field("patch_version", StringDataValue), + ] + + def __init__(self, *, status: str, patch_version: str): + super().__init__(status=status) + self.patch_version = patch_version + + +class FixPlanNoOpLivepatchFixStep(FixPlanNoOpStep): + fields = [ + Field("operation", StringDataValue), + Field("data", NoOpLivepatchFixData), + Field("order", IntDataValue), + ] + + def __init__(self, *, data: NoOpLivepatchFixData, order: int): + super().__init__(data=data, order=order) + + +class NoOpAlreadyFixedData(NoOpData): + fields = [ + Field("status", StringDataValue), + Field("source_packages", data_list(StringDataValue)), + Field("pocket", StringDataValue), + ] + + def __init__( + self, *, status: str, source_packages: List[str], pocket: str + ): + super().__init__(status=status) + self.source_packages = source_packages + self.pocket = pocket + + +class FixPlanNoOpAlreadyFixedStep(FixPlanNoOpStep): + fields = [ + Field("operation", StringDataValue), + Field("data", NoOpLivepatchFixData), + Field("order", IntDataValue), + ] + + def __init__(self, *, data: NoOpAlreadyFixedData, order: int): + super().__init__(data=data, order=order) + + +class FixPlanWarning(DataObject): + fields = [ + Field("warning_type", StringDataValue), + Field("order", IntDataValue), + ] + + def __init__(self, *, warning_type: str, order: int): + self.warning_type = warning_type + self.order = order + + +class SecurityIssueNotFixedData(DataObject): + fields = [ + Field("source_packages", data_list(StringDataValue)), + Field("status", StringDataValue), + ] + + def __init__(self, *, source_packages: List[str], status: str): + self.source_packages = source_packages + self.status = status + + +class FixPlanWarningSecurityIssueNotFixed(FixPlanWarning): + fields = [ + Field("warning_type", StringDataValue), + Field("order", IntDataValue), + Field("data", SecurityIssueNotFixedData), + ] + + def __init__(self, *, order: int, data: SecurityIssueNotFixedData): + super().__init__( + warning_type=FixWarningType.SECURITY_ISSUE_NOT_FIXED.value, + order=order, + ) + self.data = data + + +class PackageCannotBeInstalledData(DataObject): + fields = [ + Field("binary_package", StringDataValue), + Field("binary_package_version", StringDataValue), + Field("source_package", StringDataValue), + Field("related_source_packages", data_list(StringDataValue)), + Field("pocket", StringDataValue), + ] + + def __init__( + self, + *, + binary_package: str, + binary_package_version: str, + source_package: str, + pocket: str, + related_source_packages: List[str] + ): + self.source_package = source_package + self.binary_package = binary_package + self.binary_package_version = binary_package_version + self.pocket = pocket + self.related_source_packages = related_source_packages + + +class FixPlanWarningPackageCannotBeInstalled(FixPlanWarning): + fields = [ + Field("warning_type", StringDataValue), + Field("order", IntDataValue), + Field("data", SecurityIssueNotFixedData), + ] + + def __init__(self, *, order: int, data: PackageCannotBeInstalledData): + super().__init__( + warning_type=FixWarningType.PACKAGE_CANNOT_BE_INSTALLED.value, + order=order, + ) + self.data = data + + +class FixPlanError(DataObject): + fields = [ + Field("msg", StringDataValue), + Field("code", StringDataValue, required=False), + ] + + def __init__(self, *, msg: str, code: Optional[str]): + self.msg = msg + self.code = 
code + + +class AdditionalData(DataObject): + pass + + +class USNAdditionalData(AdditionalData): + + fields = [ + Field("associated_cves", data_list(StringDataValue)), + Field("associated_launchpad_bugs", data_list(StringDataValue)), + ] + + def __init__( + self, + *, + associated_cves: List[str], + associated_launchpad_bugs: List[str] + ): + self.associated_cves = associated_cves + self.associated_launchpad_bugs = associated_launchpad_bugs + + +class FixPlanResult(DataObject): + fields = [ + Field("title", StringDataValue), + Field("description", StringDataValue, required=False), + Field("expected_status", StringDataValue), + Field("affected_packages", data_list(StringDataValue), required=False), + Field("plan", data_list(FixPlanStep)), + Field("warnings", data_list(FixPlanWarning), required=False), + Field("error", FixPlanError, required=False), + Field("additional_data", AdditionalData, required=False), + ] + + def __init__( + self, + *, + title: str, + expected_status: str, + plan: List[FixPlanStep], + warnings: List[FixPlanWarning], + error: Optional[FixPlanError], + additional_data: AdditionalData, + description: Optional[str] = None, + affected_packages: Optional[List[str]] = None + ): + self.title = title + self.description = description + self.expected_status = expected_status + self.affected_packages = affected_packages + self.plan = plan + self.warnings = warnings + self.error = error + self.additional_data = additional_data + + +class FixPlanUSNResult(DataObject): + fields = [ + Field("target_usn_plan", FixPlanResult), + Field("related_usns_plan", data_list(FixPlanResult), required=False), + ] + + def __init__( + self, + *, + target_usn_plan: FixPlanResult, + related_usns_plan: List[FixPlanResult] + ): + self.target_usn_plan = target_usn_plan + self.related_usns_plan = related_usns_plan + + +class FixPlan: + def __init__( + self, + title: str, + description: Optional[str], + affected_packages: Optional[List[str]] = None, + ): + self.order = 1 + self.title = title + self.description = description + self.affected_packages = affected_packages + self.fix_steps = [] # type: List[FixPlanStep] + self.fix_warnings = [] # type: List[FixPlanWarning] + self.error = None # type: Optional[FixPlanError] + self.additional_data = AdditionalData() + + def register_step( + self, + operation: FixStepType, + data: Dict[str, Any], + ): + # just to make mypy happy + fix_step = None # type: Optional[FixPlanStep] + + if operation == FixStepType.ATTACH: + fix_step = FixPlanAttachStep( + order=self.order, data=AttachData.from_dict(data) + ) + elif operation == FixStepType.ENABLE: + fix_step = FixPlanEnableStep( + order=self.order, data=EnableData.from_dict(data) + ) + elif operation == FixStepType.NOOP: + if "patch_version" in data: + fix_step = FixPlanNoOpLivepatchFixStep( + order=self.order, data=NoOpLivepatchFixData.from_dict(data) + ) + elif "source_packages" in data: + fix_step = FixPlanNoOpAlreadyFixedStep( + order=self.order, data=NoOpAlreadyFixedData.from_dict(data) + ) + else: + fix_step = FixPlanNoOpStep( + order=self.order, data=NoOpData.from_dict(data) + ) + else: + fix_step = FixPlanAptUpgradeStep( + order=self.order, data=AptUpgradeData.from_dict(data) + ) + + self.fix_steps.append(fix_step) + self.order += 1 + + def register_warning( + self, warning_type: FixWarningType, data: Dict[str, Any] + ): + fix_warning = None # type: Optional[FixPlanWarning] + + if warning_type == FixWarningType.SECURITY_ISSUE_NOT_FIXED: + fix_warning = FixPlanWarningSecurityIssueNotFixed( + order=self.order, + 
data=SecurityIssueNotFixedData.from_dict(data), + ) + else: + fix_warning = FixPlanWarningPackageCannotBeInstalled( + order=self.order, + data=PackageCannotBeInstalledData.from_dict(data), + ) + + self.fix_warnings.append(fix_warning) + self.order += 1 + + def register_error(self, error_msg: str, error_code: Optional[str]): + self.error = FixPlanError(msg=error_msg, code=error_code) + + def register_additional_data(self, additional_data: Dict[str, Any]): + self.additional_data = AdditionalData(**additional_data) + + def _get_status(self) -> str: + if self.error: + return "error" + + if ( + len(self.fix_steps) == 1 + and isinstance(self.fix_steps[0], FixPlanNoOpStep) + and self.fix_steps[0].data.status == "system-not-affected" + ): + return str(FixStatus.SYSTEM_NOT_AFFECTED) + elif self.fix_warnings: + return str(FixStatus.SYSTEM_STILL_VULNERABLE) + else: + return str(FixStatus.SYSTEM_NON_VULNERABLE) + + @property + def fix_plan(self): + return FixPlanResult( + title=self.title, + description=self.description, + expected_status=self._get_status(), + affected_packages=self.affected_packages, + plan=self.fix_steps, + warnings=self.fix_warnings, + error=self.error, + additional_data=self.additional_data, + ) + + +class USNFixPlan(FixPlan): + def register_additional_data(self, additional_data: Dict[str, Any]): + self.additional_data = USNAdditionalData(**additional_data) + + +def get_fix_plan( + title: str, + description: Optional[str] = None, + affected_packages: Optional[List[str]] = None, +): + if not title or "cve" in title.lower(): + return FixPlan( + title=title, + description=description, + affected_packages=affected_packages, + ) + + return USNFixPlan( + title=title, + description=description, + affected_packages=affected_packages, + ) + + +def _get_cve_data( + issue_id: str, + client: UASecurityClient, +) -> Tuple[CVE, List[USN]]: + try: + cve = client.get_cve(cve_id=issue_id) + usns = client.get_notices(details=issue_id) + except exceptions.SecurityAPIError as e: + if e.code == 404: + raise exceptions.SecurityIssueNotFound(issue_id=issue_id) + raise e + + return cve, usns + + +def _get_usn_data( + issue_id: str, client: UASecurityClient +) -> Tuple[USN, List[USN]]: + try: + usn = client.get_notice(notice_id=issue_id) + usns = get_related_usns(usn, client) + except exceptions.SecurityAPIError as e: + if e.code == 404: + raise exceptions.SecurityIssueNotFound(issue_id=issue_id) + raise e + + if not usn.response["release_packages"]: + # Since usn.release_packages filters to our current release only + # check overall metadata and error if empty. 
+ raise exceptions.SecurityAPIMetadataError( + error_msg="metadata defines no fixed package versions.", + issue=issue_id, + extra_info="", + ) + + return usn, usns + + +def _get_upgradable_pkgs( + binary_pkgs: List[BinaryPackageFix], + pocket: str, +) -> Tuple[List[str], List[UnfixedPackage]]: + upgrade_pkgs = [] + unfixed_pkgs = [] + + for binary_pkg in sorted(binary_pkgs): + check_esm_cache = ( + pocket != messages.SECURITY_UBUNTU_STANDARD_UPDATES_POCKET + ) + candidate_version = apt.get_pkg_candidate_version( + binary_pkg.binary_pkg, check_esm_cache=check_esm_cache + ) + if ( + candidate_version + and apt.version_compare( + binary_pkg.fixed_version, candidate_version + ) + <= 0 + ): + upgrade_pkgs.append(binary_pkg.binary_pkg) + else: + unfixed_pkgs.append( + UnfixedPackage( + source_package=binary_pkg.source_pkg, + binary_package=binary_pkg.binary_pkg, + version=binary_pkg.fixed_version, + ) + ) + + return upgrade_pkgs, unfixed_pkgs + + +def _get_upgradable_package_candidates_by_pocket( + pkg_status_group: List[Tuple[str, CVEPackageStatus]], + usn_released_pkgs: Dict[str, Dict[str, Dict[str, str]]], + installed_pkgs: Dict[str, Dict[str, str]], +): + binary_pocket_pkgs = defaultdict(list) + src_pocket_pkgs = defaultdict(list) + + for src_pkg, pkg_status in pkg_status_group: + src_pocket_pkgs[pkg_status.pocket_source].append((src_pkg, pkg_status)) + for binary_pkg, version in installed_pkgs[src_pkg].items(): + usn_released_src = usn_released_pkgs.get(src_pkg, {}) + if binary_pkg not in usn_released_src: + continue + fixed_version = usn_released_src.get(binary_pkg, {}).get( + "version", "" + ) + + if apt.version_compare(fixed_version, version) > 0: + binary_pocket_pkgs[pkg_status.pocket_source].append( + BinaryPackageFix( + source_pkg=src_pkg, + binary_pkg=binary_pkg, + fixed_version=fixed_version, + ) + ) + + return src_pocket_pkgs, binary_pocket_pkgs + + +def _get_cve_description( + cve: CVE, + installed_pkgs: Dict[str, Dict[str, str]], +): + if not cve.notices: + return cve.description + + for notice in cve.notices: + usn_pkgs = notice.release_packages.keys() + for pkg in usn_pkgs: + if pkg in installed_pkgs: + return notice.title + + return cve.notices[0].title + + +def _fix_plan_cve(issue_id: str, cfg: UAConfig) -> FixPlanResult: + livepatch_cve_status, patch_version = _check_cve_fixed_by_livepatch( + issue_id + ) + + if livepatch_cve_status: + fix_plan = get_fix_plan(title=issue_id) + fix_plan.register_step( + operation=FixStepType.NOOP, + data={ + "status": FixPlanNoOpStatus.FIXED_BY_LIVEPATCH.value, + "patch_version": patch_version, + }, + ) + return fix_plan.fix_plan + + client = UASecurityClient(cfg=cfg) + installed_pkgs = query_installed_source_pkg_versions() + + try: + cve, usns = _get_cve_data(issue_id=issue_id, client=client) + except ( + exceptions.SecurityIssueNotFound, + exceptions.SecurityAPIError, + ) as e: + fix_plan = get_fix_plan(title=issue_id) + fix_plan.register_error(error_msg=e.msg, error_code=e.msg_code) + return fix_plan.fix_plan + + affected_pkg_status = get_cve_affected_source_packages_status( + cve=cve, installed_packages=installed_pkgs + ) + usn_released_pkgs = merge_usn_released_binary_package_versions( + usns, beta_pockets={} + ) + + cve_description = _get_cve_description(cve, installed_pkgs) + + return _generate_fix_plan( + issue_id=issue_id, + issue_description=cve_description, + affected_pkg_status=affected_pkg_status, + usn_released_pkgs=usn_released_pkgs, + installed_pkgs=installed_pkgs, + cfg=cfg, + ) + + +def _fix_plan_usn(issue_id: str, cfg: 
UAConfig) -> FixPlanUSNResult: + client = UASecurityClient(cfg=cfg) + installed_pkgs = query_installed_source_pkg_versions() + + try: + usn, related_usns = _get_usn_data(issue_id=issue_id, client=client) + except ( + exceptions.SecurityIssueNotFound, + exceptions.SecurityAPIError, + ) as e: + fix_plan = get_fix_plan(title=issue_id) + fix_plan.register_error(error_msg=e.msg, error_code=e.msg_code) + return FixPlanUSNResult( + target_usn_plan=fix_plan.fix_plan, + related_usns_plan=[], + ) + + affected_pkg_status = get_affected_packages_from_usn( + usn=usn, installed_packages=installed_pkgs + ) + usn_released_pkgs = merge_usn_released_binary_package_versions( + [usn], beta_pockets={} + ) + additional_data = { + "associated_cves": [] if not usn.cves_ids else usn.cves_ids, + "associated_launchpad_bugs": [] + if not usn.references + else usn.references, + } + + target_usn_plan = _generate_fix_plan( + issue_id=issue_id, + issue_description=usn.title, + affected_pkg_status=affected_pkg_status, + usn_released_pkgs=usn_released_pkgs, + installed_pkgs=installed_pkgs, + cfg=cfg, + additional_data=additional_data, + ) + + related_usns_plan = [] # type: List[FixPlanResult] + for usn in related_usns: + affected_pkg_status = get_affected_packages_from_usn( + usn=usn, installed_packages=installed_pkgs + ) + usn_released_pkgs = merge_usn_released_binary_package_versions( + [usn], beta_pockets={} + ) + additional_data = { + "associated_cves": [] if not usn.cves_ids else usn.cves_ids, + "associated_launchpad_bugs": [] + if not usn.references + else usn.references, + } + + related_usns_plan.append( + _generate_fix_plan( + issue_id=usn.id, + issue_description=usn.title, + affected_pkg_status=affected_pkg_status, + usn_released_pkgs=usn_released_pkgs, + installed_pkgs=installed_pkgs, + cfg=cfg, + additional_data=additional_data, + ) + ) + + return FixPlanUSNResult( + target_usn_plan=target_usn_plan, + related_usns_plan=related_usns_plan, + ) + + +def fix_plan_cve(issue_id: str, cfg: UAConfig) -> FixPlanResult: + if not issue_id or not re.match(CVE_OR_USN_REGEX, issue_id): + fix_plan = get_fix_plan(title=issue_id) + msg = messages.INVALID_SECURITY_ISSUE.format(issue_id=issue_id) + fix_plan.register_error(error_msg=msg.msg, error_code=msg.name) + return fix_plan.fix_plan + + issue_id = issue_id.upper() + return _fix_plan_cve(issue_id, cfg) + + +def fix_plan_usn(issue_id: str, cfg: UAConfig) -> FixPlanUSNResult: + if not issue_id or not re.match(CVE_OR_USN_REGEX, issue_id): + fix_plan = get_fix_plan(title=issue_id) + msg = messages.INVALID_SECURITY_ISSUE.format(issue_id=issue_id) + fix_plan.register_error(error_msg=msg.msg, error_code=msg.name) + return FixPlanUSNResult( + target_usn_plan=fix_plan.fix_plan, + related_usns_plan=[], + ) + + issue_id = issue_id.upper() + return _fix_plan_usn(issue_id, cfg) + + +def get_pocket_short_name(pocket: str): + if pocket == messages.SECURITY_UBUNTU_STANDARD_UPDATES_POCKET: + return STANDARD_UPDATES_POCKET + elif pocket == messages.SECURITY_UA_INFRA_POCKET: + return ESM_INFRA_POCKET + elif pocket == messages.SECURITY_UA_APPS_POCKET: + return ESM_APPS_POCKET + else: + return pocket + + +def _generate_fix_plan( + *, + issue_id: str, + issue_description: str, + affected_pkg_status: Dict[str, CVEPackageStatus], + usn_released_pkgs: Dict[str, Dict[str, Dict[str, str]]], + installed_pkgs: Dict[str, Dict[str, str]], + cfg: UAConfig, + additional_data=None +) -> FixPlanResult: + count = len(affected_pkg_status) + src_pocket_pkgs = defaultdict(list) + + fix_plan = get_fix_plan( + 
title=issue_id, + description=issue_description, + affected_packages=sorted(list(affected_pkg_status.keys())), + ) + + if additional_data: + fix_plan.register_additional_data(additional_data) + + if count == 0: + fix_plan.register_step( + operation=FixStepType.NOOP, + data={"status": FixPlanNoOpStatus.NOT_AFFECTED.value}, + ) + return fix_plan.fix_plan + + pkg_status_groups = group_by_usn_package_status( + affected_pkg_status, usn_released_pkgs + ) + + for status_value, pkg_status_group in sorted(pkg_status_groups.items()): + if status_value != "released": + fix_plan.register_warning( + warning_type=FixWarningType.SECURITY_ISSUE_NOT_FIXED, + data={ + "source_packages": [ + src_pkg for src_pkg, _ in pkg_status_group + ], + "status": status_value, + }, + ) + else: + ( + src_pocket_pkgs, + binary_pocket_pkgs, + ) = _get_upgradable_package_candidates_by_pocket( + pkg_status_group, + usn_released_pkgs, + installed_pkgs, + ) + + if not src_pocket_pkgs: + return fix_plan.fix_plan + + for pocket in [ + messages.SECURITY_UBUNTU_STANDARD_UPDATES_POCKET, + messages.SECURITY_UA_INFRA_POCKET, + messages.SECURITY_UA_APPS_POCKET, + ]: + pkg_src_group = src_pocket_pkgs[pocket] + binary_pkgs = binary_pocket_pkgs[pocket] + source_pkgs = [src_pkg for src_pkg, _ in pkg_src_group] + pocket_name = get_pocket_short_name(pocket) + + if not binary_pkgs: + if source_pkgs: + fix_plan.register_step( + operation=FixStepType.NOOP, + data={ + "status": FixPlanNoOpStatus.ALREADY_FIXED.value, + "source_packages": source_pkgs, + "pocket": pocket_name, + }, + ) + continue + + upgrade_pkgs, unfixed_pkgs = _get_upgradable_pkgs(binary_pkgs, pocket) + + if unfixed_pkgs: + for unfixed_pkg in unfixed_pkgs: + fix_plan.register_warning( + warning_type=FixWarningType.PACKAGE_CANNOT_BE_INSTALLED, + data={ + "binary_package": unfixed_pkg.binary_package, + "binary_package_version": unfixed_pkg.version, + "source_package": unfixed_pkg.source_package, + "related_source_packages": source_pkgs, + "pocket": pocket_name, + }, + ) + + if pocket != messages.SECURITY_UBUNTU_STANDARD_UPDATES_POCKET: + if pocket == messages.SECURITY_UA_INFRA_POCKET: + service_to_check = "esm-infra" + else: + service_to_check = "esm-apps" + + if not _is_attached(cfg).is_attached: + fix_plan.register_step( + operation=FixStepType.ATTACH, + data={ + "reason": "required-pro-service", + "source_packages": source_pkgs, + "required_service": service_to_check, + }, + ) + else: + contract_expiry_status, _ = get_contract_expiry_status(cfg) + if contract_expiry_status != ContractExpiryStatus.ACTIVE: + fix_plan.register_step( + operation=FixStepType.ATTACH, + data={ + "reason": FixPlanAttachReason.EXPIRED_CONTRACT.value, # noqa + "source_packages": source_pkgs, + }, + ) + + enabled_services = _enabled_services(cfg).enabled_services or [] + enabled_services_names = ( + [service.name for service in enabled_services] + if enabled_services + else [] + ) + if service_to_check not in enabled_services_names: + fix_plan.register_step( + operation=FixStepType.ENABLE, + data={ + "service": service_to_check, + "source_packages": source_pkgs, + }, + ) + + fix_plan.register_step( + operation=FixStepType.APT_UPGRADE, + data={ + "binary_packages": upgrade_pkgs, + "source_packages": source_pkgs, + "pocket": pocket_name, + }, + ) + + return fix_plan.fix_plan diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/api/u/pro/security/fix/cve/execute/v1.py ubuntu-advantage-tools-31.2~23.10/uaclient/api/u/pro/security/fix/cve/execute/v1.py --- 
ubuntu-advantage-tools-30~23.10/uaclient/api/u/pro/security/fix/cve/execute/v1.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/api/u/pro/security/fix/cve/execute/v1.py 2024-01-18 17:34:13.000000000 +0000 @@ -2,9 +2,19 @@ from uaclient.api.api import APIEndpoint from uaclient.api.data_types import AdditionalInfo -from uaclient.api.u.pro.security.fix._common import get_expected_overall_status -from uaclient.api.u.pro.security.fix._common.execute.v1 import ( +from uaclient.api.u.pro.security.fix._common import ( + FixStatus, + get_expected_overall_status, +) + +# Some of these imports are intentionally not used in this module. +# The rationale is that we want users to import such Data Objects +# directly from the associated endpoints and not through the _common module +from uaclient.api.u.pro.security.fix._common.execute.v1 import ( # noqa: F401 + FailedUpgrade, + FixExecuteError, FixExecuteResult, + UpgradedPackage, _execute_fix, ) from uaclient.api.u.pro.security.fix.cve.plan.v1 import ( @@ -13,7 +23,6 @@ ) from uaclient.config import UAConfig from uaclient.data_types import DataObject, Field, StringDataValue, data_list -from uaclient.security import FixStatus class CVEFixExecuteOptions(DataObject): diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/api/u/pro/security/fix/cve/plan/v1.py ubuntu-advantage-tools-31.2~23.10/uaclient/api/u/pro/security/fix/cve/plan/v1.py --- ubuntu-advantage-tools-30~23.10/uaclient/api/u/pro/security/fix/cve/plan/v1.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/api/u/pro/security/fix/cve/plan/v1.py 2024-01-18 17:34:13.000000000 +0000 @@ -2,8 +2,24 @@ from uaclient.api.api import APIEndpoint from uaclient.api.data_types import AdditionalInfo -from uaclient.api.u.pro.security.fix import FixPlanResult, fix_plan_cve from uaclient.api.u.pro.security.fix._common import get_expected_overall_status + +# Some of these imports are intentionally not used in this module. +# The rationale is that we want users to import such Data Objects +# directly from the associated endpoints and not through the _common module +from uaclient.api.u.pro.security.fix._common.plan.v1 import ( # noqa: F401 + AptUpgradeData, + AttachData, + EnableData, + FixPlanError, + FixPlanResult, + FixPlanStep, + FixPlanWarning, + NoOpData, + PackageCannotBeInstalledData, + SecurityIssueNotFixedData, + fix_plan_cve, +) from uaclient.config import UAConfig from uaclient.data_types import DataObject, Field, StringDataValue, data_list diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/api/u/pro/security/fix/usn/execute/v1.py ubuntu-advantage-tools-31.2~23.10/uaclient/api/u/pro/security/fix/usn/execute/v1.py --- ubuntu-advantage-tools-30~23.10/uaclient/api/u/pro/security/fix/usn/execute/v1.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/api/u/pro/security/fix/usn/execute/v1.py 2024-01-18 17:34:13.000000000 +0000 @@ -2,9 +2,19 @@ from uaclient.api.api import APIEndpoint from uaclient.api.data_types import AdditionalInfo -from uaclient.api.u.pro.security.fix._common import get_expected_overall_status -from uaclient.api.u.pro.security.fix._common.execute.v1 import ( +from uaclient.api.u.pro.security.fix._common import ( + FixStatus, + get_expected_overall_status, +) + +# Some of these imports are intentionally not used in this module. 
+# The rationale is that we want users to import such Data Objects +# directly from the associated endpoints and not through the _common module +from uaclient.api.u.pro.security.fix._common.execute.v1 import ( # noqa: F401 + FailedUpgrade, + FixExecuteError, FixExecuteResult, + UpgradedPackage, _execute_fix, ) from uaclient.api.u.pro.security.fix.usn.plan.v1 import ( @@ -13,7 +23,6 @@ ) from uaclient.config import UAConfig from uaclient.data_types import DataObject, Field, StringDataValue, data_list -from uaclient.security import FixStatus class USNFixExecuteOptions(DataObject): diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/api/u/pro/security/fix/usn/plan/v1.py ubuntu-advantage-tools-31.2~23.10/uaclient/api/u/pro/security/fix/usn/plan/v1.py --- ubuntu-advantage-tools-30~23.10/uaclient/api/u/pro/security/fix/usn/plan/v1.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/api/u/pro/security/fix/usn/plan/v1.py 2024-01-18 17:34:13.000000000 +0000 @@ -2,8 +2,25 @@ from uaclient.api.api import APIEndpoint from uaclient.api.data_types import AdditionalInfo -from uaclient.api.u.pro.security.fix import FixPlanUSNResult, fix_plan_usn from uaclient.api.u.pro.security.fix._common import get_expected_overall_status + +# Some of these imports are intentionally not used in this module. +# The rationale is that we want users to import such Data Objects +# directly from the associated endpoints and not through the _common module +from uaclient.api.u.pro.security.fix._common.plan.v1 import ( # noqa: F401 + AptUpgradeData, + AttachData, + EnableData, + FixPlanError, + FixPlanResult, + FixPlanStep, + FixPlanUSNResult, + FixPlanWarning, + NoOpData, + PackageCannotBeInstalledData, + SecurityIssueNotFixedData, + fix_plan_usn, +) from uaclient.config import UAConfig from uaclient.data_types import DataObject, Field, StringDataValue, data_list diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/api/u/pro/security/status/reboot_required/v1.py ubuntu-advantage-tools-31.2~23.10/uaclient/api/u/pro/security/status/reboot_required/v1.py --- ubuntu-advantage-tools-30~23.10/uaclient/api/u/pro/security/status/reboot_required/v1.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/api/u/pro/security/status/reboot_required/v1.py 2024-01-18 17:34:13.000000000 +0000 @@ -1,7 +1,7 @@ from enum import Enum from typing import List, Optional -from uaclient import livepatch +from uaclient import exceptions, livepatch from uaclient.api.api import APIEndpoint from uaclient.api.data_types import AdditionalInfo from uaclient.config import UAConfig @@ -92,7 +92,12 @@ return RebootStatus.REBOOT_REQUIRED our_kernel_version = get_kernel_info().proc_version_signature_version - lp_status = livepatch.status() + + try: + lp_status = livepatch.status() + except exceptions.ProcessExecutionError: + return RebootStatus.REBOOT_REQUIRED + if ( lp_status is not None and our_kernel_version is not None diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/api/u/pro/status/enabled_services/v1.py ubuntu-advantage-tools-31.2~23.10/uaclient/api/u/pro/status/enabled_services/v1.py --- ubuntu-advantage-tools-30~23.10/uaclient/api/u/pro/status/enabled_services/v1.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/api/u/pro/status/enabled_services/v1.py 2024-01-18 17:34:13.000000000 +0000 @@ -1,7 +1,7 @@ from typing import List, Optional from uaclient.api.api import APIEndpoint -from uaclient.api.data_types import AdditionalInfo +from uaclient.api.data_types 
import AdditionalInfo, ErrorWarningObject from uaclient.api.u.pro.status.is_attached.v1 import _is_attached from uaclient.config import UAConfig from uaclient.data_types import ( @@ -53,16 +53,21 @@ return EnabledServicesResult(enabled_services=[]) enabled_services = [] # type: List[EnabledService] + warnings = [] # type: List[ErrorWarningObject] for ent_cls in ENTITLEMENT_CLASSES: ent = ent_cls(cfg) - if ent.user_facing_status()[0] == UserFacingStatus.ACTIVE: - enabled_service = EnabledService(name=ent.name) + ent_status, details = ent.user_facing_status() + + if ent_status in (UserFacingStatus.ACTIVE, UserFacingStatus.WARNING): + ent_name = ent.presentation_name + enabled_service = EnabledService(name=ent_name) + for _, variant_cls in ent.variants.items(): variant = variant_cls(cfg) if variant.user_facing_status()[0] == UserFacingStatus.ACTIVE: enabled_service = EnabledService( - name=ent.name, + name=ent_name, variant_enabled=True, variant_name=variant.variant_name, ) @@ -70,9 +75,21 @@ enabled_services.append(enabled_service) - return EnabledServicesResult( - enabled_services=sorted(enabled_services, key=lambda x: x.name) + if ent_status == UserFacingStatus.WARNING and details: + warnings.append( + ErrorWarningObject( + title=details.msg or "", + code=details.name or "", + meta={"service": ent_name}, + ) + ) + + result = EnabledServicesResult( + enabled_services=sorted(enabled_services, key=lambda x: x.name), ) + result.warnings = warnings + + return result endpoint = APIEndpoint( diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/apt.py ubuntu-advantage-tools-31.2~23.10/uaclient/apt.py --- ubuntu-advantage-tools-30~23.10/uaclient/apt.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/apt.py 2024-02-14 15:37:46.000000000 +0000 @@ -6,7 +6,6 @@ import os import re import subprocess -import sys import tempfile from functools import lru_cache, wraps from typing import Dict, Iterable, List, NamedTuple, Optional, Set, Union @@ -18,16 +17,16 @@ from uaclient.defaults import ESM_APT_ROOTDIR APT_HELPER_TIMEOUT = 60.0 # 60 second timeout used for apt-helper call -APT_AUTH_COMMENT = " # ubuntu-advantage-tools" +APT_AUTH_COMMENT = " # ubuntu-pro-client" APT_CONFIG_AUTH_FILE = "Dir::Etc::netrc/" APT_CONFIG_AUTH_PARTS_DIR = "Dir::Etc::netrcparts/" APT_CONFIG_LISTS_DIR = "Dir::State::lists/" APT_PROXY_CONFIG_HEADER = """\ /* - * Autogenerated by ubuntu-advantage-tools + * Autogenerated by ubuntu-pro-client * Do not edit this file directly * - * To change what ubuntu-advantage-tools sets, use the `pro config set` + * To change what ubuntu-pro-client sets, use the `pro config set` * or the `pro config unset` commands to set/unset either: * global_apt_http_proxy and global_apt_https_proxy * for a global apt proxy @@ -52,17 +51,18 @@ APT_UPDATE_SUCCESS_STAMP_PATH = "/var/lib/apt/periodic/update-success-stamp" +SERIES_NOT_USING_DEB822 = ("xenial", "bionic", "focal", "jammy", "mantic") -ESM_REPO_FILE_CONTENT = """\ -# Written by ubuntu-advantage-tools - -deb https://esm.ubuntu.com/{name}/ubuntu {series}-{name}-security main -# deb-src https://esm.ubuntu.com/{name}/ubuntu {series}-{name}-security main - -deb https://esm.ubuntu.com/{name}/ubuntu {series}-{name}-updates main -# deb-src https://esm.ubuntu.com/{name}/ubuntu {series}-{name}-updates main +DEB822_REPO_FILE_CONTENT = """\ +# Written by ubuntu-pro-client +Types: deb{deb_src} +URIs: {url} +Suites: {suites} +Components: main +Signed-By: {keyrings_dir}/{keyring_file} """ + ESM_BASIC_FILE_STRUCTURE = { "files": [ 
os.path.join(ESM_APT_ROOTDIR, "etc/apt/sources.list"), @@ -473,6 +473,64 @@ return valid_versions +def _get_list_file_content( + suites: List[str], series: str, updates_enabled: bool, repo_url: str +) -> str: + content = "" + for suite in suites: + if series not in suite: + continue # Only enable suites matching this current series + maybe_comment = "" + if "-updates" in suite and not updates_enabled: + LOG.warning( + 'Not enabling apt suite "%s" because "%s-updates" is not' + " enabled", + suite, + series, + ) + maybe_comment = "# " + content += ( + "{maybe_comment}deb {url} {suite} main\n" + "# deb-src {url} {suite} main\n".format( + maybe_comment=maybe_comment, url=repo_url, suite=suite + ) + ) + + return content + + +def _get_sources_file_content( + suites: List[str], + series: str, + updates_enabled: bool, + repo_url: str, + keyring_file: str, + include_deb_src: bool = False, +) -> str: + appliable_suites = [suite for suite in suites if series in suite] + if not updates_enabled: + LOG.warning( + "Not enabling service-related -updates suites because" + ' "%s-updates" is not enabled', + series, + ) + appliable_suites = [ + suite for suite in appliable_suites if "-updates" not in suite + ] + + deb_src = " deb-src" if include_deb_src else "" + + content = DEB822_REPO_FILE_CONTENT.format( + url=repo_url, + suites=" ".join(appliable_suites), + keyrings_dir=KEYRINGS_DIR, + keyring_file=keyring_file, + deb_src=deb_src, + ) + + return content + + def add_auth_apt_repo( repo_filename: str, repo_url: str, @@ -510,30 +568,22 @@ updates_enabled = True break - content = "" - for suite in suites: - if series not in suite: - continue # Only enable suites matching this current series - maybe_comment = "" - if "-updates" in suite and not updates_enabled: - LOG.warning( - 'Not enabling apt suite "%s" because "%s-updates" is not' - " enabled", - suite, - series, - ) - maybe_comment = "# " - content += ( - "{maybe_comment}deb {url} {suite} main\n" - "# deb-src {url} {suite} main\n".format( - maybe_comment=maybe_comment, url=repo_url, suite=suite - ) + add_apt_auth_conf_entry(repo_url, username, password) + + if series in SERIES_NOT_USING_DEB822: + source_keyring_file = os.path.join(KEYRINGS_DIR, keyring_file) + destination_keyring_file = os.path.join(APT_KEYS_DIR, keyring_file) + gpg.export_gpg_key(source_keyring_file, destination_keyring_file) + + content = _get_list_file_content( + suites, series, updates_enabled, repo_url + ) + else: + content = _get_sources_file_content( + suites, series, updates_enabled, repo_url, keyring_file ) + system.write_file(repo_filename, content) - add_apt_auth_conf_entry(repo_url, username, password) - source_keyring_file = os.path.join(KEYRINGS_DIR, keyring_file) - destination_keyring_file = os.path.join(APT_KEYS_DIR, keyring_file) - gpg.export_gpg_key(source_keyring_file, destination_keyring_file) def add_apt_auth_conf_entry(repo_url, login, password): @@ -602,6 +652,12 @@ ) -> None: """Remove an authenticated apt repo and credentials to the system""" system.ensure_file_absent(repo_filename) + # Also try to remove old .list files for compatibility with older releases. 
+ if repo_filename.endswith(".sources"): + system.ensure_file_absent( + util.set_filename_extension(repo_filename, "list") + ) + if keyring_file: keyring_file = os.path.join(APT_KEYS_DIR, keyring_file) system.ensure_file_absent(keyring_file) @@ -663,43 +719,6 @@ system.ensure_file_absent(path) -def clean_apt_files(*, _entitlements=None): - """ - Clean apt files written by uaclient - - :param _entitlements: - The uaclient.entitlements module to use, defaults to - uaclient.entitlements. (This is only present for testing, because the - import happens within the function to avoid circular imports.) - """ - from uaclient.entitlements.repo import RepoEntitlement - - if _entitlements is None: - from uaclient import entitlements as __entitlements - - _entitlements = __entitlements - - for ent_cls in _entitlements.ENTITLEMENT_CLASSES: - if not issubclass(ent_cls, RepoEntitlement): - continue - repo_file = ent_cls.repo_list_file_tmpl.format(name=ent_cls.name) - pref_file = ent_cls.repo_pref_file_tmpl.format(name=ent_cls.name) - if os.path.exists(repo_file): - event.info( - messages.APT_REMOVING_SOURCE_FILE.format(filename=repo_file), - file_type=sys.stderr, - ) - system.ensure_file_absent(repo_file) - if os.path.exists(pref_file): - event.info( - messages.APT_REMOVING_PREFERENCES_FILE.format( - filename=pref_file - ), - file_type=sys.stderr, - ) - system.ensure_file_absent(pref_file) - - def is_installed(pkg: str) -> bool: return pkg in get_installed_packages_names() @@ -993,3 +1012,13 @@ apt_cfg_dict[cfg_name] = cfg_value return apt_cfg_dict + + +def get_system_sources_file() -> str: + old_sources_path = "/etc/apt/sources.list" + new_sources_path = "/etc/apt/sources.list.d/ubuntu.sources" + return ( + new_sources_path + if os.path.exists(new_sources_path) + else old_sources_path + ) diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/cli/__init__.py ubuntu-advantage-tools-31.2~23.10/uaclient/cli/__init__.py --- ubuntu-advantage-tools-30~23.10/uaclient/cli/__init__.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/cli/__init__.py 2024-02-14 15:37:46.000000000 +0000 @@ -180,7 +180,9 @@ def wrapper(f): @wraps(f) def new_f(*args, cfg, **kwargs): - with lock.SingleAttemptLock(cfg=cfg, lock_holder=lock_holder): + with lock.RetryLock( + cfg=cfg, lock_holder=lock_holder, sleep_time=1 + ): retval = f(*args, cfg=cfg, **kwargs) return retval @@ -1065,7 +1067,7 @@ event.info(messages.REFRESH_CONTRACT_ENABLE) try: contract.refresh(cfg) - except (exceptions.UrlError, exceptions.UbuntuProError): + except (exceptions.ConnectivityError, exceptions.UbuntuProError): # Inability to refresh is not a critical issue during enable LOG.warning("Failed to refresh contract", exc_info=True) event.warning(warning_msg=messages.E_REFRESH_CONTRACT_FAILURE) @@ -1231,7 +1233,7 @@ cfg=cfg, mode=event_logger.EventLoggerMode.CLI, ) - except exceptions.UrlError: + except exceptions.ConnectivityError: event.info(messages.E_ATTACH_FAILURE.msg) return 1 else: @@ -1301,7 +1303,7 @@ try: actions.attach_with_token(cfg, token=token, allow_enable=allow_enable) - except exceptions.UrlError: + except exceptions.ConnectivityError: raise exceptions.AttachError() else: ret = 0 @@ -1527,8 +1529,7 @@ def _action_refresh_contract(_args, cfg: config.UAConfig): try: contract.refresh(cfg) - except exceptions.UrlError as exc: - LOG.exception(exc) + except exceptions.ConnectivityError: raise exceptions.RefreshContractFailure() print(messages.REFRESH_CONTRACT_SUCCESS) @@ -1670,7 +1671,6 @@ if not logger: logger = 
logging.getLogger("ubuntupro") logger.setLevel(log_level) - logger.addFilter(pro_log.RedactionFilter()) # Clear all handlers, so they are replaced for this logger logger.handlers = [] @@ -1684,6 +1684,7 @@ file_handler.setFormatter(JsonArrayFormatter()) file_handler.setLevel(log_level) file_handler.set_name("upro-file") + file_handler.addFilter(pro_log.RedactionFilter()) logger.addHandler(file_handler) @@ -1707,7 +1708,7 @@ print(messages.CLI_INTERRUPT_RECEIVED, file=sys.stderr) lock.clear_lock_file_if_present() sys.exit(1) - except exceptions.UrlError as exc: + except exceptions.ConnectivityError as exc: if "CERTIFICATE_VERIFY_FAILED" in str(exc): tmpl = messages.SSL_VERIFICATION_ERROR_CA_CERTIFICATES if apt.is_installed("ca-certificates"): @@ -1720,7 +1721,10 @@ "Failed to access URL: %s", exc.url, exc_info=exc ) - msg = messages.E_CONNECTIVITY_ERROR + msg = messages.E_CONNECTIVITY_ERROR.format( + url=exc.url, + cause_error=exc.cause_error, + ) event.error(error_msg=msg.msg, error_code=msg.name) event.info(info_msg=msg.msg, file_type=sys.stderr) diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/cli/fix.py ubuntu-advantage-tools-31.2~23.10/uaclient/cli/fix.py --- ubuntu-advantage-tools-30~23.10/uaclient/cli/fix.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/cli/fix.py 2024-01-18 17:34:13.000000000 +0000 @@ -10,7 +10,7 @@ Union, ) -from uaclient import apt, exceptions, messages, security, system, util +from uaclient import apt, exceptions, messages, system, util from uaclient.actions import attach_with_token, enable_entitlement_by_name from uaclient.api.u.pro.attach.magic.initiate.v1 import _initiate from uaclient.api.u.pro.attach.magic.revoke.v1 import ( @@ -21,7 +21,13 @@ MagicAttachWaitOptions, _wait, ) -from uaclient.api.u.pro.security.fix import ( # noqa: F401 +from uaclient.api.u.pro.security.fix._common import ( + CVE_OR_USN_REGEX, + FixStatus, + UnfixedPackage, + status_message, +) +from uaclient.api.u.pro.security.fix._common.plan.v1 import ( # noqa: F401 ESM_APPS_POCKET, ESM_INFRA_POCKET, STANDARD_UPDATES_POCKET, @@ -42,7 +48,6 @@ NoOpLivepatchFixData, USNAdditionalData, ) -from uaclient.api.u.pro.security.fix._common import status_message from uaclient.api.u.pro.security.fix.cve.plan.v1 import CVEFixPlanOptions from uaclient.api.u.pro.security.fix.cve.plan.v1 import _plan as cve_plan from uaclient.api.u.pro.security.fix.usn.plan.v1 import USNFixPlanOptions @@ -66,7 +71,6 @@ from uaclient.files import notices from uaclient.files.notices import Notice from uaclient.messages.urls import PRO_HOME_PAGE -from uaclient.security import FixStatus from uaclient.status import colorize_commands @@ -79,7 +83,7 @@ cfg: UAConfig, ): self.pkg_index = 0 - self.unfixed_pkgs = [] # type: List[security.UnfixedPackage] + self.unfixed_pkgs = [] # type: List[UnfixedPackage] self.installed_pkgs = set() # type: Set[str] self.fix_status = FixStatus.SYSTEM_NON_VULNERABLE self.title = title @@ -88,6 +92,7 @@ self.cfg = cfg self.should_print_pkg_header = True self.warn_package_cannot_be_installed = False + self.fixed_by_livepatch = False def print_fix_header(self): if self.affected_pkgs: @@ -128,7 +133,7 @@ def add_unfixed_packages(self, pkgs: List[str], unfixed_reason: str): for pkg in pkgs: self.unfixed_pkgs.append( - security.UnfixedPackage(pkg=pkg, unfixed_reason=unfixed_reason) + UnfixedPackage(pkg=pkg, unfixed_reason=unfixed_reason) ) @@ -261,7 +266,7 @@ print("\n" + messages.SECURITY_FIXING_RELATED_USNS) related_usn_status = ( {} - ) # type: Dict[str, 
Tuple[FixStatus, List[security.UnfixedPackage]]] + ) # type: Dict[str, Tuple[FixStatus, List[UnfixedPackage]]] for related_usn_plan in related_usns_plan: print("- {}".format(related_usn_plan.title)) related_usn_status[related_usn_plan.title] = execute_fix_plan( @@ -774,11 +779,6 @@ fix_context.cfg, fix_context.dry_run, ): - print( - messages.SECURITY_UA_SERVICE_NOT_ENABLED.format( - service=step.data.service - ) - ) fix_context.add_unfixed_packages( pkgs=step.data.source_packages, unfixed_reason=messages.SECURITY_UA_SERVICE_NOT_ENABLED_SHORT.format( # noqa @@ -809,6 +809,7 @@ version=step.data.patch_version, ) ) + fix_context.fixed_by_livepatch = True def _execute_noop_already_fixed_step( @@ -826,7 +827,7 @@ def execute_fix_plan( fix_plan: FixPlanResult, dry_run: bool, cfg: UAConfig -) -> Tuple[FixStatus, List[security.UnfixedPackage]]: +) -> Tuple[FixStatus, List[UnfixedPackage]]: full_plan = [ *fix_plan.plan, *fix_plan.warnings, @@ -898,12 +899,14 @@ operation="fix operation", ) - _handle_fix_status_message(fix_context.fix_status, fix_plan.title) + if not fix_context.fixed_by_livepatch: + _handle_fix_status_message(fix_context.fix_status, fix_plan.title) + return (fix_context.fix_status, fix_context.unfixed_pkgs) def action_fix(args, *, cfg, **kwargs): - if not re.match(security.CVE_OR_USN_REGEX, args.security_issue): + if not re.match(CVE_OR_USN_REGEX, args.security_issue): raise exceptions.InvalidSecurityIssueIdFormat( issue=args.security_issue ) diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli.py ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli.py --- ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli.py 2024-01-18 17:34:13.000000000 +0000 @@ -645,8 +645,9 @@ expected_log_call, ): m_args = m_get_parser.return_value.parse_args.return_value - m_args.action.side_effect = exceptions.UrlError( - socket.gaierror(-2, "Name or service not known"), url=error_url + m_args.action.side_effect = exceptions.ConnectivityError( + cause=socket.gaierror(-2, "Name or service not known"), + url=error_url, ) with pytest.raises(SystemExit) as excinfo: @@ -657,7 +658,11 @@ assert [ mock.call( - info_msg=messages.E_CONNECTIVITY_ERROR.msg, file_type=mock.ANY + info_msg=messages.E_CONNECTIVITY_ERROR.format( + url=error_url, + cause_error="[Errno -2] Name or service not known", + ).msg, + file_type=mock.ANY, ) ] == m_event_info.call_args_list assert [expected_log_call] == m_log_exception.call_args_list @@ -857,13 +862,6 @@ assert expected_ents_not_found == actual_ents_not_found -expected_notice = r""".*[info].* A new version is available: 1.2.3 -Please run: - sudo apt-get install ubuntu-advantage-tools -to get the latest bug fixes and new features. -""" - - # There is a fixture for this function to avoid leaking, as it is called in # the main CLI function. So, instead of importing it directly, we are using # the reference for the fixture to test it. 
diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli_attach.py ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli_attach.py --- ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli_attach.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli_attach.py 2024-01-18 17:34:13.000000000 +0000 @@ -9,6 +9,7 @@ from uaclient import event_logger, http, messages, util from uaclient.cli import ( + _post_cli_attach, action_attach, attach_parser, get_parser, @@ -30,7 +31,7 @@ """\ usage: pro attach [flags] -Attach this machine to Ubuntu Pro with a token obtained from: +Attach this machine to an Ubuntu Pro subscription with a token obtained from: https://ubuntu.com/pro/dashboard When running this command without a token, it will generate a short code @@ -176,10 +177,12 @@ } assert expected == json.loads(capsys.readouterr()[0]) + @mock.patch("time.sleep") @mock.patch("uaclient.system.subp") def test_lock_file_exists( self, m_subp, + m_sleep, capsys, FakeConfig, event, @@ -189,7 +192,7 @@ cfg.write_cache("lock", "123:pro disable") with pytest.raises(LockHeldError) as exc_info: action_attach(mock.MagicMock(), cfg=cfg) - assert [mock.call(["ps", "123"])] == m_subp.call_args_list + assert [mock.call(["ps", "123"])] * 12 == m_subp.call_args_list assert ( "Unable to perform: pro attach.\n" "Operation in progress: pro disable (pid:123)" @@ -232,6 +235,25 @@ assert expected == json.loads(capsys.readouterr()[0]) @mock.patch( + "uaclient.status.format_tabular", return_value="mock_tabular_status" + ) + @mock.patch("uaclient.actions.status", return_value=("", 0)) + @mock.patch(M_PATH + "daemon") + def test_post_cli_attach( + self, m_daemon, m_status, m_format_tabular, capsys, FakeConfig + ): + cfg = FakeConfig.for_attached_machine() + _post_cli_attach(cfg) + + assert [mock.call()] == m_daemon.stop.call_args_list + assert [mock.call(cfg)] == m_daemon.cleanup.call_args_list + assert [mock.call(cfg)] == m_status.call_args_list + assert [mock.call("")] == m_format_tabular.call_args_list + out, _ = capsys.readouterr() + assert "This machine is now attached to 'test_contract'" in out + assert "mock_tabular_status" in out + + @mock.patch( M_PATH + "contract.UAContractClient.update_activity_token", ) @mock.patch("uaclient.files.state_files.attachment_data_file.write") @@ -239,12 +261,10 @@ @mock.patch("uaclient.files.notices.NoticesManager.remove") @mock.patch("uaclient.timer.update_messaging.update_motd_messages") @mock.patch(M_PATH + "contract.UAContractClient.add_contract_machine") - @mock.patch("uaclient.actions.status", return_value=("", 0)) - @mock.patch("uaclient.status.format_tabular") + @mock.patch(M_PATH + "_post_cli_attach") def test_happy_path_with_token_arg( self, - m_format_tabular, - m_status, + m_post_cli, contract_machine_attach, m_update_apt_and_motd_msgs, _m_remove_notice, @@ -270,46 +290,13 @@ ret = action_attach(args, cfg) assert 0 == ret - assert 1 == m_status.call_count - assert 1 == m_format_tabular.call_count expected_calls = [ mock.call(contract_token=token, attachment_dt=mock.ANY) ] assert expected_calls == contract_machine_attach.call_args_list assert [mock.call(cfg)] == m_update_apt_and_motd_msgs.call_args_list assert 1 == m_update_activity_token.call_count - - # We need to do that since all config objects in this - # test will share the same data dir. 
Since this will - # test a successful attach, in the end we write a machine token - # file, which will make all other cfg objects here to report - # as attached - cfg.delete_cache() - cfg.machine_token_file.delete() - - cfg = FakeConfig() - args = mock.MagicMock(token=token, attach_config=None) - with mock.patch.object( - event, "_event_logger_mode", event_logger.EventLoggerMode.JSON - ): - with mock.patch.object( - cfg, "check_lock_info" - ) as m_check_lock_info: - m_check_lock_info.return_value = (0, "lock_holder") - fake_stdout = io.StringIO() - with contextlib.redirect_stdout(fake_stdout): - main_error_handler(action_attach)(args, cfg) - - expected = { - "_schema_version": event_logger.JSON_SCHEMA_VERSION, - "result": "success", - "errors": [], - "failed_services": [], - "needs_reboot": False, - "processed_services": [], - "warnings": [], - } - assert expected == json.loads(fake_stdout.getvalue()) + assert [mock.call(cfg)] == m_post_cli.call_args_list @pytest.mark.parametrize("auto_enable", (True, False)) @mock.patch( diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli_auto_attach.py ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli_auto_attach.py --- ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli_auto_attach.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli_auto_attach.py 2024-01-18 17:34:13.000000000 +0000 @@ -85,7 +85,9 @@ "api_side_effect,expected_err,expected_ret", ( ( - exceptions.UrlError("does-not-matter", "url"), + exceptions.ConnectivityError( + cause=Exception("does-not-matter"), url="url" + ), messages.E_ATTACH_FAILURE.msg, 1, ), diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli_collect_logs.py ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli_collect_logs.py --- ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli_collect_logs.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli_collect_logs.py 2024-01-18 17:34:13.000000000 +0000 @@ -10,6 +10,7 @@ get_parser, main, ) +from uaclient.defaults import APPARMOR_PROFILES M_PATH = "uaclient.cli." 
@@ -46,6 +47,9 @@ out, _err = capsys.readouterr() assert re.match(HELP_OUTPUT, out) + @pytest.mark.parametrize( + "series,extension", (("jammy", "list"), ("noble", "sources")) + ) @pytest.mark.parametrize("is_root", ((True), (False))) @mock.patch("uaclient.util.we_are_currently_root") @mock.patch( @@ -62,18 +66,22 @@ @mock.patch("pathlib.Path.stat") @mock.patch("os.chown") @mock.patch("os.path.isfile", return_value=True) + @mock.patch("shutil.copy") @mock.patch("uaclient.system.write_file") @mock.patch("uaclient.system.load_file") @mock.patch("uaclient.system.subp", return_value=(None, None)) @mock.patch("uaclient.log.get_user_log_file") @mock.patch("uaclient.log.get_all_user_log_files") + @mock.patch("uaclient.system.get_release_info") def test_collect_logs( self, + m_get_release_info, m_get_users, m_get_user, m_subp, _load_file, _write_file, + m_shutilcopy, m_isfile, _chown, _stat, @@ -83,16 +91,19 @@ _glob, util_we_are_currently_root, is_root, + series, + extension, FakeConfig, tmpdir, ): + m_get_release_info.return_value.series = series util_we_are_currently_root.return_value = is_root m_get_user.return_value = tmpdir.join("user-log").strpath m_get_users.return_value = [ tmpdir.join("user1-log").strpath, tmpdir.join("user2-log").strpath, ] - is_file_calls = 17 + is_file_calls = 17 + len(APPARMOR_PROFILES) user_log_files = [mock.call(m_get_user())] if util_we_are_currently_root(): user_log_files = [ @@ -129,6 +140,10 @@ "-o", "short-precise", "-u", + "apt-news.service", + "-u", + "esm-cache.service", + "-u", "ua-timer.service", "-u", "ua-auto-attach.service", @@ -139,6 +154,10 @@ ], rcs=None, ), + mock.call(["systemctl", "status", "apt-news.service"], rcs=[0, 3]), + mock.call( + ["systemctl", "status", "esm-cache.service"], rcs=[0, 3] + ), mock.call(["systemctl", "status", "ua-timer.service"], rcs=[0, 3]), mock.call(["systemctl", "status", "ua-timer.timer"], rcs=[0, 3]), mock.call( @@ -153,6 +172,7 @@ mock.call( ["systemctl", "status", "ubuntu-advantage.service"], rcs=[0, 3] ), + mock.call(["journalctl", "-b", "-k", "--since=1 day ago"]), ] assert m_isfile.call_count == is_file_calls @@ -161,21 +181,58 @@ mock.call(cfg.log_file), mock.call("/var/lib/ubuntu-advantage/jobs-status.json"), mock.call("/etc/cloud/build.info"), - mock.call("/etc/apt/sources.list.d/ubuntu-anbox-cloud.list"), - mock.call("/etc/apt/sources.list.d/ubuntu-cc-eal.list"), - mock.call("/etc/apt/sources.list.d/ubuntu-cis.list"), - mock.call("/etc/apt/sources.list.d/ubuntu-esm-apps.list"), - mock.call("/etc/apt/sources.list.d/ubuntu-esm-infra.list"), - mock.call("/etc/apt/sources.list.d/ubuntu-fips.list"), - mock.call("/etc/apt/sources.list.d/ubuntu-fips-updates.list"), - mock.call("/etc/apt/sources.list.d/ubuntu-fips-preview.list"), - mock.call("/etc/apt/sources.list.d/ubuntu-realtime-kernel.list"), - mock.call("/etc/apt/sources.list.d/ubuntu-ros.list"), - mock.call("/etc/apt/sources.list.d/ubuntu-ros-updates.list"), + mock.call( + "/etc/apt/sources.list.d/ubuntu-anbox-cloud.{}".format( + extension + ) + ), + mock.call( + "/etc/apt/sources.list.d/ubuntu-cc-eal.{}".format(extension) + ), + mock.call( + "/etc/apt/sources.list.d/ubuntu-cis.{}".format(extension) + ), + mock.call( + "/etc/apt/sources.list.d/ubuntu-esm-apps.{}".format(extension) + ), + mock.call( + "/etc/apt/sources.list.d/ubuntu-esm-infra.{}".format(extension) + ), + mock.call( + "/etc/apt/sources.list.d/ubuntu-fips.{}".format(extension) + ), + mock.call( + "/etc/apt/sources.list.d/ubuntu-fips-updates.{}".format( + extension + ) + ), + mock.call( + 
"/etc/apt/sources.list.d/ubuntu-fips-preview.{}".format( + extension + ) + ), + mock.call( + "/etc/apt/sources.list.d/ubuntu-realtime-kernel.{}".format( + extension + ) + ), + mock.call( + "/etc/apt/sources.list.d/ubuntu-ros.{}".format(extension) + ), + mock.call( + "/etc/apt/sources.list.d/ubuntu-ros-updates.{}".format( + extension + ) + ), mock.call("/var/log/ubuntu-advantage.log"), mock.call("/var/log/ubuntu-advantage.log.1"), + *[mock.call(f) for f in APPARMOR_PROFILES], ] - assert redact.call_count == is_file_calls + len(user_log_files) + # APPARMOR_PROFILES are not redacted + assert redact.call_count == is_file_calls + len(user_log_files) - len( + APPARMOR_PROFILES + ) + assert m_shutilcopy.call_count == len(APPARMOR_PROFILES) class TestParser: diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli_config.py ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli_config.py --- ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli_config.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli_config.py 2024-01-18 17:34:13.000000000 +0000 @@ -15,7 +15,7 @@ Available Commands: - show Show customisable configuration settings + show Show customizable configuration settings set Set and apply Ubuntu Pro configuration settings unset Unset Ubuntu Pro configuration setting """ # noqa diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli_config_show.py ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli_config_show.py --- ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli_config_show.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli_config_show.py 2024-01-18 17:34:13.000000000 +0000 @@ -8,7 +8,7 @@ HELP_OUTPUT = """\ usage: pro config show [key] [flags] -Show customisable configuration settings +Show customizable configuration settings positional arguments: key Optional key or key(s) to show configuration settings. 
diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli_detach.py ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli_detach.py --- ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli_detach.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli_detach.py 2024-01-18 17:34:13.000000000 +0000 @@ -103,10 +103,12 @@ } assert expected == json.loads(capsys.readouterr()[0]) + @mock.patch("time.sleep") @mock.patch("uaclient.system.subp") def test_lock_file_exists( self, m_subp, + m_sleep, m_prompt, FakeConfig, capsys, @@ -118,7 +120,7 @@ cfg.write_cache("lock", "123:pro enable") with pytest.raises(exceptions.LockHeldError) as err: action_detach(args, cfg=cfg) - assert [mock.call(["ps", "123"])] == m_subp.call_args_list + assert [mock.call(["ps", "123"])] * 12 == m_subp.call_args_list expected_error_msg = messages.E_LOCK_HELD_ERROR.format( lock_request="pro detach", lock_holder="pro enable", pid="123" ) diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli_disable.py ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli_disable.py --- ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli_disable.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli_disable.py 2024-01-18 17:34:13.000000000 +0000 @@ -532,10 +532,12 @@ expected["errors"][0]["additional_info"] = expected_info assert expected == json.loads(fake_stdout.getvalue()) + @mock.patch("time.sleep") @mock.patch("uaclient.system.subp") def test_lock_file_exists( self, m_subp, + m_sleep, FakeConfig, event, ): @@ -549,7 +551,7 @@ with pytest.raises(exceptions.LockHeldError) as err: args.service = ["esm-infra"] action_disable(args, cfg) - assert [mock.call(["ps", "123"])] == m_subp.call_args_list + assert [mock.call(["ps", "123"])] * 12 == m_subp.call_args_list assert expected_error.msg == err.value.msg args.assume_yes = True diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli_enable.py ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli_enable.py --- ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli_enable.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli_enable.py 2024-01-18 17:34:13.000000000 +0000 @@ -131,10 +131,12 @@ } assert expected == json.loads(capsys.readouterr()[0]) + @mock.patch("time.sleep") @mock.patch("uaclient.system.subp") def test_lock_file_exists( self, m_subp, + m_sleep, _refresh, capsys, event, @@ -147,7 +149,7 @@ with pytest.raises(exceptions.LockHeldError) as err: action_enable(args, cfg=cfg) - assert [mock.call(["ps", "123"])] == m_subp.call_args_list + assert [mock.call(["ps", "123"])] * 12 == m_subp.call_args_list expected_message = messages.E_LOCK_HELD_ERROR.format( lock_request="pro enable", lock_holder="pro disable", pid="123" diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli_fix.py ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli_fix.py --- ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli_fix.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli_fix.py 2024-01-18 17:34:13.000000000 +0000 @@ -1,14 +1,61 @@ +import contextlib +import io import textwrap import mock import pytest -from uaclient import exceptions +from uaclient import exceptions, messages +from uaclient.api.u.pro.security.fix._common import FixStatus, UnfixedPackage +from 
uaclient.api.u.pro.security.fix._common.plan.v1 import ( + ESM_APPS_POCKET, + ESM_INFRA_POCKET, + STANDARD_UPDATES_POCKET, + AptUpgradeData, + AttachData, + EnableData, + FixPlanAptUpgradeStep, + FixPlanAttachStep, + FixPlanEnableStep, + FixPlanNoOpLivepatchFixStep, + FixPlanNoOpStatus, + FixPlanNoOpStep, + FixPlanResult, + FixPlanUSNResult, + FixPlanWarningPackageCannotBeInstalled, + FixPlanWarningSecurityIssueNotFixed, + NoOpData, + NoOpLivepatchFixData, + PackageCannotBeInstalledData, + SecurityIssueNotFixedData, + USNAdditionalData, +) +from uaclient.api.u.pro.security.fix.usn.plan.v1 import ( + USNFixPlanResult, + USNSFixPlanResult, +) from uaclient.cli import main -from uaclient.cli.fix import action_fix -from uaclient.security import FixStatus +from uaclient.cli.fix import ( + FixContext, + _execute_apt_upgrade_step, + _execute_attach_step, + _execute_enable_step, + _handle_subscription_for_required_service, + _perform_magic_attach, + action_fix, + execute_fix_plan, + fix_usn, + print_cve_header, + print_usn_header, +) +from uaclient.entitlements.entitlement_status import ( + ApplicabilityStatus, + UserFacingStatus, +) +from uaclient.files.notices import Notice +from uaclient.status import colorize_commands -M_PATH = "uaclient.cli." +M_PATH = "uaclient.cli.fix." HELP_OUTPUT = textwrap.dedent( """\ @@ -91,3 +138,2185 @@ assert expected_msg == str(excinfo.value) assert 0 == m_fix_cve.call_count assert 0 == m_fix_usn.call_count + + def test_cve_header(self): + cve = FixPlanResult( + title="CVE-2020-1472", + description="Samba vulnerability", + expected_status=FixStatus.SYSTEM_NON_VULNERABLE.value.msg, + affected_packages=["pkg1", "pkg2"], + plan=[], + warnings=None, + error=None, + additional_data=None, + ) + + fake_stdout = io.StringIO() + with contextlib.redirect_stdout(fake_stdout): + print_cve_header(cve) + + assert ( + textwrap.dedent( + """\ + CVE-2020-1472: Samba vulnerability + - https://ubuntu.com/security/CVE-2020-1472""" + ) + == fake_stdout.getvalue().strip() + ) + + @pytest.mark.parametrize( + "usn,expected_output", + ( + ( + FixPlanUSNResult( + target_usn_plan=FixPlanResult( + title="USN-4510-2", + description="Samba vulnerability", + expected_status=FixStatus.SYSTEM_NON_VULNERABLE.value.msg, # noqa + affected_packages=["pkg1", "pkg2"], + plan=[], + warnings=None, + error=None, + additional_data=USNAdditionalData( + associated_cves=["CVE-2020-1473", "CVE-2020-1472"], + associated_launchpad_bugs=[], + ), + ), + related_usns_plan=[], + ), + textwrap.dedent( + """\ + USN-4510-2: Samba vulnerability + Associated CVEs: + - https://ubuntu.com/security/CVE-2020-1473 + - https://ubuntu.com/security/CVE-2020-1472""" + ), + ), + ( + FixPlanUSNResult( + target_usn_plan=FixPlanResult( + title="USN-4038-3", + description="USN vulnerability", + expected_status=FixStatus.SYSTEM_NON_VULNERABLE.value.msg, # noqa + affected_packages=["pkg1", "pkg2"], + plan=[], + warnings=None, + error=None, + additional_data=USNAdditionalData( + associated_cves=[], + associated_launchpad_bugs=[ + "https://launchpad.net/bugs/1834494" + ], + ), + ), + related_usns_plan=[], + ), + textwrap.dedent( + """\ + USN-4038-3: USN vulnerability + Found Launchpad bugs: + - https://launchpad.net/bugs/1834494""" + ), + ), + ), + ) + def test_usn_header(self, usn, expected_output): + fake_stdout = io.StringIO() + with contextlib.redirect_stdout(fake_stdout): + print_usn_header(usn) + assert expected_output == fake_stdout.getvalue().strip() + + +class TestExecuteFixPlan: + @pytest.mark.parametrize( + 
"fix_plan,dry_run,cloud_type,expected_output," + "expected_fix_status,expected_unfixed_pkgs", + ( + ( # No affected_packages listed + FixPlanResult( + title="USN-###", + description="test", + expected_status=FixStatus.SYSTEM_NOT_AFFECTED.value.msg, + affected_packages=[], + plan=[ + FixPlanNoOpStep( + data=NoOpData( + status=FixPlanNoOpStatus.NOT_AFFECTED.value + ), + order=1, + ) + ], + warnings=[], + error=None, + additional_data=None, + ), + False, + (None, None), + textwrap.dedent( + """\ + No affected source packages are installed. + + {check} USN-### does not affect your system. + """.format( + check=messages.OKGREEN_CHECK # noqa: E126 + ) # noqa: E126 + ), + FixStatus.SYSTEM_NOT_AFFECTED, + [], + ), + ( # CVE already fixed by Livepatch + FixPlanResult( + title="CVE-###", + description="test", + expected_status=FixStatus.SYSTEM_NOT_AFFECTED.value.msg, + affected_packages=[], + plan=[ + FixPlanNoOpLivepatchFixStep( + data=NoOpLivepatchFixData( + status="cve-fixed-by-livepatch", + patch_version="87.1", + ), + order=1, + ) + ], + warnings=[], + error=None, + additional_data=None, + ), + False, + (None, None), + messages.CVE_FIXED_BY_LIVEPATCH.format( + issue="CVE-###", + version="87.1", + ), + FixStatus.SYSTEM_NON_VULNERABLE, + [], + ), + ( # version is >= released affected package + FixPlanResult( + title="USN-###", + description="test", + expected_status=FixStatus.SYSTEM_NON_VULNERABLE.value.msg, + affected_packages=["slsrc"], + plan=[ + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=[], + source_packages=["slsrc"], + pocket=STANDARD_UPDATES_POCKET, + ), + order=1, + ) + ], + warnings=[], + error=None, + additional_data=None, + ), + False, + (None, None), + textwrap.dedent( + """\ + 1 affected source package is installed: slsrc + (1/1) slsrc: + A fix is available in Ubuntu standard updates. + The update is already installed. + + {check} USN-### is resolved. + """.format( + check=messages.OKGREEN_CHECK # noqa: E126 + ) # noqa: E126 + ), + FixStatus.SYSTEM_NON_VULNERABLE, + [], + ), + ( # installing package fix + FixPlanResult( + title="USN-###", + description="test", + expected_status=FixStatus.SYSTEM_NON_VULNERABLE.value.msg, + affected_packages=["slsrc"], + plan=[ + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["sl"], + source_packages=["slsrc"], + pocket=STANDARD_UPDATES_POCKET, + ), + order=1, + ) + ], + warnings=[], + error=None, + additional_data=None, + ), + False, + (None, None), + textwrap.dedent( + """\ + 1 affected source package is installed: slsrc + (1/1) slsrc: + A fix is available in Ubuntu standard updates. 
+ """ + ) + + colorize_commands( + [["apt update && apt install --only-upgrade" " -y sl"]] + ) + + "\n\n" + + "{check} USN-### is resolved.\n".format( + check=messages.OKGREEN_CHECK + ), + FixStatus.SYSTEM_NON_VULNERABLE, + [], + ), + ( # installing package fix that comes from esm-infra + FixPlanResult( + title="USN-###", + description="test", + expected_status=FixStatus.SYSTEM_NON_VULNERABLE.value.msg, + affected_packages=["slsrc"], + plan=[ + FixPlanAttachStep( + data=AttachData( + reason="test", + required_service=ESM_INFRA_POCKET, + source_packages=["slsrc"], + ), + order=1, + ), + FixPlanEnableStep( + data=EnableData( + service=ESM_INFRA_POCKET, + source_packages=["slsrc"], + ), + order=2, + ), + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["sl"], + source_packages=["slsrc"], + pocket=ESM_INFRA_POCKET, + ), + order=3, + ), + ], + warnings=[], + error=None, + additional_data=None, + ), + False, + ("azure", None), + textwrap.dedent( + """\ + 1 affected source package is installed: slsrc + (1/1) slsrc: + A fix is available in Ubuntu Pro: ESM Infra. + """ + ) + + "\n".join( + [ + messages.SECURITY_USE_PRO_TMPL.format( + title="Azure", + cloud_specific_url="https://ubuntu.com/azure/pro", + ), + messages.SECURITY_UPDATE_NOT_INSTALLED_SUBSCRIPTION, + ] + ), + FixStatus.SYSTEM_STILL_VULNERABLE, + [ + UnfixedPackage( + pkg="slsrc", + unfixed_reason=messages.SECURITY_UA_SERVICE_REQUIRED.format( # noqa + service="esm-infra" + ), + ) + ], + ), + ( # installing package fixes that comes from + # standard-updates and esm-infra + FixPlanResult( + title="USN-###", + description="test", + expected_status=FixStatus.SYSTEM_NON_VULNERABLE.value.msg, + affected_packages=["curl", "slsrc"], + plan=[ + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["curl"], + source_packages=["curl"], + pocket=STANDARD_UPDATES_POCKET, + ), + order=1, + ), + FixPlanAttachStep( + data=AttachData( + reason="test", + required_service=ESM_APPS_POCKET, + source_packages=["slsrc"], + ), + order=2, + ), + FixPlanEnableStep( + data=EnableData( + service=ESM_APPS_POCKET, + source_packages=["slsrc"], + ), + order=3, + ), + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["sl"], + source_packages=["slsrc"], + pocket=ESM_APPS_POCKET, + ), + order=4, + ), + ], + warnings=[], + error=None, + additional_data=None, + ), + False, + ("gce", None), + textwrap.dedent( + """\ + 2 affected source packages are installed: curl, slsrc + (1/2) curl: + A fix is available in Ubuntu standard updates. + """ + ) + + colorize_commands( + [["apt update && apt install --only-upgrade" " -y curl"]] + ) + + "\n" + + textwrap.dedent( + """\ + (2/2) slsrc: + A fix is available in Ubuntu Pro: ESM Apps. 
+ """ + ) + + "\n".join( + [ + messages.SECURITY_USE_PRO_TMPL.format( + title="GCP", + cloud_specific_url="https://ubuntu.com/gcp/pro", + ), + messages.SECURITY_UPDATE_NOT_INSTALLED_SUBSCRIPTION, + ] + ) + + "\n\n" + + "1 package is still affected: slsrc", + FixStatus.SYSTEM_STILL_VULNERABLE, + [ + UnfixedPackage( + pkg="slsrc", + unfixed_reason=messages.SECURITY_UA_SERVICE_REQUIRED.format( # noqa + service="esm-apps" + ), + ) + ], + ), + ( # installing package fix that are not yet available + FixPlanResult( + title="USN-###", + description="test", + expected_status=FixStatus.SYSTEM_NON_VULNERABLE.value.msg, + affected_packages=[ + "pkg1", + "pkg2", + "pkg3", + "pkg4", + "pkg5", + "pkg6", + "pkg7", + "pkg8", + "pkg9", + "pkg10", + "pkg11", + "pkg12", + "pkg13", + "pkg14", + "pkg15", + ], + plan=[ + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["pkg10", "pkg11"], + source_packages=["pkg10", "pkg11"], + pocket=STANDARD_UPDATES_POCKET, + ), + order=5, + ), + FixPlanAttachStep( + data=AttachData( + reason="test", + required_service=ESM_INFRA_POCKET, + source_packages=["pkg12", "pkg13"], + ), + order=6, + ), + FixPlanEnableStep( + data=EnableData( + service=ESM_INFRA_POCKET, + source_packages=["pkg12", "pkg13"], + ), + order=7, + ), + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["pkg12", "pkg13"], + source_packages=["pkg12", "pkg13"], + pocket=ESM_INFRA_POCKET, + ), + order=8, + ), + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["pkg14", "pkg15"], + source_packages=["pkg14", "pkg15"], + pocket=ESM_APPS_POCKET, + ), + order=9, + ), + ], + warnings=[ + FixPlanWarningSecurityIssueNotFixed( + data=SecurityIssueNotFixedData( + source_packages=["pkg1", "pkg2", "pkg9"], + status="ignored", + ), + order=1, + ), + FixPlanWarningSecurityIssueNotFixed( + data=SecurityIssueNotFixedData( + source_packages=["pkg7", "pkg8"], + status="needed", + ), + order=2, + ), + FixPlanWarningSecurityIssueNotFixed( + data=SecurityIssueNotFixedData( + source_packages=["pkg5", "pkg6"], + status="needs-triage", + ), + order=3, + ), + FixPlanWarningSecurityIssueNotFixed( + data=SecurityIssueNotFixedData( + source_packages=["pkg3", "pkg4"], + status="pending", + ), + order=4, + ), + ], + error=None, + additional_data=None, + ), + False, + ("gce", None), + textwrap.dedent( + """\ + 15 affected source packages are installed: {} + (1/15, 2/15, 3/15) pkg1, pkg2, pkg9: + Sorry, no fix is available. + (4/15, 5/15) pkg7, pkg8: + Sorry, no fix is available yet. + (6/15, 7/15) pkg5, pkg6: + Ubuntu security engineers are investigating this issue. + (8/15, 9/15) pkg3, pkg4: + A fix is coming soon. Try again tomorrow. + (10/15, 11/15) pkg10, pkg11: + A fix is available in Ubuntu standard updates. + """ + ).format( + ( + "pkg1, pkg10, pkg11, pkg12, pkg13,\n" + " pkg14, pkg15, pkg2, pkg3, pkg4, pkg5, pkg6, pkg7," + " pkg8, pkg9" + ) + ) + + colorize_commands( + [ + [ + "apt update && apt install --only-upgrade" + " -y pkg10 pkg11" + ] + ] + ) + + "\n" + + textwrap.dedent( + """\ + (12/15, 13/15) pkg12, pkg13: + A fix is available in Ubuntu Pro: ESM Infra. 
+ """ + ) + + "\n".join( + [ + messages.SECURITY_USE_PRO_TMPL.format( + title="GCP", + cloud_specific_url="https://ubuntu.com/gcp/pro", + ), + messages.SECURITY_UPDATE_NOT_INSTALLED_SUBSCRIPTION, + ] + ) + + "\n\n" + + "11 packages are still affected: {}".format( + ( + "pkg1, pkg12, pkg13, pkg2, pkg3, pkg4, pkg5,\n" + " pkg6, pkg7, pkg8, pkg9" + ) + ), + FixStatus.SYSTEM_STILL_VULNERABLE, + [ + UnfixedPackage( + pkg="pkg1", + unfixed_reason=messages.SECURITY_CVE_STATUS_IGNORED, + ), + UnfixedPackage( + pkg="pkg2", + unfixed_reason=messages.SECURITY_CVE_STATUS_IGNORED, + ), + UnfixedPackage( + pkg="pkg9", + unfixed_reason=messages.SECURITY_CVE_STATUS_IGNORED, + ), + UnfixedPackage( + pkg="pkg7", + unfixed_reason=messages.SECURITY_CVE_STATUS_NEEDED, + ), + UnfixedPackage( + pkg="pkg8", + unfixed_reason=messages.SECURITY_CVE_STATUS_NEEDED, + ), + UnfixedPackage( + pkg="pkg5", + unfixed_reason=messages.SECURITY_CVE_STATUS_TRIAGE, + ), + UnfixedPackage( + pkg="pkg6", + unfixed_reason=messages.SECURITY_CVE_STATUS_TRIAGE, + ), + UnfixedPackage( + pkg="pkg3", + unfixed_reason=messages.SECURITY_CVE_STATUS_PENDING, + ), + UnfixedPackage( + pkg="pkg4", + unfixed_reason=messages.SECURITY_CVE_STATUS_PENDING, + ), + UnfixedPackage( + pkg="pkg12", + unfixed_reason=messages.SECURITY_UA_SERVICE_REQUIRED.format( # noqa + service="esm-infra" + ), + ), + UnfixedPackage( + pkg="pkg13", + unfixed_reason=messages.SECURITY_UA_SERVICE_REQUIRED.format( # noqa + service="esm-infra" + ), + ), + ], + ), + ( # installing package fix that are not yet available + FixPlanResult( + title="USN-###", + description="test", + expected_status=FixStatus.SYSTEM_NON_VULNERABLE.value.msg, + affected_packages=[ + "longpackagename1", + "longpackagename2", + "longpackagename3", + "longpackagename4", + "longpackagename5", + ], + plan=[ + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=[ + "longpackagename1", + "longpackagename2", + "longpackagename3", + "longpackagename4", + "longpackagename5", + ], + source_packages=[ + "longpackagename1", + "longpackagename2", + "longpackagename3", + "longpackagename4", + "longpackagename5", + ], + pocket=STANDARD_UPDATES_POCKET, + ), + order=1, + ), + ], + warnings=[], + error=None, + additional_data=None, + ), + False, + ("gce", None), + """\ +5 affected source packages are installed: longpackagename1, longpackagename2, + longpackagename3, longpackagename4, longpackagename5 +(1/5, 2/5, 3/5, 4/5, 5/5) longpackagename1, longpackagename2, longpackagename3, + longpackagename4, longpackagename5: +A fix is available in Ubuntu standard updates.\n""" + + colorize_commands( + [ + [ + "apt update && apt install --only-upgrade" + " -y longpackagename1 longpackagename2 " + "longpackagename3 longpackagename4 " + "longpackagename5" + ] + ] + ) + + "\n\n" + + "{check} USN-### is resolved.\n".format( + check=messages.OKGREEN_CHECK + ), + FixStatus.SYSTEM_NON_VULNERABLE, + [], + ), + ( + FixPlanResult( + title="USN-###", + description="test", + expected_status=FixStatus.SYSTEM_STILL_VULNERABLE.value.msg, # noqa + affected_packages=["pkg1", "pkg2"], + plan=[ + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["pkg2"], + source_packages=["pkg2"], + pocket=STANDARD_UPDATES_POCKET, + ), + order=2, + ), + ], + warnings=[ + FixPlanWarningPackageCannotBeInstalled( + data=PackageCannotBeInstalledData( + binary_package="pkg1", + binary_package_version="2.0", + source_package="pkg1", + related_source_packages=["pkg1", "pkg2"], + pocket=STANDARD_UPDATES_POCKET, + ), + order=1, + ) + ], + 
error=None, + additional_data=None, + ), + False, + (None, None), + textwrap.dedent( + """\ + 2 affected source packages are installed: pkg1, pkg2 + (1/2, 2/2) pkg1, pkg2: + A fix is available in Ubuntu standard updates. + - Cannot install package pkg1 version 2.0 + """ + ) + + colorize_commands( + [["apt update && apt install --only-upgrade" " -y pkg2"]] + ) + + "\n\n" + + "1 package is still affected: pkg1" + + "\n" + + "{check} USN-### is not resolved.\n".format( + check=messages.FAIL_X + ), + FixStatus.SYSTEM_STILL_VULNERABLE, + [ + UnfixedPackage( + pkg="pkg1", + unfixed_reason="Cannot install package pkg1 version 2.0", # noqa + ), + ], + ), + ), + ) + @mock.patch("uaclient.system.should_reboot", return_value=False) + @mock.patch("uaclient.cli.fix.get_cloud_type") + @mock.patch("uaclient.util.prompt_choices", return_value="c") + @mock.patch("uaclient.apt.run_apt_update_command") + @mock.patch("uaclient.apt.run_apt_command") + def test_execute_fix_plan( + self, + _m_apt_run_cmd, + _m_apt_run_update, + _m_prompt, + m_get_cloud_type, + _m_should_reboot, + fix_plan, + dry_run, + cloud_type, + expected_output, + expected_fix_status, + expected_unfixed_pkgs, + capsys, + FakeConfig, + ): + m_get_cloud_type.return_value = cloud_type + with mock.patch("uaclient.util.sys") as m_sys: + m_stdout = mock.MagicMock() + type(m_sys).stdout = m_stdout + type(m_stdout).encoding = mock.PropertyMock(return_value="utf-8") + assert ( + expected_fix_status, + expected_unfixed_pkgs, + ) == execute_fix_plan(fix_plan, dry_run, cfg=FakeConfig()) + + out, _ = capsys.readouterr() + assert expected_output in out + + @pytest.mark.parametrize( + "fix_plan,expected_output,expected_fix_status,expected_unfixed_pkgs", + ( + ( + FixPlanResult( + title="USN-###", + description="test", + expected_status=FixStatus.SYSTEM_NON_VULNERABLE.value.msg, + affected_packages=["pkg1", "pkg2", "pkg3"], + plan=[ + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["pkg1", "pkg2", "pkg3"], + source_packages=["pkg1", "pkg2", "pkg3"], + pocket=STANDARD_UPDATES_POCKET, + ), + order=1, + ), + ], + warnings=[], + error=None, + additional_data=None, + ), + textwrap.dedent( + """\ + 3 affected source packages are installed: pkg1, pkg2, pkg3 + (1/3, 2/3, 3/3) pkg1, pkg2, pkg3: + A fix is available in Ubuntu standard updates. 
+ """ + ) + + colorize_commands( + [ + [ + "apt update && apt install --only-upgrade" + " -y pkg1 pkg2 pkg3" + ] + ] + ) + + "\n" + + "test exception" + + "\n\n" + + "3 packages are still affected: pkg1, pkg2, pkg3" + + "\n" + + "{check} USN-### is not resolved.\n".format( + check=messages.FAIL_X + ), + FixStatus.SYSTEM_STILL_VULNERABLE, + [ + UnfixedPackage( + pkg="pkg1", + unfixed_reason="test exception", + ), + UnfixedPackage( + pkg="pkg2", + unfixed_reason="test exception", + ), + UnfixedPackage( + pkg="pkg3", + unfixed_reason="test exception", + ), + ], + ), + ), + ) + @mock.patch("uaclient.system.should_reboot", return_value=False) + @mock.patch("uaclient.apt.run_apt_update_command") + def test_execute_fix_plan_apt_upgrade_fail( + self, + m_apt_update, + _m_should_reboot, + fix_plan, + expected_output, + expected_fix_status, + expected_unfixed_pkgs, + capsys, + FakeConfig, + ): + m_apt_update.side_effect = Exception("test exception") + with mock.patch("uaclient.util.sys") as m_sys: + m_stdout = mock.MagicMock() + type(m_sys).stdout = m_stdout + type(m_stdout).encoding = mock.PropertyMock(return_value="utf-8") + assert ( + expected_fix_status, + expected_unfixed_pkgs, + ) == execute_fix_plan(fix_plan, dry_run=False, cfg=FakeConfig()) + + out, _ = capsys.readouterr() + assert expected_output in out + + @pytest.mark.parametrize( + "fix_plan,expected_output,expected_fix_status," + "expected_unfixed_pkgs", + ( + ( # installing package fixes that comes from + # standard-updates and esm-infra when attaching + # the system + FixPlanResult( + title="USN-###", + description="test", + expected_status=FixStatus.SYSTEM_NON_VULNERABLE.value.msg, + affected_packages=["pkg1", "pkg2", "pkg3"], + plan=[ + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["pkg2"], + source_packages=["pkg2"], + pocket=STANDARD_UPDATES_POCKET, + ), + order=1, + ), + FixPlanAttachStep( + data=AttachData( + reason="test", + required_service=ESM_INFRA_POCKET, + source_packages=["pkg3"], + ), + order=2, + ), + FixPlanEnableStep( + data=EnableData( + service=ESM_INFRA_POCKET, + source_packages=["pkg3"], + ), + order=3, + ), + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["pkg3"], + source_packages=["pkg3"], + pocket=ESM_INFRA_POCKET, + ), + order=4, + ), + FixPlanEnableStep( + data=EnableData( + service=ESM_APPS_POCKET, + source_packages=["pkg1"], + ), + order=5, + ), + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["pkg1"], + source_packages=["pkg1"], + pocket=ESM_APPS_POCKET, + ), + order=6, + ), + ], + warnings=[], + error=None, + additional_data=None, + ), + textwrap.dedent( + """\ + 3 affected source packages are installed: pkg1, pkg2, pkg3 + (1/3) pkg2: + A fix is available in Ubuntu standard updates. + """ + ) + + colorize_commands( + [["apt update && apt install --only-upgrade" " -y pkg2"]] + ) + + "\n" + + textwrap.dedent( + """\ + (2/3) pkg3: + A fix is available in Ubuntu Pro: ESM Infra. + """ + ) + + messages.SECURITY_UPDATE_NOT_INSTALLED_SUBSCRIPTION + + "\n" + + messages.PROMPT_ENTER_TOKEN + + "\n" + + colorize_commands([["pro attach pro_token"]]) + + "\n" + + colorize_commands( + [["apt update && apt install --only-upgrade" " -y pkg3"]] + ) + + "\n" + + textwrap.dedent( + """\ + (3/3) pkg1: + A fix is available in Ubuntu Pro: ESM Apps. 
+ """ + ) + + colorize_commands( + [["apt update && apt install --only-upgrade" " -y pkg1"]] + ) + + "\n\n" + + "{check} USN-### is resolved.\n".format( + check=messages.OKGREEN_CHECK + ), + FixStatus.SYSTEM_NON_VULNERABLE, + [], + ), + ), + ) + @mock.patch("uaclient.apt.run_apt_update_command") + @mock.patch("uaclient.apt.run_apt_command") + @mock.patch("uaclient.system.should_reboot", return_value=False) + @mock.patch("uaclient.util.prompt_choices", return_value="a") + @mock.patch("uaclient.cli.fix.get_cloud_type", return_value=(None, None)) + @mock.patch("builtins.input", return_value="pro_token") + @mock.patch(M_PATH + "_handle_subscription_for_required_service") + @mock.patch("uaclient.cli.fix.attach_with_token") + def test_execute_fix_plan_when_attach_is_needed( + self, + m_attach_with_token, + m_handle_required_service, + _m_input, + _m_get_cloud_type, + _m_prompt, + _m_should_reboot, + _m_run_apt_command, + _m_run_apt_update, + fix_plan, + expected_output, + expected_fix_status, + expected_unfixed_pkgs, + FakeConfig, + capsys, + ): + def fake_attach(cfg, token, allow_enable): + cfg.for_attached_machine() + return 0 + + m_attach_with_token.side_effect = fake_attach + m_handle_required_service.return_value = True + with mock.patch("uaclient.util.sys") as m_sys: + m_stdout = mock.MagicMock() + type(m_sys).stdout = m_stdout + type(m_stdout).encoding = mock.PropertyMock(return_value="utf-8") + assert ( + expected_fix_status, + expected_unfixed_pkgs, + ) == execute_fix_plan(fix_plan, dry_run=False, cfg=FakeConfig()) + + out, _ = capsys.readouterr() + assert expected_output in out + + @pytest.mark.parametrize( + "service_status", + ( + (UserFacingStatus.INACTIVE), + (UserFacingStatus.INAPPLICABLE), + (UserFacingStatus.UNAVAILABLE), + ), + ) + @pytest.mark.parametrize( + "fix_plan,expected_output,expected_fix_status," + "expected_unfixed_pkgs", + ( + ( # installing package fixes that comes from + # standard-updates and esm-infra when attaching + # the system + FixPlanResult( + title="USN-###", + description="test", + expected_status=FixStatus.SYSTEM_NON_VULNERABLE.value.msg, + affected_packages=["pkg1"], + plan=[ + FixPlanAttachStep( + data=AttachData( + reason="test", + required_service=ESM_INFRA_POCKET, + source_packages=["pkg1"], + ), + order=1, + ), + FixPlanEnableStep( + data=EnableData( + service=ESM_INFRA_POCKET, + source_packages=["pkg1"], + ), + order=2, + ), + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["pkg1"], + source_packages=["pkg1"], + pocket=ESM_INFRA_POCKET, + ), + order=3, + ), + ], + warnings=[], + error=None, + additional_data=None, + ), + textwrap.dedent( + """\ + 1 affected source package is installed: pkg1 + (1/1) pkg1: + A fix is available in Ubuntu Pro: ESM Infra. 
+ """ + ) + + messages.SECURITY_UPDATE_NOT_INSTALLED_SUBSCRIPTION + + "\n" + + messages.PROMPT_ENTER_TOKEN + + "\n" + + colorize_commands([["pro attach pro_token"]]) + + "\n" + + messages.SECURITY_UA_SERVICE_NOT_ENTITLED.format( + service="esm-infra" + ) # noqa + + "\n\n" + + "1 package is still affected: pkg1" + + "\n" + + "{check} USN-### is not resolved.\n".format( + check=messages.FAIL_X + ), + FixStatus.SYSTEM_STILL_VULNERABLE, + [ + UnfixedPackage( + pkg="pkg1", + unfixed_reason=messages.SECURITY_UA_SERVICE_NOT_ENABLED_SHORT.format( # noqa + service="esm-infra" + ), + ), + ], + ), + ), + ) + @mock.patch("uaclient.apt.run_apt_update_command") + @mock.patch("uaclient.apt.run_apt_command") + @mock.patch("uaclient.system.should_reboot", return_value=False) + @mock.patch("uaclient.util.prompt_choices", return_value="a") + @mock.patch(M_PATH + "get_cloud_type", return_value=(None, None)) + @mock.patch("builtins.input", return_value="pro_token") + @mock.patch(M_PATH + "attach_with_token") + def test_execute_fix_plan_when_service_is_not_entitled( + self, + m_attach_with_token, + _m_input, + _m_get_cloud_type, + _m_prompt, + _m_should_reboot, + _m_run_apt_command, + _m_run_apt_update, + fix_plan, + expected_output, + expected_fix_status, + expected_unfixed_pkgs, + service_status, + FakeConfig, + capsys, + ): + def fake_attach(cfg, token, allow_enable): + cfg.for_attached_machine() + return 0 + + m_attach_with_token.side_effect = fake_attach + + m_entitlement_cls = mock.MagicMock() + m_entitlement_obj = m_entitlement_cls.return_value + m_entitlement_obj.user_facing_status.return_value = ( + service_status, + "", + ) + m_entitlement_obj.applicability_status.return_value = ( + ApplicabilityStatus.INAPPLICABLE, + "", + ) + m_entitlement_obj.name = "esm-infra" + + with mock.patch( + "uaclient.cli.fix.entitlement_factory", + return_value=m_entitlement_cls, + ): + with mock.patch("uaclient.util.sys") as m_sys: + m_stdout = mock.MagicMock() + type(m_sys).stdout = m_stdout + type(m_stdout).encoding = mock.PropertyMock( + return_value="utf-8" + ) + assert ( + expected_fix_status, + expected_unfixed_pkgs, + ) == execute_fix_plan( + fix_plan, dry_run=False, cfg=FakeConfig() + ) + + out, _ = capsys.readouterr() + assert expected_output in out + + @pytest.mark.parametrize( + "fix_plan,prompt_value,expected_output," + "expected_fix_status,expected_unfixed_pkgs", + ( + ( # installing package fixes that comes from + # standard-updates and esm-infra when attaching + # the system + FixPlanResult( + title="USN-###", + description="test", + expected_status=FixStatus.SYSTEM_NON_VULNERABLE.value.msg, + affected_packages=["pkg1"], + plan=[ + FixPlanEnableStep( + data=EnableData( + service=ESM_INFRA_POCKET, + source_packages=["pkg1"], + ), + order=1, + ), + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["pkg1"], + source_packages=["pkg1"], + pocket=ESM_INFRA_POCKET, + ), + order=2, + ), + ], + warnings=[], + error=None, + additional_data=None, + ), + "e", + textwrap.dedent( + """\ + 1 affected source package is installed: pkg1 + (1/1) pkg1: + A fix is available in Ubuntu Pro: ESM Infra. 
+ """ + ) + + messages.SECURITY_SERVICE_DISABLED.format( + service="esm-infra" + ) + + "\n" + + colorize_commands([["pro enable esm-infra"]]) + + "\n" + + colorize_commands( + [["apt update && apt install --only-upgrade" " -y pkg1"]] + ) + + "\n\n" + + "{check} USN-### is resolved.\n".format( + check=messages.OKGREEN_CHECK + ), + FixStatus.SYSTEM_NON_VULNERABLE, + [], + ), + ( + FixPlanResult( + title="USN-###", + description="test", + expected_status=FixStatus.SYSTEM_NON_VULNERABLE.value.msg, + affected_packages=["pkg1"], + plan=[ + FixPlanEnableStep( + data=EnableData( + service=ESM_INFRA_POCKET, + source_packages=["pkg1"], + ), + order=1, + ), + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["pkg1"], + source_packages=["pkg1"], + pocket=ESM_INFRA_POCKET, + ), + order=2, + ), + ], + warnings=[], + error=None, + additional_data=None, + ), + "c", + textwrap.dedent( + """\ + 1 affected source package is installed: pkg1 + (1/1) pkg1: + A fix is available in Ubuntu Pro: ESM Infra. + """ + ) + + messages.SECURITY_SERVICE_DISABLED.format( + service="esm-infra" + ) + + "\n" + + messages.SECURITY_UA_SERVICE_NOT_ENABLED.format( + service="esm-infra" + ) + + "\n\n" + + "1 package is still affected: pkg1" + + "\n" + + "{check} USN-### is not resolved.\n".format( + check=messages.FAIL_X + ), + FixStatus.SYSTEM_STILL_VULNERABLE, + [ + UnfixedPackage( + pkg="pkg1", + unfixed_reason=messages.SECURITY_UA_SERVICE_NOT_ENABLED_SHORT.format( # noqa + service="esm-infra" + ), + ) + ], + ), + ), + ) + @mock.patch("uaclient.apt.run_apt_update_command") + @mock.patch("uaclient.apt.run_apt_command") + @mock.patch(M_PATH + "enable_entitlement_by_name") + @mock.patch("uaclient.system.should_reboot", return_value=False) + @mock.patch("uaclient.util.prompt_choices") + @mock.patch(M_PATH + "get_cloud_type", return_value=(None, None)) + @mock.patch("builtins.input", return_value="pro_token") + def test_execute_fix_plan_when_service_requires_enable( + self, + _m_input, + _m_get_cloud_type, + m_prompt, + _m_should_reboot, + m_enable_ent, + _m_run_apt_command, + _m_run_apt_update, + fix_plan, + prompt_value, + expected_output, + expected_fix_status, + expected_unfixed_pkgs, + FakeConfig, + capsys, + ): + cfg = FakeConfig().for_attached_machine + m_enable_ent.return_value = (True, None) + m_prompt.return_value = prompt_value + + m_entitlement_cls = mock.MagicMock() + m_entitlement_obj = m_entitlement_cls.return_value + m_entitlement_obj.user_facing_status.return_value = ( + UserFacingStatus.INACTIVE, + "", + ) + m_entitlement_obj.applicability_status.return_value = ( + ApplicabilityStatus.APPLICABLE, + "", + ) + m_entitlement_obj.name = "esm-infra" + + with mock.patch( + "uaclient.cli.fix.entitlement_factory", + return_value=m_entitlement_cls, + ): + with mock.patch("uaclient.util.sys") as m_sys: + m_stdout = mock.MagicMock() + type(m_sys).stdout = m_stdout + type(m_stdout).encoding = mock.PropertyMock( + return_value="utf-8" + ) + out, _ = capsys.readouterr() + assert ( + expected_fix_status, + expected_unfixed_pkgs, + ) == execute_fix_plan(fix_plan, dry_run=False, cfg=cfg) + + out, _ = capsys.readouterr() + assert expected_output in out + + @pytest.mark.parametrize( + "fix_plan,prompt_value,expected_output," + "expected_fix_status,expected_unfixed_pkgs", + ( + ( + FixPlanResult( + title="USN-###", + description="test", + expected_status=FixStatus.SYSTEM_NON_VULNERABLE.value.msg, + affected_packages=["pkg1"], + plan=[ + FixPlanAttachStep( + data=AttachData( + reason="test", + 
required_service=ESM_INFRA_POCKET, + source_packages=["pkg1"], + ), + order=1, + ), + FixPlanEnableStep( + data=EnableData( + service=ESM_INFRA_POCKET, + source_packages=["pkg1"], + ), + order=2, + ), + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["pkg1"], + source_packages=["pkg1"], + pocket=ESM_INFRA_POCKET, + ), + order=3, + ), + ], + warnings=[], + error=None, + additional_data=None, + ), + "r", + textwrap.dedent( + """\ + 1 affected source package is installed: pkg1 + (1/1) pkg1: + A fix is available in Ubuntu Pro: ESM Infra. + """ + ) + + messages.SECURITY_UPDATE_NOT_INSTALLED_EXPIRED + + "\n" + + messages.PROMPT_EXPIRED_ENTER_TOKEN + + "\n" + + colorize_commands([["pro detach"]]) + + "\n" + + colorize_commands([["pro attach pro_token"]]) + + "\n" + + colorize_commands( + [["apt update && apt install --only-upgrade" " -y pkg1"]] + ) + + "\n\n" + + "{check} USN-### is resolved.\n".format( + check=messages.OKGREEN_CHECK + ), + FixStatus.SYSTEM_NON_VULNERABLE, + [], + ), + ( + FixPlanResult( + title="USN-###", + description="test", + expected_status=FixStatus.SYSTEM_NON_VULNERABLE.value.msg, + affected_packages=["pkg1"], + plan=[ + FixPlanAttachStep( + data=AttachData( + reason="test", + required_service=ESM_INFRA_POCKET, + source_packages=["pkg1"], + ), + order=1, + ), + FixPlanEnableStep( + data=EnableData( + service=ESM_INFRA_POCKET, + source_packages=["pkg1"], + ), + order=2, + ), + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["pkg1"], + source_packages=["pkg1"], + pocket=ESM_INFRA_POCKET, + ), + order=3, + ), + ], + warnings=[], + error=None, + additional_data=None, + ), + "c", + textwrap.dedent( + """\ + 1 affected source package is installed: pkg1 + (1/1) pkg1: + A fix is available in Ubuntu Pro: ESM Infra. 
+ """ + ) + + messages.SECURITY_UPDATE_NOT_INSTALLED_EXPIRED + + "\n\n" + + "1 package is still affected: pkg1" + + "\n" + + "{check} USN-### is not resolved.\n".format( + check=messages.FAIL_X + ), + FixStatus.SYSTEM_STILL_VULNERABLE, + [ + UnfixedPackage( + pkg="pkg1", + unfixed_reason=messages.SECURITY_UA_SERVICE_WITH_EXPIRED_SUB.format( # noqa + service="esm-infra" + ), + ) + ], + ), + ), + ) + @mock.patch("uaclient.cli.action_detach") + @mock.patch(M_PATH + "attach_with_token") + @mock.patch(M_PATH + "_check_subscription_is_expired") + @mock.patch(M_PATH + "_handle_subscription_for_required_service") + @mock.patch("uaclient.apt.run_apt_update_command") + @mock.patch("uaclient.apt.run_apt_command") + @mock.patch("uaclient.system.should_reboot", return_value=False) + @mock.patch("uaclient.util.prompt_choices") + @mock.patch(M_PATH + "get_cloud_type", return_value=(None, None)) + @mock.patch("builtins.input", return_value="pro_token") + def test_execute_fix_plan_when_subscription_is_expired( + self, + _m_input, + _m_get_cloud_type, + m_prompt, + _m_should_reboot, + _m_run_apt_command, + _m_run_apt_update, + m_handle_required_service, + m_check_subscription_expired, + m_attach_with_token, + _m_action_detach, + fix_plan, + prompt_value, + expected_output, + expected_fix_status, + expected_unfixed_pkgs, + FakeConfig, + capsys, + ): + cfg = FakeConfig().for_attached_machine() + m_handle_required_service.return_value = True + m_check_subscription_expired.return_value = True + m_prompt.return_value = prompt_value + + with mock.patch("uaclient.util.sys") as m_sys: + m_stdout = mock.MagicMock() + type(m_sys).stdout = m_stdout + type(m_stdout).encoding = mock.PropertyMock(return_value="utf-8") + out, _ = capsys.readouterr() + assert ( + expected_fix_status, + expected_unfixed_pkgs, + ) == execute_fix_plan(fix_plan, dry_run=False, cfg=cfg) + + out, _ = capsys.readouterr() + assert expected_output in out + + @pytest.mark.parametrize( + "fix_plan,check_notices,cloud_type,expected_output," + "expected_fix_status,expected_unfixed_pkgs", + ( + ( # No affected_packages listed, but reboot required + FixPlanResult( + title="USN-###", + description="test", + expected_status=FixStatus.SYSTEM_NOT_AFFECTED.value.msg, + affected_packages=[], + plan=[ + FixPlanNoOpStep( + data=NoOpData( + status=FixPlanNoOpStatus.NOT_AFFECTED.value + ), + order=1, + ) + ], + warnings=[], + error=None, + additional_data=None, + ), + False, + (None, None), + textwrap.dedent( + """\ + No affected source packages are installed. + + {check} USN-### does not affect your system. + """.format( + check=messages.OKGREEN_CHECK # noqa: E126 + ) # noqa: E126 + ), + FixStatus.SYSTEM_NOT_AFFECTED, + [], + ), + ( # installing package fix and reboot required + FixPlanResult( + title="USN-###", + description="test", + expected_status=FixStatus.SYSTEM_NON_VULNERABLE.value.msg, + affected_packages=["pkg1"], + plan=[ + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["pkg1"], + source_packages=["pkg1"], + pocket=STANDARD_UPDATES_POCKET, + ), + order=1, + ) + ], + warnings=[], + error=None, + additional_data=None, + ), + True, + (None, None), + textwrap.dedent( + """\ + 1 affected source package is installed: pkg1 + (1/1) pkg1: + A fix is available in Ubuntu standard updates. + """ + ) + + colorize_commands( + [["apt update && apt install --only-upgrade" " -y pkg1"]] + ) + + "\n\n" + + "A reboot is required to complete fix operation." 
+ + "\n" + + "{check} USN-### is not resolved.\n".format( + check=messages.FAIL_X + ), + FixStatus.SYSTEM_VULNERABLE_UNTIL_REBOOT, + [], + ), + ), + ) + @mock.patch("uaclient.files.notices.NoticesManager.add") + @mock.patch("uaclient.system.should_reboot", return_value=True) + @mock.patch("uaclient.cli.fix.get_cloud_type") + @mock.patch("uaclient.apt.run_apt_update_command") + @mock.patch("uaclient.apt.run_apt_command") + def test_execute_fix_plan_when_reboot_required_needed( + self, + _m_apt_run_cmd, + _m_apt_run_update, + m_get_cloud_type, + _m_should_reboot, + m_add_notice, + fix_plan, + check_notices, + cloud_type, + expected_output, + expected_fix_status, + expected_unfixed_pkgs, + capsys, + FakeConfig, + ): + m_get_cloud_type.return_value = cloud_type + with mock.patch("uaclient.util.sys") as m_sys: + m_stdout = mock.MagicMock() + type(m_sys).stdout = m_stdout + type(m_stdout).encoding = mock.PropertyMock(return_value="utf-8") + assert ( + expected_fix_status, + expected_unfixed_pkgs, + ) == execute_fix_plan(fix_plan, dry_run=False, cfg=FakeConfig()) + + out, _ = capsys.readouterr() + assert expected_output in out + + if check_notices: + assert [ + mock.call( + Notice.ENABLE_REBOOT_REQUIRED, + messages.ENABLE_REBOOT_REQUIRED_TMPL.format( + operation="fix operation" + ), + ) + ] == m_add_notice.call_args_list + + +class TestExecuteAptUpgradeStep: + @mock.patch("uaclient.util.we_are_currently_root", return_value=False) + def test_execute_apt_upgrade_step_return_early_if_non_root( + self, + _m_we_are_root, + capsys, + ): + step = FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["pkg1"], + source_packages=["pkg1"], + pocket=STANDARD_UPDATES_POCKET, + ), + order=1, + ) + context = FixContext( + title="test", + dry_run=False, + affected_pkgs=[], + cfg=None, + ) + + with mock.patch("uaclient.util.sys") as m_sys: + m_stdout = mock.MagicMock() + type(m_sys).stdout = m_stdout + type(m_stdout).encoding = mock.PropertyMock(return_value="utf-8") + _execute_apt_upgrade_step(context, step) + + out, _ = capsys.readouterr() + assert messages.SECURITY_APT_NON_ROOT in out + + +class TestExecuteAttachStep: + def test_execute_attach_step_print_message_succeed_on_dry_run( + self, capsys, FakeConfig + ): + step = FixPlanAttachStep( + data=AttachData( + reason="test", + required_service=ESM_INFRA_POCKET, + source_packages=["slsrc"], + ), + order=1, + ) + context = FixContext( + title="test", + dry_run=True, + affected_pkgs=[], + cfg=FakeConfig(), + ) + + with mock.patch("uaclient.util.sys") as m_sys: + m_stdout = mock.MagicMock() + type(m_sys).stdout = m_stdout + type(m_stdout).encoding = mock.PropertyMock(return_value="utf-8") + _execute_attach_step(context, step) + + out, _ = capsys.readouterr() + assert messages.SECURITY_DRY_RUN_UA_NOT_ATTACHED in out + assert context.fix_status == FixStatus.SYSTEM_NON_VULNERABLE + + +class TestExecuteEnableStep: + def test_execute_enable_step_check_service_on_dry_run( + self, capsys, FakeConfig + ): + step = FixPlanEnableStep( + data=EnableData( + service=ESM_INFRA_POCKET, + source_packages=["slsrc"], + ), + order=1, + ) + context = FixContext( + title="test", + dry_run=True, + affected_pkgs=[], + cfg=FakeConfig(), + ) + + m_entitlement_cls = mock.MagicMock() + m_entitlement_obj = m_entitlement_cls.return_value + m_entitlement_obj.user_facing_status.return_value = ( + UserFacingStatus.INACTIVE, + "", + ) + m_entitlement_obj.applicability_status.return_value = ( + ApplicabilityStatus.APPLICABLE, + "", + ) + m_entitlement_obj.name = "esm-infra" + + with 
mock.patch("uaclient.util.sys") as m_sys: + m_stdout = mock.MagicMock() + type(m_sys).stdout = m_stdout + type(m_stdout).encoding = mock.PropertyMock(return_value="utf-8") + with mock.patch( + "uaclient.cli.fix.entitlement_factory", + return_value=m_entitlement_cls, + ): + _execute_enable_step(context, step) + + out, _ = capsys.readouterr() + assert ( + messages.SECURITY_DRY_RUN_UA_SERVICE_NOT_ENABLED.format( + service="esm-infra" + ) + in out + ) + assert context.fix_status == FixStatus.SYSTEM_NON_VULNERABLE + + +class TestHandleSubscriptionForRequiredService: + @pytest.mark.parametrize( + "dry_run", + ((True), (False)), + ) + def test_handle_subscription_when_service_enabled(self, dry_run, capsys): + m_entitlement_cls = mock.MagicMock() + m_entitlement_obj = m_entitlement_cls.return_value + m_entitlement_obj.user_facing_status.return_value = ( + UserFacingStatus.ACTIVE, + "", + ) + m_entitlement_obj.name = "esm-infra" + + with mock.patch("uaclient.util.sys") as m_sys: + m_stdout = mock.MagicMock() + type(m_sys).stdout = m_stdout + type(m_stdout).encoding = mock.PropertyMock(return_value="utf-8") + with mock.patch( + "uaclient.cli.fix.entitlement_factory", + return_value=m_entitlement_cls, + ): + assert _handle_subscription_for_required_service( + service="esm-infra", + cfg=None, + dry_run=dry_run, + ) + out, _ = capsys.readouterr() + assert "" == out + + +class TestPerformMagicAttach: + @mock.patch(M_PATH + "_initiate") + @mock.patch(M_PATH + "_wait") + @mock.patch(M_PATH + "_revoke") + def test_magic_attach_revoke_token_if_wait_fails( + self, + m_initiate, + m_wait, + m_revoke, + ): + m_initiate.return_value = mock.MagicMock( + token="token", user_code="user_code" + ) + m_wait.side_effect = exceptions.MagicAttachTokenError() + + with pytest.raises(exceptions.MagicAttachTokenError): + _perform_magic_attach(cfg=None) + + assert 1 == m_initiate.call_count + assert 1 == m_wait.call_count + assert 1 == m_revoke.call_count + + +class TestFixUSN: + @mock.patch("uaclient.apt.run_apt_update_command") + @mock.patch("uaclient.apt.run_apt_command") + @mock.patch(M_PATH + "_prompt_for_attach", return_value=False) + @mock.patch(M_PATH + "usn_plan") + def test_fix_usn_with_related_usns( + self, + m_usn_plan, + _m_prompt_for_attach, + _m_run_apt_command, + _m_run_apt_update, + capsys, + FakeConfig, + ): + fix_plan = USNSFixPlanResult( + usns_data=USNFixPlanResult( + expected_status=FixStatus.SYSTEM_NON_VULNERABLE, + usns=[ + FixPlanUSNResult( + target_usn_plan=FixPlanResult( + title="USN-1235-1", + description="test", + expected_status=FixStatus.SYSTEM_NON_VULNERABLE.value.msg, # noqa + affected_packages=["pkg1"], + plan=[ + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["pkg1"], + source_packages=["pkg1"], + pocket=STANDARD_UPDATES_POCKET, + ), + order=1, + ) + ], + warnings=[], + error=None, + additional_data=USNAdditionalData( + associated_cves=[], + associated_launchpad_bugs=[], + ), + ), + related_usns_plan=[ + FixPlanResult( + title="USN-4561-1", + description="test", + expected_status=FixStatus.SYSTEM_NON_VULNERABLE.value.msg, # noqa + affected_packages=["pkg2"], + plan=[ + FixPlanAttachStep( + data=AttachData( + reason="test", + required_service="esm-infra", + source_packages=["pkg2"], + ), + order=1, + ), + FixPlanEnableStep( + data=EnableData( + service="esm-infra", + source_packages=["pkg2"], + ), + order=2, + ), + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["pkg2"], + source_packages=["pkg2"], + pocket=ESM_INFRA_POCKET, + ), + order=3, + ), + ], + 
warnings=[], + error=None, + additional_data=USNAdditionalData( + associated_cves=[], + associated_launchpad_bugs=[], + ), + ), + FixPlanResult( + title="USN-7891-1", + description="test", + expected_status=FixStatus.SYSTEM_NON_VULNERABLE.value.msg, # noqa + affected_packages=["pkg3", "pkg4"], + plan=[ + FixPlanAttachStep( + data=AttachData( + reason="test", + required_service=ESM_APPS_POCKET, + source_packages=["pkg3", "pkg4"], + ), + order=1, + ), + FixPlanEnableStep( + data=EnableData( + service="esm-apps", + source_packages=["pkg3", "pkg4"], + ), + order=2, + ), + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["pkg3", "pkg4"], + source_packages=["pkg3", "pkg4"], + pocket=ESM_APPS_POCKET, + ), + order=3, + ), + ], + warnings=[], + error=None, + additional_data=USNAdditionalData( + associated_cves=[], + associated_launchpad_bugs=[], + ), + ), + FixPlanResult( + title="USN-8221-1", + description="test", + expected_status=FixStatus.SYSTEM_NON_VULNERABLE.value.msg, # noqa + affected_packages=["pkg5"], + plan=[], + warnings=[ + FixPlanWarningSecurityIssueNotFixed( + data=SecurityIssueNotFixedData( + source_packages=["pkg5"], + status="pending", + ), + order=1, + ), + ], + error=None, + additional_data=USNAdditionalData( + associated_cves=[], + associated_launchpad_bugs=[], + ), + ), + ], + ) + ], + ) + ) + m_usn_plan.return_value = fix_plan + issue_id = "USN-1231-1" + + with mock.patch("uaclient.util.sys") as m_sys: + m_stdout = mock.MagicMock() + type(m_sys).stdout = m_stdout + type(m_stdout).encoding = mock.PropertyMock(return_value="utf-8") + actual_ret = fix_usn( + security_issue=issue_id, + dry_run=False, + no_related=False, + cfg=FakeConfig(), + ) + + expected_msg = ( + "\n" + + messages.SECURITY_FIXING_REQUESTED_USN.format(issue_id=issue_id) + + "\n" + + textwrap.dedent( + """\ + 1 affected source package is installed: pkg1 + (1/1) pkg1: + A fix is available in Ubuntu standard updates. + """ + ) + + colorize_commands( + [["apt update && apt install --only-upgrade" " -y pkg1"]] + ) + + "\n\n" + + "{check} USN-1235-1 is resolved.\n".format( + check=messages.OKGREEN_CHECK + ) + + "\n" + + textwrap.dedent( + """\ + Found related USNs: + - USN-4561-1 + - USN-7891-1 + - USN-8221-1 + """ + ) + + "\n" + + textwrap.dedent( + """\ + Fixing related USNs: + - USN-4561-1 + 1 affected source package is installed: pkg2 + (1/1) pkg2: + A fix is available in Ubuntu Pro: ESM Infra. + """ + ) + + "\n" + + "1 package is still affected: pkg2" + + "\n" + + "{check} USN-4561-1 is not resolved.".format( + check=messages.FAIL_X + ) + + "\n\n" + + textwrap.dedent( + """\ + - USN-7891-1 + 2 affected source packages are installed: pkg3, pkg4 + (1/2, 2/2) pkg3, pkg4: + A fix is available in Ubuntu Pro: ESM Apps. + """ + ) + + "\n" + + "2 packages are still affected: pkg3, pkg4" + + "\n" + + "{check} USN-7891-1 is not resolved.".format( + check=messages.FAIL_X + ) + + "\n\n" + + textwrap.dedent( + """\ + - USN-8221-1 + 1 affected source package is installed: pkg5 + (1/1) pkg5: + A fix is coming soon. Try again tomorrow. + """ + ) + + "\n" + + "1 package is still affected: pkg5" + + "\n" + + "{check} USN-8221-1 is not resolved.".format( + check=messages.FAIL_X + ) + + "\n\n" + + "Summary:" + + "\n" + + "{check} USN-1231-1 [requested] is resolved.".format( + check=messages.OKGREEN_CHECK + ) + + "\n" + + "{check} USN-4561-1 [related] is not resolved.".format( + check=messages.FAIL_X + ) + + "\n" + + " - pkg2: esm-infra is required for upgrade." 
+ + "\n" + + "{check} USN-7891-1 [related] is not resolved.".format( + check=messages.FAIL_X + ) + + "\n" + + " - pkg3: esm-apps is required for upgrade." + + "\n" + + " - pkg4: esm-apps is required for upgrade." + + "\n" + + "{check} USN-8221-1 [related] is not resolved.".format( + check=messages.FAIL_X + ) + + "\n" + + " - pkg5: A fix is coming soon. Try again tomorrow." + + "\n\n" + + messages.SECURITY_RELATED_USN_ERROR.format(issue_id="USN-1231-1") + ) + out, _ = capsys.readouterr() + assert expected_msg in out + assert FixStatus.SYSTEM_NON_VULNERABLE == actual_ret + + @mock.patch("uaclient.apt.run_apt_update_command") + @mock.patch("uaclient.apt.run_apt_command") + @mock.patch(M_PATH + "usn_plan") + def test_fix_usn_when_no_related_value_is_true( + self, + m_usn_plan, + _m_run_apt_command, + _m_run_apt_update, + capsys, + FakeConfig, + ): + issue_id = "USN-1235-1" + fix_plan = USNSFixPlanResult( + usns_data=USNFixPlanResult( + expected_status=FixStatus.SYSTEM_NON_VULNERABLE, + usns=[ + FixPlanUSNResult( + target_usn_plan=FixPlanResult( + title=issue_id, + description="test", + expected_status=FixStatus.SYSTEM_NON_VULNERABLE.value.msg, # noqa + affected_packages=["pkg1"], + plan=[ + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["pkg1"], + source_packages=["pkg1"], + pocket=STANDARD_UPDATES_POCKET, + ), + order=1, + ) + ], + warnings=[], + error=None, + additional_data=USNAdditionalData( + associated_cves=[], + associated_launchpad_bugs=[], + ), + ), + related_usns_plan=[ + FixPlanResult( + title="USN-4561-1", + description="test", + expected_status=FixStatus.SYSTEM_NON_VULNERABLE.value.msg, # noqa + affected_packages=["pkg2"], + plan=[ + FixPlanAttachStep( + data=AttachData( + reason="test", + required_service="esm-infra", + source_packages=["pkg2"], + ), + order=1, + ), + FixPlanEnableStep( + data=EnableData( + service="esm-infra", + source_packages=["pkg2"], + ), + order=2, + ), + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["pkg2"], + source_packages=["pkg2"], + pocket=ESM_INFRA_POCKET, + ), + order=3, + ), + ], + warnings=[], + error=None, + additional_data=USNAdditionalData( + associated_cves=[], + associated_launchpad_bugs=[], + ), + ), + FixPlanResult( + title="USN-7891-1", + description="test", + expected_status=FixStatus.SYSTEM_NON_VULNERABLE.value.msg, # noqa + affected_packages=["pkg3", "pkg4"], + plan=[ + FixPlanAttachStep( + data=AttachData( + reason="test", + required_service=ESM_APPS_POCKET, + source_packages=["pkg3", "pkg4"], + ), + order=1, + ), + FixPlanEnableStep( + data=EnableData( + service="esm-apps", + source_packages=["pkg3", "pkg4"], + ), + order=2, + ), + FixPlanAptUpgradeStep( + data=AptUpgradeData( + binary_packages=["pkg3", "pkg4"], + source_packages=["pkg3", "pkg4"], + pocket=ESM_APPS_POCKET, + ), + order=3, + ), + ], + warnings=[], + error=None, + additional_data=USNAdditionalData( + associated_cves=[], + associated_launchpad_bugs=[], + ), + ), + ], + ) + ], + ) + ) + m_usn_plan.return_value = fix_plan + + with mock.patch("uaclient.util.sys") as m_sys: + m_stdout = mock.MagicMock() + type(m_sys).stdout = m_stdout + type(m_stdout).encoding = mock.PropertyMock(return_value="utf-8") + actual_ret = fix_usn( + security_issue=issue_id, + dry_run=False, + no_related=True, + cfg=FakeConfig(), + ) + + expected_msg = ( + "USN-1235-1: test" + + "\n\n" + + messages.SECURITY_FIXING_REQUESTED_USN.format(issue_id=issue_id) + + "\n" + + textwrap.dedent( + """\ + 1 affected source package is installed: pkg1 + (1/1) pkg1: + A fix is 
available in Ubuntu standard updates. + """ + ) + + colorize_commands( + [["apt update && apt install --only-upgrade" " -y pkg1"]] + ) + + "\n\n" + + "{check} USN-1235-1 is resolved.\n".format( + check=messages.OKGREEN_CHECK + ) + ) + + out, _ = capsys.readouterr() + assert expected_msg == out + assert FixStatus.SYSTEM_NON_VULNERABLE == actual_ret diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli_refresh.py ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli_refresh.py --- ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli_refresh.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli_refresh.py 2024-01-18 17:34:13.000000000 +0000 @@ -15,7 +15,7 @@ * messages: Update APT and MOTD messages related to UA. You can individually target any of the three specific actions, -by passing it's target to nome to the command. If no `target` +by passing the target name to the command. If no `target` is specified, all targets are refreshed. positional arguments: @@ -75,14 +75,15 @@ else: action_refresh(mock.MagicMock(target=target), cfg=cfg) + @mock.patch("time.sleep") @mock.patch("uaclient.system.subp") - def test_lock_file_exists(self, m_subp, FakeConfig): + def test_lock_file_exists(self, m_subp, m_sleep, FakeConfig): """Check inability to refresh if operation holds lock file.""" cfg = FakeConfig().for_attached_machine() cfg.write_cache("lock", "123:pro disable") with pytest.raises(exceptions.LockHeldError) as err: action_refresh(mock.MagicMock(), cfg=cfg) - assert [mock.call(["ps", "123"])] == m_subp.call_args_list + assert [mock.call(["ps", "123"])] * 12 == m_subp.call_args_list assert ( "Unable to perform: pro refresh.\n" "Operation in progress: pro disable (pid:123)" @@ -99,7 +100,9 @@ FakeConfig, ): """On failure in request_updates_contract emit an error.""" - refresh.side_effect = exceptions.UrlError(mock.MagicMock(), "url") + refresh.side_effect = exceptions.ConnectivityError( + mock.MagicMock(), "url" + ) cfg = FakeConfig.for_attached_machine() diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli_security_status.py ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli_security_status.py --- ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli_security_status.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli_security_status.py 2024-01-18 17:34:13.000000000 +0000 @@ -74,7 +74,6 @@ ): main() out, _err = capsys.readouterr() - print(out) assert re.match(HELP_OUTPUT, out) @pytest.mark.parametrize("output_format", ("json", "yaml", "text")) diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli_status.py ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli_status.py --- ubuntu-advantage-tools-30~23.10/uaclient/cli/tests/test_cli_status.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/cli/tests/test_cli_status.py 2024-01-18 17:34:13.000000000 +0000 @@ -958,14 +958,14 @@ _m_on_supported_kernel, FakeConfig, ): - """Raise UrlError on connectivity issues""" - m_get_avail_resources.side_effect = exceptions.UrlError( - socket.gaierror(-2, "Name or service not known"), "url" + """Raise ConnectivityError on connectivity issues""" + m_get_avail_resources.side_effect = exceptions.ConnectivityError( + cause=socket.gaierror(-2, "Name or service not known"), url="url" ) cfg = FakeConfig() - with pytest.raises(exceptions.UrlError): + with pytest.raises(exceptions.ConnectivityError): 
action_status( mock.MagicMock(all=False, simulate_with_token=None), cfg=cfg ) @@ -1023,8 +1023,8 @@ "exception_to_throw,exception_type,exception_message", ( ( - exceptions.UrlError("Not found", "url"), - exceptions.UrlError, + exceptions.ConnectivityError(Exception("Not found"), "url"), + exceptions.ConnectivityError, "Not found", ), ( diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/clouds/identity.py ubuntu-advantage-tools-31.2~23.10/uaclient/clouds/identity.py --- ubuntu-advantage-tools-30~23.10/uaclient/clouds/identity.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/clouds/identity.py 2024-01-18 17:34:13.000000000 +0000 @@ -55,7 +55,9 @@ return (None, NoCloudTypeReason.NO_CLOUD_DETECTED) -def cloud_instance_factory() -> clouds.AutoAttachCloudInstance: +def cloud_instance_factory( + cloud_override: Optional[str] = None, +) -> clouds.AutoAttachCloudInstance: """ :raises CloudFactoryError: if no cloud instance object can be constructed :raises CloudFactoryNoCloudError: if no cloud instance object can be @@ -75,7 +77,11 @@ "gce": gcp.UAAutoAttachGCPInstance, } # type: Dict[str, Type[clouds.AutoAttachCloudInstance]] - cloud_type, _ = get_cloud_type() + if cloud_override is not None: + cloud_type = cloud_override + else: + cloud_type, _ = get_cloud_type() + if not cloud_type: raise exceptions.CloudFactoryNoCloudError() cls = cloud_instance_map.get(cloud_type) diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/config.py ubuntu-advantage-tools-31.2~23.10/uaclient/config.py --- ubuntu-advantage-tools-30~23.10/uaclient/config.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/config.py 2024-01-18 17:34:13.000000000 +0000 @@ -4,7 +4,7 @@ import os from collections import namedtuple from functools import lru_cache, wraps -from typing import Any, Callable, Dict, Optional, Tuple, TypeVar +from typing import Any, Dict, Optional, Tuple from uaclient import ( apt, @@ -75,14 +75,6 @@ event = event_logger.get_event_logger() -# needed for solving mypy errors dealing with _lru_cache_wrapper -# Found at https://github.com/python/mypy/issues/5858#issuecomment-454144705 -S = TypeVar("S", bound=str) - - -def str_cache(func: Callable[..., S]) -> S: - return lru_cache()(func) # type: ignore - class UAConfig: data_paths = { @@ -201,7 +193,7 @@ state_files.user_config_file.write(self.user_config) @property # type: ignore - @str_cache + @lru_cache(maxsize=None) def global_apt_http_proxy(self) -> Optional[str]: global_val = self.user_config.global_apt_http_proxy if global_val: @@ -225,7 +217,7 @@ state_files.user_config_file.write(self.user_config) @property # type: ignore - @str_cache + @lru_cache(maxsize=None) def global_apt_https_proxy(self) -> Optional[str]: global_val = self.user_config.global_apt_https_proxy if global_val: diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/conftest.py ubuntu-advantage-tools-31.2~23.10/uaclient/conftest.py --- ubuntu-advantage-tools-30~23.10/uaclient/conftest.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/conftest.py 2024-01-18 17:34:13.000000000 +0000 @@ -1,6 +1,8 @@ import datetime import io import logging +import os +import shutil from enum import Enum from typing import Any, Dict @@ -17,6 +19,11 @@ raise +shutil.get_terminal_size = mock.MagicMock( + return_value=os.terminal_size((80, 20)) +) + + @pytest.yield_fixture(scope="session", autouse=True) def _subp(): """ diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/contract.py 
ubuntu-advantage-tools-31.2~23.10/uaclient/contract.py --- ubuntu-advantage-tools-30~23.10/uaclient/contract.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/contract.py 2024-01-18 17:34:13.000000000 +0000 @@ -259,14 +259,10 @@ """ headers = self.headers() headers.update({"Authorization": "Bearer {}".format(magic_token)}) + response = self.request_url( + API_V1_GET_MAGIC_ATTACH_TOKEN_INFO, headers=headers + ) - try: - response = self.request_url( - API_V1_GET_MAGIC_ATTACH_TOKEN_INFO, headers=headers - ) - except exceptions.UrlError as e: - LOG.exception(e) - raise exceptions.ConnectivityError() if response.code == 401: raise exceptions.MagicAttachTokenError() if response.code == 503: @@ -283,16 +279,12 @@ def new_magic_attach_token(self) -> Dict[str, Any]: """Create a magic attach token for the user.""" headers = self.headers() + response = self.request_url( + API_V1_NEW_MAGIC_ATTACH, + headers=headers, + method="POST", + ) - try: - response = self.request_url( - API_V1_NEW_MAGIC_ATTACH, - headers=headers, - method="POST", - ) - except exceptions.UrlError as e: - LOG.exception(e) - raise exceptions.ConnectivityError() if response.code == 503: raise exceptions.MagicAttachUnavailable() if response.code != 200: @@ -308,16 +300,12 @@ """Revoke a magic attach token for the user.""" headers = self.headers() headers.update({"Authorization": "Bearer {}".format(magic_token)}) + response = self.request_url( + API_V1_REVOKE_MAGIC_ATTACH, + headers=headers, + method="DELETE", + ) - try: - response = self.request_url( - API_V1_REVOKE_MAGIC_ATTACH, - headers=headers, - method="DELETE", - ) - except exceptions.UrlError as e: - LOG.exception(e) - raise exceptions.ConnectivityError() if response.code == 400: raise exceptions.MagicAttachTokenAlreadyActivated() if response.code == 401: @@ -659,7 +647,7 @@ :raise UbuntuProError: on failure to update contract or error processing contract deltas - :raise UrlError: On failure during a connection + :raise ConnectivityError: On failure during a connection """ orig_entitlements = cfg.machine_token_file.entitlements orig_token = cfg.machine_token diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/daemon/poll_for_pro_license.py ubuntu-advantage-tools-31.2~23.10/uaclient/daemon/poll_for_pro_license.py --- ubuntu-advantage-tools-30~23.10/uaclient/daemon/poll_for_pro_license.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/daemon/poll_for_pro_license.py 2024-02-14 15:37:46.000000000 +0000 @@ -15,7 +15,7 @@ def attempt_auto_attach(cfg: UAConfig, cloud: AutoAttachCloudInstance): try: - with lock.SpinLock( + with lock.RetryLock( cfg=cfg, lock_holder="pro.daemon.attempt_auto_attach" ): actions.auto_attach(cfg, cloud) diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/daemon/retry_auto_attach.py ubuntu-advantage-tools-31.2~23.10/uaclient/daemon/retry_auto_attach.py --- ubuntu-advantage-tools-30~23.10/uaclient/daemon/retry_auto_attach.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/daemon/retry_auto_attach.py 2024-02-14 15:37:46.000000000 +0000 @@ -52,11 +52,9 @@ error_msg=e.body ) elif isinstance(e, api_exceptions.ConnectivityError): - return messages.RETRY_ERROR_DETAIL_CONNECTIVITY_ERROR - elif isinstance(e, api_exceptions.UrlError): return messages.RETRY_ERROR_DETAIL_URL_ERROR_URL.format( url=e.url - ) + ': "{}"'.format(str(e)) + ) + ': "{}"'.format(str(e.cause_error)) elif isinstance(e, api_exceptions.UbuntuProError): return '"{}"'.format(e.msg) else: @@ -120,7 
+118,7 @@ "\n" + auto_attach_status_msg + "\n\n", ) try: - with lock.SpinLock( + with lock.RetryLock( cfg=cfg, lock_holder="pro.daemon.retry_auto_attach.notice_updates", ): diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/daemon/tests/test_poll_for_pro_license.py ubuntu-advantage-tools-31.2~23.10/uaclient/daemon/tests/test_poll_for_pro_license.py --- ubuntu-advantage-tools-30~23.10/uaclient/daemon/tests/test_poll_for_pro_license.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/daemon/tests/test_poll_for_pro_license.py 2024-02-14 15:37:46.000000000 +0000 @@ -26,7 +26,7 @@ @mock.patch(M_PATH + "LOG.debug") @mock.patch(M_PATH + "actions.auto_attach") -@mock.patch(M_PATH + "lock.SpinLock") +@mock.patch(M_PATH + "lock.RetryLock") class TestAttemptAutoAttach: def test_success( self, m_spin_lock, m_auto_attach, m_log_debug, FakeConfig diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/daemon/tests/test_retry_auto_attach.py ubuntu-advantage-tools-31.2~23.10/uaclient/daemon/tests/test_retry_auto_attach.py --- ubuntu-advantage-tools-30~23.10/uaclient/daemon/tests/test_retry_auto_attach.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/daemon/tests/test_retry_auto_attach.py 2024-01-18 17:34:13.000000000 +0000 @@ -43,11 +43,9 @@ 'an error from Canonical servers: "response"', ), ( - exceptions.ConnectivityError(), - "a connectivity error", - ), - ( - exceptions.UrlError(error.URLError("urlerror"), "url"), + exceptions.ConnectivityError( + cause=error.URLError("urlerror"), url="url" + ), 'an error while reaching url: "urlerror"', ), (fakes.FakeUbuntuProError(), '"This is a test"'), diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/defaults.py ubuntu-advantage-tools-31.2~23.10/uaclient/defaults.py --- ubuntu-advantage-tools-30~23.10/uaclient/defaults.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/defaults.py 2024-01-18 17:34:13.000000000 +0000 @@ -60,3 +60,8 @@ USER_CACHE_SUBDIR = "ubuntu-pro" SSL_CERTS_PATH = "/etc/ssl/certs/ca-certificates.crt" + +# used by apport, collect-logs, and tests +APPARMOR_PROFILES = [ + "/etc/apparmor.d/ubuntu_pro_apt_news", +] diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/entitlements/base.py ubuntu-advantage-tools-31.2~23.10/uaclient/entitlements/base.py --- ubuntu-advantage-tools-30~23.10/uaclient/entitlements/base.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/entitlements/base.py 2024-02-14 15:37:46.000000000 +0000 @@ -511,6 +511,16 @@ snap.run_snapd_wait_cmd() + try: + snap.refresh_snap("snapd") + except exceptions.ProcessExecutionError as e: + LOG.warning("Failed to refresh snapd snap", exc_info=e) + event.info( + messages.EXECUTING_COMMAND_FAILED.format( + command="snap refresh snapd" + ) + ) + http_proxy = http.validate_proxy( "http", self.cfg.http_proxy, http.PROXY_VALIDATION_SNAP_HTTP_URL ) @@ -550,6 +560,26 @@ return True + def are_required_packages_installed(self) -> bool: + """install packages necessary to enable a service.""" + required_packages = ( + self.entitlement_cfg.get("entitlement", {}) + .get("directives", {}) + .get("requiredPackages") + ) + + # If we don't have the directive, there is nothing + # to process here + if not required_packages: + return True + + package_names = [package["name"] for package in required_packages] + installed_packages = apt.get_installed_packages_names() + + return all( + [required in installed_packages for required in package_names] + ) + def 
handle_required_packages(self) -> bool: """install packages necessary to enable a service.""" required_packages = ( @@ -594,6 +624,11 @@ for package in required_packages if package.get("removeOnDisable", False) ] + # If none of the packages have removeOnDisable, then there is nothing + # to process here + if len(package_names) == 0: + return True + LOG.debug("Uninstalling packages %r", package_names) package_names_str = " ".join(package_names) event.info( @@ -658,7 +693,10 @@ ret = [] for service in self.incompatible_services: ent_status, _ = service.entitlement(self.cfg).application_status() - if ent_status == ApplicationStatus.ENABLED: + if ent_status in ( + ApplicationStatus.ENABLED, + ApplicationStatus.WARNING, + ): ret.append(service) return ret @@ -798,6 +836,27 @@ ), ) + applicability_status, _ = self.applicability_status() + # applicability_status() returns APPLICABLE if not self.entitlement_cfg + # but we want to be more strict and prevent disabling if + # not self.entitlement_cfg, so we check it explicitly here. + # This prevents e.g. landscape from being disabled on jammy when it + # doesn't exist for jammy yet but was manually configured separately + # from pro. + if ( + not self.entitlement_cfg + or applicability_status == ApplicabilityStatus.INAPPLICABLE + ): + return ( + False, + CanDisableFailure( + CanDisableFailureReason.NOT_APPLICABLE, + message=messages.CANNOT_DISABLE_NOT_APPLICABLE.format( + title=self.title + ), + ), + ) + if self.dependent_services and not ignore_dependent_services: if self.detect_dependent_services(): return ( @@ -1095,6 +1154,8 @@ if application_status == ApplicationStatus.DISABLED: return UserFacingStatus.INACTIVE, explanation + elif application_status == ApplicationStatus.WARNING: + return UserFacingStatus.WARNING, explanation warning, warn_msg = self.enabled_warning_status() @@ -1271,5 +1332,5 @@ if self._is_sources_list_updated: return event.info(messages.APT_UPDATING_LIST.format(name="standard Ubuntu")) - apt.update_sources_list("/etc/apt/sources.list") + apt.update_sources_list(apt.get_system_sources_file()) self._is_sources_list_updated = True diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/entitlements/entitlement_status.py ubuntu-advantage-tools-31.2~23.10/uaclient/entitlements/entitlement_status.py --- ubuntu-advantage-tools-30~23.10/uaclient/entitlements/entitlement_status.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/entitlements/entitlement_status.py 2024-01-18 17:34:13.000000000 +0000 @@ -12,6 +12,7 @@ ENABLED = object() DISABLED = object() + WARNING = object() @enum.unique @@ -116,6 +117,7 @@ """ ALREADY_DISABLED = object() + NOT_APPLICABLE = object() ACTIVE_DEPENDENT_SERVICES = object() PURGE_NOT_SUPPORTED = object() NOT_FOUND_DEPENDENT_SERVICE = object() diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/entitlements/esm.py ubuntu-advantage-tools-31.2~23.10/uaclient/entitlements/esm.py --- ubuntu-advantage-tools-30~23.10/uaclient/entitlements/esm.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/entitlements/esm.py 2024-01-18 17:34:13.000000000 +0000 @@ -1,12 +1,13 @@ import os from typing import Tuple, Type, Union -from uaclient import gpg, messages, system -from uaclient.apt import APT_KEYS_DIR, ESM_REPO_FILE_CONTENT, KEYRINGS_DIR +from uaclient import messages, system +from uaclient.apt import APT_KEYS_DIR, DEB822_REPO_FILE_CONTENT, KEYRINGS_DIR from uaclient.defaults import ESM_APT_ROOTDIR from uaclient.entitlements import repo from 
uaclient.entitlements.base import UAEntitlement from uaclient.entitlements.entitlement_status import CanDisableFailure +from uaclient.util import set_filename_extension class ESMBaseEntitlement(repo.RepoEntitlement): @@ -35,37 +36,58 @@ # Ugly? Yes, but so is python < 3.8 without removeprefix assert self.name.startswith("esm-") esm_name = self.name[len("esm-") :] - repo_filename = os.path.normpath( - ESM_APT_ROOTDIR + self.repo_list_file_tmpl.format(name=self.name), - ) - keyring_file = self.repo_key_file - - # No need to create if already present - if os.path.exists(repo_filename): + sources_repo_filename = set_filename_extension( + os.path.normpath( + ESM_APT_ROOTDIR + self.repo_file, + ), + "sources", + ) + list_repo_filename = set_filename_extension( + os.path.normpath( + ESM_APT_ROOTDIR + self.repo_file, + ), + "list", + ) + + # No need to create if any format already present + if os.path.exists(sources_repo_filename) or os.path.exists( + list_repo_filename + ): return - system.write_file( - repo_filename, - ESM_REPO_FILE_CONTENT.format(name=esm_name, series=series), + esm_url = "https://esm.ubuntu.com/{name}/ubuntu".format(name=esm_name) + suites = "{series}-{name}-security {series}-{name}-updates".format( + series=series, name=esm_name ) - # Set up GPG key - source_keyring_file = os.path.join(KEYRINGS_DIR, keyring_file) - destination_keyring_file = os.path.normpath( - ESM_APT_ROOTDIR + APT_KEYS_DIR + keyring_file + # When writing, use the sources format by default + system.write_file( + sources_repo_filename, + DEB822_REPO_FILE_CONTENT.format( + url=esm_url, + suites=suites, + keyrings_dir=KEYRINGS_DIR, + keyring_file=self.repo_key_file, + deb_src="", + ), ) - os.makedirs(os.path.dirname(destination_keyring_file), exist_ok=True) - gpg.export_gpg_key(source_keyring_file, destination_keyring_file) def disable_local_esm_repo(self) -> None: keyring_file = os.path.normpath( ESM_APT_ROOTDIR + APT_KEYS_DIR + self.repo_key_file ) + system.ensure_file_absent(keyring_file) + repo_filename = os.path.normpath( - ESM_APT_ROOTDIR + self.repo_list_file_tmpl.format(name=self.name), + ESM_APT_ROOTDIR + self.repo_file, + ) + # Remove any instance of the file present in the folder + system.ensure_file_absent( + set_filename_extension(repo_filename, "sources") + ) + system.ensure_file_absent( + set_filename_extension(repo_filename, "list") ) - system.ensure_file_absent(repo_filename) - system.ensure_file_absent(keyring_file) class ESMAppsEntitlement(ESMBaseEntitlement): diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/entitlements/landscape.py ubuntu-advantage-tools-31.2~23.10/uaclient/entitlements/landscape.py --- ubuntu-advantage-tools-30~23.10/uaclient/entitlements/landscape.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/entitlements/landscape.py 2024-01-18 17:34:13.000000000 +0000 @@ -1,20 +1,13 @@ import logging -import os from typing import Any, Dict, Optional, Tuple -from uaclient import apt, event_logger, exceptions, messages, system, util +from uaclient import event_logger, exceptions, messages, system, util from uaclient.entitlements.base import UAEntitlement from uaclient.entitlements.entitlement_status import ApplicationStatus LOG = logging.getLogger(util.replace_top_level_logger_name(__name__)) event = event_logger.get_event_logger() -LANDSCAPE_CLIENT_PACKAGE_NAME = "landscape-client" -LANDSCAPE_CLIENT_CONFIG_PATH = "/etc/landscape/client.conf" -LANDSCAPE_CLIENT_CONFIG_PATH_DISABLE_BACKUP = ( - "/etc/landscape/client.conf.pro-disable-backup" -) - 
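# --- Editorial sketch (not part of the diff) --------------------------------
# The new UAEntitlement.are_required_packages_installed() helper added in the
# base.py hunk above reads the contract "requiredPackages" directive and checks
# it against the installed package set; the reworked landscape.py
# application_status() below relies on it. The directive shape it expects looks
# roughly like this; the package names are illustrative assumptions, not values
# taken from a real contract.
example_entitlement_cfg = {
    "entitlement": {
        "directives": {
            "requiredPackages": [
                {"name": "landscape-client"},
                # removeOnDisable marks packages that should be removed again
                # when the service is disabled.
                {"name": "example-helper-package", "removeOnDisable": True},
            ]
        }
    }
}
# -----------------------------------------------------------------------------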
class LandscapeEntitlement(UAEntitlement): name = "landscape" @@ -68,50 +61,29 @@ event.info(str(e).strip()) event.warning(str(e), self.name) - LOG.debug( - "Backing up %s as %s", - LANDSCAPE_CLIENT_CONFIG_PATH, - LANDSCAPE_CLIENT_CONFIG_PATH_DISABLE_BACKUP, - ) - event.info( - messages.BACKING_UP_FILE.format( - original=LANDSCAPE_CLIENT_CONFIG_PATH, - backup=LANDSCAPE_CLIENT_CONFIG_PATH_DISABLE_BACKUP, - ) - ) - try: - os.rename( - LANDSCAPE_CLIENT_CONFIG_PATH, - LANDSCAPE_CLIENT_CONFIG_PATH_DISABLE_BACKUP, - ) - except FileNotFoundError as e: - LOG.error(e) - event.info(str(e)) - event.warning(str(e), self.name) + event.info(messages.LANDSCAPE_CONFIG_REMAINS) return True def application_status( self, ) -> Tuple[ApplicationStatus, Optional[messages.NamedMessage]]: - if apt.is_installed(LANDSCAPE_CLIENT_PACKAGE_NAME): + if ( + self.are_required_packages_installed() + and system.is_systemd_unit_active("landscape-client") + ): return (ApplicationStatus.ENABLED, None) else: return ( ApplicationStatus.DISABLED, - messages.LANDSCAPE_CLIENT_NOT_INSTALLED, + messages.LANDSCAPE_SERVICE_NOT_ACTIVE, ) def enabled_warning_status( self, ) -> Tuple[bool, Optional[messages.NamedMessage]]: - if not os.path.exists(LANDSCAPE_CLIENT_CONFIG_PATH): - return ( - True, - messages.LANDSCAPE_NOT_CONFIGURED, - ) - # This check wrongly gives warning when non-root + # This will become obsolete soon: #2864 if util.we_are_currently_root(): try: system.subp( @@ -123,12 +95,6 @@ messages.LANDSCAPE_NOT_REGISTERED, ) - if not system.is_systemd_unit_active("landscape-client"): - return ( - True, - messages.LANDSCAPE_SERVICE_NOT_ACTIVE, - ) - return False, None def process_contract_deltas( diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/entitlements/livepatch.py ubuntu-advantage-tools-31.2~23.10/uaclient/entitlements/livepatch.py --- ubuntu-advantage-tools-30~23.10/uaclient/entitlements/livepatch.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/entitlements/livepatch.py 2024-02-14 15:37:46.000000000 +0000 @@ -106,6 +106,16 @@ snap.run_snapd_wait_cmd() + try: + snap.refresh_snap("snapd") + except exceptions.ProcessExecutionError as e: + LOG.warning("Failed to refresh snapd snap", exc_info=e) + event.info( + messages.EXECUTING_COMMAND_FAILED.format( + command="snap refresh snapd" + ) + ) + http_proxy = http.validate_proxy( "http", self.cfg.http_proxy, http.PROXY_VALIDATION_SNAP_HTTP_URL ) @@ -214,7 +224,17 @@ if not livepatch.is_livepatch_installed(): return (ApplicationStatus.DISABLED, messages.LIVEPATCH_NOT_ENABLED) - if livepatch.status() is None: + try: + livepatch_status = livepatch.status() + except exceptions.ProcessExecutionError as e: + return ( + ApplicationStatus.WARNING, + messages.LIVEPATCH_CLIENT_FAILURE_WARNING.format( + livepatch_error=e.stderr, + ), + ) + + if livepatch_status is None: # TODO(May want to parse INACTIVE/failure assessment) return ( ApplicationStatus.DISABLED, @@ -249,6 +269,7 @@ True, messages.LIVEPATCH_KERNEL_UPGRADE_REQUIRED, ) + # if on_supported_kernel returns UNKNOWN we default to no warning # because there would be no way for a user to resolve the warning return False, None diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/entitlements/realtime.py ubuntu-advantage-tools-31.2~23.10/uaclient/entitlements/realtime.py --- ubuntu-advantage-tools-30~23.10/uaclient/entitlements/realtime.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/entitlements/realtime.py 2024-02-14 15:37:46.000000000 +0000 @@ -37,6 +37,7 @@ 
return { GenericRealtime.variant_name: GenericRealtime, NvidiaTegraRealtime.variant_name: NvidiaTegraRealtime, + RaspberryPiRealtime.variant_name: RaspberryPiRealtime, IntelIotgRealtime.variant_name: IntelIotgRealtime, } @@ -148,6 +149,14 @@ is_variant = True check_packages_are_installed = True + +class RaspberryPiRealtime(RealtimeVariant): + variant_name = "rpi" + title = messages.REALTIME_RASPI_TITLE + description = messages.REALTIME_RASPI_DESCRIPTION + is_variant = True + check_packages_are_installed = True + class IntelIotgRealtime(RealtimeVariant): variant_name = "intel-iotg" diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/entitlements/repo.py ubuntu-advantage-tools-31.2~23.10/uaclient/entitlements/repo.py --- ubuntu-advantage-tools-30~23.10/uaclient/entitlements/repo.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/entitlements/repo.py 2024-01-18 17:34:13.000000000 +0000 @@ -31,7 +31,7 @@ class RepoEntitlement(base.UAEntitlement): - repo_list_file_tmpl = "/etc/apt/sources.list.d/ubuntu-{name}.list" + repo_file_tmpl = "/etc/apt/sources.list.d/ubuntu-{name}.{extension}" repo_pref_file_tmpl = "/etc/apt/preferences.d/ubuntu-{name}" repo_url_tmpl = "{}/ubuntu" @@ -54,6 +54,14 @@ return None @property + def repo_file(self) -> str: + extension = "sources" + series = system.get_release_info().series + if series in apt.SERIES_NOT_USING_DEB822: + extension = "list" + return self.repo_file_tmpl.format(name=self.name, extension=extension) + + @property def packages(self) -> List[str]: """debs to install on enablement""" packages = [] @@ -316,7 +324,7 @@ :return: False if apt url is already found on the source file. True otherwise. """ - apt_file = self.repo_list_file_tmpl.format(name=self.name) + apt_file = self.repo_file # If the apt file is commented out, we will assume that we need # to regenerate the apt file, regardless of the apt url delta if all( @@ -383,8 +391,7 @@ old_url = orig_entitlement.get("directives", {}).get("aptURL") if old_url: # Remove original aptURL and auth and rewrite - repo_filename = self.repo_list_file_tmpl.format(name=self.name) - apt.remove_auth_apt_repo(repo_filename, old_url) + apt.remove_auth_apt_repo(self.repo_file, old_url) self.remove_apt_config() self.setup_apt_config() @@ -497,7 +504,7 @@ apt.setup_apt_proxy( http_proxy=http_proxy, https_proxy=https_proxy, proxy_scope=scope ) - repo_filename = self.repo_list_file_tmpl.format(name=self.name) + repo_filename = self.repo_file resource_cfg = self.entitlement_cfg directives = resource_cfg["entitlement"].get("directives", {}) obligations = resource_cfg["entitlement"].get("obligations", {}) @@ -590,7 +597,7 @@ command after removing the apt files. 
""" series = system.get_release_info().series - repo_filename = self.repo_list_file_tmpl.format(name=self.name) + repo_filename = self.repo_file entitlement = self.cfg.machine_token_file.entitlements[self.name].get( "entitlement", {} ) diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/entitlements/tests/test_base.py ubuntu-advantage-tools-31.2~23.10/uaclient/entitlements/tests/test_base.py --- ubuntu-advantage-tools-30~23.10/uaclient/entitlements/tests/test_base.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/entitlements/tests/test_base.py 2024-02-14 16:31:43.000000000 +0000 @@ -154,14 +154,8 @@ class TestUaEntitlement: def test_entitlement_abstract_class(self): """UAEntitlement is abstract requiring concrete methods.""" - with pytest.raises(TypeError) as excinfo: + with pytest.raises(TypeError): base.UAEntitlement() - expected_msg = ( - "Can't instantiate abstract class UAEntitlement with abstract" - " methods _perform_disable, _perform_enable, application_status," - " description, name, title" - ) - assert expected_msg == str(excinfo.value) def test_init_default_sets_up_uaconfig(self): """UAEntitlement sets up a uaconfig instance upon init.""" @@ -1354,21 +1348,22 @@ ), ( [{"name": "package"}], - [mock.call("/etc/apt/sources.list")], + [mock.call()], [mock.call(["package"])], True, ), ( [{"name": "package"}, {"name": "package2"}], - [mock.call("/etc/apt/sources.list")], + [mock.call()], [mock.call(["package", "package2"])], True, ), ], ) - @mock.patch("uaclient.apt.update_sources_list") @mock.patch("uaclient.apt.run_apt_install_command") - @mock.patch("uaclient.apt.run_apt_update_command") + @mock.patch( + "uaclient.entitlements.base.UAEntitlement._update_sources_list" + ) @mock.patch( "uaclient.entitlements.base.UAEntitlement.entitlement_cfg", new_callable=mock.PropertyMock, @@ -1376,9 +1371,8 @@ def test_handle_required_packages( self, m_entitlement_cfg, - m_apt_update, - m_apt_install, m_update_sources_list, + m_apt_install, required_packages_directive, expected_apt_update_calls, expected_apt_install_calls, @@ -1393,7 +1387,6 @@ } assert expected_result == entitlement.handle_required_packages() - assert [] == m_apt_update.call_args_list assert ( expected_apt_update_calls == m_update_sources_list.call_args_list ) @@ -1418,12 +1411,12 @@ ), ( [{"name": "package"}], - [mock.call([], mock.ANY)], + [], True, ), ( [{"name": "package"}, {"name": "package2"}], - [mock.call([], mock.ANY)], + [], True, ), ( diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/entitlements/tests/test_esm.py ubuntu-advantage-tools-31.2~23.10/uaclient/entitlements/tests/test_esm.py --- ubuntu-advantage-tools-30~23.10/uaclient/entitlements/tests/test_esm.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/entitlements/tests/test_esm.py 2024-01-18 17:34:13.000000000 +0000 @@ -5,6 +5,7 @@ from uaclient import apt from uaclient.entitlements.esm import ESMAppsEntitlement, ESMInfraEntitlement +from uaclient.util import set_filename_extension M_PATH = "uaclient.entitlements.esm.ESMInfraEntitlement." M_REPOPATH = "uaclient.entitlements.repo." 
@@ -98,12 +99,8 @@ @mock.patch("uaclient.apt.os.path.exists") @mock.patch("uaclient.apt.system.get_release_info") @mock.patch("uaclient.apt.system.write_file") - @mock.patch("uaclient.apt.os.makedirs") - @mock.patch("uaclient.apt.gpg.export_gpg_key") def test_setup_local_esm_repo( self, - m_export_gpg, - m_makedirs, m_write_file, m_get_release_info, m_exists, @@ -117,61 +114,56 @@ if file_exists: assert m_write_file.call_count == 0 - assert m_makedirs.call_count == 0 - assert m_export_gpg.call_count == 0 else: + suites = "{series}-{name}-security {series}-{name}-updates".format( + name=entitlement.name[4:], series="example" + ) assert m_write_file.call_args_list == [ mock.call( - os.path.normpath( - apt.ESM_APT_ROOTDIR - + entitlement.repo_list_file_tmpl.format( - name=entitlement.name - ), - ), - apt.ESM_REPO_FILE_CONTENT.format( - name=entitlement.name[4:], series="example" - ), - ) - ] - - assert m_makedirs.call_args_list == [ - mock.call( - os.path.dirname( + set_filename_extension( os.path.normpath( - apt.ESM_APT_ROOTDIR - + apt.APT_KEYS_DIR - + entitlement.repo_key_file + apt.ESM_APT_ROOTDIR + entitlement.repo_file, ), + "sources", ), - exist_ok=True, - ) - ] - - assert m_export_gpg.call_args_list == [ - mock.call( - os.path.join(apt.KEYRINGS_DIR, entitlement.repo_key_file), - os.path.normpath( - apt.ESM_APT_ROOTDIR - + apt.APT_KEYS_DIR - + entitlement.repo_key_file + apt.DEB822_REPO_FILE_CONTENT.format( + url="https://esm.ubuntu.com/{name}/ubuntu".format( + name=entitlement.name[4:] + ), + suites=suites, + keyrings_dir=apt.KEYRINGS_DIR, + keyring_file=entitlement.repo_key_file, + deb_src="", ), ) ] @mock.patch("uaclient.apt.system.ensure_file_absent") - def disable_local_esm_repo(self, m_ensure_file_absent, entitlement): + def test_disable_local_esm_repo(self, m_ensure_file_absent, entitlement): entitlement.disable_local_esm_repo() assert m_ensure_file_absent.call_args_list == [ mock.call( os.path.normpath( apt.ESM_APT_ROOTDIR - + self.repo_list_file_tmpl.format(name=self.name), + + apt.APT_KEYS_DIR + + entitlement.repo_key_file ) ), mock.call( - os.path.normpath( - apt.ESM_APT_ROOTDIR + apt.APT_KEYS_DIR + self.repo_key_file - ) + set_filename_extension( + os.path.normpath( + apt.ESM_APT_ROOTDIR + entitlement.repo_file, + ), + "sources", + ), + ), + mock.call( + set_filename_extension( + os.path.normpath( + apt.ESM_APT_ROOTDIR + entitlement.repo_file, + ), + "list", + ), ), ] diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/entitlements/tests/test_landscape.py ubuntu-advantage-tools-31.2~23.10/uaclient/entitlements/tests/test_landscape.py --- ubuntu-advantage-tools-30~23.10/uaclient/entitlements/tests/test_landscape.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/entitlements/tests/test_landscape.py 2024-01-18 17:34:13.000000000 +0000 @@ -99,174 +99,126 @@ @pytest.mark.parametrize( [ "subp_sideeffect", - "rename_sideeffect", "expected_subp_calls", - "expected_rename_calls", "expected_result", ], [ ( None, - None, [mock.call(["landscape-config", "--disable"])], - [ - mock.call( - "/etc/landscape/client.conf", - "/etc/landscape/client.conf.pro-disable-backup", - ) - ], True, ), ( exceptions.ProcessExecutionError("test"), - None, - [mock.call(["landscape-config", "--disable"])], - [ - mock.call( - "/etc/landscape/client.conf", - "/etc/landscape/client.conf.pro-disable-backup", - ) - ], - True, - ), - ( - None, - FileNotFoundError(), [mock.call(["landscape-config", "--disable"])], - [ - mock.call( - "/etc/landscape/client.conf", - 
"/etc/landscape/client.conf.pro-disable-backup", - ) - ], True, ), ], ) - @mock.patch("os.rename") @mock.patch("uaclient.system.subp") def test_perform_disable( self, m_subp, - m_rename, subp_sideeffect, - rename_sideeffect, expected_subp_calls, - expected_rename_calls, expected_result, FakeConfig, ): m_subp.side_effect = subp_sideeffect - m_rename.side_effect = rename_sideeffect landscape = LandscapeEntitlement(FakeConfig()) assert expected_result == landscape._perform_disable() assert expected_subp_calls == m_subp.call_args_list - assert expected_rename_calls == m_rename.call_args_list @pytest.mark.parametrize( [ - "is_installed", - "expected_is_installed_calls", + "are_required_packages_installed", + "is_systemd_unit_active", "expected_result", ], [ ( False, - [mock.call("landscape-client")], + False, ( ApplicationStatus.DISABLED, - messages.LANDSCAPE_CLIENT_NOT_INSTALLED, + messages.LANDSCAPE_SERVICE_NOT_ACTIVE, ), ), ( + False, + True, + ( + ApplicationStatus.DISABLED, + messages.LANDSCAPE_SERVICE_NOT_ACTIVE, + ), + ), + ( + True, + False, + ( + ApplicationStatus.DISABLED, + messages.LANDSCAPE_SERVICE_NOT_ACTIVE, + ), + ), + ( + True, True, - [mock.call("landscape-client")], (ApplicationStatus.ENABLED, None), ), ], ) - @mock.patch("uaclient.apt.is_installed") + @mock.patch("uaclient.system.is_systemd_unit_active") + @mock.patch( + "uaclient.entitlements.base.UAEntitlement.are_required_packages_installed" # noqa: E501 + ) def test_application_status( self, - m_is_installed, - is_installed, - expected_is_installed_calls, + m_are_required_packages_installed, + m_is_systemd_unit_active, + are_required_packages_installed, + is_systemd_unit_active, expected_result, FakeConfig, ): - m_is_installed.return_value = is_installed + m_are_required_packages_installed.return_value = ( + are_required_packages_installed + ) + m_is_systemd_unit_active.return_value = is_systemd_unit_active landscape = LandscapeEntitlement(FakeConfig()) assert expected_result == landscape.application_status() - assert expected_is_installed_calls == m_is_installed.call_args_list @pytest.mark.parametrize( [ - "exists", "we_are_currently_root", "subp_sideeffect", - "unit_active", "expected_subp_calls", "expected_result", ], [ ( - False, - None, - None, - None, - [], - (True, messages.LANDSCAPE_NOT_CONFIGURED), - ), - ( - True, True, exceptions.ProcessExecutionError("test"), - None, [mock.call(mock.ANY)], (True, messages.LANDSCAPE_NOT_REGISTERED), ), - ( - True, - True, - None, - False, - [mock.call(mock.ANY)], - (True, messages.LANDSCAPE_SERVICE_NOT_ACTIVE), - ), - (True, True, None, True, [mock.call(mock.ANY)], (False, None)), - ( - True, - False, - None, - False, - [], - (True, messages.LANDSCAPE_SERVICE_NOT_ACTIVE), - ), - (True, False, None, True, [], (False, None)), + (True, None, [mock.call(mock.ANY)], (False, None)), + (False, None, [], (False, None)), ], ) - @mock.patch("uaclient.system.is_systemd_unit_active") @mock.patch("uaclient.system.subp") @mock.patch("uaclient.util.we_are_currently_root") - @mock.patch("os.path.exists") def test_enabled_warning_status( self, - m_exists, m_we_are_currently_root, m_subp, - m_is_systemd_unit_active, - exists, we_are_currently_root, subp_sideeffect, - unit_active, expected_subp_calls, expected_result, FakeConfig, ): - m_exists.return_value = exists m_we_are_currently_root.return_value = we_are_currently_root m_subp.side_effect = subp_sideeffect - m_is_systemd_unit_active.return_value = unit_active landscape = LandscapeEntitlement(FakeConfig()) assert expected_result == 
landscape.enabled_warning_status() assert expected_subp_calls == m_subp.call_args_list diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/entitlements/tests/test_livepatch.py ubuntu-advantage-tools-31.2~23.10/uaclient/entitlements/tests/test_livepatch.py --- ubuntu-advantage-tools-30~23.10/uaclient/entitlements/tests/test_livepatch.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/entitlements/tests/test_livepatch.py 2024-02-14 15:37:46.000000000 +0000 @@ -398,6 +398,12 @@ ["/usr/bin/snap", "wait", "system", "seed.loaded"], capture=True ) ] + mocks_snapd_refresh = [ + mock.call( + ["/usr/bin/snap", "refresh", "snapd"], + capture=True, + ) + ] mocks_livepatch_install = [ mock.call( ["/usr/bin/snap", "install", "canonical-livepatch"], @@ -409,6 +415,7 @@ mocks_snapd_install + mocks_snapd_install_as_a_snap + mocks_snap_wait_seed + + mocks_snapd_refresh + mocks_livepatch_install ) mocks_config = [ @@ -547,6 +554,7 @@ None, None, None, + None, ] assert entitlement.enable() @@ -601,6 +609,7 @@ assert entitlement.enable() assert ( self.mocks_snap_wait_seed + + self.mocks_snapd_refresh + self.mocks_livepatch_install + self.mocks_config in m_subp.call_args_list @@ -653,6 +662,7 @@ mock.call( [SNAP_CMD, "wait", "system", "seed.loaded"], capture=True ), + mock.call([SNAP_CMD, "refresh", "snapd"], capture=True), mock.call( [ livepatch.LIVEPATCH_CMD, @@ -712,6 +722,7 @@ mock.call( [SNAP_CMD, "wait", "system", "seed.loaded"], capture=True ), + mock.call([SNAP_CMD, "refresh", "snapd"], capture=True), mock.call( [ livepatch.LIVEPATCH_CMD, @@ -804,6 +815,7 @@ stderr=stderr_msg, ), True, + True, ] fake_stdout = io.StringIO() @@ -953,3 +965,19 @@ else: assert status == ApplicationStatus.ENABLED assert details is None + + @mock.patch("uaclient.livepatch.is_livepatch_installed", return_value=True) + @mock.patch("uaclient.livepatch.status") + def test_application_status_when_canonical_livepatch_fails( + self, m_status, _m_livepatch_installed, entitlement + ): + m_status.side_effect = exceptions.ProcessExecutionError( + cmd="test", stdout="", stderr="livepatch error" + ) + + status, details = entitlement.application_status() + + assert status == ApplicationStatus.WARNING + assert details == messages.LIVEPATCH_CLIENT_FAILURE_WARNING.format( + livepatch_error="livepatch error" + ) diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/entitlements/tests/test_repo.py ubuntu-advantage-tools-31.2~23.10/uaclient/entitlements/tests/test_repo.py --- ubuntu-advantage-tools-30~23.10/uaclient/entitlements/tests/test_repo.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/entitlements/tests/test_repo.py 2024-01-18 17:34:13.000000000 +0000 @@ -244,6 +244,9 @@ assert [] == m_remove_auth_apt_repo.call_args_list assert 1 == m_check_apt_url_applied.call_count + @pytest.mark.parametrize( + "series,file_extension", (("jammy", "list"), ("noble", "sources")) + ) @mock.patch( "uaclient.entitlements.base.UAEntitlement.process_contract_deltas" ) @@ -262,6 +265,8 @@ m_release_info, m_read_cache, m_process_contract_deltas, + series, + file_extension, entitlement, ): """Remove old apt url when aptURL delta occurs on active service.""" @@ -270,6 +275,7 @@ m_read_cache.return_value = { "services": [{"name": "repotest", "status": "enabled"}] } + m_release_info.return_value.series = series assert entitlement.process_contract_deltas( { "entitlement": { @@ -289,7 +295,10 @@ assert [mock.call()] == m_setup_apt_config.call_args_list apt_auth_remove_calls = [ mock.call( - 
"/etc/apt/sources.list.d/ubuntu-repotest.list", "http://old" + "/etc/apt/sources.list.d/ubuntu-repotest.{}".format( + file_extension + ), + "http://old", ) ] assert apt_auth_remove_calls == m_remove_auth_apt_repo.call_args_list @@ -421,6 +430,9 @@ @pytest.mark.parametrize("should_reboot", (False, True)) @pytest.mark.parametrize("with_pre_install_msg", (False, True)) @pytest.mark.parametrize("packages", (["a"], [], None)) + @pytest.mark.parametrize( + "series,file_extension", (("xenial", "list"), ("noble", "sources")) + ) @mock.patch("uaclient.apt.update_sources_list") @mock.patch("uaclient.apt.setup_apt_proxy") @mock.patch(M_PATH + "system.should_reboot") @@ -446,11 +458,13 @@ caplog_text, event, packages, + series, + file_extension, with_pre_install_msg, should_reboot, ): """On enable add authenticated apt repo and refresh package lists.""" - m_release_info.return_value = mock.MagicMock(series="xenial") + m_release_info.return_value = mock.MagicMock(series=series) m_should_reboot.return_value = should_reboot pre_install_msgs = ["Some pre-install information", "Some more info"] @@ -526,7 +540,9 @@ ) add_apt_calls = [ mock.call( - "/etc/apt/sources.list.d/ubuntu-repotest.list", + "/etc/apt/sources.list.d/ubuntu-repotest.{}".format( + file_extension + ), "http://REPOTEST/ubuntu", "repotest-token", ["xenial"], diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/exceptions.py ubuntu-advantage-tools-31.2~23.10/uaclient/exceptions.py --- ubuntu-advantage-tools-30~23.10/uaclient/exceptions.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/exceptions.py 2024-01-18 17:34:13.000000000 +0000 @@ -19,20 +19,6 @@ pass -class UrlError(IOError): - def __init__( - self, - cause: Exception, - url: str, - ): - if getattr(cause, "reason", None): - cause_error = str(getattr(cause, "reason")) - else: - cause_error = str(cause) - super().__init__(cause_error) - self.url = url - - class ProcessExecutionError(IOError): def __init__( self, @@ -206,10 +192,6 @@ _msg = messages.E_PROXY_AUTH_FAIL -class ConnectivityError(UbuntuProError): - _msg = messages.E_CONNECTIVITY_ERROR - - class ExternalAPIError(UbuntuProError): _formatted_msg = messages.E_EXTERNAL_API_ERROR code = None # type: int @@ -236,6 +218,31 @@ self.url = url +class ConnectivityError(UbuntuProError, IOError): + _formatted_msg = messages.E_CONNECTIVITY_ERROR + + def __init__( + self, + cause: Exception, + url: str, + ): + if getattr(cause, "reason", None): + cause_error = str(getattr(cause, "reason")) + else: + cause_error = str(cause) + IOError.__init__(self, cause_error) + UbuntuProError.__init__(self, cause_error=cause_error, url=url) + + # Even though we already set those variable through UbuntuProError + # we need to set them again to avoid mypy warnings + self.cause_error = cause_error + self.url = url + + +# We are doing that just to keep backwards compatibility +# for our custom UrlError exception +UrlError = ConnectivityError + ############################################################################### # ATTACH/ENABLE # ############################################################################### diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/files/state_files.py ubuntu-advantage-tools-31.2~23.10/uaclient/files/state_files.py --- ubuntu-advantage-tools-30~23.10/uaclient/files/state_files.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/files/state_files.py 2024-01-18 17:34:13.000000000 +0000 @@ -130,7 +130,7 @@ timer_jobs_state_file = DataObjectFile( 
AllTimerJobsState, - UAFile("jobs-status.json"), + UAFile("jobs-status.json", private=False), DataObjectFileFormat.JSON, ) diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/http/__init__.py ubuntu-advantage-tools-31.2~23.10/uaclient/http/__init__.py --- ubuntu-advantage-tools-30~23.10/uaclient/http/__init__.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/http/__init__.py 2024-01-18 17:34:13.000000000 +0000 @@ -168,7 +168,8 @@ except error.HTTPError as e: resp = e except error.URLError as e: - raise exceptions.UrlError(e, url=req.full_url) + LOG.exception(str(e.reason)) + raise exceptions.ConnectivityError(cause=e, url=req.full_url) body = resp.read().decode("utf-8") diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/livepatch.py ubuntu-advantage-tools-31.2~23.10/uaclient/livepatch.py --- ubuntu-advantage-tools-30~23.10/uaclient/livepatch.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/livepatch.py 2024-02-14 15:37:46.000000000 +0000 @@ -130,9 +130,17 @@ out, _ = system.subp( [LIVEPATCH_CMD, "status", "--verbose", "--format", "json"] ) - except exceptions.ProcessExecutionError: - LOG.warning("canonical-livepatch returned error when checking status") - return None + except exceptions.ProcessExecutionError as e: + # only raise an error if there is a legitimate problem, not just lack + # of enablement + if "Machine is not enabled" in e.stderr: + LOG.warning(e.stderr) + return None + LOG.warning( + "canonical-livepatch returned error when checking status:\n%s", + exc_info=e, + ) + raise e try: status_json = json.loads(out) @@ -225,7 +233,11 @@ def _on_supported_kernel_cli() -> Optional[LivepatchSupport]: - lp_status = status() + try: + lp_status = status() + except exceptions.ProcessExecutionError: + return None + if lp_status is None: return None return _convert_str_to_livepatch_support_status(lp_status.supported) diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/lock.py ubuntu-advantage-tools-31.2~23.10/uaclient/lock.py --- ubuntu-advantage-tools-30~23.10/uaclient/lock.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/lock.py 2024-02-14 15:37:46.000000000 +0000 @@ -21,23 +21,41 @@ clear_lock_file() -class SingleAttemptLock: +class RetryLock: """ Context manager for gaining exclusive access to the lock file. + Create a lock file if absent. The lock file will contain a pid of the running process, and a customer-visible description of the lock holder. + The RetryLock will try several times to acquire the lock before giving up. + The number of times to try and how long to sleep in between tries is + configurable. + :param lock_holder: String with the service name or command which is holding the lock. This lock_holder string will be customer visible in status.json. - :raises: LockHeldError if lock is held. + :param sleep_time: Number of seconds to sleep before retrying if the lock + is already held. + :param max_retries: Maximum number of times to try to grab the lock before + giving up and raising a LockHeldError. 
+ :raises: LockHeldError if lock is held after (sleep_time * max_retries) """ - def __init__(self, *_args, cfg: config.UAConfig, lock_holder: str): + def __init__( + self, + *_args, + cfg: config.UAConfig, + lock_holder: str, + sleep_time: int = 10, + max_retries: int = 12 + ): self.cfg = cfg self.lock_holder = lock_holder + self.sleep_time = sleep_time + self.max_retries = max_retries - def __enter__(self): + def grab_lock(self): global clear_lock_file (lock_pid, cur_lock_holder) = self.cfg.check_lock_info() if lock_pid > 0: @@ -55,54 +73,24 @@ ) clear_lock_file = functools.partial(self.cfg.delete_cache_key, "lock") - def __exit__(self, _exc_type, _exc_value, _traceback): - global clear_lock_file - self.cfg.delete_cache_key("lock") - clear_lock_file = None # Unset due to successful lock delete - - -class SpinLock(SingleAttemptLock): - """ - Context manager for gaining exclusive access to the lock file. In contrast - to the SingleAttemptLock, the SpinLock will try several times to acquire - the lock before giving up. The number of times to try and how long to sleep - in between tries is configurable. - - :param lock_holder: String with the service name or command which is - holding the lock. This lock_holder string will be customer visible in - status.json. - :param sleep_time: Number of seconds to sleep before retrying if the lock - is already held. - :param max_retries: Maximum number of times to try to grab the lock before - giving up and raising a LockHeldError. - :raises: LockHeldError if lock is held after (sleep_time * max_retries) - """ - - def __init__( - self, - *_args, - cfg: config.UAConfig, - lock_holder: str, - sleep_time: int = 10, - max_retries: int = 12 - ): - super().__init__(cfg=cfg, lock_holder=lock_holder) - self.sleep_time = sleep_time - self.max_retries = max_retries - def __enter__(self): LOG.debug("spin lock starting for %s", self.lock_holder) tries = 0 while True: try: - super().__enter__() + self.grab_lock() break except exceptions.LockHeldError as e: LOG.debug( - "SpinLock Attempt %d. %s. Spinning...", tries + 1, e.msg + "RetryLock Attempt %d. %s. 
Spinning...", tries + 1, e.msg ) tries += 1 if tries >= self.max_retries: raise e else: time.sleep(self.sleep_time) + + def __exit__(self, _exc_type, _exc_value, _traceback): + global clear_lock_file + self.cfg.delete_cache_key("lock") + clear_lock_file = None # Unset due to successful lock delete diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/log.py ubuntu-advantage-tools-31.2~23.10/uaclient/log.py --- ubuntu-advantage-tools-30~23.10/uaclient/log.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/log.py 2024-02-14 15:37:46.000000000 +0000 @@ -88,8 +88,8 @@ def setup_journald_logging(log_level, logger): logger.setLevel(log_level) - logger.addFilter(RedactionFilter()) console_handler = logging.StreamHandler() console_handler.setFormatter(JsonArrayFormatter()) console_handler.setLevel(log_level) + console_handler.addFilter(RedactionFilter()) logger.addHandler(console_handler) diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/messages/__init__.py ubuntu-advantage-tools-31.2~23.10/uaclient/messages/__init__.py --- ubuntu-advantage-tools-30~23.10/uaclient/messages/__init__.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/messages/__init__.py 2024-02-14 15:37:46.000000000 +0000 @@ -104,7 +104,7 @@ """\ A new version is available: {version} Please run: - sudo apt-get install ubuntu-advantage-tools + sudo apt install ubuntu-pro-client to get the latest bug fixes and new features.""" ) ) @@ -161,12 +161,12 @@ lambda n: t.ngettext( """\ *Your Ubuntu Pro subscription has EXPIRED* -{{pkg_num}} additional security update(s) require Ubuntu Pro with '{{service}}' enabled. -Renew your service at {url}""", # noqa: E501 +{{pkg_num}} additional security update require Ubuntu Pro with '{{service}}' enabled. +Renew your subscription at {url}""", # noqa: E501 """\ *Your Ubuntu Pro subscription has EXPIRED* -{{pkg_num}} additional security update(s) require Ubuntu Pro with '{{service}}' enabled. -Renew your service at {url}""", # noqa: E501 +{{pkg_num}} additional security updates require Ubuntu Pro with '{{service}}' enabled. +Renew your subscription at {url}""", # noqa: E501 n, ).format(url=urls.PRO_DASHBOARD) ) @@ -201,7 +201,7 @@ CONTRACT_EXPIRED = t.gettext( """\ *Your Ubuntu Pro subscription has EXPIRED* -Renew your service at {url}""" +Renew your subscription at {url}""" ).format(url=urls.PRO_DASHBOARD) @@ -270,7 +270,7 @@ # ATTACH AUTO_ATTACH_RUNNING = t.gettext( "Currently attempting to automatically attach this machine to " - "Ubuntu Pro services" + "an Ubuntu Pro subscription" ) ATTACH_SUCCESS_TMPL = t.gettext( """\ @@ -384,15 +384,15 @@ # These are for the retry-auto-attach functionality AUTO_ATTACH_RETRY_NOTICE = t.gettext( """\ -Failed to automatically attach to Ubuntu Pro services {num_attempts} time(s). +Failed to automatically attach to an Ubuntu Pro subscription {num_attempts} time(s). The failure was due to: {reason}. The next attempt is scheduled for {next_run_datestring}. -You can try manually with `sudo pro auto-attach`.""" +You can try manually with `sudo pro auto-attach`.""" # noqa: E501 ) AUTO_ATTACH_RETRY_TOTAL_FAILURE_NOTICE = t.gettext( """\ -Failed to automatically attach to Ubuntu Pro services {num_attempts} time(s). +Failed to automatically attach to an Ubuntu Pro subscription {num_attempts} time(s). The most recent failure was due to: {reason}. 
Try re-launching the instance or report this issue by running `ubuntu-bug ubuntu-advantage-tools` You can try manually with `sudo pro auto-attach`.""" # noqa: E501 @@ -596,7 +596,7 @@ UNKNOWN: {status}""" ) -SECURITY_FOUND_CVES = t.gettext("Found CVEs:") +SECURITY_FOUND_CVES = t.gettext("Associated CVEs:") SECURITY_FOUND_LAUNCHPAD_BUGS = t.gettext("Found Launchpad bugs:") SECURITY_FIXING_REQUESTED_USN = t.gettext( """\ @@ -682,7 +682,7 @@ SS_UPDATE_CALL = t.gettext( """\ Make sure to run - sudo apt-get update + sudo apt update to get the latest package information from apt.""" ) SS_UPDATE_DAYS = ( @@ -898,7 +898,7 @@ "tarball where the logs will be stored. (Defaults to " "./ua_logs.tar.gz)" ) -CLI_CONFIG_SHOW_DESC = t.gettext("Show customisable configuration settings") +CLI_CONFIG_SHOW_DESC = t.gettext("Show customizable configuration settings") CLI_CONFIG_SHOW_KEY = t.gettext( "Optional key or key(s) to show configuration settings." ) @@ -917,7 +917,7 @@ CLI_ATTACH_DESC = t.gettext( """\ -Attach this machine to Ubuntu Pro with a token obtained from: +Attach this machine to an Ubuntu Pro subscription with a token obtained from: {url} When running this command without a token, it will generate a short code @@ -993,13 +993,15 @@ * messages: Update APT and MOTD messages related to UA. You can individually target any of the three specific actions, -by passing it's target to nome to the command. If no `target` +by passing the target name to the command. If no `target` is specified, all targets are refreshed. """ ) CLI_REFRESH_TARGET = t.gettext("Target to refresh.") -CLI_DETACH_DESC = t.gettext("Detach this machine from Ubuntu Pro services.") +CLI_DETACH_DESC = t.gettext( + "Detach this machine from an Ubuntu Pro subscription." +) CLI_HELP_DESC = t.gettext( "Provide detailed information about Ubuntu Pro services." @@ -1198,7 +1200,7 @@ ) CIS_IS_NOW_USG = t.gettext( """\ -From Ubuntu 20.04 and onwards 'pro enable cis' has been +From Ubuntu 20.04 onward 'pro enable cis' has been replaced by 'pro enable usg'. See more information at: {url}""" ).format(url=urls.USG_DOCS) @@ -1255,7 +1257,7 @@ To fix it, run the following commands: 1. sudo pro disable fips - 2. sudo apt-get remove ubuntu-fips + 2. sudo apt remove ubuntu-fips 3. sudo pro enable fips --assume-yes 4. sudo reboot """ @@ -1367,6 +1369,13 @@ install_url=urls.LANDSCAPE_DOCS_INSTALL, home_url=urls.LANDSCAPE_HOME_PAGE, ) +LANDSCAPE_CONFIG_REMAINS = t.gettext( + """\ +/etc/landscape/client.conf contains your landscape-client configuration. 
+To re-enable Landscape with the same configuration, run: + sudo pro enable landscape --assume-yes +""" +) LIVEPATCH_TITLE = t.gettext("Livepatch") LIVEPATCH_DESCRIPTION = t.gettext("Canonical Livepatch service") @@ -1417,6 +1426,10 @@ REALTIME_NVIDIA_DESCRIPTION = t.gettext( "RT kernel optimized for NVIDIA Tegra platform" ) +REALTIME_RASPI_TITLE = t.gettext("Raspberry Pi Real-time for Pi5/Pi4") +REALTIME_RASPI_DESCRIPTION = t.gettext( + "24.04 Real-time kernel optimised for Raspberry Pi" +) REALTIME_INTEL_TITLE = t.gettext("Real-time Intel IOTG Kernel") REALTIME_INTEL_DESCRIPTION = t.gettext( "RT kernel optimized for Intel IOTG platform" @@ -1660,6 +1673,13 @@ {title} is not currently enabled\nSee: sudo pro status""" ), ) +CANNOT_DISABLE_NOT_APPLICABLE = FormattedNamedMessage( + "cannot-disable-not-applicable", + t.gettext( + """\ +Disabling {title} with pro is not supported.\nSee: sudo pro status""" + ), +) ALREADY_ENABLED = FormattedNamedMessage( "service-already-enabled", t.gettext( @@ -1812,6 +1832,15 @@ t.gettext("canonical-livepatch status didn't finish successfully"), ) +LIVEPATCH_CLIENT_FAILURE_WARNING = FormattedNamedMessage( + "livepatch-client-failure-warning", + t.gettext( + """\ +Error running canonical-livepatch status: +{livepatch_error}""" + ), +) + REALTIME_FIPS_INCOMPATIBLE = NamedMessage( "realtime-fips-incompatible", t.gettext( @@ -1858,19 +1887,6 @@ t.gettext("unattended-upgrades package is not installed"), ) -LANDSCAPE_CLIENT_NOT_INSTALLED = NamedMessage( - "landscape-client-not-installed", - t.gettext("lanscape-client is not installed"), -) -LANDSCAPE_NOT_CONFIGURED = NamedMessage( - "landscape-not-configured", - t.gettext( - """\ -Landscape is installed but not configured. -Run `sudo landscape-config` to set it up, or run `sudo pro disable landscape`\ -""" - ), -) LANDSCAPE_NOT_REGISTERED = NamedMessage( "landscape-not-registered", t.gettext( @@ -1883,10 +1899,7 @@ LANDSCAPE_SERVICE_NOT_ACTIVE = NamedMessage( "landscape-service-not-active", t.gettext( - """\ -Landscape is installed and configured and registered but not running. -Run `sudo landscape-config` to start it, or run `sudo pro disable landscape`\ -""" + "landscape-client is either not installed or installed but disabled." 
), ) LANDSCAPE_CONFIG_FAILED = NamedMessage( @@ -2022,12 +2035,13 @@ "proxy-auth-fail", t.gettext("Proxy authentication failed") ) -E_CONNECTIVITY_ERROR = NamedMessage( +E_CONNECTIVITY_ERROR = FormattedNamedMessage( "connectivity-error", t.gettext( """\ -Failed to connect to authentication server -Check your Internet connection and try again.""" +Failed to connect to {url} +{cause_error} +""" ), ) diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/security.py ubuntu-advantage-tools-31.2~23.10/uaclient/security.py --- ubuntu-advantage-tools-30~23.10/uaclient/security.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/security.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,1708 +0,0 @@ -import copy -import enum -import socket -import textwrap -from collections import defaultdict -from datetime import datetime -from typing import Any, Dict, List, NamedTuple, Optional, Set, Tuple - -from uaclient import apt, exceptions, livepatch, messages, system, util -from uaclient.api.u.pro.attach.magic.initiate.v1 import _initiate -from uaclient.api.u.pro.attach.magic.revoke.v1 import ( - MagicAttachRevokeOptions, - _revoke, -) -from uaclient.api.u.pro.attach.magic.wait.v1 import ( - MagicAttachWaitOptions, - _wait, -) -from uaclient.clouds.identity import ( - CLOUD_TYPE_TO_TITLE, - PRO_CLOUD_URLS, - get_cloud_type, -) -from uaclient.config import UAConfig -from uaclient.defaults import PRINT_WRAP_WIDTH -from uaclient.entitlements import entitlement_factory -from uaclient.entitlements.entitlement_status import ( - ApplicabilityStatus, - UserFacingStatus, -) -from uaclient.files import notices -from uaclient.files.notices import Notice -from uaclient.http import serviceclient -from uaclient.status import colorize_commands - -CVE_OR_USN_REGEX = ( - r"((CVE|cve)-\d{4}-\d{4,7}$|(USN|usn|LSN|lsn)-\d{1,5}-\d{1,2}$)" -) - -API_V1_CVES = "cves.json" -API_V1_CVE_TMPL = "cves/{cve}.json" -API_V1_NOTICES = "notices.json" -API_V1_NOTICE_TMPL = "notices/{notice}.json" - - -UnfixedPackage = NamedTuple( - "UnfixedPackage", - [ - ("pkg", str), - ("unfixed_reason", str), - ], -) - - -ReleasedPackagesInstallResult = NamedTuple( - "ReleasedPackagesInstallResult", - [ - ("fix_status", bool), - ("unfixed_pkgs", List[UnfixedPackage]), - ("installed_pkgs", Set[str]), - ("all_already_installed", bool), - ], -) - - -BinaryPackageFix = NamedTuple( - "BinaryPackageFix", - [ - ("source_pkg", str), - ("binary_pkg", str), - ("fixed_version", str), - ], -) - - -UpgradeResult = NamedTuple( - "UpgradeResult", - [ - ("status", bool), - ("failure_reason", Optional[str]), - ], -) - - -class FixStatus(enum.Enum): - """ - An enum to represent the system status after fix operation - """ - - class _Value: - def __init__(self, value: int, msg: str): - self.value = value - self.msg = msg - - SYSTEM_NON_VULNERABLE = _Value(0, "fixed") - SYSTEM_NOT_AFFECTED = _Value(0, "not-affected") - SYSTEM_STILL_VULNERABLE = _Value(1, "still-affected") - SYSTEM_VULNERABLE_UNTIL_REBOOT = _Value(2, "affected-until-reboot") - - @property - def exit_code(self): - return self.value.value - - def __str__(self): - return self.value.msg - - -FixResult = NamedTuple( - "FixResult", - [ - ("status", FixStatus), - ("unfixed_pkgs", Optional[List[UnfixedPackage]]), - ], -) - - -class UASecurityClient(serviceclient.UAServiceClient): - - url_timeout = 20 - cfg_url_base_attr = "security_url" - - def _get_query_params( - self, query_params: Dict[str, Any] - ) -> Dict[str, Any]: - """ - Update query params with data from feature config. 
- """ - extra_security_params = self.cfg.cfg.get("features", {}).get( - "extra_security_params", {} - ) - - if query_params: - query_params.update(extra_security_params) - return query_params - - return extra_security_params - - @util.retry(socket.timeout, retry_sleeps=[1, 3, 5]) - def request_url( - self, path, data=None, headers=None, method=None, query_params=None - ): - query_params = self._get_query_params(query_params) - return super().request_url( - path=path, - data=data, - headers=headers, - method=method, - query_params=query_params, - log_response_body=False, - ) - - def get_cves( - self, - query: Optional[str] = None, - priority: Optional[str] = None, - package: Optional[str] = None, - limit: Optional[int] = None, - offset: Optional[int] = None, - component: Optional[str] = None, - version: Optional[str] = None, - status: Optional[List[str]] = None, - ) -> List["CVE"]: - """Query to match multiple-CVEs. - - @return: List of CVE instances based on the the JSON response. - """ - query_params = { - "q": query, - "priority": priority, - "package": package, - "limit": limit, - "offset": offset, - "component": component, - "version": version, - "status": status, - } - response = self.request_url(API_V1_CVES, query_params=query_params) - if response.code != 200: - raise exceptions.SecurityAPIError( - url=API_V1_CVES, code=response.code, body=response.body - ) - return [ - CVE(client=self, response=cve_md) for cve_md in response.json_list - ] - - def get_cve(self, cve_id: str) -> "CVE": - """Query to match single-CVE. - - @return: CVE instance for JSON response from the Security API. - """ - url = API_V1_CVE_TMPL.format(cve=cve_id) - response = self.request_url(url) - if response.code != 200: - raise exceptions.SecurityAPIError( - url=url, code=response.code, body=response.body - ) - return CVE(client=self, response=response.json_dict) - - def get_notices( - self, - details: Optional[str] = None, - release: Optional[str] = None, - limit: Optional[int] = None, - offset: Optional[int] = None, - order: Optional[str] = None, - ) -> List["USN"]: - """Query to match multiple-USNs. - - @return: Sorted list of USN instances based on the the JSON response. - """ - query_params = { - "details": details, - "release": release, - "limit": limit, - "offset": offset, - "order": order, - } - response = self.request_url(API_V1_NOTICES, query_params=query_params) - if response.code != 200: - raise exceptions.SecurityAPIError( - url=API_V1_NOTICES, code=response.code, body=response.body - ) - return sorted( - [ - USN(client=self, response=usn_md) - for usn_md in response.json_dict.get("notices", []) - if details is None or details in usn_md.get("cves_ids", []) - ], - key=lambda x: x.id, - ) - - def get_notice(self, notice_id: str) -> "USN": - """Query to match single-USN. - - @return: USN instance representing the JSON response. 
- """ - url = API_V1_NOTICE_TMPL.format(notice=notice_id) - response = self.request_url(url) - if response.code != 200: - raise exceptions.SecurityAPIError( - url=url, code=response.code, body=response.body - ) - return USN(client=self, response=response.json_dict) - - -# Model for Security API responses -class CVEPackageStatus: - """Class representing specific CVE PackageStatus on an Ubuntu series""" - - def __init__(self, cve_response: Dict[str, Any]): - self.response = cve_response - - @property - def description(self): - return self.response["description"] - - @property - def fixed_version(self): - return self.description - - @property - def pocket(self): - return self.response["pocket"] - - @property - def release_codename(self): - return self.response["release_codename"] - - @property - def status(self): - return self.response["status"] - - @property - def status_message(self): - if self.status == "needed": - return messages.SECURITY_CVE_STATUS_NEEDED - elif self.status == "needs-triage": - return messages.SECURITY_CVE_STATUS_TRIAGE - elif self.status == "pending": - return messages.SECURITY_CVE_STATUS_PENDING - elif self.status in ("ignored", "deferred"): - return messages.SECURITY_CVE_STATUS_IGNORED - elif self.status == "DNE": - return messages.SECURITY_CVE_STATUS_DNE - elif self.status == "not-affected": - return messages.SECURITY_CVE_STATUS_NOT_AFFECTED - elif self.status == "released": - return messages.SECURITY_FIX_RELEASE_STREAM.format( - fix_stream=self.pocket_source - ) - return messages.SECURITY_CVE_STATUS_UNKNOWN.format(status=self.status) - - @property - def requires_ua(self) -> bool: - """Return True if the package requires an active Pro subscription.""" - return bool( - self.pocket_source - != messages.SECURITY_UBUNTU_STANDARD_UPDATES_POCKET - ) - - @property - def pocket_source(self): - """Human-readable string representing where the fix is published.""" - if self.pocket == "esm-infra": - fix_source = messages.SECURITY_UA_INFRA_POCKET - elif self.pocket == "esm-apps": - fix_source = messages.SECURITY_UA_APPS_POCKET - elif self.pocket in ("updates", "security"): - fix_source = messages.SECURITY_UBUNTU_STANDARD_UPDATES_POCKET - else: - # TODO(GH: #1376 drop this when esm* pockets supplied by API) - if "esm" in self.fixed_version: - fix_source = messages.SECURITY_UA_INFRA_POCKET - else: - fix_source = messages.SECURITY_UBUNTU_STANDARD_UPDATES_POCKET - return fix_source - - -class CVE: - """Class representing CVE response from the SecurityClient""" - - def __init__(self, client: UASecurityClient, response: Dict[str, Any]): - self.response = response - self.client = client - - def __eq__(self, other) -> bool: - if not isinstance(other, CVE): - return False - return self.response == other.response - - @property - def id(self): - return self.response.get("id", "UNKNOWN_CVE_ID").upper() - - def get_url_header(self): - """Return a string representing the URL for this cve.""" - title = self.description - for notice in self.notices: - # Only look at the most recent USN title - title = notice.title - break - lines = [ - "{issue}: {title}".format(issue=self.id, title=title), - " - {}".format( - messages.urls.SECURITY_CVE_PAGE.format(cve=self.id) - ), - ] - return "\n".join(lines) - - @property - def notices_ids(self) -> List[str]: - return self.response.get("notices_ids", []) - - @property - def notices(self) -> List["USN"]: - """Return a list of USN instances from API response 'notices'. - - Cache the value to avoid extra work on multiple calls. 
- """ - if not hasattr(self, "_notices"): - self._notices = sorted( - [ - USN(self.client, notice) - for notice in self.response.get("notices", []) - ], - key=lambda n: n.id, - reverse=True, - ) - return self._notices - - @property - def description(self): - return self.response.get("description") - - @property - def packages_status(self) -> Dict[str, CVEPackageStatus]: - """Dict of package status dicts for the current Ubuntu series. - - Top-level keys are source packages names and each value is a - CVEPackageStatus object - """ - if hasattr(self, "_packages_status"): - return self._packages_status # type: ignore - self._packages_status = {} - series = system.get_release_info().series - for package in self.response["packages"]: - for pkg_status in package["statuses"]: - if pkg_status["release_codename"] == series: - self._packages_status[package["name"]] = CVEPackageStatus( - pkg_status - ) - return self._packages_status - - -class USN: - """Class representing USN response from the SecurityClient""" - - def __init__(self, client: UASecurityClient, response: Dict[str, Any]): - self.response = response - self.client = client - - def __eq__(self, other) -> bool: - if not isinstance(other, USN): - return False - return self.response == other.response - - @property - def id(self) -> str: - return self.response.get("id", "UNKNOWN_USN_ID").upper() - - @property - def cves_ids(self) -> List[str]: - """List of CVE IDs related to this USN.""" - return self.response.get("cves_ids", []) - - @property - def cves(self) -> List[CVE]: - """List of CVE instances based on API response 'cves' key. - - Cache the values to avoid extra work for multiple call-sites. - """ - if not hasattr(self, "_cves"): - self._cves = sorted( - [ - CVE(self.client, cve) - for cve in self.response.get("cves", []) - ], - key=lambda n: n.id, - reverse=True, - ) # type: List[CVE] - return self._cves - - @property - def title(self): - return self.response.get("title") - - @property - def references(self): - return self.response.get("references") - - def get_url_header(self): - """Return a string representing the URL for this notice.""" - lines = ["{issue}: {title}".format(issue=self.id, title=self.title)] - - if self.cves_ids: - lines.append("Found CVEs:") - for cve in self.cves_ids: - lines.append( - " - {}".format( - messages.urls.SECURITY_CVE_PAGE.format(cve=cve) - ) - ) - elif self.references: - lines.append("Found Launchpad bugs:") - for reference in self.references: - lines.append(" - " + reference) - - return "\n".join(lines) - - @property - def release_packages(self) -> Dict[str, Dict[str, Dict[str, str]]]: - """Binary package information available for this release. - - - Reformat the USN.release_packages response to key it based on source - package name and related binary package names. - - :return: Dict keyed by source package name. The second-level key will - be binary package names generated from that source package and the - values will be the dict response from USN.release_packages for - that binary package. The binary metadata contains the following - keys: name, version. - Optional additional keys: pocket and component. 
- """ - if hasattr(self, "_release_packages"): - return self._release_packages - series = system.get_release_info().series - self._release_packages = {} # type: Dict[str, Dict[str, Any]] - # Organize source and binary packages under a common source package key - for pkg in self.response.get("release_packages", {}).get(series, []): - if pkg.get("is_source"): - # Create a "source" key under src_pkg_name with API response - if pkg["name"] in self._release_packages: - if "source" in self._release_packages[pkg["name"]]: - raise exceptions.SecurityAPIMetadataError( - error_msg=( - "{usn} metadata defines duplicate source" - " packages {pkg}" - ).format(usn=self.id, pkg=pkg["name"]), - issue=self.id, - extra_info="", - ) - self._release_packages[pkg["name"]]["source"] = pkg - else: - self._release_packages[pkg["name"]] = {"source": pkg} - else: - # is_source == False or None, then this is a binary package. - # If processed before a source item, the top-level key will - # not exist yet. - # TODO(GH: 1465: determine if this is expected on kern pkgs) - if not pkg.get("source_link"): - raise exceptions.SecurityAPIMetadataError( - error_msg=( - "{issue} metadata does not define release_packages" - " source_link for {bin_pkg}." - ).format(issue=self.id, bin_pkg=pkg["name"]), - issue=self.id, - extra_info="", - ) - elif "/" not in pkg["source_link"]: - raise exceptions.SecurityAPIMetadataError( - error_msg=( - "{issue} metadata has unexpected release_packages" - " source_link value for {bin_pkg}: {link}" - ).format( - issue=self.id, - bin_pkg=pkg["name"], - link=pkg["source_link"], - ), - issue=self.id, - extra_info="", - ) - source_pkg_name = pkg["source_link"].split("/")[-1] - if source_pkg_name not in self._release_packages: - self._release_packages[source_pkg_name] = {} - self._release_packages[source_pkg_name][pkg["name"]] = pkg - return self._release_packages - - -def query_installed_source_pkg_versions() -> Dict[str, Dict[str, str]]: - """Return a dict of all source packages installed on the system. - - The dict keys will be source package name: "krb5". The value will be a dict - with keys binary_pkg and version. - """ - status_field = "${db:Status-Status}" - out, _err = system.subp( - [ - "dpkg-query", - "-f=${Package},${Source},${Version}," + status_field + "\n", - "-W", - ] - ) - installed_packages = {} # type: Dict[str, Dict[str, str]] - for pkg_line in out.splitlines(): - pkg_name, source_pkg_name, pkg_version, status = pkg_line.split(",") - if not source_pkg_name: - # some package don't define the Source - source_pkg_name = pkg_name - if "installed" not in status: - continue - if source_pkg_name in installed_packages: - installed_packages[source_pkg_name][pkg_name] = pkg_version - else: - installed_packages[source_pkg_name] = {pkg_name: pkg_version} - return installed_packages - - -def merge_usn_released_binary_package_versions( - usns: List[USN], beta_pockets: Dict[str, bool] -) -> Dict[str, Dict[str, Dict[str, str]]]: - """Walk related USNs, merging the released binary package versions. - - For each USN, iterate over release_packages to collect released binary - package names and required fix version. If multiple related USNs - require different version fixes to the same binary package, track the - maximum version required across all USNs. - - :param usns: List of USN response instances from which to calculate merge. - :param beta_pockets: Dict keyed on service name: esm-infra, esm-apps - the values of which will be true of USN response instances - from which to calculate merge. 
- - :return: Dict keyed by source package name. Under each source package will - be a dict with binary package name as keys and binary package metadata - as the value. - """ - usn_pkg_versions = {} - for usn in usns: - # Aggregate USN.release_package binary versions into usn_pkg_versions - for src_pkg, binary_pkg_versions in usn.release_packages.items(): - public_bin_pkg_versions = { - bin_pkg_name: bin_pkg_md - for bin_pkg_name, bin_pkg_md in binary_pkg_versions.items() - if False - is beta_pockets.get(bin_pkg_md.get("pocket", "None"), False) - } - if src_pkg not in usn_pkg_versions and public_bin_pkg_versions: - usn_pkg_versions[src_pkg] = public_bin_pkg_versions - elif src_pkg in usn_pkg_versions: - # Since src_pkg exists, only record this USN's binary version - # when it is greater than the previous version in usn_src_pkg. - usn_src_pkg = usn_pkg_versions[src_pkg] - for bin_pkg, binary_pkg_md in public_bin_pkg_versions.items(): - if bin_pkg not in usn_src_pkg: - usn_src_pkg[bin_pkg] = binary_pkg_md - else: - prev_version = usn_src_pkg[bin_pkg]["version"] - current_version = binary_pkg_md["version"] - if ( - apt.version_compare(current_version, prev_version) - > 0 - ): - # binary_version is greater than prev_version - usn_src_pkg[bin_pkg] = binary_pkg_md - return usn_pkg_versions - - -def get_related_usns(usn, client): - """For a give usn, get the related USNs for it. - - For each CVE associated with the given USN, we capture - other USNs that are related to the CVE. We consider those - USNs related to the original USN. - """ - - # If the usn does not have any associated cves on it, - # we cannot establish a relation between USNs - if not usn.cves: - return [] - - related_usns = {} - for cve in usn.cves: - for related_usn_id in cve.notices_ids: - # We should ignore any other item that is not a USN - # For example, LSNs - if not related_usn_id.startswith("USN-"): - continue - if related_usn_id == usn.id: - continue - if related_usn_id not in related_usns: - related_usns[related_usn_id] = client.get_notice( - notice_id=related_usn_id - ) - - return list(sorted(related_usns.values(), key=lambda x: x.id)) - - -def _check_cve_fixed_by_livepatch( - issue_id: str, -) -> Tuple[Optional[FixStatus], Optional[str]]: - # Check livepatch status for CVE in fixes before checking CVE api - lp_status = livepatch.status() - if ( - lp_status is not None - and lp_status.livepatch is not None - and lp_status.livepatch.fixes is not None - ): - for fix in lp_status.livepatch.fixes: - if fix.name == issue_id.lower() and fix.patched: - version = lp_status.livepatch.version or "N/A" - return (FixStatus.SYSTEM_NON_VULNERABLE, version) - - return (None, None) - - -def _fix_cve( - cve: CVE, - usns: List[USN], - issue_id: str, - installed_packages: Dict[str, Dict[str, str]], - cfg: UAConfig, - beta_pockets: Dict[str, bool], - dry_run: bool, -) -> FixStatus: - affected_pkg_status = get_cve_affected_source_packages_status( - cve=cve, installed_packages=installed_packages - ) - usn_released_pkgs = merge_usn_released_binary_package_versions( - usns, beta_pockets - ) - - print() - return prompt_for_affected_packages( - cfg=cfg, - issue_id=issue_id, - affected_pkg_status=affected_pkg_status, - installed_packages=installed_packages, - usn_released_pkgs=usn_released_pkgs, - dry_run=dry_run, - ).status - - -def _fix_usn( - usn: USN, - related_usns: List[USN], - issue_id: str, - installed_packages: Dict[str, Dict[str, str]], - cfg: UAConfig, - beta_pockets: Dict[str, bool], - dry_run: bool, - no_related: bool, -) -> 
FixStatus: - # We should only highlight the target USN if we have related USNs to fix - print( - "\n" + messages.SECURITY_FIXING_REQUESTED_USN.format(issue_id=issue_id) - ) - - affected_pkg_status = get_affected_packages_from_usn( - usn=usn, installed_packages=installed_packages - ) - usn_released_pkgs = merge_usn_released_binary_package_versions( - [usn], beta_pockets - ) - target_fix_status, _ = prompt_for_affected_packages( - cfg=cfg, - issue_id=issue_id, - affected_pkg_status=affected_pkg_status, - installed_packages=installed_packages, - usn_released_pkgs=usn_released_pkgs, - dry_run=dry_run, - ) - - if target_fix_status not in ( - FixStatus.SYSTEM_NON_VULNERABLE, - FixStatus.SYSTEM_NOT_AFFECTED, - ): - return target_fix_status - - if not related_usns or no_related: - return target_fix_status - - print( - "\n" - + messages.SECURITY_RELATED_USNS.format( - related_usns="\n- ".join(usn.id for usn in related_usns) - ) - ) - - print("\n" + messages.SECURITY_FIXING_RELATED_USNS) - related_usn_status = {} # type: Dict[str, FixResult] - for related_usn in related_usns: - print("- {}".format(related_usn.id)) - affected_pkg_status = get_affected_packages_from_usn( - usn=related_usn, installed_packages=installed_packages - ) - usn_released_pkgs = merge_usn_released_binary_package_versions( - [related_usn], beta_pockets - ) - - related_fix_status = prompt_for_affected_packages( - cfg=cfg, - issue_id=related_usn.id, - affected_pkg_status=affected_pkg_status, - installed_packages=installed_packages, - usn_released_pkgs=usn_released_pkgs, - dry_run=dry_run, - ) - - related_usn_status[related_usn.id] = related_fix_status - print() - - print(messages.SECURITY_USN_SUMMARY) - _handle_fix_status_message( - target_fix_status, - issue_id, - context=messages.FIX_ISSUE_CONTEXT_REQUESTED, - ) - - failure_on_related_usn = False - for related_usn in related_usns: - status = related_usn_status[related_usn.id].status - _handle_fix_status_message( - status, related_usn.id, context=messages.FIX_ISSUE_CONTEXT_RELATED - ) - - if status == FixStatus.SYSTEM_VULNERABLE_UNTIL_REBOOT: - print( - "- " - + messages.ENABLE_REBOOT_REQUIRED_TMPL.format( - operation="fix operation" - ) - ) - failure_on_related_usn = True - if status == FixStatus.SYSTEM_STILL_VULNERABLE: - unfixed_pkgs = ( - related_usn_status[related_usn.id].unfixed_pkgs or [] - ) - for unfixed_pkg in unfixed_pkgs: - if unfixed_pkg.unfixed_reason: - print( - " - {}: {}".format( - unfixed_pkg.pkg, unfixed_pkg.unfixed_reason - ) - ) - failure_on_related_usn = True - - if failure_on_related_usn: - print( - "\n" - + messages.SECURITY_RELATED_USN_ERROR.format(issue_id=issue_id) - ) - - return target_fix_status - - -def fix_security_issue_id( - cfg: UAConfig, - issue_id: str, - dry_run: bool = False, - no_related: bool = False, -) -> FixStatus: - if dry_run: - print(messages.SECURITY_DRY_RUN_WARNING) - - issue_id = issue_id.upper() - client = UASecurityClient(cfg=cfg) - installed_packages = query_installed_source_pkg_versions() - - # Used to filter out beta pockets during merge_usns - beta_pockets = { - "esm-apps": _is_pocket_used_by_beta_service( - messages.SECURITY_UA_APPS_POCKET, cfg - ), - "esm-infra": _is_pocket_used_by_beta_service( - messages.SECURITY_UA_INFRA_POCKET, cfg - ), - } - - if "CVE" in issue_id: - livepatch_cve_status, patch_version = _check_cve_fixed_by_livepatch( - issue_id - ) - - if livepatch_cve_status: - print( - messages.CVE_FIXED_BY_LIVEPATCH.format( - issue=issue_id, - version=patch_version, - ) - ) - return livepatch_cve_status - - try: - 
cve = client.get_cve(cve_id=issue_id) - usns = client.get_notices(details=issue_id) - except exceptions.SecurityAPIError as e: - if e.code == 404: - raise exceptions.SecurityIssueNotFound(issue_id=issue_id) - raise e - - print(cve.get_url_header()) - return _fix_cve( - cve=cve, - usns=usns, - issue_id=issue_id, - installed_packages=installed_packages, - cfg=cfg, - beta_pockets=beta_pockets, - dry_run=dry_run, - ) - - else: # USN - try: - usn = client.get_notice(notice_id=issue_id) - usns = get_related_usns(usn, client) - except exceptions.SecurityAPIError as e: - if e.code == 404: - raise exceptions.SecurityIssueNotFound(issue_id=issue_id) - raise e - - print(usn.get_url_header()) - if not usn.response["release_packages"]: - # Since usn.release_packages filters to our current release only - # check overall metadata and error if empty. - raise exceptions.SecurityAPIMetadataError( - error_msg=( - "{} metadata defines no fixed package versions." - ).format(issue_id), - issue=issue_id, - extra_info="", - ) - - return _fix_usn( - usn=usn, - related_usns=usns, - issue_id=issue_id, - installed_packages=installed_packages, - cfg=cfg, - beta_pockets=beta_pockets, - dry_run=dry_run, - no_related=no_related, - ) - - -def get_affected_packages_from_cves(cves, installed_packages): - affected_pkgs = {} # type: Dict[str, CVEPackageStatus] - - for cve in cves: - for pkg_name, pkg_status in get_cve_affected_source_packages_status( - cve, installed_packages - ).items(): - if pkg_name not in affected_pkgs: - affected_pkgs[pkg_name] = pkg_status - else: - current_ver = affected_pkgs[pkg_name].fixed_version - if ( - apt.version_compare(current_ver, pkg_status.fixed_version) - > 0 - ): - affected_pkgs[pkg_name] = pkg_status - - return affected_pkgs - - -def get_affected_packages_from_usn(usn, installed_packages): - affected_pkgs = {} # type: Dict[str, CVEPackageStatus] - for pkg_name, pkg_info in usn.release_packages.items(): - if pkg_name not in installed_packages: - continue - - cve_response = defaultdict(str) - cve_response["status"] = "released" - # Here we are assuming that the pocket will be the same one across - # all the different binary packages. - all_pockets = { - pkg_bin_info["pocket"] - for _, pkg_bin_info in pkg_info.items() - if pkg_bin_info.get("pocket") - } - if not all_pockets: - raise exceptions.SecurityAPIMetadataError( - error_msg=( - "{} metadata defines no pocket information for " - "any release packages." - ).format(usn.id), - issue=usn.id, - extra_info="", - ) - cve_response["pocket"] = all_pockets.pop() - - affected_pkgs[pkg_name] = CVEPackageStatus(cve_response=cve_response) - - return affected_pkgs - - -def get_usn_affected_packages_status( - usn: USN, installed_packages: Dict[str, Dict[str, str]] -) -> Dict[str, CVEPackageStatus]: - """Walk CVEs related to a USN and return a dict of all affected packages. - - :return: Dict keyed on source package name, with active CVEPackageStatus - for the current Ubuntu release. - """ - if usn.cves: - return get_affected_packages_from_cves(usn.cves, installed_packages) - else: - return get_affected_packages_from_usn(usn, installed_packages) - - -def get_cve_affected_source_packages_status( - cve: CVE, installed_packages: Dict[str, Dict[str, str]] -) -> Dict[str, CVEPackageStatus]: - """Get a dict of any CVEPackageStatuses affecting this Ubuntu release. - - :return: Dict of active CVEPackageStatus keyed by source package names. 
- """ - affected_pkg_versions = {} - for source_pkg, package_status in cve.packages_status.items(): - if package_status.status == "not-affected": - continue - if source_pkg in installed_packages: - affected_pkg_versions[source_pkg] = package_status - return affected_pkg_versions - - -def print_affected_packages_header( - issue_id: str, affected_pkg_status: Dict[str, CVEPackageStatus] -): - """Print header strings describing affected packages related to a CVE/USN. - - :param issue_id: String of USN or CVE issue id. - :param affected_pkg_status: Dict keyed on source package name, with active - CVEPackageStatus for the current Ubuntu release. - """ - count = len(affected_pkg_status) - if count == 0: - print(messages.SECURITY_NO_AFFECTED_PKGS) - print( - "\n" - + messages.SECURITY_ISSUE_UNAFFECTED.format( - issue=issue_id, extra_info="" - ) - ) - return - - msg = messages.SECURITY_AFFECTED_PKGS.pluralize(count).format( - count=count, pkgs=", ".join(sorted(affected_pkg_status.keys())) - ) - print( - textwrap.fill( - msg, - width=PRINT_WRAP_WIDTH, - subsequent_indent=" ", - replace_whitespace=False, - ) - ) - - -def override_usn_release_package_status( - pkg_status: CVEPackageStatus, - usn_src_released_pkgs: Dict[str, Dict[str, str]], -) -> CVEPackageStatus: - """Parse release status based on both pkg_status and USN.release_packages. - - Since some source packages in universe are not represented in - CVEPackageStatus, rely on presence of such source packages in - usn_src_released_pkgs to represent package as a "released" status. - - :param pkg_status: the CVEPackageStatus for this source package. - :param usn_src_released_pkgs: The USN.release_packages representing only - this source package. Normally, release_packages would have data on - multiple source packages. 
- - :return: Tuple of: - human-readable status message, boolean whether released, - boolean whether the fix requires access to UA - """ - - usn_pkg_status = copy.deepcopy(pkg_status) - if usn_src_released_pkgs and usn_src_released_pkgs.get("source"): - usn_pkg_status.response["status"] = "released" - usn_pkg_status.response["description"] = usn_src_released_pkgs[ - "source" - ]["version"] - for pkg_name, usn_released_pkg in usn_src_released_pkgs.items(): - # Copy the pocket from any valid binary package - pocket = usn_released_pkg.get("pocket") - if pocket: - usn_pkg_status.response["pocket"] = pocket - break - return usn_pkg_status - - -def group_by_usn_package_status(affected_pkg_status, usn_released_pkgs): - status_groups = {} # type: Dict[str, List[Tuple[str, CVEPackageStatus]]] - for src_pkg, pkg_status in sorted(affected_pkg_status.items()): - usn_released_src = usn_released_pkgs.get(src_pkg, {}) - usn_pkg_status = override_usn_release_package_status( - pkg_status, usn_released_src - ) - status_group = usn_pkg_status.status.replace("ignored", "deferred") - if status_group not in status_groups: - status_groups[status_group] = [] - status_groups[status_group].append((src_pkg, usn_pkg_status)) - return status_groups - - -def _format_packages_message( - pkg_status_list: List[Tuple[str, CVEPackageStatus]], - pkg_index: int, - num_pkgs: int, -) -> str: - """Format the packages and status to an user friendly message.""" - if not pkg_status_list: - return "" - - msg_index = [] - src_pkgs = [] - for src_pkg, pkg_status in pkg_status_list: - pkg_index += 1 - msg_index.append("{}/{}".format(pkg_index, num_pkgs)) - src_pkgs.append(src_pkg) - - msg_header = textwrap.fill( - "{} {}:".format( - "(" + ", ".join(msg_index) + ")", ", ".join(sorted(src_pkgs)) - ), - width=PRINT_WRAP_WIDTH, - subsequent_indent=" ", - ) - return "{}\n{}".format(msg_header, pkg_status.status_message) - - -def _get_service_for_pocket(pocket: str, cfg: UAConfig): - service_to_check = "no-service-needed" - if pocket == messages.SECURITY_UA_INFRA_POCKET: - service_to_check = "esm-infra" - elif pocket == messages.SECURITY_UA_APPS_POCKET: - service_to_check = "esm-apps" - - ent_cls = entitlement_factory(cfg=cfg, name=service_to_check) - return ent_cls(cfg) if ent_cls else None - - -def _is_pocket_used_by_beta_service(pocket: str, cfg: UAConfig) -> bool: - """Check if the pocket where the fix is at belongs to a beta service.""" - ent = _get_service_for_pocket(pocket, cfg) - if ent: - ent_status, _ = ent.user_facing_status() - - # If the service is already enabled, we proceed with the fix - # even if the service is a beta stage. 
- if ent_status == UserFacingStatus.ACTIVE: - return False - - return not ent.valid_service - - return False - - -def _handle_fix_status_message( - status: FixStatus, issue_id: str, context: str = "" -): - if status == FixStatus.SYSTEM_NON_VULNERABLE: - if context: - msg = messages.SECURITY_ISSUE_RESOLVED_ISSUE_CONTEXT.format( - issue=issue_id, context=context - ) - else: - msg = messages.SECURITY_ISSUE_RESOLVED.format(issue=issue_id) - print(util.handle_unicode_characters(msg)) - elif status == FixStatus.SYSTEM_NOT_AFFECTED: - if context: - msg = messages.SECURITY_ISSUE_UNAFFECTED_ISSUE_CONTEXT.format( - issue=issue_id, context=context - ) - else: - msg = messages.SECURITY_ISSUE_UNAFFECTED.format(issue=issue_id) - print(util.handle_unicode_characters(msg)) - elif status == FixStatus.SYSTEM_VULNERABLE_UNTIL_REBOOT: - if context: - msg = messages.SECURITY_ISSUE_NOT_RESOLVED_ISSUE_CONTEXT.format( - issue=issue_id, context=context - ) - else: - msg = messages.SECURITY_ISSUE_NOT_RESOLVED.format(issue=issue_id) - print(util.handle_unicode_characters(msg)) - else: - if context: - msg = messages.SECURITY_ISSUE_NOT_RESOLVED_ISSUE_CONTEXT.format( - issue=issue_id, context=context - ) - else: - msg = messages.SECURITY_ISSUE_NOT_RESOLVED.format(issue=issue_id) - print(util.handle_unicode_characters(msg)) - - -def _handle_released_package_fixes( - cfg: UAConfig, - src_pocket_pkgs: Dict[str, List[Tuple[str, CVEPackageStatus]]], - binary_pocket_pkgs: Dict[str, List[BinaryPackageFix]], - pkg_index: int, - num_pkgs: int, - dry_run: bool, -) -> ReleasedPackagesInstallResult: - """Handle the packages that could be fixed and have a released status. - - :returns: Tuple of - boolean whether all packages were successfully upgraded, - list of strings containing the packages that were not upgraded, - boolean whether all packages were already installed - """ - all_already_installed = True - upgrade_status = True - unfixed_pkgs = [] # type: List[UnfixedPackage] - installed_pkgs = set() # type: Set[str] - if src_pocket_pkgs: - for pocket in [ - messages.SECURITY_UBUNTU_STANDARD_UPDATES_POCKET, - messages.SECURITY_UA_INFRA_POCKET, - messages.SECURITY_UA_APPS_POCKET, - ]: - pkg_src_group = src_pocket_pkgs[pocket] - binary_pkgs = binary_pocket_pkgs[pocket] - failure_msg = messages.SECURITY_UA_SERVICE_REQUIRED.format( - service=pocket - ) - - if upgrade_status: - msg = _format_packages_message( - pkg_status_list=pkg_src_group, - pkg_index=pkg_index, - num_pkgs=num_pkgs, - ) - - if msg: - print(msg) - - if not binary_pkgs: - print(messages.SECURITY_UPDATE_INSTALLED) - continue - else: - # if even one pocket has binary_pkgs to install - # then we can't say that everything was already - # installed. 
- all_already_installed = False - - upgrade_pkgs = [] - for binary_pkg in sorted(binary_pkgs): - check_esm_cache = ( - pocket - != messages.SECURITY_UBUNTU_STANDARD_UPDATES_POCKET - ) - candidate_version = apt.get_pkg_candidate_version( - binary_pkg.binary_pkg, check_esm_cache=check_esm_cache - ) - if ( - candidate_version - and apt.version_compare( - binary_pkg.fixed_version, candidate_version - ) - <= 0 - ): - upgrade_pkgs.append(binary_pkg.binary_pkg) - else: - unfixed_reason = ( - messages.FIX_CANNOT_INSTALL_PACKAGE.format( - package=binary_pkg.binary_pkg, - version=binary_pkg.fixed_version, - ) - ) - print("- " + unfixed_reason) - unfixed_pkgs.append( - UnfixedPackage( - pkg=binary_pkg.source_pkg, - unfixed_reason=unfixed_reason, - ) - ) - - pkg_index += len(pkg_src_group) - upgrade_result = upgrade_packages_and_attach( - cfg=cfg, - upgrade_pkgs=upgrade_pkgs, - pocket=pocket, - dry_run=dry_run, - ) - upgrade_status &= upgrade_result.status - failure_msg = upgrade_result.failure_reason or "" - - if not upgrade_status: - unfixed_pkgs.extend( - [ - UnfixedPackage( - pkg=src_pkg, - unfixed_reason=failure_msg, - ) - for src_pkg, _ in pkg_src_group - ] - ) - else: - installed_pkgs.update( - binary_pkg.binary_pkg for binary_pkg in binary_pkgs - ) - - return ReleasedPackagesInstallResult( - fix_status=upgrade_status, - unfixed_pkgs=unfixed_pkgs, - installed_pkgs=installed_pkgs, - all_already_installed=all_already_installed, - ) - - -def _format_unfixed_packages_msg(unfixed_pkgs: List[UnfixedPackage]) -> str: - """Format the list of unfixed packages into an message. - - :returns: A string containing the message output for the unfixed - packages. - """ - sorted_pkgs = sorted({pkg.pkg for pkg in unfixed_pkgs}) - num_pkgs_unfixed = len(sorted_pkgs) - return textwrap.fill( - messages.SECURITY_PKG_STILL_AFFECTED.pluralize( - num_pkgs_unfixed - ).format( - num_pkgs=num_pkgs_unfixed, - pkgs=", ".join(sorted_pkgs), - ), - width=PRINT_WRAP_WIDTH, - subsequent_indent=" ", - ) - - -def prompt_for_affected_packages( - cfg: UAConfig, - issue_id: str, - affected_pkg_status: Dict[str, CVEPackageStatus], - installed_packages: Dict[str, Dict[str, str]], - usn_released_pkgs: Dict[str, Dict[str, Dict[str, str]]], - dry_run: bool, -) -> FixResult: - """Process security CVE dict returning a CVEStatus object. - - Since CVEs point to a USN if active, get_notice may be called to fill in - CVE title details. 
- - :returns: An FixStatus enum value corresponding to the system state - after processing the affected packages - """ - count = len(affected_pkg_status) - print_affected_packages_header(issue_id, affected_pkg_status) - if count == 0: - return FixResult( - status=FixStatus.SYSTEM_NOT_AFFECTED, unfixed_pkgs=None - ) - src_pocket_pkgs = defaultdict(list) - binary_pocket_pkgs = defaultdict(list) - pkg_index = 0 - - pkg_status_groups = group_by_usn_package_status( - affected_pkg_status, usn_released_pkgs - ) - - unfixed_pkgs = [] # type: List[UnfixedPackage] - for status_value, pkg_status_group in sorted(pkg_status_groups.items()): - if status_value != "released": - fix_result = FixStatus.SYSTEM_NON_VULNERABLE - print( - _format_packages_message( - pkg_status_list=pkg_status_group, - pkg_index=pkg_index, - num_pkgs=count, - ) - ) - pkg_index += len(pkg_status_group) - status_msg = pkg_status_group[0][1].status_message - unfixed_pkgs += [ - UnfixedPackage(pkg=src_pkg, unfixed_reason=status_msg) - for src_pkg, _ in pkg_status_group - ] - else: - for src_pkg, pkg_status in pkg_status_group: - src_pocket_pkgs[pkg_status.pocket_source].append( - (src_pkg, pkg_status) - ) - for binary_pkg, version in installed_packages[src_pkg].items(): - usn_released_src = usn_released_pkgs.get(src_pkg, {}) - if binary_pkg not in usn_released_src: - continue - fixed_version = usn_released_src.get(binary_pkg, {}).get( - "version", "" - ) - - if apt.version_compare(fixed_version, version) > 0: - binary_pocket_pkgs[pkg_status.pocket_source].append( - BinaryPackageFix( - source_pkg=src_pkg, - binary_pkg=binary_pkg, - fixed_version=fixed_version, - ) - ) - - released_pkgs_install_result = _handle_released_package_fixes( - cfg=cfg, - src_pocket_pkgs=src_pocket_pkgs, - binary_pocket_pkgs=binary_pocket_pkgs, - pkg_index=pkg_index, - num_pkgs=count, - dry_run=dry_run, - ) - - unfixed_pkgs += released_pkgs_install_result.unfixed_pkgs - - print() - if unfixed_pkgs: - print(_format_unfixed_packages_msg(unfixed_pkgs)) - - if released_pkgs_install_result.fix_status: - # fix_status is True if either: - # (1) we successfully installed all the packages we needed to - # (2) we didn't need to install any packages - # In case (2), then all_already_installed is also True - if released_pkgs_install_result.all_already_installed: - # we didn't install any packages, so we're good - fix_result = ( - FixStatus.SYSTEM_STILL_VULNERABLE - if unfixed_pkgs - else FixStatus.SYSTEM_NON_VULNERABLE - ) - elif system.should_reboot( - installed_pkgs=released_pkgs_install_result.installed_pkgs - ): - # we successfully installed some packages, but - # system reboot-required. This might be because - # or our installations. 
- reboot_msg = messages.ENABLE_REBOOT_REQUIRED_TMPL.format( - operation="fix operation" - ) - print(reboot_msg) - notices.add( - Notice.ENABLE_REBOOT_REQUIRED, - operation="fix operation", - ) - fix_result = ( - FixStatus.SYSTEM_STILL_VULNERABLE - if unfixed_pkgs - else FixStatus.SYSTEM_VULNERABLE_UNTIL_REBOOT - ) - else: - # we successfully installed some packages, and the system - # reboot-required flag is not set, so we're good - fix_result = ( - FixStatus.SYSTEM_STILL_VULNERABLE - if unfixed_pkgs - else FixStatus.SYSTEM_NON_VULNERABLE - ) - else: - fix_result = FixStatus.SYSTEM_STILL_VULNERABLE - - _handle_fix_status_message(fix_result, issue_id) - return FixResult( - status=fix_result, - unfixed_pkgs=unfixed_pkgs, - ) - - -def _inform_ubuntu_pro_existence_if_applicable() -> None: - """Alert the user when running Pro on cloud with PRO support.""" - cloud_type, _ = get_cloud_type() - if cloud_type in PRO_CLOUD_URLS: - print( - messages.SECURITY_USE_PRO_TMPL.format( - title=CLOUD_TYPE_TO_TITLE.get(cloud_type), - cloud_specific_url=PRO_CLOUD_URLS.get(cloud_type), - ) - ) - - -def _run_ua_attach(cfg: UAConfig, token: str) -> bool: - """Attach to an Ubuntu Pro subscription with a given token. - - :return: True if attach performed without errors. - """ - import argparse - - from uaclient import cli - - print(colorize_commands([["pro", "attach", token]])) - try: - ret_code = cli.action_attach( - argparse.Namespace( - token=token, auto_enable=True, format="cli", attach_config=None - ), - cfg, - ) - return ret_code == 0 - except exceptions.UbuntuProError as err: - print(err.msg) - return False - - -def _perform_magic_attach(cfg: UAConfig): - print(messages.CLI_MAGIC_ATTACH_INIT) - initiate_resp = _initiate(cfg=cfg) - print( - "\n" - + messages.CLI_MAGIC_ATTACH_SIGN_IN.format( - user_code=initiate_resp.user_code - ) - ) - - wait_options = MagicAttachWaitOptions(magic_token=initiate_resp.token) - - try: - wait_resp = _wait(options=wait_options, cfg=cfg) - except exceptions.MagicAttachTokenError as e: - print(messages.CLI_MAGIC_ATTACH_FAILED) - - revoke_options = MagicAttachRevokeOptions( - magic_token=initiate_resp.token - ) - _revoke(options=revoke_options, cfg=cfg) - raise e - - print("\n" + messages.CLI_MAGIC_ATTACH_PROCESSING) - return _run_ua_attach(cfg, wait_resp.contract_token) - - -def _prompt_for_attach(cfg: UAConfig) -> bool: - """Prompt for attach to a subscription or token. - - :return: True if attach performed. - """ - _inform_ubuntu_pro_existence_if_applicable() - print(messages.SECURITY_UPDATE_NOT_INSTALLED_SUBSCRIPTION) - choice = util.prompt_choices( - messages.SECURITY_FIX_ATTACH_PROMPT, - valid_choices=["s", "a", "c"], - ) - if choice == "c": - return False - if choice == "s": - return _perform_magic_attach(cfg) - if choice == "a": - print(messages.PROMPT_ENTER_TOKEN) - token = input("> ") - return _run_ua_attach(cfg, token) - - return True - - -def _prompt_for_enable(cfg: UAConfig, service: str) -> bool: - """Prompt for enable a pro service. - - :return: True if enable performed. 
- """ - import argparse - - from uaclient import cli - - print(messages.SECURITY_SERVICE_DISABLED.format(service=service)) - choice = util.prompt_choices( - messages.SECURITY_FIX_ENABLE_PROMPT.format(service=service), - valid_choices=["e", "c"], - ) - - if choice == "e": - print(colorize_commands([["pro", "enable", service]])) - return bool( - 0 - == cli.action_enable( - argparse.Namespace( - service=[service], - assume_yes=True, - beta=False, - format="cli", - access_only=False, - ), - cfg, - ) - ) - - return False - - -def _check_attached(cfg: UAConfig, dry_run: bool) -> bool: - """Verify if machine is attached to an Ubuntu Pro subscription.""" - if dry_run: - print("\n" + messages.SECURITY_DRY_RUN_UA_NOT_ATTACHED) - return True - return _prompt_for_attach(cfg) - - -def _check_subscription_for_required_service( - pocket: str, cfg: UAConfig, dry_run: bool -) -> bool: - """ - Verify if the Ubuntu Pro subscription has the required service enabled. - """ - ent = _get_service_for_pocket(pocket, cfg) - - if ent: - ent_status, _ = ent.user_facing_status() - - if ent_status == UserFacingStatus.ACTIVE: - return True - - applicability_status, _ = ent.applicability_status() - if applicability_status == ApplicabilityStatus.APPLICABLE: - if dry_run: - print( - "\n" - + messages.SECURITY_DRY_RUN_UA_SERVICE_NOT_ENABLED.format( - service=ent.name - ) - ) - return True - - if _prompt_for_enable(cfg, ent.name): - return True - else: - print( - messages.SECURITY_UA_SERVICE_NOT_ENABLED.format( - service=ent.name - ) - ) - else: - print( - messages.SECURITY_UA_SERVICE_NOT_ENTITLED.format( - service=ent.name - ) - ) - - return False - - -def _prompt_for_new_token(cfg: UAConfig) -> bool: - """Prompt for attach a new subscription token to the user. - - :return: True if attach performed. - """ - import argparse - - from uaclient import cli - - _inform_ubuntu_pro_existence_if_applicable() - print(messages.SECURITY_UPDATE_NOT_INSTALLED_EXPIRED) - choice = util.prompt_choices( - messages.SECURITY_FIX_RENEW_PROMPT, - valid_choices=["r", "c"], - ) - if choice == "r": - print(messages.PROMPT_EXPIRED_ENTER_TOKEN) - token = input("> ") - print(colorize_commands([["pro", "detach"]])) - cli.action_detach( - argparse.Namespace(assume_yes=True, format="cli"), cfg - ) - return _run_ua_attach(cfg, token) - - return False - - -def _check_subscription_is_expired( - status_cache: Dict[str, Any], cfg: UAConfig, dry_run: bool -) -> bool: - """Check if the Ubuntu Pro subscription is expired. - - :returns: True if subscription is expired and not renewed. - """ - attached = status_cache.get("attached", False) - if not attached: - return False - - contract_expiry_datetime = status_cache.get("expires") - # If we don't have an expire information on the status-cache, we - # assume that the contract is expired. - if contract_expiry_datetime is None or ( - contract_expiry_datetime - < datetime.now(contract_expiry_datetime.tzinfo) - ): - if dry_run: - print(messages.SECURITY_DRY_RUN_UA_EXPIRED_SUBSCRIPTION) - return False - return not _prompt_for_new_token(cfg) - - return False - - -def upgrade_packages_and_attach( - cfg: UAConfig, upgrade_pkgs: List[str], pocket: str, dry_run: bool -) -> UpgradeResult: - """Upgrade available packages to fix a CVE. - - Upgrade all packages in upgrades_packages and, if necessary, - prompt regarding system attach prior to upgrading Ubuntu Pro packages. - - :return: True if package upgrade completed or unneeded, False otherwise. 
- """ - if not upgrade_pkgs: - return UpgradeResult(status=True, failure_reason=None) - - # If we are running on --dry-run mode, we don't need to be root - # to understand what will happen with the system - if not util.we_are_currently_root() and not dry_run: - msg = messages.SECURITY_APT_NON_ROOT - print(msg) - return UpgradeResult(status=False, failure_reason=msg) - - if pocket != messages.SECURITY_UBUNTU_STANDARD_UPDATES_POCKET: - # We are now using status-cache because non-root users won't - # have access to the private machine_token.json file. We - # can use the status-cache as a proxy for the attached - # information - status_cache = cfg.read_cache("status-cache") or {} - if not status_cache.get("attached", False): - if not _check_attached(cfg, dry_run): - return UpgradeResult( - status=False, - failure_reason=messages.SECURITY_UA_SERVICE_REQUIRED.format( # noqa - service=pocket - ), - ) - elif _check_subscription_is_expired( - status_cache=status_cache, cfg=cfg, dry_run=dry_run - ): - return UpgradeResult( - status=False, - failure_reason=messages.SECURITY_UA_SERVICE_WITH_EXPIRED_SUB.format( # noqa - service=pocket - ), - ) - - if not _check_subscription_for_required_service(pocket, cfg, dry_run): - # User subscription does not have required service enabled - return UpgradeResult( - status=False, - failure_reason=messages.SECURITY_UA_SERVICE_NOT_ENABLED_SHORT.format( # noqa - service=pocket - ), - ) - - print( - colorize_commands( - [ - ["apt", "update", "&&"] - + ["apt", "install", "--only-upgrade", "-y"] - + sorted(upgrade_pkgs) - ] - ) - ) - - if not dry_run: - try: - apt.run_apt_update_command() - apt.run_apt_command( - cmd=["apt-get", "install", "--only-upgrade", "-y"] - + upgrade_pkgs, - override_env_vars={"DEBIAN_FRONTEND": "noninteractive"}, - ) - except Exception as e: - msg = getattr(e, "msg", str(e)) - print(msg.strip()) - return UpgradeResult( - status=False, failure_reason=messages.SECURITY_UA_APT_FAILURE - ) - - return UpgradeResult(status=True, failure_reason=None) diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/security_status.py ubuntu-advantage-tools-31.2~23.10/uaclient/security_status.py --- ubuntu-advantage-tools-30~23.10/uaclient/security_status.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/security_status.py 2024-01-18 17:34:13.000000000 +0000 @@ -7,7 +7,7 @@ import apt_pkg # type: ignore -from uaclient import livepatch, messages +from uaclient import exceptions, livepatch, messages from uaclient.api.u.pro.security.status.reboot_required.v1 import ( _reboot_required, ) @@ -230,7 +230,11 @@ # Yeah Any is bad, but so is python<3.8 without TypedDict def get_livepatch_fixed_cves() -> List[Dict[str, Any]]: - lp_status = livepatch.status() + try: + lp_status = livepatch.status() + except exceptions.ProcessExecutionError: + return [] + our_kernel_version = get_kernel_info().proc_version_signature_version if ( lp_status is not None diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/snap.py ubuntu-advantage-tools-31.2~23.10/uaclient/snap.py --- ubuntu-advantage-tools-30~23.10/uaclient/snap.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/snap.py 2024-02-14 15:37:46.000000000 +0000 @@ -128,7 +128,7 @@ def install_snapd(): event.info(messages.APT_UPDATING_LIST.format(name="standard Ubuntu")) try: - apt.update_sources_list("/etc/apt/sources.list") + apt.update_sources_list(apt.get_system_sources_file()) except exceptions.UbuntuProError as e: LOG.debug( "Trying to install snapd. 
Ignoring apt-get update failure: %s", @@ -176,6 +176,10 @@ ) +def refresh_snap(snap: str): + system.subp([SNAP_CMD, "refresh", snap], capture=True) + + def get_snap_info(snap: str) -> SnapPackage: snap_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) snap_sock.connect(SNAPD_SOCKET_PATH) diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/system.py ubuntu-advantage-tools-31.2~23.10/uaclient/system.py --- ubuntu-advantage-tools-30~23.10/uaclient/system.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/system.py 2024-02-14 15:37:46.000000000 +0000 @@ -747,6 +747,33 @@ return True +def get_systemd_unit_active_state(service_name: str) -> Optional[str]: + try: + out, _ = subp( + [ + "systemctl", + "show", + "--property=ActiveState", + "--no-pager", + service_name, + ] + ) + if out and out.startswith("ActiveState="): + return out.split("=")[1].strip() + else: + LOG.warning( + "Couldn't find ActiveState in systemctl show output for %s", + service_name, + ) + except exceptions.ProcessExecutionError as e: + LOG.warning( + "Failed to get ActiveState for systemd unit %s", + service_name, + exc_info=e, + ) + return None + + def get_user_cache_dir() -> str: if util.we_are_currently_root(): return defaults.UAC_RUN_PATH diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/tests/test_actions.py ubuntu-advantage-tools-31.2~23.10/uaclient/tests/test_actions.py --- ubuntu-advantage-tools-30~23.10/uaclient/tests/test_actions.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/tests/test_actions.py 2024-01-18 17:34:13.000000000 +0000 @@ -8,6 +8,12 @@ from uaclient.testing import fakes, helpers M_PATH = "uaclient.actions." +APPARMOR_DENIED = ( + 'audit: type=1400 audit(1703513431.601:36): apparmor="DENIED" ' + 'operation="open" profile="ubuntu_pro_apt_news" ' + 'name="/proc/1422/status" pid=1422 comm="python3" ' + 'requested_mask="r" denied_mask="r" fsuid=0 ouid=0' +) def fake_instance_factory(): @@ -43,7 +49,7 @@ ( "token", True, - exceptions.UrlError(Exception(), "url"), + exceptions.ConnectivityError(cause=Exception(), url="url"), None, None, None, @@ -67,7 +73,7 @@ [{"machineTokenInfo": {"machineId": "machine-id"}}], "get-machine-id-result", mock.sentinel.entitlements, - exceptions.UrlError(Exception(), "url"), + exceptions.ConnectivityError(cause=Exception(), url="url"), None, [mock.call(contract_token="token", attachment_dt=mock.ANY)], [mock.call({"machineTokenInfo": {"machineId": "machine-id"}})], @@ -80,7 +86,7 @@ [mock.call()], [], [], - pytest.raises(exceptions.UrlError), + pytest.raises(exceptions.ConnectivityError), ), ( "token", @@ -335,8 +341,10 @@ @mock.patch("uaclient.actions._get_state_files") @mock.patch("glob.glob") @mock.patch("uaclient.log.get_user_log_file") + @mock.patch("uaclient.system.subp", return_value=(APPARMOR_DENIED, "")) def test_collect_logs_invalid_file( self, + m_system_subp, m_get_user, m_glob, m_get_state_files, @@ -362,11 +370,19 @@ mock.call("a"), mock.call("b"), ] == m_load_file.call_args_list - assert 2 == m_write_file.call_count + assert 3 == m_write_file.call_count + + # apparmor checks + assert 1 == m_system_subp.call_count + assert [ + mock.call(["journalctl", "-b", "-k", "--since=1 day ago"]), + ] == m_system_subp.call_args_list + print(m_write_file.call_args_list) assert [ mock.call("test/user0.log", "test"), mock.call("test/b", "test"), + mock.call("test/apparmor_logs.txt", APPARMOR_DENIED), ] == m_write_file.call_args_list assert [ mock.call("Failed to load file: %s\n%s", "a", "test") diff 
-Nru ubuntu-advantage-tools-30~23.10/uaclient/tests/test_apt.py ubuntu-advantage-tools-31.2~23.10/uaclient/tests/test_apt.py --- ubuntu-advantage-tools-30~23.10/uaclient/tests/test_apt.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/tests/test_apt.py 2024-02-14 15:37:46.000000000 +0000 @@ -21,13 +21,13 @@ APT_PROXY_CONFIG_HEADER, APT_RETRIES, KEYRINGS_DIR, + SERIES_NOT_USING_DEB822, PreserveAptCfg, _ensure_esm_cache_structure, add_apt_auth_conf_entry, add_auth_apt_repo, add_ppa_pinning, assert_valid_apt_credentials, - clean_apt_files, find_apt_list_files, get_apt_cache_policy, get_apt_cache_time, @@ -36,6 +36,7 @@ get_installed_packages_names, get_pkg_candidate_version, get_remote_versions_for_package, + get_system_sources_file, is_installed, remove_apt_list_files, remove_auth_apt_repo, @@ -48,7 +49,6 @@ from uaclient.entitlements.base import UAEntitlement from uaclient.entitlements.entitlement_status import ApplicationStatus from uaclient.entitlements.repo import RepoEntitlement -from uaclient.entitlements.tests.test_repo import RepoTestEntitlement from uaclient.testing import fakes POST_INSTALL_APT_CACHE_NO_UPDATES = """ @@ -349,14 +349,12 @@ class TestAddAuthAptRepo: + @pytest.mark.parametrize("series", ("xenial", "noble")) @mock.patch("uaclient.apt.gpg.export_gpg_key") @mock.patch("uaclient.system.subp") @mock.patch("uaclient.apt.get_apt_auth_file_from_apt_config") @mock.patch("uaclient.apt.assert_valid_apt_credentials") - @mock.patch( - "uaclient.system.get_release_info", - return_value=mock.MagicMock(series="xenial"), - ) + @mock.patch("uaclient.system.get_release_info") def test_add_auth_apt_repo_writes_sources_file( self, m_get_release_info, @@ -364,6 +362,7 @@ m_get_apt_auth_file, m_subp, m_gpg_export, + series, tmpdir, ): """Write a properly configured sources file to repo_filename.""" @@ -371,33 +370,44 @@ auth_file = tmpdir.join("auth.conf").strpath m_get_apt_auth_file.return_value = auth_file m_subp.return_value = "500 esm.canonical.com...", "" # apt policy + m_get_release_info.return_value = mock.MagicMock(series=series) add_auth_apt_repo( repo_filename=repo_file, repo_url="http://fakerepo", credentials="mycreds", - suites=("xenial",), + suites=(series,), keyring_file="keyring", ) - expected_content = ( - "deb http://fakerepo xenial main\n" - "# deb-src http://fakerepo xenial main\n" - ) + if series in SERIES_NOT_USING_DEB822: + expected_content = ( + "deb http://fakerepo {series} main\n" + "# deb-src http://fakerepo {series} main\n" + ).format(series=series) + src_keyfile = os.path.join(KEYRINGS_DIR, "keyring") + dest_keyfile = os.path.join(APT_KEYS_DIR, "keyring") + gpg_export_calls = [mock.call(src_keyfile, dest_keyfile)] + else: + expected_content = ( + "# Written by ubuntu-pro-client\n" + "Types: deb\n" + "URIs: http://fakerepo\n" + "Suites: {series}\n" + "Components: main\n" + "Signed-By: /usr/share/keyrings/keyring\n" + ).format(series=series) + gpg_export_calls = [] + assert expected_content == system.load_file(repo_file) - src_keyfile = os.path.join(KEYRINGS_DIR, "keyring") - dest_keyfile = os.path.join(APT_KEYS_DIR, "keyring") - gpg_export_calls = [mock.call(src_keyfile, dest_keyfile)] assert gpg_export_calls == m_gpg_export.call_args_list + @pytest.mark.parametrize("series", ("xenial", "noble")) @mock.patch("uaclient.apt.gpg.export_gpg_key") @mock.patch("uaclient.system.subp") @mock.patch("uaclient.apt.get_apt_auth_file_from_apt_config") @mock.patch("uaclient.apt.assert_valid_apt_credentials") - @mock.patch( - 
"uaclient.system.get_release_info", - return_value=mock.MagicMock(series="xenial"), - ) + @mock.patch("uaclient.system.get_release_info") def test_add_auth_apt_repo_ignores_suites_not_matching_series( self, m_get_release_info, @@ -405,48 +415,67 @@ m_get_apt_auth_file, m_subp, m_gpg_export, + series, tmpdir, ): """Skip any apt suites that don't match the current series.""" repo_file = tmpdir.join("repo.conf").strpath auth_file = tmpdir.join("auth.conf").strpath m_get_apt_auth_file.return_value = auth_file - # apt policy with xenial-updates enabled + # apt policy with series-updates enabled stdout = dedent( """\ - 500 http://archive.ubuntu.com/ xenial-updates/main amd64 \ + 500 http://archive.ubuntu.com/ {series}-updates/main amd64 \ Packages - release v=16.04,o=Ubuntu,a=xenial-updates,n=xenial,l=Ubuntu\ - ,c=main""" + release v=XX.04,o=Ubuntu,a={series}-updates,n={series},\ + l=Ubuntu,c=main""".format( + series=series + ) ) m_subp.return_value = stdout, "" + m_get_release_info.return_value = mock.MagicMock(series=series) add_auth_apt_repo( repo_filename=repo_file, repo_url="http://fakerepo", credentials="mycreds", - suites=("xenial-one", "xenial-updates", "trusty-gone"), + suites=( + "{}-one".format(series), + "{}-updates".format(series), + "trusty-gone", + ), keyring_file="keyring", ) - expected_content = dedent( - """\ - deb http://fakerepo xenial-one main - # deb-src http://fakerepo xenial-one main - deb http://fakerepo xenial-updates main - # deb-src http://fakerepo xenial-updates main - """ - ) + if series in SERIES_NOT_USING_DEB822: + expected_content = dedent( + """\ + deb http://fakerepo {series}-one main + # deb-src http://fakerepo {series}-one main + deb http://fakerepo {series}-updates main + # deb-src http://fakerepo {series}-updates main + """ + ).format(series=series) + else: + expected_content = dedent( + """\ + # Written by ubuntu-pro-client + Types: deb + URIs: http://fakerepo + Suites: {series}-one {series}-updates + Components: main + Signed-By: /usr/share/keyrings/keyring + """ + ).format(series=series) + assert expected_content == system.load_file(repo_file) + @pytest.mark.parametrize("series", ("xenial", "noble")) @mock.patch("uaclient.apt.gpg.export_gpg_key") @mock.patch("uaclient.system.subp") @mock.patch("uaclient.apt.get_apt_auth_file_from_apt_config") @mock.patch("uaclient.apt.assert_valid_apt_credentials") - @mock.patch( - "uaclient.system.get_release_info", - return_value=mock.MagicMock(series="xenial"), - ) + @mock.patch("uaclient.system.get_release_info") def test_add_auth_apt_repo_comments_updates_suites_on_non_update_machine( self, m_get_release_info, @@ -454,35 +483,53 @@ m_get_apt_auth_file, m_subp, m_gpg_export, + series, tmpdir, ): """Skip any apt suites that don't match the current series.""" repo_file = tmpdir.join("repo.conf").strpath auth_file = tmpdir.join("auth.conf").strpath m_get_apt_auth_file.return_value = auth_file - # apt policy without xenial-updates enabled - origin = "test-origin" + # apt policy without series-updates enabled m_subp.return_value = ( - POST_INSTALL_APT_CACHE_NO_UPDATES.format("xenial", origin), + POST_INSTALL_APT_CACHE_NO_UPDATES.format(series, "test-origin"), "", ) + m_get_release_info.return_value = mock.MagicMock(series=series) add_auth_apt_repo( repo_filename=repo_file, repo_url="http://fakerepo", credentials="mycreds", - suites=("xenial-one", "xenial-updates", "trusty-gone"), + suites=( + "{}-one".format(series), + "{}-updates".format(series), + "trusty-gone", + ), keyring_file="keyring", ) - expected_content = 
dedent( - """\ - deb http://fakerepo xenial-one main - # deb-src http://fakerepo xenial-one main - # deb http://fakerepo xenial-updates main - # deb-src http://fakerepo xenial-updates main - """ - ) + if series in SERIES_NOT_USING_DEB822: + expected_content = dedent( + """\ + deb http://fakerepo {series}-one main + # deb-src http://fakerepo {series}-one main + # deb http://fakerepo {series}-updates main + # deb-src http://fakerepo {series}-updates main + """ + ).format(series=series) + else: + expected_content = dedent( + """\ + # Written by ubuntu-pro-client + Types: deb + URIs: http://fakerepo + Suites: {series}-one + Components: main + Signed-By: /usr/share/keyrings/keyring + """ + ).format(series=series) + assert expected_content == system.load_file(repo_file) @mock.patch("uaclient.apt.gpg.export_gpg_key") @@ -641,7 +688,7 @@ class TestRepo(request.param): name = entitlement_name - repo_list_file_tmpl = repo_tmpl + repo_file_tmpl = repo_tmpl repo_pref_file_tmpl = pref_tmpl is_repo = request.param == RepoEntitlement @@ -657,51 +704,6 @@ return TestRepo - @mock.patch("os.path.exists", return_value=True) - @mock.patch("uaclient.system.ensure_file_absent") - def test_removals_for_repo_entitlements( - self, m_ensure_file_absent, _m_path_exists - ): - m_entitlements = mock.Mock() - m_entitlements.ENTITLEMENT_CLASSES = [RepoTestEntitlement] - - clean_apt_files(_entitlements=m_entitlements) - - assert 2 == m_ensure_file_absent.call_count - - def test_files_for_all_series_removed(self, mock_apt_entitlement, tmpdir): - m_entitlements = mock.Mock() - m_entitlements.ENTITLEMENT_CLASSES = [mock_apt_entitlement] - - clean_apt_files(_entitlements=m_entitlements) - - if mock_apt_entitlement.is_repo: - assert [] == tmpdir.listdir() - else: - assert sorted( - [tmpdir.join("source-test_ent"), tmpdir.join("pref-test_ent")] - ) == sorted(tmpdir.listdir()) - - def test_other_files_not_removed(self, mock_apt_entitlement, tmpdir): - other_filename = "other_file-acidic" - tmpdir.join(other_filename).ensure() - - m_entitlements = mock.Mock() - m_entitlements.ENTITLEMENT_CLASSES = [mock_apt_entitlement] - - clean_apt_files(_entitlements=m_entitlements) - - if mock_apt_entitlement.is_repo: - assert [tmpdir.join(other_filename)] == tmpdir.listdir() - else: - assert sorted( - [ - tmpdir.join("source-test_ent"), - tmpdir.join("pref-test_ent"), - tmpdir.join(other_filename), - ] - ) == sorted(tmpdir.listdir()) - @pytest.fixture(params=(mock.sentinel.default, None, "some_string")) def remove_auth_apt_repo_kwargs(request): @@ -718,15 +720,20 @@ return kwargs +@mock.patch("uaclient.apt.system.subp") +@mock.patch("uaclient.apt.remove_repo_from_apt_auth_file") +@mock.patch("uaclient.apt.system.ensure_file_absent") class TestRemoveAuthAptRepo: - @mock.patch("uaclient.apt.system.subp") - @mock.patch("uaclient.apt.remove_repo_from_apt_auth_file") - @mock.patch("uaclient.apt.system.ensure_file_absent") def test_repo_file_deleted( - self, m_ensure_file_absent, _mock, __mock, remove_auth_apt_repo_kwargs + self, + m_ensure_file_absent, + _m_remove_repo, + _m_subp, + remove_auth_apt_repo_kwargs, ): """Ensure that repo_filename is deleted, regardless of other params.""" - repo_filename, repo_url = mock.sentinel.filename, mock.sentinel.url + repo_filename = "/etc/apt/sources.list.d/pro-repofile.list" + repo_url = mock.sentinel.url remove_auth_apt_repo( repo_filename, repo_url, **remove_auth_apt_repo_kwargs @@ -734,14 +741,36 @@ assert mock.call(repo_filename) in m_ensure_file_absent.call_args_list - 
@mock.patch("uaclient.apt.system.subp") - @mock.patch("uaclient.apt.system.ensure_file_absent") - @mock.patch("uaclient.apt.remove_repo_from_apt_auth_file") + def test_old_repo_file_deleted_when_deb822( + self, + m_ensure_file_absent, + _m_remove_repo, + _m_subp, + remove_auth_apt_repo_kwargs, + ): + repo_filename = "/etc/apt/sources.list.d/pro-repofile.sources" + repo_url = mock.sentinel.url + + remove_auth_apt_repo( + repo_filename, repo_url, **remove_auth_apt_repo_kwargs + ) + + assert mock.call(repo_filename) in m_ensure_file_absent.call_args_list + assert ( + mock.call("/etc/apt/sources.list.d/pro-repofile.list") + in m_ensure_file_absent.call_args_list + ) + def test_remove_from_auth_file_called( - self, m_remove_repo, _mock, __mock, remove_auth_apt_repo_kwargs + self, + _m_ensure_file_absent, + m_remove_repo, + _m_subp, + remove_auth_apt_repo_kwargs, ): """Ensure that remove_repo_from_apt_auth_file is called.""" - repo_filename, repo_url = mock.sentinel.filename, mock.sentinel.url + repo_filename = "/etc/apt/sources.list.d/pro-repofile.list" + repo_url = mock.sentinel.url remove_auth_apt_repo( repo_filename, repo_url, **remove_auth_apt_repo_kwargs @@ -749,14 +778,16 @@ assert mock.call(repo_url) in m_remove_repo.call_args_list - @mock.patch("uaclient.apt.system.subp") - @mock.patch("uaclient.apt.remove_repo_from_apt_auth_file") - @mock.patch("uaclient.apt.system.ensure_file_absent") def test_keyring_file_deleted_if_given( - self, m_ensure_file_absent, _mock, __mock, remove_auth_apt_repo_kwargs + self, + m_ensure_file_absent, + _m_remove_repo, + _m_subp, + remove_auth_apt_repo_kwargs, ): """We should always delete the keyring file if it is given""" - repo_filename, repo_url = mock.sentinel.filename, mock.sentinel.url + repo_filename = "/etc/apt/sources.list.d/pro-repofile.list" + repo_url = mock.sentinel.url remove_auth_apt_repo( repo_filename, repo_url, **remove_auth_apt_repo_kwargs @@ -1584,3 +1615,19 @@ tmpdir + "/var/lib/apt/lists/partial", exist_ok=True, mode=755 ), ] + + +class TestGetSystemSourcesFile: + @pytest.mark.parametrize( + "new_source_exists,expected_return", + ( + (True, "/etc/apt/sources.list.d/ubuntu.sources"), + (False, "/etc/apt/sources.list"), + ), + ) + @mock.patch("uaclient.apt.os.path.exists") + def test_get_system_sources_file( + self, m_exists, new_source_exists, expected_return + ): + m_exists.return_value = new_source_exists + assert get_system_sources_file() == expected_return diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/tests/test_contract.py ubuntu-advantage-tools-31.2~23.10/uaclient/tests/test_contract.py --- ubuntu-advantage-tools-30~23.10/uaclient/tests/test_contract.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/tests/test_contract.py 2024-01-18 17:34:13.000000000 +0000 @@ -528,9 +528,12 @@ "request_side_effect,expected_exception,message", ( ( - exceptions.UrlError("cause", "url"), + exceptions.ConnectivityError(cause="cause", url="url"), exceptions.ConnectivityError, - messages.E_CONNECTIVITY_ERROR, + messages.E_CONNECTIVITY_ERROR.format( + url="url", + cause_error="cause", + ), ), ( [ @@ -604,12 +607,20 @@ cfg = FakeConfig() client = UAContractClient(cfg) magic_token = "test-id" - request_url.side_effect = exceptions.UrlError("cause", "url") + request_url.side_effect = exceptions.ConnectivityError( + cause=Exception("cause"), url="url" + ) with pytest.raises(exceptions.ConnectivityError) as exc_error: client.get_magic_attach_token_info(magic_token=magic_token) - assert messages.E_CONNECTIVITY_ERROR.msg 
== exc_error.value.msg + assert ( + messages.E_CONNECTIVITY_ERROR.format( + url="url", + cause_error="cause", + ).msg + == exc_error.value.msg + ) assert messages.E_CONNECTIVITY_ERROR.name == exc_error.value.msg_code @pytest.mark.parametrize( @@ -651,12 +662,20 @@ cfg = FakeConfig() client = UAContractClient(cfg) magic_token = "test-id" - request_url.side_effect = exceptions.UrlError("cause", "url") + request_url.side_effect = exceptions.ConnectivityError( + cause=Exception("cause"), url="url" + ) with pytest.raises(exceptions.ConnectivityError) as exc_error: client.revoke_magic_attach_token(magic_token=magic_token) - assert messages.E_CONNECTIVITY_ERROR.msg == exc_error.value.msg + assert ( + messages.E_CONNECTIVITY_ERROR.format( + url="url", + cause_error="cause", + ).msg + == exc_error.value.msg + ) assert messages.E_CONNECTIVITY_ERROR.name == exc_error.value.msg_code @pytest.mark.parametrize( @@ -1161,12 +1180,12 @@ """Raise error get_available_resources can't contact backend""" cfg = FakeConfig() - urlerror = exceptions.UrlError( + urlerror = exceptions.ConnectivityError( socket.gaierror(-2, "Name or service not known"), "url" ) m_available_resources.side_effect = urlerror - with pytest.raises(exceptions.UrlError) as exc: + with pytest.raises(exceptions.ConnectivityError) as exc: get_available_resources(cfg) assert urlerror == exc.value diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/tests/test_lib_daemon.py ubuntu-advantage-tools-31.2~23.10/uaclient/tests/test_lib_daemon.py --- ubuntu-advantage-tools-30~23.10/uaclient/tests/test_lib_daemon.py 1970-01-01 00:00:00.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/tests/test_lib_daemon.py 2024-02-14 15:37:46.000000000 +0000 @@ -0,0 +1,59 @@ +import mock +import pytest + +from lib.daemon import ( + WAIT_FOR_CLOUD_CONFIG_POLL_TIMES, + WAIT_FOR_CLOUD_CONFIG_SLEEP_TIME, + _wait_for_cloud_config, +) + + +class TestWaitForCloudConfig: + @pytest.mark.parametrize( + [ + "active_state_side_effect", + "expected_sleep_calls", + ], + ( + # not activating + ( + ["active"], + [], + ), + ( + ["inactive"], + [], + ), + ( + [None], + [], + ), + # activating, then finishes + ( + (["activating"] * 11) + ["active"], + [mock.call(WAIT_FOR_CLOUD_CONFIG_SLEEP_TIME)] * 11, + ), + ( + (["activating"] * 11) + ["failed"], + [mock.call(WAIT_FOR_CLOUD_CONFIG_SLEEP_TIME)] * 11, + ), + # still activating after polling maximum times + ( + ["activating"] * (WAIT_FOR_CLOUD_CONFIG_POLL_TIMES + 1000), + [mock.call(WAIT_FOR_CLOUD_CONFIG_SLEEP_TIME)] + * WAIT_FOR_CLOUD_CONFIG_POLL_TIMES, + ), + ), + ) + @mock.patch("lib.daemon.time.sleep") + @mock.patch("lib.daemon.system.get_systemd_unit_active_state") + def test_wait_for_cloud_config( + self, + m_get_systemd_unit_active_state, + m_sleep, + active_state_side_effect, + expected_sleep_calls, + ): + m_get_systemd_unit_active_state.side_effect = active_state_side_effect + _wait_for_cloud_config() + assert m_sleep.call_args_list == expected_sleep_calls diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/tests/test_livepatch.py ubuntu-advantage-tools-31.2~23.10/uaclient/tests/test_livepatch.py --- ubuntu-advantage-tools-30~23.10/uaclient/tests/test_livepatch.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/tests/test_livepatch.py 2024-01-18 17:34:13.000000000 +0000 @@ -36,7 +36,6 @@ ], [ (False, None, None), - (True, exceptions.ProcessExecutionError(""), None), (True, [("", None)], None), (True, [("{", None)], None), (True, [("{}", None)], None), @@ -197,6 +196,20 @@ 
m_subp.side_effect = subp_sideeffect assert expected == status() + @mock.patch(M_PATH + "system.subp") + @mock.patch(M_PATH + "is_livepatch_installed") + def test_status_when_command_fails( + self, + m_is_livepatch_installed, + m_subp, + ): + + m_is_livepatch_installed.return_value = True + m_subp.side_effect = exceptions.ProcessExecutionError("") + + with pytest.raises(exceptions.ProcessExecutionError): + status() + @mock.patch(M_PATH + "serviceclient.UAServiceClient.request_url") @mock.patch(M_PATH + "serviceclient.UAServiceClient.headers") @@ -314,7 +327,12 @@ ], LivepatchSupport.UNSUPPORTED, ), - (exceptions.UrlError(mock.MagicMock(), "url"), None), + ( + exceptions.ConnectivityError( + cause=mock.MagicMock(), url="url" + ), + None, + ), (Exception(), None), ( [ diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/tests/test_lock.py ubuntu-advantage-tools-31.2~23.10/uaclient/tests/test_lock.py --- ubuntu-advantage-tools-30~23.10/uaclient/tests/test_lock.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/tests/test_lock.py 2024-02-14 15:37:46.000000000 +0000 @@ -3,26 +3,24 @@ from uaclient.exceptions import LockHeldError from uaclient.files.notices import Notice -from uaclient.lock import SingleAttemptLock, SpinLock +from uaclient.lock import RetryLock from uaclient.messages import LOCK_HELD M_PATH = "uaclient.lock." M_PATH_UACONFIG = "uaclient.config.UAConfig." -@pytest.mark.parametrize("lock_cls", (SingleAttemptLock, SpinLock)) -@mock.patch("os.getpid", return_value=123) -@mock.patch(M_PATH_UACONFIG + "delete_cache_key") -@mock.patch("uaclient.files.notices.NoticesManager.add") -@mock.patch(M_PATH_UACONFIG + "write_cache") -class TestLockCommon: +class TestRetryLock: + @mock.patch("os.getpid", return_value=123) + @mock.patch(M_PATH_UACONFIG + "delete_cache_key") + @mock.patch("uaclient.files.notices.NoticesManager.add") + @mock.patch(M_PATH_UACONFIG + "write_cache") def test_creates_and_releases_lock( self, m_write_cache, m_add_notice, m_delete_cache_key, _m_getpid, - lock_cls, FakeConfig, ): cfg = FakeConfig() @@ -32,7 +30,7 @@ assert arg == mock.sentinel.arg return mock.sentinel.success - with lock_cls(cfg=cfg, lock_holder="some operation"): + with RetryLock(cfg=cfg, lock_holder="some operation"): ret = test_function(arg) assert mock.sentinel.success == ret @@ -45,13 +43,16 @@ ] == m_add_notice.call_args_list assert [mock.call("lock")] == m_delete_cache_key.call_args_list + @mock.patch("os.getpid", return_value=123) + @mock.patch(M_PATH_UACONFIG + "delete_cache_key") + @mock.patch("uaclient.files.notices.NoticesManager.add") + @mock.patch(M_PATH_UACONFIG + "write_cache") def test_creates_and_releases_lock_when_error_occurs( self, m_write_cache, m_add_notice, m_delete_cache_key, _m_getpid, - lock_cls, FakeConfig, ): cfg = FakeConfig() @@ -60,7 +61,7 @@ raise RuntimeError("test") with pytest.raises(RuntimeError) as exc: - with lock_cls(cfg=cfg, lock_holder="some operation"): + with RetryLock(cfg=cfg, lock_holder="some operation"): test_function() assert "test" == str(exc.value) @@ -73,48 +74,9 @@ ] == m_add_notice.call_args_list assert [mock.call("lock")] == m_delete_cache_key.call_args_list - -@mock.patch("os.getpid", return_value=123) -@mock.patch(M_PATH_UACONFIG + "delete_cache_key") -@mock.patch("uaclient.files.notices.NoticesManager.add") -@mock.patch(M_PATH_UACONFIG + "write_cache") -class TestSingleAttemptLock: - @mock.patch(M_PATH_UACONFIG + "check_lock_info", return_value=(10, "held")) - def test_raises_lock_held_when_held( - self, - 
_m_check_lock_info, - m_write_cache, - m_add_notice, - m_delete_cache_key, - _m_getpid, - FakeConfig, - ): - cfg = FakeConfig() - arg = mock.sentinel.arg - - def test_function(args, cfg): - assert arg == mock.sentinel.arg - return mock.sentinel.success - - with pytest.raises(LockHeldError) as exc: - with SingleAttemptLock(cfg=cfg, lock_holder="some operation"): - test_function(arg, cfg=cfg) - - assert ( - "Unable to perform: some operation.\n" - + LOCK_HELD.format(lock_holder="held", pid=10) - == exc.value.msg - ) - - assert [] == m_write_cache.call_args_list - assert [] == m_add_notice.call_args_list - assert [] == m_delete_cache_key.call_args_list - - -class TestSpinLock: @mock.patch(M_PATH + "time.sleep") @mock.patch( - M_PATH + "SingleAttemptLock.__enter__", + M_PATH + "RetryLock.grab_lock", side_effect=[ LockHeldError( lock_request="request", lock_holder="holder", pid=10 @@ -130,7 +92,7 @@ ): cfg = FakeConfig() - with SpinLock( + with RetryLock( cfg=cfg, lock_holder="request", sleep_time=1, max_retries=3 ): pass @@ -144,7 +106,7 @@ @mock.patch(M_PATH + "time.sleep") @mock.patch( - M_PATH + "SingleAttemptLock.__enter__", + M_PATH + "RetryLock.grab_lock", side_effect=[ LockHeldError( lock_request="request", lock_holder="holder", pid=10 @@ -161,7 +123,7 @@ cfg = FakeConfig() with pytest.raises(LockHeldError) as exc: - with SpinLock( + with RetryLock( cfg=cfg, lock_holder="request", sleep_time=1, max_retries=2 ): pass diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/tests/test_reboot_cmds.py ubuntu-advantage-tools-31.2~23.10/uaclient/tests/test_reboot_cmds.py --- ubuntu-advantage-tools-30~23.10/uaclient/tests/test_reboot_cmds.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/tests/test_reboot_cmds.py 2024-02-14 15:37:46.000000000 +0000 @@ -91,7 +91,7 @@ ) # noqa: E501 @mock.patch("lib.reboot_cmds.refresh_contract") @mock.patch("lib.reboot_cmds.fix_pro_pkg_holds") -@mock.patch("uaclient.lock.SpinLock") +@mock.patch("uaclient.lock.RetryLock") @mock.patch("lib.reboot_cmds._is_attached") @mock.patch( "uaclient.files.state_files.reboot_cmd_marker_file", diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/tests/test_security.py ubuntu-advantage-tools-31.2~23.10/uaclient/tests/test_security.py --- ubuntu-advantage-tools-30~23.10/uaclient/tests/test_security.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/tests/test_security.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,3242 +0,0 @@ -import copy -import datetime -import json -import textwrap -from collections import defaultdict - -import mock -import pytest - -from uaclient import exceptions, http, livepatch -from uaclient.clouds.identity import NoCloudTypeReason -from uaclient.entitlements.entitlement_status import ( - ApplicabilityStatus, - UserFacingStatus, -) -from uaclient.files.notices import Notice -from uaclient.messages import ( - ENABLE_REBOOT_REQUIRED_TMPL, - FAIL_X, - OKGREEN_CHECK, - PROMPT_ENTER_TOKEN, - PROMPT_EXPIRED_ENTER_TOKEN, - SECURITY_APT_NON_ROOT, - SECURITY_CVE_STATUS_IGNORED, - SECURITY_CVE_STATUS_NEEDED, - SECURITY_CVE_STATUS_PENDING, - SECURITY_CVE_STATUS_TRIAGE, - SECURITY_DRY_RUN_UA_EXPIRED_SUBSCRIPTION, - SECURITY_DRY_RUN_UA_NOT_ATTACHED, - SECURITY_DRY_RUN_UA_SERVICE_NOT_ENABLED, - SECURITY_FIXING_REQUESTED_USN, - SECURITY_RELATED_USN_ERROR, - SECURITY_SERVICE_DISABLED, - SECURITY_UA_SERVICE_NOT_ENABLED, - SECURITY_UA_SERVICE_NOT_ENTITLED, - SECURITY_UA_SERVICE_REQUIRED, - SECURITY_UPDATE_NOT_INSTALLED_EXPIRED, - 
SECURITY_UPDATE_NOT_INSTALLED_SUBSCRIPTION, - SECURITY_USE_PRO_TMPL, -) -from uaclient.security import ( - API_V1_CVE_TMPL, - API_V1_CVES, - API_V1_NOTICE_TMPL, - API_V1_NOTICES, - CVE, - USN, - CVEPackageStatus, - FixStatus, - UASecurityClient, - UnfixedPackage, - UpgradeResult, - _check_attached, - _check_subscription_for_required_service, - _check_subscription_is_expired, - _fix_usn, - _prompt_for_attach, - fix_security_issue_id, - get_cve_affected_source_packages_status, - get_related_usns, - get_usn_affected_packages_status, - merge_usn_released_binary_package_versions, - override_usn_release_package_status, - prompt_for_affected_packages, - query_installed_source_pkg_versions, - upgrade_packages_and_attach, -) -from uaclient.status import colorize_commands -from uaclient.testing import fakes - -M_PATH = "uaclient.contract." -M_REPO_PATH = "uaclient.entitlements.repo.RepoEntitlement." - - -SAMPLE_GET_CVES_QUERY_PARAMS = { - "query": "vq", - "priority": "vpr", - "package": "vpa", - "limit": 1, - "offset": 2, - "component": "vc", - "version": "vv", - "status": "vs", -} - -SAMPLE_GET_NOTICES_QUERY_PARAMS = { - "details": "cve", - "release": "vq", - "limit": 1, - "offset": 2, - "order": "vo", -} - - -CVE_ESM_PACKAGE_STATUS_RESPONSE = { - "component": None, - "description": "1.17-6ubuntu4.1+esm1", - "pocket": "esm-infra", - "release_codename": "focal", - "status": "released", -} - - -SAMBA_CVE_STATUS_BIONIC = { - "component": None, - "description": "2:4.7.6+dfsg~ubuntu-0ubuntu2.19", - "pocket": None, - "release_codename": "bionic", - "status": "released", -} -SAMBA_CVE_STATUS_FOCAL = { - "component": None, - "description": "2:4.11.6+dfsg-0ubuntu1.4", - "pocket": None, - "release_codename": "focal", - "status": "not-affected", -} -SAMBA_CVE_STATUS_UPSTREAM = { - "component": None, - "description": "", - "pocket": None, - "release_codename": "upstream", - "status": "needs-triage", -} - -SAMPLE_CVE_RESPONSE = { - "bugs": ["https://bugzilla.samba.org/show_bug.cgi?id=14497"], - "description": "\nAn elevation of privilege vulnerability exists ...", - "id": "CVE-2020-1472", - "notes": [{"author": "..", "note": "..."}], - "notices_ids": ["USN-4510-1", "USN-4510-2", "USN-4559-1"], - "packages": [ - { - "debian": "https://tracker.debian.org/pkg/samba", - "name": "samba", - "source": "https://ubuntu.com/security/cve?package=samba", - "statuses": [ - SAMBA_CVE_STATUS_BIONIC, - SAMBA_CVE_STATUS_FOCAL, - SAMBA_CVE_STATUS_UPSTREAM, - ], - } - ], - "status": "active", -} - -SAMPLE_USN_RESPONSE = { - "cves_ids": ["CVE-2020-1473", "CVE-2020-1472"], - "id": "USN-4510-2", - "instructions": "In general, a standard system update will make all ...\n", - "references": [], - "release_packages": { - "series-example-1": [ - { - "description": "SMB/CIFS file, print, and login ... 
Unix", - "is_source": True, - "name": "samba", - "version": "2:4.3.11+dfsg-0ubuntu0.14.04.20+esm9", - }, - { - "is_source": False, - "name": "samba", - "source_link": "https://launchpad.net/ubuntu/+source/samba", - "version": "2~14.04.1+esm9", - "version_link": "https://....11+dfsg-0ubuntu0.14.04.20+esm9", - }, - ], - "series-example-2": [ - { - "description": "high-level 3D graphics kit implementing ...", - "is_source": True, - "name": "coin3", - "version": "3.1.4~abc9f50-4ubuntu2+esm1", - }, - { - "is_source": False, - "name": "libcoin80-runtime", - "source_link": "https://launchpad.net/ubuntu/+source/coin3", - "version": "3~18.04.1+esm2", - "version_link": "https://coin3...18.04.1+esm2", - }, - ], - }, - "summary": "Samba would allow unintended access to files over the ....\n", - "title": "Samba vulnerability", - "type": "USN", -} - -SAMPLE_USN_RESPONSE_NO_CVES = { - "cves_ids": [], - "id": "USN-4038-3", - "instructions": "In general, a standard system update will make all ...\n", - "references": ["https://launchpad.net/bugs/1834494"], - "release_packages": { - "bionic": [ - { - "description": "high-level 3D graphics kit implementing ...", - "is_source": True, - "name": "coin3", - "version": "3.1.4~abc9f50-4ubuntu2+esm1", - }, - { - "is_source": False, - "name": "libcoin80-runtime", - "source_link": "https://launchpad.net/ubuntu/+source/coin3", - "version": "3~18.04.1+esm2", - "version_link": "https://coin3...18.04.1+esm2", - "pocket": "security", - }, - ] - }, - "summary": "", - "title": "USN vulnerability", - "type": "USN", -} - - -def shallow_merge_dicts(a, b): - c = a.copy() - c.update(b) - return c - - -class TestGetCVEAffectedPackageStatus: - @pytest.mark.parametrize( - "series,installed_packages,expected_status", - ( - ("bionic", {}, {}), - # installed package version has no bearing on status filtering - ("bionic", {"samba": "1000"}, SAMBA_CVE_STATUS_BIONIC), - # active series has a bearing on status filtering - ("upstream", {"samba": "1000"}, SAMBA_CVE_STATUS_UPSTREAM), - # package status not-affected gets filtered from affected_pkgs - ("focal", {"samba": "1000"}, {}), - ), - ) - @mock.patch("uaclient.security.system.get_release_info") - def test_affected_packages_status_filters_by_installed_pkgs_and_series( - self, - m_get_release_info, - series, - installed_packages, - expected_status, - FakeConfig, - ): - """Package statuses are filtered if not installed""" - m_get_release_info.return_value = mock.MagicMock(series=series) - client = UASecurityClient(FakeConfig()) - cve = CVE(client, SAMPLE_CVE_RESPONSE) - affected_packages = get_cve_affected_source_packages_status( - cve, installed_packages=installed_packages - ) - if expected_status: - package_status = affected_packages["samba"] - assert expected_status == package_status.response - else: - assert expected_status == affected_packages - - -class TestCVE: - def test_cve_init_attributes(self, FakeConfig): - """CVE.__init__ saves client and response on instance.""" - client = UASecurityClient(FakeConfig()) - cve = CVE(client, {"some": "response"}) - assert client == cve.client - assert {"some": "response"} == cve.response - - @pytest.mark.parametrize( - "cve1,cve2,are_equal", - ( - (CVE(None, {"1": "2"}), CVE(None, {"1": "2"}), True), - (CVE("A", {"1": "2"}), CVE("B", {"1": "2"}), True), - (CVE(None, {}), CVE("B", {"1": "2"}), False), - (CVE(None, {"1": "2"}), USN(None, {"1": "2"}), False), - ), - ) - def test_equality(self, cve1, cve2, are_equal): - """Equality is based instance type and CVE.response value""" - if are_equal: - 
assert cve1.response == cve2.response - assert cve1 == cve2 - else: - if isinstance(cve1, CVE) and isinstance(cve2, CVE): - assert cve1.response != cve2.response - assert cve1 != cve2 - - @pytest.mark.parametrize( - "attr_name,expected,response", - ( - ("description", None, {}), - ("description", "descr", {"description": "descr"}), - ("id", "UNKNOWN_CVE_ID", {}), - ( - "id", - "CVE-123", - {"id": "cve-123"}, - ), # Uppercase of id value is used - ("notices_ids", [], {}), - ("notices_ids", [], {"notices_ids": []}), - ("notices_ids", ["1", "2"], {"notices_ids": ["1", "2"]}), - ), - ) - def test_cve_basic_properties_from_response( - self, attr_name, expected, response, FakeConfig - ): - """CVE instance properties are set from Security API CVE response.""" - client = UASecurityClient(FakeConfig()) - cve = CVE(client, response) - assert expected == getattr(cve, attr_name) - - def test_get_url_header(self, FakeConfig): - """CVE.get_url_header returns a string based on the CVE response.""" - client = UASecurityClient(FakeConfig()) - detailed_cve_response = copy.deepcopy(SAMPLE_CVE_RESPONSE) - # Detailed CVE responses will contain full USN metadata in notices key - detailed_cve_response["notices"] = [{"title": "Samba vulnerability"}] - cve = CVE(client, detailed_cve_response) - assert ( - textwrap.dedent( - """\ - CVE-2020-1472: Samba vulnerability - - https://ubuntu.com/security/CVE-2020-1472""" - ) - == cve.get_url_header() - ) - - @pytest.mark.parametrize( - "usns_response,expected", - ( - (None, []), - ([], []), - ( # USNs are properly sorted by id - [{"id": "1"}, {"id": "2"}], - [USN(None, {"id": "2"}), USN(None, {"id": "1"})], - ), - ), - ) - def test_notices_cached_from_usns_response( - self, usns_response, expected, FakeConfig - ): - """List of USNs returned from CVE 'usns' response if present.""" - client = UASecurityClient(FakeConfig()) - cve_response = copy.deepcopy(SAMPLE_CVE_RESPONSE) - if usns_response is not None: - cve_response["notices"] = usns_response - cve = CVE(client, cve_response) - assert expected == cve.notices - # clear box test caching in effect - cve.response = "junk" - assert expected == cve.notices - - -class TestUSN: - def test_usn_init_attributes(self, FakeConfig): - """USN.__init__ saves client and response on instance.""" - client = UASecurityClient(FakeConfig()) - cve = USN(client, {"some": "response"}) - assert client == cve.client - assert {"some": "response"} == cve.response - - @pytest.mark.parametrize( - "usn1,usn2,are_equal", - ( - (USN(None, {"1": "2"}), USN(None, {"1": "2"}), True), - (USN("A", {"1": "2"}), USN("B", {"1": "2"}), True), - (USN(None, {}), USN("B", {"1": "2"}), False), - (USN(None, {"1": "2"}), CVE(None, {"1": "2"}), False), - ), - ) - def test_equality(self, usn1, usn2, are_equal): - """Equality is based instance type and USN.response value""" - if are_equal: - assert usn1.response == usn2.response - assert usn1 == usn2 - else: - if isinstance(usn1, USN) and isinstance(usn2, USN): - assert usn1.response != usn2.response - assert usn1 != usn2 - - @pytest.mark.parametrize( - "attr_name,expected,response", - ( - ("title", None, {}), - ("title", "my title", {"title": "my title"}), - ("id", "UNKNOWN_USN_ID", {}), - ( - "id", - "USN-123", - {"id": "usn-123"}, - ), # Uppercase of id value is used - ("cves_ids", [], {}), - ("cves_ids", [], {"cves_ids": []}), - ("cves_ids", ["1", "2"], {"cves_ids": ["1", "2"]}), - ("cves", [], {}), - ("cves", [], {"cves": []}), - ), - ) - def test_usn_basic_properties_from_response( - self, attr_name, expected, 
response, FakeConfig - ): - """USN instance properties are set from Security API USN response.""" - client = UASecurityClient(FakeConfig()) - usn = USN(client, response) - assert expected == getattr(usn, attr_name) - - @pytest.mark.parametrize( - "series,expected", - ( - ( - "series-example-1", - { - "samba": { - "source": { - "description": ( - "SMB/CIFS file, print, and login ... Unix" - ), - "is_source": True, - "name": "samba", - "version": "2:4.3.11+dfsg-0ubuntu0.14.04.20+esm9", - }, - "samba": { - "is_source": False, - "name": "samba", - "source_link": ( - "https://launchpad.net/ubuntu/+source/samba" - ), - "version": "2~14.04.1+esm9", - "version_link": ( - "https://....11+dfsg-0ubuntu0.14.04.20+esm9" - ), - }, - } - }, - ), - ( - "series-example-2", - { - "coin3": { - "source": { - "description": ( - "high-level 3D graphics kit implementing ..." - ), - "is_source": True, - "name": "coin3", - "version": "3.1.4~abc9f50-4ubuntu2+esm1", - }, - "libcoin80-runtime": { - "is_source": False, - "name": "libcoin80-runtime", - "source_link": ( - "https://launchpad.net/ubuntu/+source/coin3" - ), - "version": "3~18.04.1+esm2", - "version_link": "https://coin3...18.04.1+esm2", - }, - } - }, - ), - ("series-example-3", {}), - ), - ) - @mock.patch("uaclient.system.get_release_info") - def test_release_packages_returns_source_and_binary_pkgs_for_series( - self, m_get_release_info, series, expected, FakeConfig - ): - m_get_release_info.return_value = mock.MagicMock(series=series) - client = UASecurityClient(FakeConfig()) - usn = USN(client, SAMPLE_USN_RESPONSE) - - assert expected == usn.release_packages - usn._release_packages = {"sl": "1.0"} - assert {"sl": "1.0"} == usn.release_packages - - @pytest.mark.parametrize( - "source_link,error_msg", - ( - ( - None, - ( - "Metadata for USN-4510-2 is invalid. " - "Error: USN-4510-2 metadata does not define " - "release_packages source_link for samba2." - ), - ), - ( - "unknown format", - ( - "Metadata for USN-4510-2 is invalid. " - "Error: USN-4510-2 metadata has unexpected " - "release_packages source_link value for samba2: " - "unknown format." 
- ), - ), - ), - ) - @mock.patch("uaclient.system.get_release_info") - def test_release_packages_errors_on_sparse_source_url( - self, m_get_release_info, source_link, error_msg, FakeConfig - ): - """Raise errors when USN metadata contains no valid source_link.""" - m_get_release_info.return_value = mock.MagicMock( - series="series-example-1" - ) - client = UASecurityClient(FakeConfig()) - sparse_md = copy.deepcopy(SAMPLE_USN_RESPONSE) - sparse_md["release_packages"]["series-example-1"].append( - { - "is_source": False, - "name": "samba2", - "source_link": source_link, - "version": "2~14.04.1+esm9", - "version_link": "https://....11+dfsg-0ubuntu0.14.04.20+esm9", - } - ) - usn = USN(client, sparse_md) - with pytest.raises(exceptions.SecurityAPIMetadataError) as exc: - usn.release_packages - assert error_msg in str(exc.value) - - @pytest.mark.parametrize( - "usn_response,expected", - ( - ( - SAMPLE_USN_RESPONSE, - textwrap.dedent( - """\ - USN-4510-2: Samba vulnerability - Found CVEs: - - https://ubuntu.com/security/CVE-2020-1473 - - https://ubuntu.com/security/CVE-2020-1472""" - ), - ), - ( - shallow_merge_dicts( - SAMPLE_USN_RESPONSE, - {"cves_ids": SAMPLE_USN_RESPONSE["cves_ids"] * 8}, - ), - """\ -USN-4510-2: Samba vulnerability -Found CVEs: - - https://ubuntu.com/security/CVE-2020-1473 - - https://ubuntu.com/security/CVE-2020-1472 - - https://ubuntu.com/security/CVE-2020-1473 - - https://ubuntu.com/security/CVE-2020-1472 - - https://ubuntu.com/security/CVE-2020-1473 - - https://ubuntu.com/security/CVE-2020-1472 - - https://ubuntu.com/security/CVE-2020-1473 - - https://ubuntu.com/security/CVE-2020-1472 - - https://ubuntu.com/security/CVE-2020-1473 - - https://ubuntu.com/security/CVE-2020-1472 - - https://ubuntu.com/security/CVE-2020-1473 - - https://ubuntu.com/security/CVE-2020-1472 - - https://ubuntu.com/security/CVE-2020-1473 - - https://ubuntu.com/security/CVE-2020-1472 - - https://ubuntu.com/security/CVE-2020-1473 - - https://ubuntu.com/security/CVE-2020-1472""", - ), - ( - SAMPLE_USN_RESPONSE_NO_CVES, - textwrap.dedent( - """\ - USN-4038-3: USN vulnerability - Found Launchpad bugs: - - https://launchpad.net/bugs/1834494""" - ), - ), - ), - ) - def test_get_url_header(self, FakeConfig, usn_response, expected): - """USN.get_url_header returns a string based on the USN response.""" - client = UASecurityClient(FakeConfig()) - usn = USN(client, usn_response) - assert expected == usn.get_url_header() - - @pytest.mark.parametrize( - "cves_response,expected", - ( - (None, []), - ([], []), - ( # CVEs are properly sorted by id - [{"id": "1"}, {"id": "2"}], - [CVE(None, {"id": "2"}), CVE(None, {"id": "1"})], - ), - ), - ) - def test_cves_cached_and_sorted_from_cves_response( - self, cves_response, expected, FakeConfig - ): - """List of USNs returned from CVE 'usns' response if present.""" - client = UASecurityClient(FakeConfig()) - usn_response = copy.deepcopy(SAMPLE_USN_RESPONSE) - if cves_response is not None: - usn_response["cves"] = cves_response - usn = USN(client, usn_response) - assert expected == usn.cves - # clear box test caching in effect - usn.response = "junk" - assert expected == usn.cves - - -class TestCVEPackageStatus: - def test_simple_properties_from_response(self): - pkg_status = CVEPackageStatus( - cve_response=CVE_ESM_PACKAGE_STATUS_RESPONSE - ) - assert CVE_ESM_PACKAGE_STATUS_RESPONSE == pkg_status.response - assert pkg_status.response["description"] == pkg_status.description - assert pkg_status.description == pkg_status.fixed_version - assert pkg_status.response["pocket"] 
== pkg_status.pocket - assert ( - pkg_status.response["release_codename"] - == pkg_status.release_codename - ) - assert pkg_status.response["status"] == pkg_status.status - - @pytest.mark.parametrize( - "pocket,description,expected", - ( - ("esm-infra", "1.2", "Ubuntu Pro: ESM Infra"), - ("esm-apps", "1.2", "Ubuntu Pro: ESM Apps"), - ("updates", "1.2esm", "Ubuntu standard updates"), - ("security", "1.2esm", "Ubuntu standard updates"), - (None, "1.2", "Ubuntu standard updates"), - (None, "1.2esm", "Ubuntu Pro: ESM Infra"), - ), - ) - def test_pocket_source_from_response(self, pocket, description, expected): - cve_response = {"pocket": pocket, "description": description} - pkg_status = CVEPackageStatus(cve_response=cve_response) - assert expected == pkg_status.pocket_source - - @pytest.mark.parametrize( - "pocket,description,expected", - ( - ("esm-infra", "1.2", True), - ("esm-apps", "1.2", True), - ("updates", "1.2esm", False), - ("security", "1.2esm", False), - (None, "1.2", False), - (None, "1.2esm", True), - ), - ) - def test_requires_ua_from_response(self, pocket, description, expected): - """requires_pro is derived from response pocket and description.""" - cve_response = {"pocket": pocket, "description": description} - pkg_status = CVEPackageStatus(cve_response=cve_response) - assert expected is pkg_status.requires_ua - - @pytest.mark.parametrize( - "status,pocket,expected", - ( - ( - "not-affected", - "", - "Source package is not affected on this release.", - ), - ("DNE", "", "Source package does not exist on this release."), - ( - "needs-triage", - "esm-infra", - "Ubuntu security engineers are investigating this issue.", - ), - ("needed", "esm-infra", "Sorry, no fix is available yet."), - ( - "pending", - "esm-infra", - "A fix is coming soon. 
Try again tomorrow.", - ), - ("ignored", "esm-infra", "Sorry, no fix is available."), - ( - "released", - "esm-infra", - "A fix is available in Ubuntu Pro: ESM Infra.", - ), - ( - "released", - "security", - "A fix is available in Ubuntu standard updates.", - ), - ("bogus", "1.2", "UNKNOWN: bogus"), - ), - ) - def test_status_message_from_response(self, status, pocket, expected): - cve_response = {"pocket": pocket, "status": status} - pkg_status = CVEPackageStatus(cve_response=cve_response) - assert expected == pkg_status.status_message - - -@mock.patch("uaclient.security.UASecurityClient.request_url") -class TestUASecurityClient: - @pytest.mark.parametrize( - "m_kwargs,expected_error, extra_security_params", - ( - ({}, None, None), - ({"query": "vq"}, None, {"test": "blah"}), - (SAMPLE_GET_CVES_QUERY_PARAMS, None, None), - ({"invalidparam": "vv"}, TypeError, None), - ), - ) - def test_get_cves_sets_query_params_on_get_cves_route( - self, - request_url, - m_kwargs, - expected_error, - extra_security_params, - FakeConfig, - ): - """GET CVE instances from API_V1_CVES route with querystrings""" - cfg = FakeConfig() - if extra_security_params: - cfg.override_features( - {"extra_security_params": extra_security_params} - ) - - client = UASecurityClient(cfg) - if expected_error: - with pytest.raises(expected_error) as exc: - client.get_cves(**m_kwargs) - assert ( - "get_cves() got an unexpected keyword argument 'invalidparam'" - ) in str(exc.value) - assert 0 == request_url.call_count - else: - for key in SAMPLE_GET_CVES_QUERY_PARAMS: - if key not in m_kwargs: - m_kwargs[key] = None - request_url.return_value = http.HTTPResponse( - code=200, - headers={}, - body="", - json_dict={}, - json_list=["body1", "body2"], - ) - [cve1, cve2] = client.get_cves(**m_kwargs) - assert isinstance(cve1, CVE) - assert isinstance(cve2, CVE) - assert "body1" == cve1.response - assert "body2" == cve2.response - # get_cves transposes "query" to "q" - m_kwargs["q"] = m_kwargs.pop("query") - - assert [ - mock.call(API_V1_CVES, query_params=m_kwargs) - ] == request_url.call_args_list - - @pytest.mark.parametrize( - "m_kwargs,expected_error, extra_security_params", - ( - ({}, None, None), - ({"details": "cve"}, None, None), - (SAMPLE_GET_NOTICES_QUERY_PARAMS, None, {"test": "blah"}), - ({"invalidparam": "vv"}, TypeError, None), - ), - ) - def test_get_notices_sets_query_params_on_get_cves_route( - self, - request_url, - m_kwargs, - expected_error, - extra_security_params, - FakeConfig, - ): - """GET body from API_V1_NOTICES route with appropriate querystring""" - cfg = FakeConfig() - if extra_security_params: - cfg.override_features( - {"extra_security_params": extra_security_params} - ) - - client = UASecurityClient(cfg) - if expected_error: - with pytest.raises(expected_error) as exc: - client.get_notices(**m_kwargs) - assert ( - "get_notices() got an unexpected keyword argument" - " 'invalidparam'" - ) in str(exc.value) - assert 0 == request_url.call_count - else: - for key in SAMPLE_GET_NOTICES_QUERY_PARAMS: - if key not in m_kwargs: - m_kwargs[key] = None - request_url.return_value = http.HTTPResponse( - code=200, - headers={}, - body="", - json_dict={ - "notices": [ - {"id": "2", "cves_ids": ["cve"]}, - {"id": "1", "cves_ids": ["cve"]}, - ] - }, - json_list=[], - ) - [usn1, usn2] = client.get_notices(**m_kwargs) - assert isinstance(usn1, USN) - assert isinstance(usn2, USN) - assert "1" == usn1.id - assert "2" == usn2.id - assert [ - mock.call(API_V1_NOTICES, query_params=m_kwargs) - ] == request_url.call_args_list 
- - @pytest.mark.parametrize("details", (("cve1"), (None))) - def test_get_notices_filter_usns_when_setting_details_param( - self, request_url, details, FakeConfig - ): - """Test if details are used to filter the returned USNs.""" - cfg = FakeConfig() - client = UASecurityClient(cfg) - request_url.return_value = http.HTTPResponse( - code=200, - headers={}, - body="", - json_dict={ - "notices": [ - {"id": "2", "cves_ids": ["cve1"]}, - {"id": "1", "cves_ids": ["cve12"]}, - ] - }, - json_list=[], - ) - usns = client.get_notices(details=details) - - if details: - assert len(usns) == 1 - assert usns[0].id == "2" - else: - assert len(usns) == 2 - assert usns[0].id == "1" - assert usns[1].id == "2" - - @pytest.mark.parametrize( - "m_kwargs,expected_error, extra_security_params", - (({}, TypeError, None), ({"cve_id": "CVE-1"}, None, {"test": "blah"})), - ) - def test_get_cve_provides_response_from_cve_json_route( - self, - request_url, - m_kwargs, - expected_error, - extra_security_params, - FakeConfig, - ): - """GET body from API_V1_CVE_TMPL route with required cve_id.""" - cfg = FakeConfig() - if extra_security_params: - cfg.override_features( - {"extra_security_params": extra_security_params} - ) - client = UASecurityClient(cfg) - if expected_error: - with pytest.raises(expected_error) as exc: - client.get_cve(**m_kwargs) - assert ( - "get_cve() missing 1 required positional argument: 'cve_id'" - ) in str(exc.value) - assert 0 == request_url.call_count - else: - request_url.return_value = http.HTTPResponse( - code=200, - headers={}, - body="", - json_dict={"body": "body"}, - json_list=[], - ) - cve = client.get_cve(**m_kwargs) - assert isinstance(cve, CVE) - assert {"body": "body"} == cve.response - assert [ - mock.call(API_V1_CVE_TMPL.format(cve=m_kwargs["cve_id"])) - ] == request_url.call_args_list - - @pytest.mark.parametrize( - "m_kwargs,expected_error, extra_security_params", - ( - ({}, TypeError, None), - ({"notice_id": "USN-1"}, None, {"test": "blah"}), - ), - ) - def test_get_notice_provides_response_from_notice_json_route( - self, - request_url, - m_kwargs, - expected_error, - extra_security_params, - FakeConfig, - ): - """GET body from API_V1_NOTICE_TMPL route with required notice_id.""" - cfg = FakeConfig() - if extra_security_params: - cfg.override_features( - {"extra_security_params": extra_security_params} - ) - - client = UASecurityClient(cfg) - if expected_error: - with pytest.raises(expected_error) as exc: - client.get_notice(**m_kwargs) - assert ( - "get_notice() missing 1 required positional argument:" - " 'notice_id'" - ) in str(exc.value) - assert 0 == request_url.call_count - else: - request_url.return_value = http.HTTPResponse( - code=200, - headers={}, - body="", - json_dict={"body": "body"}, - json_list=[], - ) - assert {"body": "body"} == client.get_notice(**m_kwargs).response - assert [ - mock.call( - API_V1_NOTICE_TMPL.format(notice=m_kwargs["notice_id"]) - ) - ] == request_url.call_args_list - - -class TestQueryInstalledPkgSources: - @pytest.mark.parametrize( - "dpkg_out,results", - ( - # Ignore b non-installed status - ("a,,1.2,installed\nb,b,1.2,config-files", {"a": {"a": "1.2"}}), - # Handle cases where no Source is defined for the pkg - ( - "a,,1.2,installed\nzip,zip,3.0,installed", - {"a": {"a": "1.2"}, "zip": {"zip": "3.0"}}, - ), - # Prefer Source package name to binary package name - ( - "b,bsrc,1.2,installed\nzip,zip,3.0,installed", - {"bsrc": {"b": "1.2"}, "zip": {"zip": "3.0"}}, - ), - ), - ) - @mock.patch("uaclient.security.system.subp") - 
@mock.patch("uaclient.system.get_release_info") - def test_result_keyed_by_source_package_name( - self, m_get_release_info, subp, dpkg_out, results - ): - m_get_release_info.return_value = mock.MagicMock(series="bionic") - subp.return_value = dpkg_out, "" - assert results == query_installed_source_pkg_versions() - _format = "-f=${Package},${Source},${Version},${db:Status-Status}\n" - assert [ - mock.call(["dpkg-query", _format, "-W"]) - ] == subp.call_args_list - - -CVE_PKG_STATUS_NEEDED = { - "description": "2.1", - "pocket": None, - "status": "needed", -} -CVE_PKG_STATUS_IGNORED = { - "description": "2.1", - "pocket": None, - "status": "ignored", -} -CVE_PKG_STATUS_DEFERRED = { - "description": "2.1", - "pocket": None, - "status": "deferred", -} -CVE_PKG_STATUS_NEEDS_TRIAGE = { - "description": "2.1", - "pocket": None, - "status": "needs-triage", -} -CVE_PKG_STATUS_PENDING = { - "description": "2.1", - "pocket": None, - "status": "pending", -} -CVE_PKG_STATUS_RELEASED = { - "description": "2.1", - "pocket": "updates", - "status": "released", -} -CVE_PKG_STATUS_RELEASED_ESM_INFRA = { - "description": "2.1", - "pocket": "esm-infra", - "status": "released", -} -CVE_PKG_STATUS_RELEASED_ESM_APPS = { - "description": "2.1", - "pocket": "esm-apps", - "status": "released", -} -CVE_PKG_STATUS_NEEDED = {"description": "", "pocket": None, "status": "needed"} - - -class TestPromptForAffectedPackages: - @pytest.mark.parametrize( - "affected_pkg_status,installed_packages," - "usn_released_pkgs,cloud_type,expected,expected_ret," - "expected_unfixed_pkgs", - ( - ( # No affected_packages listed - {}, - {"curl": {"curl": "1.0"}}, - {"unread-because-no-affected-pkgs": {}}, - (None, NoCloudTypeReason.NO_CLOUD_DETECTED), - textwrap.dedent( - """\ - No affected source packages are installed. - - {check} USN-### does not affect your system. - """.format( - check=OKGREEN_CHECK # noqa: E126 - ) # noqa: E126 - ), - FixStatus.SYSTEM_NOT_AFFECTED, - None, - ), - ( # version is >= released affected package - {"slsrc": CVEPackageStatus(CVE_PKG_STATUS_RELEASED)}, - {"slsrc": {"sl": "2.1"}}, - {"slsrc": {"sl": {"version": "2.1"}}}, - (None, NoCloudTypeReason.NO_CLOUD_DETECTED), - textwrap.dedent( - """\ - 1 affected source package is installed: slsrc - (1/1) slsrc: - A fix is available in Ubuntu standard updates. - The update is already installed. - - {check} USN-### is resolved. - """.format( - check=OKGREEN_CHECK # noqa: E126 - ) # noqa: E126 - ), - FixStatus.SYSTEM_NON_VULNERABLE, - [], - ), - ( # usn_released_pkgs version is used instead of CVE (2.1) - {"slsrc": CVEPackageStatus(CVE_PKG_STATUS_RELEASED)}, - {"slsrc": {"sl": "2.1"}}, - {"slsrc": {"sl": {"version": "2.2"}}}, - (None, NoCloudTypeReason.NO_CLOUD_DETECTED), - textwrap.dedent( - """\ - 1 affected source package is installed: slsrc - (1/1) slsrc: - A fix is available in Ubuntu standard updates. - """ - ) - + colorize_commands( - [["apt update && apt install --only-upgrade" " -y sl"]] - ) - + "\n\n" - + "{check} USN-### is resolved.\n".format(check=OKGREEN_CHECK), - FixStatus.SYSTEM_NON_VULNERABLE, - [], - ), - ( # version is < released affected package standard updates - {"slsrc": CVEPackageStatus(CVE_PKG_STATUS_RELEASED)}, - {"slsrc": {"sl": "2.0"}}, - {"slsrc": {"sl": {"version": "2.1"}}}, - (None, NoCloudTypeReason.NO_CLOUD_DETECTED), - textwrap.dedent( - """\ - 1 affected source package is installed: slsrc - (1/1) slsrc: - A fix is available in Ubuntu standard updates. 
- """ - ) - + "\n".join( - [ - colorize_commands( - [ - [ - "apt update && apt install --only-upgrade" - " -y sl" - ] - ] - ), - "", - "{check} USN-### is resolved.\n".format( - check=OKGREEN_CHECK - ), - ] - ), - FixStatus.SYSTEM_NON_VULNERABLE, - [], - ), - ( # version is < released affected package esm-infra updates - {"slsrc": CVEPackageStatus(CVE_PKG_STATUS_RELEASED_ESM_INFRA)}, - {"slsrc": {"sl": "2.0"}}, - {"slsrc": {"sl": {"version": "2.1"}}}, - ("azure", None), - textwrap.dedent( - """\ - 1 affected source package is installed: slsrc - (1/1) slsrc: - A fix is available in Ubuntu Pro: ESM Infra. - """ - ) - + "\n".join( - [ - SECURITY_USE_PRO_TMPL.format( - title="Azure", - cloud_specific_url="https://ubuntu.com/azure/pro", - ), - SECURITY_UPDATE_NOT_INSTALLED_SUBSCRIPTION, - ] - ), - FixStatus.SYSTEM_STILL_VULNERABLE, - [ - UnfixedPackage( - pkg="slsrc", - unfixed_reason=SECURITY_UA_SERVICE_REQUIRED.format( - service="Ubuntu Pro: ESM Infra" - ), - ), - ], - ), - ( # version < released package in esm-infra updates and aws cloud - {"slsrc": CVEPackageStatus(CVE_PKG_STATUS_RELEASED_ESM_INFRA)}, - {"slsrc": {"sl": "2.0"}}, - {"slsrc": {"sl": {"version": "2.1"}}}, - ("aws", None), - textwrap.dedent( - """\ - 1 affected source package is installed: slsrc - (1/1) slsrc: - A fix is available in Ubuntu Pro: ESM Infra. - """ - ) - + "\n".join( - [ - SECURITY_USE_PRO_TMPL.format( - title="AWS", - cloud_specific_url="https://ubuntu.com/aws/pro", - ), - SECURITY_UPDATE_NOT_INSTALLED_SUBSCRIPTION, - ] - ), - FixStatus.SYSTEM_STILL_VULNERABLE, - [ - UnfixedPackage( - pkg="slsrc", - unfixed_reason=SECURITY_UA_SERVICE_REQUIRED.format( - service="Ubuntu Pro: ESM Infra" - ), - ), - ], - ), - ( # version is < released affected both esm-apps and standard - { - "slsrc": CVEPackageStatus( - CVE_PKG_STATUS_RELEASED_ESM_APPS - ), - "curl": CVEPackageStatus(CVE_PKG_STATUS_RELEASED), - }, - {"slsrc": {"sl": "2.0"}, "curl": {"curl": "2.0"}}, - { - "slsrc": {"sl": {"version": "2.1"}}, - "curl": {"curl": {"version": "2.1"}}, - }, - ("gce", None), - textwrap.dedent( - """\ - 2 affected source packages are installed: curl, slsrc - (1/2) curl: - A fix is available in Ubuntu standard updates. - """ - ) - + colorize_commands( - [["apt update && apt install --only-upgrade" " -y curl"]] - ) - + "\n" - + textwrap.dedent( - """\ - (2/2) slsrc: - A fix is available in Ubuntu Pro: ESM Apps. 
- """ - ) - + "\n".join( - [ - SECURITY_USE_PRO_TMPL.format( - title="GCP", - cloud_specific_url="https://ubuntu.com/gcp/pro", - ), - SECURITY_UPDATE_NOT_INSTALLED_SUBSCRIPTION, - ] - ) - + "\n\n" - + "1 package is still affected: slsrc", - FixStatus.SYSTEM_STILL_VULNERABLE, - [ - UnfixedPackage( - pkg="slsrc", - unfixed_reason=SECURITY_UA_SERVICE_REQUIRED.format( - service="Ubuntu Pro: ESM Apps" - ), - ), - ], - ), - ( # version is < released affected both esm-apps and standard - { - "pkg1": CVEPackageStatus(CVE_PKG_STATUS_IGNORED), - "pkg2": CVEPackageStatus(CVE_PKG_STATUS_IGNORED), - "pkg3": CVEPackageStatus(CVE_PKG_STATUS_PENDING), - "pkg4": CVEPackageStatus(CVE_PKG_STATUS_PENDING), - "pkg5": CVEPackageStatus(CVE_PKG_STATUS_NEEDS_TRIAGE), - "pkg6": CVEPackageStatus(CVE_PKG_STATUS_NEEDS_TRIAGE), - "pkg7": CVEPackageStatus(CVE_PKG_STATUS_NEEDED), - "pkg8": CVEPackageStatus(CVE_PKG_STATUS_NEEDED), - "pkg9": CVEPackageStatus(CVE_PKG_STATUS_DEFERRED), - "pkg10": CVEPackageStatus(CVE_PKG_STATUS_RELEASED), - "pkg11": CVEPackageStatus(CVE_PKG_STATUS_RELEASED), - "pkg12": CVEPackageStatus( - CVE_PKG_STATUS_RELEASED_ESM_INFRA - ), - "pkg13": CVEPackageStatus( - CVE_PKG_STATUS_RELEASED_ESM_INFRA - ), - "pkg14": CVEPackageStatus( - CVE_PKG_STATUS_RELEASED_ESM_APPS - ), - "pkg15": CVEPackageStatus( - CVE_PKG_STATUS_RELEASED_ESM_APPS - ), - }, - { - "pkg10": {"pkg10": "2.0"}, - "pkg11": {"pkg11": "2.0"}, - "pkg12": {"pkg12": "2.0"}, - "pkg13": {"pkg13": "2.0"}, - "pkg14": {"pkg14": "2.0"}, - "pkg15": {"pkg15": "2.0"}, - }, - { - "pkg10": {"pkg10": {"version": "2.1"}}, - "pkg11": {"pkg11": {"version": "2.1"}}, - "pkg12": {"pkg12": {"version": "2.1"}}, - "pkg13": {"pkg13": {"version": "2.1"}}, - "pkg14": {"pkg14": {"version": "2.1"}}, - "pkg15": {"pkg15": {"version": "2.1"}}, - }, - ("gce", None), - textwrap.dedent( - """\ - 15 affected source packages are installed: {} - (1/15, 2/15, 3/15) pkg1, pkg2, pkg9: - Sorry, no fix is available. - (4/15, 5/15) pkg7, pkg8: - Sorry, no fix is available yet. - (6/15, 7/15) pkg5, pkg6: - Ubuntu security engineers are investigating this issue. - (8/15, 9/15) pkg3, pkg4: - A fix is coming soon. Try again tomorrow. - (10/15, 11/15) pkg10, pkg11: - A fix is available in Ubuntu standard updates. - """ - ).format( - ( - "pkg1, pkg10, pkg11, pkg12, pkg13,\n" - " pkg14, pkg15, pkg2, pkg3, pkg4, pkg5, pkg6, pkg7," - " pkg8, pkg9" - ) - ) - + colorize_commands( - [ - [ - "apt update && apt install --only-upgrade" - " -y pkg10 pkg11" - ] - ] - ) - + "\n" - + textwrap.dedent( - """\ - (12/15, 13/15) pkg12, pkg13: - A fix is available in Ubuntu Pro: ESM Infra. 
- """ - ) - + "\n".join( - [ - SECURITY_USE_PRO_TMPL.format( - title="GCP", - cloud_specific_url="https://ubuntu.com/gcp/pro", - ), - SECURITY_UPDATE_NOT_INSTALLED_SUBSCRIPTION, - ] - ) - + "\n\n" - + "13 packages are still affected: {}".format( - ( - "pkg1, pkg12, pkg13, pkg14, pkg15, pkg2, pkg3,\n" - " pkg4, pkg5, pkg6, pkg7, pkg8, pkg9" - ) - ), - FixStatus.SYSTEM_STILL_VULNERABLE, - [ - UnfixedPackage( - pkg="pkg1", - unfixed_reason=SECURITY_CVE_STATUS_IGNORED, - ), - UnfixedPackage( - pkg="pkg2", - unfixed_reason=SECURITY_CVE_STATUS_IGNORED, - ), - UnfixedPackage( - pkg="pkg9", - unfixed_reason=SECURITY_CVE_STATUS_IGNORED, - ), - UnfixedPackage( - pkg="pkg7", - unfixed_reason=SECURITY_CVE_STATUS_NEEDED, - ), - UnfixedPackage( - pkg="pkg8", - unfixed_reason=SECURITY_CVE_STATUS_NEEDED, - ), - UnfixedPackage( - pkg="pkg5", - unfixed_reason=SECURITY_CVE_STATUS_TRIAGE, - ), - UnfixedPackage( - pkg="pkg6", - unfixed_reason=SECURITY_CVE_STATUS_TRIAGE, - ), - UnfixedPackage( - pkg="pkg3", - unfixed_reason=SECURITY_CVE_STATUS_PENDING, - ), - UnfixedPackage( - pkg="pkg4", - unfixed_reason=SECURITY_CVE_STATUS_PENDING, - ), - UnfixedPackage( - pkg="pkg12", - unfixed_reason=SECURITY_UA_SERVICE_REQUIRED.format( - service="Ubuntu Pro: ESM Infra" - ), - ), - UnfixedPackage( - pkg="pkg13", - unfixed_reason=SECURITY_UA_SERVICE_REQUIRED.format( - service="Ubuntu Pro: ESM Infra" - ), - ), - UnfixedPackage( - pkg="pkg14", - unfixed_reason=SECURITY_UA_SERVICE_REQUIRED.format( - service="Ubuntu Pro: ESM Apps" - ), - ), - UnfixedPackage( - pkg="pkg15", - unfixed_reason=SECURITY_UA_SERVICE_REQUIRED.format( - service="Ubuntu Pro: ESM Apps" - ), - ), - ], - ), - ( # No released version - { - "pkg1": CVEPackageStatus(CVE_PKG_STATUS_IGNORED), - "pkg2": CVEPackageStatus(CVE_PKG_STATUS_IGNORED), - "pkg3": CVEPackageStatus(CVE_PKG_STATUS_PENDING), - "pkg4": CVEPackageStatus(CVE_PKG_STATUS_PENDING), - "pkg5": CVEPackageStatus(CVE_PKG_STATUS_NEEDS_TRIAGE), - "pkg6": CVEPackageStatus(CVE_PKG_STATUS_NEEDS_TRIAGE), - "pkg7": CVEPackageStatus(CVE_PKG_STATUS_NEEDED), - "pkg8": CVEPackageStatus(CVE_PKG_STATUS_NEEDED), - "pkg9": CVEPackageStatus(CVE_PKG_STATUS_DEFERRED), - }, - {}, - {}, - ("gcp", None), - textwrap.dedent( - """\ - 9 affected source packages are installed: {} - (1/9, 2/9, 3/9) pkg1, pkg2, pkg9: - Sorry, no fix is available. - (4/9, 5/9) pkg7, pkg8: - Sorry, no fix is available yet. - (6/9, 7/9) pkg5, pkg6: - Ubuntu security engineers are investigating this issue. - (8/9, 9/9) pkg3, pkg4: - A fix is coming soon. Try again tomorrow. 
- """ - ).format( - "pkg1, pkg2, pkg3, pkg4, pkg5, pkg6,\n" - " pkg7, pkg8, pkg9" - ) - + "\n" - + "9 packages are still affected: {}".format( - "pkg1, pkg2, pkg3, pkg4, pkg5, pkg6, pkg7, pkg8,\n" - " pkg9" - ) - + "\n" - + "{check} USN-### is not resolved.\n".format(check=FAIL_X), - FixStatus.SYSTEM_STILL_VULNERABLE, - [ - UnfixedPackage( - pkg="pkg1", - unfixed_reason=SECURITY_CVE_STATUS_IGNORED, - ), - UnfixedPackage( - pkg="pkg2", - unfixed_reason=SECURITY_CVE_STATUS_IGNORED, - ), - UnfixedPackage( - pkg="pkg9", - unfixed_reason=SECURITY_CVE_STATUS_IGNORED, - ), - UnfixedPackage( - pkg="pkg7", - unfixed_reason=SECURITY_CVE_STATUS_NEEDED, - ), - UnfixedPackage( - pkg="pkg8", - unfixed_reason=SECURITY_CVE_STATUS_NEEDED, - ), - UnfixedPackage( - pkg="pkg5", - unfixed_reason=SECURITY_CVE_STATUS_TRIAGE, - ), - UnfixedPackage( - pkg="pkg6", - unfixed_reason=SECURITY_CVE_STATUS_TRIAGE, - ), - UnfixedPackage( - pkg="pkg3", - unfixed_reason=SECURITY_CVE_STATUS_PENDING, - ), - UnfixedPackage( - pkg="pkg4", - unfixed_reason=SECURITY_CVE_STATUS_PENDING, - ), - ], - ), - ( # text wrapping required in several places - { - "longpackagename1": CVEPackageStatus( - CVE_PKG_STATUS_RELEASED - ), - "longpackagename2": CVEPackageStatus( - CVE_PKG_STATUS_RELEASED - ), - "longpackagename3": CVEPackageStatus( - CVE_PKG_STATUS_RELEASED - ), - "longpackagename4": CVEPackageStatus( - CVE_PKG_STATUS_RELEASED - ), - "longpackagename5": CVEPackageStatus( - CVE_PKG_STATUS_RELEASED - ), - }, - { - "longpackagename1": {"longpackagename1": "2.0"}, - "longpackagename2": {"longpackagename2": "2.0"}, - "longpackagename3": {"longpackagename3": "2.0"}, - "longpackagename4": {"longpackagename4": "2.0"}, - "longpackagename5": {"longpackagename5": "2.0"}, - }, - { - "longpackagename1": { - "longpackagename1": {"version": "2.1"} - }, - "longpackagename2": { - "longpackagename2": {"version": "2.1"} - }, - "longpackagename3": { - "longpackagename3": {"version": "2.1"} - }, - "longpackagename4": { - "longpackagename4": {"version": "2.1"} - }, - "longpackagename5": { - "longpackagename5": {"version": "2.1"} - }, - }, - ("gcp", None), - """\ -5 affected source packages are installed: longpackagename1, longpackagename2, - longpackagename3, longpackagename4, longpackagename5 -(1/5, 2/5, 3/5, 4/5, 5/5) longpackagename1, longpackagename2, longpackagename3, - longpackagename4, longpackagename5: -A fix is available in Ubuntu standard updates.\n""" - + colorize_commands( - [ - [ - "apt update && apt install --only-upgrade" - " -y longpackagename1 longpackagename2 " - "longpackagename3 longpackagename4 " - "longpackagename5" - ] - ] - ) - + "\n\n" - + "{check} USN-### is resolved.\n".format(check=OKGREEN_CHECK), - FixStatus.SYSTEM_NON_VULNERABLE, - [], - ), - ), - ) - @mock.patch("uaclient.entitlements.base.UAEntitlement.user_facing_status") - @mock.patch("uaclient.system.should_reboot", return_value=False) - @mock.patch("os.getuid", return_value=0) - @mock.patch("uaclient.apt.get_pkg_candidate_version", return_value="99.9") - @mock.patch("uaclient.apt.run_apt_command", return_value="") - @mock.patch("uaclient.security.get_cloud_type") - @mock.patch("uaclient.security.util.prompt_choices", return_value="c") - def test_messages_for_affected_packages_based_on_installed_and_usn_release( - self, - prompt_choices, - get_cloud_type, - m_run_apt_cmd, - _m_get_pkg_cand_ver, - _m_os_getuid, - _m_should_reboot, - m_user_facing_status, - affected_pkg_status, - installed_packages, - usn_released_pkgs, - cloud_type, - expected, - expected_ret, - 
expected_unfixed_pkgs, - FakeConfig, - capsys, - _subp, - ): - """Messaging is based on affected status and installed packages.""" - get_cloud_type.return_value = cloud_type - m_user_facing_status.return_value = (UserFacingStatus.INACTIVE, "") - cfg = FakeConfig() - with mock.patch("uaclient.system._subp", side_effect=_subp): - with mock.patch("uaclient.util.sys") as m_sys: - m_stdout = mock.MagicMock() - type(m_sys).stdout = m_stdout - type(m_stdout).encoding = mock.PropertyMock( - return_value="utf-8" - ) - actual_ret = prompt_for_affected_packages( - cfg=cfg, - issue_id="USN-###", - affected_pkg_status=affected_pkg_status, - installed_packages=installed_packages, - usn_released_pkgs=usn_released_pkgs, - dry_run=False, - ) - assert expected_ret == actual_ret.status - assert expected_unfixed_pkgs == actual_ret.unfixed_pkgs - out, err = capsys.readouterr() - assert expected in out - - @pytest.mark.parametrize( - "affected_pkg_status,installed_packages,usn_released_pkgs,expected", - ( - ( - { - "pkg1": CVEPackageStatus(CVE_PKG_STATUS_RELEASED_ESM_APPS), - "pkg2": CVEPackageStatus(CVE_PKG_STATUS_RELEASED), - "pkg3": CVEPackageStatus( - CVE_PKG_STATUS_RELEASED_ESM_INFRA - ), - }, - { - "pkg1": {"pkg1": "1.8"}, - "pkg2": {"pkg2": "1.8"}, - "pkg3": {"pkg3": "1.8"}, - }, - { - "pkg1": {"pkg1": {"version": "2.0"}}, - "pkg2": {"pkg2": {"version": "2.0"}}, - "pkg3": {"pkg3": {"version": "2.0"}}, - }, - textwrap.dedent( - """\ - 3 affected source packages are installed: pkg1, pkg2, pkg3 - (1/3) pkg2: - A fix is available in Ubuntu standard updates. - """ - ) - + colorize_commands( - [["apt update && apt install --only-upgrade" " -y pkg2"]] - ) - + "\n" - + textwrap.dedent( - """\ - (2/3) pkg3: - A fix is available in Ubuntu Pro: ESM Infra. - """ - ) - + SECURITY_UPDATE_NOT_INSTALLED_SUBSCRIPTION - + "\n" - + PROMPT_ENTER_TOKEN - + "\n" - + colorize_commands([["pro attach token"]]) - + "\n" - + colorize_commands( - [["apt update && apt install --only-upgrade" " -y pkg3"]] - ) - + "\n" - + textwrap.dedent( - """\ - (3/3) pkg1: - A fix is available in Ubuntu Pro: ESM Apps. 
- """ - ) - + colorize_commands( - [["apt update && apt install --only-upgrade" " -y pkg1"]] - ) - + "\n\n" - + "{check} USN-### is resolved.\n".format(check=OKGREEN_CHECK), - ), - ), - ) - @mock.patch("uaclient.util.is_config_value_true", return_value=True) - @mock.patch("uaclient.system.should_reboot", return_value=False) - @mock.patch("uaclient.security._check_subscription_is_expired") - @mock.patch("uaclient.security._check_subscription_for_required_service") - @mock.patch("uaclient.cli.action_attach") - @mock.patch("builtins.input", return_value="token") - @mock.patch("uaclient.apt.run_apt_command", return_value="") - @mock.patch("uaclient.security.get_cloud_type") - @mock.patch("uaclient.security.util.prompt_choices", return_value="a") - @mock.patch("uaclient.apt.get_pkg_candidate_version", return_value="99.9") - def test_messages_for_affected_packages_covering_all_release_pockets( - self, - _m_apt_pkg_candidate_version, - m_prompt_choices, - m_get_cloud_type, - m_run_apt_cmd, - _m_input, - m_action_attach, - m_check_subscription_for_service, - m_check_subscription_expired, - _m_should_reboot, - _m_is_config_value_true, - affected_pkg_status, - installed_packages, - usn_released_pkgs, - expected, - FakeConfig, - capsys, - _subp, - ): - m_get_cloud_type.return_value = ("cloud", None) - m_check_subscription_for_service.return_value = True - m_check_subscription_expired.return_value = False - - def fake_attach(args, cfg): - cfg.for_attached_machine() - return 0 - - m_action_attach.side_effect = fake_attach - - cfg = FakeConfig() - with mock.patch("uaclient.system._subp", side_effect=_subp): - with mock.patch("uaclient.util.sys") as m_sys: - m_stdout = mock.MagicMock() - type(m_sys).stdout = m_stdout - type(m_stdout).encoding = mock.PropertyMock( - return_value="utf-8" - ) - prompt_for_affected_packages( - cfg=cfg, - issue_id="USN-###", - affected_pkg_status=affected_pkg_status, - installed_packages=installed_packages, - usn_released_pkgs=usn_released_pkgs, - dry_run=False, - ) - out, err = capsys.readouterr() - assert expected in out - - @pytest.mark.parametrize( - "affected_pkg_status,installed_packages,usn_released_pkgs,expected", - ( - ( - { - "pkg1": CVEPackageStatus(CVE_PKG_STATUS_RELEASED), - "pkg2": CVEPackageStatus(CVE_PKG_STATUS_RELEASED_ESM_APPS), - "pkg3": CVEPackageStatus( - CVE_PKG_STATUS_RELEASED_ESM_INFRA - ), - }, - { - "pkg1": {"pkg1": "1.8"}, - "pkg2": {"pkg2": "1.8"}, - "pkg3": {"pkg3": "1.8"}, - }, - { - "pkg1": {"pkg1": {"version": "2.0"}}, - "pkg2": {"pkg2": {"version": "2.0"}}, - "pkg3": {"pkg3": {"version": "2.0"}}, - }, - textwrap.dedent( - """\ - 3 affected source packages are installed: pkg1, pkg2, pkg3 - (1/3) pkg1: - A fix is available in Ubuntu standard updates. 
- """ - ) - + "\n" - + "3 packages are still affected: pkg1, pkg2, pkg3" - + "\n" - + "{check} USN-### is not resolved.\n".format(check=FAIL_X), - ), - ), - ) - @mock.patch("uaclient.system.should_reboot", return_value=False) - @mock.patch("uaclient.security.upgrade_packages_and_attach") - @mock.patch("uaclient.apt.get_pkg_candidate_version", return_value="99.9") - def test_messages_for_affected_packages_when_fix_fail( - self, - _m_apt_pkg_candidate_version, - m_upgrade_packages, - _m_should_reboot, - affected_pkg_status, - installed_packages, - usn_released_pkgs, - expected, - FakeConfig, - capsys, - _subp, - ): - m_upgrade_packages.return_value = UpgradeResult( - status=False, failure_reason=None - ) - - cfg = FakeConfig() - with mock.patch("uaclient.system._subp", side_effect=_subp): - with mock.patch("uaclient.util.sys") as m_sys: - m_stdout = mock.MagicMock() - type(m_sys).stdout = m_stdout - type(m_stdout).encoding = mock.PropertyMock( - return_value="utf-8" - ) - prompt_for_affected_packages( - cfg=cfg, - issue_id="USN-###", - affected_pkg_status=affected_pkg_status, - installed_packages=installed_packages, - usn_released_pkgs=usn_released_pkgs, - dry_run=False, - ) - out, err = capsys.readouterr() - assert expected in out - - @pytest.mark.parametrize( - "affected_pkg_status,installed_packages,usn_released_pkgs,expected", - ( - ( - { - "pkg1": CVEPackageStatus(CVE_PKG_STATUS_RELEASED), - "pkg2": CVEPackageStatus(CVE_PKG_STATUS_RELEASED), - }, - { - "pkg1": {"pkg1": "1.8"}, - "pkg2": {"pkg2": "1.8"}, - }, - { - "pkg1": {"pkg1": {"version": "2.0"}}, - "pkg2": {"pkg2": {"version": "1.8"}}, - }, - textwrap.dedent( - """\ - 2 affected source packages are installed: pkg1, pkg2 - (1/2, 2/2) pkg1, pkg2: - A fix is available in Ubuntu standard updates. - - Cannot install package pkg1 version 2.0 - """ - ) - + "\n" - + "1 package is still affected: pkg1" - + "\n" - + "{check} CVE-### is not resolved.\n".format(check=FAIL_X), - ), - ), - ) - @mock.patch("uaclient.apt.get_pkg_candidate_version") - def test_messages_for_affected_packages_when_pkg_cannot_be_upgraded( - self, - m_apt_pkg_candidate_version, - affected_pkg_status, - installed_packages, - usn_released_pkgs, - expected, - FakeConfig, - capsys, - _subp, - ): - m_apt_pkg_candidate_version.return_value = "1.8" - - cfg = FakeConfig() - with mock.patch("uaclient.util.sys") as m_sys: - m_stdout = mock.MagicMock() - type(m_sys).stdout = m_stdout - type(m_stdout).encoding = mock.PropertyMock(return_value="utf-8") - prompt_for_affected_packages( - cfg=cfg, - issue_id="CVE-###", - affected_pkg_status=affected_pkg_status, - installed_packages=installed_packages, - usn_released_pkgs=usn_released_pkgs, - dry_run=False, - ) - out, err = capsys.readouterr() - assert expected in out - - @pytest.mark.parametrize("should_reboot", (False, True)) - @pytest.mark.parametrize( - "service_status", - ( - (UserFacingStatus.INACTIVE), - (UserFacingStatus.INAPPLICABLE), - (UserFacingStatus.UNAVAILABLE), - ), - ) - @pytest.mark.parametrize( - "affected_pkg_status,installed_packages,usn_released_pkgs,expected", - ( - ( - {"pkg1": CVEPackageStatus(CVE_PKG_STATUS_RELEASED_ESM_INFRA)}, - {"pkg1": {"pkg1": "1.8"}}, - {"pkg1": {"pkg1": {"version": "2.0"}}}, - textwrap.dedent( - """\ - 1 affected source package is installed: pkg1 - (1/1) pkg1: - A fix is available in Ubuntu Pro: ESM Infra. 
- """ - ) - + SECURITY_UPDATE_NOT_INSTALLED_SUBSCRIPTION - + "\n" - + PROMPT_ENTER_TOKEN - + "\n" - + colorize_commands([["pro attach token"]]) - + "\n" - + SECURITY_UA_SERVICE_NOT_ENTITLED.format(service="esm-infra") - + "\n\n" - + "1 package is still affected: pkg1" - + "\n" - + "{check} USN-### is not resolved.\n".format(check=FAIL_X), - ), - ), - ) - @mock.patch("uaclient.util.is_config_value_true", return_value=True) - @mock.patch("uaclient.system.should_reboot") - @mock.patch("uaclient.cli.action_attach") - @mock.patch("uaclient.apt.get_pkg_candidate_version", return_value="99.9") - @mock.patch("builtins.input", return_value="token") - @mock.patch("uaclient.security.get_cloud_type") - @mock.patch("uaclient.security.util.prompt_choices", return_value="a") - def test_messages_for_affected_packages_when_required_service_not_enabled( - self, - m_prompt_choices, - m_get_cloud_type, - _m_input, - _m_apt_pkg_candidate_version, - m_action_attach, - m_should_reboot, - _m_is_config_value_true, - affected_pkg_status, - installed_packages, - usn_released_pkgs, - expected, - service_status, - should_reboot, - FakeConfig, - capsys, - _subp, - ): - m_should_reboot.return_value = should_reboot - m_get_cloud_type.return_value = ("cloud", None) - - def fake_attach(args, cfg): - cfg.for_attached_machine() - return 0 - - m_action_attach.side_effect = fake_attach - m_entitlement_cls = mock.MagicMock() - m_entitlement_obj = m_entitlement_cls.return_value - m_entitlement_obj.user_facing_status.return_value = ( - service_status, - "", - ) - m_entitlement_obj.applicability_status.return_value = ( - ApplicabilityStatus.INAPPLICABLE, - "", - ) - m_entitlement_obj.name = "esm-infra" - - cfg = FakeConfig() - with mock.patch( - "uaclient.security.entitlement_factory", - return_value=m_entitlement_cls, - ): - with mock.patch("uaclient.system._subp", side_effect=_subp): - with mock.patch("uaclient.util.sys") as m_sys: - m_stdout = mock.MagicMock() - type(m_sys).stdout = m_stdout - type(m_stdout).encoding = mock.PropertyMock( - return_value="utf-8" - ) - prompt_for_affected_packages( - cfg=cfg, - issue_id="USN-###", - affected_pkg_status=affected_pkg_status, - installed_packages=installed_packages, - usn_released_pkgs=usn_released_pkgs, - dry_run=False, - ) - out, err = capsys.readouterr() - assert expected in out - - @pytest.mark.parametrize( - "affected_pkg_status,installed_packages,usn_released_pkgs,expected", - ( - ( - {"pkg1": CVEPackageStatus(CVE_PKG_STATUS_RELEASED_ESM_INFRA)}, - {"pkg1": {"pkg1": "1.8"}}, - {"pkg1": {"pkg1": {"version": "2.0"}}}, - textwrap.dedent( - """\ - 1 affected source package is installed: pkg1 - (1/1) pkg1: - A fix is available in Ubuntu Pro: ESM Infra. 
- """ - ) - + SECURITY_SERVICE_DISABLED.format(service="esm-infra") - + "\n" - + colorize_commands([["pro enable esm-infra"]]) - + "\n" - + colorize_commands( - [["apt update && apt install --only-upgrade" " -y pkg1"]] - ) - + "\n\n" - + "{check} USN-### is resolved.\n".format(check=OKGREEN_CHECK), - ), - ), - ) - @mock.patch("uaclient.security._is_pocket_used_by_beta_service") - @mock.patch("uaclient.util.is_config_value_true", return_value=False) - @mock.patch("uaclient.system.should_reboot", return_value=False) - @mock.patch("uaclient.security._check_subscription_is_expired") - @mock.patch("uaclient.cli.action_enable", return_value=0) - @mock.patch("uaclient.apt.run_apt_command", return_value="") - @mock.patch("uaclient.apt.get_pkg_candidate_version", return_value="99.9") - @mock.patch("os.getuid", return_value=0) - @mock.patch("uaclient.security.get_cloud_type") - @mock.patch("uaclient.security.util.prompt_choices", return_value="e") - def test_messages_for_affected_packages_when_service_can_be_enabled( - self, - m_prompt_choices, - m_get_cloud_type, - _m_os_getuid, - _m_apt_pkg_candidate_version, - _m_run_apt, - m_action_enable, - m_check_subscription_expired, - _m_should_reboot, - _m_is_config_value_true, - m_is_pocket_beta_service, - affected_pkg_status, - installed_packages, - usn_released_pkgs, - expected, - FakeConfig, - capsys, - _subp, - ): - m_get_cloud_type.return_value = ("cloud", None) - m_check_subscription_expired.return_value = False - m_is_pocket_beta_service.return_value = False - - m_entitlement_cls = mock.MagicMock() - m_entitlement_obj = m_entitlement_cls.return_value - m_entitlement_obj.user_facing_status.return_value = ( - UserFacingStatus.INACTIVE, - "", - ) - m_entitlement_obj.applicability_status.return_value = ( - ApplicabilityStatus.APPLICABLE, - "", - ) - type(m_entitlement_obj).name = mock.PropertyMock( - return_value="esm-infra" - ) - - cfg = FakeConfig().for_attached_machine() - with mock.patch( - "uaclient.entitlements.entitlement_factory", - return_value=m_entitlement_cls, - ): - with mock.patch("uaclient.system._subp", side_effect=_subp): - with mock.patch("uaclient.util.sys") as m_sys: - m_stdout = mock.MagicMock() - type(m_sys).stdout = m_stdout - type(m_stdout).encoding = mock.PropertyMock( - return_value="utf-8" - ) - prompt_for_affected_packages( - cfg=cfg, - issue_id="USN-###", - affected_pkg_status=affected_pkg_status, - installed_packages=installed_packages, - usn_released_pkgs=usn_released_pkgs, - dry_run=False, - ) - out, err = capsys.readouterr() - assert expected in out - - @pytest.mark.parametrize( - "affected_pkg_status,installed_packages,usn_released_pkgs,expected", - ( - ( - {"pkg1": CVEPackageStatus(CVE_PKG_STATUS_RELEASED_ESM_INFRA)}, - {"pkg1": {"pkg1": "1.8"}}, - {"pkg1": {"pkg1": {"version": "2.0"}}}, - textwrap.dedent( - """\ - 1 affected source package is installed: pkg1 - (1/1) pkg1: - A fix is available in Ubuntu Pro: ESM Infra. 
- """ - ) - + SECURITY_SERVICE_DISABLED.format(service="esm-infra") - + "\n" - + SECURITY_UA_SERVICE_NOT_ENABLED.format(service="esm-infra") - + "\n\n" - + "1 package is still affected: pkg1" - + "\n" - + "{check} USN-### is not resolved.\n".format(check=FAIL_X), - ), - ), - ) - @mock.patch("uaclient.security._is_pocket_used_by_beta_service") - @mock.patch("uaclient.util.is_config_value_true", return_value=False) - @mock.patch("uaclient.system.should_reboot", return_value=False) - @mock.patch("uaclient.security._check_subscription_is_expired") - @mock.patch("uaclient.apt.get_pkg_candidate_version", return_value="99.9") - @mock.patch("os.getuid", return_value=0) - @mock.patch("uaclient.security.get_cloud_type") - @mock.patch("uaclient.security.util.prompt_choices", return_value="c") - def test_messages_for_affected_packages_when_service_kept_disabled( - self, - m_prompt_choices, - m_get_cloud_type, - _m_os_getuid, - _m_apt_pkg_candidate_version, - m_check_subscription_expired, - _m_should_reboot, - _m_is_config_value_true, - m_is_pocket_beta_service, - affected_pkg_status, - installed_packages, - usn_released_pkgs, - expected, - FakeConfig, - capsys, - _subp, - ): - m_get_cloud_type.return_value = ("cloud", None) - m_check_subscription_expired.return_value = False - m_is_pocket_beta_service.return_value = False - - m_entitlement_cls = mock.MagicMock() - m_entitlement_obj = m_entitlement_cls.return_value - m_entitlement_obj.user_facing_status.return_value = ( - UserFacingStatus.INACTIVE, - "", - ) - m_entitlement_obj.applicability_status.return_value = ( - ApplicabilityStatus.APPLICABLE, - "", - ) - type(m_entitlement_obj).name = mock.PropertyMock( - return_value="esm-infra" - ) - - cfg = FakeConfig().for_attached_machine() - with mock.patch( - "uaclient.entitlements.entitlement_factory", - return_value=m_entitlement_cls, - ): - with mock.patch("uaclient.system._subp", side_effect=_subp): - with mock.patch("uaclient.util.sys") as m_sys: - m_stdout = mock.MagicMock() - type(m_sys).stdout = m_stdout - type(m_stdout).encoding = mock.PropertyMock( - return_value="utf-8" - ) - prompt_for_affected_packages( - cfg=cfg, - issue_id="USN-###", - affected_pkg_status=affected_pkg_status, - installed_packages=installed_packages, - usn_released_pkgs=usn_released_pkgs, - dry_run=False, - ) - out, err = capsys.readouterr() - assert expected in out - - @pytest.mark.parametrize( - "affected_pkg_status,installed_packages,usn_released_pkgs,expected", - ( - ( - {"pkg1": CVEPackageStatus(CVE_PKG_STATUS_RELEASED_ESM_INFRA)}, - {"pkg1": {"pkg1": "1.8"}}, - {"pkg1": {"pkg1": {"version": "2.0"}}}, - textwrap.dedent( - """\ - 1 affected source package is installed: pkg1 - (1/1) pkg1: - A fix is available in Ubuntu Pro: ESM Infra. 
- """ - ) - + SECURITY_UPDATE_NOT_INSTALLED_EXPIRED - + "\n" - + PROMPT_EXPIRED_ENTER_TOKEN - + "\n" - + colorize_commands([["pro detach"]]) - + "\n" - + colorize_commands([["pro attach token"]]) - + "\n" - + colorize_commands( - [["apt update && apt install --only-upgrade" " -y pkg1"]] - ) - + "\n\n" - + "{check} USN-### is resolved.\n".format(check=OKGREEN_CHECK), - ), - ), - ) - @mock.patch("uaclient.security._is_pocket_used_by_beta_service") - @mock.patch("uaclient.system.should_reboot", return_value=False) - @mock.patch("uaclient.apt.run_apt_command", return_value="") - @mock.patch("uaclient.apt.get_pkg_candidate_version", return_value="99.9") - @mock.patch("uaclient.cli.action_attach") - @mock.patch("builtins.input", return_value="token") - @mock.patch("uaclient.cli.action_detach") - @mock.patch("uaclient.security._check_subscription_for_required_service") - @mock.patch("uaclient.security.get_cloud_type") - @mock.patch("uaclient.security.util.prompt_choices", return_value="r") - def test_messages_for_affected_packages_when_subscription_expired( - self, - m_prompt_choices, - m_get_cloud_type, - m_check_subscription_for_service, - _m_cli_detach, - _m_input, - m_cli_attach, - _m_apt_pkg_candidate_version, - _m_run_apt_command, - _m_should_reboot, - m_is_pocket_beta_service, - affected_pkg_status, - installed_packages, - usn_released_pkgs, - expected, - FakeConfig, - capsys, - _subp, - ): - m_get_cloud_type.return_value = ("cloud", None) - m_check_subscription_for_service.return_value = True - m_cli_attach.return_value = 0 - m_is_pocket_beta_service.return_value = False - - cfg = FakeConfig().for_attached_machine( - status_cache={ - "expires": "1999-12-01T00:00:00Z", - "attached": True, - }, - ) - with mock.patch("uaclient.system._subp", side_effect=_subp): - with mock.patch("uaclient.util.sys") as m_sys: - m_stdout = mock.MagicMock() - type(m_sys).stdout = m_stdout - type(m_stdout).encoding = mock.PropertyMock( - return_value="utf-8" - ) - prompt_for_affected_packages( - cfg=cfg, - issue_id="USN-###", - affected_pkg_status=affected_pkg_status, - installed_packages=installed_packages, - usn_released_pkgs=usn_released_pkgs, - dry_run=False, - ) - - out, err = capsys.readouterr() - assert expected == out - - @pytest.mark.parametrize( - "affected_pkg_status,installed_packages,usn_released_pkgs,expected", - ( - ( - {"pkg1": CVEPackageStatus(CVE_PKG_STATUS_RELEASED_ESM_INFRA)}, - {"pkg1": {"pkg1": "1.8"}}, - {"pkg1": {"pkg1": {"version": "2.0"}}}, - textwrap.dedent( - """\ - 1 affected source package is installed: pkg1 - (1/1) pkg1: - A fix is available in Ubuntu Pro: ESM Infra. 
- """ - ) - + SECURITY_UPDATE_NOT_INSTALLED_EXPIRED - + "\n\n" - + "1 package is still affected: pkg1" - + "\n" - + "{check} USN-### is not resolved.\n".format(check=FAIL_X), - ), - ), - ) - @mock.patch("uaclient.security._is_pocket_used_by_beta_service") - @mock.patch("uaclient.system.should_reboot", return_value=False) - @mock.patch("uaclient.apt.get_pkg_candidate_version", return_value="99.9") - @mock.patch("os.getuid", return_value=0) - @mock.patch("uaclient.security.get_cloud_type") - @mock.patch("uaclient.security.util.prompt_choices", return_value="c") - def test_messages_for_affected_packages_when_subscription_not_renewed( - self, - m_prompt_choices, - m_get_cloud_type, - _m_os_getuid, - _m_apt_pkg_candidate_version, - _m_should_reboot, - m_is_pocket_beta_service, - affected_pkg_status, - installed_packages, - usn_released_pkgs, - expected, - FakeConfig, - capsys, - _subp, - ): - m_get_cloud_type.return_value = ("cloud", None) - m_is_pocket_beta_service.return_value = False - - cfg = FakeConfig().for_attached_machine( - status_cache={ - "expires": "1999-12-01T00:00:00Z", - "attached": True, - }, - ) - - with mock.patch("uaclient.system._subp", side_effect=_subp): - with mock.patch("uaclient.util.sys") as m_sys: - m_stdout = mock.MagicMock() - type(m_sys).stdout = m_stdout - type(m_stdout).encoding = mock.PropertyMock( - return_value="utf-8" - ) - prompt_for_affected_packages( - cfg=cfg, - issue_id="USN-###", - affected_pkg_status=affected_pkg_status, - installed_packages=installed_packages, - usn_released_pkgs=usn_released_pkgs, - dry_run=False, - ) - - out, err = capsys.readouterr() - assert expected in out - - @pytest.mark.parametrize( - "affected_pkg_status,installed_pkgs,usn_released_pkgs,exp_msg,exp_ret", - ( - ( - {"pkg1": CVEPackageStatus(CVE_PKG_STATUS_RELEASED)}, - {"pkg1": {"pkg1": "1.8"}}, - {"pkg1": {"pkg1": {"version": "2.0"}}}, - textwrap.dedent( - """\ - 1 affected source package is installed: pkg1 - (1/1) pkg1: - A fix is available in Ubuntu standard updates. - """ - ) - + colorize_commands( - [["apt update && apt install --only-upgrade" " -y pkg1"]] - ) - + "\n\n" - + "A reboot is required to complete fix operation." 
- + "\n" - + "{check} USN-### is not resolved.\n".format(check=FAIL_X), - FixStatus.SYSTEM_VULNERABLE_UNTIL_REBOOT, - ), - ), - ) - @mock.patch("uaclient.files.notices.NoticesManager.add") - @mock.patch("uaclient.system.should_reboot", return_value=True) - @mock.patch("uaclient.apt.run_apt_command", return_value="") - @mock.patch("uaclient.apt.get_pkg_candidate_version", return_value="99.9") - @mock.patch("os.getuid", return_value=0) - @mock.patch("uaclient.security.get_cloud_type") - def test_messages_for_affected_packages_when_reboot_required( - self, - m_get_cloud_type, - _m_os_getuid, - _m_apt_pkg_candidate_version, - _m_run_apt_command, - _m_should_reboot, - m_add_notice, - affected_pkg_status, - installed_pkgs, - usn_released_pkgs, - exp_msg, - exp_ret, - FakeConfig, - capsys, - _subp, - ): - m_get_cloud_type.return_value = ("cloud", None) - - cfg = FakeConfig() - with mock.patch("uaclient.system._subp", side_effect=_subp): - with mock.patch("uaclient.util.sys") as m_sys: - m_stdout = mock.MagicMock() - type(m_sys).stdout = m_stdout - type(m_stdout).encoding = mock.PropertyMock( - return_value="utf-8" - ) - actual_ret = prompt_for_affected_packages( - cfg=cfg, - issue_id="USN-###", - affected_pkg_status=affected_pkg_status, - installed_packages=installed_pkgs, - usn_released_pkgs=usn_released_pkgs, - dry_run=False, - ) - assert exp_ret == actual_ret.status - out, err = capsys.readouterr() - assert exp_msg in out - - assert [ - mock.call( - Notice.ENABLE_REBOOT_REQUIRED, - ENABLE_REBOOT_REQUIRED_TMPL.format(operation="fix operation"), - ) - ] == m_add_notice.call_args_list - - @pytest.mark.parametrize( - "affected_pkg_status,installed_packages,usn_released_pkgs,expected", - ( - ( - {"slsrc": CVEPackageStatus(CVE_PKG_STATUS_RELEASED)}, - {"slsrc": {"sl": "2.1"}}, - {"slsrc": {"sl": {"version": "2.1"}}}, - textwrap.dedent( - """\ - 1 affected source package is installed: slsrc - (1/1) slsrc: - A fix is available in Ubuntu standard updates. - The update is already installed. - - {check} USN-### is resolved. 
- """.format( - check=OKGREEN_CHECK # noqa: E126 - ) # noqa: E126 - ), - ), - ), - ) - @mock.patch("uaclient.files.notices.NoticesManager.add") - @mock.patch("uaclient.system.should_reboot", return_value=True) - @mock.patch("uaclient.apt.run_apt_command", return_value="") - @mock.patch("uaclient.apt.get_pkg_candidate_version", return_value="99.9") - @mock.patch("os.getuid", return_value=0) - @mock.patch("uaclient.security.get_cloud_type") - def test_messages_for_affected_packages_when_reboot_required_but_update_already_installed( # noqa: E501 - self, - m_get_cloud_type, - _m_os_getuid, - _m_apt_pkg_candidate_version, - _m_run_apt_command, - _m_should_reboot, - m_add_notice, - affected_pkg_status, - installed_packages, - usn_released_pkgs, - expected, - FakeConfig, - capsys, - ): - m_get_cloud_type.return_value = ("cloud", None) - - cfg = FakeConfig() - with mock.patch("uaclient.util.sys") as m_sys: - m_stdout = mock.MagicMock() - type(m_sys).stdout = m_stdout - type(m_stdout).encoding = mock.PropertyMock(return_value="utf-8") - prompt_for_affected_packages( - cfg=cfg, - issue_id="USN-###", - affected_pkg_status=affected_pkg_status, - installed_packages=installed_packages, - usn_released_pkgs=usn_released_pkgs, - dry_run=False, - ) - out, err = capsys.readouterr() - assert expected in out - - -class TestUpgradePackagesAndAttach: - @pytest.mark.parametrize("root", ((True), (False))) - @mock.patch("uaclient.util.we_are_currently_root") - @mock.patch("uaclient.security.system.subp") - def test_upgrade_packages_are_installed_without_need_for_ua( - self, m_subp, m_we_are_currently_root, root, capsys - ): - m_subp.return_value = ("", "") - m_we_are_currently_root.return_value = root - - upgrade_packages_and_attach( - cfg=None, - upgrade_pkgs=["t1", "t2"], - pocket="Ubuntu standard updates", - dry_run=False, - ) - - out, err = capsys.readouterr() - if root: - assert m_subp.call_count == 2 - assert "apt update" in out - assert "apt install --only-upgrade -y t1 t2" in out - else: - assert SECURITY_APT_NON_ROOT in out - assert m_subp.call_count == 0 - - @pytest.mark.parametrize( - "exception, expected_error_msg", - ( - (Exception("base-exception"), "base-exception"), - (fakes.FakeUbuntuProError(), "This is a test"), - ), - ) - @mock.patch("os.getuid", return_value=0) - @mock.patch("uaclient.security.system.subp") - def test_upgrade_packages_fail_if_apt_command_fails( - self, m_subp, m_os_getuid, exception, expected_error_msg, capsys - ): - m_subp.side_effect = exception - assert ( - upgrade_packages_and_attach( - cfg=None, - upgrade_pkgs=["t1=123"], - pocket="Ubuntu standard updates", - dry_run=False, - ).status - is False - ) - - out, _ = capsys.readouterr() - assert expected_error_msg in out - - -class TestGetRelatedUSNs: - def test_no_usns_returned_when_no_cves_are_found(self, FakeConfig): - cfg = FakeConfig() - client = UASecurityClient(cfg=cfg) - usn = USN(client, SAMPLE_USN_RESPONSE_NO_CVES) - - assert [] == get_related_usns(usn, client) - - def test_usns_ignore_non_usns_items(self, FakeConfig): - expected_value = mock.MagicMock(id="USN-1235-1") - - def fake_get_notice(notice_id): - return expected_value - - m_client = mock.MagicMock() - m_client.get_notice.side_effect = fake_get_notice - - m_usn = mock.MagicMock( - cves=[ - mock.MagicMock( - notices_ids=["USN-1235-1", "LSN-0088-1"], - ) - ], - id="USN-8796-1", - ) - - assert [expected_value] == get_related_usns(m_usn, m_client) - - -class TestGetUSNAffectedPackagesStatus: - @pytest.mark.parametrize( - "installed_packages, affected_packages", - 
( - ( - {"coin3": {"libcoin80-runtime", "1.0"}}, - { - "coin3": CVEPackageStatus( - defaultdict( - str, {"status": "released", "pocket": "security"} - ) - ) - }, - ), - ), - ) - @mock.patch("uaclient.system.get_release_info") - def test_pkgs_come_from_release_packages_if_usn_has_no_cves( - self, - m_get_release_info, - installed_packages, - affected_packages, - FakeConfig, - ): - m_get_release_info.return_value = mock.MagicMock(series="bionic") - - cfg = FakeConfig() - client = UASecurityClient(cfg=cfg) - usn = USN(client, SAMPLE_USN_RESPONSE_NO_CVES) - actual_value = get_usn_affected_packages_status( - usn, installed_packages - ) - - if not affected_packages: - assert actual_value is {} - else: - assert "coin3" in actual_value - assert ( - affected_packages["coin3"].status - == actual_value["coin3"].status - ) - assert ( - affected_packages["coin3"].pocket_source - == actual_value["coin3"].pocket_source - ) - - -class TestFixSecurityIssueId: - @pytest.mark.parametrize( - "issue_id,livepatch_status,exp_ret", - ( - ( - "cve-2013-1798", - livepatch.LivepatchStatusStatus( - kernel="4.4.0-210.242-generic", - supported=None, - livepatch=livepatch.LivepatchPatchStatus( - state="applied", - version="87.1", - fixes=[ - livepatch.LivepatchPatchFixStatus( - name="cve-2013-1798", patched=True - ) - ], - ), - ), - FixStatus.SYSTEM_NON_VULNERABLE, - ), - ), - ) - @mock.patch("uaclient.livepatch.status") - def test_patched_msg_when_issue_id_fixed_by_livepatch( - self, - m_livepatch_status, - issue_id, - livepatch_status, - exp_ret, - FakeConfig, - ): - """fix_security_id returns system not vulnerable when issue_id fixed - by livepatch""" - m_livepatch_status.return_value = livepatch_status - with mock.patch( - "uaclient.security.query_installed_source_pkg_versions" - ): - assert exp_ret == fix_security_issue_id(FakeConfig(), issue_id) - - @pytest.mark.parametrize("error_code", ((404), (400))) - @pytest.mark.parametrize( - "issue_id", (("CVE-1800-123456"), ("USN-12345-12")) - ) - @mock.patch("uaclient.security.query_installed_source_pkg_versions") - def test_error_msg_when_issue_id_is_not_found( - self, _m_query_versions, issue_id, error_code, FakeConfig - ): - expected_message = "Error: {} not found.".format(issue_id) - if "CVE" in issue_id: - mock_func = "get_cve" - issue_type = "CVE" - else: - mock_func = "get_notice" - issue_type = "USN" - - with mock.patch.object(UASecurityClient, mock_func) as m_func: - msg = "{} with id 'ID' does not exist".format(issue_type) - - m_func.side_effect = exceptions.SecurityAPIError( - url="URL", code=error_code, body=json.dumps({"message": msg}) - ) - - with pytest.raises(exceptions.UbuntuProError) as exc: - fix_security_issue_id(FakeConfig(), issue_id) - - if error_code == 404: - expected_message = "Error: {} not found.".format(issue_id) - else: - expected_message = ( - "Error connecting to URL: " - + str(error_code) - + " " - + json.dumps({"message": msg}) - ) - - assert expected_message == exc.value.msg - - @mock.patch("uaclient.security.query_installed_source_pkg_versions") - @mock.patch("uaclient.security.get_usn_affected_packages_status") - @mock.patch("uaclient.security.merge_usn_released_binary_package_versions") - def test_error_msg_when_usn_does_not_have_any_related_usns( - self, - m_merge_usn, - m_usn_affected_pkgs, - m_query_installed_pkgs, - FakeConfig, - ): - m_query_installed_pkgs.return_value = {} - m_usn_affected_pkgs.return_value = {} - m_merge_usn.return_value = {} - with mock.patch.object(UASecurityClient, "get_notice") as m_notice: - with 
mock.patch.object( - UASecurityClient, "get_notices" - ) as m_notices: - usn_mock = mock.MagicMock() - cve_mock = mock.MagicMock() - - type(cve_mock).notices_ids = mock.PropertyMock( - return_value=["USN-123"] - ) - type(usn_mock).cves = mock.PropertyMock( - return_value=[cve_mock] - ) - type(usn_mock).response = mock.PropertyMock( - return_value={"release_packages": {}} - ) - type(usn_mock).cves_ids = mock.PropertyMock( - return_value=["cve-123"] - ) - type(usn_mock).id = mock.PropertyMock(return_value="id") - - m_notice.return_value = usn_mock - m_notices.return_value = [usn_mock] - - with pytest.raises(exceptions.SecurityAPIMetadataError) as exc: - fix_security_issue_id(FakeConfig(), "USN-123") - - expected_msg = ( - "Error: USN-123 metadata defines no fixed package versions." - ) - assert expected_msg in exc.value.msg - - -class TestMergeUSNReleasedBinaryPackageVersions: - @pytest.mark.parametrize( - "usns_released_packages, expected_pkgs_dict", - ( - ([{}], {}), - ( - [{"pkg1": {"libpkg1": {"version": "1.0", "name": "libpkg1"}}}], - {"pkg1": {"libpkg1": {"version": "1.0", "name": "libpkg1"}}}, - ), - ( - [ - { - "pkg1": { - "libpkg1": {"version": "1.0", "name": "libpkg1"} - }, - "pkg2": { - "libpkg2": {"version": "2.0", "name": "libpkg2"}, - "libpkg3": {"version": "3.0", "name": "libpkg3"}, - "libpkg4": {"version": "3.0", "name": "libpkg4"}, - }, - }, - { - "pkg2": { - "libpkg2": {"version": "1.8", "name": "libpkg2"}, - "libpkg4": {"version": "3.2", "name": "libpkg4"}, - } - }, - ], - { - "pkg1": {"libpkg1": {"version": "1.0", "name": "libpkg1"}}, - "pkg2": { - "libpkg2": {"version": "2.0", "name": "libpkg2"}, - "libpkg3": {"version": "3.0", "name": "libpkg3"}, - "libpkg4": {"version": "3.2", "name": "libpkg4"}, - }, - }, - ), - ( - [ - { - "pkg1": { - "libpkg1": {"version": "1.0", "name": "libpkg1"}, - "source": {"version": "2.0", "name": "pkg1"}, - } - }, - {"pkg1": {"source": {"version": "2.5", "name": "pkg1"}}}, - ], - { - "pkg1": { - "libpkg1": {"version": "1.0", "name": "libpkg1"}, - "source": {"version": "2.5", "name": "pkg1"}, - } - }, - ), - ( - [ - { - "pkg1": { - "libpkg1": {"version": "1.0", "name": "libpkg1"}, - "source": {"version": "2.0", "name": "pkg1"}, - }, - "pkg2": { - "libpkg2": { - "version": "2.0", - "name": "libpkg2", - "pocket": "esm-apps", - }, - "source": { - "version": "2.0", - "name": "pkg2", - "pocket": "esm-apps", - }, - }, - } - ], - { - "pkg1": { - "libpkg1": {"version": "1.0", "name": "libpkg1"}, - "source": {"version": "2.0", "name": "pkg1"}, - } - }, - ), - ), - ) - def test_merge_usn_released_binary_package_versions( - self, usns_released_packages, expected_pkgs_dict, _subp - ): - usns = [] - beta_packages = {"esm-infra": False, "esm-apps": True} - - for usn_released_pkgs in usns_released_packages: - usn = mock.MagicMock() - type(usn).release_packages = mock.PropertyMock( - return_value=usn_released_pkgs - ) - usns.append(usn) - - with mock.patch("uaclient.system._subp", side_effect=_subp): - usn_pkgs_dict = merge_usn_released_binary_package_versions( - usns, beta_packages - ) - assert expected_pkgs_dict == usn_pkgs_dict - - -class TestOverrideUSNReleasePackageStatus: - @pytest.mark.parametrize( - "pkg_status", - ( - CVE_PKG_STATUS_IGNORED, - CVE_PKG_STATUS_PENDING, - CVE_PKG_STATUS_NEEDS_TRIAGE, - CVE_PKG_STATUS_NEEDED, - CVE_PKG_STATUS_DEFERRED, - CVE_PKG_STATUS_RELEASED, - CVE_PKG_STATUS_RELEASED_ESM_INFRA, - ), - ) - @pytest.mark.parametrize( - "usn_src_released_pkgs,expected", - ( - ({}, None), - ( # No "source" key, so ignore all binaries - 
{"somebinary": {"pocket": "my-pocket", "version": "usn-ver"}}, - None, - ), - ( - { - "source": { - "name": "srcpkg", - "version": "usn-source-pkg-ver", - }, - "somebinary": { - "pocket": "my-pocket", - "version": "usn-bin-ver", - }, - }, - { - "pocket": "my-pocket", - "description": "usn-source-pkg-ver", - "status": "released", - }, - ), - ), - ) - def test_override_cve_src_info_with_pocket_and_ver_from_usn( - self, usn_src_released_pkgs, expected, pkg_status - ): - """Override CVEPackageStatus with released/pocket from USN.""" - orig_cve = CVEPackageStatus(pkg_status) - override = override_usn_release_package_status( - orig_cve, usn_src_released_pkgs - ) - if expected is None: # Expect CVEPackageStatus unaltered - assert override.response == orig_cve.response - else: - assert expected == override.response - - -class TestCheckAttached: - def test_check_attached_print_message_and_succeed_on_dry_run( - self, - FakeConfig, - capsys, - ): - cfg = FakeConfig() - assert _check_attached(cfg, dry_run=True) - - out, _ = capsys.readouterr() - assert SECURITY_DRY_RUN_UA_NOT_ATTACHED in out - - -class TestCheckSubscriptionForRequiredService: - @mock.patch("uaclient.security._get_service_for_pocket") - def test_check_subscription_print_message_and_succeed_on_dry_run( - self, - m_get_service, - FakeConfig, - capsys, - ): - ent_mock = mock.MagicMock() - ent_mock.user_facing_status.return_value = ( - UserFacingStatus.INACTIVE, - None, - ) - ent_mock.applicability_status.return_value = ( - ApplicabilityStatus.APPLICABLE, - None, - ) - type(ent_mock).name = mock.PropertyMock(return_value="test") - - m_get_service.return_value = ent_mock - cfg = FakeConfig() - assert _check_subscription_for_required_service( - pocket="", cfg=cfg, dry_run=True - ) - - out, _ = capsys.readouterr() - assert ( - SECURITY_DRY_RUN_UA_SERVICE_NOT_ENABLED.format(service="test") - in out - ) - - @mock.patch("uaclient.security._get_service_for_pocket") - def test_check_subscription_when_service_enabled( - self, m_get_service, FakeConfig - ): - ent_mock = mock.MagicMock() - ent_mock.user_facing_status.return_value = ( - UserFacingStatus.ACTIVE, - None, - ) - - m_get_service.return_value = ent_mock - cfg = FakeConfig() - assert _check_subscription_for_required_service( - pocket="", cfg=cfg, dry_run=False - ) - - -class TestCheckSubscriptionIsExpired: - def test_check_subscription_is_expired_passes_on_dry_run( - self, FakeConfig, capsys - ): - now = datetime.datetime.utcnow() - expire_date = now + datetime.timedelta(days=-10) - status_cache = {"attached": True, "expires": expire_date} - - assert not _check_subscription_is_expired( - status_cache=status_cache, cfg=None, dry_run=True - ) - - out, _ = capsys.readouterr() - assert SECURITY_DRY_RUN_UA_EXPIRED_SUBSCRIPTION in out - - @mock.patch("uaclient.security._prompt_for_new_token") - def test_check_subscription_is_expired(self, m_prompt, FakeConfig, capsys): - m_prompt.return_value = False - now = datetime.datetime.utcnow() - expire_date = now + datetime.timedelta(days=-10) - status_cache = {"attached": True, "expires": expire_date} - - assert _check_subscription_is_expired( - status_cache=status_cache, cfg=None, dry_run=False - ) - assert 1 == m_prompt.call_count - - -class TestPromptForAttach: - @mock.patch("uaclient.security._initiate") - @mock.patch("uaclient.security._wait") - @mock.patch("uaclient.security._revoke") - @mock.patch("uaclient.security._inform_ubuntu_pro_existence_if_applicable") - @mock.patch("uaclient.util.prompt_choices") - def 
test_magic_attach_revoke_token_if_wait_fails( - self, - m_prompt_choices, - _m_inform_pro, - m_revoke, - m_wait, - m_initiate, - FakeConfig, - ): - m_prompt_choices.return_value = "s" - m_initiate.return_value = mock.MagicMock( - token="token", user_code="user_code" - ) - m_wait.side_effect = exceptions.MagicAttachTokenError() - - with pytest.raises(exceptions.MagicAttachTokenError): - _prompt_for_attach(cfg=FakeConfig()) - - assert 1 == m_initiate.call_count - assert 1 == m_wait.call_count - assert 1 == m_revoke.call_count - - -class TestFixUSN: - @mock.patch("uaclient.security._check_attached", return_value=False) - @mock.patch("uaclient.apt.get_pkg_candidate_version", return_value="99.9") - @mock.patch("uaclient.security.merge_usn_released_binary_package_versions") - @mock.patch("uaclient.security.get_affected_packages_from_usn") - def test_fix_usn_with_related_usns( - self, - m_affected_pkgs, - m_merge_usn, - _m_get_pkg_cand_ver, - _m_check_attached, - capsys, - FakeConfig, - ): - usn = mock.MagicMock() - issue_id = "USN-123" - related_usns = [ - mock.MagicMock(id="USN-456"), - mock.MagicMock(id="USN-789"), - mock.MagicMock(id="USN-822"), - ] - installed_packages = { - "pkg1": {"pkg1": "1.0"}, - "pkg2": {"pkg2": "1.0"}, - "pkg3": {"pkg3": "1.0"}, - "pkg4": {"pkg4": "1.0"}, - "pkg5": {"pkg5": "1.0"}, - } - cfg = FakeConfig() - beta_pockets = {} - dry_run = False - no_related = False - - m_affected_pkgs.side_effect = [ - { - "pkg1": CVEPackageStatus( - { - "status": "released", - "pocket": "security", - } - ) - }, - { - "pkg2": CVEPackageStatus( - { - "status": "released", - "pocket": "esm-infra", - } - ) - }, - { - "pkg3": CVEPackageStatus( - { - "status": "released", - "pocket": "esm-apps", - } - ), - "pkg4": CVEPackageStatus( - { - "status": "released", - "pocket": "esm-apps", - } - ), - }, - { - "pkg5": CVEPackageStatus( - { - "status": "pending", - "pocket": "security", - } - ) - }, - ] - m_merge_usn.side_effect = [ - { - "pkg1": {"pkg1": {"version": "1.2", "name": "pkg1"}}, - }, - { - "pkg2": {"pkg2": {"version": "1.2", "name": "pkg2"}}, - }, - { - "pkg3": {"pkg3": {"version": "1.2", "name": "pkg3"}}, - "pkg4": {"pkg4": {"version": "1.2", "name": "pkg4"}}, - }, - { - "pkg5": {"pkg5": {"version": "1.2", "name": "pkg5"}}, - }, - ] - - with mock.patch("uaclient.util.sys") as m_sys: - m_stdout = mock.MagicMock() - type(m_sys).stdout = m_stdout - type(m_stdout).encoding = mock.PropertyMock(return_value="utf-8") - actual_ret = _fix_usn( - usn=usn, - related_usns=related_usns, - issue_id=issue_id, - installed_packages=installed_packages, - cfg=cfg, - beta_pockets=beta_pockets, - dry_run=dry_run, - no_related=no_related, - ) - - expected_msg = ( - "\n" - + SECURITY_FIXING_REQUESTED_USN.format(issue_id=issue_id) - + "\n" - + textwrap.dedent( - """\ - 1 affected source package is installed: pkg1 - (1/1) pkg1: - A fix is available in Ubuntu standard updates. - """ - ) - + colorize_commands( - [["apt update && apt install --only-upgrade" " -y pkg1"]] - ) - + "\n\n" - + "{check} USN-123 is resolved.\n".format(check=OKGREEN_CHECK) - + "\n" - + textwrap.dedent( - """\ - Found related USNs: - - USN-456 - - USN-789 - - USN-822 - """ - ) - + "\n" - + textwrap.dedent( - """\ - Fixing related USNs: - - USN-456 - 1 affected source package is installed: pkg2 - (1/1) pkg2: - A fix is available in Ubuntu Pro: ESM Infra. 
- """ - ) - + "\n" - + "1 package is still affected: pkg2" - + "\n" - + "{check} USN-456 is not resolved.".format(check=FAIL_X) - + "\n\n" - + textwrap.dedent( - """\ - - USN-789 - 2 affected source packages are installed: pkg3, pkg4 - (1/2, 2/2) pkg3, pkg4: - A fix is available in Ubuntu Pro: ESM Apps. - """ - ) - + "\n" - + "2 packages are still affected: pkg3, pkg4" - + "\n" - + "{check} USN-789 is not resolved.".format(check=FAIL_X) - + "\n\n" - + textwrap.dedent( - """\ - - USN-822 - 1 affected source package is installed: pkg5 - (1/1) pkg5: - A fix is coming soon. Try again tomorrow. - """ - ) - + "\n" - + "1 package is still affected: pkg5" - + "\n" - + "{check} USN-822 is not resolved.".format(check=FAIL_X) - + "\n\n" - + "Summary:" - + "\n" - + "{check} USN-123 [requested] is resolved.".format( - check=OKGREEN_CHECK - ) - + "\n" - + "{check} USN-456 [related] is not resolved.".format(check=FAIL_X) - + "\n" - + " - pkg2: Ubuntu Pro: ESM Infra is required for upgrade." - + "\n" - + "{check} USN-789 [related] is not resolved.".format(check=FAIL_X) - + "\n" - + " - pkg3: Ubuntu Pro: ESM Apps is required for upgrade." - + "\n" - + " - pkg4: Ubuntu Pro: ESM Apps is required for upgrade." - + "\n" - + "{check} USN-822 [related] is not resolved.".format(check=FAIL_X) - + "\n" - + " - pkg5: A fix is coming soon. Try again tomorrow." - + "\n\n" - + SECURITY_RELATED_USN_ERROR.format(issue_id="USN-123") - ) - out, err = capsys.readouterr() - assert expected_msg in out - assert FixStatus.SYSTEM_NON_VULNERABLE == actual_ret - - @mock.patch("uaclient.apt.get_pkg_candidate_version", return_value="99.9") - @mock.patch("uaclient.security.merge_usn_released_binary_package_versions") - @mock.patch("uaclient.security.get_affected_packages_from_usn") - def test_fix_usn_when_no_related_value_is_true( - self, - m_affected_pkgs, - m_merge_usn, - _m_get_pkg_cand_ver, - capsys, - FakeConfig, - ): - usn = mock.MagicMock() - issue_id = "USN-123" - related_usns = [ - mock.MagicMock(id="USN-456"), - mock.MagicMock(id="USN-789"), - mock.MagicMock(id="USN-822"), - ] - installed_packages = { - "pkg1": {"pkg1": "1.0"}, - "pkg2": {"pkg2": "1.0"}, - "pkg3": {"pkg3": "1.0"}, - "pkg4": {"pkg4": "1.0"}, - "pkg5": {"pkg5": "1.0"}, - } - cfg = FakeConfig() - beta_pockets = {} - dry_run = False - no_related = True - - m_affected_pkgs.side_effect = [ - { - "pkg1": CVEPackageStatus( - { - "status": "released", - "pocket": "security", - } - ) - }, - { - "pkg2": CVEPackageStatus( - { - "status": "released", - "pocket": "esm-infra", - } - ) - }, - { - "pkg3": CVEPackageStatus( - { - "status": "released", - "pocket": "esm-apps", - } - ), - "pkg4": CVEPackageStatus( - { - "status": "released", - "pocket": "esm-apps", - } - ), - }, - { - "pkg5": CVEPackageStatus( - { - "status": "pending", - "pocket": "security", - } - ) - }, - ] - m_merge_usn.side_effect = [ - { - "pkg1": {"pkg1": {"version": "1.2", "name": "pkg1"}}, - }, - ] - - with mock.patch("uaclient.util.sys") as m_sys: - m_stdout = mock.MagicMock() - type(m_sys).stdout = m_stdout - type(m_stdout).encoding = mock.PropertyMock(return_value="utf-8") - actual_ret = _fix_usn( - usn=usn, - related_usns=related_usns, - issue_id=issue_id, - installed_packages=installed_packages, - cfg=cfg, - beta_pockets=beta_pockets, - dry_run=dry_run, - no_related=no_related, - ) - - expected_msg = ( - "\n" - + SECURITY_FIXING_REQUESTED_USN.format(issue_id=issue_id) - + "\n" - + textwrap.dedent( - """\ - 1 affected source package is installed: pkg1 - (1/1) pkg1: - A fix is available in Ubuntu 
standard updates. - """ - ) - + colorize_commands( - [["apt update && apt install --only-upgrade" " -y pkg1"]] - ) - ) - out, err = capsys.readouterr() - assert expected_msg in out - assert FixStatus.SYSTEM_NON_VULNERABLE == actual_ret diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/tests/test_system.py ubuntu-advantage-tools-31.2~23.10/uaclient/tests/test_system.py --- ubuntu-advantage-tools-30~23.10/uaclient/tests/test_system.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/tests/test_system.py 2024-02-14 15:37:46.000000000 +0000 @@ -1336,6 +1336,31 @@ assert expected_return == system.is_systemd_unit_active("test") +class TestGetSystemdUnitActiveState: + @pytest.mark.parametrize( + [ + "subp_side_effect", + "expected", + ], + ( + ([("ActiveState=active\n", "")], "active"), + ([("ActiveState=activating\n", "")], "activating"), + ([("", "")], None), + ([("anything=anything", "")], None), + (exceptions.ProcessExecutionError("test"), None), + ), + ) + @mock.patch("uaclient.system.subp") + def test_get_systemd_unit_active_state( + self, + m_subp, + subp_side_effect, + expected, + ): + m_subp.side_effect = subp_side_effect + assert expected == system.get_systemd_unit_active_state("test.service") + + class TestGetCpuInfo: @pytest.mark.parametrize( "cpuinfo,vendor_id,model,stepping", diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/tests/test_util.py ubuntu-advantage-tools-31.2~23.10/uaclient/tests/test_util.py --- ubuntu-advantage-tools-30~23.10/uaclient/tests/test_util.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/tests/test_util.py 2024-01-18 17:34:13.000000000 +0000 @@ -468,3 +468,33 @@ assert ( util.replace_top_level_logger_name(logger_name) == new_logger_name ) + + +class TestSetFilenameExtension: + @pytest.mark.parametrize( + "input_string,extension,output_string", + ( + ("virus.exe", "test", "virus.test"), + ( + "/many/dots.and.slashes/in/this.file", + "test", + "/many/dots.and.slashes/in/this.test", + ), + ("/no/change/when.same", "same", "/no/change/when.same"), + ("no_extension", "test", "no_extension.test"), + ( + "/with.previous.dots/no_extension", + "test", + "/with.previous.dots/no_extension.test", + ), + ("d.o.t.s", "test", "d.o.t.test"), + (".dotfile", "test", ".dotfile.test"), + ), + ) + def test_set_filename_extension( + self, input_string, extension, output_string + ): + assert ( + util.set_filename_extension(input_string, extension) + == output_string + ) diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/tests/test_version.py ubuntu-advantage-tools-31.2~23.10/uaclient/tests/test_version.py --- ubuntu-advantage-tools-30~23.10/uaclient/tests/test_version.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/tests/test_version.py 2024-02-29 14:03:11.000000000 +0000 @@ -86,7 +86,7 @@ class TestCheckForNewVersion: @pytest.mark.parametrize("compare_return", (-1, 0, 1)) @mock.patch("uaclient.version.version_compare") - @mock.patch("uaclient.version.get_pkg_candidate_version") + @mock.patch("uaclient.version.get_last_known_candidate") def test_check_for_new_version( self, m_candidate, m_compare, compare_return ): diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/upgrade_lts_contract.py ubuntu-advantage-tools-31.2~23.10/uaclient/upgrade_lts_contract.py --- ubuntu-advantage-tools-30~23.10/uaclient/upgrade_lts_contract.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/upgrade_lts_contract.py 2024-01-18 17:34:13.000000000 +0000 @@ -41,8 
+41,8 @@ "bionic": "xenial", "focal": "bionic", "jammy": "focal", - "lunar": "jammy", "mantic": "lunar", + "noble": "jammy", } LOG = logging.getLogger(util.replace_top_level_logger_name(__name__)) diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/util.py ubuntu-advantage-tools-31.2~23.10/uaclient/util.py --- ubuntu-advantage-tools-30~23.10/uaclient/util.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/util.py 2024-01-18 17:34:13.000000000 +0000 @@ -461,6 +461,11 @@ return os.getuid() == 0 +def set_filename_extension(filename: str, new_extension: str) -> str: + name, _extension = os.path.splitext(filename) + return name + "." + new_extension + + def print_package_list( package_list: List[str], ): diff -Nru ubuntu-advantage-tools-30~23.10/uaclient/version.py ubuntu-advantage-tools-31.2~23.10/uaclient/version.py --- ubuntu-advantage-tools-30~23.10/uaclient/version.py 2023-11-07 14:23:34.000000000 +0000 +++ ubuntu-advantage-tools-31.2~23.10/uaclient/version.py 2024-02-29 14:03:11.000000000 +0000 @@ -14,7 +14,7 @@ from uaclient.exceptions import ProcessExecutionError from uaclient.system import subp -__VERSION__ = "30" +__VERSION__ = "31.2" PACKAGED_VERSION = "@@PACKAGED_VERSION@@" @@ -51,10 +51,9 @@ not os.path.exists(CANDIDATE_CACHE_PATH) or os.stat(CANDIDATE_CACHE_PATH).st_mtime < last_apt_cache_update ): + candidate_version = None try: - candidate_version = get_pkg_candidate_version( - "ubuntu-advantage-tools" - ) + candidate_version = get_pkg_candidate_version("ubuntu-pro-client") if candidate_version: os.makedirs(UAC_RUN_PATH, exist_ok=True) with open(CANDIDATE_CACHE_PATH, "w") as f: