diff -Nru gce-compute-image-packages-20190801/CONTRIB.md gce-compute-image-packages-20201222.00/CONTRIB.md --- gce-compute-image-packages-20190801/CONTRIB.md 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/CONTRIB.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,62 +0,0 @@ -# How to become a contributor and submit your own code - -## Contributor License Agreements - -We'd love to accept your sample apps and patches! Before we can take them, we -have to jump a couple of legal hurdles. - -Please fill out either the individual or corporate Contributor License Agreement -(CLA). - - * If you are an individual writing original source code and you're sure you - own the intellectual property, then you'll need to sign an - [individual CLA](https://developers.google.com/open-source/cla/individual). - * If you work for a company that wants to allow you to contribute your work, - then you'll need to sign a - [corporate CLA](https://developers.google.com/open-source/cla/corporate). - -Follow either of the two links above to access the appropriate CLA and -instructions for how to sign and return it. Once we receive it, we'll be able to -accept your pull requests. - -## Contributing a patch - -1. Submit an issue describing your proposed change to the repo in question. -1. The repo owner will respond to your issue promptly. -1. If your proposed change is accepted, and you haven't already done so, sign a - Contributor License Agreement (see details above). -1. Fork the desired repo, develop and test your code changes. -1. Ensure that your code adheres to the existing style in the sample to which - you are contributing. Refer to the - [Google Cloud Platform Samples Style Guide](https://github.com/GoogleCloudPlatform/Template/wiki/style.html) - for the recommended coding standards for this organization. -1. Ensure that your code has an appropriate set of unit tests which all pass. -1. Submit a pull request. - -## Contributing a new sample App - -1. 
Submit an issue to the `GoogleCloudPlatform/Template` repo describing your - proposed sample app. -1. The Template repo owner will respond to your enhancement issue promptly. - Instructional value is the top priority when evaluating new app proposals for - this collection of repos. -1. If your proposal is accepted, and you haven't already done so, sign a - Contributor License Agreement (see details above). -1. Create your own repo for your app following this naming convention: - * {product}-{app-name}-{language} - * products: appengine, compute, storage, bigquery, prediction, cloudsql - * example: appengine-guestbook-python - * For multi-product apps, concatenate the primary products, like this: - compute-appengine-demo-suite-python. - * For multi-language apps, concatenate the primary languages like this: - appengine-sockets-python-java-go. - -1. Clone the `README.md`, `CONTRIB.md` and `LICENSE` files from the - GoogleCloudPlatform/Template repo. -1. Ensure that your code adheres to the existing style in the sample to which - you are contributing. Refer to the - [Google Cloud Platform Samples Style Guide](https://github.com/GoogleCloudPlatform/Template/wiki/style.html) - for the recommended coding standards for this organization. -1. Ensure that your code has an appropriate set of unit tests which all pass. -1. Submit a request to fork your repo in GoogleCloudPlatform organization via - your proposal issue. diff -Nru gce-compute-image-packages-20190801/daisy_workflows/build_debian.wf.json gce-compute-image-packages-20201222.00/daisy_workflows/build_debian.wf.json --- gce-compute-image-packages-20190801/daisy_workflows/build_debian.wf.json 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/daisy_workflows/build_debian.wf.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,93 +0,0 @@ -{ - "Name": "build-debian-packages", - "Vars": { - "output_path": { - "Description": "GCS output path for packages." 
- }, - "github_repo": { - "Value": "https://github.com/GoogleCloudPlatform/compute-image-packages.git", - "Description": "Github repo to build packages from." - }, - "github_branch": { - "Value": "master", - "Description": "Github branch to build packages from." - } - }, - "Sources": { - "build_deb_packages.sh": "./build_deb_packages.sh" - }, - "Steps": { - "setup-disk": { - "CreateDisks": [ - { - "Name": "disk-deb9-build", - "SourceImage": "projects/debian-cloud/global/images/family/debian-9", - "SizeGb": "10", - "Type": "pd-ssd" - }, - { - "Name": "disk-deb10-build", - "SourceImage": "projects/debian-cloud/global/images/family/debian-10", - "SizeGb": "10", - "Type": "pd-ssd" - } - ] - }, - "package-build": { - "CreateInstances": [ - { - "Name": "inst-deb9-build", - "Disks": [ - {"Source": "disk-deb9-build"} - ], - "MachineType": "n1-standard-2", - "Metadata": { - "github_branch": "${github_branch}", - "github_repo": "${github_repo}", - "output_path": "${output_path}" - }, - "Scopes": ["https://www.googleapis.com/auth/devstorage.read_write"], - "StartupScript": "build_deb_packages.sh" - }, - { - "Name": "inst-deb10-build", - "Disks": [ - {"Source": "disk-deb10-build"} - ], - "MachineType": "n1-standard-2", - "Metadata": { - "github_branch": "${github_branch}", - "github_repo": "${github_repo}", - "output_path": "${output_path}" - }, - "Scopes": ["https://www.googleapis.com/auth/devstorage.read_write"], - "StartupScript": "build_deb_packages.sh" - } - ] - }, - "wait-for-build": { - "WaitForInstancesSignal": [ - { - "Name": "inst-deb9-build", - "SerialOutput": { - "Port": 1, - "SuccessMatch": "BuildSuccess:", - "FailureMatch": "BuildFailed:" - } - }, - { - "Name": "inst-deb10-build", - "SerialOutput": { - "Port": 1, - "SuccessMatch": "BuildSuccess:", - "FailureMatch": "BuildFailed:" - } - } - ] - } - }, - "Dependencies": { - "package-build": ["setup-disk"], - "wait-for-build": ["package-build"] - } -} diff -Nru 
gce-compute-image-packages-20190801/daisy_workflows/build_deb_packages.sh gce-compute-image-packages-20201222.00/daisy_workflows/build_deb_packages.sh --- gce-compute-image-packages-20190801/daisy_workflows/build_deb_packages.sh 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/daisy_workflows/build_deb_packages.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,71 +0,0 @@ -#!/bin/bash -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -URL="http://metadata/computeMetadata/v1/instance/attributes" - -BRANCH="$(curl -f -H Metadata-Flavor:Google ${URL}/github_branch)" -GIT_REPO="$(curl -f -H Metadata-Flavor:Google ${URL}/github_repo)" -OUTPUT="$(curl -f -H Metadata-Flavor:Google ${URL}/output_path)" - -if [ -z $OUTPUT ]; then - OUTPUT="$(curl -f -H Metadata-Flavor:Google ${URL}/daisy-outs-path)" -fi - -if [[ ! -e /etc/debian_version ]]; then - echo "BuildFailed: not a debian host!" - exit 1 -fi - -workdir=$(pwd) -mkdir output - -sudo apt-get install -y git - -# Clone the github repo. -git clone ${GIT_REPO} -b ${BRANCH} compute-image-packages -if [ $? -ne 0 ]; then - echo "BuildFailed: Unable to clone github repo ${GIT_REPO} and branch ${BRANCH}" - exit 1 -fi - -# Build packages. -cd compute-image-packages/packages -for package in *; do - pushd "$package" - ./packaging/setup_deb.sh - if [[ $? 
-ne 0 ]]; then - echo "BuildFailed: Unable to build $package" - exit 1 - fi - find /tmp/debpackage \( -iname '*.deb' -o -iname '*.dsc' \) \ - -exec mv '{}' "${workdir}/output/" \; - popd -done - -# Copy the deb and dsc files to the output. -cd "${workdir}/output" -# For Debian 10 right now, only copy OS Login package. -if [[ "$(cut -d. -f1 Wed, 13 Jan 2021 23:12:30 +0100 + +gce-compute-image-packages (20201222.00-0ubuntu2) hirsute; urgency=medium + + * Update udev rule fix from upstream + + -- Balint Reczey Wed, 13 Jan 2021 23:12:11 +0100 + +gce-compute-image-packages (20201222.00-0ubuntu1) hirsute; urgency=medium + + * debian/changelog: Fix LP bug references + * debian/copyright: Don't exclude upstream debian/ dir + * debian/gbp.conf: Add minimal gbp configuration + * New upstream version 20201222.00 (LP: #1911454) + - Improved nvme drive handling + - Fix tx affinity logic when number of CPUs is above 32 + * Ship new google_nvme_id script for udev + * Depend on nvme-cli + * Fix invalid substitution error in 65-gce-disk-naming.rules + + -- Balint Reczey Wed, 13 Jan 2021 21:08:22 +0100 + +gce-compute-image-packages (20200626.00-0ubuntu2) hirsute; urgency=medium + + * debian/NEWS: Explain recent packaging changes + * Don't provide transitional python3-google-compute-engine package. + Installing google-guest-agent forces the removal of + python3-google-compute-engine to avoid accidental upgrades in stable + releases. 
(LP: #1905986) + + -- Balint Reczey Wed, 13 Jan 2021 13:23:10 +0100 + +gce-compute-image-packages (20200626.00-0ubuntu1) groovy; urgency=medium + + * New upstream version 20191115 (LP: #1853846) + - Move the OS Login components to a separate project + * Drop google-compute-engine-oslogin packaging + * Drop obsolete patches + * debian/watch: Update upstream repository location + * New upstream version 20200626.00 + * Drop obsolete patches + * Adjust packaging to conform to upstream changes + + -- Balint Reczey Thu, 23 Jul 2020 14:57:20 +0200 + +gce-compute-image-packages (20190801-0ubuntu5) groovy; urgency=medium + + * Disable automatic adding of groups to all users (LP: #1878654) + - d/p/0006-Remove-OS-Login-users-from-admin-groups.-29.patch: remove + adm, docker, and lxd groups + - d/p/0007-Remove-local-user-groups-for-OS-Login-users.-30.patch: + remove dip and plugdev groups + + -- Steve Beattie Thu, 14 May 2020 15:25:37 -0700 + gce-compute-image-packages (20190801-0ubuntu4.2) focal; urgency=medium * Drop google-compute-engine-oslogin packaging (LP: #1899629) diff -Nru gce-compute-image-packages-20190801/debian/control gce-compute-image-packages-20201222.00/debian/control --- gce-compute-image-packages-20190801/debian/control 2020-10-13 14:27:04.000000000 +0000 +++ gce-compute-image-packages-20201222.00/debian/control 2021-01-13 22:12:30.000000000 +0000 @@ -2,47 +2,41 @@ Maintainer: Ubuntu Developers Section: admin Priority: optional -Build-Depends: cmake, - debhelper (>= 9.20160709), - dh-python, - python3-all, - python3-distro, - python3-setuptools, - python3-pytest, - python3-boto +Build-Depends: debhelper (>= 9.20160709), Standards-Version: 3.9.8 Vcs-Browser: https://code.launchpad.net/~ubuntu-core-dev/+git/gce-compute-image-packages Vcs-Git: https://git.launchpad.net/~ubuntu-core-dev/+git/gce-compute-image-packages -b ubuntu/master Homepage: https://github.com/GoogleCloudPlatform/compute-image-packages XSBC-Original-Maintainer: Daniel Watkins -Package: 
gce-compute-image-packages +Package: google-compute-engine Architecture: all -Depends: python3:any, - ${python3:Depends}, - ${misc:Depends}, +Depends: ${misc:Depends}, google-compute-engine-oslogin (>> 20190801), - python3-google-compute-engine (= ${source:Version}), - cloud-init + google-guest-agent, + nvme-cli, + ${misc:Depends} Provides: irqbalance Recommends: rsyslog | system-log-daemon Suggests: libpam-cracklib Conflicts: gce-cloud-config, gce-daemon, - gce-startup-scripts + gce-startup-scripts, + gce-compute-image-packages (<< 20191115) Replaces: gce-cloud-config, gce-daemon, - gce-startup-scripts -Description: GCE's compute-image-packages for use in their guest environment - This is a collection of scripts that are used on Google Compute Engine images - to ensure compatibility with the cloud, as well as to enable features specific - to the cloud. + gce-startup-scripts, + gce-compute-image-packages (<< 20191115) +Description: Google Compute Engine guest environment. + This package contains scripts and configuration files for + features specific to the Google Compute Engine cloud environment. -Package: python3-google-compute-engine +Package: gce-compute-image-packages Architecture: all -Section: python -Depends: ${python3:Depends}, - ${misc:Depends} -Description: Python library for Google Compute Engine interaction (Python 3) - Python libraries used for interacting with Google Compute Engine's APIs and - functionality. This package contains the modules for Python 3.x. +Depends: ${misc:Depends}, + google-compute-engine +Description: transitional dummy package + This dummy package is provided for a smooth transition from the + previous old package name to google-compute-engine. + . + It may be safely removed after installation. 
diff -Nru gce-compute-image-packages-20190801/debian/copyright gce-compute-image-packages-20201222.00/debian/copyright --- gce-compute-image-packages-20190801/debian/copyright 2020-10-13 14:27:04.000000000 +0000 +++ gce-compute-image-packages-20201222.00/debian/copyright 2021-01-13 22:12:30.000000000 +0000 @@ -2,32 +2,17 @@ Upstream-Name: compute-image-packages Upstream-Contact: Google Corp Source: https://github.com/GoogleCloudPlatform/compute-image-packages -Files-Excluded: debian Files: * -Copyright: 2017-2018, Google Inc. +Copyright: 2017-2020, Google Inc. License: Apache-2.0 Files: debian/* -Copyright: 2017-2018, Canonical Group, Ltd. +Copyright: 2017-2020, Canonical Group, Ltd. + 2017-2020, Google Inc. License: Apache-2.0 -Files: debian/gce-compute-image-packages.postinst - debian/gce-compute-image-packages.preinst - debian/gce-compute-image-packages.prerm -Copyright: 2017-2018, Google Inc. -License: Apache-2.0 - -Files: disk_expand/third_party/dracut-modules-growroot/* -Copyright: 2016, Red Hat, Inc. -License: GPL-3 - -Files: disk_expand/third_party/cloud-utils/* -Copyright: 2011 Canonical Ltd. - 2013 Hewlett-Packard Development Company, L.P. -License: GPL-3 - -Files: google_config/sbin/google-dhclient-script +Files: src/sbin/google-dhclient-script Copyright: 2008-2014 Red Hat, Inc. License: GPL-2+ @@ -47,23 +32,6 @@ On Debian systems, the complete text of the Apache version 2.0 license can be found in "/usr/share/common-licenses/Apache-2.0". -License: GPL-3 - This package is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public - License as published by the Free Software Foundation; version 3 of - the License. - . - This package is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - . 
- You should have received a copy of the GNU General Public License - along with this program. If not, see . - . - On Debian systems, the complete text of the GNU General - Public License can be found in "/usr/share/common-licenses/GPL-3". - License: GPL-2+ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff -Nru gce-compute-image-packages-20190801/debian/gbp.conf gce-compute-image-packages-20201222.00/debian/gbp.conf --- gce-compute-image-packages-20190801/debian/gbp.conf 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/debian/gbp.conf 2021-01-13 22:12:30.000000000 +0000 @@ -0,0 +1,4 @@ +[DEFAULT] +debian-branch = ubuntu/master +debian-tag = ubuntu/%(version)s +debian-tag-msg = %(pkg)s Ubuntu release %(version)s diff -Nru gce-compute-image-packages-20190801/debian/gce-compute-image-packages.install gce-compute-image-packages-20201222.00/debian/gce-compute-image-packages.install --- gce-compute-image-packages-20190801/debian/gce-compute-image-packages.install 2020-10-13 14:27:04.000000000 +0000 +++ gce-compute-image-packages-20201222.00/debian/gce-compute-image-packages.install 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ -# Upstream-provided configuration -packages/google-compute-engine/src/etc/* etc -packages/google-compute-engine/src/lib/* lib -packages/google-compute-engine/src/usr/* usr - -# Ubuntu-specific configuration -debian/instance_configs.cfg.distro etc/default -debian/91-gce.cfg etc/cloud/cloud.cfg.d -debian/99-gce.rules lib/udev/rules.d diff -Nru gce-compute-image-packages-20190801/debian/gce-compute-image-packages.links gce-compute-image-packages-20201222.00/debian/gce-compute-image-packages.links --- gce-compute-image-packages-20190801/debian/gce-compute-image-packages.links 2020-10-13 14:27:04.000000000 +0000 +++ gce-compute-image-packages-20201222.00/debian/gce-compute-image-packages.links 1970-01-01 00:00:00.000000000 
+0000 @@ -1 +0,0 @@ -usr/bin/google_set_hostname etc/dhcp/dhclient-exit-hooks.d/google_set_hostname diff -Nru gce-compute-image-packages-20190801/debian/gce-compute-image-packages.lintian-overrides gce-compute-image-packages-20201222.00/debian/gce-compute-image-packages.lintian-overrides --- gce-compute-image-packages-20190801/debian/gce-compute-image-packages.lintian-overrides 2020-10-13 14:27:04.000000000 +0000 +++ gce-compute-image-packages-20201222.00/debian/gce-compute-image-packages.lintian-overrides 1970-01-01 00:00:00.000000000 +0000 @@ -1,15 +0,0 @@ -# preinst performs the transition from obsolete systemd services manually -gce-compute-image-packages: maintainer-script-calls-systemctl preinst -# upstream does not provide manpages for those instrastructure scripts -gce-compute-image-packages: binary-without-manpage usr/bin/google_accounts_daemon -gce-compute-image-packages: binary-without-manpage usr/bin/google_clock_skew_daemon -gce-compute-image-packages: binary-without-manpage usr/bin/google_instance_setup -gce-compute-image-packages: binary-without-manpage usr/bin/google_metadata_script_runner -gce-compute-image-packages: binary-without-manpage usr/bin/google_network_daemon -gce-compute-image-packages: binary-without-manpage usr/bin/google_optimize_local_ssd -gce-compute-image-packages: binary-without-manpage usr/bin/google_set_multiqueue -# this is indeed unusual but also intentional -gce-compute-image-packages: systemd-service-file-refers-to-unusual-wantedby-target lib/systemd/system/google-instance-setup.service sshd.service -# systemd services are cleaned up manually and this is how upstream does it -gce-compute-image-packages: maintainer-script-calls-systemctl preinst* - diff -Nru gce-compute-image-packages-20190801/debian/gce-compute-image-packages.postinst gce-compute-image-packages-20201222.00/debian/gce-compute-image-packages.postinst --- gce-compute-image-packages-20190801/debian/gce-compute-image-packages.postinst 2020-10-13 
14:27:04.000000000 +0000 +++ gce-compute-image-packages-20201222.00/debian/gce-compute-image-packages.postinst 1970-01-01 00:00:00.000000000 +0000 @@ -1,33 +0,0 @@ -#!/bin/sh -# Copyright 2017 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -#DEBHELPER# - -# Manually added service start or restart. We do not start or restart -# google-shutdown-scripts.service google-startup-scripts.service. -set -e -if [ -d /run/systemd/system ]; then - systemctl --system daemon-reload >/dev/null || true - if [ -n "$2" ]; then - _dh_action=reload-or-restart - else - _dh_action=start - fi - deb-systemd-invoke $_dh_action \ - google-instance-setup.service \ - google-accounts-daemon.service \ - google-clock-skew-daemon.service \ - google-network-daemon.service >/dev/null || true -fi diff -Nru gce-compute-image-packages-20190801/debian/gce-compute-image-packages.preinst gce-compute-image-packages-20201222.00/debian/gce-compute-image-packages.preinst --- gce-compute-image-packages-20190801/debian/gce-compute-image-packages.preinst 2020-10-13 14:27:04.000000000 +0000 +++ gce-compute-image-packages-20201222.00/debian/gce-compute-image-packages.preinst 1970-01-01 00:00:00.000000000 +0000 @@ -1,33 +0,0 @@ -#!/bin/sh -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -e - -#DEBHELPER# - -if [ -d /run/systemd/system ] ; then - if [ "$1" = upgrade ]; then - # Remove old services if they exist on upgrade. - if [ -f /lib/systemd/system/google-ip-forwarding-daemon.service ]; then - systemctl stop google-ip-forwarding-daemon.service - systemctl disable google-ip-forwarding-daemon.service - fi - - if [ -f /lib/systemd/system/google-network-setup.service ]; then - systemctl stop google-network-setup.service - systemctl disable google-network-setup.service - fi - fi -fi diff -Nru gce-compute-image-packages-20190801/debian/gce-compute-image-packages.prerm gce-compute-image-packages-20201222.00/debian/gce-compute-image-packages.prerm --- gce-compute-image-packages-20190801/debian/gce-compute-image-packages.prerm 2020-10-13 14:27:04.000000000 +0000 +++ gce-compute-image-packages-20201222.00/debian/gce-compute-image-packages.prerm 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ -#!/bin/sh -# Copyright 2017 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -#DEBHELPER# - -# Manually stop all services. -set -e -if [ -d /run/systemd/system ] && [ "$1" = remove ]; then - deb-systemd-invoke stop \ - google-instance-setup.service \ - google-accounts-daemon.service \ - google-clock-skew-daemon.service \ - google-network-daemon.service \ - google-shutdown-scripts.service \ - google-startup-scripts.service >/dev/null -fi diff -Nru gce-compute-image-packages-20190801/debian/install gce-compute-image-packages-20201222.00/debian/install --- gce-compute-image-packages-20190801/debian/install 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/debian/install 2021-01-13 22:12:30.000000000 +0000 @@ -0,0 +1,12 @@ +# Upstream-provided configuration +src/etc/apt/apt.conf.d/* etc/apt/apt.conf.d +src/etc/modprobe.d/* etc/modprobe.d +src/etc/rsyslog.d/* etc/rsyslog.d +src/etc/sysctl.d/* etc/sysctl.d +src/lib/udev/* lib/udev +src/usr/bin/* usr/bin + +# Ubuntu-specific configuration +debian/instance_configs.cfg.distro etc/default +debian/91-gce.cfg etc/cloud/cloud.cfg.d +debian/99-gce.rules lib/udev/rules.d diff -Nru gce-compute-image-packages-20190801/debian/links gce-compute-image-packages-20201222.00/debian/links --- gce-compute-image-packages-20190801/debian/links 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/debian/links 2021-01-13 22:12:30.000000000 +0000 @@ -0,0 +1 @@ +usr/bin/google_set_hostname etc/dhcp/dhclient-exit-hooks.d/google_set_hostname diff -Nru gce-compute-image-packages-20190801/debian/lintian-overrides gce-compute-image-packages-20201222.00/debian/lintian-overrides --- gce-compute-image-packages-20190801/debian/lintian-overrides 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/debian/lintian-overrides 2021-01-13 22:12:30.000000000 +0000 @@ -0,0 +1,15 @@ +# preinst performs the transition from obsolete systemd services manually +google-compute-engine: maintainer-script-calls-systemctl preinst +# upstream does not provide manpages 
for those instrastructure scripts +google-compute-engine: binary-without-manpage usr/bin/google_accounts_daemon +google-compute-engine: binary-without-manpage usr/bin/google_clock_skew_daemon +google-compute-engine: binary-without-manpage usr/bin/google_instance_setup +google-compute-engine: binary-without-manpage usr/bin/google_metadata_script_runner +google-compute-engine: binary-without-manpage usr/bin/google_network_daemon +google-compute-engine: binary-without-manpage usr/bin/google_optimize_local_ssd +google-compute-engine: binary-without-manpage usr/bin/google_set_multiqueue +# this is indeed unusual but also intentional +google-compute-engine: systemd-service-file-refers-to-unusual-wantedby-target lib/systemd/system/google-instance-setup.service sshd.service +# systemd services are cleaned up manually and this is how upstream does it +google-compute-engine: maintainer-script-calls-systemctl preinst* + diff -Nru gce-compute-image-packages-20190801/debian/NEWS gce-compute-image-packages-20201222.00/debian/NEWS --- gce-compute-image-packages-20190801/debian/NEWS 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/debian/NEWS 2021-01-13 22:12:30.000000000 +0000 @@ -0,0 +1,22 @@ +gce-compute-image-packages (20200626.00-0ubuntu1) groovy; urgency=medium + + Google Compute Engine's upstream compute-image-packages project + repository [1] has been split up to smaller ones [2] and this incurs + the following packaging changes: + + - The Python libraries packaged as python3-google-compute-engine are + not shipped anymore and the Guest Agent (written in Go) functionality + and services are provided in the google-guest-agent package. + + - The libraries, applications and configurations for using OS Login is + still shipped in the google-compute-engine-oslogin package. + (It is built from the new google-compute-engine-oslogin source package). 
+ + - The scripts and configuration files originally shipped by the + gce-compute-image-packages package are now shipped in the + google-compute-engine package. + + [1] https://github.com/GoogleCloudPlatform/compute-image-packages + [2] https://bugs.launchpad.net/ubuntu/+bug/1870314/comments/2 + + -- Balint Reczey Thu, 26 Nov 2020 16:15:41 +0100 diff -Nru gce-compute-image-packages-20190801/debian/patches/0001-correct-udev-rule-syntax-15.patch gce-compute-image-packages-20201222.00/debian/patches/0001-correct-udev-rule-syntax-15.patch --- gce-compute-image-packages-20190801/debian/patches/0001-correct-udev-rule-syntax-15.patch 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/debian/patches/0001-correct-udev-rule-syntax-15.patch 2021-01-13 22:12:30.000000000 +0000 @@ -0,0 +1,25 @@ +From 9f8ec1c3da8916aec8470e10fc674b1f2306132e Mon Sep 17 00:00:00 2001 +From: Liam Hopkins +Date: Wed, 13 Jan 2021 14:03:58 -0800 +Subject: [PATCH] correct udev rule syntax (#15) + +--- + src/lib/udev/rules.d/65-gce-disk-naming.rules | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/lib/udev/rules.d/65-gce-disk-naming.rules b/src/lib/udev/rules.d/65-gce-disk-naming.rules +index 142b8d2..a8035d3 100644 +--- a/src/lib/udev/rules.d/65-gce-disk-naming.rules ++++ b/src/lib/udev/rules.d/65-gce-disk-naming.rules +@@ -21,7 +21,7 @@ SUBSYSTEM!="block", GOTO="gce_disk_naming_end" + KERNEL=="sd*|vd*", IMPORT{program}="scsi_id --export --whitelisted -d $tempnode" + + # NVME Local SSD naming +-KERNEL=="nvme*n*", ATTRS{model}=="nvme_card", PROGRAM="/bin/sh -c 'echo $((%n-1))'", ENV{ID_SERIAL_SHORT}="local-nvme-ssd-%c" ++KERNEL=="nvme*n*", ATTRS{model}=="nvme_card", PROGRAM="/bin/sh -c 'echo $$((%n-1))'", ENV{ID_SERIAL_SHORT}="local-nvme-ssd-%c" + KERNEL=="nvme*", ATTRS{model}=="nvme_card", ENV{ID_SERIAL}="Google_EphemeralDisk_$env{ID_SERIAL_SHORT}" + + # NVME Persistent Disk Naming +-- +2.25.1 + diff -Nru 
gce-compute-image-packages-20190801/debian/patches/0001-set-LDFLAGS-to-prevent-undefs.patch gce-compute-image-packages-20201222.00/debian/patches/0001-set-LDFLAGS-to-prevent-undefs.patch --- gce-compute-image-packages-20190801/debian/patches/0001-set-LDFLAGS-to-prevent-undefs.patch 2020-10-13 14:27:04.000000000 +0000 +++ gce-compute-image-packages-20201222.00/debian/patches/0001-set-LDFLAGS-to-prevent-undefs.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -From: Brian Murray -Date: Fri, 12 Apr 2019 13:23:42 +0200 -Subject: set LDFLAGS to report unresolved symbol references - -Origin: Ubuntu -Forwarded: no -Last-Update: 2018-11-08 ---- - packages/google-compute-engine-oslogin/src/Makefile | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/packages/google-compute-engine-oslogin/src/Makefile b/packages/google-compute-engine-oslogin/src/Makefile -index 8b5d339..db77807 100644 ---- a/packages/google-compute-engine-oslogin/src/Makefile -+++ b/packages/google-compute-engine-oslogin/src/Makefile -@@ -8,7 +8,7 @@ FLAGS = -fPIC -Wall -g - CFLAGS = $(FLAGS) -Wstrict-prototypes - CXXFLAGS = $(FLAGS) - --LDFLAGS = -shared -Wl,-soname,$(SONAME) -+LDFLAGS = -shared -Wl,-z,defs -Wl,-soname,$(SONAME) - LDLIBS = -lcurl -ljson-c - PAMLIBS = -lpam $(LDLIBS) - diff -Nru gce-compute-image-packages-20190801/debian/patches/0003-order-startup-scripts-after-snap-seeding.service gce-compute-image-packages-20201222.00/debian/patches/0003-order-startup-scripts-after-snap-seeding.service --- gce-compute-image-packages-20190801/debian/patches/0003-order-startup-scripts-after-snap-seeding.service 2020-10-13 14:27:04.000000000 +0000 +++ gce-compute-image-packages-20201222.00/debian/patches/0003-order-startup-scripts-after-snap-seeding.service 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -From: Daniel Watkins -Date: Fri, 12 Apr 2019 13:23:42 +0200 -Subject: Ensure that snaps have been seeded before startup scripts run - -Last-Update: 2018-05-15 ---- - 
.../src/lib/systemd/system/google-startup-scripts.service | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/packages/google-compute-engine/src/lib/systemd/system/google-startup-scripts.service b/packages/google-compute-engine/src/lib/systemd/system/google-startup-scripts.service -index 89f245c..fa0532b 100644 ---- a/packages/google-compute-engine/src/lib/systemd/system/google-startup-scripts.service -+++ b/packages/google-compute-engine/src/lib/systemd/system/google-startup-scripts.service -@@ -4,6 +4,8 @@ After=network-online.target network.target rsyslog.service - After=google-instance-setup.service google-network-daemon.service - After=cloud-final.service multi-user.target - Wants=cloud-final.service -+After=snapd.seeded.service -+Wants=snapd.seeded.service - - [Service] - ExecStart=/usr/bin/google_metadata_script_runner --script-type startup diff -Nru gce-compute-image-packages-20190801/debian/patches/0004-order-shutdown-scripts-after-snapd.patch gce-compute-image-packages-20201222.00/debian/patches/0004-order-shutdown-scripts-after-snapd.patch --- gce-compute-image-packages-20190801/debian/patches/0004-order-shutdown-scripts-after-snapd.patch 2020-10-13 14:27:04.000000000 +0000 +++ gce-compute-image-packages-20201222.00/debian/patches/0004-order-shutdown-scripts-after-snapd.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -From: Daniel Watkins -Date: Fri, 12 Apr 2019 13:23:42 +0200 -Subject: Order shutdown scripts after snapd.service - -This ensures that snaps will be available when the scripts run on shutdown -Last-Update: 2018-05-17 ---- - .../src/lib/systemd/system/google-shutdown-scripts.service | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/packages/google-compute-engine/src/lib/systemd/system/google-shutdown-scripts.service b/packages/google-compute-engine/src/lib/systemd/system/google-shutdown-scripts.service -index ae23d76..b38e68a 100644 ---- 
a/packages/google-compute-engine/src/lib/systemd/system/google-shutdown-scripts.service -+++ b/packages/google-compute-engine/src/lib/systemd/system/google-shutdown-scripts.service -@@ -2,6 +2,7 @@ - Description=Google Compute Engine Shutdown Scripts - After=network-online.target network.target rsyslog.service - After=google-instance-setup.service google-network-daemon.service -+After=snapd.service - - [Service] - ExecStart=/bin/true diff -Nru gce-compute-image-packages-20190801/debian/patches/0005-add-snap-bin-to-path.patch gce-compute-image-packages-20201222.00/debian/patches/0005-add-snap-bin-to-path.patch --- gce-compute-image-packages-20190801/debian/patches/0005-add-snap-bin-to-path.patch 2020-10-13 14:27:04.000000000 +0000 +++ gce-compute-image-packages-20201222.00/debian/patches/0005-add-snap-bin-to-path.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,38 +0,0 @@ -From: Daniel Watkins -Date: Fri, 12 Apr 2019 13:23:42 +0200 -Subject: Add /snap/bin to PATH for startup/shutdown scripts - -These scripts often rely on binaries installed by the google-cloud-sdk snap, -so we need to ensure those binaries are available in the execution environment -of the units. This patch should be dropped once LP: #1771858 has been fixed -in systemd. 
-Last-Update: 2018-05-17 ---- - .../src/lib/systemd/system/google-shutdown-scripts.service | 1 + - .../src/lib/systemd/system/google-startup-scripts.service | 1 + - 2 files changed, 2 insertions(+) - -diff --git a/packages/google-compute-engine/src/lib/systemd/system/google-shutdown-scripts.service b/packages/google-compute-engine/src/lib/systemd/system/google-shutdown-scripts.service -index b38e68a..71a9a90 100644 ---- a/packages/google-compute-engine/src/lib/systemd/system/google-shutdown-scripts.service -+++ b/packages/google-compute-engine/src/lib/systemd/system/google-shutdown-scripts.service -@@ -11,6 +11,7 @@ Type=oneshot - RemainAfterExit=true - TimeoutStopSec=0 - StandardOutput=journal+console -+Environment=PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin - - [Install] - WantedBy=multi-user.target -diff --git a/packages/google-compute-engine/src/lib/systemd/system/google-startup-scripts.service b/packages/google-compute-engine/src/lib/systemd/system/google-startup-scripts.service -index fa0532b..870d4c0 100644 ---- a/packages/google-compute-engine/src/lib/systemd/system/google-startup-scripts.service -+++ b/packages/google-compute-engine/src/lib/systemd/system/google-startup-scripts.service -@@ -12,6 +12,7 @@ ExecStart=/usr/bin/google_metadata_script_runner --script-type startup - KillMode=process - Type=oneshot - StandardOutput=journal+console -+Environment=PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin - - [Install] - WantedBy=multi-user.target diff -Nru gce-compute-image-packages-20190801/debian/patches/0006-Remove-OS-Login-users-from-admin-groups.-29.patch gce-compute-image-packages-20201222.00/debian/patches/0006-Remove-OS-Login-users-from-admin-groups.-29.patch --- gce-compute-image-packages-20190801/debian/patches/0006-Remove-OS-Login-users-from-admin-groups.-29.patch 2020-10-13 14:27:04.000000000 +0000 +++ 
gce-compute-image-packages-20201222.00/debian/patches/0006-Remove-OS-Login-users-from-admin-groups.-29.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -From 50b0fb7b5804c22ef9581e7dc91875801dfa5469 Mon Sep 17 00:00:00 2001 -From: Max Illfelder -Date: Mon, 27 Apr 2020 15:01:30 -0700 -Subject: [PATCH] Remove OS Login users from admin groups. (#29) - -Local user accounts can use these groups for privilege escallation. As a -short term remediation, we should stop adding all users to these groups -by default. ---- - packages/google-compute-engine-oslogin/google_oslogin_control | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -Index: b/packages/google-compute-engine-oslogin/google_oslogin_control -=================================================================== ---- a/packages/google-compute-engine-oslogin/google_oslogin_control -+++ b/packages/google-compute-engine-oslogin/google_oslogin_control -@@ -265,7 +265,7 @@ modify_group_conf() { - fi - - local group_config="${1:-${group_config}}" -- local group_conf_entry="sshd;*;*;Al0000-2400;adm,dip,docker,lxd,plugdev,video" -+ local group_conf_entry="sshd;*;*;Al0000-2400;dip,plugdev,video" - - if ! 
grep -q "$group_conf_entry" "$group_config"; then - $sed -i"" "\$a ${added_comment}\n${group_conf_entry}" "$group_config" diff -Nru gce-compute-image-packages-20190801/debian/patches/0007-Remove-local-user-groups-for-OS-Login-users.-30.patch gce-compute-image-packages-20201222.00/debian/patches/0007-Remove-local-user-groups-for-OS-Login-users.-30.patch --- gce-compute-image-packages-20190801/debian/patches/0007-Remove-local-user-groups-for-OS-Login-users.-30.patch 2020-10-13 14:27:04.000000000 +0000 +++ gce-compute-image-packages-20201222.00/debian/patches/0007-Remove-local-user-groups-for-OS-Login-users.-30.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ -From 88f1ba85e20b3b3a07bfad2eeb723a6b06e41fc8 Mon Sep 17 00:00:00 2001 -From: Max Illfelder -Date: Mon, 4 May 2020 09:54:44 -0700 -Subject: [PATCH] Remove local user groups for OS Login users. (#30) - -Leaving the video group for GPU instances. ---- - packages/google-compute-engine-oslogin/google_oslogin_control | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -Index: b/packages/google-compute-engine-oslogin/google_oslogin_control -=================================================================== ---- a/packages/google-compute-engine-oslogin/google_oslogin_control -+++ b/packages/google-compute-engine-oslogin/google_oslogin_control -@@ -265,7 +265,7 @@ modify_group_conf() { - fi - - local group_config="${1:-${group_config}}" -- local group_conf_entry="sshd;*;*;Al0000-2400;dip,plugdev,video" -+ local group_conf_entry="sshd;*;*;Al0000-2400;video" - - if ! 
grep -q "$group_conf_entry" "$group_config"; then - $sed -i"" "\$a ${added_comment}\n${group_conf_entry}" "$group_config" diff -Nru gce-compute-image-packages-20190801/debian/patches/fix-startup-script-ordering.patch gce-compute-image-packages-20201222.00/debian/patches/fix-startup-script-ordering.patch --- gce-compute-image-packages-20190801/debian/patches/fix-startup-script-ordering.patch 2020-10-13 14:27:04.000000000 +0000 +++ gce-compute-image-packages-20201222.00/debian/patches/fix-startup-script-ordering.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ -From: Ubuntu Developers -Date: Fri, 12 Apr 2019 13:23:42 +0200 -Subject: Fix where GCE startup scripts are run during boot - - This fixes: - * GCE mirrors not in place before startup scripts are run (LP: #1436846) - * Run google-startup-scripts.service after multi-user.target (LP: #1627436) -Author: Phil Roche -Bug-Ubuntu: https://bugs.launchpad.net/bugs/1436846 -Bug-Ubuntu: https://bugs.launchpad.net/bugs/1627436 -Last-Update: 2016-11-23 ---- - .../src/lib/systemd/system/google-startup-scripts.service | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/packages/google-compute-engine/src/lib/systemd/system/google-startup-scripts.service b/packages/google-compute-engine/src/lib/systemd/system/google-startup-scripts.service -index 233298a..89f245c 100644 ---- a/packages/google-compute-engine/src/lib/systemd/system/google-startup-scripts.service -+++ b/packages/google-compute-engine/src/lib/systemd/system/google-startup-scripts.service -@@ -2,6 +2,8 @@ - Description=Google Compute Engine Startup Scripts - After=network-online.target network.target rsyslog.service - After=google-instance-setup.service google-network-daemon.service -+After=cloud-final.service multi-user.target -+Wants=cloud-final.service - - [Service] - ExecStart=/usr/bin/google_metadata_script_runner --script-type startup diff -Nru gce-compute-image-packages-20190801/debian/patches/series 
gce-compute-image-packages-20201222.00/debian/patches/series --- gce-compute-image-packages-20190801/debian/patches/series 2020-10-13 14:27:04.000000000 +0000 +++ gce-compute-image-packages-20201222.00/debian/patches/series 2021-01-13 22:12:30.000000000 +0000 @@ -1,7 +1 @@ -0001-set-LDFLAGS-to-prevent-undefs.patch -fix-startup-script-ordering.patch -0003-order-startup-scripts-after-snap-seeding.service -0004-order-shutdown-scripts-after-snapd.patch -0005-add-snap-bin-to-path.patch -0006-Remove-OS-Login-users-from-admin-groups.-29.patch -0007-Remove-local-user-groups-for-OS-Login-users.-30.patch +0001-correct-udev-rule-syntax-15.patch diff -Nru gce-compute-image-packages-20190801/debian/preinst gce-compute-image-packages-20201222.00/debian/preinst --- gce-compute-image-packages-20190801/debian/preinst 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/debian/preinst 2021-01-13 22:12:30.000000000 +0000 @@ -0,0 +1,33 @@ +#!/bin/sh +# Copyright 2018 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +#DEBHELPER# + +if [ "$1" = upgrade ]; then + # Remove old services if they exist on upgrade. 
+  for svc in google-ip-forwarding-daemon google-network-setup \ +    google-network-daemon google-accounts-daemon google-clock-skew-daemon \ +    google-instance-setup; do +    if systemctl is-enabled ${svc}.service >/dev/null 2>&1; then +      systemctl --no-reload disable ${svc}.service >/dev/null 2>&1 || : +      if [ -d /run/systemd/system ]; then +        systemctl stop ${svc}.service >/dev/null 2>&1 || : +      fi +    fi +  done +  systemctl daemon-reload >/dev/null 2>&1 || : +fi diff -Nru gce-compute-image-packages-20190801/debian/rules gce-compute-image-packages-20201222.00/debian/rules --- gce-compute-image-packages-20190801/debian/rules 2020-10-13 14:27:04.000000000 +0000 +++ gce-compute-image-packages-20201222.00/debian/rules 2021-01-13 22:12:30.000000000 +0000 @@ -1,37 +1,4 @@ #!/usr/bin/make -f -export PYBUILD_NAME=google-compute-engine -export PYBUILD_TEST_PYTEST=1 -export PYBUILD_DIR=packages/python-google-compute-engine - -DEB_HOST_MULTIARCH ?= $(shell dpkg-architecture -qDEB_HOST_MULTIARCH) - %: - dh $@ --with python3,systemd --buildsystem=pybuild - -override_dh_auto_configure: - dh_auto_configure - -override_dh_link-indep: - rm -f debian/gce-compute-image-packages/etc/dhcp/dhclient-exit-hooks \ - debian/gce-compute-image-packages/etc/dhcp/dhclient-exit-hooks.d/google_set_hostname - dh_link -i - -override_dh_installdeb-indep: - rm -rf debian/gce-compute-image-packages/etc/init - dh_installdeb -i - -override_dh_systemd_start: - # Configured in gce-compute-image-packages.postinst instead. 
- -override_dh_python3: - dh_python3 - # We want to split the Python 3 scripts out to the - # gce-compute-image-packages package - mkdir -p debian/gce-compute-image-packages/usr/bin/ - mv debian/python3-google-compute-engine/usr/bin/* debian/gce-compute-image-packages/usr/bin/ - rmdir debian/python3-google-compute-engine/usr/bin - -override_dh_clean: - dh_clean packages/google_compute_engine.egg-info/ - + dh $@ diff -Nru gce-compute-image-packages-20190801/debian/watch gce-compute-image-packages-20201222.00/debian/watch --- gce-compute-image-packages-20190801/debian/watch 2020-10-13 14:27:04.000000000 +0000 +++ gce-compute-image-packages-20201222.00/debian/watch 2021-01-13 22:12:30.000000000 +0000 @@ -7,4 +7,4 @@ filenamemangle=s/.+\/v?(\d\S*)\.tar\.gz/gce-compute-image-packages-$1\.tar\.gz/,\ repack,\ repacksuffix=+dfsg1 \ - https://github.com/GoogleCloudPlatform/compute-image-packages/tags .*/v?(\d\S*)\.tar\.gz + https://github.com/GoogleCloudPlatform/guest-configs/tags .*/v?(\d\S*)\.tar\.gz diff -Nru gce-compute-image-packages-20190801/.gitignore gce-compute-image-packages-20201222.00/.gitignore --- gce-compute-image-packages-20190801/.gitignore 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/.gitignore 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -*.pyc -*.pyo -.DS_Store -.idea -.cache -.tox -.coverage -*.egg-info -build -*.deb -*.rpm - -# emacs backup files -*.*~ diff -Nru gce-compute-image-packages-20190801/LICENSE gce-compute-image-packages-20201222.00/LICENSE --- gce-compute-image-packages-20190801/LICENSE 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/LICENSE 2020-12-07 19:55:14.000000000 +0000 @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2013 Google Inc. + Copyright 2020 Google Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff -Nru gce-compute-image-packages-20190801/OWNERS gce-compute-image-packages-20201222.00/OWNERS --- gce-compute-image-packages-20190801/OWNERS 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/OWNERS 2020-12-07 19:55:14.000000000 +0000 @@ -0,0 +1,13 @@ +# This file enables automatic assignment of PR reviewers. +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - adjackura + - hopkiw + - illfelder + - zmarano +reviewers: + - adjackura + - hopkiw + - illfelder + - zmarano diff -Nru gce-compute-image-packages-20190801/packages/gce-disk-expand/dracut6_7.sh gce-compute-image-packages-20201222.00/packages/gce-disk-expand/dracut6_7.sh --- gce-compute-image-packages-20190801/packages/gce-disk-expand/dracut6_7.sh 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/gce-disk-expand/dracut6_7.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,34 +0,0 @@ -#!/bin/bash -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Modifies rhel6 dracut for rhel7. - -mv src/usr/share src/usr/lib -pushd src/usr/lib/dracut/modules.d/50expand_rootfs - -cat >module-setup.sh < Mon, 08 Jul 2019 10:20:47 -0700 - -gce-disk-expand (2.0.0-1) stable; urgency=low - - * Initial debian package import. 
- * Standardize on initramfs scripts for disk resizing. - - -- Google Cloud Team Mon, 26 Nov 2018 12:00:00 -0700 diff -Nru gce-compute-image-packages-20190801/packages/gce-disk-expand/packaging/debian/compat gce-compute-image-packages-20201222.00/packages/gce-disk-expand/packaging/debian/compat --- gce-compute-image-packages-20190801/packages/gce-disk-expand/packaging/debian/compat 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/gce-disk-expand/packaging/debian/compat 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -9 diff -Nru gce-compute-image-packages-20190801/packages/gce-disk-expand/packaging/debian/control gce-compute-image-packages-20201222.00/packages/gce-disk-expand/packaging/debian/control --- gce-compute-image-packages-20190801/packages/gce-disk-expand/packaging/debian/control 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/gce-disk-expand/packaging/debian/control 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -Source: gce-disk-expand -Section: admin -Priority: optional -Maintainer: Google Cloud Team -Build-Depends: debhelper (>= 10) -Standards-Version: 3.9.8 -Homepage: https://github.com/GoogleCloudPlatform/compute-image-packages - -Package: gce-disk-expand -Architecture: all -Depends: parted, - ${misc:Depends} -Description: Automatically resize the root partition on first boot. - This package resizes the root partition on first boot using parted. 
diff -Nru gce-compute-image-packages-20190801/packages/gce-disk-expand/packaging/debian/copyright gce-compute-image-packages-20201222.00/packages/gce-disk-expand/packaging/debian/copyright --- gce-compute-image-packages-20190801/packages/gce-disk-expand/packaging/debian/copyright 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/gce-disk-expand/packaging/debian/copyright 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: gce-disk-expand -Upstream-Contact: gc-team@google.com - -Files: * -Copyright: Copyright 2017 Google Inc. -License: Apache-2.0 - -Files: debian/* -Copyright: Copyright 2017 Google Inc. -License: Apache-2.0 - -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - On Debian systems, the complete text of the Apache version 2.0 license - can be found in "/usr/share/common-licenses/Apache-2.0". 
diff -Nru gce-compute-image-packages-20190801/packages/gce-disk-expand/packaging/debian/install gce-compute-image-packages-20201222.00/packages/gce-disk-expand/packaging/debian/install --- gce-compute-image-packages-20190801/packages/gce-disk-expand/packaging/debian/install 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/gce-disk-expand/packaging/debian/install 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -src/usr/share/initramfs-tools usr/share/ -src/expandfs-lib.sh usr/share/initramfs-tools/scripts/ diff -Nru gce-compute-image-packages-20190801/packages/gce-disk-expand/packaging/debian/postinst gce-compute-image-packages-20201222.00/packages/gce-disk-expand/packaging/debian/postinst --- gce-compute-image-packages-20190801/packages/gce-disk-expand/packaging/debian/postinst 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/gce-disk-expand/packaging/debian/postinst 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ -#!/bin/sh -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -#DEBHELPER# - -set -e - -case "$1" in - configure) - if which /usr/sbin/update-initramfs > /dev/null; then - update-initramfs -u - fi - ;; -esac diff -Nru gce-compute-image-packages-20190801/packages/gce-disk-expand/packaging/debian/rules gce-compute-image-packages-20201222.00/packages/gce-disk-expand/packaging/debian/rules --- gce-compute-image-packages-20190801/packages/gce-disk-expand/packaging/debian/rules 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/gce-disk-expand/packaging/debian/rules 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ -#!/usr/bin/make -f - -%: - dh $@ diff -Nru gce-compute-image-packages-20190801/packages/gce-disk-expand/packaging/gce-disk-expand.spec gce-compute-image-packages-20201222.00/packages/gce-disk-expand/packaging/gce-disk-expand.spec --- gce-compute-image-packages-20190801/packages/gce-disk-expand/packaging/gce-disk-expand.spec 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/gce-disk-expand/packaging/gce-disk-expand.spec 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-Name: gce-disk-expand -Summary: Google Compute Engine root disk expansion module -Epoch: 1 -Version: %{_version} -Release: g1 -License: Apache Software License -Group: System Environment/Base -URL: https://github.com/GoogleCloudPlatform/compute-image-packages -Source0: %{name}_%{version}.orig.tar.gz -Requires: e2fsprogs, dracut, grep, util-linux, parted -Conflicts: dracut-modules-growroot - -# Allow other files in the source that don't end up in the package. -%define _unpackaged_files_terminate_build 0 - -%description -This package resizes the root partition on first boot using parted. - -%prep -%autosetup - -%install -mv src/expandfs-lib.sh src/usr/share/dracut/modules.d/50expand_rootfs/ -%if 0%{?rhel} >= 7 - ./dracut6_7.sh -%endif -rsync -Pravz src/ %{buildroot} - -%files -%if 0%{?rhel} >= 7 - %attr(755,root,root) /usr/lib/dracut/modules.d/50expand_rootfs/* -%else - %attr(755,root,root) /usr/share/dracut/modules.d/50expand_rootfs/* -%endif - -%post -dracut --force - -%postun -dracut --force diff -Nru gce-compute-image-packages-20190801/packages/gce-disk-expand/packaging/setup_deb.sh gce-compute-image-packages-20201222.00/packages/gce-disk-expand/packaging/setup_deb.sh --- gce-compute-image-packages-20190801/packages/gce-disk-expand/packaging/setup_deb.sh 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/gce-disk-expand/packaging/setup_deb.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,41 +0,0 @@ -#!/bin/bash -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -NAME="gce-disk-expand" -VERSION="20190708.00" - -working_dir=${PWD} -if [[ $(basename "$working_dir") != $NAME ]]; then - echo "Packaging scripts must be run from top of package dir." - exit 1 -fi - -# Build dependencies. -sudo apt-get -y install dh-systemd - -# DEB creation tools. -sudo apt-get -y install debhelper devscripts build-essential rsync - -rm -rf /tmp/debpackage -mkdir /tmp/debpackage -tar czvf /tmp/debpackage/${NAME}_${VERSION}.orig.tar.gz --exclude .git --exclude packaging --transform "s/^\./${NAME}-${VERSION}/" . - -cd /tmp/debpackage -tar xzvf ${NAME}_${VERSION}.orig.tar.gz - -cd ${NAME}-${VERSION} -cp -r ${working_dir}/packaging/debian ./ - -debuild -us -uc diff -Nru gce-compute-image-packages-20190801/packages/gce-disk-expand/packaging/setup_rpm.sh gce-compute-image-packages-20201222.00/packages/gce-disk-expand/packaging/setup_rpm.sh --- gce-compute-image-packages-20190801/packages/gce-disk-expand/packaging/setup_rpm.sh 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/gce-disk-expand/packaging/setup_rpm.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,37 +0,0 @@ -#!/bin/bash -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -NAME="gce-disk-expand" -VERSION="20190708.00" - -rpm_working_dir=/tmp/rpmpackage/ -working_dir=${PWD} -if [[ $(basename "$working_dir") != $NAME ]]; then - echo "Packaging scripts must be run from top of package dir." - exit 1 -fi - -# RPM creation tools. -sudo yum -y install rpmdevtools rsync - -rm -rf ${rpm_working_dir} -mkdir -p ${rpm_working_dir}/{SOURCES,SPECS} -cp packaging/${NAME}.spec ${rpm_working_dir}/SPECS/ - -tar czvf ${rpm_working_dir}/SOURCES/${NAME}_${VERSION}.orig.tar.gz \ - --exclude .git --exclude packaging --transform "s/^\./${NAME}-${VERSION}/" . - -rpmbuild --define "_topdir ${rpm_working_dir}/" --define "_version ${VERSION}" \ - -ba ${rpm_working_dir}/SPECS/${NAME}.spec diff -Nru gce-compute-image-packages-20190801/packages/gce-disk-expand/README.md gce-compute-image-packages-20201222.00/packages/gce-disk-expand/README.md --- gce-compute-image-packages-20190801/packages/gce-disk-expand/README.md 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/gce-disk-expand/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ -## gce-disk-expand package for CentOS/RHEL and Debian - -This package is intended to expand the root partition up to 2TB on a GCE VM -without a GPT partition table and over 2TB on GPT partitioned UEFI enabled -images. - -This package has been tested on the following distros and versions. - -* RHEL/CentOS 7.4+ -* RHEL 8+ -* Debian 10+ diff -Nru gce-compute-image-packages-20190801/packages/gce-disk-expand/src/expandfs-lib.sh gce-compute-image-packages-20201222.00/packages/gce-disk-expand/src/expandfs-lib.sh --- gce-compute-image-packages-20190801/packages/gce-disk-expand/src/expandfs-lib.sh 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/gce-disk-expand/src/expandfs-lib.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,143 +0,0 @@ -#!/bin/sh -# Copyright 2018 Google Inc. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -resize_filesystem() { - local disk="$1" fs_type="" - - if ! fs_type=$(blkid_get_fstype "$disk"); then - echo "$fs_type" - return 1 - fi - - case "${fs_type}" in - xfs) - echo "XFS filesystems must be mounted to be resized, deferring." - echo "true" > /tmp/xfs_resize - return 1 - ;; - ext*) - if ! out=$(e2fsck -pf "$disk"); then - echo "Calling e2fsck \"${disk}\" failed: ${out}" - return 1 - fi - if ! out=$(resize2fs "$disk"); then - echo "Calling resize2fs \"${disk}\" failed: ${out}" - return 1 - fi - ;; - *) - echo "Unsupported filesystem type ${fs_type}, unable to expand size." - return 1 - ;; - esac -} - -blkid_get_fstype() ( - local root="$1" - - if ! out=$(blkid -o udev "$root"); then - echo "Detecting fstype by blkid failed: ${out}" - return 1 - fi - - eval "$out" - if [ -z "$ID_FS_TYPE" ]; then - echo "No ID_FS_TYPE from blkid." - return 1 - fi - echo $ID_FS_TYPE -) - - -# Checks for and corrects the end-of-disk GPT backup block in case of expanded -# disk. -parted_fix_gpt() { - local disk="$1" - [ -z "$disk" ] && return - - if parted -sm "$disk" print 2>&1 | grep -q "fix the GPT"; then - # Running parted prompts the user to fix this condition, but only does so in - # the interactive exception handler. In order to pass input we must use the - # hidden triple-dash flag and pass both print and Fix arguments. `print` - # alone will not perform the fix, but `Fix` alone will fail the argument - # parser. 
- parted -m ---pretend-input-tty "$disk" print Fix >/dev/null 2>&1 /dev/null 2>&1 &1 | grep -q "fix the GPT"; then - echo "Failed to fix the GPT." - return 1 - fi - fi -} - -# Returns "disk:partition", supporting multiple block types. -split_partition() { - local root="$1" disk="" partnum="" - [ -z "$root" ] && return - - if [ -e /sys/block/${root##*/} ]; then - echo "Root is not a partition, skipping partition resize." - return 1 - fi - - disk=${root%%p[0-9]*} - [ "$disk" = "$root" ] && disk=${root%%[0-9]} - - partnum=${root#${disk}} - partnum=${partnum#p} - - echo "${disk}:${partnum}" -} - -# Checks if partition needs resizing. -parted_needresize() { - local disk="$1" partnum="$2" disksize="" partend="" - [ -z "$root" ] && return - - if ! out=$(parted -sm "$disk" unit b print 2>&1); then - echo "Failed to get disk details: ${out}" - return 1 - fi - - udevadm settle - - if ! printf "$out" | sed '$!d' | grep -q "^${partnum}:"; then - echo "Root partition is not final partition on disk. Not resizing." - return 1 - fi - - disksize=$(printf "$out" | grep "^${disk}" | cut -d: -f2) - partend=$(printf "$out" | sed '$!d' | cut -d: -f4) - [ -n "$disksize" -a -n "$partend" ] || return 1 - - disksize=${disksize%%B} - partend=${partend%%B} - - # Check if the distance is > .5GB - [ $((disksize-partend)) -gt 536870912 ] - return -} - -# Resizes partition using 'resizepart' command. -parted_resizepart() { - local disk="$1" partnum="$2" - [ -z "$disk" -o -z "$partnum" ] && return - - if ! 
out=$(parted -sm "$disk" -- resizepart $partnum -1 2>&1); then - echo "Unable to resize ${disk}${partnum}: ${out}" - return 1 - fi - udevadm settle -} diff -Nru gce-compute-image-packages-20190801/packages/gce-disk-expand/src/usr/share/dracut/modules.d/50expand_rootfs/check gce-compute-image-packages-20201222.00/packages/gce-disk-expand/src/usr/share/dracut/modules.d/50expand_rootfs/check --- gce-compute-image-packages-20190801/packages/gce-disk-expand/src/usr/share/dracut/modules.d/50expand_rootfs/check 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/gce-disk-expand/src/usr/share/dracut/modules.d/50expand_rootfs/check 1970-01-01 00:00:00.000000000 +0000 @@ -1,16 +0,0 @@ -#!/bin/sh -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -command -v parted >/dev/null 2>&1 diff -Nru gce-compute-image-packages-20190801/packages/gce-disk-expand/src/usr/share/dracut/modules.d/50expand_rootfs/expand_rootfs_dummy.sh gce-compute-image-packages-20201222.00/packages/gce-disk-expand/src/usr/share/dracut/modules.d/50expand_rootfs/expand_rootfs_dummy.sh --- gce-compute-image-packages-20190801/packages/gce-disk-expand/src/usr/share/dracut/modules.d/50expand_rootfs/expand_rootfs_dummy.sh 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/gce-disk-expand/src/usr/share/dracut/modules.d/50expand_rootfs/expand_rootfs_dummy.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -#!/bin/sh -# Dummy script, to make sure systemd executes the cmdline stage (which exports -# the 'root' variable required for expand_root) diff -Nru gce-compute-image-packages-20190801/packages/gce-disk-expand/src/usr/share/dracut/modules.d/50expand_rootfs/expand_rootfs.sh gce-compute-image-packages-20201222.00/packages/gce-disk-expand/src/usr/share/dracut/modules.d/50expand_rootfs/expand_rootfs.sh --- gce-compute-image-packages-20190801/packages/gce-disk-expand/src/usr/share/dracut/modules.d/50expand_rootfs/expand_rootfs.sh 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/gce-disk-expand/src/usr/share/dracut/modules.d/50expand_rootfs/expand_rootfs.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,68 +0,0 @@ -#!/bin/sh -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -# Contains dracut-specific logic for detecting disk, then calls appropriate -# library functions. - -kmsg() { - echo "expand_rootfs: $@" > /dev/kmsg -} - -main() { - local disk="" partnum="" fs_type="" rootdev="" - - # Remove 'block:' prefix and find the root device. - if ! rootdev=$(readlink -f "${root#block:}") || [ -z "${rootdev}" ]; then - kmsg "Unable to find root device." - return - fi - - if ! out=$(split_partition "$rootdev"); then - kmsg "Failed to detect disk and partition info: ${out}" - return - fi - - disk=${out%:*} - partnum=${out#*:} - - if ! parted_needresize "$disk" "$partnum"; then - kmsg "Disk ${rootdev} doesn't need resizing" - return - fi - - if ! parted --help | grep -q 'resizepart'; then - kmsg "No 'resizepart' command in this parted" - return - fi - - kmsg "Resizing disk ${rootdev}" - - if ! out=$(parted_resizepart "$disk" "$partnum"); then - # Try fixing the GPT and try resizing again. - parted_fix_gpt "$disk" - if ! out=$(parted_resizepart "$disk" "$partnum"); then - kmsg "Failed to resize partition: ${out}" - return - fi - fi - - if ! out=$(resize_filesystem "$rootdev"); then - kmsg "Failed to resize filesystem: ${out}" - return - fi -} - -. /lib/expandfs-lib.sh -main diff -Nru gce-compute-image-packages-20190801/packages/gce-disk-expand/src/usr/share/dracut/modules.d/50expand_rootfs/install gce-compute-image-packages-20201222.00/packages/gce-disk-expand/src/usr/share/dracut/modules.d/50expand_rootfs/install --- gce-compute-image-packages-20190801/packages/gce-disk-expand/src/usr/share/dracut/modules.d/50expand_rootfs/install 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/gce-disk-expand/src/usr/share/dracut/modules.d/50expand_rootfs/install 1970-01-01 00:00:00.000000000 +0000 @@ -1,30 +0,0 @@ -#!/bin/sh -# Copyright 2018 Google Inc. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Dracut install script for RHEL/CentOS - -inst "$moddir/expandfs-lib.sh" "/lib/expandfs-lib.sh" -inst_hook cmdline 50 "$moddir/expand_rootfs_dummy.sh" -inst_hook pre-mount 50 "$moddir/expand_rootfs.sh" -inst_hook pre-pivot 99 "$moddir/xfs_growfs.sh" - -dracut_install parted -dracut_install cut -dracut_install sed -dracut_install grep -dracut_install resize2fs -dracut_install e2fsck -dracut_install udevadm -dracut_install -o xfs_growfs diff -Nru gce-compute-image-packages-20190801/packages/gce-disk-expand/src/usr/share/dracut/modules.d/50expand_rootfs/xfs_growfs.sh gce-compute-image-packages-20201222.00/packages/gce-disk-expand/src/usr/share/dracut/modules.d/50expand_rootfs/xfs_growfs.sh --- gce-compute-image-packages-20190801/packages/gce-disk-expand/src/usr/share/dracut/modules.d/50expand_rootfs/xfs_growfs.sh 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/gce-disk-expand/src/usr/share/dracut/modules.d/50expand_rootfs/xfs_growfs.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,43 +0,0 @@ -#!/bin/sh -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -kmsg() { - echo "xfs_growfs: $@" >/dev/kmsg -} - -main() { - if [ ! -e /tmp/xfs_resize ]; then - return - fi - - if ! type xfs_growfs >/dev/null; then - kmsg "XFS resize requested, but xfs_growfs not installed." - return - fi - - kmsg "Mounting filesystem rw for resize." - if ! $(mount -o rw,remount /sysroot); then - kmsg "Remount failed." - return - fi - - kmsg "Resizing XFS filesystem" - if ! out=$(xfs_growfs -d /sysroot); then - kmsg "Failed to resize: ${out}" - fi - mount -o ro,remount /sysroot -} - -main diff -Nru gce-compute-image-packages-20190801/packages/gce-disk-expand/src/usr/share/initramfs-tools/hooks/expand-rootfs gce-compute-image-packages-20201222.00/packages/gce-disk-expand/src/usr/share/initramfs-tools/hooks/expand-rootfs --- gce-compute-image-packages-20190801/packages/gce-disk-expand/src/usr/share/initramfs-tools/hooks/expand-rootfs 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/gce-disk-expand/src/usr/share/initramfs-tools/hooks/expand-rootfs 1970-01-01 00:00:00.000000000 +0000 @@ -1,38 +0,0 @@ -#!/bin/sh -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -PREREQS="" -prereqs() -{ - echo "$PREREQS" -} - -case $1 in - prereqs) - prereqs - exit 0 - ;; -esac - -. /usr/share/initramfs-tools/hook-functions - -copy_exec /sbin/parted /bin -copy_exec /bin/grep /bin -copy_exec /bin/sed /bin -copy_exec /usr/bin/cut /bin -copy_exec /sbin/e2fsck /bin -copy_exec /sbin/blkid /bin -copy_exec /sbin/resize2fs /bin -copy_exec /bin/udevadm /bin diff -Nru gce-compute-image-packages-20190801/packages/gce-disk-expand/src/usr/share/initramfs-tools/scripts/local-bottom/xfs_growfs gce-compute-image-packages-20201222.00/packages/gce-disk-expand/src/usr/share/initramfs-tools/scripts/local-bottom/xfs_growfs --- gce-compute-image-packages-20190801/packages/gce-disk-expand/src/usr/share/initramfs-tools/scripts/local-bottom/xfs_growfs 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/gce-disk-expand/src/usr/share/initramfs-tools/scripts/local-bottom/xfs_growfs 1970-01-01 00:00:00.000000000 +0000 @@ -1,57 +0,0 @@ -#!/bin/sh -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -PREREQ="" -prereqs() -{ - echo "$PREREQ" -} - -case $1 in - prereqs) - prereqs - exit 0 - ;; -esac - -. /scripts/functions - -if ! real_root=$(resolve_device "${ROOT}"); then - log_failure_message "Failed to resolve root device for \"${ROOT}\"" -fi - -if ! fs_type=$(get_fstype "${real_root}"); then - log_failure_message "Failed to determine fstype for \"${real_root}\"" -fi - -if [ $fs_type != "xfs" ]; then - exit 0 -fi - -if ! command -v xfs_growfs >/dev/null; then - echo "XFS resize requested, but xfs_growfs not installed." - exit 0 -fi - -if xfs_growfs -d -n /; then - log_begin_msg "Resizing xfs filesystem on ${real_root}" - if ! out=$(xfs_growfs -d /); then - log_failure_msg "Failed to resize ${real_root}: ${out}" - exit 1 - fi - log_end_msg -fi - -exit 0 diff -Nru gce-compute-image-packages-20190801/packages/gce-disk-expand/src/usr/share/initramfs-tools/scripts/local-premount/expand_rootfs gce-compute-image-packages-20201222.00/packages/gce-disk-expand/src/usr/share/initramfs-tools/scripts/local-premount/expand_rootfs --- gce-compute-image-packages-20190801/packages/gce-disk-expand/src/usr/share/initramfs-tools/scripts/local-premount/expand_rootfs 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/gce-disk-expand/src/usr/share/initramfs-tools/scripts/local-premount/expand_rootfs 1970-01-01 00:00:00.000000000 +0000 @@ -1,67 +0,0 @@ -#!/bin/sh -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -PREREQ="" -prereqs() -{ - echo "$PREREQ" -} -case $1 in - prereqs) - prereqs - exit 0 - ;; -esac - -. /scripts/functions -. /scripts/expandfs-lib.sh - -if ! rootdev=$(resolve_device "${ROOT}"); then - log_failure_message "Failed to resolve root device for \"${ROOT}\"" -fi - -if ! fs_type=$(get_fstype "${rootdev}"); then - log_failure_message "Failed to determine fstype for \"${rootdev}\"" -fi - -if ! out=$(split_partition "$rootdev"); then - log_failure_message "Failed to detect disk and partition info: ${out}" - exit 0 -fi - -disk=${out%:*} -partnum=${out#*:} - -if ! parted_needresize "${disk}" "${partnum}"; then - log_success_message "Disk ${rootdev} doesn't need resizing." - exit 0 -fi - -if ! out=$(parted_fix_gpt "$disk"); then - log_failure_message "Failed to fix GPT: ${out}" - exit 0 -fi - -echo "Resizing partition on ${rootdev}" -if ! out=$(parted_resizepart "${disk}" "${partnum}"); then - log_failure_message "Failed to resize partition: ${out}" - exit 0 -fi - -echo "Resizing filesystem on ${rootdev}" -if ! 
out=$(resize_filesystem "${rootdev}"); then - log_failure_message "Failed to resize filesystem: ${out}" - exit 0 -fi diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/debian/changelog gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/debian/changelog --- gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/debian/changelog 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/debian/changelog 1970-01-01 00:00:00.000000000 +0000 @@ -1,235 +0,0 @@ -google-compute-engine (1:20190801.00-g1) stable; urgency=medium - - * Re-enable boto config and drop writing plugin directory. - * Fix metadata script retrieval. - - -- Google Cloud Team Thu, 01 Aug 2019 14:06:02 -0700 - -google-compute-engine (1:20190729.00-g1) stable; urgency=medium - - * Suport Google Private Access over IPv6. - * Switch to v1 guest attributes URL. - - -- Google Cloud Team Mon, 29 Jul 2019 10:07:29 -0700 - -google-compute-engine (1:20190708.00-g1) stable; urgency=medium - - * Drop unnecessary build and package dependencies. - * Log to journal and console directly from systemd service files. - * Update Debian build dependencies. - - -- Google Cloud Team Mon, 08 Jul 2019 10:20:15 -0700 - -google-compute-engine (2.8.16-1) stable; urgency=low - - * Fix instance setup in Python 3 environments. - - -- Google Cloud Team Wed, 22 May 2019 12:00:00 -0700 - -google-compute-engine (2.8.15-1) stable; urgency=low - - * Fix XPS settings with more than 64 vCPUs. - - -- Google Cloud Team Tue, 21 May 2019 12:00:00 -0700 - -google-compute-engine (2.8.14-1) stable; urgency=low - - * Upstart systems: only run startup scripts at boot. - - -- Google Cloud Team Tue, 16 Apr 2019 12:00:00 -0700 - -google-compute-engine (2.8.13-1) stable; urgency=low - - * Fix metadata script retrieval to support Python 3. 
- - -- Google Cloud Team Thu, 24 Jan 2019 12:00:00 -0700 - -google-compute-engine (2.8.12-1) stable; urgency=low - - * Fix two factor enablement on change. - - -- Google Cloud Team Wed, 05 Dec 2018 12:00:00 -0700 - -google-compute-image-packages (2.8.11-1) stable; urgency=low - - * Split up the gpasswd command into two commands. - * Update two factor enablement on change. - - -- Google Cloud Team Tue, 04 Dec 2018 12:00:00 -0700 - -google-compute-image-packages (2.8.10-1) stable; urgency=low - - * Fix the gpasswd command default. - - -- Google Cloud Team Fri, 30 Nov 2018 12:00:00 -0700 - -google-compute-image-packages (2.8.9-1) stable; urgency=low - - * Support enabling OS Login two factor authentication. - * Improve accounts support for FreeBSD. - * Improve SELinux support. - - -- Google Cloud Team Wed, 28 Nov 2018 12:00:00 -0700 - -google-compute-image-packages (2.8.8-1) stable; urgency=low - - * Update sudoer group membership without overriding local groups. - - -- Google Cloud Team Tue, 23 Oct 2018 12:00:00 -0700 - -google-compute-image-packages (2.8.7-1) stable; urgency=low - - * Remove users from sudoers group on removal (fixed). - - -- Google Cloud Team Thu, 18 Oct 2018 12:00:00 -0700 - -google-compute-image-packages (2.8.6-1) stable; urgency=low - - * Revert PR: Remove users from sudoers group on removal. - - -- Google Cloud Team Thu, 11 Oct 2018 12:00:00 -0700 - -google-compute-image-packages (2.8.5-1) stable; urgency=low - - * Remove users from sudoers group on removal. - * Remove gsutil dependency for metadata scripts. - - -- Google Cloud Team Thu, 05 Oct 2018 12:00:00 -0700 - -google-compute-image-packages (2.8.4-1) stable; urgency=low - - * Remove ntp dependency. - * Support Debian 10 Buster. - * Restart the network daemon if networking is restarted. - * Prevent setup of the default ethernet interface. - * Accounts daemon can now verify username is 32 characters or less. 
- - -- Google Cloud Team Wed, 05 Sep 2018 12:00:00 -0700 - -google-compute-image-packages (2.8.3-1) stable; urgency=low - - * Prevent IP forwarding daemon log spam. - * Make default shell configurable when executing metadata scripts. - * Rename distro directory to distro_lib. - - -- Google Cloud Team Mon, 11 June 2018 12:00:00 -0700 - -google-compute-image-packages (2.8.2-1) stable; urgency=low - - * Prevent delay in configuring IP forwarding routes. - * Improve instance setup support for FreeBSD. - - -- Google Cloud Team Thu, 10 May 2018 12:00:00 -0700 - -google-compute-image-packages (2.8.1-1) stable; urgency=low - - * Improve OS Login disablement. - - -- Google Cloud Team Fri, 04 May 2018 12:00:00 -0700 - -google-compute-image-packages (2.8.0-1) stable; urgency=low - - * Create a new network daemon. - * Refactor the IP forwarding daemon and network setup. - * Improvements for using NSS cache in the accounts daemon. - - -- Google Cloud Team Tue, 01 May 2018 12:00:00 -0700 - -google-compute-image-packages (2.7.7-1) stable; urgency=low - - * Add support for NSS cache in OS Login. - - -- Google Cloud Team Thu, 08 Mar 2018 12:00:00 -0700 - -google-compute-image-packages (2.7.6-1) stable; urgency=low - - * Add distro specific logic. - - -- Google Cloud Team Wed, 21 Feb 2018 12:00:00 -0700 - -google-compute-image-packages (2.7.5-2) stable; urgency=low - - * Fix dependencies for syslog. - - -- Google Cloud Team Tue, 06 Feb 2018 12:00:00 -0700 - -google-compute-image-packages (2.7.5-1) stable; urgency=low - - * Revert hostname setting change in Debian. - - -- Google Cloud Team Mon, 29 Jan 2018 12:00:00 -0700 - -google-compute-image-packages (2.7.4-1) stable; urgency=low - - * Fix hostname setting in Debian. - - -- Google Cloud Team Mon, 29 Jan 2018 12:00:00 -0700 - -google-compute-image-packages (2.7.3-1) stable; urgency=low - - * Improve hostname setting and correctly restart rsyslog. 
- - -- Google Cloud Team Thu, 25 Jan 2018 12:00:00 -0700 - -google-compute-image-packages (2.7.2-2) stable; urgency=low - - * Force IPv4 for apt. - - -- Google Cloud Team Wed, 13 Dec 2017 12:00:00 -0700 - -google-compute-image-packages (2.7.2-1) stable; urgency=low - - * Generate SSH host keys when none are present. - * Improve logging when activating OS Login. - - -- Google Cloud Team Wed, 29 Nov 2017 12:00:00 -0700 - -google-compute-image-packages (2.7.1-1) stable; urgency=low - - * Update set_hostname file name to prevent conflict. - * Add apt config to prevent auto-removal of google-compute-engine. - - -- Google Cloud Team Wed, 25 Oct 2017 12:00:00 -0700 - -google-compute-image-packages (2.7.0-6) stable; urgency=low - - * Linux guest environment support for OS Login. - - -- Google Cloud Team Tue, 17 Oct 2017 12:00:00 -0700 - -google-compute-image-packages (2.6.2-1) stable; urgency=low - - * Fix system hang during VM shutdown. - - -- Google Cloud Team Fri, 06 Oct 2017 12:00:00 -0700 - -google-compute-image-packages (2.6.1-1) stable; urgency=low - - * Use curl to download metadata script files for SSL certificate validation. - * Use netifaces for retrieving MAC address names if the import exists. - - -- Google Cloud Team Thurs, 14 Sep 2017 12:00:00 -0700 - -google-compute-image-packages (2.6.0-4) stable; urgency=low - - * Fix DHCP exit hook install. - - -- Google Cloud Team Mon, 28 Aug 2017 12:00:00 -0700 - -google-compute-image-packages (2.6.0-3) stable; urgency=low - - * Add systemd preset. - - -- Google Cloud Team Fri, 25 Aug 2017 14:00:00 -0700 - -google-compute-image-packages (2.6.0-2) stable; urgency=low - - * Add DHCP exit hook script back into package. - - -- Google Cloud Team Fri, 25 Aug 2017 12:00:00 -0700 - -google-compute-image-packages (2.6.0-1) stable; urgency=low - - * New packaging. 
- - -- Google Cloud Team Mon, 27 Jun 2017 12:00:00 -0700 diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/debian/compat gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/debian/compat --- gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/debian/compat 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/debian/compat 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -10 diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/debian/control gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/debian/control --- gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/debian/control 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/debian/control 1970-01-01 00:00:00.000000000 +0000 @@ -1,37 +0,0 @@ -Source: google-compute-engine -Section: admin -Priority: optional -Maintainer: Google Cloud Team -Build-Depends: debhelper (>= 10) -Standards-Version: 3.9.8 -Homepage: https://github.com/GoogleCloudPlatform/compute-image-packages - -Package: google-compute-engine -Architecture: all -Depends: google-compute-engine-oslogin, - python-google-compute-engine, - python3-google-compute-engine, - ${misc:Depends}, - systemd -Recommends: rsyslog | system-log-daemon -Provides: irqbalance -Conflicts: google-compute-engine-jessie, - google-compute-engine-init-jessie, - google-config-jessie, - google-compute-engine-stretch, - google-compute-engine-init-stretch, - google-config-stretch, - google-compute-daemon, - google-startup-scripts, - irqbalance -Replaces: google-compute-engine-jessie, - google-compute-engine-init-jessie, - google-config-jessie, - google-compute-engine-stretch, - google-compute-engine-init-stretch, - google-config-stretch, - google-compute-daemon, - 
google-startup-scripts -Description: Google Compute Engine guest environment. - This package contains scripts, configuration, and systemd init files for - features specific to the Google Compute Engine cloud environment. diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/debian/copyright gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/debian/copyright --- gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/debian/copyright 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/debian/copyright 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: google-compute-engine -Upstream-Contact: gc-team@google.com - -Files: * -Copyright: Copyright 2017 Google Inc. -License: Apache-2.0 - -Files: debian/* -Copyright: Copyright 2017 Google Inc. -License: Apache-2.0 - -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - On Debian systems, the complete text of the Apache version 2.0 license - can be found in "/usr/share/common-licenses/Apache-2.0". 
diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/debian/google-compute-engine.links gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/debian/google-compute-engine.links --- gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/debian/google-compute-engine.links 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/debian/google-compute-engine.links 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -usr/bin/google_set_hostname etc/dhcp/dhclient-exit-hooks.d/google_set_hostname diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/debian/install gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/debian/install --- gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/debian/install 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/debian/install 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -etc/apt/apt.conf.d/* -etc/modprobe.d/* -etc/rsyslog.d/* -etc/sysctl.d/* -lib/systemd/system/* -lib/systemd/system-preset/* -lib/udev/rules.d/* -usr/bin/* diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/debian/postinst gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/debian/postinst --- gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/debian/postinst 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/debian/postinst 1970-01-01 00:00:00.000000000 +0000 @@ -1,33 +0,0 @@ -#!/bin/sh -# Copyright 2017 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -#DEBHELPER# - -# Service reload or restart. We do not start or restart -# google-shutdown-scripts.service google-startup-scripts.service. -set -e -if [ -d /run/systemd/system ]; then - systemctl --system daemon-reload >/dev/null || true - if [ -n "$2" ]; then - _dh_action=reload-or-restart - else - _dh_action=start - fi - deb-systemd-invoke $_dh_action \ - google-instance-setup.service \ - google-accounts-daemon.service \ - google-clock-skew-daemon.service \ - google-network-daemon.service >/dev/null || true -fi diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/debian/preinst gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/debian/preinst --- gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/debian/preinst 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/debian/preinst 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ -#!/bin/sh -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -#DEBHELPER# - -set -e -if [ -d /run/systemd/system ] ; then - if [ "$1" = upgrade ]; then - # Remove old services if they exist on upgrade. - if [ -f /lib/systemd/system/google-ip-forwarding-daemon.service ]; then - systemctl stop google-ip-forwarding-daemon.service - systemctl disable google-ip-forwarding-daemon.service - fi - - if [ -f /lib/systemd/system/google-network-setup.service ]; then - systemctl stop google-network-setup.service - systemctl disable google-network-setup.service - fi - fi -fi diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/debian/prerm gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/debian/prerm --- gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/debian/prerm 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/debian/prerm 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ -#!/bin/sh -# Copyright 2017 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -#DEBHELPER# - -# Stop all services on remove. 
-set -e -if [ -d /run/systemd/system ] && [ "$1" = remove ]; then - deb-systemd-invoke stop \ - google-instance-setup.service \ - google-accounts-daemon.service \ - google-clock-skew-daemon.service \ - google-network-daemon.service \ - google-shutdown-scripts.service \ - google-startup-scripts.service >/dev/null -fi diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/debian/rules gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/debian/rules --- gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/debian/rules 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/debian/rules 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -#!/usr/bin/make -f - -%: - dh $@ --with systemd - -override_dh_systemd_start: - # Configured in postinst. diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/debian/source/format gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/debian/source/format --- gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/debian/source/format 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/debian/source/format 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -3.0 (quilt) diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/google-compute-engine-el6.spec gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/google-compute-engine-el6.spec --- gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/google-compute-engine-el6.spec 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/google-compute-engine-el6.spec 1970-01-01 00:00:00.000000000 +0000 @@ -1,93 +0,0 @@ -# Copyright 2017 Google Inc. 
All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -Name: google-compute-engine -Epoch: 1 -Version: %{_version} -Release: g1.el6 -Summary: Google Compute Engine guest environment. -License: ASL 2.0 -Url: https://github.com/GoogleCloudPlatform/compute-image-packages -Source0: %{name}_%{version}.orig.tar.gz -Requires: curl -Requires: google-compute-engine-oslogin -Requires: python-google-compute-engine = 1:%{version} -Requires: rsyslog -# Old packages. -Obsoletes: google-compute-engine-init -Obsoletes: google-config -Obsoletes: google-startup-scripts - -BuildArch: noarch - -# Allow other files in the source that don't end up in the package. -%define _unpackaged_files_terminate_build 0 - -%description -This package contains scripts, configuration, and init files for features -specific to the Google Compute Engine cloud environment. - -%prep -%autosetup - -%install -cp -a src/{etc,usr} %{buildroot} -install -d %{buildroot}/lib/ -cp -a src/lib/udev %{buildroot}/lib -mkdir -p %{buildroot}/etc/dhcp -ln -sf /usr/bin/google_set_hostname %{buildroot}/etc/dhcp/dhclient-exit-hooks - -%files -%defattr(0644,root,root,0755) -%attr(0755,-,-) %{_bindir}/* -%attr(0755,-,-) %{_sbindir}/* -/lib/udev/rules.d/* -/etc/init/*.conf -/etc/dhcp/dhclient-exit-hooks -%config /etc/modprobe.d/* -%config /etc/rsyslog.d/* -%config /etc/sysctl.d/* - -%post -if [ $1 -eq 2 ]; then - # New service might not be enabled during upgrade. 
- systemctl enable google-network-daemon.service -fi - -# On upgrade run instance setup again to handle any new configs and restart -# daemons. -if [ $1 -eq 2 ]; then - stop -q -n google-accounts-daemon - stop -q -n google-clock-skew-daemon - stop -q -n google-network-daemon - /usr/bin/google_instance_setup - start -q -n google-accounts-daemon - start -q -n google-clock-skew-daemon - start -q -n google-network-daemon -fi - -if initctl status google-ip-forwarding-daemon | grep -q 'running'; then - stop -q -n google-ip-forwarding-daemon -fi - -%preun -# On uninstall only. -if [ $1 -eq 0 ]; then - stop -q -n google-accounts-daemon - stop -q -n google-clock-skew-daemon - stop -q -n google-network-daemon - if initctl status google-ip-forwarding-daemon | grep -q 'running'; then - stop -q -n google-ip-forwarding-daemon - fi -fi diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/google-compute-engine.spec gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/google-compute-engine.spec --- gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/google-compute-engine.spec 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/google-compute-engine.spec 1970-01-01 00:00:00.000000000 +0000 @@ -1,105 +0,0 @@ -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# For EL7, if building on CentOS, override dist to be el7. -%if 0%{?rhel} == 7 - %define dist .el7 -%endif - -Name: google-compute-engine -Epoch: 1 -Version: %{_version} -Release: g1%{?dist} -Summary: Google Compute Engine guest environment. -License: ASL 2.0 -Url: https://github.com/GoogleCloudPlatform/compute-image-packages -Source0: %{name}_%{version}.orig.tar.gz -Requires: curl -Requires: google-compute-engine-oslogin -%if 0%{?rhel} == 8 -Requires: python3-google-compute-engine = 1:%{version} -%else -Requires: python-google-compute-engine = 1:%{version} -%endif -Requires: rsyslog - -BuildArch: noarch -BuildRequires: systemd - -# Allow other files in the source that don't end up in the package. -%define _unpackaged_files_terminate_build 0 - -%description -This package contains scripts, configuration, and init files for features -specific to the Google Compute Engine cloud environment. - -%prep -%autosetup - -%install -cp -a src/{etc,usr} %{buildroot} -install -d %{buildroot}/{%{_unitdir},%{_presetdir},%{_udevrulesdir}} -cp -a src/lib/systemd/system/* %{buildroot}/%{_unitdir} -cp -a src/lib/systemd/system-preset/* %{buildroot}/%{_presetdir} -cp -a src/lib/udev/rules.d/* %{buildroot}/%{_udevrulesdir} - -%files -%defattr(0644,root,root,0755) -%attr(0755,-,-) %{_bindir}/* -%attr(0755,-,-) /etc/dhcp/dhclient.d/google_hostname.sh -%{_udevrulesdir}/* -%{_unitdir}/* -%{_presetdir}/* -%config /etc/modprobe.d/* -%config /etc/rsyslog.d/* -%config /etc/sysctl.d/* - -%post -# On upgrade run instance setup again to handle any new configs and restart -# daemons. 
-if [ $1 -eq 2 ]; then - /usr/bin/google_instance_setup - systemctl reload-or-restart google-accounts-daemon.service - systemctl reload-or-restart google-clock-skew-daemon.service - systemctl reload-or-restart google-network-daemon.service -fi - -%systemd_post google-accounts-daemon.service -%systemd_post google-clock-skew-daemon.service -%systemd_post google-instance-setup.service -%systemd_post google-network-daemon.service -%systemd_post google-shutdown-scripts.service -%systemd_post google-startup-scripts.service - -# Remove old services. -if [ -f /lib/systemd/system/google-ip-forwarding-daemon.service ]; then - systemctl stop --no-block google-ip-forwarding-daemon - systemctl disable google-ip-forwarding-daemon.service -fi - -if [ -f /lib/systemd/system/google-network-setup.service ]; then - systemctl stop --no-block google-network-setup - systemctl disable google-network-setup.service -fi - -%preun -# On uninstall only. -if [ $1 -eq 0 ]; then - %systemd_preun google-accounts-daemon.service - %systemd_preun google-clock-skew-daemon.service - %systemd_preun google-instance-setup.service - %systemd_preun google-network-daemon.service - %systemd_preun google-shutdown-scripts.service - %systemd_preun google-startup-scripts.service -fi diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/setup_deb.sh gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/setup_deb.sh --- gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/setup_deb.sh 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/setup_deb.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,43 +0,0 @@ -#!/bin/bash -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -NAME="google-compute-engine" -VERSION="20190801.00" - -working_dir=${PWD} -if [[ $(basename "$working_dir") != $NAME ]]; then - echo "Packaging scripts must be run from top of package dir." - exit 1 -fi - -# Build dependencies. -sudo apt-get -y install dh-systemd - -# .deb creation tools. -sudo apt-get -y install debhelper devscripts build-essential - -rm -rf /tmp/debpackage -mkdir /tmp/debpackage -cd src -tar czvf /tmp/debpackage/${NAME}_${VERSION}.orig.tar.gz --transform "s/^\./${NAME}-${VERSION}/" . - -cd /tmp/debpackage -tar xzvf ${NAME}_${VERSION}.orig.tar.gz - -cd ${NAME}-${VERSION} - -cp -r ${working_dir}/packaging/debian ./ - -debuild -us -uc diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/setup_rpm.sh gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/setup_rpm.sh --- gce-compute-image-packages-20190801/packages/google-compute-engine/packaging/setup_rpm.sh 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/packaging/setup_rpm.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,43 +0,0 @@ -#!/bin/bash -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -NAME="google-compute-engine" -VERSION="20190801.00" - -rpm_working_dir=/tmp/rpmpackage/${NAME}-${VERSION} -working_dir=${PWD} -if [[ $(basename "$working_dir") != $NAME ]]; then - echo "Packaging scripts must be run from top of package dir." - exit 1 -fi - -# RPM creation tools. -sudo yum -y install rpmdevtools - -rm -rf /tmp/rpmpackage -mkdir -p ${rpm_working_dir}/{SOURCES,SPECS} - -# EL6 has a separate .spec file. -if [[ -e /etc/redhat-release ]] && grep -q release\ 6 /etc/redhat-release; then - cp packaging/${NAME}-el6.spec ${rpm_working_dir}/SPECS/${NAME}.spec -else - cp packaging/${NAME}.spec ${rpm_working_dir}/SPECS/ -fi - -tar czvf ${rpm_working_dir}/SOURCES/${NAME}_${VERSION}.orig.tar.gz \ - --exclude .git --exclude packaging --transform "s/^\./${NAME}-${VERSION}/" . 
- -rpmbuild --define "_topdir ${rpm_working_dir}/" --define "_version ${VERSION}" \ - -ba ${rpm_working_dir}/SPECS/${NAME}.spec diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/src/etc/apt/apt.conf.d/01autoremove-gce gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/etc/apt/apt.conf.d/01autoremove-gce --- gce-compute-image-packages-20190801/packages/google-compute-engine/src/etc/apt/apt.conf.d/01autoremove-gce 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/etc/apt/apt.conf.d/01autoremove-gce 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -APT -{ - NeverAutoRemove - { - "gce-compute-image-packages.*"; - "google-compute-engine.*"; - "python-google-compute-engine.*"; - "python3-google-compute-engine.*"; - }; -}; diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/src/etc/apt/apt.conf.d/99ipv4-only gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/etc/apt/apt.conf.d/99ipv4-only --- gce-compute-image-packages-20190801/packages/google-compute-engine/src/etc/apt/apt.conf.d/99ipv4-only 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/etc/apt/apt.conf.d/99ipv4-only 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -# Force IPv4 for Apt. 
-Acquire::ForceIPv4 "true"; diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/src/etc/dhcp/dhclient.d/google_hostname.sh gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/etc/dhcp/dhclient.d/google_hostname.sh --- gce-compute-image-packages-20190801/packages/google-compute-engine/src/etc/dhcp/dhclient.d/google_hostname.sh 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/etc/dhcp/dhclient.d/google_hostname.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,21 +0,0 @@ -#!/bin/bash -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -google_hostname_config() { - google_set_hostname -} -google_hostname_restore() { - : -} diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/src/etc/init/google-accounts-daemon.conf gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/etc/init/google-accounts-daemon.conf --- gce-compute-image-packages-20190801/packages/google-compute-engine/src/etc/init/google-accounts-daemon.conf 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/etc/init/google-accounts-daemon.conf 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -# Manages accounts from metadata SSH keys. 
-start on started google-network-daemon -oom -16 - -respawn -exec /usr/bin/google_accounts_daemon diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/src/etc/init/google-clock-skew-daemon.conf gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/etc/init/google-clock-skew-daemon.conf --- gce-compute-image-packages-20190801/packages/google-compute-engine/src/etc/init/google-clock-skew-daemon.conf 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/etc/init/google-clock-skew-daemon.conf 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -# Sync the system clock on migration. -start on started google-network-daemon - -respawn -exec /usr/bin/google_clock_skew_daemon diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/src/etc/init/google-instance-setup.conf gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/etc/init/google-instance-setup.conf --- gce-compute-image-packages-20190801/packages/google-compute-engine/src/etc/init/google-instance-setup.conf 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/etc/init/google-instance-setup.conf 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -# Runs instance setup on boot. -start on started rsyslog - -task - -exec /usr/bin/google_instance_setup diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/src/etc/init/google-network-daemon.conf gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/etc/init/google-network-daemon.conf --- gce-compute-image-packages-20190801/packages/google-compute-engine/src/etc/init/google-network-daemon.conf 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/etc/init/google-network-daemon.conf 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -# Manages network interfaces. 
-start on stopped google-instance-setup - -respawn -exec /usr/bin/google_network_daemon diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/src/etc/init/google-shutdown-scripts.conf gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/etc/init/google-shutdown-scripts.conf --- gce-compute-image-packages-20190801/packages/google-compute-engine/src/etc/init/google-shutdown-scripts.conf 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/etc/init/google-shutdown-scripts.conf 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -# Runs a shutdown script from metadata. -start on starting rc RUNLEVEL=[06] -task - -exec /usr/bin/google_metadata_script_runner --script-type shutdown diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/src/etc/init/google-startup-scripts.conf gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/etc/init/google-startup-scripts.conf --- gce-compute-image-packages-20190801/packages/google-compute-engine/src/etc/init/google-startup-scripts.conf 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/etc/init/google-startup-scripts.conf 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ -# Runs a startup script from metadata. 
-start on started google-network-daemon and startup - -exec /usr/bin/google_metadata_script_runner --script-type startup diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/src/etc/modprobe.d/gce-blacklist.conf gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/etc/modprobe.d/gce-blacklist.conf --- gce-compute-image-packages-20190801/packages/google-compute-engine/src/etc/modprobe.d/gce-blacklist.conf 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/etc/modprobe.d/gce-blacklist.conf 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -# nouveau does not work with GCE GPU's. -blacklist nouveau - -# GCE does not have a floppy device. -blacklist floppy diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/src/etc/rsyslog.d/90-google.conf gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/etc/rsyslog.d/90-google.conf --- gce-compute-image-packages-20190801/packages/google-compute-engine/src/etc/rsyslog.d/90-google.conf 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/etc/rsyslog.d/90-google.conf 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -# Google Compute Engine default console logging. -# -# daemon: logging from Google provided daemons. -# kern: logging information in case of an unexpected crash during boot. 
-# -daemon,kern.* /dev/console diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/src/etc/sysctl.d/11-gce-network-security.conf gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/etc/sysctl.d/11-gce-network-security.conf --- gce-compute-image-packages-20190801/packages/google-compute-engine/src/etc/sysctl.d/11-gce-network-security.conf 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/etc/sysctl.d/11-gce-network-security.conf 1970-01-01 00:00:00.000000000 +0000 @@ -1,61 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Google-recommended kernel parameters - -# Turn on SYN-flood protections. Starting with 2.6.26, there is no loss -# of TCP functionality/features under normal conditions. When flood -# protections kick in under high unanswered-SYN load, the system -# should remain more stable, with a trade off of some loss of TCP -# functionality/features (e.g. TCP Window scaling). 
-net.ipv4.tcp_syncookies=1 - -# Ignore source-routed packets -net.ipv4.conf.all.accept_source_route=0 -net.ipv4.conf.default.accept_source_route=0 - -# Ignore ICMP redirects from non-GW hosts -net.ipv4.conf.all.accept_redirects=0 -net.ipv4.conf.default.accept_redirects=0 -net.ipv4.conf.all.secure_redirects=1 -net.ipv4.conf.default.secure_redirects=1 - -# Don't pass traffic between networks or act as a router -net.ipv4.ip_forward=0 -net.ipv4.conf.all.send_redirects=0 -net.ipv4.conf.default.send_redirects=0 - -# Turn on Source Address Verification in all interfaces to -# prevent some spoofing attacks. -net.ipv4.conf.all.rp_filter=1 -net.ipv4.conf.default.rp_filter=1 - -# Ignore ICMP broadcasts to avoid participating in Smurf attacks -net.ipv4.icmp_echo_ignore_broadcasts=1 - -# Ignore bad ICMP errors -net.ipv4.icmp_ignore_bogus_error_responses=1 - -# Log spoofed, source-routed, and redirect packets -net.ipv4.conf.all.log_martians=1 -net.ipv4.conf.default.log_martians=1 - -# RFC 1337 fix -net.ipv4.tcp_rfc1337=1 - -# Addresses of mmap base, heap, stack and VDSO page are randomized -kernel.randomize_va_space=2 - -# Reboot the machine soon after a kernel panic. 
-kernel.panic=10 diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/src/lib/systemd/system/google-accounts-daemon.service gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/lib/systemd/system/google-accounts-daemon.service --- gce-compute-image-packages-20190801/packages/google-compute-engine/src/lib/systemd/system/google-accounts-daemon.service 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/lib/systemd/system/google-accounts-daemon.service 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -[Unit] -Description=Google Compute Engine Accounts Daemon -After=network.target google-instance-setup.service google-network-daemon.service - -[Service] -Type=simple -ExecStart=/usr/bin/google_accounts_daemon -OOMScoreAdjust=-999 -Restart=always -StandardOutput=journal+console - -[Install] -WantedBy=multi-user.target diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/src/lib/systemd/system/google-clock-skew-daemon.service gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/lib/systemd/system/google-clock-skew-daemon.service --- gce-compute-image-packages-20190801/packages/google-compute-engine/src/lib/systemd/system/google-clock-skew-daemon.service 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/lib/systemd/system/google-clock-skew-daemon.service 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ -[Unit] -Description=Google Compute Engine Clock Skew Daemon -After=network.target google-instance-setup.service google-network-daemon.service - -[Service] -Type=simple -ExecStart=/usr/bin/google_clock_skew_daemon -StandardOutput=journal+console - -[Install] -WantedBy=multi-user.target diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/src/lib/systemd/system/google-instance-setup.service 
gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/lib/systemd/system/google-instance-setup.service --- gce-compute-image-packages-20190801/packages/google-compute-engine/src/lib/systemd/system/google-instance-setup.service 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/lib/systemd/system/google-instance-setup.service 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -[Unit] -Description=Google Compute Engine Instance Setup -After=network-online.target network.target rsyslog.service -Before=sshd.service - -[Service] -Type=oneshot -ExecStart=/usr/bin/google_instance_setup -StandardOutput=journal+console - -[Install] -WantedBy=sshd.service -WantedBy=multi-user.target diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/src/lib/systemd/system/google-network-daemon.service gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/lib/systemd/system/google-network-daemon.service --- gce-compute-image-packages-20190801/packages/google-compute-engine/src/lib/systemd/system/google-network-daemon.service 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/lib/systemd/system/google-network-daemon.service 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -[Unit] -Description=Google Compute Engine Network Daemon -After=network-online.target network.target -After=google-instance-setup.service -PartOf=network.service - -[Service] -Type=simple -ExecStart=/usr/bin/google_network_daemon -StandardOutput=journal+console - -[Install] -WantedBy=multi-user.target diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/src/lib/systemd/system/google-shutdown-scripts.service gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/lib/systemd/system/google-shutdown-scripts.service --- 
gce-compute-image-packages-20190801/packages/google-compute-engine/src/lib/systemd/system/google-shutdown-scripts.service 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/lib/systemd/system/google-shutdown-scripts.service 1970-01-01 00:00:00.000000000 +0000 @@ -1,15 +0,0 @@ -[Unit] -Description=Google Compute Engine Shutdown Scripts -After=network-online.target network.target rsyslog.service -After=google-instance-setup.service google-network-daemon.service - -[Service] -ExecStart=/bin/true -ExecStop=/usr/bin/google_metadata_script_runner --script-type shutdown -Type=oneshot -RemainAfterExit=true -TimeoutStopSec=0 -StandardOutput=journal+console - -[Install] -WantedBy=multi-user.target diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/src/lib/systemd/system/google-startup-scripts.service gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/lib/systemd/system/google-startup-scripts.service --- gce-compute-image-packages-20190801/packages/google-compute-engine/src/lib/systemd/system/google-startup-scripts.service 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/lib/systemd/system/google-startup-scripts.service 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -[Unit] -Description=Google Compute Engine Startup Scripts -After=network-online.target network.target rsyslog.service -After=google-instance-setup.service google-network-daemon.service - -[Service] -ExecStart=/usr/bin/google_metadata_script_runner --script-type startup -KillMode=process -Type=oneshot -StandardOutput=journal+console - -[Install] -WantedBy=multi-user.target diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/src/lib/systemd/system-preset/90-google-compute-engine.preset 
gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/lib/systemd/system-preset/90-google-compute-engine.preset --- gce-compute-image-packages-20190801/packages/google-compute-engine/src/lib/systemd/system-preset/90-google-compute-engine.preset 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/lib/systemd/system-preset/90-google-compute-engine.preset 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -enable google-accounts-daemon.service -enable google-clock-skew-daemon.service -enable google-instance-setup.service -enable google-network-daemon.service -enable google-shutdown-scripts.service -enable google-startup-scripts.service diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/src/lib/udev/rules.d/64-gce-disk-removal.rules gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/lib/udev/rules.d/64-gce-disk-removal.rules --- gce-compute-image-packages-20190801/packages/google-compute-engine/src/lib/udev/rules.d/64-gce-disk-removal.rules 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/lib/udev/rules.d/64-gce-disk-removal.rules 1970-01-01 00:00:00.000000000 +0000 @@ -1,17 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# When a disk is removed, unmount any remaining attached volumes. 
- -ACTION=="remove", SUBSYSTEM=="block", KERNEL=="sd*|vd*", RUN+="/bin/sh -c '/bin/umount -fl /dev/$name && /usr/bin/logger -p daemon.warn -s WARNING: hot-removed /dev/$name that was still mounted, data may have been corrupted'" diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/src/lib/udev/rules.d/65-gce-disk-naming.rules gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/lib/udev/rules.d/65-gce-disk-naming.rules --- gce-compute-image-packages-20190801/packages/google-compute-engine/src/lib/udev/rules.d/65-gce-disk-naming.rules 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/lib/udev/rules.d/65-gce-disk-naming.rules 1970-01-01 00:00:00.000000000 +0000 @@ -1,38 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Name the attached disks as the specified by deviceName. 
- -ACTION!="add|change", GOTO="gce_disk_naming_end" -SUBSYSTEM!="block", GOTO="gce_disk_naming_end" - -# SCSI naming -KERNEL=="sd*|vd*", IMPORT{program}="scsi_id --export --whitelisted -d $tempnode" - -# NVME naming -KERNEL=="nvme0n1*", ENV{ID_SERIAL_SHORT}="local-nvme-ssd-0" -KERNEL=="nvme0n2*", ENV{ID_SERIAL_SHORT}="local-nvme-ssd-1" -KERNEL=="nvme0n3*", ENV{ID_SERIAL_SHORT}="local-nvme-ssd-2" -KERNEL=="nvme0n4*", ENV{ID_SERIAL_SHORT}="local-nvme-ssd-3" -KERNEL=="nvme0n5*", ENV{ID_SERIAL_SHORT}="local-nvme-ssd-4" -KERNEL=="nvme0n6*", ENV{ID_SERIAL_SHORT}="local-nvme-ssd-5" -KERNEL=="nvme0n7*", ENV{ID_SERIAL_SHORT}="local-nvme-ssd-6" -KERNEL=="nvme0n8*", ENV{ID_SERIAL_SHORT}="local-nvme-ssd-7" -KERNEL=="nvme*", ENV{ID_SERIAL}="Google_EphemeralDisk_$env{ID_SERIAL_SHORT}" - -# Symlinks -KERNEL=="sd*|vd*|nvme*", ENV{DEVTYPE}=="disk", SYMLINK+="disk/by-id/google-$env{ID_SERIAL_SHORT}" -KERNEL=="sd*|vd*|nvme*", ENV{DEVTYPE}=="partition", SYMLINK+="disk/by-id/google-$env{ID_SERIAL_SHORT}-part%n" - -LABEL="gce_disk_naming_end" diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/src/usr/bin/google_optimize_local_ssd gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/usr/bin/google_optimize_local_ssd --- gce-compute-image-packages-20190801/packages/google-compute-engine/src/usr/bin/google_optimize_local_ssd 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/usr/bin/google_optimize_local_ssd 1970-01-01 00:00:00.000000000 +0000 @@ -1,95 +0,0 @@ -#!/bin/bash -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -total_cpus=$(nproc) - -config_nvme() -{ - current_cpu=0 - for dev in /sys/bus/pci/drivers/nvme/* - do - if [ ! -d "$dev" ] - then - continue - fi - for irq_info in $dev/msi_irqs/* - do - if [ ! -f "$irq_info" ] - then - continue - fi - current_cpu=$((current_cpu % total_cpus)) - cpu_mask=$(printf "%x" $((1< "/proc/irq/$irq/smp_affinity" - current_cpu=$((current_cpu+1)) - done - done -} - -config_scsi() -{ - irqs=() - for device in /sys/bus/virtio/drivers/virtio_scsi/virtio* - do - ssd=0 - for target_path in $device/host*/target*/* - do - if [ ! -f "$target_path/model" ] - then - continue - fi - model=$(cat "$target_path/model") - if [[ $model =~ .*EphemeralDisk.* ]] - then - ssd=1 - for queue_path in $target_path/block/sd*/queue - do - echo noop > "$queue_path/scheduler" - echo 0 > "$queue_path/add_random" - echo 512 > "$queue_path/nr_requests" - echo 0 > "$queue_path/rotational" - echo 0 > "$queue_path/rq_affinity" - echo 1 > "$queue_path/nomerges" - done - fi - done - if [[ $ssd == 1 ]] - then - request_queue=$(basename "$device")-request - irq=$(cat /proc/interrupts | grep "$request_queue" | awk '{print $1}'| sed 's/://') - irqs+=($irq) - fi - done - irq_count=${#irqs[@]} - if [ "$irq_count" != 0 ] - then - stride=$((total_cpus / irq_count)) - stride=$((stride < 1 ? 1 : stride)) - current_cpu=0 - for irq in "${irqs[@]}" - do - current_cpu=$(($current_cpu % $total_cpus)) - cpu_mask=$(printf "%x" $((1<<$current_cpu))) - echo "Setting IRQ $irq smp_affinity to $cpu_mask." 
- echo "$cpu_mask" > "/proc/irq/$irq/smp_affinity" - current_cpu=$((current_cpu+stride)) - done - fi -} - -config_nvme -config_scsi diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/src/usr/bin/google_set_hostname gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/usr/bin/google_set_hostname --- gce-compute-image-packages-20190801/packages/google-compute-engine/src/usr/bin/google_set_hostname 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/usr/bin/google_set_hostname 1970-01-01 00:00:00.000000000 +0000 @@ -1,60 +0,0 @@ -#!/bin/bash -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Deal with a new hostname assignment. - -if [ -n "$new_host_name" ] && [ -n "$new_ip_address" ]; then - # Delete entries with new_host_name or new_ip_address in /etc/hosts. - sed -i"" '/Added by Google/d' /etc/hosts - - # Add an entry for our new_host_name/new_ip_address in /etc/hosts. - echo "${new_ip_address} ${new_host_name} ${new_host_name%%.*} # Added by Google" >> /etc/hosts - - # Add an entry for reaching the metadata server in /etc/hosts. - echo "169.254.169.254 metadata.google.internal # Added by Google" >> /etc/hosts -fi - -# /sbin/dhclient-scripts in both ubuntu and centos have some problems for us: -# 1) BOUND doesn't always set hostname (e.g. 
if old_host_name is unset in -# precise pangolin) -# 2) Using too long of a FQDN as a hostname causes some tools to break in -# some distros (e.g. ssh-keygen) and hostname tool complains when given -# a FQDN that is > 64 bytes. -# -# As a result, we set the host name in all circumstances here, to the truncated -# unqualified domain name. - -if [ -n "$new_host_name" ]; then - hostname "${new_host_name%%.*}" - - # If NetworkManager is installed set the hostname with nmcli. - # to resolve issues with NetworkManager resetting the hostname - # to the FQDN on DHCP renew. - nmcli=$(which nmcli 2> /dev/null) - if [ -x "$nmcli" ]; then - nmcli general hostname "${new_host_name%%.*}" - fi - - # Restart rsyslog to update the hostname. - systemctl=$(which systemctl 2> /dev/null) - if [ -x "$systemctl" ]; then - hasrsyslog=$($systemctl | grep rsyslog | cut -f1 -d' ') - if [ ! -z "$hasrsyslog" ]; then - $systemctl -q --no-block restart "$hasrsyslog" - fi - else - pkill -HUP syslogd - fi -fi diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/src/usr/bin/google_set_multiqueue gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/usr/bin/google_set_multiqueue --- gce-compute-image-packages-20190801/packages/google-compute-engine/src/usr/bin/google_set_multiqueue 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/usr/bin/google_set_multiqueue 1970-01-01 00:00:00.000000000 +0000 @@ -1,141 +0,0 @@ -#!/bin/bash -# Copyright 2017 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# For a single-queue / no MSI-X virtionet device, sets the IRQ affinities to -# processor 0. For this virtionet configuration, distributing IRQs to all -# processors results in comparatively high cpu utilization and comparatively -# low network bandwidth. -# -# For a multi-queue / MSI-X virtionet device, sets the IRQ affinities to the -# per-IRQ affinity hint. The virtionet driver maps each virtionet TX (RX) queue -# MSI-X interrupt to a unique single CPU if the number of TX (RX) queues equals -# the number of online CPUs. The mapping of network MSI-X interrupt vector to -# CPUs is stored in the virtionet MSI-X interrupt vector affinity hint. This -# configuration allows network traffic to be spread across the CPUs, giving -# each CPU a dedicated TX and RX network queue, while ensuring that all packets -# from a single flow are delivered to the same CPU. - -function is_decimal_int() { - [ "${1}" -eq "${1}" ] > /dev/null 2>&1 -} - -function set_channels() { - ethtool -L "${1}" combined "${2}" > /dev/null 2>&1 -} - -echo "Running $(basename $0)." -NET_DEVS=/sys/bus/virtio/drivers/virtio_net/virtio* - -# Loop through all the virtionet devices and enable multi-queue -if [ -x "$(command -v ethtool)" ]; then - for dev in $NET_DEVS; do - ETH_DEVS=${dev}/net/* - for eth_dev in $ETH_DEVS; do - eth_dev=$(basename "$eth_dev") - if ! errormsg=$(ethtool -l "$eth_dev" 2>&1); then - echo "ethtool says that $eth_dev does not support virtionet multiqueue: $errormsg." 
- continue - fi - num_max_channels=$(ethtool -l "$eth_dev" | grep -m 1 Combined | cut -f2) - [ "${num_max_channels}" -eq "1" ] && continue - if is_decimal_int "$num_max_channels" && \ - set_channels "$eth_dev" "$num_max_channels"; then - echo "Set channels for $eth_dev to $num_max_channels." - else - echo "Could not set channels for $eth_dev to $num_max_channels." - fi - done - done -else - echo "ethtool not found: cannot configure virtionet multiqueue." -fi - -for dev in $NET_DEVS -do - dev=$(basename "$dev") - irq_dir=/proc/irq/* - for irq in $irq_dir - do - smp_affinity="${irq}/smp_affinity_list" - [ ! -f "${smp_affinity}" ] && continue - # Classify this IRQ as virtionet intx, virtionet MSI-X, or non-virtionet - # If the IRQ type is virtionet intx, a subdirectory with the same name as - # the device will be present. If the IRQ type is virtionet MSI-X, then - # a subdirectory of the form -.N will exist. - # In this case, N is the input (output) queue number, and is specified as - # a decimal integer ranging from 0 to K - 1 where K is the number of - # input (output) queues in the virtionet device. - virtionet_intx_dir="${irq}/${dev}" - virtionet_msix_dir_regex=".*/${dev}-(input|output)\.([0-9]+)$" - if [ -d "${virtionet_intx_dir}" ]; then - # All virtionet intx IRQs are delivered to CPU 0 - echo "Setting ${smp_affinity} to 01 for device ${dev}." - echo "01" > "${smp_affinity}" - continue - fi - # Not virtionet intx, probe for MSI-X - virtionet_msix_found=0 - for entry in ${irq}/${dev}*; do - if [[ "$entry" =~ ${virtionet_msix_dir_regex} ]]; then - virtionet_msix_found=1 - queue_num=${BASH_REMATCH[2]} - fi - done - affinity_hint="${irq}/affinity_hint" - [ "$virtionet_msix_found" -eq 0 -o ! -f "${affinity_hint}" ] && continue - - # Set the IRQ CPU affinity to the virtionet-initialized affinity hint - echo "Setting ${smp_affinity} to ${queue_num} for device ${dev}." 
- echo "${queue_num}" > "${smp_affinity}" - real_affinity=`cat ${smp_affinity}` - echo "${smp_affinity}: real affinity ${real_affinity}" - done -done - -XPS=/sys/class/net/e*/queues/tx*/xps_cpus -num_cpus=$(nproc) - -num_queues=0 -for q in $XPS; do - num_queues=$((num_queues + 1)) -done - -# If we have more CPUs than queues, then stripe CPUs across tx affinity -# as CPUNumber % queue_count. -for q in $XPS; do - queue_re=".*tx-([0-9]+).*$" - if [[ "$q" =~ ${queue_re} ]]; then - queue_num=${BASH_REMATCH[1]} - fi - - xps=0 - for cpu in `seq $queue_num $num_queues $((num_cpus - 1))`; do - xps=$((xps | (1 << cpu))) - done - - # Linux xps_cpus requires a hex number with commas every 32 bits. It ignores - # all bits above # cpus, so write a list of comma separated 32 bit hex values - # with a comma between dwords. - xps_dwords=() - for i in $(seq 0 $(((num_cpus - 1) / 32))) - do - xps_dwords+=(`printf "%08x" $((xps & 0xffffffff))`) - done - xps_string=$(IFS=, ; echo "${xps_dwords[*]}") - - - echo ${xps_string} > $q - printf "Queue %d XPS=%s for %s\n" $queue_num `cat $q` $q -done | sort -n -k2 diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine/src/usr/sbin/google-dhclient-script gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/usr/sbin/google-dhclient-script --- gce-compute-image-packages-20190801/packages/google-compute-engine/src/usr/sbin/google-dhclient-script 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine/src/usr/sbin/google-dhclient-script 1970-01-01 00:00:00.000000000 +0000 @@ -1,806 +0,0 @@ -#!/bin/bash -# -# dhclient-script: Network interface configuration script run by -# dhclient based on DHCP client communication -# -# Copyright (C) 2008-2014 Red Hat, Inc. 
-# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -# Author(s): David Cantrell -# Jiri Popelka -# -# ---------- -# This script is a rewrite/reworking on dhclient-script originally -# included as part of dhcp-970306: -# dhclient-script for Linux. Dan Halbert, March, 1997. -# Updated for Linux 2.[12] by Brian J. Murrell, January 1999. -# Modified by David Cantrell for Fedora and RHEL -# -# This script is found in EL 7 and used to fix local routing in EL 6. -# ---------- - -PATH=/bin:/usr/bin:/sbin -# scripts in dhclient.d/ use $SAVEDIR (#833054) -SAVEDIR=/var/lib/dhclient - -LOGFACILITY="local7" -LOGLEVEL="notice" - -ETCDIR="/etc/dhcp" - -logmessage() { - msg="${1}" - logger -p ${LOGFACILITY}.${LOGLEVEL} -t "NET" "dhclient: ${msg}" -} - -eventually_add_hostnames_domain_to_search() { -# For the case when hostname for this machine has a domain that is not in domain_search list -# 1) get a hostname with `ipcalc --hostname` or `hostname` -# 2) get the domain from this hostname -# 3) add this domain to search line in resolv.conf if it's not already -# there (domain list that we have recently added there is a parameter of this function) -# We can't do this directly when generating resolv.conf in make_resolv_conf(), because -# we need to first save the resolv.conf with obtained values before we can call `ipcalc --hostname`. 
-# See bug 637763 - search="${1}" - if need_hostname; then - status=1 - if [ -n "${new_ip_address}" ]; then - eval $(/bin/ipcalc --silent --hostname ${new_ip_address} ; echo "status=$?") - elif [ -n "${new_ip6_address}" ]; then - eval $(/bin/ipcalc --silent --hostname ${new_ip6_address} ; echo "status=$?") - fi - - if [ ${status} -eq 0 ]; then - domain=$(echo $HOSTNAME | cut -s -d "." -f 2-) - fi - else - domain=$(hostname 2>/dev/null | cut -s -d "." -f 2-) - fi - - if [ -n "${domain}" ] && - [ ! "${domain}" = "localdomain" ] && - [ ! "${domain}" = "localdomain6" ] && - [ ! "${domain}" = "(none)" ] && - [[ ! "${domain}" = *\ * ]]; then - is_in="false" - for s in ${search}; do - if [ "${s}" = "${domain}" ] || - [ "${s}" = "${domain}." ]; then - is_in="true" - fi - done - - if [ "${is_in}" = "false" ]; then - # Add domain name to search list (#637763) - sed -i"" -e "s/${search}/${search} ${domain}/" /etc/resolv.conf - fi - fi -} - -make_resolv_conf() { - [ "${PEERDNS}" = "no" ] && return - - if [ "${reason}" = "RENEW" ] && - [ "${new_domain_name}" = "${old_domain_name}" ] && - [ "${new_domain_name_servers}" = "${old_domain_name_servers}" ]; then - return - fi - - if [ -n "${new_domain_name}" ] || - [ -n "${new_domain_name_servers}" ] || - [ -n "${new_domain_search}" ]; then - rscf="$(mktemp ${TMPDIR:-/tmp}/XXXXXX)" - [[ -z "${rscf}" ]] && return - echo "; generated by /usr/sbin/dhclient-script" > ${rscf} - - if [ -n "${SEARCH}" ]; then - search="${SEARCH}" - else - if [ -n "${new_domain_search}" ]; then - # Remove instaces of \032 (#450042) - search="${new_domain_search//\\032/ }" - elif [ -n "${new_domain_name}" ]; then - # Note that the DHCP 'Domain Name Option' is really just a domain - # name, and that this practice of using the domain name option as - # a search path is both nonstandard and deprecated. 
- search="${new_domain_name}" - fi - fi - - if [ -n "${search}" ]; then - echo "search ${search}" >> $rscf - fi - - if [ -n "${RES_OPTIONS}" ]; then - echo "options ${RES_OPTIONS}" >> ${rscf} - fi - - for nameserver in ${new_domain_name_servers} ; do - echo "nameserver ${nameserver}" >> ${rscf} - done - - change_resolv_conf ${rscf} - rm -f ${rscf} - - if [ -n "${search}" ]; then - eventually_add_hostnames_domain_to_search "${search}" - fi - elif [ -n "${new_dhcp6_name_servers}" ] || - [ -n "${new_dhcp6_domain_search}" ]; then - rscf="$(mktemp ${TMPDIR:-/tmp}/XXXXXX)" - [[ -z "${rscf}" ]] && return - echo "; generated by /usr/sbin/dhclient-script" > ${rscf} - - if [ -n "${SEARCH}" ]; then - search="${SEARCH}" - else - if [ -n "${new_dhcp6_domain_search}" ]; then - search="${new_dhcp6_domain_search//\\032/ }" - fi - fi - - if [ -n "${search}" ]; then - echo "search ${search}" >> $rscf - fi - - if [ -n "${RES_OPTIONS}" ]; then - echo "options ${RES_OPTIONS}" >> ${rscf} - fi - - shopt -s nocasematch - for nameserver in ${new_dhcp6_name_servers} ; do - # If the nameserver has a link-local address - # add a (interface name) to it. - if [[ "$nameserver" =~ ^fe80:: ]] - then - zone_id="%${interface}" - else - zone_id= - fi - echo "nameserver ${nameserver}$zone_id" >> ${rscf} - done - shopt -u nocasematch - - change_resolv_conf ${rscf} - rm -f ${rscf} - - if [ -n "${search}" ]; then - eventually_add_hostnames_domain_to_search "${search}" - fi - fi -} - -exit_with_hooks() { - exit_status="${1}" - - if [ -x ${ETCDIR}/dhclient-exit-hooks ]; then - . ${ETCDIR}/dhclient-exit-hooks - fi - - exit ${exit_status} -} - -quad2num() { - if [ $# -eq 4 ]; then - let n="${1} << 24 | ${2} << 16 | ${3} << 8 | ${4}" - echo "${n}" - return 0 - else - echo "0" - return 1 - fi -} - -ip2num() { - IFS="." 
quad2num ${1} -} - -num2ip() { - let n="${1}" - let o1="(n >> 24) & 0xff" - let o2="(n >> 16) & 0xff" - let o3="(n >> 8) & 0xff" - let o4="n & 0xff" - echo "${o1}.${o2}.${o3}.${o4}" -} - -get_network_address() { -# get network address for the given IP address and (netmask or prefix) - ip="${1}" - nm="${2}" - - if [ -n "${ip}" -a -n "${nm}" ]; then - if [[ "${nm}" = *.* ]]; then - ipcalc -s -n ${ip} ${nm} | cut -d '=' -f 2 - else - ipcalc -s -n ${ip}/${nm} | cut -d '=' -f 2 - fi - fi -} - -get_prefix() { -# get prefix for the given IP address and mask - ip="${1}" - nm="${2}" - - if [ -n "${ip}" -a -n "${nm}" ]; then - ipcalc -s -p ${ip} ${nm} | cut -d '=' -f 2 - fi -} - -class_bits() { - let ip=$(IFS='.' ip2num $1) - let bits=32 - let mask='255' - for ((i=0; i <= 3; i++, 'mask<<=8')); do - let v='ip&mask' - if [ "$v" -eq 0 ] ; then - let bits-=8 - else - break - fi - done - echo $bits -} - -is_router_reachable() { - # handle DHCP servers that give us a router not on our subnet - router="${1}" - routersubnet="$(get_network_address ${router} ${new_subnet_mask})" - mysubnet="$(get_network_address ${new_ip_address} ${new_subnet_mask})" - - if [ ! "${routersubnet}" = "${mysubnet}" ]; then - ip -4 route replace ${router}/32 dev ${interface} - if [ "$?" -ne 0 ]; then - logmessage "failed to create host route for ${router}" - return 1 - fi - fi - - return 0 -} - -add_default_gateway() { - router="${1}" - - if is_router_reachable ${router} ; then - metric="" - if [ $# -gt 1 ] && [ ${2} -gt 0 ]; then - metric="metric ${2}" - fi - ip -4 route replace default via ${router} dev ${interface} ${metric} - if [ $? 
-ne 0 ]; then - logmessage "failed to create default route: ${router} dev ${interface} ${metric}" - return 1 - else - return 0 - fi - fi - - return 1 -} - -execute_client_side_configuration_scripts() { -# execute any additional client side configuration scripts we have - if [ "${1}" == "config" ] || [ "${1}" == "restore" ]; then - for f in ${ETCDIR}/dhclient.d/*.sh ; do - if [ -x ${f} ]; then - subsystem="${f%.sh}" - subsystem="${subsystem##*/}" - . ${f} - "${subsystem}_${1}" - fi - done - fi -} - -flush_dev() { -# Instead of bringing the interface down (#574568) -# explicitly clear the ARP cache and flush all addresses & routes. - ip -4 addr flush dev ${1} >/dev/null 2>&1 - ip -4 route flush dev ${1} >/dev/null 2>&1 - ip -4 neigh flush dev ${1} >/dev/null 2>&1 -} - -dhconfig() { - if [ -n "${old_ip_address}" ] && [ -n "${alias_ip_address}" ] && - [ ! "${alias_ip_address}" = "${old_ip_address}" ]; then - # possible new alias, remove old alias first - ip -4 addr del ${old_ip_address} dev ${interface} label ${interface}:0 - fi - - if [ -n "${old_ip_address}" ] && - [ ! "${old_ip_address}" = "${new_ip_address}" ]; then - # IP address changed. Delete all routes, and clear the ARP cache. - flush_dev ${interface} - fi - - if [ "${reason}" = "BOUND" ] || [ "${reason}" = "REBOOT" ] || - [ ! "${old_ip_address}" = "${new_ip_address}" ] || - [ ! "${old_subnet_mask}" = "${new_subnet_mask}" ] || - [ ! "${old_network_number}" = "${new_network_number}" ] || - [ ! "${old_broadcast_address}" = "${new_broadcast_address}" ] || - [ ! "${old_routers}" = "${new_routers}" ] || - [ ! "${old_interface_mtu}" = "${new_interface_mtu}" ]; then - ip -4 addr add ${new_ip_address}/${new_prefix} broadcast ${new_broadcast_address} dev ${interface} \ - valid_lft ${new_dhcp_lease_time} preferred_lft ${new_dhcp_lease_time} >/dev/null 2>&1 - ip link set dev ${interface} up - - # The 576 MTU is only used for X.25 and dialup connections - # where the admin wants low latency. 
Such a low MTU can cause - # problems with UDP traffic, among other things. As such, - # disallow MTUs from 576 and below by default, so that broken - # MTUs are ignored, but higher stuff is allowed (1492, 1500, etc). - if [ -n "${new_interface_mtu}" ] && [ ${new_interface_mtu} -gt 576 ]; then - ip link set dev ${interface} mtu ${new_interface_mtu} - fi - - # static routes - if [ -n "${new_classless_static_routes}" ] || - [ -n "${new_static_routes}" ]; then - if [ -n "${new_classless_static_routes}" ]; then - IFS=', |' static_routes=(${new_classless_static_routes}) - else - IFS=', |' static_routes=(${new_static_routes}) - fi - route_targets=() - - for((i=0; i<${#static_routes[@]}; i+=2)); do - target=${static_routes[$i]} - if [ -n "${new_classless_static_routes}" ]; then - if [ ${target} = "0" ]; then - # If the DHCP server returns both a Classless Static Routes option and - # a Router option, the DHCP client MUST ignore the Router option. (RFC3442) - new_routers="" - prefix="0" - else - prefix=${target%%.*} - target=${target#*.} - IFS="." target_arr=(${target}) - unset IFS - ((pads=4-${#target_arr[@]})) - for j in $(seq $pads); do - target="${target}.0" - done - - # Client MUST zero any bits in the subnet number where the corresponding bit in the mask is zero. - # In other words, the subnet number installed in the routing table is the logical AND of - # the subnet number and subnet mask given in the Classless Static Routes option. (RFC3442) - target="$(get_network_address ${target} ${prefix})" - fi - else - prefix=$(class_bits ${target}) - fi - gateway=${static_routes[$i+1]} - - # special case 0.0.0.0 to allow static routing for link-local addresses - # (including IPv4 multicast) which will not have a next-hop (#769463, #787318) - if [ "${gateway}" = "0.0.0.0" ]; then - valid_gateway=0 - scope='scope link' - else - is_router_reachable ${gateway} - valid_gateway=$? 
- scope='' - fi - if [ ${valid_gateway} -eq 0 ]; then - metric='' - for t in ${route_targets[@]}; do - if [ ${t} = ${target} ]; then - if [ -z "${metric}" ]; then - metric=1 - else - ((metric=metric+1)) - fi - fi - done - - if [ -n "${metric}" ]; then - metric="metric ${metric}" - fi - - ip -4 route replace ${target}/${prefix} proto static via ${gateway} dev ${interface} ${metric} ${scope} - - if [ $? -ne 0 ]; then - logmessage "failed to create static route: ${target}/${prefix} via ${gateway} dev ${interface} ${metric}" - else - route_targets=(${route_targets[@]} ${target}) - fi - fi - done - fi - - # gateways - if [[ ( "${DEFROUTE}" != "no" ) && - (( -z "${GATEWAYDEV}" ) || ( "${GATEWAYDEV}" = "${interface}" )) ]]; then - if [[ ( -z "$GATEWAY" ) || - (( -n "$DHCLIENT_IGNORE_GATEWAY" ) && ( "$DHCLIENT_IGNORE_GATEWAY" = [Yy]* )) ]]; then - metric="${METRIC:-}" - let i="${METRIC:-0}" - default_routers=() - - for router in ${new_routers} ; do - added_router=- - - for r in ${default_routers[@]} ; do - if [ "${r}" = "${router}" ]; then - added_router=1 - fi - done - - if [ -z "${router}" ] || - [ "${added_router}" = "1" ] || - [ $(IFS=. 
ip2num ${router}) -le 0 ] || - [[ ( "${router}" = "${new_broadcast_address}" ) && - ( "${new_subnet_mask}" != "255.255.255.255" ) ]]; then - continue - fi - - default_routers=(${default_routers[@]} ${router}) - add_default_gateway ${router} ${metric} - let i=i+1 - metric=${i} - done - elif [ -n "${GATEWAY}" ]; then - routersubnet=$(get_network_address ${GATEWAY} ${new_subnet_mask}) - mysubnet=$(get_network_address ${new_ip_address} ${new_subnet_mask}) - - if [ "${routersubnet}" = "${mysubnet}" ]; then - ip -4 route replace default via ${GATEWAY} dev ${interface} - fi - fi - fi - - else # RENEW||REBIND - only update address lifetimes - ip -4 addr change ${new_ip_address}/${new_prefix} broadcast ${new_broadcast_address} dev ${interface} \ - valid_lft ${new_dhcp_lease_time} preferred_lft ${new_dhcp_lease_time} >/dev/null 2>&1 - fi - - if [ ! "${new_ip_address}" = "${alias_ip_address}" ] && - [ -n "${alias_ip_address}" ]; then - # Reset the alias address (fix: this should really only do this on changes) - ip -4 addr flush dev ${interface} label ${interface}:0 >/dev/null 2>&1 - ip -4 addr add ${alias_ip_address}/${alias_prefix} broadcast ${alias_broadcast_address} dev ${interface} label ${interface}:0 - ip -4 route replace ${alias_ip_address}/32 dev ${interface} - fi - - # After dhclient brings an interface UP with a new IP address, subnet mask, - # and routes, in the REBOOT/BOUND states -> search for "dhclient-up-hooks". - if [ "${reason}" = "BOUND" ] || [ "${reason}" = "REBOOT" ] || - [ ! "${old_ip_address}" = "${new_ip_address}" ] || - [ ! "${old_subnet_mask}" = "${new_subnet_mask}" ] || - [ ! "${old_network_number}" = "${new_network_number}" ] || - [ ! "${old_broadcast_address}" = "${new_broadcast_address}" ] || - [ ! "${old_routers}" = "${new_routers}" ] || - [ ! "${old_interface_mtu}" = "${new_interface_mtu}" ]; then - - if [ -x ${ETCDIR}/dhclient-${interface}-up-hooks ]; then - . 
${ETCDIR}/dhclient-${interface}-up-hooks - elif [ -x ${ETCDIR}/dhclient-up-hooks ]; then - . ${ETCDIR}/dhclient-up-hooks - fi - fi - - make_resolv_conf - - if [ -n "${new_host_name}" ] && need_hostname; then - hostname ${new_host_name} || echo "See -nc option in dhclient(8) man page." - fi - - if [[ ( "${DHCP_TIME_OFFSET_SETS_TIMEZONE}" = [yY1]* ) && - ( -n "${new_time_offset}" ) ]]; then - # DHCP option "time-offset" is requested by default and should be - # handled. The geographical zone abbreviation cannot be determined - # from the GMT offset, but the $ZONEINFO/Etc/GMT$offset file can be - # used - note: this disables DST. - ((z=new_time_offset/3600)) - ((hoursWest=$(printf '%+d' $z))) - - if (( $hoursWest < 0 )); then - # tzdata treats negative 'hours west' as positive 'gmtoff'! - ((hoursWest*=-1)) - fi - - tzfile=/usr/share/zoneinfo/Etc/GMT$(printf '%+d' ${hoursWest}) - if [ -e ${tzfile} ]; then - cp -fp ${tzfile} /etc/localtime - touch /etc/localtime - fi - fi - - execute_client_side_configuration_scripts "config" -} - -# Section 18.1.8. (Receipt of Reply Messages) of RFC 3315 says: -# The client SHOULD perform duplicate address detection on each of -# the addresses in any IAs it receives in the Reply message before -# using that address for traffic. 
-add_ipv6_addr_with_DAD() { - ip -6 addr add ${new_ip6_address}/${new_ip6_prefixlen} \ - dev ${interface} scope global valid_lft ${new_max_life} \ - preferred_lft ${new_preferred_life} - - # repeatedly test whether newly added address passed - # duplicate address detection (DAD) - for i in $(seq 5); do - sleep 1 # give the DAD some time - - addr=$(ip -6 addr show dev ${interface} \ - | grep ${new_ip6_address}/${new_ip6_prefixlen}) - - # tentative flag == DAD is still not complete - tentative=$(echo "${addr}" | grep tentative) - # dadfailed flag == address is already in use somewhere else - dadfailed=$(echo "${addr}" | grep dadfailed) - - if [ -n "${dadfailed}" ] ; then - # address was added with valid_lft/preferred_lft 'forever', remove it - ip -6 addr del ${new_ip6_address}/${new_ip6_prefixlen} dev ${interface} - exit_with_hooks 3 - fi - if [ -z "${tentative}" ] ; then - if [ -n "${addr}" ]; then - # DAD is over - return 0 - else - # address was auto-removed (or not added at all) - exit_with_hooks 3 - fi - fi - done - return 0 -} - -dh6config() { - if [ -n "${old_ip6_prefix}" ] || - [ -n "${new_ip6_prefix}" ]; then - echo Prefix ${reason} old=${old_ip6_prefix} new=${new_ip6_prefix} - exit_with_hooks 0 - fi - - case "${reason}" in - BOUND6) - if [ -z "${new_ip6_address}" ] || - [ -z "${new_ip6_prefixlen}" ]; then - exit_with_hooks 2 - fi - - add_ipv6_addr_with_DAD - - make_resolv_conf - ;; - - RENEW6|REBIND6) - if [[ -n "${new_ip6_address}" ]] && - [[ -n "${new_ip6_prefixlen}" ]]; then - if [[ ! "${new_ip6_address}" = "${old_ip6_address}" ]]; then - add_ipv6_addr_with_DAD - else # only update address lifetimes - ip -6 addr change ${new_ip6_address}/${new_ip6_prefixlen} \ - dev ${interface} scope global valid_lft ${new_max_life} \ - preferred_lft ${new_preferred_life} - fi - fi - - if [ ! "${new_dhcp6_name_servers}" = "${old_dhcp6_name_servers}" ] || - [ ! 
"${new_dhcp6_domain_search}" = "${old_dhcp6_domain_search}" ]; then - make_resolv_conf - fi - ;; - - DEPREF6) - if [ -z "${new_ip6_prefixlen}" ]; then - exit_with_hooks 2 - fi - - ip -6 addr change ${new_ip6_address}/${new_ip6_prefixlen} \ - dev ${interface} scope global preferred_lft 0 - ;; - esac - - execute_client_side_configuration_scripts "config" -} - - -# -# ### MAIN -# - -if [ -x ${ETCDIR}/dhclient-enter-hooks ]; then - exit_status=0 - - # dhclient-enter-hooks can abort dhclient-script by setting - # the exit_status variable to a non-zero value - . ${ETCDIR}/dhclient-enter-hooks - if [ ${exit_status} -ne 0 ]; then - exit ${exit_status} - fi -fi - -if [ ! -r /etc/sysconfig/network-scripts/network-functions ]; then - echo "Missing /etc/sysconfig/network-scripts/network-functions, exiting." >&2 - exit 1 -fi - -if [ ! -r /etc/rc.d/init.d/functions ]; then - echo "Missing /etc/rc.d/init.d/functions, exiting." >&2 - exit 1 -fi - -. /etc/sysconfig/network-scripts/network-functions -. /etc/rc.d/init.d/functions - -if [ -f /etc/sysconfig/network ]; then - . /etc/sysconfig/network -fi - -if [ -f /etc/sysconfig/networking/network ]; then - . /etc/sysconfig/networking/network -fi - -cd /etc/sysconfig/network-scripts -CONFIG="${interface}" -need_config ${CONFIG} -source_config >/dev/null 2>&1 - -new_prefix="$(get_prefix ${new_ip_address} ${new_subnet_mask})" -old_prefix="$(get_prefix ${old_ip_address} ${old_subnet_mask})" -alias_prefix="$(get_prefix ${alias_ip_address} ${alias_subnet_mask})" - -case "${reason}" in - MEDIUM|ARPCHECK|ARPSEND) - # Do nothing - exit_with_hooks 0 - ;; - - PREINIT) - if [ -n "${alias_ip_address}" ]; then - # Flush alias, its routes will disappear too. 
- ip -4 addr flush dev ${interface} label ${interface}:0 >/dev/null 2>&1 - fi - - # upstream dhclient-script removes (ifconfig $interface 0 up) old adresses in PREINIT, - # but we sometimes (#125298) need (for iSCSI/nfs root to have a dhcp interface) to keep the existing ip - # flush_dev ${interface} - ip link set dev ${interface} up - if [ -n "${DHCLIENT_DELAY}" ] && [ ${DHCLIENT_DELAY} -gt 0 ]; then - # We need to give the kernel some time to get the interface up. - sleep ${DHCLIENT_DELAY} - fi - - exit_with_hooks 0 - ;; - - PREINIT6) - # ensure interface is up - ip link set dev ${interface} up - - # remove any stale addresses from aborted clients - ip -6 addr flush dev ${interface} scope global permanent - - # we need a link-local address to be ready (not tentative) - for i in $(seq 50); do - linklocal=$(ip -6 addr show dev ${interface} scope link) - # tentative flag means DAD is still not complete - tentative=$(echo "${linklocal}" | grep tentative) - [[ -n "${linklocal}" && -z "${tentative}" ]] && exit_with_hooks 0 - sleep 0.1 - done - - exit_with_hooks 0 - ;; - - BOUND|RENEW|REBIND|REBOOT) - if [ -z "${interface}" ] || [ -z "${new_ip_address}" ]; then - exit_with_hooks 2 - fi - if arping -D -q -c2 -I ${interface} ${new_ip_address}; then - dhconfig - exit_with_hooks 0 - else # DAD failed, i.e. 
address is already in use - ARP_REPLY=$(arping -D -c2 -I ${interface} ${new_ip_address} | grep reply | awk '{print toupper($5)}' | cut -d "[" -f2 | cut -d "]" -f1) - OUR_MACS=$(ip link show | grep link | awk '{print toupper($2)}' | uniq) - if [[ "${OUR_MACS}" = *"${ARP_REPLY}"* ]]; then - # in RENEW the reply can come from our system, that's OK - dhconfig - exit_with_hooks 0 - else - exit_with_hooks 1 - fi - fi - ;; - - BOUND6|RENEW6|REBIND6|DEPREF6) - dh6config - exit_with_hooks 0 - ;; - - EXPIRE6|RELEASE6|STOP6) - if [ -z "${old_ip6_address}" ] || [ -z "${old_ip6_prefixlen}" ]; then - exit_with_hooks 2 - fi - - ip -6 addr del ${old_ip6_address}/${old_ip6_prefixlen} \ - dev ${interface} - - execute_client_side_configuration_scripts "restore" - - if [ -x ${ETCDIR}/dhclient-${interface}-down-hooks ]; then - . ${ETCDIR}/dhclient-${interface}-down-hooks - elif [ -x ${ETCDIR}/dhclient-down-hooks ]; then - . ${ETCDIR}/dhclient-down-hooks - fi - - exit_with_hooks 0 - ;; - - EXPIRE|FAIL|RELEASE|STOP) - execute_client_side_configuration_scripts "restore" - - if [ -x ${ETCDIR}/dhclient-${interface}-down-hooks ]; then - . ${ETCDIR}/dhclient-${interface}-down-hooks - elif [ -x ${ETCDIR}/dhclient-down-hooks ]; then - . ${ETCDIR}/dhclient-down-hooks - fi - - if [ -n "${alias_ip_address}" ]; then - # Flush alias - ip -4 addr flush dev ${interface} label ${interface}:0 >/dev/null 2>&1 - fi - - if [ -n "${old_ip_address}" ]; then - # Delete addresses/routes/arp cache. 
- flush_dev ${interface} - fi - - if [ -n "${alias_ip_address}" ]; then - ip -4 addr add ${alias_ip_address}/${alias_prefix} broadcast ${alias_broadcast_address} dev ${interface} label ${interface}:0 - ip -4 route replace ${alias_ip_address}/32 dev ${interface} - fi - - exit_with_hooks 0 - ;; - - TIMEOUT) - if [ -n "${new_routers}" ]; then - if [ -n "${alias_ip_address}" ]; then - ip -4 addr flush dev ${interface} label ${interface}:0 >/dev/null 2>&1 - fi - - ip -4 addr add ${new_ip_address}/${new_prefix} \ - broadcast ${new_broadcast_address} dev ${interface} \ - valid_lft ${new_dhcp_lease_time} preferred_lft ${new_dhcp_lease_time} - set ${new_routers} - - if ping -q -c 1 -w 10 -I ${interface} ${1}; then - dhconfig - exit_with_hooks 0 - fi - - flush_dev ${interface} - exit_with_hooks 1 - else - exit_with_hooks 1 - fi - ;; - - *) - logmessage "unhandled state: ${reason}" - exit_with_hooks 1 - ;; -esac - -exit_with_hooks 0 diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/google_oslogin_control gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/google_oslogin_control --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/google_oslogin_control 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/google_oslogin_control 1970-01-01 00:00:00.000000000 +0000 @@ -1,461 +0,0 @@ -#!/bin/sh -# Copyright 2017 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -is_freebsd() { - [ "$(uname)" = "FreeBSD" ] - return $? -} - -nss_config="/etc/nsswitch.conf" -pam_sshd_config="/etc/pam.d/sshd" -pam_su_config="/etc/pam.d/su" -sshd_config="/etc/ssh/sshd_config" -group_config="/etc/security/group.conf" -sudoers_dir="/var/google-sudoers.d" -users_dir="/var/google-users.d" -added_comment="# Added by Google Compute Engine OS Login." -sshd_block="#### Google OS Login control. Do not edit this section. ####" -sshd_end_block="#### End Google OS Login control section. ####" -sudoers_file="/etc/sudoers.d/google-oslogin" -if is_freebsd; then - sudoers_file="/usr/local/etc/sudoers.d/google-oslogin" -fi - -# Update nsswitch.conf to include OS Login NSS module for passwd. -modify_nsswitch_conf() { - local nss_config="${1:-${nss_config}}" - - if ! grep -q '^passwd:.*oslogin' "$nss_config"; then - $sed -i"" '/^passwd:/ s/$/ cache_oslogin oslogin/' "$nss_config" - fi - - if is_freebsd && grep -q '^passwd:.*compat' "$nss_config"; then - $sed -i"" '/^passwd:/ s/compat/files/' "$nss_config" - fi -} - -restore_nsswitch_conf() { - local nss_config="${1:-${nss_config}}" - - $sed -i"" '/^passwd:/ s/ cache_oslogin oslogin//' "$nss_config" - if is_freebsd; then - $sed -i"" '/^passwd:/ s/files/compat/' "$nss_config" - fi -} - -modify_sshd_conf() ( - set -e - - local sshd_config="${1:-${sshd_config}}" - - local sshd_auth_keys_command="AuthorizedKeysCommand /usr/bin/google_authorized_keys" - local sshd_auth_keys_command_user="AuthorizedKeysCommandUser root" - local sshd_auth_methods="AuthenticationMethods publickey,keyboard-interactive" - local sshd_challenge="ChallengeResponseAuthentication yes" - - # Update google_authorized_keys path in FreeBSD. - if is_freebsd; then - sshd_auth_keys_command="AuthorizedKeysCommand /usr/local/bin/google_authorized_keys" - fi - - # Update directives for EL 6. 
- if grep -qs "release 6" /etc/redhat-release; then - sshd_auth_keys_command_user="AuthorizedKeysCommandRunAs root" - sshd_auth_methods="RequiredAuthentications2 publickey,keyboard-interactive" - fi - - add_or_update_sshd() { - local entry="$1" - local sshd_config="$2" - local directive="$(echo "$entry" | cut -d' ' -f1)" - local value="$(echo "$entry" | cut -d' ' -f2-)" - - # Check if directive is present. - if grep -Eq "^\s*${directive}" "$sshd_config"; then - # Check if value is incorrect. - if ! grep -Eq "^\s*${directive}(\s|=)+${value}" "$sshd_config"; then - # Comment out the line (because sshd_config is first-directive-found) - # and add to end section. - $sed -i"" -E "/^\s*${directive}/ s/^/${added_comment}\n#/" "$sshd_config" - $sed -i"" "/$sshd_end_block/ i${entry}" "$sshd_config" - fi - else - $sed -i"" "/$sshd_end_block/ i${entry}" "$sshd_config" - fi - } - - # Setup Google config block. - if ! grep -q "$sshd_block" "$sshd_config"; then - # Remove old-style additions. - $sed -i"" "/${added_comment}/,+1d" "$sshd_config" - printf "\n\n${sshd_block}\n${sshd_end_block}" >> "$sshd_config" - fi - - for entry in "$sshd_auth_keys_command" "$sshd_auth_keys_command_user"; do - add_or_update_sshd "$entry" "$sshd_config" - done - - if [ -n "$two_factor" ]; then - for entry in "$sshd_auth_methods" "$sshd_challenge"; do - add_or_update_sshd "$entry" "$sshd_config" - done - fi -) - -restore_sshd_conf() { - local sshd_config="${1:-${sshd_config}}" - - if ! grep -q "$sshd_block" "$sshd_config"; then - # Remove old-style additions. - $sed -i"" "/${added_comment}/,+1d" "$sshd_config" - else - # Uncomment commented-out fields and remove Google config block. - $sed -i"" "/${added_comment}/{n;s/^#//}" "$sshd_config" - $sed -i"" "/${added_comment}/d" "$sshd_config" - $sed -i"" "/${sshd_block}/,/${sshd_end_block}/d" "$sshd_config" - fi -} - -# Inserts pam modules to relevant pam stacks if missing. 
-modify_pam_config() ( - # TODO: idempotency of this function would be better assured if it wiped out - # and applied desired changes each time rather than detecting deltas. - - set -e - - local pam_sshd_config="${1:-${pam_sshd_config}}" - local pam_su_config="${1:-${pam_su_config}}" - - local pam_auth_oslogin="auth [success=done perm_denied=die default=ignore] pam_oslogin_login.so" - local pam_auth_group="auth [default=ignore] pam_group.so" - local pam_account_oslogin="account [success=ok default=ignore] pam_oslogin_admin.so" - local pam_account_admin="account [success=ok ignore=ignore default=die] pam_oslogin_login.so" - local pam_session_homedir="session [success=ok default=ignore] pam_mkhomedir.so" - local pam_account_su="account [success=bad ignore=ignore] pam_oslogin_login.so" - - # In FreeBSD, the used flags are not supported, replacing them with the - # previous ones (requisite and optional). This is not an exact feature parity - # with Linux. - if is_freebsd; then - pam_auth_oslogin="auth optional pam_oslogin_login.so" - pam_auth_group="auth optional pam_group.so" - pam_account_oslogin="account optional pam_oslogin_admin.so" - pam_account_admin="account requisite pam_oslogin_login.so" - pam_session_homedir="session optional pam_mkhomedir.so" - fi - - local added_config="" - local added_su_config="" - - # For COS this file is solely includes, so simply prepend the new config, - # making each entry the top of its stack. 
- if [ -e /etc/os-release ] && grep -q "ID=cos" /etc/os-release; then - added_config="${added_comment}\n" - for cfg in "$pam_account_admin" "$pam_account_oslogin" \ - "$pam_session_homedir" "$pam_auth_group"; do - grep -qE "^${cfg%% *}.*${cfg##* }" ${pam_sshd_config} || added_config="${added_config}${cfg}\n" - done - - if [ -n "$two_factor" ]; then - grep -q "$pam_auth_oslogin" "$pam_sshd_config" || added_config="${added_config}${pam_auth_oslogin}\n" - fi - - $sed -i"" "1i ${added_config}\n\n" "$pam_sshd_config" - - added_su_config="${added_comment}\n${pam_account_su}" - $sed -i"" "1i ${added_su_config}" "$pam_su_config" - - return 0 - fi - - # Find the distro-specific insertion point for auth and su. - if [ -e /etc/debian_version ]; then - # Get location of common-auth and check if preceding line is a comment. - insert=$($sed -rn "/^@include\s+common-auth/=" "$pam_sshd_config") - $sed -n "$((insert-1))p" "$pam_sshd_config" | grep -q '^#' && insert=$((insert-1)) - su_insert=$($sed -rn "/^@include\s+common-account/=" "$pam_su_config") - elif [ -e /etc/redhat-release ]; then - # Get location of password-auth. - insert=$($sed -rn "/^auth\s+(substack|include)\s+password-auth/=" \ - "$pam_sshd_config") - # Get location of system-auth. - su_insert=$($sed -rn "/^account\s+include\s+system-auth/=" "$pam_su_config") - elif [ -e /etc/os-release ] && grep -q 'ID="sles"' /etc/os-release; then - # Get location of common-auth. - insert=$($sed -rn "/^auth\s+include\s+common-auth/=" "$pam_sshd_config") - # Get location of common-account. - su_insert=$($sed -rn "/^account\s+include\s+common-account/=" "$pam_su_config") - elif [ -e /etc/arch-release ]; then - # Get location of system-remote-login. - insert=$($sed -rn "/^auth\s+include\s+system-remote-login/=" "$pam_sshd_config") - # TODO: find su_insert point for arch linux. 
- elif is_freebsd; then - # Get location of the first auth occurrence - insert=$($sed -rn '/^auth/=' "$pam_sshd_config" | head -1) - fi - - added_config="$added_comment" - if ! grep -qE '^auth.*pam_group' "$pam_sshd_config"; then - added_config="${added_config}\n${pam_auth_group}" - fi - - # This auth entry for OS Login+two factor MUST be added last, as it will - # short-circuit processing of the auth stack via [success=ok]. auth stack - # entries after this one will not be processed. - if [ -n "$two_factor" ] && ! grep -qE '^auth.*oslogin' "$pam_sshd_config"; then - added_config="${added_config}\n${pam_auth_oslogin}" - fi - - # Insert auth modules at top of `sshd:auth` stack. - if [ -n "$insert" ] && [ "$added_config" != "$added_comment" ]; then - $sed -i"" "${insert}i ${added_config}" "$pam_sshd_config" - fi - - # Insert su blocker at top of `su:account` stack. - if [ -n "$su_insert" ] && ! grep -qE "$pam_account_su" "$pam_su_config"; then - added_su_config="${added_comment}\n${pam_account_su}" - $sed -i"" "${su_insert}i ${added_su_config}" "$pam_su_config" - fi - - # Append account modules at end of `sshd:account` stack. - if ! grep -qE '^account.*oslogin' "$pam_sshd_config"; then - added_config="\\\n${added_comment}\n${pam_account_admin}\n${pam_account_oslogin}" - account_end=$($sed -n '/^account/=' "$pam_sshd_config" | tail -1) - $sed -i"" "${account_end}a ${added_config}" "$pam_sshd_config" - fi - - # Append mkhomedir module at end of `sshd:session` stack. - if ! 
grep -qE '^session.*mkhomedir' "$pam_sshd_config"; then - added_config="\\\n${added_comment}\n${pam_session_homedir}" - session_end=$($sed -n '/^session/=' "$pam_sshd_config" | tail -1) - $sed -i"" "${session_end}a ${added_config}" "$pam_sshd_config" - fi -) - -restore_pam_config() { - local pam_sshd_config="${1:-${pam_sshd_config}}" - local pam_su_config="${1:-${pam_su_config}}" - - $sed -i"" "/${added_comment}/d" "$pam_sshd_config" - $sed -i"" "/pam_oslogin/d" "$pam_sshd_config" - $sed -i"" "/^session.*mkhomedir/d" "$pam_sshd_config" - $sed -i"" "/^auth.*pam_group/d" "$pam_sshd_config" - - $sed -i"" "/${added_comment}/d" "$pam_su_config" - $sed -i"" "/pam_oslogin/d" "$pam_su_config" -} - -modify_group_conf() { - # In FreeBSD there is no pam_group config file similar to - # /etc/security/group.conf. - if is_freebsd; then - return - fi - - local group_config="${1:-${group_config}}" - local group_conf_entry="sshd;*;*;Al0000-2400;adm,dip,docker,lxd,plugdev,video" - - if ! grep -q "$group_conf_entry" "$group_config"; then - $sed -i"" "\$a ${added_comment}\n${group_conf_entry}" "$group_config" - fi -} - -restore_group_conf() { - # In FreeBSD there is no pam_group config file similar to - # /etc/security/group.conf. - if is_freebsd; then - return - fi - - local group_config="${1:-${group_config}}" - - $sed -i"" "/${added_comment}/{n;d}" "$group_config" - $sed -i"" "/${added_comment}/d" "$group_config" -} - -restart_service() { - local service="$1" - - # The other options will be wrappers to systemctl on - # systemd-enabled systems, so stop if found. - if readlink -f /sbin/init|grep -q systemd; then - if systemctl is-active --quiet "$service"; then - systemctl restart "$service" - return $? - else - return 0 - fi - fi - - # Use the service helper if it exists. - if command -v service > /dev/null; then - if ! service "$service" status 2>&1 | grep -Eq "unrecognized|does not exist"; then - service "$service" restart - return $? 
- else - return 0 - fi - fi - - # Fallback to trying sysvinit script of the same name. - if command -v /etc/init.d/"$service" > /dev/null; then - if /etc/init.d/"$service" status > /dev/null 2>&1; then - /etc/init.d/"$service" restart - return $? - else - return 0 - fi - fi - - # We didn't find any way to restart this service. - return 1 -} - -# Restart sshd unless --norestartsshd flag is set. -restart_sshd() { - if [ -n "$no_restart_sshd" ]; then - return 0 - fi - echo "Restarting SSHD" - for svc in "ssh" "sshd"; do - restart_service "$svc" - done -} - -restart_svcs() { - echo "Restarting optional services." - for svc in "nscd" "unscd" "systemd-logind" "cron" "crond"; do - restart_service "$svc" - done -} - -setup_google_dirs() { - for dir in "$sudoers_dir" "$users_dir"; do - [ -d "$dir" ] && continue - mkdir -p "$dir" - chmod 750 "$dir" - if fixfiles=$(command -v fixfiles); then - $fixfiles restore "$dir" - fi - done - echo "#includedir ${sudoers_dir}" > "$sudoers_file" - chmod 0440 "$sudoers_file" -} - -remove_google_dirs() { - for dir in "$sudoers_dir" "$users_dir"; do - rm -rf "$dir" - done - rm -f "$sudoers_file" -} - -activate() { - for func in modify_sshd_conf modify_nsswitch_conf \ - modify_pam_config setup_google_dirs restart_svcs restart_sshd \ - modify_group_conf; do - $func - [ $? -eq 0 ] || return 1 - done -} - -deactivate() { - for func in remove_google_dirs restore_nsswitch_conf \ - restore_sshd_conf restore_pam_config restart_svcs restart_sshd \ - restore_group_conf; do - $func - done -} - -# get_status checks each file for appropriate updates and exits on first -# failure. Checks for two factor config changes only if requested. 
-get_status() ( - set -e - - grep -Eq '^account.*oslogin' "$pam_sshd_config" - grep -Eq 'google_authorized_keys' "$sshd_config" - grep -Eq 'passwd:.*oslogin' "$nss_config" - if [ -n "$two_factor" ]; then - grep -Eq '^auth.*oslogin' "$pam_sshd_config" - grep -Eq '^(AuthenticationMethods|RequiredAuthentications2).*publickey,keyboard-interactive' "$sshd_config" - fi -) - -usage() { - echo "Usage: $(basename "$0") {activate|deactivate|status} [--norestartsshd] [--twofactor]" - echo "This script will activate or deactivate the features for" - echo "Google Compute Engine OS Login and (optionally) two-factor authentication." - echo "This script must be run as root." - exit 1 -} - - -# Main -if [ $(id -u) -ne 0 ] || [ $# -lt 1 ]; then - usage -fi - -sed="sed" -is_freebsd && sed="gsed" - -while [ $# -gt 0 ]; do - case "$1" in - --norestartsshd) - no_restart_sshd="true" - shift - ;; - --twofactor) - two_factor="true" - shift - ;; - activate) - action="activate" - shift - ;; - deactivate) - action="deactivate" - shift - ;; - status) - action="status" - shift - ;; - *) - shift - ;; - esac -done - -case "$action" in - activate) - echo "Activating Google Compute Engine OS Login." - activate - if [ $? -ne 0 ]; then - echo "Failed to apply changes, rolling back" - deactivate - exit 1 - fi - ;; - deactivate) - echo "Deactivating Google Compute Engine OS Login." - deactivate - ;; - status) - get_status - exit $? 
- ;; - *) - usage - ;; -esac diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/Makefile gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/Makefile --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/Makefile 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/Makefile 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -all install : - $(MAKE) -C src $@ - -tests : - $(MAKE) -C test $@ - -clean : - $(MAKE) -C src clean - $(MAKE) -C test clean - -prowbuild : debian_deps all - -prowtest : debian_deps tests - -debian_deps : - sudo apt-get -y install g++ libcurl4-openssl-dev libjson-c-dev libpam-dev \ - googletest && touch $@ - -.PHONY : all clean install prowbuild prowtest diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/man/nss-cache-oslogin.8 gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/man/nss-cache-oslogin.8 --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/man/nss-cache-oslogin.8 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/man/nss-cache-oslogin.8 1970-01-01 00:00:00.000000000 +0000 @@ -1,48 +0,0 @@ -'\" t -.TH "NSS\-CACHE\-OSLOGIN" "8" "2019-06-06" "Google Cloud" - -.\" IF GNU troff set Aq to sequence aq (Apostrophe quote) -.ie \n(.g .ds Aq \(aq -.\" ELSE set Aq to ' -.el .ds Aq ' - -.\" disable hyphenation -.nh -.\" disable justification (adjust text to left margin only) -.ad l - -.SH "NAME" -nss-cache-oslogin, libnss_cache_oslogin.so.2 \- UNIX implementation for OS Login Users and Groups with local caching\&. - -.SH "SYNOPSIS" -libnss_cache_oslogin\&.so\&.2 - -.SH "DESCRIPTION" -\fBnss\-cache\-oslogin\fR is a plug\-in module for the GNU Name Service Switch -(NSS) functionality of the GNU C Library (\fBglibc\fR)\&. 
-This module provides UNIX name resolution from a local cache of users and groups -configured in the \fIGoogle Cloud OS Login\fR system\&. -The "passwd" and "group" services are supported by this module\&. -.PP -To use the NSS module, add "cache_oslogin" to the appropriate service lines -in nsswitch\&.conf\&. -It is recommended to place "cache_oslogin" after any system provided modules -and before the "oslogin" module, as it is a pass-through cache. - -.SH "FILES" -.IP /etc/oslogin_passwd.cache -user cache file -.IP /etc/oslogin_group.cache -group cache file - -.SH "NOTES" -Documentation for the Google Cloud OS Login service is available online at -.UR "https://cloud.google.com/compute/docs/oslogin" -.UE - -.SH "SEE ALSO" -.BR nss-oslogin (5), -.BR nsswitch.conf (5), -.BR nss (5), -.BR getent (1), -.BR initgroups (3) diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/man/nss-oslogin.8 gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/man/nss-oslogin.8 --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/man/nss-oslogin.8 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/man/nss-oslogin.8 1970-01-01 00:00:00.000000000 +0000 @@ -1,41 +0,0 @@ -'\" t -.TH "NSS\-OSLOGIN" "8" "2019-06-06" "Google Cloud" - -.\" IF GNU troff set Aq to sequence aq (Apostrophe quote) -.ie \n(.g .ds Aq \(aq -.\" ELSE set Aq to ' -.el .ds Aq ' - -.\" disable hyphenation -.nh -.\" disable justification (adjust text to left margin only) -.ad l - -.SH "NAME" -nss-oslogin, libnss_oslogin.so.2 \- UNIX implementation for OS Login Users and Groups\&. - -.SH "SYNOPSIS" -libnss_oslogin\&.so\&.2 - -.SH "DESCRIPTION" -\fBnss\-oslogin\fR is a plug\-in module for the GNU Name Service Switch (NSS) -functionality of the GNU C Library (\fBglibc\fR). 
-This module provides UNIX name resolution for users and groups configured -through the \fIGoogle Cloud OS Login\fR system\&. -The "passwd", "group", and "initgroups" services are supported by this module. -.PP -To use the NSS module, add "oslogin" to the appropriate service lines -in nsswitch\&.conf\&. -It is recommended to place "oslogin" as the last entry for each service. - -.SH "NOTES" -Documentation for the Google Cloud OS Login service is available online at -.UR "https://cloud.google.com/compute/docs/oslogin" -.UE - -.SH "SEE ALSO" -.BR nss-cache-oslogin (8), -.BR nsswitch.conf (5), -.BR nss (5), -.BR getent (1), -.BR initgroups (3) diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/packaging/debian/changelog gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/packaging/debian/changelog --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/packaging/debian/changelog 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/packaging/debian/changelog 1970-01-01 00:00:00.000000000 +0000 @@ -1,164 +0,0 @@ -google-compute-engine-oslogin (1:20190801.00-g1) stable; urgency=medium - - * Correct JSON refcount decrementing. - - -- Google Cloud Team Thu, 01 Aug 2019 13:57:16 -0700 - -google-compute-engine-oslogin (1:20190729.00-g1) stable; urgency=medium - - * Remove unnecessary binary search logic. - - -- Google Cloud Team Mon, 29 Jul 2019 10:11:00 -0700 - -google-compute-engine-oslogin (1:20190708.00-g1) stable; urgency=medium - - * Restructure Makefile and Debian control files. - * Add man pages. - - -- Google Cloud Team Mon, 08 Jul 2019 10:20:01 -0700 - -google-compute-engine-oslogin (1.5.3-1) unstable; urgency=low - - * Update OS Login control file for FreeBSD support. 
- - -- Google Cloud Team Wed, 22 May 2019 12:00:00 -0700 - -google-compute-engine-oslogin (1.5.2-1) unstable; urgency=low - - * Fix pam_group ordering detection. - * Restart cron on OS Login control. - * Add PAM entry to su:account stack. - - -- Google Cloud Team Tue, 16 Apr 2019 12:00:00 -0700 - -google-compute-engine-oslogin (1.5.1-1) unstable; urgency=low - - * Fix two factor auth action name. - - -- Google Cloud Team Tue, 13 Mar 2019 12:00:00 -0700 - -google-compute-engine-oslogin (1.5.0-1) unstable; urgency=low - - * Support Google prompt for two factor authentication. - - -- Google Cloud Team Tue, 19 Feb 2019 12:00:00 -0700 - -google-compute-engine-oslogin (1.4.3-1) unstable; urgency=low - - * Improve OS Login control file for BSD support. - * Improve SELinux support. - - -- Google Cloud Team Wed, 05 Dec 2018 12:00:00 -0700 - -google-compute-engine-oslogin (1.4.2-1+deb9) unstable; urgency=low - - * Improve OS Login control file. - * Restart systemd-logind on OS Login enable. - - -- Google Cloud Team Tue, 04 Dec 2018 12:00:00 -0700 - -google-compute-engine-oslogin (1.4.1-1+deb9) unstable; urgency=low - - * Improve SELinux support. - * Improve OS Login control file. - - -- Google Cloud Team Fri, 30 Nov 2018 12:00:00 -0700 - -google-compute-engine-oslogin (1.4.0-1+deb9) unstable; urgency=low - - * Support OS Login two factor authentication. - - -- Google Cloud Team Wed, 28 Nov 2018 12:00:00 -0700 - -google-compute-engine-oslogin (1.3.1-1+deb9) unstable; urgency=low - - * Add user name validation to pam modules. - * Return false on failed final load. - - -- Google Cloud Team Wed, 05 Sep 2018 12:00:00 -0700 - -google-compute-engine-oslogin (1.3.0-1+deb9) unstable; urgency=low - - * Include libnss cache as part of the OS Login package. - - -- Google Cloud Team Tue, 01 May 2018 12:00:00 -0700 - -google-compute-engine-oslogin (1.2.0-1+deb9) unstable; urgency=low - - * Add support for NSS cache. 
- - -- Google Cloud Team Thu, 08 Mar 2018 12:00:00 -0700 - -google-compute-engine-oslogin (1.1.5-1+deb9) unstable; urgency=low - - * Clear the CURL_GLOBAL_SSL bit on curl initialization. - - -- Google Cloud Team Mon, 26 Feb 2018 12:00:00 -0700 - -google-compute-engine-oslogin (1.1.4-1+deb9) unstable; urgency=low - - * Close socket connections when requesting metadata. - - -- Google Cloud Team Mon, 29 Jan 2018 12:00:00 -0700 - -google-compute-engine-oslogin (1.1.3-1+deb9) unstable; urgency=low - - * Change the OS Login uid restriction to allow uid 1000. - - -- Google Cloud Team Thu, 25 Jan 2018 12:00:00 -0700 - -google-compute-engine-oslogin (1.1.2-1+deb9) unstable; urgency=low - - * Fix parsing logic for expiration time on SSH public keys. - * Fix home directory creation PAM config. - - -- MAINTAINER Wed, 29 Nov 2017 12:00:00 -0700 - -google-compute-engine-oslogin (1.1.1-1+deb9) unstable; urgency=low - - * Remove logging when checking OS Login status. - - -- MAINTAINER Wed, 25 Oct 2017 12:00:00 -0700 - -google-compute-engine-oslogin (1.1.0-1+deb9) unstable; urgency=low - - * OS Login is enabled via the google-compute-engine package. - - -- MAINTAINER Tue, 17 Oct 2017 12:00:00 -0700 - -google-compute-engine-oslogin (1.0.5-1+deb9) unstable; urgency=low - - * JSON parser accepts string types for int64 values. - - -- MAINTAINER Fri, 06 Oct 2017 12:00:00 -0700 - -google-compute-engine-oslogin (1.0.4-1+deb9) unstable; urgency=low - - * JSON parser casts uid and gid to unsigned integers. - - -- MAINTAINER Tue, 20 Sep 2017 12:00:00 -0700 - -google-compute-engine-oslogin (1.0.3-1+deb9) unstable; urgency=low - - * Strictly check for HTTP code 200. - - -- MAINTAINER Tue, 25 Aug 2017 12:00:00 -0700 - -google-compute-engine-oslogin (1.0.2-1+deb9) unstable; urgency=low - - * Improve security in case of transient errors. - - -- MAINTAINER Tue, 15 Aug 2017 12:00:00 -0700 - -google-compute-engine-oslogin (1.0.1-1+deb9) unstable; urgency=low - - * Fix for restarting sshd and nscd. 
- - -- MAINTAINER Mon, 17 Jul 2017 12:00:00 -0700 - -google-compute-engine-oslogin (1.0.0-1+deb9) unstable; urgency=low - - * Team Upload. - * Initial release. - - -- MAINTAINER Thu, 22 Jun 2017 12:00:00 -0700 diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/packaging/debian/compat gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/packaging/debian/compat --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/packaging/debian/compat 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/packaging/debian/compat 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -9 diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/packaging/debian/control gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/packaging/debian/control --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/packaging/debian/control 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/packaging/debian/control 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -Source: google-compute-engine-oslogin -Maintainer: Google Cloud Team -Section: misc -Priority: optional -Standards-Version: 3.9.8 -Build-Depends: debhelper (>= 9), libcurl4-openssl-dev, libjson-c-dev | libjson0-dev, libpam-dev - -Package: google-compute-engine-oslogin -Architecture: any -Depends: ${shlibs:Depends}, ${misc:Depends} -Description: Google Compute Engine OS Login - Contains libraries, applications and configurations for using OS Login - on Google Compute Engine Virtual Machine Instances. 
diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/packaging/debian/copyright gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/packaging/debian/copyright --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/packaging/debian/copyright 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/packaging/debian/copyright 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: google-compute-engine-oslogin -Upstream-Contact: gc-team@google.com - -Files: * -Copyright: Copyright 2017 Google Inc. -License: Apache-2.0 - -Files: debian/* -Copyright: Copyright 2017 Google Inc. -License: Apache-2.0 - -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - On Debian systems, the complete text of the Apache version 2.0 license - can be found in "/usr/share/common-licenses/Apache-2.0". 
diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/packaging/debian/rules gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/packaging/debian/rules --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/packaging/debian/rules 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/packaging/debian/rules 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -#!/usr/bin/make -f -%: - dh $@ - -override_dh_auto_install: - dh_auto_install -- LIBDIR=/lib/$(DEB_HOST_MULTIARCH) diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/packaging/debian/source/format gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/packaging/debian/source/format --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/packaging/debian/source/format 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/packaging/debian/source/format 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -3.0 (quilt) diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/packaging/google-compute-engine-oslogin.spec gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/packaging/google-compute-engine-oslogin.spec --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/packaging/google-compute-engine-oslogin.spec 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/packaging/google-compute-engine-oslogin.spec 1970-01-01 00:00:00.000000000 +0000 @@ -1,101 +0,0 @@ -# Copyright 2017 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# For EL7, if building on CentOS, override dist to be el7. -%if 0%{?rhel} == 7 - %define dist .el7 -%endif - -Name: google-compute-engine-oslogin -Epoch: 1 -Version: %{_version} -Release: g1%{?dist} -Summary: OS Login Functionality for Google Compute Engine - -License: ASL 2.0 -Source0: %{name}_%{version}.orig.tar.gz - -BuildRequires: boost-devel -BuildRequires: gcc-c++ -BuildRequires: make -BuildRequires: libcurl -BuildRequires: json-c-devel -BuildRequires: pam-devel -%if 0%{?rhel} == 8 -BuildRequires: python3-policycoreutils -Requires: python3-policycoreutils -%else -BuildRequires: policycoreutils-python -Requires: policycoreutils-python -%endif -Requires: boost-regex -Requires: json-c - -%description -This package contains several libraries and changes to enable OS Login functionality -for Google Compute Engine. 
- -%global debug_package %{nil} - -%prep -%setup - -%build -make %{?_smp_mflags} LDLIBS="-lcurl -ljson-c -lboost_regex" - -%install -rm -rf %{buildroot} -make install DESTDIR=%{buildroot} LIBDIR=/%{_lib} INSTALL_SELINUX=y - -%files -%doc -/%{_lib}/libnss_oslogin-%{version}.so -/%{_lib}/libnss_cache_oslogin-%{version}.so -/%{_lib}/libnss_oslogin.so.2 -/%{_lib}/libnss_cache_oslogin.so.2 -/%{_lib}/security/pam_oslogin_admin.so -/%{_lib}/security/pam_oslogin_login.so -/usr/bin/google_authorized_keys -/usr/bin/google_oslogin_control -/usr/bin/google_oslogin_nss_cache -/usr/share/selinux/packages/oslogin.pp -%{_mandir}/man8/nss-oslogin.8.gz -%{_mandir}/man8/libnss_oslogin.so.2.8.gz -%{_mandir}/man8/nss-cache-oslogin.8.gz -%{_mandir}/man8/libnss_cache_oslogin.so.2.8.gz - -%post -/sbin/ldconfig -if [ $1 -gt 1 ]; then # This is an upgrade. - if semodule -l | grep -qi oslogin.el6; then - echo "Removing old SELinux module for OS Login." - semodule -r oslogin.el6 - fi -fi -echo "Installing SELinux module for OS Login." -semodule -i /usr/share/selinux/packages/oslogin.pp -if [ -e /var/google-sudoers.d ]; then - fixfiles restore /var/google-sudoers.d -fi - -%postun -/sbin/ldconfig -if [ $1 = 0 ]; then # This is an uninstall. - if semodule -l|grep -qi oslogin; then - echo "Removing SELinux module for OS Login." - semodule -r oslogin - fi -fi - -%changelog diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/packaging/setup_deb.sh gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/packaging/setup_deb.sh --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/packaging/setup_deb.sh 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/packaging/setup_deb.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,54 +0,0 @@ -#!/bin/bash -# Copyright 2018 Google Inc. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -NAME="google-compute-engine-oslogin" -VERSION="20190801.00" - -DEB=$(cut -d. -f1 /dev/null - -rm -rf /tmp/debpackage -mkdir /tmp/debpackage -tar czvf /tmp/debpackage/${NAME}_${VERSION}.orig.tar.gz --exclude .git \ - --exclude packaging --transform "s/^\./${NAME}-${VERSION}/" . - -pushd /tmp/debpackage -tar xzvf ${NAME}_${VERSION}.orig.tar.gz - -cd ${NAME}-${VERSION} - -cp -r ${working_dir}/packaging/debian ./ -echo "Building on Debian ${DEB}, modifying latest changelog entry." -sed -r -i"" "1s/^${NAME} \((.*)\) (.+;.*)/${NAME} (\1+deb${DEB}) \2/" \ - debian/changelog - -echo "Starting build" -DEB_BUILD_OPTIONS=noddebs debuild -us -uc - -popd diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/packaging/setup_rpm.sh gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/packaging/setup_rpm.sh --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/packaging/setup_rpm.sh 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/packaging/setup_rpm.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,43 +0,0 @@ -#!/bin/bash -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -NAME="google-compute-engine-oslogin" -VERSION="20190801.00" - -rpm_working_dir=/tmp/rpmpackage/${NAME}-${VERSION} -working_dir=${PWD} -if [[ $(basename "$working_dir") != $NAME ]]; then - echo "Packaging scripts must be run from top of package dir." - exit 1 -fi - -sudo yum -y install rpmdevtools make gcc-c++ json-c \ - libcurl-devel pam-devel boost-devel json-c-devel - -if grep -q '^\(CentOS\|Red Hat\)[^0-9]*8\..' /etc/redhat-release; then - sudo yum -y install python3-policycoreutils -else - sudo yum -y install policycoreutils-python -fi - -rm -rf /tmp/rpmpackage -mkdir -p ${rpm_working_dir}/{SOURCES,SPECS} -cp packaging/${NAME}.spec ${rpm_working_dir}/SPECS/ - -tar czvf ${rpm_working_dir}/SOURCES/${NAME}_${VERSION}.orig.tar.gz --exclude .git --exclude packaging --transform "s/^\./${NAME}-${VERSION}/" . 
- -rpmbuild --define "_topdir ${rpm_working_dir}/" --define "_version ${VERSION}" \ - -ba ${rpm_working_dir}/SPECS/${NAME}.spec diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/README.md gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/README.md --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/README.md 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,222 +0,0 @@ -## OS Login Guest Environment for Google Compute Engine - -This repository contains the system components responsible for providing Google -Cloud OS Login features on Google Compute Engine instances. - -**Table of Contents** - -* [Overview](#overview) -* [Components](#components) - * [Authorized Keys Command](#authorized-keys-command) - * [NSS Modules](#nss-modules) - * [PAM Modules](#pam-modules) -* [Utilities](#Utilities) - * [Control Script](#control-script) - * [SELinux Policy](#selinux-policy) -* [Source Packages](#source-packages) - * [DEB](#deb) - * [RPM](#rpm) - -## Overview - -The OS Login Guest Environment consists of the following main components: - -* **Authorized Keys Command** which provides SSH keys from the user's OS Login - profile to sshd for authenticating users at login. -* **NSS Modules** which provide support for making OS Login user and group - information available to the system, using NSS (Name Service Switch) - functionality. -* **PAM Modules** which provide authorization (and authentication if - two-factor support is enabled) support allowing the system to use Google - Cloud IAM permissions to control the ability to log into an instance or to - perform operations as root (via `sudo`). - -In addition to the main components, there are also the following utilities: - -* **google_oslogin_control** is a shell script for activating/deactivating the - OS Login components. 
-* **google_oslogin_nss_cache** is a utility for updating the local user and - group cache. -* **selinux** contains SELinux policy definition files and a compiled policy - package for configuring SELinux to support OS Login. - -The **packaging** directory also contains files used to generate `.deb` and -`.rpm` packages for the OS Login components. - -## Components - -#### Authorized Keys Command - -The `google_authorized_keys` binary is designed to be used with the sshd -`AuthorizedKeysCommand` option in [sshd_config(5)](https://linux.die.net/man/5/sshd_config). -It does the following: - -* Reads the user's profile information from the metadata server: - ``` - http://metadata.google.internal/computeMetadata/v1/oslogin/users?username= - ``` -* Checks to make sure that the user is authorized to log in: - ``` - http://metadata.google.internal/computeMetadata/v1/oslogin/authorize?email=&policy=login - ``` -* If the check is successful, returns the SSH keys associated with the user - for use by sshd. Otherwise, exits with an error code. - -#### NSS Modules - -`libnss_oslogin.so` and `libnss_cache_oslogin.so` are NSS service modules which -make OS Login users and groups available for use on the local system. The module -is activated by adding `oslogin` and `cache_oslogin` entries for services in -[nsswitch.conf(5)](https://linux.die.net/man/5/nsswitch.conf). - -* To return a list of all users, the NSS module queries: - ``` - http://metadata.google.internal/computeMetadata/v1/oslogin/users?pagesize= - ``` -* To look up a user by username, the NSS module queries: - ``` - http://metadata.google.internal/computeMetadata/v1/oslogin/users?username= - ``` -* To look up a user by UID, the NSS module queries: - ``` - http://metadata.google.internal/computeMetadata/v1/oslogin/users?uid= - ``` - -#### PAM Modules - -`pam_oslogin_login.so` is a PAM module which determines whether a given user is -allowed to SSH into an instance. 
- -It is activated by adding an entry for the account group to the PAM service -config for sshd as: - ``` - account requisite pam_oslogin_login.so - ``` - -This module: - -* Retrieves the user's profile information from the metadata server: - ``` - http://metadata.google.internal/computeMetadata/v1/oslogin/users?username= - ``` -* If the user does not have OS Login profile information it is passed on to - the system authentication modules to be processed as a local user. -* Otherwise, the module confirms whether the user has permissions to SSH into - the instance: - ``` - http://metadata.google.internal/computeMetadata/v1/oslogin/authorize?email=&policy=login - ``` -* If the user is authorized, PAM returns a success message and SSH can - proceed. Otherwise, PAM returns a denied message and the SSH check will - fail. - -`pam_oslogin_admin.so` is a PAM module which determines whether a given user -should have admin (sudo) permissions on the instance. - -It is activated by adding an entry for the `account` group to the PAM service -config for sshd config as: - ``` - account optional pam_oslogin_admin.so - ``` - -This module: - -* Retrieves the user's profile information from the metadata server. - ``` - http://metadata.google.internal/computeMetadata/v1/oslogin/users?username= - ``` -* If the user is not an OS Login user (a local user account), the module - returns success. -* Otherwise, the module determines if the user has admin permissions: - ``` - http://metadata.google.internal/computeMetadata/v1/oslogin/authorize?email=&policy=adminLogin - ``` -* If the user is authorized as an admin, a file with the username is added to - `/var/google-sudoers.d/`. The file gives the user sudo privileges. -* If the authorization check fails for admin permissions, the file is removed - from `/var/google-sudoers.d/` if it exists. - -## Utilities - -#### Control Script - -The `google_oslogin_control` shell script activates or deactivates the OS Login -features. 
It is invoked by the google accounts daemon. The control file performs -the following tasks: - -* Adds (or removes) AuthorizedKeysCommand and AuthorizedKeysCommandUser lines - to (from) `sshd_config` and restarts sshd. -* Adds (or removes) `oslogin` and `cache_oslogin` to (from) `nsswitch.conf`. -* Adds (or removes) the `account` entries to (from) the PAM sshd config. Also - adds (or removes) the `pam_mkhomedir.so` module to automatically create the - home directory for an OS Login user. -* Creates (or deletes) the `/var/google-sudoers.d/` directory, and a file - called `google-oslogin` in `/etc/sudoers.d/` that includes the directory. - -#### SELinux Policy - -The `selinux` directory contains `.te` (type enforcement) and `.fc` (file -context) files used by SELinux to give the OS Login features the appropriate -SELinux permissions. These are compiled using `checkmodule` and -`semodule_package` to create an policy package `oslogin.pp`. - -## Source Packages - -There is currently support for creating packages for the following distros: - -* Debian 9 -* CentOS/RHEL 6 -* CentOS/RHEL 7 - -Files for these packages are in the `packaging/` directory. - -#### DEB - -_Note: the `packaging/setup_deb.sh` script performs these steps, but is not -production quality._ - -1. Install build dependencies: - ``` - sudo apt-get -y install make g++ libcurl4-openssl-dev libjson-c-dev libpam-dev - ``` -1. Install deb creation tools: - ``` - sudo apt-get -y install debhelper devscripts build-essential - ``` -1. Create a compressed tar file named - `google-compute-engine-oslogin_M.M.R.orig.tar.gz` using the files in this - directory, excluding the `packaging` directory (where M.M.R is the version - number). -1. In a separate directory, extract the `.orig.tar.gz` file and copy the - `debian` directory into the top level. -1. 
To build the package, run the command - ``` - debuild -us -uc - ``` - -#### RPM - -_Note: the `packaging/setup_rpm.sh` script performs these steps, but is not -production quality._ - -1. Install build dependencies: - ``` - sudo yum -y install make gcc-c++ libcurl-devel json-c json-c-devel pam-devel policycoreutils-python - ``` -1. Install rpm creation tools: - ``` - sudo yum -y install rpmdevtools - ``` -1. Create a compressed tar file named - `google-compute-engine-oslogin_M.M.R.orig.tar.gz` using the files in this - directory, excluding the `packaging` directory (where M.M.R is the version - number). -1. In a separate location, create a directory called `rpmbuild` and a - subdirectory called `SOURCES`. Copy the `.orig.tar.gz` file into the - `SOURCES` directory. -1. Copy the `SPECS` directory from the `rpmbuild` directory here into the - `rpmbuild` directory you created. -1. To build the package, run the command: - ``` - rpmbuild --define "_topdir /path/to/rpmbuild" -ba /path/to/rpmbuild/SPECS/google-compute-engine-oslogin.spec - ``` diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/selinux/Makefile gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/selinux/Makefile --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/selinux/Makefile 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/selinux/Makefile 1970-01-01 00:00:00.000000000 +0000 @@ -1,17 +0,0 @@ -# SELINUX POLICY -MOD_BASE = oslogin -SELINUX_MODULE_SRC = $(MOD_BASE).te -SELINUX_MOD_FILE = $(MOD_BASE).mod -SELINUX_FC_FILE = $(MOD_BASE).fc -SELINUX_MODULE = $(MOD_BASE).pp - -all: $(SELINUX_MODULE) - -$(SELINUX_MOD_FILE): $(SELINUX_MODULE_SRC) - checkmodule -M -m -o $(SELINUX_MOD_FILE) $(SELINUX_MODULE_SRC) - -$(SELINUX_MODULE): $(SELINUX_MOD_FILE) - semodule_package -o $(SELINUX_MODULE) -m $(SELINUX_MOD_FILE) -f $(SELINUX_FC_FILE) - -clean: - rm -f 
$(SELINUX_MODULE) $(SELINUX_MOD_FILE) diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/selinux/oslogin.fc gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/selinux/oslogin.fc --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/selinux/oslogin.fc 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/selinux/oslogin.fc 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -/var/google-sudoers.d(/.*)? system_u:object_r:google_t:s0 -/var/google-users.d(/.*)? system_u:object_r:google_t:s0 Binary files /tmp/tmpQ_Q9xb/MkebbosahF/gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/selinux/oslogin.pp and /tmp/tmpQ_Q9xb/3iLA0DL4r6/gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/selinux/oslogin.pp differ diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/selinux/oslogin.te gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/selinux/oslogin.te --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/selinux/oslogin.te 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/selinux/oslogin.te 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ - -module oslogin 1.0; - - -require { - attribute file_type; - attribute non_security_file_type; - type http_port_t; - type sshd_t; - class tcp_socket name_connect; - class file { create getattr setattr write open unlink }; - class dir { search write remove_name add_name }; -} - -#============= types ============== - -type google_t; # defined in oslogin.fc -typeattribute google_t file_type, non_security_file_type; - -#============= sshd_t ============== - -allow sshd_t google_t:file { create getattr setattr write open unlink }; -allow sshd_t google_t:dir { search write remove_name add_name }; 
-allow sshd_t http_port_t:tcp_socket name_connect; diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/selinux/README.md gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/selinux/README.md --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/selinux/README.md 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/selinux/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,17 +0,0 @@ -## SELinux policy module for OS Login - -This module adds specific policy updates which enable OS Login features to -function on SELinux-enabled systems (currently default on GCE RHEL6/7 images). - -It primarily enables `SSHD(8)` to make network calls to the metadata server to -verify OS Login users, and to create per-user `SUDOERS(5)` files in -`/var/google-sudoers.d` - -### Building the module - -The provided Makefile compiles type enforcement and file context files into a -binary SELinux policy module. It must be compiled on the oldest version of the -destination OS you intend to support, as binary module versions are not -backwards compatible. Therefore, this Makefile is not run as part of the normal -packaging process but is done 'by hand', only when changes are made to the -policy. diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/src/authorized_keys/authorized_keys.cc gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/src/authorized_keys/authorized_keys.cc --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/src/authorized_keys/authorized_keys.cc 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/src/authorized_keys/authorized_keys.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,78 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include -#include -#include - -#include - -using std::cout; -using std::endl; -using std::string; - -using oslogin_utils::HttpGet; -using oslogin_utils::ParseJsonToSuccess; -using oslogin_utils::ParseJsonToKey; -using oslogin_utils::ParseJsonToEmail; -using oslogin_utils::ParseJsonToSshKeys; -using oslogin_utils::UrlEncode; -using oslogin_utils::kMetadataServerUrl; - -int main(int argc, char* argv[]) { - if (argc != 2) { - cout << "usage: authorized_keys [username]" << endl; - return 1; - } - std::stringstream url; - url << kMetadataServerUrl << "users?username=" << UrlEncode(argv[1]); - string user_response; - long http_code = 0; - if (!HttpGet(url.str(), &user_response, &http_code) || - user_response.empty() || http_code != 200) { - if (http_code == 404) { - // Return 0 if the user is not an oslogin user. If we returned a failure - // code, we would populate auth.log with useless error messages. - return 0; - } - return 1; - } - string email; - if (!ParseJsonToEmail(user_response, &email) || email.empty()) { - return 1; - } - // Redundantly verify that this user has permission to log in to this VM. - // Normally the PAM module determines this, but in the off chance a transient - // error causes the PAM module to permit a user without login permissions, - // perform the same check here. If this fails, we can guarantee that we won't - // accidentally allow a user to log in without permissions. 
- url.str(""); - url << kMetadataServerUrl << "authorize?email=" << UrlEncode(email) - << "&policy=login"; - string auth_response; - if (!HttpGet(url.str(), &auth_response, &http_code) || http_code != 200 || - auth_response.empty()) { - return 1; - } - if (!ParseJsonToSuccess(auth_response)) { - return 1; - } - // At this point, we've verified the user can log in. Grab the ssh keys from - // the user response. - std::vector ssh_keys = ParseJsonToSshKeys(user_response); - for (int i = 0; i < ssh_keys.size(); i++) { - cout << ssh_keys[i] << endl; - } - return 0; -} diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/src/cache_refresh/cache_refresh.cc gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/src/cache_refresh/cache_refresh.cc --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/src/cache_refresh/cache_refresh.cc 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/src/cache_refresh/cache_refresh.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,119 +0,0 @@ -// Copyright 2018 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include - - -using oslogin_utils::BufferManager; -using oslogin_utils::MutexLock; -using oslogin_utils::NssCache; - -// File paths for the nss cache file. -static const char kDefaultFilePath[] = K_DEFAULT_FILE_PATH; -static const char kDefaultBackupFilePath[] = K_DEFAULT_BACKUP_FILE_PATH; - -// Local NSS Cache size. This affects the maximum number of passwd entries per -// http request. -static const uint64_t kNssCacheSize = 2048; - -// Passwd buffer size. We are guaranteed that a single OS Login user will not -// exceed 32k. -static const uint64_t kPasswdBufferSize = 32768; - -static NssCache nss_cache(kNssCacheSize); - -static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER; - -int main(int argc, char* argv[]) { - int error_code = 0; - // Temporary buffer to hold passwd entries before writing. - char buffer[kPasswdBufferSize]; - struct passwd pwd; - - // Perform the writes under a global lock. - MutexLock ml(&cache_mutex); - nss_cache.Reset(); - - // Check if there is a cache already. - struct stat stat_buf; - bool backup = !stat(kDefaultFilePath, &stat_buf); - if (backup) { - // Write a backup file first, in case lookup fails. 
- error_code = rename(kDefaultFilePath, kDefaultBackupFilePath); - if (error_code) { - openlog("nss_cache_oslogin", LOG_PID, LOG_USER); - syslog(LOG_ERR, "Could not create backup file."); - closelog(); - return error_code; - } - } - - std::ofstream cache_file(kDefaultFilePath); - if (cache_file.fail()) { - openlog("nss_cache_oslogin", LOG_PID, LOG_USER); - syslog(LOG_ERR, "Failed to open file %s.", kDefaultFilePath); - closelog(); - return -1; - } - chown(kDefaultFilePath, 0, 0); - chmod(kDefaultFilePath, S_IRUSR | S_IWUSR | S_IROTH); - - while (!nss_cache.OnLastPage() || nss_cache.HasNextPasswd()) { - BufferManager buffer_manager(buffer, kPasswdBufferSize); - if (!nss_cache.NssGetpwentHelper(&buffer_manager, &pwd, &error_code)) { - break; - } - cache_file << pwd.pw_name << ":" << pwd.pw_passwd << ":" << pwd.pw_uid - << ":" << pwd.pw_gid << ":" << pwd.pw_gecos << ":" << pwd.pw_dir - << ":" << pwd.pw_shell << "\n"; - } - cache_file.close(); - - // Check for errors. - if (error_code) { - openlog("nss_cache_oslogin", LOG_PID, LOG_USER); - if (error_code == ERANGE) { - syslog(LOG_ERR, "Received unusually large passwd entry."); - } else if (error_code == EINVAL) { - syslog(LOG_ERR, "Encountered malformed passwd entry."); - } else { - syslog(LOG_ERR, "Unknown error while retrieving passwd entry."); - } - // Restore the backup. - if (backup) { - if (rename(kDefaultBackupFilePath, kDefaultFilePath)) { - syslog(LOG_ERR, "Could not restore data from backup file."); - } - } - closelog(); - } - - // Remove the backup file on success. 
- if (!error_code && backup) { - remove(kDefaultBackupFilePath); - } - return error_code; -} diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/src/include/compat.h gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/src/include/compat.h --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/src/include/compat.h 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/src/include/compat.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,62 +0,0 @@ -// Copyright 2018 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef OSLOGIN_COMPAT_H -#define OSLOGIN_COMPAT_H - -#ifdef __FreeBSD__ - -#include - -#define DECLARE_NSS_METHOD_TABLE(name, ...) 
\ - static ns_mtab name[] = {__VA_ARGS__}; - -#define NSS_METHOD(method, func) { \ - .database = NSDB_PASSWD, \ - .name = #method, \ - .method = __nss_compat_ ## method, \ - .mdata = (void*)func \ -} - -#define NSS_REGISTER_METHODS(methods) ns_mtab * \ -nss_module_register (const char *name, unsigned int *size, \ - nss_module_unregister_fn *unregister) \ -{ \ - *size = sizeof (methods) / sizeof (methods[0]); \ - *unregister = NULL; \ - return (methods); \ -} - -#define NSS_CACHE_OSLOGIN_PATH "/usr/local/etc/oslogin_passwd.cache" -#define K_DEFAULT_FILE_PATH "/usr/local/etc/oslogin_passwd.cache" -#define K_DEFAULT_BACKUP_FILE_PATH "/usr/local/etc/oslogin_passwd.cache.bak" -#define PAM_SYSLOG(pamh, ...) syslog(__VA_ARGS__) -#define DEFAULT_SHELL "/bin/sh" - -#else /* __FreeBSD__ */ - -#include - -#define DECLARE_NSS_METHOD_TABLE(name, ...) -#define NSS_CACHE_OSLOGIN_PATH "/etc/oslogin_passwd.cache" -#define NSS_METHOD_PROTOTYPE(m) -#define NSS_REGISTER_METHODS(methods) -#define K_DEFAULT_FILE_PATH "/etc/oslogin_passwd.cache" -#define K_DEFAULT_BACKUP_FILE_PATH "/etc/oslogin_passwd.cache.bak" -#define PAM_SYSLOG pam_syslog -#define DEFAULT_SHELL "/bin/bash" - -#endif /* __FreeBSD__ */ - -#endif /* OSLOGIN_COMPAT_H */ diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/src/include/nss_cache_oslogin.h gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/src/include/nss_cache_oslogin.h --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/src/include/nss_cache_oslogin.h 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/src/include/nss_cache_oslogin.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,65 +0,0 @@ -// Copyright 2018 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifndef NSS_CACHE_OSLOGIN_H -#define NSS_CACHE_OSLOGIN_H - -#ifdef DEBUG -#undef DEBUG -#define DEBUG(fmt, args...) \ - do { \ - fprintf(stderr, fmt, ##args); \ - } while (0) -#else -#define DEBUG(fmt, ...) \ - do { \ - } while (0) -#endif /* DEBUG */ - -#define NSS_CACHE_OSLOGIN_PATH_LENGTH 255 -extern char *_nss_cache_oslogin_setpwent_path(const char *path); - -enum nss_cache_oslogin_match { - NSS_CACHE_OSLOGIN_EXACT = 0, - NSS_CACHE_OSLOGIN_HIGH = 1, - NSS_CACHE_OSLOGIN_LOW = 2, - NSS_CACHE_OSLOGIN_ERROR = 3, -}; - -struct nss_cache_oslogin_args { - char *system_filename; - char *sorted_filename; - void *lookup_function; - void *lookup_value; - void *lookup_result; - char *buffer; - size_t buflen; - char *lookup_key; - size_t lookup_key_length; -}; - -#endif /* NSS_CACHE_OSLOGIN_H */ diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/src/include/oslogin_utils.h gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/src/include/oslogin_utils.h --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/src/include/oslogin_utils.h 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/src/include/oslogin_utils.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,238 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include -#include -#include -#include - -#include -#include - -#define TOTP "TOTP" -#define AUTHZEN "AUTHZEN" -#define INTERNAL_TWO_FACTOR "INTERNAL_TWO_FACTOR" -#define IDV_PREREGISTERED_PHONE "IDV_PREREGISTERED_PHONE" - -using std::string; -using std::vector; - -namespace oslogin_utils { - -// Metadata server URL. -static const char kMetadataServerUrl[] = - "http://metadata.google.internal/computeMetadata/v1/oslogin/"; - -// BufferManager encapsulates and manages a buffer and length. This class is not -// thread safe. -class BufferManager { - public: - // Create a BufferManager that will dole out chunks of buf as requested. - BufferManager(char* buf, size_t buflen); - - // Copies a string to the buffer and sets the buffer to point to that - // string. Copied string is guaranteed to be null-terminated. - // Returns false and sets errnop if there is not enough space left in the - // buffer for the string. - bool AppendString(const string& value, char** buffer, int* errnop); - - // Return a pointer to a buffer of size bytes. Returns NULL and sets errnop to - // ERANGE if there is not enough space left in the buffer for the request. - void* Reserve(size_t bytes, int* errnop); - - private: - // Whether there is space available in the buffer. - bool CheckSpaceAvailable(size_t bytes_to_write) const; - - char* buf_; - size_t buflen_; - - // Not copyable or assignable. 
- BufferManager& operator=(const BufferManager&); - BufferManager(const BufferManager&); -}; - -// Challenge represents a security challenge available to the user. -class Challenge { - public: - int id; - string type; - string status; -}; - -class Group { - public: - int64_t gid; - string name; -}; - -// NssCache caches passwd entries for getpwent_r. This is used to prevent making -// an HTTP call on every getpwent_r invocation. Stores up to cache_size entries -// at a time. This class is not thread safe. -class NssCache { - public: - explicit NssCache(int cache_size); - - // Clears and resets the NssCache. - void Reset(); - - // Whether the cache has a next passwd entry. - bool HasNextPasswd(); - - // Whether the cache has reached the last page of the database. - bool OnLastPage() { return on_last_page_; } - - // Grabs the next passwd entry. Returns true on success. Sets errnop on - // failure. - bool GetNextPasswd(BufferManager* buf, passwd* result, int* errnop); - - // Loads a json array of passwd entries in the cache, starting at the - // beginning of the cache. This will remove all previous entries in the cache. - // response is expected to be a JSON array of passwd entries. Returns - // true on success. - bool LoadJsonArrayToCache(string response); - - // Helper method that effectively implements the getpwent_r nss method. Each - // call will iterate through the OsLogin database and return the next entry. - // Internally, the cache will keep track of pages of passwd entries, and will - // make an http call to the server if necessary to retrieve additional - // entries. Returns whether passwd retrieval was successful. If true, the - // passwd result will contain valid data. - bool NssGetpwentHelper(BufferManager* buf, struct passwd* result, - int* errnop); - - // Returns the page token for requesting the next page of passwd entries. - string GetPageToken() { return page_token_; } - - private: - // The maximum size of the cache. 
- int cache_size_; - - // Vector of passwds. These are represented as stringified json object. - std::vector passwd_cache_; - - // The page token for requesting the next page of passwds. - std::string page_token_; - - // Index for requesting the next passwd from the cache. - uint32_t index_; - - // Whether the NssCache has reached the last page of the database. - bool on_last_page_; - - // Not copyable or assignable. - NssCache& operator=(const NssCache&); - NssCache(const NssCache&); -}; - -// Auto locks and unlocks a given mutex on construction/destruction. Does NOT -// take ownership of the mutex. -class MutexLock { - public: - explicit MutexLock(pthread_mutex_t* mutex) : mutex_(mutex) { - pthread_mutex_lock(mutex_); - } - - ~MutexLock() { pthread_mutex_unlock(mutex_); } - - private: - // The mutex to lock/unlock - pthread_mutex_t* const mutex_; - - // Not copyable or assignable. - MutexLock& operator=(const MutexLock); - MutexLock(const MutexLock&); -}; - -// Callback invoked when Curl completes a request. -size_t OnCurlWrite(void* buf, size_t size, size_t nmemb, void* userp); - -// Uses Curl to issue a GET request to the given url. Returns whether the -// request was successful. If successful, the result from the server will be -// stored in response, and the HTTP response code will be stored in http_code. -bool HttpGet(const string& url, string* response, long* http_code); -bool HttpPost(const string& url, const string& data, string* response, - long* http_code); - -// Returns whether user_name is a valid OsLogin user name. -bool ValidateUserName(const string& user_name); - -// URL encodes the given parameter. Returns the encoded parameter. -std::string UrlEncode(const string& param); - -// Returns true if the given passwd contains valid fields. If pw_dir, pw_shell, -// or pw_passwd are not set, this will populate these entries with default -// values. 
-bool ValidatePasswd(struct passwd* result, BufferManager* buf, int* errnop); - -// Adds users and associated array of char* to provided buffer and store pointer -// to array in result.gr_mem. -bool AddUsersToGroup(std::vector users, struct group* result, - BufferManager* buf, int* errnop); - -// Iterates through all groups until one matching provided group is found, -// replacing gr_name with a buffermanager provided string. -bool FindGroup(struct group* grp, BufferManager* buf, int* errnop); - -// Iterates through all users for a group, storing results in a provided string -// vector. -bool GetUsersForGroup(string groupname, std::vector* users, - int* errnop); - -// Iterates through all groups for a user, storing results in a provided string -// vector. -bool GetGroupsForUser(string username, std::vector* groups, int* errnop); - -// Parses a JSON groups response, storing results in a provided Group vector. -bool ParseJsonToGroups(const string& json, std::vector* groups); - -// Parses a JSON users response, storing results in a provided string vector. -bool ParseJsonToUsers(const string& json, std::vector* users); - -// Parses a JSON LoginProfiles response for SSH keys. Returns a vector of valid -// ssh_keys. A key is considered valid if it's expiration date is greater than -// current unix time. -std::vector ParseJsonToSshKeys(const string& json); - -// Parses a JSON object and returns the value associated with a given key. -bool ParseJsonToKey(const string& json, const string& key, string* response); - -// Parses a JSON LoginProfiles response and returns the email under the "name" -// field. -bool ParseJsonToEmail(const string& json, string* email); - -// Parses a JSON LoginProfiles response and populates the passwd struct with the -// corresponding values set in the JSON object. Returns whether the parse was -// successful or not. If unsuccessful, errnop will also be set. 
-bool ParseJsonToPasswd(const string& response, struct passwd* result, - BufferManager* buf, int* errnop); - -// Parses a JSON adminLogin or login response and returns whether the user has -// the requested privilege. -bool ParseJsonToSuccess(const string& json); - -// Parses a JSON startSession response into a vector of Challenge objects. -bool ParseJsonToChallenges(const string& json, vector* challenges); - -// Calls the startSession API. -bool StartSession(const string& email, string* response); - -// Calls the continueSession API. -bool ContinueSession(bool alt, const string& email, const string& user_token, - const string& session_id, const Challenge& challenge, - string* response); - -// Returns user information from the metadata server. -bool GetUser(const string& username, string* response); -} // namespace oslogin_utils diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/src/Makefile gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/src/Makefile --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/src/Makefile 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/src/Makefile 1970-01-01 00:00:00.000000000 +0000 @@ -1,86 +0,0 @@ -SHELL = /bin/sh -TOPDIR = $(realpath ..) - -VERSION = 20190801.00 - -CPPFLAGS = -Iinclude -I/usr/include/json-c -FLAGS = -fPIC -Wall -g -CFLAGS = $(FLAGS) -Wstrict-prototypes -CXXFLAGS = $(FLAGS) - -LDFLAGS = -shared -Wl,-soname,$(SONAME) -LDLIBS = -lcurl -ljson-c -PAMLIBS = -lpam $(LDLIBS) - -# Paths which should be overrideable. 
- -PREFIX = /usr -LIBDIR = $(PREFIX)/lib -BINDIR = $(PREFIX)/bin -PAMDIR = $(LIBDIR)/security -MANDIR = /usr/share/man - -NSS_OSLOGIN_SONAME = libnss_oslogin.so.2 -NSS_CACHE_OSLOGIN_SONAME = libnss_cache_oslogin.so.2 - -NSS_OSLOGIN = libnss_oslogin-$(VERSION).so -NSS_CACHE_OSLOGIN = libnss_cache_oslogin-$(VERSION).so - -PAM_LOGIN = pam_oslogin_login.so -PAM_ADMIN = pam_oslogin_admin.so - -BINARIES = google_oslogin_nss_cache google_authorized_keys - -all : $(NSS_OSLOGIN) $(NSS_CACHE_OSLOGIN) $(PAM_LOGIN) $(PAM_ADMIN) $(BINARIES) - -clean : - rm -f $(BINARIES) - find . -type f \( -iname '*.o' -o -iname '*.so' \) -delete - -.PHONY : all clean install - -# NSS modules. - -$(NSS_OSLOGIN) : SONAME = $(NSS_OSLOGIN_SONAME) -$(NSS_OSLOGIN) : nss/nss_oslogin.o utils.o - $(CXX) $(CXXFLAGS) $(CPPFLAGS) $(LDFLAGS) $^ -o $@ $(LDLIBS) - -$(NSS_CACHE_OSLOGIN) : SONAME = $(NSS_CACHE_OSLOGIN_SONAME) -$(NSS_CACHE_OSLOGIN) : nss/nss_cache_oslogin.o nss/compat/getpwent_r.o utils.o - $(CXX) $(CXXFLAGS) $(CPPFLAGS) $(LDFLAGS) $^ -o $@ $(LDLIBS) - -# PAM modules - -$(PAM_LOGIN) : pam/pam_oslogin_login.o utils.o - $(CXX) $(CXXFLAGS) $(CPPFLAGS) -shared $^ -o $@ $(PAMLIBS) - -$(PAM_ADMIN) : pam/pam_oslogin_admin.o utils.o - $(CXX) $(CXXFLAGS) $(CPPFLAGS) -shared $^ -o $@ $(PAMLIBS) - -# Utilities. 
- -google_authorized_keys : authorized_keys/authorized_keys.o utils.o - $(CXX) $(CXXFLAGS) $(CPPFLAGS) $^ -o $@ $(LDLIBS) - -google_oslogin_nss_cache: cache_refresh/cache_refresh.o utils.o - $(CXX) $(CXXFLAGS) $(CPPFLAGS) $^ -o $@ $(LDLIBS) - -install: all - install -d $(DESTDIR)$(LIBDIR) - install -d $(DESTDIR)$(PAMDIR) - install -d $(DESTDIR)$(BINDIR) - install -d $(DESTDIR)$(MANDIR)/man8 - install -m 0644 -t $(DESTDIR)$(LIBDIR) $(NSS_OSLOGIN) $(NSS_CACHE_OSLOGIN) - ln -sf $(NSS_OSLOGIN) $(DESTDIR)$(LIBDIR)/$(NSS_OSLOGIN_SONAME) - ln -sf $(NSS_CACHE_OSLOGIN) $(DESTDIR)$(LIBDIR)/$(NSS_CACHE_OSLOGIN_SONAME) - install -m 0644 -t $(DESTDIR)$(PAMDIR) $(PAM_ADMIN) $(PAM_LOGIN) - install -m 0755 -t $(DESTDIR)$(BINDIR) $(BINARIES) $(TOPDIR)/google_oslogin_control - install -m 0644 -t $(DESTDIR)$(MANDIR)/man8 $(TOPDIR)/man/nss-oslogin.8 $(TOPDIR)/man/nss-cache-oslogin.8 - gzip -9 $(DESTDIR)$(MANDIR)/man8/nss-oslogin.8 - gzip -9 $(DESTDIR)$(MANDIR)/man8/nss-cache-oslogin.8 - ln -sf nss-oslogin.8.gz $(DESTDIR)$(MANDIR)/man8/$(NSS_OSLOGIN_SONAME).8.gz - ln -sf nss-cache-oslogin.8.gz $(DESTDIR)$(MANDIR)/man8/$(NSS_CACHE_OSLOGIN_SONAME).8.gz -ifdef INSTALL_SELINUX - install -d $(DESTDIR)/usr/share/selinux/packages - install -m 0644 -t $(DESTDIR)/usr/share/selinux/packages $(TOPDIR)/selinux/oslogin.pp -endif diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/src/nss/compat/getpwent_r.c gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/src/nss/compat/getpwent_r.c --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/src/nss/compat/getpwent_r.c 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/src/nss/compat/getpwent_r.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,87 +0,0 @@ -/* - * ---------------------------------------------------------------------- - * Copyright © 2005-2014 Rich Felker, et al. 
- * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * ---------------------------------------------------------------------- - * - * Adapted from http://www.musl-libc.org/ for libnss-cache - * Copyright © 2015 Kevin Bowling - */ - -#include - -#ifdef BSD - -#include -#include -#include -#include -#include - -static unsigned atou(char **s) -{ - unsigned x; - for (x=0; **s-'0'<10U; ++*s) x=10*x+(**s-'0'); - return x; -} - -int fgetpwent_r(FILE *f, struct passwd *pw, char *line, size_t size, struct passwd **res) -{ - char *s; - int rv = 0; - for (;;) { - line[size-1] = '\xff'; - if ( (fgets(line, size, f) == NULL) || ferror(f) || line[size-1] != '\xff' ) { - rv = (line[size-1] != '\xff') ? 
ERANGE : ENOENT; - line = 0; - pw = 0; - break; - } - line[strcspn(line, "\n")] = 0; - - s = line; - pw->pw_name = s++; - if (!(s = strchr(s, ':'))) continue; - - *s++ = 0; pw->pw_passwd = s; - if (!(s = strchr(s, ':'))) continue; - - *s++ = 0; pw->pw_uid = atou(&s); - if (*s != ':') continue; - - *s++ = 0; pw->pw_gid = atou(&s); - if (*s != ':') continue; - - *s++ = 0; pw->pw_gecos = s; - if (!(s = strchr(s, ':'))) continue; - - *s++ = 0; pw->pw_dir = s; - if (!(s = strchr(s, ':'))) continue; - - *s++ = 0; pw->pw_shell = s; - break; - } - *res = pw; - if (rv) errno = rv; - return rv; -} - -#endif // ifdef BSD diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/src/nss/nss_cache_oslogin.c gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/src/nss/nss_cache_oslogin.c --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/src/nss/nss_cache_oslogin.c 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/src/nss/nss_cache_oslogin.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,274 +0,0 @@ -// Copyright 2018 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// An NSS module which adds supports for file /etc/oslogin_passwd.cache - -#include -#include - -#include - -// Locking implementation: use pthreads. 
-#include -static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; -#define NSS_CACHE_OSLOGIN_LOCK() \ - do { \ - pthread_mutex_lock(&mutex); \ - } while (0) -#define NSS_CACHE_OSLOGIN_UNLOCK() \ - do { \ - pthread_mutex_unlock(&mutex); \ - } while (0) - -static FILE *p_file = NULL; -static char p_filename[NSS_CACHE_OSLOGIN_PATH_LENGTH] = NSS_CACHE_OSLOGIN_PATH; -#ifdef BSD -extern int fgetpwent_r(FILE *, struct passwd *, char *, size_t, - struct passwd **); -#endif // ifdef BSD - -/* Common return code routine for all *ent_r_locked functions. - * We need to return TRYAGAIN if the underlying files guy raises ERANGE, - * so that our caller knows to try again with a bigger buffer. - */ - -static inline enum nss_status _nss_cache_oslogin_ent_bad_return_code( - int errnoval) { - enum nss_status ret; - - switch (errnoval) { - case ERANGE: - DEBUG("ERANGE: Try again with a bigger buffer\n"); - ret = NSS_STATUS_TRYAGAIN; - break; - case ENOENT: - default: - DEBUG("ENOENT or default case: Not found\n"); - ret = NSS_STATUS_NOTFOUND; - }; - return ret; -} - -// -// Routines for passwd map defined below here -// - -// _nss_cache_oslogin_pwuid_wrap() -// Internal wrapper for binary searches, using uid-specific calls. - -static enum nss_cache_oslogin_match _nss_cache_oslogin_pwuid_wrap( - FILE *file, struct nss_cache_oslogin_args *args) { - struct passwd *result = args->lookup_result; - uid_t *uid = args->lookup_value; - - if (fgetpwent_r(file, result, args->buffer, args->buflen, &result) == 0) { - if (result->pw_uid == *uid) { - DEBUG("SUCCESS: found user %d:%s\n", result->pw_uid, result->pw_name); - return NSS_CACHE_OSLOGIN_EXACT; - } - DEBUG("Failed match at uid %d\n", result->pw_uid); - if (result->pw_uid > *uid) { - return NSS_CACHE_OSLOGIN_HIGH; - } else { - return NSS_CACHE_OSLOGIN_LOW; - } - } - - return NSS_CACHE_OSLOGIN_ERROR; -} - -// _nss_cache_oslogin_pwnam_wrap() -// Internal wrapper for binary searches, using username-specific calls. 
- -static enum nss_cache_oslogin_match _nss_cache_oslogin_pwnam_wrap( - FILE *file, struct nss_cache_oslogin_args *args) { - struct passwd *result = args->lookup_result; - char *name = args->lookup_value; - int ret; - - if (fgetpwent_r(file, result, args->buffer, args->buflen, &result) == 0) { - ret = strcoll(result->pw_name, name); - if (ret == 0) { - DEBUG("SUCCESS: found user %s\n", result->pw_name); - return NSS_CACHE_OSLOGIN_EXACT; - } - DEBUG("Failed match at name %s\n", result->pw_name); - if (ret > 0) { - return NSS_CACHE_OSLOGIN_HIGH; - } else { - return NSS_CACHE_OSLOGIN_LOW; - } - } - - return NSS_CACHE_OSLOGIN_ERROR; -} - -// _nss_cache_oslogin_setpwent_locked() -// Internal setup routine - -static enum nss_status _nss_cache_oslogin_setpwent_locked(void) { - DEBUG("%s %s\n", "Opening", p_filename); - p_file = fopen(p_filename, "r"); - - if (p_file) { - return NSS_STATUS_SUCCESS; - } else { - return NSS_STATUS_UNAVAIL; - } -} - -// _nss_cache_oslogin_setpwent() -// Called by NSS to open the passwd file -// 'stayopen' parameter is ignored. 
- -enum nss_status _nss_cache_oslogin_setpwent(int stayopen) { - enum nss_status ret; - NSS_CACHE_OSLOGIN_LOCK(); - ret = _nss_cache_oslogin_setpwent_locked(); - NSS_CACHE_OSLOGIN_UNLOCK(); - return ret; -} - -// _nss_cache_oslogin_endpwent_locked() -// Internal close routine - -static enum nss_status _nss_cache_oslogin_endpwent_locked(void) { - DEBUG("Closing passwd.cache\n"); - if (p_file) { - fclose(p_file); - p_file = NULL; - } - return NSS_STATUS_SUCCESS; -} - -// _nss_cache_oslogin_endpwent() -// Called by NSS to close the passwd file - -enum nss_status _nss_cache_oslogin_endpwent(void) { - enum nss_status ret; - NSS_CACHE_OSLOGIN_LOCK(); - ret = _nss_cache_oslogin_endpwent_locked(); - NSS_CACHE_OSLOGIN_UNLOCK(); - return ret; -} - -// _nss_cache_oslogin_getpwent_r_locked() -// Called internally to return the next entry from the passwd file - -static enum nss_status _nss_cache_oslogin_getpwent_r_locked( - struct passwd *result, char *buffer, size_t buflen, int *errnop) { - enum nss_status ret = NSS_STATUS_SUCCESS; - - if (p_file == NULL) { - DEBUG("p_file == NULL, going to setpwent\n"); - ret = _nss_cache_oslogin_setpwent_locked(); - } - - if (ret == NSS_STATUS_SUCCESS) { - if (fgetpwent_r(p_file, result, buffer, buflen, &result) == 0) { - DEBUG("Returning user %d:%s\n", result->pw_uid, result->pw_name); - } else { - if (errno == ENOENT) { - errno = 0; - } - *errnop = errno; - ret = _nss_cache_oslogin_ent_bad_return_code(*errnop); - } - } - - return ret; -} - -// _nss_cache_oslogin_getpwent_r() -// Called by NSS to look up next entry in passwd file - -enum nss_status _nss_cache_oslogin_getpwent_r(struct passwd *result, - char *buffer, size_t buflen, - int *errnop) { - enum nss_status ret; - NSS_CACHE_OSLOGIN_LOCK(); - ret = _nss_cache_oslogin_getpwent_r_locked(result, buffer, buflen, errnop); - NSS_CACHE_OSLOGIN_UNLOCK(); - return ret; -} - -// _nss_cache_oslogin_getpwuid_r() -// Find a user account by uid - -enum nss_status 
_nss_cache_oslogin_getpwuid_r(uid_t uid, struct passwd *result, - char *buffer, size_t buflen, - int *errnop) { - enum nss_status ret; - - NSS_CACHE_OSLOGIN_LOCK(); - ret = _nss_cache_oslogin_setpwent_locked(); - - if (ret == NSS_STATUS_SUCCESS) { - while ((ret = _nss_cache_oslogin_getpwent_r_locked( - result, buffer, buflen, errnop)) == NSS_STATUS_SUCCESS) { - if (result->pw_uid == uid) break; - } - } - - _nss_cache_oslogin_endpwent_locked(); - NSS_CACHE_OSLOGIN_UNLOCK(); - - return ret; -} - -// _nss_cache_oslogin_getpwnam_r() -// Find a user account by name - -enum nss_status _nss_cache_oslogin_getpwnam_r(const char *name, - struct passwd *result, - char *buffer, size_t buflen, - int *errnop) { - enum nss_status ret; - - NSS_CACHE_OSLOGIN_LOCK(); - ret = _nss_cache_oslogin_setpwent_locked(); - - if (ret == NSS_STATUS_SUCCESS) { - while ((ret = _nss_cache_oslogin_getpwent_r_locked( - result, buffer, buflen, errnop)) == NSS_STATUS_SUCCESS) { - if (!strcmp(result->pw_name, name)) break; - } - } - - _nss_cache_oslogin_endpwent_locked(); - NSS_CACHE_OSLOGIN_UNLOCK(); - - return ret; -} - -NSS_METHOD_PROTOTYPE(__nss_compat_getpwnam_r); -NSS_METHOD_PROTOTYPE(__nss_compat_getpwuid_r); -NSS_METHOD_PROTOTYPE(__nss_compat_getpwent_r); -NSS_METHOD_PROTOTYPE(__nss_compat_setpwent); -NSS_METHOD_PROTOTYPE(__nss_compat_endpwent); - -DECLARE_NSS_METHOD_TABLE(methods, - { NSDB_PASSWD, "getpwnam_r", __nss_compat_getpwnam_r, - (void*)_nss_cache_oslogin_getpwnam_r }, - { NSDB_PASSWD, "getpwuid_r", __nss_compat_getpwuid_r, - (void*)_nss_cache_oslogin_getpwuid_r }, - { NSDB_PASSWD, "getpwent_r", __nss_compat_getpwent_r, - (void*)_nss_cache_oslogin_getpwent_r }, - { NSDB_PASSWD, "endpwent", __nss_compat_endpwent, - (void*)_nss_cache_oslogin_endpwent }, - { NSDB_PASSWD, "setpwent", __nss_compat_setpwent, - (void*)_nss_cache_oslogin_setpwent }, -) - -NSS_REGISTER_METHODS(methods) diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/src/nss/nss_oslogin.cc 
gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/src/nss/nss_oslogin.cc --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/src/nss/nss_oslogin.cc 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/src/nss/nss_oslogin.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,209 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -using std::string; - -using oslogin_utils::AddUsersToGroup; -using oslogin_utils::BufferManager; -using oslogin_utils::FindGroup; -using oslogin_utils::GetGroupsForUser; -using oslogin_utils::GetUsersForGroup; -using oslogin_utils::Group; -using oslogin_utils::HttpGet; -using oslogin_utils::kMetadataServerUrl; -using oslogin_utils::MutexLock; -using oslogin_utils::NssCache; -using oslogin_utils::ParseJsonToPasswd; -using oslogin_utils::UrlEncode; - -// Size of the NssCache. This also determines how many users will be requested -// per HTTP call. -static const uint64_t kNssCacheSize = 2048; - -// NssCache for storing passwd entries. -static NssCache nss_cache(kNssCacheSize); - -// Protects access to nss_cache. 
-static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER; - -extern "C" { - -// Get a passwd entry by id. -enum nss_status _nss_oslogin_getpwuid_r(uid_t uid, struct passwd *result, - char *buffer, size_t buflen, - int *errnop) { - BufferManager buffer_manager(buffer, buflen); - std::stringstream url; - url << kMetadataServerUrl << "users?uid=" << uid; - string response; - long http_code = 0; - if (!HttpGet(url.str(), &response, &http_code) || http_code != 200 || - response.empty()) { - *errnop = ENOENT; - return NSS_STATUS_NOTFOUND; - } - if (!ParseJsonToPasswd(response, result, &buffer_manager, errnop)) { - if (*errnop == EINVAL) { - openlog("nss_oslogin", LOG_PID, LOG_USER); - syslog(LOG_ERR, "Received malformed response from server: %s", - response.c_str()); - closelog(); - } - return *errnop == ERANGE ? NSS_STATUS_TRYAGAIN : NSS_STATUS_NOTFOUND; - } - return NSS_STATUS_SUCCESS; -} - -// Get a passwd entry by name. -enum nss_status _nss_oslogin_getpwnam_r(const char *name, struct passwd *result, - char *buffer, size_t buflen, - int *errnop) { - BufferManager buffer_manager(buffer, buflen); - std::stringstream url; - url << kMetadataServerUrl << "users?username=" << UrlEncode(name); - string response; - long http_code = 0; - if (!HttpGet(url.str(), &response, &http_code) || http_code != 200 || - response.empty()) { - *errnop = ENOENT; - return NSS_STATUS_NOTFOUND; - } - if (!ParseJsonToPasswd(response, result, &buffer_manager, errnop)) { - if (*errnop == EINVAL) { - openlog("nss_oslogin", LOG_PID, LOG_USER); - syslog(LOG_ERR, "Received malformed response from server: %s", - response.c_str()); - closelog(); - } - return *errnop == ERANGE ? NSS_STATUS_TRYAGAIN : NSS_STATUS_NOTFOUND; - } - return NSS_STATUS_SUCCESS; -} - -enum nss_status _nss_oslogin_getgrby(struct group *grp, char *buf, - size_t buflen, int *errnop) { - BufferManager buffer_manager(buf, buflen); - if (!FindGroup(grp, &buffer_manager, errnop)) - return *errnop == ERANGE ? 
NSS_STATUS_TRYAGAIN : NSS_STATUS_NOTFOUND; - - std::vector users; - if (!GetUsersForGroup(grp->gr_name, &users, errnop)) - return *errnop == ERANGE ? NSS_STATUS_TRYAGAIN : NSS_STATUS_NOTFOUND; - - if (!AddUsersToGroup(users, grp, &buffer_manager, errnop)) - return *errnop == ERANGE ? NSS_STATUS_TRYAGAIN : NSS_STATUS_NOTFOUND; - - return NSS_STATUS_SUCCESS; -} - -enum nss_status _nss_oslogin_getgrgid_r(gid_t gid, struct group *grp, char *buf, - size_t buflen, int *errnop) { - grp->gr_gid = gid; - return _nss_oslogin_getgrby(grp, buf, buflen, errnop); -} - -enum nss_status _nss_oslogin_getgrnam_r(const char *name, struct group *grp, - char *buf, size_t buflen, int *errnop) { - grp->gr_name = (char *)name; - return _nss_oslogin_getgrby(grp, buf, buflen, errnop); -} - -enum nss_status _nss_oslogin_initgroups_dyn(const char *user, gid_t skipgroup, - long int *start, long int *size, - gid_t **groupsp, long int limit, - int *errnop) { - std::vector grouplist; - if (!GetGroupsForUser(string(user), &grouplist, errnop)) { - return NSS_STATUS_NOTFOUND; - } - - gid_t *groups = *groupsp; - for (int i = 0; i < (int) grouplist.size(); i++) { - // Resize the buffer if needed. - if (*start == *size) { - gid_t *newgroups; - long int newsize = 2 * *size; - // Stop at limit if provided. - if (limit > 0) { - if (*size >= limit) { - *errnop = ERANGE; - return NSS_STATUS_TRYAGAIN; - } - newsize = MIN(limit, newsize); - } - newgroups = (gid_t *)realloc(groups, newsize * sizeof(gid_t *)); - if (newgroups == NULL) { - *errnop = EAGAIN; - return NSS_STATUS_TRYAGAIN; - } - *groupsp = groups = newgroups; - *size = newsize; - } - groups[(*start)++] = grouplist[i].gid; - } - return NSS_STATUS_SUCCESS; -} - -// nss_getpwent_r() is intentionally left unimplemented. This functionality is -// now covered by the nss_cache binary and nss_cache module. 
- -nss_status _nss_oslogin_getpwent_r() { return NSS_STATUS_NOTFOUND; } -nss_status _nss_oslogin_endpwent() { return NSS_STATUS_SUCCESS; } -nss_status _nss_oslogin_setpwent() { return NSS_STATUS_SUCCESS; } - -NSS_METHOD_PROTOTYPE(__nss_compat_getpwnam_r); -NSS_METHOD_PROTOTYPE(__nss_compat_getpwuid_r); -NSS_METHOD_PROTOTYPE(__nss_compat_getpwent_r); -NSS_METHOD_PROTOTYPE(__nss_compat_setpwent); -NSS_METHOD_PROTOTYPE(__nss_compat_endpwent); -NSS_METHOD_PROTOTYPE(__nss_compat_getgrnam_r); -NSS_METHOD_PROTOTYPE(__nss_compat_getgrgid_r); - -DECLARE_NSS_METHOD_TABLE(methods, - {NSDB_PASSWD, "getpwnam_r", __nss_compat_getpwnam_r, - (void *)_nss_oslogin_getpwnam_r}, - {NSDB_PASSWD, "getpwuid_r", __nss_compat_getpwuid_r, - (void *)_nss_oslogin_getpwuid_r}, - {NSDB_PASSWD, "getpwent_r", __nss_compat_getpwent_r, - (void *)_nss_oslogin_getpwent_r}, - {NSDB_PASSWD, "endpwent", __nss_compat_endpwent, - (void *)_nss_oslogin_endpwent}, - {NSDB_PASSWD, "setpwent", __nss_compat_setpwent, - (void *)_nss_oslogin_setpwent}, - {NSDB_GROUP, "getgrnam_r", __nss_compat_getgrnam_r, - (void *)_nss_oslogin_getgrnam_r}, - {NSDB_GROUP, "getgrgid_r", __nss_compat_getgrgid_r, - (void *)_nss_oslogin_getgrgid_r}, ) - -NSS_REGISTER_METHODS(methods) -} // extern "C" diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/src/pam/pam_oslogin_admin.cc gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/src/pam/pam_oslogin_admin.cc --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/src/pam/pam_oslogin_admin.cc 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/src/pam/pam_oslogin_admin.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,100 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#define PAM_SM_ACCOUNT -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include -#include - -using std::string; - -using oslogin_utils::HttpGet; -using oslogin_utils::GetUser; -using oslogin_utils::kMetadataServerUrl; -using oslogin_utils::ParseJsonToKey; -using oslogin_utils::ParseJsonToEmail; -using oslogin_utils::ParseJsonToSuccess; -using oslogin_utils::UrlEncode; -using oslogin_utils::ValidateUserName; - -static const char kSudoersDir[] = "/var/google-sudoers.d/"; - -extern "C" { - -PAM_EXTERN int pam_sm_acct_mgmt(pam_handle_t *pamh, int flags, int argc, - const char **argv) { - // The return value for this module should generally be ignored. By default we - // will return PAM_SUCCESS. - int pam_result = PAM_SUCCESS; - const char *user_name; - if ((pam_result = pam_get_user(pamh, &user_name, NULL)) != PAM_SUCCESS) { - PAM_SYSLOG(pamh, LOG_INFO, "Could not get pam user."); - return pam_result; - } - - if (!ValidateUserName(user_name)) { - // If the user name is not a valid oslogin user, don't bother continuing. 
- return PAM_SUCCESS; - } - - string response; - if (!GetUser(user_name, &response)) { - return PAM_SUCCESS; - } - - string email; - if (!ParseJsonToEmail(response, &email) || email.empty()) { - return PAM_SUCCESS; - } - - std::stringstream url; - url << kMetadataServerUrl << "authorize?email=" << UrlEncode(email) - << "&policy=adminLogin"; - - string filename = kSudoersDir; - filename.append(user_name); - struct stat buffer; - bool file_exists = !stat(filename.c_str(), &buffer); - long http_code; - if (HttpGet(url.str(), &response, &http_code) && http_code == 200 && - ParseJsonToSuccess(response)) { - if (!file_exists) { - PAM_SYSLOG(pamh, LOG_INFO, - "Granting sudo permissions to organization user %s.", - user_name); - std::ofstream sudoers_file; - sudoers_file.open(filename.c_str()); - sudoers_file << user_name << " ALL=(ALL:ALL) NOPASSWD: ALL" - << "\n"; - sudoers_file.close(); - chown(filename.c_str(), 0, 0); - chmod(filename.c_str(), S_IRUSR | S_IRGRP); - } - } else if (file_exists) { - remove(filename.c_str()); - } - return pam_result; -} -} diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/src/pam/pam_oslogin_login.cc gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/src/pam/pam_oslogin_login.cc --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/src/pam/pam_oslogin_login.cc 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/src/pam/pam_oslogin_login.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,273 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#define PAM_SM_ACCOUNT -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include -#include - -using oslogin_utils::ContinueSession; -using oslogin_utils::GetUser; -using oslogin_utils::HttpGet; -using oslogin_utils::HttpPost; -using oslogin_utils::kMetadataServerUrl; -using oslogin_utils::ParseJsonToChallenges; -using oslogin_utils::ParseJsonToKey; -using oslogin_utils::ParseJsonToEmail; -using oslogin_utils::ParseJsonToSuccess; -using oslogin_utils::StartSession; -using oslogin_utils::UrlEncode; -using oslogin_utils::ValidateUserName; - -static const char kUsersDir[] = "/var/google-users.d/"; - -extern "C" { -PAM_EXTERN int pam_sm_acct_mgmt(pam_handle_t *pamh, int flags, int argc, - const char **argv) { - const char *user_name; - if (pam_get_user(pamh, &user_name, NULL) != PAM_SUCCESS) { - PAM_SYSLOG(pamh, LOG_INFO, "Could not get pam user."); - return PAM_AUTH_ERR; - } - - if (!ValidateUserName(user_name)) { - // Not a valid OS Login username. 
- return PAM_IGNORE; - } - - std::string users_filename = kUsersDir; - users_filename.append(user_name); - struct stat buffer; - bool file_exists = !stat(users_filename.c_str(), &buffer); - - std::string str_user_name(user_name); - std::stringstream url; - url << kMetadataServerUrl << "users?username=" << UrlEncode(str_user_name); - - std::string response; - long http_code = 0; - if (!HttpGet(url.str(), &response, &http_code) || response.empty() || - http_code != 200) { - if (http_code == 404) { - // This module is only consulted for OS Login users. - return PAM_IGNORE; - } - - // Check local file for that user as a last resort. - if (file_exists) { - return PAM_PERM_DENIED; - } - - // We can't confirm this is an OS Login user, ignore module. - return PAM_IGNORE; - } - - std::string email; - if (!ParseJsonToEmail(response, &email) || email.empty()) { - return PAM_AUTH_ERR; - } - - url.str(""); - url << kMetadataServerUrl << "authorize?email=" << UrlEncode(email) - << "&policy=login"; - if (HttpGet(url.str(), &response, &http_code) && http_code == 200 && - ParseJsonToSuccess(response)) { - if (!file_exists) { - std::ofstream users_file(users_filename.c_str()); - chown(users_filename.c_str(), 0, 0); - chmod(users_filename.c_str(), S_IRUSR | S_IWUSR | S_IRGRP); - } - PAM_SYSLOG(pamh, LOG_INFO, - "Organization user %s has login permission.", - user_name); - return PAM_SUCCESS; - } else { - if (file_exists) { - remove(users_filename.c_str()); - } - PAM_SYSLOG(pamh, LOG_INFO, - "Organization user %s does not have login permission.", - user_name); - - return PAM_PERM_DENIED; - } -} - -PAM_EXTERN int pam_sm_setcred(pam_handle_t * pamh, int flags, int argc, - const char **argv) { - return PAM_SUCCESS; -} - -PAM_EXTERN int pam_sm_authenticate(pam_handle_t * pamh, int flags, - int argc, const char **argv) -{ - const char* user_name; - if (pam_get_user(pamh, &user_name, NULL) != PAM_SUCCESS) { - PAM_SYSLOG(pamh, LOG_INFO, "Could not get pam user."); - return PAM_PERM_DENIED; - 
} - - std::string str_user_name(user_name); - if (!ValidateUserName(user_name)) { - return PAM_PERM_DENIED; - } - - std::string response; - if (!(GetUser(str_user_name, &response))) { - return PAM_PERM_DENIED; - } - - // System accounts begin with the prefix `sa_`. - std::string sa_prefix = "sa_"; - if (str_user_name.compare(0, sa_prefix.size(), sa_prefix) == 0) { - return PAM_SUCCESS; - } - - std::string email; - if (!ParseJsonToEmail(response, &email) || email.empty()) { - return PAM_PERM_DENIED; - } - - response = ""; - if (!StartSession(email, &response)) { - PAM_SYSLOG(pamh, LOG_ERR, - "Bad response from the two-factor start session request: %s", - response.empty() ? "empty response" : response.c_str()); - return PAM_PERM_DENIED; - } - - std::string status; - if (!ParseJsonToKey(response, "status", &status)) { - PAM_SYSLOG(pamh, LOG_ERR, - "Failed to parse status from start session response"); - return PAM_PERM_DENIED; - } - - if (status == "NO_AVAILABLE_CHALLENGES") { - return PAM_SUCCESS; // User is not two-factor enabled. 
- } - - std::string session_id; - if (!ParseJsonToKey(response, "sessionId", &session_id)) { - return PAM_PERM_DENIED; - } - - std::vector challenges; - if (!ParseJsonToChallenges(response, &challenges)) { - PAM_SYSLOG(pamh, LOG_ERR, - "Failed to parse challenge values from JSON response"); - return PAM_PERM_DENIED; - } - - std::map user_prompts; - user_prompts[AUTHZEN] = "Google phone prompt"; - user_prompts[TOTP] = "Security code from Google Authenticator application"; - user_prompts[INTERNAL_TWO_FACTOR] = "Security code from security key"; - user_prompts[IDV_PREREGISTERED_PHONE] = - "Voice or text message verification code"; - - oslogin_utils::Challenge challenge; - if (challenges.size() > 1) { - std::stringstream prompt; - prompt << "Available authentication methods: "; - for(vector::size_type i = 0; - i != challenges.size(); ++i) - prompt << "\n" << i+1 << ": " << user_prompts[challenges[i].type]; - prompt << "\n\nEnter a number: "; - - char *choice = NULL; - if (pam_prompt(pamh, PAM_PROMPT_ECHO_ON, &choice, "%s", - prompt.str().c_str()) != PAM_SUCCESS) { - pam_error(pamh, "Unable to get user input"); - } - - int choicei; - if (sscanf(choice, "%d", &choicei) == EOF) { - pam_error(pamh, "Unable to get user input"); - } - if (choicei > challenges.size()) { - pam_error(pamh, "Invalid option"); - } - challenge = challenges[choicei - 1]; - } else { - challenge = challenges[0]; - } - - if (challenge.status != "READY") { - // Call continueSession with the START_ALTERNATE flag. - if (!ContinueSession(true, email, "", session_id, challenge, &response)) { - PAM_SYSLOG(pamh, LOG_ERR, - "Bad response from two-factor continue session request: %s", - response.empty() ? 
"empty response" : response.c_str()); - return PAM_PERM_DENIED; - } - } - - char* user_token = NULL; - if (challenge.type == INTERNAL_TWO_FACTOR) { - if (pam_prompt(pamh, PAM_PROMPT_ECHO_ON, &user_token, - "Enter your security code: ") != PAM_SUCCESS) { - pam_error(pamh, "Unable to get user input"); - } - } else if (challenge.type == TOTP) { - if (pam_prompt(pamh, PAM_PROMPT_ECHO_ON, &user_token, - "Enter your one-time password: ") != PAM_SUCCESS) { - pam_error(pamh, "Unable to get user input"); - } - } else if (challenge.type == AUTHZEN) { - if (pam_prompt(pamh, PAM_PROMPT_ECHO_ON, &user_token, - "A login prompt has been sent to your enrolled device. " - "Press enter to continue") != PAM_SUCCESS) { - pam_error(pamh, "Unable to get user input"); - } - } else if (challenge.type == IDV_PREREGISTERED_PHONE) { - if (pam_prompt(pamh, PAM_PROMPT_ECHO_ON, &user_token, - "A security code has been sent to your phone. " - "Enter code to continue: ") != PAM_SUCCESS) { - pam_error(pamh, "Unable to get user input"); - } - } else { - PAM_SYSLOG(pamh, LOG_ERR, "Unsupported challenge type %s", - challenge.type.c_str()); - return PAM_PERM_DENIED; - } - - if (!ContinueSession(false, email, user_token, session_id, challenge, - &response)) { - PAM_SYSLOG(pamh, LOG_ERR, - "Bad response from two-factor continue session request: %s", - response.empty() ? 
"empty response" : response.c_str()); - return PAM_PERM_DENIED; - } - - if (!ParseJsonToKey(response, "status", &status) - || status != "AUTHENTICATED") { - return PAM_PERM_DENIED; - } - - return PAM_SUCCESS; -} -} diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/src/utils.cc gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/src/utils.cc --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/src/utils.cc 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/src/utils.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,864 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Requires libcurl4-openssl-dev libjson0 and libjson0-dev -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#if defined(__clang__) || __GNUC__ > 4 || \ - (__GNUC__ == 4 && \ - (__GNUC_MINOR__ > 9 || (__GNUC_MINOR__ == 9 && __GNUC_PATCHLEVEL__ > 0))) -#include -#define Regex std -#else -#include -#define Regex boost -#endif - -#include -#include - -using std::string; - -// Maximum number of retries for HTTP requests. -const int kMaxRetries = 1; - -// Regex for validating user names. 
-const char kUserNameRegex[] = "^[a-zA-Z0-9._][a-zA-Z0-9._-]{0,31}$"; - -namespace oslogin_utils { - -BufferManager::BufferManager(char* buf, size_t buflen) - : buf_(buf), buflen_(buflen) {} - -bool BufferManager::AppendString(const string& value, char** buffer, - int* errnop) { - size_t bytes_to_write = value.length() + 1; - *buffer = static_cast(Reserve(bytes_to_write, errnop)); - if (*buffer == NULL) { - return false; - } - strncpy(*buffer, value.c_str(), bytes_to_write); - return true; -} - -bool BufferManager::CheckSpaceAvailable(size_t bytes_to_write) const { - if (bytes_to_write > buflen_) { - return false; - } - return true; -} - -void* BufferManager::Reserve(size_t bytes, int* errnop) { - if (!CheckSpaceAvailable(bytes)) { - *errnop = ERANGE; - return NULL; - } - void* result = buf_; - buf_ += bytes; - buflen_ -= bytes; - return result; -} - -NssCache::NssCache(int cache_size) - : cache_size_(cache_size), - passwd_cache_(cache_size), - page_token_(""), - on_last_page_(false) {} - -void NssCache::Reset() { - page_token_ = ""; - index_ = 0; - passwd_cache_.clear(); - on_last_page_ = false; -} - -bool NssCache::HasNextPasswd() { - return (index_ < passwd_cache_.size()) && !passwd_cache_[index_].empty(); -} - -bool NssCache::GetNextPasswd(BufferManager* buf, passwd* result, int* errnop) { - if (!HasNextPasswd()) { - *errnop = ENOENT; - return false; - } - string cached_passwd = passwd_cache_[index_]; - bool success = ParseJsonToPasswd(cached_passwd, result, buf, errnop); - if (success) { - index_++; - } - return success; -} - -bool NssCache::LoadJsonArrayToCache(string response) { - Reset(); - json_object* root = NULL; - root = json_tokener_parse(response.c_str()); - if (root == NULL) { - return false; - } - // First grab the page token. 
- json_object* page_token_object; - if (json_object_object_get_ex(root, "nextPageToken", &page_token_object)) { - page_token_ = json_object_get_string(page_token_object); - } else { - // If the page token is not found, assume something went wrong. - page_token_ = ""; - on_last_page_ = true; - return false; - } - // A page_token of 0 means we are done. This response will not contain any - // login profiles. - if (page_token_ == "0") { - page_token_ = ""; - on_last_page_ = true; - return false; - } - // Now grab all of the loginProfiles. - json_object* login_profiles = NULL; - if (!json_object_object_get_ex(root, "loginProfiles", &login_profiles)) { - page_token_ = ""; - return false; - } - if (json_object_get_type(login_profiles) != json_type_array) { - return false; - } - int arraylen = json_object_array_length(login_profiles); - if (arraylen == 0 || arraylen > cache_size_) { - page_token_ = ""; - return false; - } - for (int i = 0; i < arraylen; i++) { - json_object* profile = json_object_array_get_idx(login_profiles, i); - passwd_cache_.push_back( - json_object_to_json_string_ext(profile, JSON_C_TO_STRING_PLAIN)); - } - return true; -} - -bool NssCache::NssGetpwentHelper(BufferManager* buf, struct passwd* result, - int* errnop) { - if (!HasNextPasswd() && !OnLastPage()) { - std::stringstream url; - url << kMetadataServerUrl << "users?pagesize=" << cache_size_; - string page_token = GetPageToken(); - if (!page_token.empty()) { - url << "&pagetoken=" << page_token; - } - string response; - long http_code = 0; - if (!HttpGet(url.str(), &response, &http_code) || http_code != 200 || - response.empty() || !LoadJsonArrayToCache(response)) { - // It is possible this to be true after LoadJsonArrayToCache(), so we - // must check it again. 
- if (!OnLastPage()) { - *errnop = ENOENT; - } - return false; - } - } - if (HasNextPasswd() && !GetNextPasswd(buf, result, errnop)) { - return false; - } - return true; -} - -size_t OnCurlWrite(void* buf, size_t size, size_t nmemb, void* userp) { - if (userp) { - std::ostream& os = *static_cast(userp); - std::streamsize len = size * nmemb; - if (os.write(static_cast(buf), len)) { - return len; - } - } - return 0; -} - -bool HttpDo(const string& url, const string& data, string* response, - long* http_code) { - if (response == NULL || http_code == NULL) { - return false; - } - CURLcode code(CURLE_FAILED_INIT); - curl_global_init(CURL_GLOBAL_ALL & ~CURL_GLOBAL_SSL); - CURL* curl = curl_easy_init(); - std::ostringstream response_stream; - int retry_count = 0; - if (curl) { - struct curl_slist* header_list = NULL; - header_list = curl_slist_append(header_list, "Metadata-Flavor: Google"); - if (header_list == NULL) { - curl_easy_cleanup(curl); - curl_global_cleanup(); - return false; - } - do { - response_stream.str(""); - response_stream.clear(); - curl_easy_setopt(curl, CURLOPT_HTTPHEADER, header_list); - curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, &OnCurlWrite); - curl_easy_setopt(curl, CURLOPT_FILE, &response_stream); - curl_easy_setopt(curl, CURLOPT_TIMEOUT, 5); - curl_easy_setopt(curl, CURLOPT_URL, url.c_str()); - if (data != "") { - curl_easy_setopt(curl, CURLOPT_POSTFIELDS, data.c_str()); - } - - code = curl_easy_perform(curl); - if (code != CURLE_OK) { - curl_easy_cleanup(curl); - curl_global_cleanup(); - return false; - } - curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, http_code); - } while (retry_count++ < kMaxRetries && *http_code == 500); - curl_slist_free_all(header_list); - } - *response = response_stream.str(); - curl_easy_cleanup(curl); - curl_global_cleanup(); - return true; -} - -bool HttpGet(const string& url, string* response, long* http_code) { - return HttpDo(url, "", response, http_code); -} - -bool HttpPost(const string& url, const string& 
data, string* response, - long* http_code) { - return HttpDo(url, data, response, http_code); -} - -bool ValidateUserName(const string& user_name) { - Regex::regex r(kUserNameRegex); - return Regex::regex_match(user_name, r); -} - -string UrlEncode(const string& param) { - CURL* curl = curl_easy_init(); - char* encoded = curl_easy_escape(curl, param.c_str(), param.length()); - if (encoded == NULL) { - curl_easy_cleanup(curl); - return ""; - } - string encoded_param = encoded; - curl_free(encoded); - curl_easy_cleanup(curl); - return encoded_param; -} - -bool ValidatePasswd(struct passwd* result, BufferManager* buf, int* errnop) { - // OS Login disallows uids less than 1000. - if (result->pw_uid < 1000) { - *errnop = EINVAL; - return false; - } - if (result->pw_gid == 0) { - *errnop = EINVAL; - return false; - } - if (strlen(result->pw_name) == 0) { - *errnop = EINVAL; - return false; - } - if (strlen(result->pw_dir) == 0) { - string home_dir = "/home/"; - home_dir.append(result->pw_name); - if (!buf->AppendString(home_dir, &result->pw_dir, errnop)) { - return false; - } - } - if (strlen(result->pw_shell) == 0) { - if (!buf->AppendString(DEFAULT_SHELL, &result->pw_shell, errnop)) { - return false; - } - } - - // OS Login does not utilize the passwd field and reserves the gecos field. - // Set these to be empty. 
- if (!buf->AppendString("", &result->pw_gecos, errnop)) { - return false; - } - if (!buf->AppendString("", &result->pw_passwd, errnop)) { - return false; - } - return true; -} - -bool ParseJsonToUsers(const string& json, std::vector* result) { - json_object* root = NULL; - root = json_tokener_parse(json.c_str()); - if (root == NULL) { - return false; - } - - json_object* users = NULL; - if (!json_object_object_get_ex(root, "usernames", &users)) { - return false; - } - if (json_object_get_type(users) != json_type_array) { - return false; - } - for (int idx = 0; idx < json_object_array_length(users); idx++) { - json_object* user = json_object_array_get_idx(users, idx); - const char* username = json_object_get_string(user); - result->push_back(string(username)); - } - return true; -} - -bool ParseJsonToGroups(const string& json, std::vector* result) { - json_object* root = NULL; - root = json_tokener_parse(json.c_str()); - if (root == NULL) { - return false; - } - - json_object* groups = NULL; - if (!json_object_object_get_ex(root, "posixGroups", &groups)) { - return false; - } - if (json_object_get_type(groups) != json_type_array) { - return false; - } - for (int idx = 0; idx < json_object_array_length(groups); idx++) { - json_object* group = json_object_array_get_idx(groups, idx); - - json_object* gid; - if (!json_object_object_get_ex(group, "gid", &gid)) { - return false; - } - - json_object* name; - if (!json_object_object_get_ex(group, "name", &name)) { - return false; - } - - Group g; - g.gid = json_object_get_int64(gid); - // get_int64 will confusingly return 0 if the string can't be converted to - // an integer. We can't rely on type check as it may be a string in the API. 
- if (g.gid == 0) { - return false; - } - g.name = json_object_get_string(name); - if (g.name == "") { - return false; - } - - result->push_back(g); - } - return true; -} - -std::vector ParseJsonToSshKeys(const string& json) { - std::vector result; - json_object* root = NULL; - root = json_tokener_parse(json.c_str()); - if (root == NULL) { - return result; - } - // Locate the sshPublicKeys object. - json_object* login_profiles = NULL; - if (!json_object_object_get_ex(root, "loginProfiles", &login_profiles)) { - return result; - } - if (json_object_get_type(login_profiles) != json_type_array) { - return result; - } - login_profiles = json_object_array_get_idx(login_profiles, 0); - - json_object* ssh_public_keys = NULL; - if (!json_object_object_get_ex(login_profiles, "sshPublicKeys", - &ssh_public_keys)) { - return result; - } - - if (json_object_get_type(ssh_public_keys) != json_type_object) { - return result; - } - json_object_object_foreach(ssh_public_keys, key, obj) { - (void)(key); - if (json_object_get_type(obj) != json_type_object) { - continue; - } - string key_to_add = ""; - bool expired = false; - json_object_object_foreach(obj, key, val) { - string string_key(key); - int val_type = json_object_get_type(val); - if (string_key == "key") { - if (val_type != json_type_string) { - continue; - } - key_to_add = (char*)json_object_get_string(val); - } - if (string_key == "expirationTimeUsec") { - if (val_type == json_type_int || val_type == json_type_string) { - uint64_t expiry_usec = (uint64_t)json_object_get_int64(val); - struct timeval tp; - gettimeofday(&tp, NULL); - uint64_t cur_usec = tp.tv_sec * 1000000 + tp.tv_usec; - expired = cur_usec > expiry_usec; - } else { - continue; - } - } - } - if (!key_to_add.empty() && !expired) { - result.push_back(key_to_add); - } - } - return result; -} - -bool ParseJsonToPasswd(const string& json, struct passwd* result, - BufferManager* buf, int* errnop) { - json_object* root = NULL; - root = 
json_tokener_parse(json.c_str()); - if (root == NULL) { - *errnop = ENOENT; - return false; - } - json_object* login_profiles = NULL; - // If this is called from getpwent_r, loginProfiles won't be in the response. - if (json_object_object_get_ex(root, "loginProfiles", &login_profiles)) { - if (json_object_get_type(login_profiles) != json_type_array) { - return false; - } - root = login_profiles; - root = json_object_array_get_idx(root, 0); - } - // Locate the posixAccounts object. - json_object* posix_accounts = NULL; - if (!json_object_object_get_ex(root, "posixAccounts", &posix_accounts)) { - *errnop = ENOENT; - return false; - } - if (json_object_get_type(posix_accounts) != json_type_array) { - return false; - } - posix_accounts = json_object_array_get_idx(posix_accounts, 0); - - // Populate with some default values that ValidatePasswd can detect if they - // are not set. - result->pw_uid = 0; - result->pw_shell = (char*)""; - result->pw_name = (char*)""; - result->pw_dir = (char*)""; - - // Iterate through the json response and populate the passwd struct. - if (json_object_get_type(posix_accounts) != json_type_object) { - return false; - } - json_object_object_foreach(posix_accounts, key, val) { - int val_type = json_object_get_type(val); - // Convert char* to c++ string for easier comparison. - string string_key(key); - - if (string_key == "uid") { - if (val_type == json_type_int || val_type == json_type_string) { - result->pw_uid = (uint32_t)json_object_get_int64(val); - if (result->pw_uid == 0) { - *errnop = EINVAL; - return false; - } - } else { - *errnop = EINVAL; - return false; - } - } else if (string_key == "gid") { - if (val_type == json_type_int || val_type == json_type_string) { - result->pw_gid = (uint32_t)json_object_get_int64(val); - // Use the uid as the default group when gid is not set or is zero. 
- if (result->pw_gid == 0) { - result->pw_gid = result->pw_uid; - } - } else { - *errnop = EINVAL; - return false; - } - } else if (string_key == "username") { - if (val_type != json_type_string) { - *errnop = EINVAL; - return false; - } - if (!buf->AppendString((char*)json_object_get_string(val), - &result->pw_name, errnop)) { - return false; - } - } else if (string_key == "homeDirectory") { - if (val_type != json_type_string) { - *errnop = EINVAL; - return false; - } - if (!buf->AppendString((char*)json_object_get_string(val), - &result->pw_dir, errnop)) { - return false; - } - } else if (string_key == "shell") { - if (val_type != json_type_string) { - *errnop = EINVAL; - return false; - } - if (!buf->AppendString((char*)json_object_get_string(val), - &result->pw_shell, errnop)) { - return false; - } - } - } - - return ValidatePasswd(result, buf, errnop); -} - -bool AddUsersToGroup(std::vector users, struct group* result, - BufferManager* buf, int* errnop) { - if (users.size() < 1) { - return true; - } - - // Get some space for the char* array for number of users + 1 for NULL cap. - char** bufp; - if (!(bufp = - (char**)buf->Reserve(sizeof(char*) * (users.size() + 1), errnop))) { - return false; - } - result->gr_mem = bufp; - - for (int i = 0; i < (int) users.size(); i++) { - if (!buf->AppendString(users[i], bufp, errnop)) { - result->gr_mem = NULL; - return false; - } - } - *bufp = NULL; // End the array with a null pointer. - - return true; -} - -bool ParseJsonToEmail(const string& json, string* email) { - json_object* root = NULL; - root = json_tokener_parse(json.c_str()); - if (root == NULL) { - return false; - } - // Locate the email object. 
- json_object* login_profiles = NULL; - if (!json_object_object_get_ex(root, "loginProfiles", &login_profiles)) { - return false; - } - if (json_object_get_type(login_profiles) != json_type_array) { - return false; - } - login_profiles = json_object_array_get_idx(login_profiles, 0); - json_object* json_email = NULL; - if (!json_object_object_get_ex(login_profiles, "name", &json_email)) { - return false; - } - - *email = json_object_get_string(json_email); - return true; -} - -bool ParseJsonToSuccess(const string& json) { - json_object* root = NULL; - root = json_tokener_parse(json.c_str()); - if (root == NULL) { - return false; - } - json_object* success = NULL; - if (!json_object_object_get_ex(root, "success", &success)) { - return false; - } - return (bool)json_object_get_boolean(success); -} - -bool ParseJsonToKey(const string& json, const string& key, string* response) { - json_object* root = NULL; - json_object* json_response = NULL; - const char* c_response; - - root = json_tokener_parse(json.c_str()); - if (root == NULL) { - return false; - } - - if (!json_object_object_get_ex(root, key.c_str(), &json_response)) { - return false; - } - - if (!(c_response = json_object_get_string(json_response))) { - return false; - } - - *response = c_response; - return true; -} - -bool ParseJsonToChallenges(const string& json, - std::vector* challenges) { - json_object* root = NULL; - - root = json_tokener_parse(json.c_str()); - if (root == NULL) { - return false; - } - - json_object* jsonChallenges = NULL; - if (!json_object_object_get_ex(root, "challenges", &jsonChallenges)) { - return false; - } - - json_object *challengeId, *challengeType, *challengeStatus = NULL; - for (int i = 0; i < json_object_array_length(jsonChallenges); ++i) { - if (!json_object_object_get_ex(json_object_array_get_idx(jsonChallenges, i), - "challengeId", &challengeId)) { - return false; - } - if (!json_object_object_get_ex(json_object_array_get_idx(jsonChallenges, i), - "challengeType", 
&challengeType)) { - return false; - } - if (!json_object_object_get_ex(json_object_array_get_idx(jsonChallenges, i), - "status", &challengeStatus)) { - return false; - } - Challenge challenge; - challenge.id = json_object_get_int(challengeId); - challenge.type = json_object_get_string(challengeType); - challenge.status = json_object_get_string(challengeStatus); - - challenges->push_back(challenge); - } - - return true; -} - -bool FindGroup(struct group* result, BufferManager* buf, int* errnop) { - if (result->gr_name == NULL && result->gr_gid == 0) { - return false; - } - std::stringstream url; - std::vector groups; - - string response; - long http_code; - string pageToken = ""; - - do { - url.str(""); - url << kMetadataServerUrl << "groups"; - if (pageToken != "") url << "?pageToken=" << pageToken; - - response.clear(); - http_code = 0; - if (!HttpGet(url.str(), &response, &http_code) || http_code != 200 || - response.empty()) { - *errnop = EAGAIN; - return false; - } - - if (!ParseJsonToKey(response, "nextPageToken", &pageToken)) { - pageToken = ""; - } - - groups.clear(); - if (!ParseJsonToGroups(response, &groups) || groups.empty()) { - *errnop = ENOENT; - return false; - } - - // Check for a match. - for (int i = 0; i < (int) groups.size(); i++) { - Group el = groups[i]; - if ((result->gr_name != NULL) && (string(result->gr_name) == el.name)) { - // Set the name even though it matches because the final string must - // be stored in the provided buffer. - if (!buf->AppendString(el.name, &result->gr_name, errnop)) { - return false; - } - result->gr_gid = el.gid; - return true; - } - if ((result->gr_gid != 0) && (result->gr_gid == el.gid)) { - if (!buf->AppendString(el.name, &result->gr_name, errnop)) { - return false; - } - return true; - } - } - } while (pageToken != ""); - // Not found. 
- *errnop = ENOENT; - return false; -} - -bool GetGroupsForUser(string username, std::vector* groups, - int* errnop) { - std::stringstream url; - - string response; - long http_code; - string pageToken = ""; - - do { - url.str(""); - url << kMetadataServerUrl << "groups?username=" << username; - if (pageToken != "") url << "?pageToken=" << pageToken; - - response.clear(); - http_code = 0; - if (!HttpGet(url.str(), &response, &http_code) || http_code != 200 || - response.empty()) { - *errnop = EAGAIN; - return false; - } - - if (!ParseJsonToKey(response, "pageToken", &pageToken)) { - pageToken = ""; - } - - if (!ParseJsonToGroups(response, groups)) { - *errnop = ENOENT; - return false; - } - } while (pageToken != ""); - return true; -} - -bool GetUsersForGroup(string groupname, std::vector* users, - int* errnop) { - string response; - long http_code; - string pageToken = ""; - std::stringstream url; - - do { - url.str(""); - url << kMetadataServerUrl << "users?groupname=" << groupname; - if (pageToken != "") url << "?pageToken=" << pageToken; - - response.clear(); - http_code = 0; - if (!HttpGet(url.str(), &response, &http_code) || http_code != 200 || - response.empty()) { - *errnop = EAGAIN; - return false; - } - if (!ParseJsonToKey(response, "nextPageToken", &pageToken)) { - pageToken = ""; - } - if (!ParseJsonToUsers(response, users)) { - *errnop = EINVAL; - return false; - } - } while (pageToken != ""); - return true; -} - -bool GetUser(const string& username, string* response) { - std::stringstream url; - url << kMetadataServerUrl << "users?username=" << UrlEncode(username); - - long http_code = 0; - if (!HttpGet(url.str(), response, &http_code) || response->empty() || - http_code != 200) { - return false; - } - return true; -} - -bool StartSession(const string& email, string* response) { - bool ret = true; - struct json_object *jobj, *jarr; - - jarr = json_object_new_array(); - json_object_array_add(jarr, json_object_new_string(INTERNAL_TWO_FACTOR)); - 
json_object_array_add(jarr, json_object_new_string(AUTHZEN)); - json_object_array_add(jarr, json_object_new_string(TOTP)); - json_object_array_add(jarr, json_object_new_string(IDV_PREREGISTERED_PHONE)); - - jobj = json_object_new_object(); - json_object_object_add(jobj, "email", json_object_new_string(email.c_str())); - json_object_object_add(jobj, "supportedChallengeTypes", jarr); // Ownership transferred to jobj. - - const char* data; - data = json_object_to_json_string_ext(jobj, JSON_C_TO_STRING_PLAIN); - - std::stringstream url; - url << kMetadataServerUrl << "authenticate/sessions/start"; - - long http_code = 0; - if (!HttpPost(url.str(), data, response, &http_code) || response->empty() || - http_code != 200) { - ret = false; - } - - json_object_put(jobj); - - return ret; -} - -bool ContinueSession(bool alt, const string& email, const string& user_token, - const string& session_id, const Challenge& challenge, - string* response) { - bool ret = true; - struct json_object *jobj, *jresp; - - jobj = json_object_new_object(); - json_object_object_add(jobj, "email", json_object_new_string(email.c_str())); - json_object_object_add(jobj, "challengeId", - json_object_new_int(challenge.id)); - - if (alt) { - json_object_object_add(jobj, "action", - json_object_new_string("START_ALTERNATE")); - } else { - json_object_object_add(jobj, "action", json_object_new_string("RESPOND")); - } - - // AUTHZEN type and START_ALTERNATE action don't provide credentials. - if (challenge.type != AUTHZEN && !alt) { - jresp = json_object_new_object(); - json_object_object_add(jresp, "credential", - json_object_new_string(user_token.c_str())); - json_object_object_add(jobj, "proposalResponse", jresp); // Ownership transferred to jobj. 
- } - - const char* data = NULL; - data = json_object_to_json_string_ext(jobj, JSON_C_TO_STRING_PLAIN); - - std::stringstream url; - url << kMetadataServerUrl << "authenticate/sessions/" << session_id - << "/continue"; - long http_code = 0; - if (!HttpPost(url.str(), data, response, &http_code) || response->empty() || - http_code != 200) { - ret = false; - } - - json_object_put(jobj); - - return ret; -} -} // namespace oslogin_utils diff -Nru gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/test/Makefile gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/test/Makefile --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/test/Makefile 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/test/Makefile 1970-01-01 00:00:00.000000000 +0000 @@ -1,34 +0,0 @@ -TOPDIR = $(realpath ..) - -CPPFLAGS += -I$(TOPDIR)/src/include -I/usr/include/json-c -CXXFLAGS += -g -Wall -Wextra -std=c++11 -LDLIBS = -lcurl -ljson-c -lpthread - -all : test_runner non_network_tests - -clean : - rm -f test_runner *.o - -gtest-all.o : $(GTEST_DIR)/src/gtest-all.cc - $(CXX) $(CXXFLAGS) -isystem $(GTEST)/include -I$(GTEST) $(CPPFLAGS) -c $^ - -test_runner : oslogin_utils_test.o $(TOPDIR)/src/utils.o gtest-all.o - $(CXX) $(CXXFLAGS) $(CPPFLAGS) $^ -o $@ $(LDLIBS) - -non_network_tests : test_runner - ./test_runner --gtest_filter=*-FindGroupTest.*:GetUsersForGroupTest.* - -network_tests : test_runner ping reset - ./test_runner --gtest_filter=FindGroupTest.*:GetUsersForGroupTest.* - -# run as $ make tests GTESTARGS="--gtest_filter=FindGroupTest.*" -tests : test_runner - ./test_runner ${GTESTARGS} - -ping : - nc -vzw2 metadata.google.internal 80 >/dev/null 2>&1 - -reset : - curl -Ss http://metadata.google.internal/reset >/dev/null 2>&1 - -.PHONY : all clean tests ping reset gtest prowtest diff -Nru 
gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/test/oslogin_utils_test.cc gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/test/oslogin_utils_test.cc --- gce-compute-image-packages-20190801/packages/google-compute-engine-oslogin/test/oslogin_utils_test.cc 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/google-compute-engine-oslogin/test/oslogin_utils_test.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,627 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Requires libgtest-dev and gtest compiled and installed. -#include -#include -#include -#include -#include - -using std::string; -using std::vector; - -namespace oslogin_utils { - -// Test that the buffer can successfully append multiple strings. 
-TEST(BufferManagerTest, TestAppendString) { - size_t buflen = 20; - char* buffer = (char*)malloc(buflen * sizeof(char)); - ASSERT_STRNE(buffer, NULL); - - char* first_string; - char* second_string; - int test_errno = 0; - oslogin_utils::BufferManager buffer_manager(buffer, buflen); - buffer_manager.AppendString("test1", &first_string, &test_errno); - buffer_manager.AppendString("test2", &second_string, &test_errno); - ASSERT_EQ(test_errno, 0); - ASSERT_STREQ(first_string, "test1"); - ASSERT_STREQ(second_string, "test2"); - ASSERT_STREQ(buffer, "test1"); - ASSERT_STREQ((buffer + 6), "test2"); -} - -// Test that attempting to append a string larger than the buffer can handle -// fails with ERANGE. -TEST(BufferManagerTest, TestAppendStringTooLarge) { - size_t buflen = 1; - char* buffer = (char*)malloc(buflen * sizeof(char)); - ASSERT_STRNE(buffer, NULL); - - char* first_string; - int test_errno = 0; - oslogin_utils::BufferManager buffer_manager(buffer, buflen); - ASSERT_FALSE( - buffer_manager.AppendString("test1", &first_string, &test_errno)); - ASSERT_EQ(test_errno, ERANGE); -} - -// Test successfully loading and retrieving an array of JSON posix accounts. 
-TEST(NssCacheTest, TestLoadJsonArray) { - NssCache nss_cache(2); - string test_user1 = - "{\"name\":\"foo@example.com\",\"posixAccounts\":[" - "{\"primary\":true,\"username\":\"foo\",\"uid\":1337,\"gid\":1337," - "\"homeDirectory\":\"/home/foo\",\"shell\":\"/bin/bash\"}]}"; - string test_user2 = - "{\"name\":\"bar@example.com\"," - "\"posixAccounts\":[" - "{\"primary\":true,\"username\":\"bar\",\"uid\":1338,\"gid\":1338," - "\"homeDirectory\":\"/home/bar\",\"shell\":\"/bin/bash\"}]}"; - string response = "{\"loginProfiles\": [" + test_user1 + ", " + test_user2 + - "], \"nextPageToken\": \"token\"}"; - - ASSERT_TRUE(nss_cache.LoadJsonArrayToCache(response)); - - size_t buflen = 500; - char* buffer = (char*)malloc(buflen * sizeof(char)); - ASSERT_STRNE(buffer, NULL); - BufferManager buf(buffer, buflen); - struct passwd result; - int test_errno = 0; - - // Verify that the first user was stored. - ASSERT_TRUE(nss_cache.HasNextPasswd()); - ASSERT_TRUE(nss_cache.GetNextPasswd(&buf, &result, &test_errno)); - ASSERT_EQ(test_errno, 0); - ASSERT_EQ(result.pw_uid, 1337); - ASSERT_EQ(result.pw_gid, 1337); - ASSERT_STREQ(result.pw_name, "foo"); - ASSERT_STREQ(result.pw_shell, "/bin/bash"); - ASSERT_STREQ(result.pw_dir, "/home/foo"); - - // Verify that the second user was stored. - ASSERT_TRUE(nss_cache.HasNextPasswd()); - ASSERT_TRUE(nss_cache.GetNextPasswd(&buf, &result, &test_errno)); - ASSERT_EQ(test_errno, 0); - ASSERT_EQ(result.pw_uid, 1338); - ASSERT_EQ(result.pw_gid, 1338); - ASSERT_STREQ(result.pw_name, "bar"); - ASSERT_STREQ(result.pw_shell, "/bin/bash"); - ASSERT_STREQ(result.pw_dir, "/home/bar"); - - // Verify that there are no more users stored. - ASSERT_FALSE(nss_cache.HasNextPasswd()); - ASSERT_FALSE(nss_cache.GetNextPasswd(&buf, &result, &test_errno)); - ASSERT_EQ(test_errno, ENOENT); -} - -// Test successfully loading and retrieving a partial array. 
-TEST(NssCacheTest, TestLoadJsonPartialArray) { - NssCache nss_cache(2); - string test_user1 = - "{\"name\":\"foo@example.com\",\"posixAccounts\":[" - "{\"primary\":true,\"username\":\"foo\",\"uid\":1337,\"gid\":1337," - "\"homeDirectory\":\"/home/foo\",\"shell\":\"/bin/bash\"}]}"; - string response = - "{\"loginProfiles\": [" + test_user1 + "], \"nextPageToken\": \"token\"}"; - - ASSERT_TRUE(nss_cache.LoadJsonArrayToCache(response)); - - size_t buflen = 500; - char* buffer = (char*)malloc(buflen * sizeof(char)); - ASSERT_STRNE(buffer, NULL); - BufferManager buf(buffer, buflen); - struct passwd result; - int test_errno = 0; - - // Verify that the first user was stored. - ASSERT_TRUE(nss_cache.HasNextPasswd()); - ASSERT_TRUE(nss_cache.GetNextPasswd(&buf, &result, &test_errno)); - ASSERT_EQ(test_errno, 0); - ASSERT_EQ(result.pw_uid, 1337); - ASSERT_EQ(result.pw_gid, 1337); - ASSERT_STREQ(result.pw_name, "foo"); - ASSERT_STREQ(result.pw_shell, "/bin/bash"); - ASSERT_STREQ(result.pw_dir, "/home/foo"); - - ASSERT_EQ(nss_cache.GetPageToken(), "token"); - - // Verify that there are no more users stored. - ASSERT_FALSE(nss_cache.HasNextPasswd()); - ASSERT_FALSE(nss_cache.GetNextPasswd(&buf, &result, &test_errno)); - ASSERT_EQ(test_errno, ENOENT); -} - -// Test successfully loading and retrieving the final response. -TEST(NssCacheTest, TestLoadJsonFinalResponse) { - NssCache nss_cache(2); - string response = "{\"nextPageToken\": \"0\"}"; - - ASSERT_FALSE(nss_cache.LoadJsonArrayToCache(response)); - ASSERT_EQ(nss_cache.GetPageToken(), ""); - - size_t buflen = 500; - char* buffer = (char*)malloc(buflen * sizeof(char)); - ASSERT_STRNE(buffer, NULL); - BufferManager buf(buffer, buflen); - struct passwd result; - int test_errno = 0; - - // Verify that there are no more users stored. 
- ASSERT_FALSE(nss_cache.HasNextPasswd()); - ASSERT_TRUE(nss_cache.OnLastPage()); - ASSERT_FALSE(nss_cache.GetNextPasswd(&buf, &result, &test_errno)); - ASSERT_EQ(test_errno, ENOENT); -} - -// Tests that resetting, and checking HasNextPasswd does not crash. -TEST(NssCacheTest, ResetNullPtrTest) { - NssCache nss_cache(2); - nss_cache.Reset(); - ASSERT_FALSE(nss_cache.HasNextPasswd()); -} - -// Test parsing a valid JSON response from the metadata server. -TEST(ParseJsonPasswdTest, ParseJsonToPasswdSucceeds) { - string test_user = - "{\"loginProfiles\":[{\"name\":\"foo@example.com\",\"posixAccounts\":[" - "{\"primary\":true,\"username\":\"foo\",\"uid\":1337,\"gid\":1338," - "\"homeDirectory\":\"/home/foo\",\"shell\":\"/bin/bash\"}]}]}"; - - size_t buflen = 200; - char* buffer = (char*)malloc(buflen * sizeof(char)); - ASSERT_STRNE(buffer, NULL); - BufferManager buf(buffer, buflen); - struct passwd result; - int test_errno = 0; - ASSERT_TRUE(ParseJsonToPasswd(test_user, &result, &buf, &test_errno)); - ASSERT_EQ(result.pw_uid, 1337); - ASSERT_EQ(result.pw_gid, 1338); - ASSERT_STREQ(result.pw_name, "foo"); - ASSERT_STREQ(result.pw_shell, "/bin/bash"); - ASSERT_STREQ(result.pw_dir, "/home/foo"); -} - -// Test parsing a valid JSON response from the metadata server with uid > 2^31. 
-TEST(ParseJsonPasswdTest, ParseJsonToPasswdSucceedsWithHighUid) { - string test_user = - "{\"loginProfiles\":[{\"name\":\"foo@example.com\",\"posixAccounts\":[" - "{\"primary\":true,\"username\":\"foo\",\"uid\":4294967295,\"gid\":" - "4294967295,\"homeDirectory\":\"/home/foo\",\"shell\":\"/bin/bash\"}]}]}"; - - size_t buflen = 200; - char* buffer = (char*)malloc(buflen * sizeof(char)); - ASSERT_STRNE(buffer, NULL); - BufferManager buf(buffer, buflen); - struct passwd result; - int test_errno = 0; - ASSERT_TRUE(ParseJsonToPasswd(test_user, &result, &buf, &test_errno)); - ASSERT_EQ(result.pw_uid, 4294967295); - ASSERT_EQ(result.pw_gid, 4294967295); - ASSERT_STREQ(result.pw_name, "foo"); - ASSERT_STREQ(result.pw_shell, "/bin/bash"); - ASSERT_STREQ(result.pw_dir, "/home/foo"); -} - -TEST(ParseJsonPasswdTest, ParseJsonToPasswdSucceedsWithStringUid) { - string test_user = - "{\"loginProfiles\":[{\"name\":\"foo@example.com\",\"posixAccounts\":[" - "{\"primary\":true,\"username\":\"foo\",\"uid\":\"1337\",\"gid\":" - "\"1338\",\"homeDirectory\":\"/home/foo\",\"shell\":\"/bin/bash\"}]}]}"; - - size_t buflen = 200; - char* buffer = (char*)malloc(buflen * sizeof(char)); - ASSERT_STRNE(buffer, NULL); - BufferManager buf(buffer, buflen); - struct passwd result; - int test_errno = 0; - ASSERT_TRUE(ParseJsonToPasswd(test_user, &result, &buf, &test_errno)); - ASSERT_EQ(result.pw_uid, 1337); - ASSERT_EQ(result.pw_gid, 1338); - ASSERT_STREQ(result.pw_name, "foo"); - ASSERT_STREQ(result.pw_shell, "/bin/bash"); - ASSERT_STREQ(result.pw_dir, "/home/foo"); -} - -TEST(ParseJsonPasswdTest, ParseJsonToPasswdNoLoginProfilesSucceeds) { - string test_user = - "{\"name\":\"foo@example.com\",\"posixAccounts\":[" - "{\"primary\":true,\"username\":\"foo\",\"uid\":1337,\"gid\":1337," - "\"homeDirectory\":\"/home/foo\",\"shell\":\"/bin/bash\"}]}"; - - size_t buflen = 200; - char* buffer = (char*)malloc(buflen * sizeof(char)); - ASSERT_STRNE(buffer, NULL); - BufferManager buf(buffer, buflen); - 
struct passwd result; - int test_errno = 0; - ASSERT_TRUE(ParseJsonToPasswd(test_user, &result, &buf, &test_errno)); - ASSERT_EQ(result.pw_uid, 1337); - ASSERT_EQ(result.pw_gid, 1337); - ASSERT_STREQ(result.pw_name, "foo"); - ASSERT_STREQ(result.pw_shell, "/bin/bash"); - ASSERT_STREQ(result.pw_dir, "/home/foo"); -} - -// Test parsing a JSON response without enough space in the buffer. -TEST(ParseJsonPasswdTest, ParseJsonToPasswdFailsWithERANGE) { - string test_user = - "{\"loginProfiles\":[{\"name\":\"foo@example.com\",\"posixAccounts\":[" - "{\"primary\":true,\"username\":\"foo\",\"uid\":1337,\"gid\":1337," - "\"homeDirectory\":\"/home/foo\",\"shell\":\"/bin/bash\"}]}]}"; - - size_t buflen = 1; - char* buffer = (char*)malloc(buflen * sizeof(char)); - ASSERT_STRNE(buffer, NULL); - BufferManager buf(buffer, buflen); - struct passwd result; - int test_errno = 0; - ASSERT_FALSE(ParseJsonToPasswd(test_user, &result, &buf, &test_errno)); - ASSERT_EQ(test_errno, ERANGE); -} - -// Test parsing malformed JSON responses. -TEST(ParseJsonPasswdTest, ParseJsonToPasswdFailsWithEINVAL) { - string test_user = - "{\"loginProfiles\":[{\"name\":\"foo@example.com\",\"posixAccounts\":[" - "{\"primary\":true,\"username\":\"foo\",\"uid\": \"bad_stuff\"" - ",\"gid\":1337,\"homeDirectory\":\"/home/foo\"," - "\"shell\":\"/bin/bash\"}]}]}"; - string test_user2 = - "{\"loginProfiles\":[{\"name\":\"foo@example.com\",\"posixAccounts\":[" - "{\"primary\":true,\"username\":\"foo\",\"uid\": 1337," - "\"gid\":\"bad_stuff\",\"homeDirectory\":\"/home/foo\"," - "\"shell\":\"/bin/bash\"}]}]}"; - - size_t buflen = 200; - char* buffer = (char*)malloc(buflen * sizeof(char)); - ASSERT_STRNE(buffer, NULL); - BufferManager buf(buffer, buflen); - struct passwd result; - int test_errno = 0; - ASSERT_FALSE(ParseJsonToPasswd(test_user, &result, &buf, &test_errno)); - ASSERT_EQ(test_errno, EINVAL); - // Reset errno. 
- test_errno = 0; - ASSERT_TRUE(ParseJsonToPasswd(test_user2, &result, &buf, &test_errno)); - ASSERT_EQ(test_errno, 0); - ASSERT_EQ(result.pw_uid, 1337); - ASSERT_EQ(result.pw_gid, 1337); -} - -// Test parsing a partially filled response. Validate should fill empty fields -// with default values. -TEST(ParseJsonPasswdTest, ValidatePartialJsonResponse) { - string test_user = - "{\"loginProfiles\":[{\"name\":\"foo@example.com\",\"posixAccounts\":[" - "{\"primary\":true,\"username\":\"foo\",\"uid\":1337,\"gid\":1337}]" - "}]}"; - - size_t buflen = 200; - char* buffer = (char*)malloc(buflen * sizeof(char)); - ASSERT_STRNE(buffer, NULL); - BufferManager buf(buffer, buflen); - struct passwd result; - int test_errno = 0; - ASSERT_TRUE(ParseJsonToPasswd(test_user, &result, &buf, &test_errno)); - ASSERT_EQ(result.pw_uid, 1337); - ASSERT_EQ(result.pw_gid, 1337); - ASSERT_STREQ(result.pw_name, "foo"); - ASSERT_STREQ(result.pw_shell, "/bin/bash"); - ASSERT_STREQ(result.pw_dir, "/home/foo"); -} - -// Test parsing an invalid response. Validate should cause the parse to fail if -// there is no uid. -TEST(ParseJsonPasswdTest, ValidateInvalidJsonResponse) { - string test_user = - "{\"loginProfiles\":[{\"name\":\"foo@example.com\",\"posixAccounts\":[" - "{\"primary\":true,\"username\":\"foo\",\"gid\":1337}]" - "}]}"; - - size_t buflen = 200; - char* buffer = (char*)malloc(buflen * sizeof(char)); - ASSERT_STRNE(buffer, NULL); - BufferManager buf(buffer, buflen); - struct passwd result; - int test_errno = 0; - ASSERT_FALSE(ParseJsonToPasswd(test_user, &result, &buf, &test_errno)); - ASSERT_EQ(test_errno, EINVAL); -} - -// Test parsing a valid JSON response from the metadata server. 
-TEST(ParseJsonToGroupsTest, ParseJsonToGroupsSucceeds) { - string test_group = "{\"posixGroups\":[{\"name\":\"demo\",\"gid\":123452}]}"; - - std::vector groups; - ASSERT_TRUE(ParseJsonToGroups(test_group, &groups)); - ASSERT_EQ(groups[0].gid, 123452); - ASSERT_EQ(groups[0].name, "demo"); -} - -// Test parsing a valid JSON response from the metadata server with gid > 2^31. -TEST(ParseJsonToGroupsTest, ParseJsonToGroupsSucceedsWithHighGid) { - string test_group = - "{\"posixGroups\":[{\"name\":\"demo\",\"gid\":4294967295}]}"; - - std::vector groups; - ASSERT_TRUE(ParseJsonToGroups(test_group, &groups)); - ASSERT_EQ(groups[0].gid, 4294967295); - ASSERT_EQ(groups[0].name, "demo"); -} - -TEST(ParseJsonToGroupsTest, ParseJsonToGroupsSucceedsWithStringGid) { - string test_group = - "{\"posixGroups\":[{\"name\":\"demo\",\"gid\":\"123452\"}]}"; - - std::vector groups; - ASSERT_TRUE(ParseJsonToGroups(test_group, &groups)); - ASSERT_EQ(groups[0].gid, 123452); - ASSERT_EQ(groups[0].name, "demo"); -} - -// Test parsing malformed JSON responses. -TEST(ParseJsonToGroupsTest, ParseJsonToGroupsFails) { - string test_badgid = - "{\"posixGroups\":[{\"name\":\"demo\",\"gid\":\"this-should-be-int\"}]}"; - string test_nogid = "{\"posixGroups\":[{\"name\":\"demo\"}]}"; - string test_noname = "{\"posixGroups\":[{\"gid\":123452}]}"; - - std::vector groups; - ASSERT_FALSE(ParseJsonToGroups(test_badgid, &groups)); - ASSERT_FALSE(ParseJsonToGroups(test_nogid, &groups)); - ASSERT_FALSE(ParseJsonToGroups(test_noname, &groups)); -} - -// Test parsing a valid JSON response from the metadata server. 
-TEST(ParseJsonToUsersTest, ParseJsonToUsersSucceeds) { - string test_group_users = - "{\"usernames\":[\"user0001\",\"user0002\",\"user0003\",\"user0004\"," - "\"user0005\"]}"; - - std::vector users; - ASSERT_TRUE(ParseJsonToUsers(test_group_users, &users)); - ASSERT_FALSE(users.empty()); - ASSERT_EQ(users.size(), 5); - - ASSERT_EQ(users[0], "user0001"); - ASSERT_EQ(users[1], "user0002"); - ASSERT_EQ(users[2], "user0003"); - ASSERT_EQ(users[3], "user0004"); - ASSERT_EQ(users[4], "user0005"); -} - -// Test parsing a valid JSON response from the metadata server. -TEST(ParseJsonToUsersTest, ParseJsonToUsersEmptyGroupSucceeds) { - string test_group_users = "{\"usernames\":[]}"; - - std::vector users; - ASSERT_TRUE(ParseJsonToUsers(test_group_users, &users)); - ASSERT_TRUE(users.empty()); -} - -// Test parsing malformed JSON responses. -TEST(ParseJsonToUsersTest, ParseJsonToUsersFails) { - string test_group_users = - "{\"badstuff\":[\"user0001\",\"user0002\",\"user0003\",\"user0004\"," - "\"user0005\"]}"; - - std::vector users; - ASSERT_FALSE(ParseJsonToUsers(test_group_users, &users)); -} - -TEST(GetUsersForGroupTest, GetUsersForGroupSucceeds) { - string response; - long http_code; - ASSERT_TRUE( - HttpGet("http://metadata.google.internal/reset", &response, &http_code)); - - std::vector users; - int errnop = 0; - - ASSERT_TRUE(GetUsersForGroup("demo", &users, &errnop)); - ASSERT_FALSE(users.empty()); - ASSERT_EQ(users[0], "user000173_grande_focustest_org"); - ASSERT_EQ(errnop, 0); -} - -TEST(FindGroupTest, FindGroupByGidSucceeds) { - string response; - long http_code; - ASSERT_TRUE( - HttpGet("http://metadata.google.internal/reset", &response, &http_code)); - - size_t buflen = 200 * sizeof(char); - char* buffer = (char*)malloc(buflen); - ASSERT_STRNE(buffer, NULL); - BufferManager buf(buffer, buflen); - int errnop = 0; - - struct group grp = {}; - grp.gr_gid = 123452; - ASSERT_TRUE(FindGroup(&grp, &buf, &errnop)); - ASSERT_EQ(errnop, 0); -} - -TEST(FindGroupTest, 
FindGroupByNameSucceeds) { - string response; - long http_code; - ASSERT_TRUE( - HttpGet("http://metadata.google.internal/reset", &response, &http_code)); - - size_t buflen = 200 * sizeof(char); - char* buffer = (char*)malloc(buflen); - ASSERT_STRNE(buffer, NULL); - BufferManager buf(buffer, buflen); - int errnop; - - const char* match = "demo"; - struct group grp = {}; - grp.gr_name = (char*)match; - ASSERT_TRUE(FindGroup(&grp, &buf, &errnop)); -} - -TEST(ParseJsonEmailTest, SuccessfullyParsesEmail) { - string test_user = - "{\"loginProfiles\":[{\"name\":\"foo@example.com\",\"posixAccounts\":[" - "{\"primary\":true,\"username\":\"foo\",\"gid\":1337}]" - "}]}"; - - string email; - ASSERT_TRUE(ParseJsonToEmail(test_user, &email)); - ASSERT_EQ(email, "foo@example.com"); -} - -TEST(ParseJsonEmailTest, FailsParseEmail) { - string email; - ASSERT_FALSE(ParseJsonToEmail("random_junk", &email)); - ASSERT_EQ(email, ""); -} - -TEST(ParseJsonSshKeyTest, ParseJsonToSshKeysSucceeds) { - string test_user = - "{\"loginProfiles\":[{\"name\":\"foo@example.com\",\"sshPublicKeys\":" - "{\"fingerprint\": {\"key\": \"test_key\"}}}]}"; - - size_t buflen = 200; - char* buffer = (char*)malloc(buflen * sizeof(char)); - ASSERT_STRNE(buffer, NULL); - BufferManager buf(buffer, buflen); - std::vector result = ParseJsonToSshKeys(test_user); - ASSERT_EQ(result.size(), 1); - ASSERT_EQ(result[0], "test_key"); -} - -TEST(ParseJsonSshKeyTest, ParseJsonToSshKeysMultipleKeys) { - string test_user = - "{\"loginProfiles\":[{\"name\":\"foo@example.com\",\"sshPublicKeys\":" - "{\"fingerprint\": {\"key\": \"test_key\"}, \"fingerprint2\": {\"key\": " - "\"test_key2\"}}}]}"; - - size_t buflen = 200; - char* buffer = (char*)malloc(buflen * sizeof(char)); - ASSERT_STRNE(buffer, NULL); - BufferManager buf(buffer, buflen); - std::vector result = ParseJsonToSshKeys(test_user); - ASSERT_EQ(result.size(), 2); - ASSERT_EQ(result[0], "test_key"); - ASSERT_EQ(result[1], "test_key2"); -} - -TEST(ParseJsonSshKeyTest, 
ParseJsonToSshKeysFiltersExpiredKeys) { - string test_user = - "{\"loginProfiles\":[{\"name\":\"foo@example.com\",\"sshPublicKeys\":" - "{\"fingerprint\": {\"key\": \"test_key\"}, \"fingerprint2\": {\"key\": " - "\"test_key2\", \"expirationTimeUsec\": 0}}}]}"; - - size_t buflen = 200; - char* buffer = (char*)malloc(buflen * sizeof(char)); - ASSERT_STRNE(buffer, NULL); - BufferManager buf(buffer, buflen); - std::vector result = ParseJsonToSshKeys(test_user); - ASSERT_EQ(result.size(), 1); - ASSERT_EQ(result[0], "test_key"); -} - -TEST(ParseJsonSshKeyTest, ParseJsonToSshKeysFiltersMalformedExpiration) { - string test_user = - "{\"loginProfiles\":[{\"name\":\"foo@example.com\",\"sshPublicKeys\":" - "{\"fingerprint\": {\"key\": \"test_key\"}, \"fingerprint2\": {\"key\": " - "\"test_key2\", \"expirationTimeUsec\": \"bad_stuff\"}}}]}"; - - size_t buflen = 200; - char* buffer = (char*)malloc(buflen * sizeof(char)); - ASSERT_STRNE(buffer, NULL); - BufferManager buf(buffer, buflen); - std::vector result = ParseJsonToSshKeys(test_user); - ASSERT_EQ(result.size(), 1); - ASSERT_EQ(result[0], "test_key"); -} - -TEST(ParseJsonAuthorizeSuccess, SuccessfullyAuthorized) { - string response = "{\"success\": true}"; - ASSERT_TRUE(ParseJsonToSuccess(response)); -} - -TEST(ValidateUserNameTest, ValidateValidUserNames) { - string cases[] = {"user", "_", ".", ".abc_", - "_abc-", "ABC", "A_.-", "ausernamethirtytwocharacterslong"}; - for (auto test_user : cases) { - ASSERT_TRUE(ValidateUserName(test_user)); - } -} - -TEST(ValidateUserNameTest, ValidateInvalidUserNames) { - string cases[] = { - "", - "!#$%^", - "-abc", - "#abc", - "^abc", - "abc*xyz", - "abc xyz", - "xyz*", - "xyz$", - "usernamethirtythreecharacterslong", - "../../etc/shadow", - }; - for (auto test_user : cases) { - ASSERT_FALSE(ValidateUserName(test_user)); - } -} - -TEST(ParseJsonKeyTest, TestKey) { - string test_json = "{\"some_key\":\"some_value\"}"; - string value; - ASSERT_TRUE(ParseJsonToKey(test_json, "some_key", 
&value)); - ASSERT_EQ(value, "some_value"); -} - -TEST(ParseJsonKeyTest, TestMissingKey) { - string test_json = "{\"some_key\":\"some_value\"}"; - string value; - ASSERT_FALSE(ParseJsonToKey(test_json, "some_other_key", &value)); - ASSERT_EQ(value, ""); -} - -TEST(ParseJsonChallengesTest, TestChallenges) { - string challenges_json = - "{\"status\":\"CHALLENGE_REQUIRED\",\"sessionId\":" - "\"testSessionId\",\"challenges\":[{\"challengeId\":1,\"challengeType\":" - "\"TOTP\",\"status\":\"READY\"}, {\"challengeId\":2,\"challengeType\":" - "\"AUTHZEN\",\"status\":\"PROPOSED\"}]}"; - vector challenges; - ASSERT_TRUE(ParseJsonToChallenges(challenges_json, &challenges)); - ASSERT_EQ(challenges.size(), 2); - ASSERT_EQ(challenges[0].id, 1); - ASSERT_EQ(challenges[0].type, "TOTP"); -} - -TEST(ParseJsonChallengesTest, TestMalformedChallenges) { - string challenges_json = - "{\"status\":\"CHALLENGE_REQUIRED\",\"sessionId\":" - "\"testSessionId\",\"challenges\":[{\"challengeId\":1,\"challengeType\":" - "\"TOTP\",\"status\":\"READY\"}, {\"challengeId\":2,\"challengeType\":" - "\"AUTHZEN\"}]}"; - vector challenges; - ASSERT_FALSE(ParseJsonToChallenges(challenges_json, &challenges)); - ASSERT_EQ(challenges.size(), 1); -} -} // namespace oslogin_utils -int main(int argc, char** argv) { - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/.coveragerc gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/.coveragerc --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/.coveragerc 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/.coveragerc 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -[run] -branch = True -include = google_compute_engine/* - -[report] -exclude_lines = - pragma: no cover - if __name__ == .__main__.: diff -Nru 
gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/accounts/accounts_daemon.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/accounts/accounts_daemon.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/accounts/accounts_daemon.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/accounts/accounts_daemon.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,310 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Manage user accounts on a Google Compute Engine instances.""" - -import datetime -import json -import logging.handlers -import optparse -import random - -from google_compute_engine import config_manager -from google_compute_engine import constants -from google_compute_engine import file_utils -from google_compute_engine import logger -from google_compute_engine import metadata_watcher -from google_compute_engine.accounts import accounts_utils -from google_compute_engine.accounts import oslogin_utils - -LOCKFILE = constants.LOCALSTATEDIR + '/lock/google_accounts.lock' - - -class AccountsDaemon(object): - """Manage user accounts based on changes to metadata.""" - - invalid_users = set() - user_ssh_keys = {} - - def __init__( - self, groups=None, remove=False, gpasswd_add_cmd=None, - gpasswd_remove_cmd=None, groupadd_cmd=None, useradd_cmd=None, - userdel_cmd=None, usermod_cmd=None, debug=False): - """Constructor. - - Args: - groups: string, a comma separated list of groups. - remove: bool, True if deprovisioning a user should be destructive. - useradd_cmd: string, command to create a new user. - userdel_cmd: string, command to delete a user. - usermod_cmd: string, command to modify user's groups. - groupadd_cmd: string, command to add a new group. - gpasswd_add_cmd: string, command to add an user to a group. - gpasswd_remove_cmd: string, command to remove an user from a group. - debug: bool, True if debug output should write to the console. 
- """ - facility = logging.handlers.SysLogHandler.LOG_DAEMON - self.logger = logger.Logger( - name='google-accounts', debug=debug, facility=facility) - self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger) - self.utils = accounts_utils.AccountsUtils( - logger=self.logger, groups=groups, remove=remove, - gpasswd_add_cmd=gpasswd_add_cmd, gpasswd_remove_cmd=gpasswd_remove_cmd, - groupadd_cmd=groupadd_cmd, useradd_cmd=useradd_cmd, - userdel_cmd=userdel_cmd, usermod_cmd=usermod_cmd) - self.oslogin = oslogin_utils.OsLoginUtils(logger=self.logger) - - try: - with file_utils.LockFile(LOCKFILE): - self.logger.info('Starting Google Accounts daemon.') - timeout = 60 + random.randint(0, 30) - self.watcher.WatchMetadata( - self.HandleAccounts, recursive=True, timeout=timeout) - except (IOError, OSError) as e: - self.logger.warning(str(e)) - - def _HasExpired(self, key): - """Check whether an SSH key has expired. - - Uses Google-specific semantics of the OpenSSH public key format's comment - field to determine if an SSH key is past its expiration timestamp, and - therefore no longer to be trusted. This format is still subject to change. - Reliance on it in any way is at your own risk. - - Args: - key: string, a single public key entry in OpenSSH public key file format. - This will be checked for Google-specific comment semantics, and if - present, those will be analysed. - - Returns: - bool, True if the key has Google-specific comment semantics and has an - expiration timestamp in the past, or False otherwise. - """ - self.logger.debug('Processing key: %s.', key) - - try: - schema, json_str = key.split(None, 3)[2:] - except (ValueError, AttributeError): - self.logger.debug('No schema identifier. Not expiring key.') - return False - - if schema != 'google-ssh': - self.logger.debug('Invalid schema %s. Not expiring key.', schema) - return False - - try: - json_obj = json.loads(json_str) - except ValueError: - self.logger.debug('Invalid JSON %s. 
Not expiring key.', json_str) - return False - - if 'expireOn' not in json_obj: - self.logger.debug('No expiration timestamp. Not expiring key.') - return False - - expire_str = json_obj['expireOn'] - format_str = '%Y-%m-%dT%H:%M:%S+0000' - try: - expire_time = datetime.datetime.strptime(expire_str, format_str) - except ValueError: - self.logger.warning( - 'Expiration timestamp "%s" not in format %s. Not expiring key.', - expire_str, format_str) - return False - - # Expire the key if and only if we have exceeded the expiration timestamp. - return datetime.datetime.utcnow() > expire_time - - def _ParseAccountsData(self, account_data): - """Parse the SSH key data into a user map. - - Args: - account_data: string, the metadata server SSH key attributes data. - - Returns: - dict, a mapping of the form: {'username': ['sshkey1, 'sshkey2', ...]}. - """ - if not account_data: - return {} - lines = [line for line in account_data.splitlines() if line] - user_map = {} - for line in lines: - if not all(ord(c) < 128 for c in line): - self.logger.info('SSH key contains non-ascii character: %s.', line) - continue - split_line = line.split(':', 1) - if len(split_line) != 2: - self.logger.info('SSH key is not a complete entry: %s.', split_line) - continue - user, key = split_line - if self._HasExpired(key): - self.logger.debug('Expired SSH key for user %s: %s.', user, key) - continue - if user not in user_map: - user_map[user] = [] - user_map[user].append(key) - logging.debug('User accounts: %s.', user_map) - return user_map - - def _GetInstanceAndProjectAttributes(self, metadata_dict): - """Get dictionaries for instance and project attributes. - - Args: - metadata_dict: json, the deserialized contents of the metadata server. - - Returns: - tuple, two dictionaries for instance and project attributes. 
- """ - metadata_dict = metadata_dict or {} - - try: - instance_data = metadata_dict['instance']['attributes'] - except KeyError: - instance_data = {} - self.logger.warning('Instance attributes were not found.') - - try: - project_data = metadata_dict['project']['attributes'] - except KeyError: - project_data = {} - self.logger.warning('Project attributes were not found.') - - return instance_data, project_data - - def _GetAccountsData(self, metadata_dict): - """Get the user accounts specified in metadata server contents. - - Args: - metadata_dict: json, the deserialized contents of the metadata server. - - Returns: - dict, a mapping of the form: {'username': ['sshkey1, 'sshkey2', ...]}. - """ - instance_data, project_data = self._GetInstanceAndProjectAttributes( - metadata_dict) - valid_keys = [instance_data.get('sshKeys'), instance_data.get('ssh-keys')] - block_project = instance_data.get('block-project-ssh-keys', '').lower() - if block_project != 'true' and not instance_data.get('sshKeys'): - valid_keys.append(project_data.get('ssh-keys')) - valid_keys.append(project_data.get('sshKeys')) - accounts_data = '\n'.join([key for key in valid_keys if key]) - return self._ParseAccountsData(accounts_data) - - def _UpdateUsers(self, update_users): - """Provision and update Linux user accounts based on account metadata. - - Args: - update_users: dict, authorized users mapped to their public SSH keys. - """ - for user, ssh_keys in update_users.items(): - if not user or user in self.invalid_users: - continue - configured_keys = self.user_ssh_keys.get(user, []) - if set(ssh_keys) != set(configured_keys): - if not self.utils.UpdateUser(user, ssh_keys): - self.invalid_users.add(user) - else: - self.user_ssh_keys[user] = ssh_keys[:] - - def _RemoveUsers(self, remove_users): - """Deprovision Linux user accounts that do not appear in account metadata. - - Args: - remove_users: list, the username strings of the Linux accounts to remove. 
- """ - for username in remove_users: - self.utils.RemoveUser(username) - self.user_ssh_keys.pop(username, None) - self.invalid_users -= set(remove_users) - - def _GetEnableOsLoginValue(self, metadata_dict): - """Get the value of the enable-oslogin metadata key. - - Args: - metadata_dict: json, the deserialized contents of the metadata server. - - Returns: - bool, True if OS Login is enabled for VM access. - """ - instance_data, project_data = self._GetInstanceAndProjectAttributes( - metadata_dict) - instance_value = instance_data.get('enable-oslogin') - project_value = project_data.get('enable-oslogin') - value = instance_value or project_value or '' - - return value.lower() == 'true' - - def _GetEnableTwoFactorValue(self, metadata_dict): - """Get the value of the enable-oslogin-2fa metadata key. - - Args: - metadata_dict: json, the deserialized contents of the metadata server. - - Returns: - bool, True if two factor authentication is enabled for VM access. - """ - instance_data, project_data = self._GetInstanceAndProjectAttributes( - metadata_dict) - instance_value = instance_data.get('enable-oslogin-2fa') - project_value = project_data.get('enable-oslogin-2fa') - value = instance_value or project_value or '' - - return value.lower() == 'true' - - def HandleAccounts(self, result): - """Called when there are changes to the contents of the metadata server. - - Args: - result: json, the deserialized contents of the metadata server. 
- """ - self.logger.debug('Checking for changes to user accounts.') - configured_users = self.utils.GetConfiguredUsers() - enable_oslogin = self._GetEnableOsLoginValue(result) - enable_two_factor = self._GetEnableTwoFactorValue(result) - if enable_oslogin: - desired_users = {} - self.oslogin.UpdateOsLogin(True, two_factor_desired=enable_two_factor) - else: - desired_users = self._GetAccountsData(result) - self.oslogin.UpdateOsLogin(False) - remove_users = sorted(set(configured_users) - set(desired_users.keys())) - self._UpdateUsers(desired_users) - self._RemoveUsers(remove_users) - self.utils.SetConfiguredUsers(desired_users.keys()) - - -def main(): - parser = optparse.OptionParser() - parser.add_option( - '-d', '--debug', action='store_true', dest='debug', - help='print debug output to the console.') - (options, _) = parser.parse_args() - instance_config = config_manager.ConfigManager() - if instance_config.GetOptionBool('Daemons', 'accounts_daemon'): - AccountsDaemon( - groups=instance_config.GetOptionString('Accounts', 'groups'), - remove=instance_config.GetOptionBool('Accounts', 'deprovision_remove'), - useradd_cmd=instance_config.GetOptionString('Accounts', 'useradd_cmd'), - userdel_cmd=instance_config.GetOptionString('Accounts', 'userdel_cmd'), - usermod_cmd=instance_config.GetOptionString('Accounts', 'usermod_cmd'), - groupadd_cmd=instance_config.GetOptionString( - 'Accounts', 'groupadd_cmd'), - gpasswd_add_cmd=instance_config.GetOptionString('Accounts', 'gpasswd_add_cmd'), - gpasswd_remove_cmd=instance_config.GetOptionString('Accounts', 'gpasswd_remove_cmd'), - debug=bool(options.debug)) - - -if __name__ == '__main__': - main() diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/accounts/accounts_utils.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/accounts/accounts_utils.py --- 
gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/accounts/accounts_utils.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/accounts/accounts_utils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,386 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Utilities for provisioning or deprovisioning a Linux user account.""" - -import grp -import os -import pwd -import re -import shutil -import subprocess -import tempfile - -from google_compute_engine import constants -from google_compute_engine import file_utils - -USER_REGEX = re.compile(r'\A[A-Za-z0-9._][A-Za-z0-9._-]{0,31}\Z') -DEFAULT_GPASSWD_ADD_CMD = 'gpasswd -a {user} {group}' -DEFAULT_GPASSWD_REMOVE_CMD = 'gpasswd -d {user} {group}' -DEFAULT_GROUPADD_CMD = 'groupadd {group}' -DEFAULT_USERADD_CMD = 'useradd -m -s /bin/bash -p * {user}' -DEFAULT_USERDEL_CMD = 'userdel -r {user}' -DEFAULT_USERMOD_CMD = 'usermod -G {groups} {user}' - - -class AccountsUtils(object): - """System user account configuration utilities.""" - - google_comment = '# Added by Google' - - def __init__( - self, logger, groups=None, remove=False, gpasswd_add_cmd=None, - gpasswd_remove_cmd=None, groupadd_cmd=None, useradd_cmd=None, - userdel_cmd=None, usermod_cmd=None): - """Constructor. 
- - Args: - logger: logger object, used to write to SysLog and serial port. - groups: string, a comma separated list of groups. - remove: bool, True if deprovisioning a user should be destructive. - gpasswd_add_cmd: string, command to add an user to a group. - gpasswd_remove_cmd: string, command to remove an user from a group. - groupadd_cmd: string, command to add a new group. - useradd_cmd: string, command to create a new user. - userdel_cmd: string, command to delete a user. - usermod_cmd: string, command to modify user's groups. - """ - self.gpasswd_add_cmd = gpasswd_add_cmd or DEFAULT_GPASSWD_ADD_CMD - self.gpasswd_remove_cmd = gpasswd_remove_cmd or DEFAULT_GPASSWD_REMOVE_CMD - self.groupadd_cmd = groupadd_cmd or DEFAULT_GROUPADD_CMD - self.useradd_cmd = useradd_cmd or DEFAULT_USERADD_CMD - self.userdel_cmd = userdel_cmd or DEFAULT_USERDEL_CMD - self.usermod_cmd = usermod_cmd or DEFAULT_USERMOD_CMD - self.logger = logger - self.google_sudoers_group = 'google-sudoers' - self.google_sudoers_file = ( - constants.LOCALBASE + '/etc/sudoers.d/google_sudoers') - self.google_users_dir = constants.LOCALBASE + '/var/lib/google' - self.google_users_file = os.path.join(self.google_users_dir, 'google_users') - - self._CreateSudoersGroup() - self.groups = groups.split(',') if groups else [] - self.groups = list(filter(self._GetGroup, self.groups)) - self.remove = remove - - def _GetGroup(self, group): - """Retrieve a Linux group. - - Args: - group: string, the name of the Linux group to retrieve. - - Returns: - grp.struct_group, the Linux group or None if it does not exist. 
- """ - try: - return grp.getgrnam(group) - except KeyError: - return None - - def _CreateSudoersGroup(self): - """Create a Linux group for Google added sudo user accounts.""" - if not self._GetGroup(self.google_sudoers_group): - try: - command = self.groupadd_cmd.format(group=self.google_sudoers_group) - subprocess.check_call(command.split(' ')) - except subprocess.CalledProcessError as e: - self.logger.warning('Could not create the sudoers group. %s.', str(e)) - - if not os.path.exists(self.google_sudoers_file): - try: - with open(self.google_sudoers_file, 'w') as group: - message = '%{0} ALL=(ALL:ALL) NOPASSWD:ALL'.format( - self.google_sudoers_group) - group.write(message) - except IOError as e: - self.logger.error( - 'Could not write sudoers file. %s. %s', - self.google_sudoers_file, str(e)) - return - - file_utils.SetPermissions( - self.google_sudoers_file, mode=0o440, uid=0, gid=0) - - def _GetUser(self, user): - """Retrieve a Linux user account. - - Args: - user: string, the name of the Linux user account to retrieve. - - Returns: - pwd.struct_passwd, the Linux user or None if it does not exist. - """ - try: - return pwd.getpwnam(user) - except KeyError: - return None - - def _AddUser(self, user): - """Configure a Linux user account. - - Args: - user: string, the name of the Linux user account to create. - - Returns: - bool, True if user creation succeeded. - """ - self.logger.info('Creating a new user account for %s.', user) - - command = self.useradd_cmd.format(user=user) - try: - subprocess.check_call(command.split(' ')) - except subprocess.CalledProcessError as e: - self.logger.warning('Could not create user %s. %s.', user, str(e)) - return False - else: - self.logger.info('Created user account %s.', user) - return True - - def _UpdateUserGroups(self, user, groups): - """Update group membership for a Linux user. - - Args: - user: string, the name of the Linux user account. - groups: list, the group names to add the user as a member. 
- - Returns: - bool, True if user update succeeded. - """ - groups = ','.join(groups) - self.logger.debug('Updating user %s with groups %s.', user, groups) - command = self.usermod_cmd.format(user=user, groups=groups) - try: - subprocess.check_call(command.split(' ')) - except subprocess.CalledProcessError as e: - self.logger.warning('Could not update user %s. %s.', user, str(e)) - return False - else: - self.logger.debug('Updated user account %s.', user) - return True - - def _UpdateAuthorizedKeys(self, user, ssh_keys): - """Update the authorized keys file for a Linux user with a list of SSH keys. - - Args: - user: string, the name of the Linux user account. - ssh_keys: list, the SSH key strings associated with the user. - - Raises: - IOError, raised when there is an exception updating a file. - OSError, raised when setting permissions or writing to a read-only - file system. - """ - pw_entry = self._GetUser(user) - if not pw_entry: - return - - uid = pw_entry.pw_uid - gid = pw_entry.pw_gid - home_dir = pw_entry.pw_dir - ssh_dir = os.path.join(home_dir, '.ssh') - - # Not all sshd's support multiple authorized_keys files so we have to - # share one with the user. We add each of our entries as follows: - # # Added by Google - # authorized_key_entry - authorized_keys_file = os.path.join(ssh_dir, 'authorized_keys') - - # Do not write to the authorized keys file if it is a symlink. - if os.path.islink(ssh_dir) or os.path.islink(authorized_keys_file): - self.logger.warning( - 'Not updating authorized keys for user %s. File is a symlink.', user) - return - - # Create home directory if it does not exist. This can happen if _GetUser - # (getpwnam) returns non-local user info (e.g., from LDAP). - if not os.path.exists(home_dir): - file_utils.SetPermissions(home_dir, mode=0o755, uid=uid, gid=gid, - mkdir=True) - - # Create ssh directory if it does not exist. 
- file_utils.SetPermissions(ssh_dir, mode=0o700, uid=uid, gid=gid, mkdir=True) - - # Create entry in the authorized keys file. - prefix = self.logger.name + '-' - with tempfile.NamedTemporaryFile( - mode='w', prefix=prefix, delete=True) as updated_keys: - updated_keys_file = updated_keys.name - if os.path.exists(authorized_keys_file): - lines = open(authorized_keys_file).readlines() - else: - lines = [] - - google_lines = set() - for i, line in enumerate(lines): - if line.startswith(self.google_comment): - google_lines.update([i, i+1]) - - # Write user's authorized key entries. - for i, line in enumerate(lines): - if i not in google_lines and line: - line += '\n' if not line.endswith('\n') else '' - updated_keys.write(line) - - # Write the Google authorized key entries at the end of the file. - # Each entry is preceded by '# Added by Google'. - for ssh_key in ssh_keys: - ssh_key += '\n' if not ssh_key.endswith('\n') else '' - updated_keys.write('%s\n' % self.google_comment) - updated_keys.write(ssh_key) - - # Write buffered data to the updated keys file without closing it and - # update the Linux user's authorized keys file. - updated_keys.flush() - shutil.copy(updated_keys_file, authorized_keys_file) - - file_utils.SetPermissions( - authorized_keys_file, mode=0o600, uid=uid, gid=gid) - - def _UpdateSudoer(self, user, sudoer=False): - """Update sudoer group membership for a Linux user account. - - Args: - user: string, the name of the Linux user account. - sudoer: bool, True if the user should be a sudoer. - - Returns: - bool, True if user update succeeded. 
- """ - if sudoer: - self.logger.info('Adding user %s to the Google sudoers group.', user) - command = self.gpasswd_add_cmd.format( - user=user, group=self.google_sudoers_group) - else: - self.logger.info('Removing user %s from the Google sudoers group.', user) - command = self.gpasswd_remove_cmd.format( - user=user, group=self.google_sudoers_group) - - try: - subprocess.check_call(command.split(' ')) - except subprocess.CalledProcessError as e: - self.logger.warning('Could not update user %s. %s.', user, str(e)) - return False - else: - self.logger.debug('Removed user %s from the Google sudoers group.', user) - return True - - def _RemoveAuthorizedKeys(self, user): - """Remove a Linux user account's authorized keys file to prevent login. - - Args: - user: string, the Linux user account to remove access. - """ - pw_entry = self._GetUser(user) - if not pw_entry: - return - - home_dir = pw_entry.pw_dir - authorized_keys_file = os.path.join(home_dir, '.ssh', 'authorized_keys') - if os.path.exists(authorized_keys_file): - try: - os.remove(authorized_keys_file) - except OSError as e: - message = 'Could not remove authorized keys for user %s. %s.' - self.logger.warning(message, user, str(e)) - - def GetConfiguredUsers(self): - """Retrieve the list of configured Google user accounts. - - Returns: - list, the username strings of users congfigured by Google. - """ - if os.path.exists(self.google_users_file): - users = open(self.google_users_file).readlines() - else: - users = [] - return [user.strip() for user in users] - - def SetConfiguredUsers(self, users): - """Set the list of configured Google user accounts. - - Args: - users: list, the username strings of the Linux accounts. 
- """ - prefix = self.logger.name + '-' - with tempfile.NamedTemporaryFile( - mode='w', prefix=prefix, delete=True) as updated_users: - updated_users_file = updated_users.name - for user in users: - updated_users.write(user + '\n') - updated_users.flush() - if not os.path.exists(self.google_users_dir): - os.makedirs(self.google_users_dir) - shutil.copy(updated_users_file, self.google_users_file) - - file_utils.SetPermissions(self.google_users_file, mode=0o600, uid=0, gid=0) - - def UpdateUser(self, user, ssh_keys): - """Update a Linux user with authorized SSH keys. - - Args: - user: string, the name of the Linux user account. - ssh_keys: list, the SSH key strings associated with the user. - - Returns: - bool, True if the user account updated successfully. - """ - if not bool(USER_REGEX.match(user)): - self.logger.warning('Invalid user account name %s.', user) - return False - if not self._GetUser(user): - # User does not exist. Attempt to create the user and add them to the - # appropriate user groups. - if not (self._AddUser(user) - and self._UpdateUserGroups(user, self.groups)): - return False - # Add the user to the google sudoers group. - if not self._UpdateSudoer(user, sudoer=True): - return False - - # Don't try to manage account SSH keys with a shell set to disable - # logins. This helps avoid problems caused by operator and root sharing - # a home directory in CentOS and RHEL. - pw_entry = self._GetUser(user) - if pw_entry and os.path.basename(pw_entry.pw_shell) == 'nologin': - message = 'Not updating user %s. User set `nologin` as login shell.' - self.logger.debug(message, user) - return True - - try: - self._UpdateAuthorizedKeys(user, ssh_keys) - except (IOError, OSError) as e: - message = 'Could not update the authorized keys file for user %s. %s.' - self.logger.warning(message, user, str(e)) - return False - else: - return True - - def RemoveUser(self, user): - """Remove a Linux user account. - - Args: - user: string, the Linux user account to remove. 
- """ - self.logger.info('Removing user %s.', user) - if self.remove: - command = self.userdel_cmd.format(user=user) - try: - subprocess.check_call(command.split(' ')) - except subprocess.CalledProcessError as e: - self.logger.warning('Could not remove user %s. %s.', user, str(e)) - else: - self.logger.info('Removed user account %s.', user) - self._RemoveAuthorizedKeys(user) - self._UpdateSudoer(user, sudoer=False) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/accounts/oslogin_utils.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/accounts/oslogin_utils.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/accounts/oslogin_utils.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/accounts/oslogin_utils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,152 +0,0 @@ -#!/usr/bin/python -# Copyright 2017 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Utilities for provisioning or deprovisioning a Linux user account.""" - -import errno -import os -import subprocess -import time - -from google_compute_engine import constants - -NSS_CACHE_DURATION_SEC = 21600 # 6 hours in seconds. 
- - -class OsLoginUtils(object): - """Utilities for OS Login activation.""" - - def __init__(self, logger): - """Constructor. - - Args: - logger: logger object, used to write to SysLog and serial port. - """ - self.logger = logger - self.oslogin_installed = True - self.update_time = 0 - - def _RunOsLoginControl(self, params): - """Run the OS Login control script. - - Args: - params: list, the params to pass to the script - - Returns: - int, the return code from the call, or None if the script is not found. - """ - try: - return subprocess.call([constants.OSLOGIN_CONTROL_SCRIPT] + params) - except OSError as e: - if e.errno == errno.ENOENT: - return None - else: - raise - - def _GetStatus(self, two_factor=False): - """Check whether OS Login is installed. - - Args: - two_factor: bool, True if two factor should be enabled. - - Returns: - bool, True if OS Login is installed. - """ - params = ['status'] - if two_factor: - params += ['--twofactor'] - retcode = self._RunOsLoginControl(params) - if retcode is None: - if self.oslogin_installed: - self.logger.warning('OS Login not installed.') - self.oslogin_installed = False - return None - - # Prevent log spam when OS Login is not installed. - self.oslogin_installed = True - if not os.path.exists(constants.OSLOGIN_NSS_CACHE): - return False - return not retcode - - def _RunOsLoginNssCache(self): - """Run the OS Login NSS cache binary. - - Returns: - int, the return code from the call, or None if the script is not found. 
- """ - try: - return subprocess.call([constants.OSLOGIN_NSS_CACHE_SCRIPT]) - except OSError as e: - if e.errno == errno.ENOENT: - return None - else: - raise - - def _RemoveOsLoginNssCache(self): - """Remove the OS Login NSS cache file.""" - if os.path.exists(constants.OSLOGIN_NSS_CACHE): - try: - os.remove(constants.OSLOGIN_NSS_CACHE) - except OSError as e: - if e.errno != errno.ENOENT: - raise - - def UpdateOsLogin(self, oslogin_desired, two_factor_desired=False): - """Update whether OS Login is enabled and update NSS cache if necessary. - - Args: - oslogin_desired: bool, enable OS Login if True, disable if False. - two_factor_desired: bool, enable two factor if True, disable if False. - - Returns: - int, the return code from updating OS Login, or None if not present. - """ - oslogin_configured = self._GetStatus(two_factor=False) - if oslogin_configured is None: - return None - two_factor_configured = self._GetStatus(two_factor=True) - # Two factor can only be enabled when OS Login is enabled. - two_factor_desired = two_factor_desired and oslogin_desired - - if oslogin_desired: - params = ['activate'] - if two_factor_desired: - params += ['--twofactor'] - # OS Login is desired and not enabled. - if not oslogin_configured: - self.logger.info('Activating OS Login.') - return self._RunOsLoginControl(params) or self._RunOsLoginNssCache() - # Enable two factor authentication. - if two_factor_desired and not two_factor_configured: - self.logger.info('Activating OS Login two factor authentication.') - return self._RunOsLoginControl(params) or self._RunOsLoginNssCache() - # Deactivate two factor authentication. - if two_factor_configured and not two_factor_desired: - self.logger.info('Reactivating OS Login with two factor disabled.') - return (self._RunOsLoginControl(['deactivate']) - or self._RunOsLoginControl(params)) - # OS Login features are already enabled. Update the cache if appropriate. 
- current_time = time.time() - if current_time - self.update_time > NSS_CACHE_DURATION_SEC: - self.update_time = current_time - return self._RunOsLoginNssCache() - - elif oslogin_configured: - self.logger.info('Deactivating OS Login.') - return (self._RunOsLoginControl(['deactivate']) - or self._RemoveOsLoginNssCache()) - - # No action was needed. - return 0 diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/accounts/tests/accounts_daemon_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/accounts/tests/accounts_daemon_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/accounts/tests/accounts_daemon_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/accounts/tests/accounts_daemon_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,563 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unittest for accounts_daemon.py module.""" - -import datetime - -from google_compute_engine.accounts import accounts_daemon -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -class AccountsDaemonTest(unittest.TestCase): - - def setUp(self): - self.mock_logger = mock.Mock() - self.mock_watcher = mock.Mock() - self.mock_utils = mock.Mock() - self.mock_oslogin = mock.Mock() - - self.mock_setup = mock.create_autospec(accounts_daemon.AccountsDaemon) - self.mock_setup.logger = self.mock_logger - self.mock_setup.watcher = self.mock_watcher - self.mock_setup.utils = self.mock_utils - self.mock_setup.oslogin = self.mock_oslogin - - @mock.patch('google_compute_engine.accounts.accounts_daemon.accounts_utils') - @mock.patch('google_compute_engine.accounts.accounts_daemon.metadata_watcher') - @mock.patch('google_compute_engine.accounts.accounts_daemon.logger') - @mock.patch('google_compute_engine.accounts.accounts_daemon.file_utils') - def testAccountsDaemon( - self, mock_lock, mock_logger, mock_watcher, mock_utils): - mock_logger_instance = mock.Mock() - mock_logger.Logger.return_value = mock_logger_instance - mocks = mock.Mock() - mocks.attach_mock(mock_lock, 'lock') - mocks.attach_mock(mock_logger, 'logger') - mocks.attach_mock(mock_watcher, 'watcher') - mocks.attach_mock(mock_utils, 'utils') - with mock.patch.object( - accounts_daemon.AccountsDaemon, 'HandleAccounts') as mock_handle: - accounts_daemon.AccountsDaemon(groups='foo,bar', remove=True, debug=True) - expected_calls = [ - mock.call.logger.Logger(name=mock.ANY, debug=True, facility=mock.ANY), - mock.call.watcher.MetadataWatcher(logger=mock_logger_instance), - mock.call.utils.AccountsUtils( - logger=mock_logger_instance, groups='foo,bar', remove=True, - gpasswd_add_cmd=mock.ANY, gpasswd_remove_cmd=mock.ANY, - groupadd_cmd=mock.ANY, useradd_cmd=mock.ANY, - userdel_cmd=mock.ANY, usermod_cmd=mock.ANY), - 
mock.call.lock.LockFile(accounts_daemon.LOCKFILE), - mock.call.lock.LockFile().__enter__(), - mock.call.logger.Logger().info(mock.ANY), - mock.call.watcher.MetadataWatcher().WatchMetadata( - mock_handle, recursive=True, timeout=mock.ANY), - mock.call.lock.LockFile().__exit__(None, None, None), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.accounts.accounts_daemon.accounts_utils') - @mock.patch('google_compute_engine.accounts.accounts_daemon.metadata_watcher') - @mock.patch('google_compute_engine.accounts.accounts_daemon.logger') - @mock.patch('google_compute_engine.accounts.accounts_daemon.file_utils') - def testAccountsDaemonError( - self, mock_lock, mock_logger, mock_watcher, mock_utils): - mock_logger_instance = mock.Mock() - mock_logger.Logger.return_value = mock_logger_instance - mocks = mock.Mock() - mocks.attach_mock(mock_lock, 'lock') - mocks.attach_mock(mock_logger, 'logger') - mocks.attach_mock(mock_watcher, 'watcher') - mocks.attach_mock(mock_utils, 'utils') - mock_lock.LockFile.side_effect = IOError('Test Error') - with mock.patch.object(accounts_daemon.AccountsDaemon, 'HandleAccounts'): - accounts_daemon.AccountsDaemon() - expected_calls = [ - mock.call.logger.Logger( - name=mock.ANY, debug=False, facility=mock.ANY), - mock.call.watcher.MetadataWatcher(logger=mock_logger_instance), - mock.call.utils.AccountsUtils( - logger=mock_logger_instance, groups=None, remove=False, - gpasswd_add_cmd=mock.ANY, gpasswd_remove_cmd=mock.ANY, - groupadd_cmd=mock.ANY, useradd_cmd=mock.ANY, - userdel_cmd=mock.ANY, usermod_cmd=mock.ANY), - mock.call.lock.LockFile(accounts_daemon.LOCKFILE), - mock.call.logger.Logger().warning('Test Error'), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - def testHasExpired(self): - - def _GetTimestamp(days): - """Create a timestamp in the correct format with a days offset. - - Args: - days: int, number of days to add to the current date. 
- - Returns: - string, a timestamp with the format '%Y-%m-%dT%H:%M:%S+0000'. - """ - format_str = '%Y-%m-%dT%H:%M:%S+0000' - today = datetime.datetime.now() - timestamp = today + datetime.timedelta(days=days) - return timestamp.strftime(format_str) - - ssh_keys = { - None: False, - '': False, - 'Invalid': False, - 'user:ssh-rsa key user@domain.com': False, - 'user:ssh-rsa key google {"expireOn":"%s"}' % _GetTimestamp(-1): False, - 'user:ssh-rsa key google-ssh': False, - 'user:ssh-rsa key google-ssh {invalid:json}': False, - 'user:ssh-rsa key google-ssh {"userName":"user"}': False, - 'user:ssh-rsa key google-ssh {"expireOn":"invalid"}': False, - 'user:xyz key google-ssh {"expireOn":"%s"}' % _GetTimestamp(1): False, - 'user:xyz key google-ssh {"expireOn":"%s"}' % _GetTimestamp(-1): True, - } - - for key, expired in ssh_keys.items(): - self.assertEqual( - accounts_daemon.AccountsDaemon._HasExpired(self.mock_setup, key), - expired) - - def testParseAccountsData(self): - user_map = { - 'a': ['1', '2'], - 'b': ['3', '4', '5'], - } - accounts_data = 'skip\n' - for user, keys in user_map.items(): - for key in keys: - accounts_data += '%s:%s\n' % (user, key) - # Make the _HasExpired function treat odd numbers as expired SSH keys. 
- self.mock_setup._HasExpired.side_effect = lambda key: int(key) % 2 == 0 - - self.assertEqual( - accounts_daemon.AccountsDaemon._ParseAccountsData( - self.mock_setup, None), {}) - self.assertEqual( - accounts_daemon.AccountsDaemon._ParseAccountsData( - self.mock_setup, ''), {}) - expected_users = {'a': ['1'], 'b': ['3', '5']} - self.assertEqual(accounts_daemon.AccountsDaemon._ParseAccountsData( - self.mock_setup, accounts_data), expected_users) - - def testParseAccountsDataNonAscii(self): - accounts_data = [ - 'username:rsa ssh-ke%s invalid\n' % chr(165), - 'use%sname:rsa ssh-key\n' % chr(174), - 'username:rsa ssh-key\n', - ] - accounts_data = ''.join(accounts_data) - self.mock_setup._HasExpired.return_value = False - expected_users = {'username': ['rsa ssh-key']} - self.assertEqual(accounts_daemon.AccountsDaemon._ParseAccountsData( - self.mock_setup, accounts_data), expected_users) - - def testGetInstanceAndProjectAttributes(self): - - def _AssertAttributeDict(data, expected): - """Test the correct accounts data is returned. - - Args: - data: dictionary, the faux metadata server contents. - expected: list, the faux SSH keys expected to be set. 
- """ - self.assertEqual( - accounts_daemon.AccountsDaemon._GetInstanceAndProjectAttributes( - self.mock_setup, data), expected) - - data = None - _AssertAttributeDict(data, ({}, {})) - - data = {'test': 'data'} - expected = ({}, {}) - _AssertAttributeDict(data, expected) - - data = {'instance': {'attributes': {}}} - expected = ({}, {}) - _AssertAttributeDict(data, expected) - - data = {'instance': {'attributes': {'ssh-keys': '1'}}} - expected = ({'ssh-keys': '1'}, {}) - _AssertAttributeDict(data, expected) - - data = {'instance': {'attributes': {'ssh-keys': '1', 'sshKeys': '2'}}} - expected = ({'ssh-keys': '1', 'sshKeys': '2'}, {}) - _AssertAttributeDict(data, expected) - - data = {'project': {'attributes': {'ssh-keys': '1'}}} - expected = ({}, {'ssh-keys': '1'}) - _AssertAttributeDict(data, expected) - - data = {'project': {'attributes': {'ssh-keys': '1', 'sshKeys': '2'}}} - expected = ({}, {'ssh-keys': '1', 'sshKeys': '2'}) - _AssertAttributeDict(data, expected) - - data = { - 'instance': { - 'attributes': { - 'ssh-keys': '1', - 'sshKeys': '2', - }, - }, - 'project': { - 'attributes': { - 'ssh-keys': '3', - }, - }, - } - expected = ({'ssh-keys': '1', 'sshKeys': '2'}, {'ssh-keys': '3'}) - _AssertAttributeDict(data, expected) - - data = { - 'instance': { - 'attributes': { - 'ssh-keys': '1', - 'block-project-ssh-keys': 'false', - }, - }, - 'project': { - 'attributes': { - 'ssh-keys': '2', - }, - }, - } - expected = ({'block-project-ssh-keys': 'false', 'ssh-keys': '1'}, - {'ssh-keys': '2'}) - _AssertAttributeDict(data, expected) - - data = { - 'instance': { - 'attributes': { - 'ssh-keys': '1', - 'block-project-ssh-keys': 'true', - }, - }, - 'project': { - 'attributes': { - 'ssh-keys': '2', - }, - }, - } - expected = ({'block-project-ssh-keys': 'true', 'ssh-keys': '1'}, - {'ssh-keys': '2'}) - _AssertAttributeDict(data, expected) - - data = { - 'instance': { - 'attributes': { - 'ssh-keys': '1', - 'block-project-ssh-keys': 'false', - }, - }, - 'project': { - 
'attributes': { - 'ssh-keys': '2', - 'sshKeys': '3', - }, - }, - } - expected = ({'block-project-ssh-keys': 'false', 'ssh-keys': '1'}, - {'sshKeys': '3', 'ssh-keys': '2'}) - _AssertAttributeDict(data, expected) - - def testGetAccountsData(self): - - def _AssertAccountsData(data, expected): - """Test the correct accounts data is returned. - - Args: - data: dictionary, the faux metadata server contents. - expected: list, the faux SSH keys expected to be set. - """ - self.mock_setup._GetInstanceAndProjectAttributes.return_value = data - accounts_daemon.AccountsDaemon._GetAccountsData(self.mock_setup, data) - if expected: - call_args, _ = self.mock_setup._ParseAccountsData.call_args - actual = call_args[0] - self.assertEqual(set(actual.split()), set(expected)) - else: - self.mock_setup._ParseAccountsData.assert_called_once_with(expected) - self.mock_setup._ParseAccountsData.reset_mock() - - data = ({}, {}) - _AssertAccountsData(data, '') - - data = ({'ssh-keys': '1'}, {}) - _AssertAccountsData(data, ['1']) - - data = ({'ssh-keys': '1', 'sshKeys': '2'}, {}) - _AssertAccountsData(data, ['1', '2']) - - data = ({}, {'ssh-keys': '1'}) - _AssertAccountsData(data, ['1']) - - data = ({}, {'ssh-keys': '1', 'sshKeys': '2'}) - _AssertAccountsData(data, ['1', '2']) - - data = ({'ssh-keys': '1', 'sshKeys': '2'}, {'ssh-keys': '3'}) - _AssertAccountsData(data, ['1', '2']) - - data = ({'block-project-ssh-keys': 'false', 'ssh-keys': '1'}, - {'ssh-keys': '2'}) - _AssertAccountsData(data, ['1', '2']) - - data = ({'block-project-ssh-keys': 'true', 'ssh-keys': '1'}, - {'ssh-keys': '2'}) - _AssertAccountsData(data, ['1']) - - data = ({'block-project-ssh-keys': 'false', 'ssh-keys': '1'}, - {'sshKeys': '3', 'ssh-keys': '2'}) - _AssertAccountsData(data, ['1', '2', '3']) - - def testGetEnableOsLoginValue(self): - - def _AssertEnableOsLogin(data, expected): - """Test the correct value for enable-oslogin is returned. - - Args: - data: dictionary, the faux metadata server contents. 
- expected: bool, if True, OS Login is enabled. - """ - self.mock_setup._GetInstanceAndProjectAttributes.return_value = data - actual = accounts_daemon.AccountsDaemon._GetEnableOsLoginValue( - self.mock_setup, data) - self.assertEqual(actual, expected) - - data = ({}, {}) - _AssertEnableOsLogin(data, False) - - data = ({'enable-oslogin': 'true'}, {}) - _AssertEnableOsLogin(data, True) - - data = ({'enable-oslogin': 'false'}, {}) - _AssertEnableOsLogin(data, False) - - data = ({'enable-oslogin': 'yep'}, {}) - _AssertEnableOsLogin(data, False) - - data = ({'enable-oslogin': 'True'}, {}) - _AssertEnableOsLogin(data, True) - - data = ({'enable-oslogin': 'TRUE'}, {}) - _AssertEnableOsLogin(data, True) - - data = ({'enable-oslogin': ''}, {}) - _AssertEnableOsLogin(data, False) - - data = ({'enable-oslogin': 'true'}, {'enable-oslogin': 'true'}) - _AssertEnableOsLogin(data, True) - - data = ({'enable-oslogin': 'false'}, {'enable-oslogin': 'true'}) - _AssertEnableOsLogin(data, False) - - data = ({'enable-oslogin': ''}, {'enable-oslogin': 'true'}) - _AssertEnableOsLogin(data, True) - - data = ({}, {'enable-oslogin': 'true'}) - _AssertEnableOsLogin(data, True) - - data = ({}, {'enable-oslogin': 'false'}) - _AssertEnableOsLogin(data, False) - - data = ({'block-project-ssh-keys': 'false', 'ssh-keys': '1'}, - {'sshKeys': '3', 'ssh-keys': '2'}) - _AssertEnableOsLogin(data, False) - - data = ({'block-project-ssh-keys': 'false', 'ssh-keys': '1'}, - {'sshKeys': '3', 'ssh-keys': '2', 'enable-oslogin': 'true'}) - _AssertEnableOsLogin(data, True) - - def testGetEnableTwoFactorValue(self): - - def _AssertEnableTwoFactor(data, expected): - """Test the correct value for enable-oslogin-2fa is returned. - - Args: - data: dictionary, the faux metadata server contents. - expected: bool, if True, two factor authentication is enabled. 
- """ - self.mock_setup._GetInstanceAndProjectAttributes.return_value = data - actual = accounts_daemon.AccountsDaemon._GetEnableTwoFactorValue( - self.mock_setup, data) - self.assertEqual(actual, expected) - - data = ({}, {}) - _AssertEnableTwoFactor(data, False) - - data = ({'enable-oslogin-2fa': 'true'}, {}) - _AssertEnableTwoFactor(data, True) - - data = ({'enable-oslogin-2fa': 'false'}, {}) - _AssertEnableTwoFactor(data, False) - - data = ({'enable-oslogin-2fa': 'yep'}, {}) - _AssertEnableTwoFactor(data, False) - - data = ({'enable-oslogin-2fa': 'True'}, {}) - _AssertEnableTwoFactor(data, True) - - data = ({'enable-oslogin-2fa': 'TRUE'}, {}) - _AssertEnableTwoFactor(data, True) - - data = ({'enable-oslogin-2fa': ''}, {}) - _AssertEnableTwoFactor(data, False) - - data = ({'enable-oslogin-2fa': 'true'}, {'enable-oslogin-2fa': 'true'}) - _AssertEnableTwoFactor(data, True) - - data = ({'enable-oslogin-2fa': 'false'}, {'enable-oslogin-2fa': 'true'}) - _AssertEnableTwoFactor(data, False) - - data = ({'enable-oslogin-2fa': ''}, {'enable-oslogin-2fa': 'true'}) - _AssertEnableTwoFactor(data, True) - - data = ({}, {'enable-oslogin-2fa': 'true'}) - _AssertEnableTwoFactor(data, True) - - data = ({}, {'enable-oslogin-2fa': 'false'}) - _AssertEnableTwoFactor(data, False) - - data = ({'block-project-ssh-keys': 'false', 'ssh-keys': '1'}, - {'sshKeys': '3', 'ssh-keys': '2'}) - _AssertEnableTwoFactor(data, False) - - data = ({'block-project-ssh-keys': 'false', 'ssh-keys': '1'}, - {'sshKeys': '3', 'ssh-keys': '2', 'enable-oslogin-2fa': 'true'}) - _AssertEnableTwoFactor(data, True) - - def testUpdateUsers(self): - update_users = { - 'a': '1', - 'b': '2', - 'c': '3', - 'invalid': '4', - 'valid': '5', - 'unchanged': ['1', '2', '3'], - } - self.mock_setup.user_ssh_keys = { - 'unchanged': ['3', '2', '1'], - } - self.mock_setup.invalid_users = set(['invalid']) - # Make UpdateUser succeed for fake names longer than one character. 
- self.mock_utils.UpdateUser.side_effect = lambda user, _: len(user) > 1 - accounts_daemon.AccountsDaemon._UpdateUsers(self.mock_setup, update_users) - expected_calls = [ - mock.call('a', '1'), - mock.call('b', '2'), - mock.call('c', '3'), - mock.call('valid', '5'), - ] - self.mock_utils.UpdateUser.assert_has_calls(expected_calls, any_order=True) - self.assertEqual( - self.mock_utils.UpdateUser.call_count, len(expected_calls)) - self.assertEqual( - self.mock_setup.invalid_users, set(['invalid', 'a', 'b', 'c'])) - self.assertEqual( - self.mock_setup.user_ssh_keys, - {'valid': '5', 'unchanged': ['3', '2', '1']}) - - def testRemoveUsers(self): - remove_users = ['a', 'b', 'c', 'valid'] - self.mock_setup.user_ssh_keys = { - 'a': ['1'], - 'b': ['2'], - 'c': ['3'], - 'invalid': ['key'], - } - self.mock_setup.invalid_users = set(['invalid', 'a', 'b', 'c']) - accounts_daemon.AccountsDaemon._RemoveUsers(self.mock_setup, remove_users) - expected_calls = [ - mock.call('a'), - mock.call('b'), - mock.call('c'), - mock.call('valid'), - ] - self.mock_utils.RemoveUser.assert_has_calls(expected_calls) - self.assertEqual(self.mock_setup.invalid_users, set(['invalid'])) - self.assertEqual(self.mock_setup.user_ssh_keys, {'invalid': ['key']}) - - def testHandleAccountsNoOsLogin(self): - configured = ['c', 'c', 'b', 'b', 'a', 'a'] - desired = {'d': '1', 'c': '2'} - mocks = mock.Mock() - mocks.attach_mock(self.mock_utils, 'utils') - mocks.attach_mock(self.mock_setup, 'setup') - mocks.attach_mock(self.mock_oslogin, 'oslogin') - self.mock_utils.GetConfiguredUsers.return_value = configured - self.mock_setup._GetAccountsData.return_value = desired - self.mock_setup._GetEnableOsLoginValue.return_value = False - self.mock_oslogin.UpdateOsLogin.return_value = 0 - result = 'result' - expected_add = ['c', 'd'] - expected_remove = ['a', 'b'] - - accounts_daemon.AccountsDaemon.HandleAccounts(self.mock_setup, result) - expected_calls = [ - mock.call.setup.logger.debug(mock.ANY), - 
mock.call.utils.GetConfiguredUsers(), - mock.call.setup._GetEnableOsLoginValue(result), - mock.call.setup._GetEnableTwoFactorValue(result), - mock.call.setup._GetAccountsData(result), - mock.call.oslogin.UpdateOsLogin(False), - mock.call.setup._UpdateUsers(desired), - mock.call.setup._RemoveUsers(mock.ANY), - mock.call.utils.SetConfiguredUsers(mock.ANY), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - call_args, _ = self.mock_utils.SetConfiguredUsers.call_args - self.assertEqual(set(call_args[0]), set(expected_add)) - call_args, _ = self.mock_setup._RemoveUsers.call_args - self.assertEqual(set(call_args[0]), set(expected_remove)) - - def testHandleAccountsOsLogin(self): - configured = ['c', 'c', 'b', 'b', 'a', 'a'] - desired = {} - mocks = mock.Mock() - mocks.attach_mock(self.mock_utils, 'utils') - mocks.attach_mock(self.mock_setup, 'setup') - mocks.attach_mock(self.mock_oslogin, 'oslogin') - self.mock_utils.GetConfiguredUsers.return_value = configured - self.mock_setup._GetAccountsData.return_value = desired - self.mock_setup._GetEnableOsLoginValue.return_value = True - self.mock_setup._GetEnableTwoFactorValue.return_value = False - self.mock_oslogin.UpdateOsLogin.return_value = 0 - result = 'result' - expected_add = [] - expected_remove = ['a', 'b', 'c'] - - accounts_daemon.AccountsDaemon.HandleAccounts(self.mock_setup, result) - expected_calls = [ - mock.call.setup.logger.debug(mock.ANY), - mock.call.utils.GetConfiguredUsers(), - mock.call.setup._GetEnableOsLoginValue(result), - mock.call.setup._GetEnableTwoFactorValue(result), - mock.call.oslogin.UpdateOsLogin(True, two_factor_desired=False), - mock.call.setup._UpdateUsers(desired), - mock.call.setup._RemoveUsers(mock.ANY), - mock.call.utils.SetConfiguredUsers(mock.ANY), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - call_args, _ = self.mock_utils.SetConfiguredUsers.call_args - self.assertEqual(set(call_args[0]), set(expected_add)) - call_args, _ = self.mock_setup._RemoveUsers.call_args - 
self.assertEqual(set(call_args[0]), set(expected_remove)) - - -if __name__ == '__main__': - unittest.main() diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/accounts/tests/accounts_utils_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/accounts/tests/accounts_utils_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/accounts/tests/accounts_utils_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/accounts/tests/accounts_utils_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,746 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unittest for accounts_utils.py module.""" - -import subprocess - -from google_compute_engine.accounts import accounts_utils -from google_compute_engine.test_compat import builtin -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -class AccountsUtilsTest(unittest.TestCase): - - def setUp(self): - self.mock_logger = mock.Mock() - self.sudoers_group = 'google-sudoers' - self.sudoers_file = '/sudoers/file' - self.users_dir = '/users' - self.users_file = '/users/file' - self.gpasswd_add_cmd = 'gpasswd -a {user} {group}' - self.gpasswd_remove_cmd = 'gpasswd -d {user} {group}' - self.groupadd_cmd = 'groupadd {group}' - self.useradd_cmd = 'useradd -m -s /bin/bash -p * {user}' - self.userdel_cmd = 'userdel -r {user}' - self.usermod_cmd = 'usermod -G {groups} {user}' - - self.mock_utils = mock.create_autospec(accounts_utils.AccountsUtils) - self.mock_utils.google_comment = accounts_utils.AccountsUtils.google_comment - self.mock_utils.google_sudoers_group = self.sudoers_group - self.mock_utils.google_sudoers_file = self.sudoers_file - self.mock_utils.google_users_dir = self.users_dir - self.mock_utils.google_users_file = self.users_file - self.mock_utils.logger = self.mock_logger - self.mock_utils.gpasswd_add_cmd = self.gpasswd_add_cmd - self.mock_utils.gpasswd_remove_cmd = self.gpasswd_remove_cmd - self.mock_utils.groupadd_cmd = self.groupadd_cmd - self.mock_utils.useradd_cmd = self.useradd_cmd - self.mock_utils.userdel_cmd = self.userdel_cmd - self.mock_utils.usermod_cmd = self.usermod_cmd - - @mock.patch('google_compute_engine.accounts.accounts_utils.AccountsUtils._GetGroup') - @mock.patch('google_compute_engine.accounts.accounts_utils.AccountsUtils._CreateSudoersGroup') - def testAccountsUtils(self, mock_create, mock_group): - mock_logger = mock.Mock() - mock_group.side_effect = lambda group: 'google' in group - - utils = accounts_utils.AccountsUtils( - logger=mock_logger, groups='foo,google,bar', 
remove=True) - mock_create.assert_called_once_with() - self.assertEqual(utils.logger, mock_logger) - self.assertEqual(sorted(utils.groups), ['google']) - self.assertTrue(utils.remove) - - @mock.patch('google_compute_engine.accounts.accounts_utils.grp') - def testGetGroup(self, mock_grp): - mock_grp.getgrnam.return_value = 'Test' - self.assertEqual( - accounts_utils.AccountsUtils._GetGroup(self.mock_utils, 'valid'), - 'Test') - mock_grp.getgrnam.side_effect = KeyError('Test Error') - self.assertEqual( - accounts_utils.AccountsUtils._GetGroup(self.mock_utils, 'invalid'), - None) - expected_calls = [ - mock.call.getgrnam('valid'), - mock.call.getgrnam('invalid'), - ] - self.assertEqual(mock_grp.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.accounts.accounts_utils.file_utils.SetPermissions') - @mock.patch('google_compute_engine.accounts.accounts_utils.subprocess.check_call') - @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists') - def testCreateSudoersGroup(self, mock_exists, mock_call, mock_permissions): - mock_open = mock.mock_open() - mocks = mock.Mock() - mocks.attach_mock(mock_exists, 'exists') - mocks.attach_mock(mock_call, 'call') - mocks.attach_mock(mock_permissions, 'permissions') - mocks.attach_mock(self.mock_utils._GetGroup, 'group') - mocks.attach_mock(self.mock_logger, 'logger') - self.mock_utils._GetGroup.return_value = False - mock_exists.return_value = False - command = self.groupadd_cmd.format(group=self.sudoers_group) - - with mock.patch('%s.open' % builtin, mock_open, create=False): - accounts_utils.AccountsUtils._CreateSudoersGroup(self.mock_utils) - mock_open().write.assert_called_once_with(mock.ANY) - - expected_calls = [ - mock.call.group(self.sudoers_group), - mock.call.call(command.split(' ')), - mock.call.exists(self.sudoers_file), - mock.call.permissions(self.sudoers_file, mode=0o440, uid=0, gid=0), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - 
@mock.patch('google_compute_engine.accounts.accounts_utils.file_utils.SetPermissions') - @mock.patch('google_compute_engine.accounts.accounts_utils.subprocess.check_call') - @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists') - def testCreateSudoersGroupSkip( - self, mock_exists, mock_call, mock_permissions): - mock_open = mock.mock_open() - mocks = mock.Mock() - mocks.attach_mock(mock_exists, 'exists') - mocks.attach_mock(mock_call, 'call') - mocks.attach_mock(mock_permissions, 'permissions') - mocks.attach_mock(self.mock_utils._GetGroup, 'group') - mocks.attach_mock(self.mock_logger, 'logger') - self.mock_utils._GetGroup.return_value = True - mock_exists.return_value = True - - with mock.patch('%s.open' % builtin, mock_open, create=False): - accounts_utils.AccountsUtils._CreateSudoersGroup(self.mock_utils) - mock_open().write.assert_not_called() - - expected_calls = [ - mock.call.group(self.sudoers_group), - mock.call.exists(self.sudoers_file), - mock.call.permissions(self.sudoers_file, mode=0o440, uid=0, gid=0), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.accounts.accounts_utils.file_utils.SetPermissions') - @mock.patch('google_compute_engine.accounts.accounts_utils.subprocess.check_call') - @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists') - def testCreateSudoersGroupError( - self, mock_exists, mock_call, mock_permissions): - mocks = mock.Mock() - mocks.attach_mock(mock_exists, 'exists') - mocks.attach_mock(mock_call, 'call') - mocks.attach_mock(mock_permissions, 'permissions') - mocks.attach_mock(self.mock_utils._GetGroup, 'group') - mocks.attach_mock(self.mock_logger, 'logger') - self.mock_utils._GetGroup.return_value = False - mock_exists.return_value = True - mock_call.side_effect = subprocess.CalledProcessError(1, 'Test') - command = self.groupadd_cmd.format(group=self.sudoers_group) - - accounts_utils.AccountsUtils._CreateSudoersGroup(self.mock_utils) - 
expected_calls = [ - mock.call.group(self.sudoers_group), - mock.call.call(command.split(' ')), - mock.call.logger.warning(mock.ANY, mock.ANY), - mock.call.exists(self.sudoers_file), - mock.call.permissions(self.sudoers_file, mode=0o440, uid=0, gid=0), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists') - def testCreateSudoersGroupWriteError(self, mock_exists): - mock_open = mock.mock_open() - mocks = mock.Mock() - mocks.attach_mock(mock_exists, 'exists') - mocks.attach_mock(self.mock_utils._GetGroup, 'group') - mocks.attach_mock(self.mock_logger, 'logger') - self.mock_utils._GetGroup.return_value = True - mock_exists.return_value = False - mock_open.side_effect = IOError() - - accounts_utils.AccountsUtils._CreateSudoersGroup(self.mock_utils) - expected_calls = [ - mock.call.group(self.sudoers_group), - mock.call.exists(self.sudoers_file), - mock.call.logger.error(mock.ANY, self.sudoers_file, mock.ANY), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.accounts.accounts_utils.pwd') - def testGetUser(self, mock_pwd): - mock_pwd.getpwnam.return_value = 'Test' - self.assertEqual( - accounts_utils.AccountsUtils._GetUser(self.mock_utils, 'valid'), - 'Test') - mock_pwd.getpwnam.side_effect = KeyError('Test Error') - self.assertEqual( - accounts_utils.AccountsUtils._GetUser(self.mock_utils, 'invalid'), - None) - expected_calls = [ - mock.call.getpwnam('valid'), - mock.call.getpwnam('invalid'), - ] - self.assertEqual(mock_pwd.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.accounts.accounts_utils.subprocess.check_call') - def testAddUser(self, mock_call): - user = 'user' - command = self.useradd_cmd.format(user=user) - - self.assertTrue( - accounts_utils.AccountsUtils._AddUser(self.mock_utils, user)) - mock.call.assert_called_once_with(command.split(' ')), - expected_calls = [mock.call.info(mock.ANY, user)] * 2 - 
self.assertEqual(self.mock_logger.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.accounts.accounts_utils.subprocess.check_call') - def testAddUserError(self, mock_call): - user = 'user' - command = self.useradd_cmd.format(user=user) - mock_call.side_effect = subprocess.CalledProcessError(1, 'Test') - - self.assertFalse( - accounts_utils.AccountsUtils._AddUser(self.mock_utils, user)) - mock.call.assert_called_once_with(command.split(' ')), - expected_calls = [ - mock.call.info(mock.ANY, user), - mock.call.warning(mock.ANY, user, mock.ANY), - ] - self.assertEqual(self.mock_logger.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.accounts.accounts_utils.subprocess.check_call') - def testUpdateUserGroups(self, mock_call): - user = 'user' - groups = ['a', 'b', 'c'] - groups_string = ','.join(groups) - command = self.usermod_cmd.format(user=user, groups=groups_string) - - self.assertTrue( - accounts_utils.AccountsUtils._UpdateUserGroups( - self.mock_utils, user, groups)) - mock.call.assert_called_once_with(command.split(' ')), - expected_calls = [ - mock.call.debug(mock.ANY, user, groups_string), - mock.call.debug(mock.ANY, user), - ] - self.assertEqual(self.mock_logger.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.accounts.accounts_utils.subprocess.check_call') - def testUpdateUserGroupsError(self, mock_call): - user = 'user' - groups = ['a', 'b', 'c'] - groups_string = ','.join(groups) - command = self.usermod_cmd.format(user=user, groups=groups_string) - mock_call.side_effect = subprocess.CalledProcessError(1, 'Test') - - self.assertFalse( - accounts_utils.AccountsUtils._UpdateUserGroups( - self.mock_utils, user, groups)) - mock.call.assert_called_once_with(command.split(' ')), - expected_calls = [ - mock.call.debug(mock.ANY, user, groups_string), - mock.call.warning(mock.ANY, user, mock.ANY), - ] - self.assertEqual(self.mock_logger.mock_calls, expected_calls) - - 
@mock.patch('google_compute_engine.accounts.accounts_utils.file_utils.SetPermissions') - @mock.patch('google_compute_engine.accounts.accounts_utils.shutil.copy') - @mock.patch('google_compute_engine.accounts.accounts_utils.tempfile.NamedTemporaryFile') - @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists') - @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.islink') - def testUpdateAuthorizedKeys( - self, mock_islink, mock_exists, mock_tempfile, mock_copy, - mock_permissions): - mock_open = mock.mock_open() - user = 'user' - ssh_keys = ['Google key 1', 'Google key 2'] - temp_dest = '/tmp/dest' - pw_uid = 1 - pw_gid = 2 - pw_dir = '/home' - ssh_dir = '/home/.ssh' - authorized_keys_file = '/home/.ssh/authorized_keys' - pw_entry = accounts_utils.pwd.struct_passwd( - ('', '', pw_uid, pw_gid, '', pw_dir, '')) - self.mock_utils._GetUser.return_value = pw_entry - mock_islink.return_value = False - mock_exists.return_value = True - mock_tempfile.return_value = mock_tempfile - mock_tempfile.__enter__.return_value.name = temp_dest - self.mock_logger.name = 'test' - - with mock.patch('%s.open' % builtin, mock_open, create=False): - mock_open().readlines.return_value = [ - 'User key a\n', - 'User key b\n', - '\n', - self.mock_utils.google_comment + '\n', - 'Google key a\n', - self.mock_utils.google_comment + '\n', - 'Google key b\n', - 'User key c\n', - ] - accounts_utils.AccountsUtils._UpdateAuthorizedKeys( - self.mock_utils, user, ssh_keys) - - expected_calls = [ - mock.call(mode='w', prefix='test-', delete=True), - mock.call.__enter__(), - mock.call.__enter__().write('User key a\n'), - mock.call.__enter__().write('User key b\n'), - mock.call.__enter__().write('\n'), - mock.call.__enter__().write('User key c\n'), - mock.call.__enter__().write(self.mock_utils.google_comment + '\n'), - mock.call.__enter__().write('Google key 1\n'), - mock.call.__enter__().write(self.mock_utils.google_comment + '\n'), - mock.call.__enter__().write('Google 
key 2\n'), - mock.call.__enter__().flush(), - mock.call.__exit__(None, None, None), - ] - self.assertEqual(mock_tempfile.mock_calls, expected_calls) - mock_copy.assert_called_once_with(temp_dest, authorized_keys_file) - expected_calls = [ - mock.call(ssh_dir, mode=0o700, uid=pw_uid, gid=pw_gid, mkdir=True), - mock.call(authorized_keys_file, mode=0o600, uid=pw_uid, gid=pw_gid), - ] - self.assertEqual(mock_permissions.mock_calls, expected_calls) - self.mock_logger.warning.assert_not_called() - - @mock.patch('google_compute_engine.accounts.accounts_utils.file_utils.SetPermissions') - @mock.patch('google_compute_engine.accounts.accounts_utils.shutil.copy') - @mock.patch('google_compute_engine.accounts.accounts_utils.tempfile.NamedTemporaryFile') - @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists') - @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.islink') - def testUpdateAuthorizedKeysNoKeys( - self, mock_islink, mock_exists, mock_tempfile, mock_copy, - mock_permissions): - user = 'user' - ssh_keys = ['Google key 1'] - temp_dest = '/tmp/dest' - pw_uid = 1 - pw_gid = 2 - pw_dir = '/home' - ssh_dir = '/home/.ssh' - authorized_keys_file = '/home/.ssh/authorized_keys' - pw_entry = accounts_utils.pwd.struct_passwd( - ('', '', pw_uid, pw_gid, '', pw_dir, '')) - self.mock_utils._GetUser.return_value = pw_entry - mock_islink.return_value = False - mock_exists.side_effect = [True, False] - mock_tempfile.return_value = mock_tempfile - mock_tempfile.__enter__.return_value.name = temp_dest - self.mock_logger.name = 'test' - - # The authorized keys file does not exist so write a new one. 
- accounts_utils.AccountsUtils._UpdateAuthorizedKeys( - self.mock_utils, user, ssh_keys) - expected_calls = [ - mock.call(mode='w', prefix='test-', delete=True), - mock.call.__enter__(), - mock.call.__enter__().write(self.mock_utils.google_comment + '\n'), - mock.call.__enter__().write('Google key 1\n'), - mock.call.__enter__().flush(), - mock.call.__exit__(None, None, None), - ] - self.assertEqual(mock_tempfile.mock_calls, expected_calls) - mock_copy.assert_called_once_with(temp_dest, authorized_keys_file) - expected_calls = [ - mock.call(ssh_dir, mode=0o700, uid=pw_uid, gid=pw_gid, mkdir=True), - mock.call(authorized_keys_file, mode=0o600, uid=pw_uid, gid=pw_gid), - ] - self.assertEqual(mock_permissions.mock_calls, expected_calls) - self.mock_logger.warning.assert_not_called() - - @mock.patch('google_compute_engine.accounts.accounts_utils.file_utils.SetPermissions') - @mock.patch('google_compute_engine.accounts.accounts_utils.shutil.copy') - @mock.patch('google_compute_engine.accounts.accounts_utils.tempfile.NamedTemporaryFile') - @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists') - @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.islink') - def testUpdateAuthorizedKeysNoHomeDir( - self, mock_islink, mock_exists, mock_tempfile, mock_copy, - mock_permissions): - user = 'user' - ssh_keys = ['Google key 1'] - temp_dest = '/tmp/dest' - pw_uid = 1 - pw_gid = 2 - pw_dir = '/home/user' - ssh_dir = '/home/user/.ssh' - authorized_keys_file = '/home/user/.ssh/authorized_keys' - pw_entry = accounts_utils.pwd.struct_passwd( - ('', '', pw_uid, pw_gid, '', pw_dir, '')) - self.mock_utils._GetUser.return_value = pw_entry - mock_islink.return_value = False - mock_exists.side_effect = [False, False] - mock_tempfile.return_value = mock_tempfile - mock_tempfile.__enter__.return_value.name = temp_dest - self.mock_logger.name = 'test' - accounts_utils.AccountsUtils._UpdateAuthorizedKeys( - self.mock_utils, user, ssh_keys) - expected_calls = [ 
- mock.call(pw_dir, mode=0o755, uid=pw_uid, gid=pw_gid, mkdir=True), - mock.call(ssh_dir, mode=0o700, uid=pw_uid, gid=pw_gid, mkdir=True), - mock.call(authorized_keys_file, mode=0o600, uid=pw_uid, gid=pw_gid), - ] - self.assertEqual(mock_permissions.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.accounts.accounts_utils.file_utils.SetPermissions') - def testUpdateAuthorizedKeysNoUser(self, mock_permissions): - user = 'user' - ssh_keys = ['key'] - self.mock_utils._GetUser.return_value = None - - # The user does not exist, so do not write authorized keys. - accounts_utils.AccountsUtils._UpdateAuthorizedKeys( - self.mock_utils, user, ssh_keys) - self.mock_utils._GetUser.assert_called_once_with(user) - mock_permissions.assert_not_called() - self.mock_logger.warning.assert_not_called() - - @mock.patch('google_compute_engine.accounts.accounts_utils.file_utils.SetPermissions') - @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.islink') - def testUpdateAuthorizedKeysSymlink(self, mock_islink, mock_permissions): - user = 'user' - ssh_keys = ['Google key 1'] - pw_uid = 1 - pw_gid = 2 - pw_dir = '/home' - ssh_dir = '/home/.ssh' - authorized_keys_file = '/home/.ssh/authorized_keys' - pw_entry = accounts_utils.pwd.struct_passwd( - ('', '', pw_uid, pw_gid, '', pw_dir, '')) - self.mock_utils._GetUser.return_value = pw_entry - mock_islink.side_effect = [False, True] - - accounts_utils.AccountsUtils._UpdateAuthorizedKeys( - self.mock_utils, user, ssh_keys) - expected_calls = [mock.call(ssh_dir), mock.call(authorized_keys_file)] - self.assertEqual(mock_islink.mock_calls, expected_calls) - self.mock_logger.warning.assert_called_once_with(mock.ANY, user) - mock_permissions.assert_not_called() - - @mock.patch('google_compute_engine.accounts.accounts_utils.subprocess.check_call') - def testUpdateSudoer(self, mock_call): - user = 'user' - command = self.gpasswd_remove_cmd.format( - user=user, group=self.sudoers_group) - - self.assertTrue( - 
accounts_utils.AccountsUtils._UpdateSudoer(self.mock_utils, user)) - mock.call.assert_called_once_with(command.split(' ')) - expected_calls = [ - mock.call.info(mock.ANY, user), - mock.call.debug(mock.ANY, user), - ] - self.assertEqual(self.mock_logger.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.accounts.accounts_utils.subprocess.check_call') - def testUpdateSudoerAddSudoer(self, mock_call): - user = 'user' - command = self.gpasswd_add_cmd.format(user=user, group=self.sudoers_group) - - self.assertTrue( - accounts_utils.AccountsUtils._UpdateSudoer( - self.mock_utils, user, sudoer=True)) - mock.call.assert_called_once_with(command.split(' ')) - expected_calls = [ - mock.call.info(mock.ANY, user), - mock.call.debug(mock.ANY, user), - ] - self.assertEqual(self.mock_logger.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.accounts.accounts_utils.subprocess.check_call') - def testUpdateSudoerError(self, mock_call): - user = 'user' - command = self.usermod_cmd.format(user=user, groups=self.sudoers_group) - mock_call.side_effect = subprocess.CalledProcessError(1, 'Test') - - self.assertFalse( - accounts_utils.AccountsUtils._UpdateSudoer(self.mock_utils, user)) - mock.call.assert_called_once_with(command.split(' ')) - expected_calls = [ - mock.call.info(mock.ANY, user), - mock.call.warning(mock.ANY, user, mock.ANY), - ] - self.assertEqual(self.mock_logger.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.accounts.accounts_utils.os.remove') - @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists') - def testRemoveAuthorizedKeys(self, mock_exists, mock_remove): - user = 'user' - pw_dir = '/home' - authorized_keys_file = '/home/.ssh/authorized_keys' - pw_entry = accounts_utils.pwd.struct_passwd( - ('', '', '', '', '', pw_dir, '')) - self.mock_utils._GetUser.return_value = pw_entry - mock_exists.return_value = True - - accounts_utils.AccountsUtils._RemoveAuthorizedKeys(self.mock_utils, user) - 
self.mock_utils._GetUser.assert_called_once_with(user) - mock_exists.assert_called_once_with(authorized_keys_file) - mock_remove.assert_called_once_with(authorized_keys_file) - self.mock_logger.warning.assert_not_called() - - @mock.patch('google_compute_engine.accounts.accounts_utils.os.remove') - @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists') - def testRemoveAuthorizedKeysNoKeys(self, mock_exists, mock_remove): - user = 'user' - pw_dir = '/home' - authorized_keys_file = '/home/.ssh/authorized_keys' - pw_entry = accounts_utils.pwd.struct_passwd( - ('', '', '', '', '', pw_dir, '')) - self.mock_utils._GetUser.return_value = pw_entry - mock_exists.return_value = False - - accounts_utils.AccountsUtils._RemoveAuthorizedKeys(self.mock_utils, user) - self.mock_utils._GetUser.assert_called_once_with(user) - mock_exists.assert_called_once_with(authorized_keys_file) - mock_remove.assert_not_called() - - @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists') - def testRemoveAuthorizedKeysNoUser(self, mock_exists): - user = 'user' - self.mock_utils._GetUser.return_value = None - - accounts_utils.AccountsUtils._RemoveAuthorizedKeys(self.mock_utils, user) - self.mock_utils._GetUser.assert_called_once_with(user) - mock_exists.assert_not_called() - - @mock.patch('google_compute_engine.accounts.accounts_utils.os.remove') - @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists') - def testRemoveAuthorizedKeysError(self, mock_exists, mock_remove): - user = 'user' - pw_dir = '/home' - authorized_keys_file = '/home/.ssh/authorized_keys' - pw_entry = accounts_utils.pwd.struct_passwd( - ('', '', '', '', '', pw_dir, '')) - self.mock_utils._GetUser.return_value = pw_entry - mock_exists.return_value = True - mock_remove.side_effect = OSError('Test Error') - - accounts_utils.AccountsUtils._RemoveAuthorizedKeys(self.mock_utils, user) - self.mock_utils._GetUser.assert_called_once_with(user) - 
mock_exists.assert_called_once_with(authorized_keys_file) - mock_remove.assert_called_once_with(authorized_keys_file) - self.mock_logger.warning.assert_called_once_with(mock.ANY, user, mock.ANY) - - @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists') - def testGetConfiguredUsers(self, mock_exists): - mock_open = mock.mock_open() - mock_exists.return_value = True - with mock.patch('%s.open' % builtin, mock_open, create=False): - mock_open().readlines.return_value = ['a\n', 'b\n', 'c\n', '\n'] - self.assertEqual( - accounts_utils.AccountsUtils.GetConfiguredUsers(self.mock_utils), - ['a', 'b', 'c', '']) - - @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists') - def testGetConfiguredUsersEmpty(self, mock_exists): - mock_exists.return_value = False - self.assertEqual( - accounts_utils.AccountsUtils.GetConfiguredUsers(self.mock_utils), []) - - @mock.patch('google_compute_engine.accounts.accounts_utils.os.makedirs') - @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists') - @mock.patch('google_compute_engine.accounts.accounts_utils.file_utils.SetPermissions') - @mock.patch('google_compute_engine.accounts.accounts_utils.shutil.copy') - @mock.patch('google_compute_engine.accounts.accounts_utils.tempfile.NamedTemporaryFile') - def testSetConfiguredUsers( - self, mock_tempfile, mock_copy, mock_permissions, mock_exists, - mock_makedirs): - temp_dest = '/temp/dest' - users = ['a', 'b', 'c'] - mock_tempfile.return_value = mock_tempfile - mock_tempfile.__enter__.return_value.name = temp_dest - mock_exists.return_value = False - self.mock_logger.name = 'test' - - accounts_utils.AccountsUtils.SetConfiguredUsers(self.mock_utils, users) - - expected_calls = [ - mock.call(mode='w', prefix='test-', delete=True), - mock.call.__enter__(), - mock.call.__enter__().write('a\n'), - mock.call.__enter__().write('b\n'), - mock.call.__enter__().write('c\n'), - mock.call.__enter__().flush(), - mock.call.__exit__(None, None, 
None), - ] - self.assertEqual(mock_tempfile.mock_calls, expected_calls) - mock_makedirs.assert_called_once_with(self.users_dir) - mock_copy.assert_called_once_with(temp_dest, self.users_file) - mock_permissions.assert_called_once_with( - self.users_file, mode=0o600, uid=0, gid=0) - - def testUpdateUser(self): - valid_users = [ - 'user', - '_', - '.', - '.abc_', - '_abc-', - 'ABC', - 'A_.-', - ] - groups = ['a', 'b', 'c'] - keys = ['Key 1', 'Key 2'] - pw_entry = accounts_utils.pwd.struct_passwd(tuple(['']*7)) - self.mock_utils.groups = groups - self.mock_utils._GetUser.return_value = pw_entry - self.mock_utils._AddUser.return_value = True - self.mock_utils._UpdateUserGroups.return_value = True - for user in valid_users: - self.assertTrue( - accounts_utils.AccountsUtils.UpdateUser(self.mock_utils, user, keys)) - self.mock_utils._UpdateSudoer.assert_called_once_with(user, sudoer=True) - self.mock_utils._UpdateSudoer.reset_mock() - self.mock_utils._UpdateAuthorizedKeys.assert_called_once_with(user, keys) - self.mock_utils._UpdateAuthorizedKeys.reset_mock() - self.mock_logger.warning.assert_not_called() - - def testUpdateUserInvalidUser(self): - self.mock_utils._GetUser = mock.Mock() - invalid_users = [ - '', - '!#$%^', - '-abc', - '#abc', - '^abc', - 'abc*xyz', - 'abc xyz', - 'xyz*', - 'xyz$', - 'areallylongusernamethatexceedsthethirtytwocharacterlimit' - ] - for user in invalid_users: - self.assertFalse( - accounts_utils.AccountsUtils.UpdateUser(self.mock_utils, user, [])) - self.mock_logger.warning.assert_called_once_with(mock.ANY, user) - self.mock_logger.reset_mock() - self.mock_utils._GetUser.assert_not_called() - - def testUpdateUserFailedAddUser(self): - self.mock_utils._UpdateUserGroups = mock.Mock() - user = 'user' - self.mock_utils._GetUser.return_value = False - self.mock_utils._AddUser.return_value = False - - self.assertFalse( - accounts_utils.AccountsUtils.UpdateUser(self.mock_utils, user, [])) - self.mock_utils._GetUser.assert_called_once_with(user) - 
self.mock_utils._AddUser.assert_called_once_with(user) - - def testUpdateUserFailedUpdateGroups(self): - user = 'user' - groups = ['a', 'b', 'c'] - self.mock_utils.groups = groups - self.mock_utils._GetUser.return_value = False - self.mock_utils._AddUser.return_value = True - self.mock_utils._UpdateUserGroups.return_value = False - - self.assertFalse( - accounts_utils.AccountsUtils.UpdateUser(self.mock_utils, user, [])) - self.mock_utils._GetUser.assert_called_once_with(user) - self.mock_utils._AddUser.assert_called_once_with(user) - self.mock_utils._UpdateUserGroups.assert_called_once_with(user, groups) - - def testUpdateUserNoLogin(self): - self.mock_utils._UpdateAuthorizedKeys = mock.Mock() - user = 'user' - groups = ['a', 'b', 'c'] - pw_shell = '/sbin/nologin' - pw_entry = accounts_utils.pwd.struct_passwd( - ('', '', '', '', '', '', pw_shell)) - self.mock_utils.groups = groups - self.mock_utils._GetUser.return_value = pw_entry - self.mock_utils._UpdateUserGroups.return_value = True - - self.assertTrue( - accounts_utils.AccountsUtils.UpdateUser(self.mock_utils, user, [])) - self.mock_utils._UpdateSudoer.assert_called_once_with(user, sudoer=True) - self.mock_utils._UpdateAuthorizedKeys.assert_not_called() - - def testUpdateUserSudoersError(self): - user = 'user' - groups = ['a', 'b', 'c'] - keys = ['Key 1', 'Key 2'] - pw_entry = accounts_utils.pwd.struct_passwd(tuple(['']*7)) - self.mock_utils.groups = groups - self.mock_utils._GetUser.return_value = pw_entry - self.mock_utils._UpdateSudoer.return_value = False - - self.assertFalse( - accounts_utils.AccountsUtils.UpdateUser(self.mock_utils, user, keys)) - self.mock_utils._GetUser.assert_called_once_with(user) - self.mock_utils._UpdateSudoer.assert_called_once_with(user, sudoer=True) - - def testUpdateUserError(self): - user = 'user' - groups = ['a', 'b', 'c'] - keys = ['Key 1', 'Key 2'] - pw_entry = accounts_utils.pwd.struct_passwd(tuple(['']*7)) - self.mock_utils.groups = groups - 
self.mock_utils._GetUser.return_value = pw_entry - self.mock_utils._AddUser.return_value = True - self.mock_utils._UpdateAuthorizedKeys.side_effect = IOError('Test Error') - - self.assertFalse( - accounts_utils.AccountsUtils.UpdateUser(self.mock_utils, user, keys)) - self.mock_logger.warning.assert_called_once_with(mock.ANY, user, mock.ANY) - - @mock.patch('google_compute_engine.accounts.accounts_utils.subprocess.check_call') - def testRemoveUser(self, mock_call): - user = 'user' - self.mock_utils.remove = False - - accounts_utils.AccountsUtils.RemoveUser(self.mock_utils, user) - self.mock_utils._RemoveAuthorizedKeys.assert_called_once_with(user) - self.mock_utils._UpdateSudoer.assert_called_once_with(user, sudoer=False) - mock_call.assert_not_called() - - @mock.patch('google_compute_engine.accounts.accounts_utils.subprocess.check_call') - def testRemoveUserForce(self, mock_call): - user = 'user' - command = self.userdel_cmd.format(user=user) - self.mock_utils.remove = True - - accounts_utils.AccountsUtils.RemoveUser(self.mock_utils, user) - mock.call.assert_called_once_with(command.split(' ')), - expected_calls = [mock.call.info(mock.ANY, user)] * 2 - self.assertEqual(self.mock_logger.mock_calls, expected_calls) - self.mock_utils._RemoveAuthorizedKeys.assert_called_once_with(user) - self.mock_utils._UpdateSudoer.assert_called_once_with(user, sudoer=False) - - @mock.patch('google_compute_engine.accounts.accounts_utils.subprocess.check_call') - def testRemoveUserError(self, mock_call): - user = 'user' - command = self.userdel_cmd.format(user=user) - mock_call.side_effect = subprocess.CalledProcessError(1, 'Test') - self.mock_utils.remove = True - - accounts_utils.AccountsUtils.RemoveUser(self.mock_utils, user) - mock.call.assert_called_once_with(command.split(' ')), - expected_calls = [ - mock.call.info(mock.ANY, user), - mock.call.warning(mock.ANY, user, mock.ANY), - ] - self.assertEqual(self.mock_logger.mock_calls, expected_calls) - 
self.mock_utils._RemoveAuthorizedKeys.assert_called_once_with(user) - self.mock_utils._UpdateSudoer.assert_called_once_with(user, sudoer=False) - - -if __name__ == '__main__': - unittest.main() diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/accounts/tests/oslogin_utils_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/accounts/tests/oslogin_utils_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/accounts/tests/oslogin_utils_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/accounts/tests/oslogin_utils_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,340 +0,0 @@ -#!/usr/bin/python -# Copyright 2017 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unittest for oslogin_utils.py module.""" - -import itertools - -from google_compute_engine.accounts import oslogin_utils -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -class OsLoginUtilsTest(unittest.TestCase): - - def setUp(self): - self.mock_logger = mock.Mock() - self.oslogin_control_script = 'google_oslogin_control' - self.oslogin_nss_cache = '/etc/oslogin_passwd.cache' - self.oslogin_nss_cache_script = 'google_oslogin_nss_cache' - - self.mock_oslogin = mock.create_autospec(oslogin_utils.OsLoginUtils) - self.mock_oslogin.logger = self.mock_logger - self.mock_oslogin.oslogin_installed = True - self.mock_oslogin.update_time = 0 - - @mock.patch('google_compute_engine.accounts.oslogin_utils.subprocess.call') - def testRunOsLoginControl(self, mock_call): - expected_return_value = 0 - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - mock_call.return_value = expected_return_value - - self.assertEqual( - oslogin_utils.OsLoginUtils._RunOsLoginControl( - self.mock_oslogin, ['activate']), expected_return_value) - expected_calls = [ - mock.call.call([self.oslogin_control_script, 'activate']), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.accounts.oslogin_utils.subprocess.call') - def testRunOsLoginControlStatus(self, mock_call): - expected_return_value = 3 - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - mock_call.return_value = expected_return_value - - self.assertEqual( - oslogin_utils.OsLoginUtils._RunOsLoginControl( - self.mock_oslogin, ['status']), expected_return_value) - expected_calls = [ - mock.call.call([self.oslogin_control_script, 'status']), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.accounts.oslogin_utils.subprocess.call') - def testOsLoginNotInstalled(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - mock_call.side_effect = 
OSError(2, 'Not Found') - - self.assertIsNone( - oslogin_utils.OsLoginUtils._RunOsLoginControl( - self.mock_oslogin, ['status'])) - expected_calls = [ - mock.call.call([self.oslogin_control_script, 'status']), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.accounts.oslogin_utils.subprocess.call') - def testOsLoginControlError(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - mock_call.side_effect = OSError - - with self.assertRaises(OSError): - oslogin_utils.OsLoginUtils._RunOsLoginControl( - self.mock_oslogin, ['status']) - expected_calls = [ - mock.call.call([self.oslogin_control_script, 'status']), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.accounts.oslogin_utils.os.path.exists') - def testGetStatusActive(self, mock_exists): - mocks = mock.Mock() - mocks.attach_mock(mock_exists, 'exists') - mocks.attach_mock(self.mock_oslogin, 'oslogin') - self.mock_oslogin._RunOsLoginControl.return_value = 0 - mock_exists.return_value = True - - self.assertTrue( - oslogin_utils.OsLoginUtils._GetStatus( - self.mock_oslogin, two_factor=False)) - expected_calls = [ - mock.call.oslogin._RunOsLoginControl(['status']), - mock.call.exists(self.oslogin_nss_cache), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.accounts.oslogin_utils.os.path.exists') - def testGetStatusNotActive(self, mock_exists): - mocks = mock.Mock() - mocks.attach_mock(mock_exists, 'exists') - mocks.attach_mock(self.mock_oslogin, 'oslogin') - self.mock_oslogin._RunOsLoginControl.return_value = 3 - mock_exists.return_value = True - - self.assertFalse( - oslogin_utils.OsLoginUtils._GetStatus( - self.mock_oslogin, two_factor=True)) - expected_calls = [ - mock.call.oslogin._RunOsLoginControl(['status', '--twofactor']), - mock.call.exists(self.oslogin_nss_cache), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - 
@mock.patch('google_compute_engine.accounts.oslogin_utils.os.path.exists') - def testGetStatusNoCache(self, mock_exists): - mocks = mock.Mock() - mocks.attach_mock(mock_exists, 'exists') - self.mock_oslogin._RunOsLoginControl.return_value = 0 - mock_exists.return_value = False - - self.assertFalse(oslogin_utils.OsLoginUtils._GetStatus(self.mock_oslogin)) - expected_calls = [mock.call.exists(self.oslogin_nss_cache)] - self.assertEqual(mocks.mock_calls, expected_calls) - - def testGetStatusNotInstalled(self): - mocks = mock.Mock() - self.mock_oslogin._RunOsLoginControl.return_value = None - mocks.attach_mock(self.mock_logger, 'logger') - - self.assertTrue(self.mock_oslogin.oslogin_installed) - self.assertFalse(oslogin_utils.OsLoginUtils._GetStatus(self.mock_oslogin)) - self.assertFalse(self.mock_oslogin.oslogin_installed) - self.assertFalse(oslogin_utils.OsLoginUtils._GetStatus(self.mock_oslogin)) - # Should only log once, even though called twice. - expected_calls = [ - mock.call.logger.warning(mock.ANY), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.accounts.oslogin_utils.subprocess.call') - def testRunOsLoginNssCache(self, mock_call): - expected_return_value = 0 - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - mock_call.return_value = expected_return_value - - self.assertEqual( - oslogin_utils.OsLoginUtils._RunOsLoginNssCache(self.mock_oslogin), - expected_return_value) - expected_calls = [ - mock.call.call([self.oslogin_nss_cache_script]), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.accounts.oslogin_utils.subprocess.call') - def testRunOsLoginNssCacheNotInstalled(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - mock_call.side_effect = OSError(2, 'Not Found') - - self.assertIsNone( - oslogin_utils.OsLoginUtils._RunOsLoginNssCache(self.mock_oslogin)) - expected_calls = [ - 
mock.call.call([self.oslogin_nss_cache_script]), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.accounts.oslogin_utils.subprocess.call') - def testRunOsLoginNssCacheError(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - mock_call.side_effect = OSError - - with self.assertRaises(OSError): - oslogin_utils.OsLoginUtils._RunOsLoginNssCache(self.mock_oslogin) - expected_calls = [ - mock.call.call([self.oslogin_nss_cache_script]), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.accounts.oslogin_utils.os.remove') - @mock.patch('google_compute_engine.accounts.oslogin_utils.os.path.exists') - def testRemoveOsLoginNssCache(self, mock_exists, mock_remove): - mock_exists.return_value = True - - oslogin_utils.OsLoginUtils._RemoveOsLoginNssCache(self.mock_oslogin) - mock_exists.assert_called_once_with(self.oslogin_nss_cache) - mock_remove.assert_called_once_with(self.oslogin_nss_cache) - - @mock.patch('google_compute_engine.accounts.oslogin_utils.os.remove') - @mock.patch('google_compute_engine.accounts.oslogin_utils.os.path.exists') - def testRemoveOsLoginNssCacheNotFound(self, mock_exists, mock_remove): - mock_exists.return_value = False - - oslogin_utils.OsLoginUtils._RemoveOsLoginNssCache(self.mock_oslogin) - mock_exists.assert_called_once_with(self.oslogin_nss_cache) - mock_remove.assert_not_called() - - @mock.patch('google_compute_engine.accounts.oslogin_utils.os.remove') - @mock.patch('google_compute_engine.accounts.oslogin_utils.os.path.exists') - def testRemoveOsLoginNssCacheNotInstalled(self, mock_exists, mock_remove): - mock_exists.return_value = True - mock_remove.side_effect = OSError(2, 'Not Found') - - oslogin_utils.OsLoginUtils._RemoveOsLoginNssCache(self.mock_oslogin) - mock_exists.assert_called_once_with(self.oslogin_nss_cache) - mock_remove.assert_called_once_with(self.oslogin_nss_cache) - - 
@mock.patch('google_compute_engine.accounts.oslogin_utils.os.remove') - @mock.patch('google_compute_engine.accounts.oslogin_utils.os.path.exists') - def testRemoveOsLoginNssCacheError(self, mock_exists, mock_remove): - mock_exists.return_value = True - mock_remove.side_effect = OSError - - with self.assertRaises(OSError): - oslogin_utils.OsLoginUtils._RemoveOsLoginNssCache(self.mock_oslogin) - - @mock.patch('time.time') - def testUpdateOsLoginUpdateCache(self, mock_time): - mocks = mock.Mock() - mocks.attach_mock(self.mock_oslogin, 'oslogin') - self.mock_oslogin._RunOsLoginControl.return_value = 0 - self.mock_oslogin._GetStatus.return_value = True - mock_time.return_value = 6 * 60 * 60 + 1 - - oslogin_utils.OsLoginUtils.UpdateOsLogin( - self.mock_oslogin, True, two_factor_desired=True) - expected_calls = [ - mock.call.oslogin._GetStatus(two_factor=False), - mock.call.oslogin._GetStatus(two_factor=True), - mock.call.oslogin._RunOsLoginNssCache(), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('time.time') - def testUpdateOsLogin(self, mock_time): - - def _AssertNoUpdate(): - expected_calls = [ - mock.call.oslogin._GetStatus(two_factor=False), - mock.call.oslogin._GetStatus(two_factor=True), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - self.assertEqual(return_value, 0) - - def _AssertActivated(two_factor=False): - params = ['activate', '--twofactor'] if two_factor else ['activate'] - expected_calls = [ - mock.call.oslogin._GetStatus(two_factor=False), - mock.call.oslogin._GetStatus(two_factor=True), - mock.call.logger.info(mock.ANY), - mock.call.oslogin._RunOsLoginControl(params), - mock.call.oslogin._RunOsLoginNssCache(), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - def _AssertDeactivated(): - expected_calls = [ - mock.call.oslogin._GetStatus(two_factor=False), - mock.call.oslogin._GetStatus(two_factor=True), - mock.call.logger.info(mock.ANY), - mock.call.oslogin._RunOsLoginControl(['deactivate']), - 
mock.call.oslogin._RemoveOsLoginNssCache(), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - def _AssertReactivated(): - expected_calls = [ - mock.call.oslogin._GetStatus(two_factor=False), - mock.call.oslogin._GetStatus(two_factor=True), - mock.call.logger.info(mock.ANY), - mock.call.oslogin._RunOsLoginControl(['deactivate']), - mock.call.oslogin._RunOsLoginControl(['activate']), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - parameters = list(itertools.product([False, True], repeat=4)) - for (oslogin, two_factor, oslogin_config, two_factor_config) in parameters: - mocks = mock.Mock() - mocks.attach_mock(self.mock_logger, 'logger') - mocks.attach_mock(self.mock_oslogin, 'oslogin') - self.mock_oslogin._RunOsLoginControl.return_value = 0 - self.mock_oslogin._GetStatus.side_effect = [ - oslogin_config, two_factor_config] - mock_time.return_value = 6 * 60 * 60 - return_value = oslogin_utils.OsLoginUtils.UpdateOsLogin( - self.mock_oslogin, oslogin, two_factor_desired=two_factor) - - if oslogin_config: - if not oslogin: - _AssertDeactivated() - elif two_factor_config: - if not two_factor: - _AssertReactivated() - else: - _AssertNoUpdate() - else: - if two_factor: - _AssertActivated(two_factor=True) - else: - _AssertNoUpdate() - else: - if oslogin: - _AssertActivated(two_factor=two_factor) - else: - _AssertNoUpdate() - self.mock_logger.reset_mock() - self.mock_oslogin.reset_mock() - - def testUpdateOsLoginNotInstalled(self): - mocks = mock.Mock() - mocks.attach_mock(self.mock_oslogin, 'oslogin') - self.mock_oslogin._RunOsLoginControl.return_value = 0 - self.mock_oslogin._GetStatus.return_value = None - - return_value = oslogin_utils.OsLoginUtils.UpdateOsLogin( - self.mock_oslogin, True) - expected_calls = [mock.call.oslogin._GetStatus(two_factor=False)] - self.assertEqual(mocks.mock_calls, expected_calls) - self.assertEqual(return_value, None) - - -if __name__ == '__main__': - unittest.main() diff -Nru 
gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/boto/boto_config.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/boto/boto_config.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/boto/boto_config.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/boto/boto_config.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,93 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""A library used to set up the system boto.cfg file. - -If a project ID is not provided, this request the project ID from the -metadata server and install the compute authentication plugin. - -Note the config starts with the content in /etc/boto.cfg.template, -overrides settings, and then persists it into /etc/boto.cfg. This -is done so that the system boto.cfg can be removed prior to image -packaging. 
-""" - -import os - -from google_compute_engine import config_manager -from google_compute_engine import constants -from google_compute_engine import logger -from google_compute_engine import metadata_watcher - - -class BotoConfig(object): - """Creates a boto config file for standalone GSUtil.""" - - # WARNING: The path should remain as /etc/boto.cfg in order to not break - # tools, such as gsutil, that rely on loading well-known Boto config paths. - # If you want to change this, please consult the gsutil team - # (GoogleCloudPlatform/gsutil) first. - boto_config = constants.BOTOCONFDIR + '/etc/boto.cfg' - boto_config_template = constants.BOTOCONFDIR + '/etc/boto.cfg.template' - boto_config_script = os.path.abspath(__file__) - boto_config_header = ( - 'This file is automatically created at boot time by the %s script. Do ' - 'not edit this file directly. If you need to add items to this file, ' - 'create or edit %s instead and then re-run google_instance_setup.') - - def __init__(self, project_id=None, debug=False): - """Constructor. - - Args: - project_id: string, the project ID to use in the config file. - debug: bool, True if debug output should write to the console. - """ - self.logger = logger.Logger(name='boto-setup', debug=debug) - self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger) - self._CreateConfig(project_id) - - def _GetNumericProjectId(self): - """Get the numeric project ID for this VM. - - Returns: - string, the numeric project ID if one is found. - """ - project_id = 'project/numeric-project-id' - return self.watcher.GetMetadata(metadata_key=project_id, recursive=False) - - def _CreateConfig(self, project_id): - """Create the boto config to support standalone GSUtil. - - Args: - project_id: string, the project ID to use in the config file. - """ - project_id = project_id or self._GetNumericProjectId() - - # Our project doesn't support service accounts. 
- if not project_id: - return - - self.boto_config_header %= ( - self.boto_config_script, self.boto_config_template) - config = config_manager.ConfigManager( - config_file=self.boto_config_template, - config_header=self.boto_config_header) - - # WARNING: If you want to change the contents of this config file, please - # consult the gsutil team (GoogleCloudPlatform/gsutil) first. - config.SetOption('GSUtil', 'default_project_id', project_id) - config.SetOption('GSUtil', 'default_api_version', '2') - config.SetOption('GoogleCompute', 'service_account', 'default') - config.WriteConfig(config_file=self.boto_config) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/boto/compute_auth.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/boto/compute_auth.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/boto/compute_auth.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/boto/compute_auth.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,68 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Authentication module for using Google Compute service accounts.""" - -from boto import auth_handler -from google_compute_engine import logger -from google_compute_engine import metadata_watcher - -GS_SCOPES = set([ - 'https://www.googleapis.com/auth/devstorage.read_only', - 'https://www.googleapis.com/auth/devstorage.read_write', - 'https://www.googleapis.com/auth/devstorage.full_control', -]) - - -class ComputeAuth(auth_handler.AuthHandler): - """Google Compute service account auth handler. - - The boto library reads the system config file (/etc/boto.cfg) and looks - at a config value called 'plugin_directory'. It then loads the Python - files and find classes derived from boto.auth_handler.AuthHandler. - """ - - capability = ['google-oauth2', 's3'] - metadata_key = 'instance/service-accounts' - - def __init__(self, path, config, provider): - self.logger = logger.Logger(name='compute-auth') - self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger) - self.service_account = config.get('GoogleCompute', 'service_account', '') - self.scopes = None - if provider.name == 'google' and self.service_account: - self.scopes = self._GetGsScopes() - if not self.scopes: - raise auth_handler.NotReadyToAuthenticate() - - def _GetGsScopes(self): - """Return all Google Storage scopes available on this VM.""" - service_accounts = self.watcher.GetMetadata(metadata_key=self.metadata_key) - try: - scopes = service_accounts[self.service_account]['scopes'] - return list(GS_SCOPES.intersection(set(scopes))) if scopes else None - except KeyError: - return None - - def _GetAccessToken(self): - """Return an OAuth 2.0 access token for Google Storage.""" - service_accounts = self.watcher.GetMetadata(metadata_key=self.metadata_key) - try: - return service_accounts[self.service_account]['token']['access_token'] - except KeyError: - return None - - def add_auth(self, http_request): - http_request.headers['Authorization'] = 'OAuth %s' % self._GetAccessToken() diff -Nru 
gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/boto/tests/boto_config_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/boto/tests/boto_config_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/boto/tests/boto_config_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/boto/tests/boto_config_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,93 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unittest for boto_config.py module.""" - -from google_compute_engine.boto import boto_config -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -class BotoConfigTest(unittest.TestCase): - - def setUp(self): - self.project_id = 'project' - boto_config.BotoConfig.boto_config = 'config' - boto_config.BotoConfig.boto_config_template = 'template' - boto_config.BotoConfig.boto_config_script = '/tmp/test.py' - boto_config.BotoConfig.boto_config_header = '%s %s' - - @mock.patch('google_compute_engine.boto.boto_config.metadata_watcher') - @mock.patch('google_compute_engine.boto.boto_config.logger') - @mock.patch('google_compute_engine.boto.boto_config.config_manager') - def testCreateConfig(self, mock_config, mock_logger, mock_watcher): - mock_config_instance = mock.Mock() - mock_config.ConfigManager.return_value = mock_config_instance - mocks = mock.Mock() - mocks.attach_mock(mock_config.ConfigManager, 'config') - mocks.attach_mock(mock_config_instance.SetOption, 'set') - mocks.attach_mock(mock_config_instance.WriteConfig, 'write') - mocks.attach_mock(mock_logger, 'logger') - mocks.attach_mock(mock_watcher, 'watcher') - mock_logger_instance = mock.Mock() - mock_logger.Logger.return_value = mock_logger_instance - - boto_config.BotoConfig(self.project_id, debug=True) - expected_calls = [ - mock.call.logger.Logger(name=mock.ANY, debug=True), - mock.call.watcher.MetadataWatcher(logger=mock_logger_instance), - mock.call.config( - config_file='template', config_header='/tmp/test.py template'), - mock.call.set('GSUtil', 'default_project_id', self.project_id), - mock.call.set('GSUtil', 'default_api_version', '2'), - mock.call.set('GoogleCompute', 'service_account', 'default'), - mock.call.write(config_file='config'), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.boto.boto_config.metadata_watcher') - 
@mock.patch('google_compute_engine.boto.boto_config.config_manager') - def testCreateConfigProjectId(self, mock_config, mock_watcher): - mock_config_instance = mock.Mock() - mock_config.ConfigManager.return_value = mock_config_instance - mock_watcher_instance = mock.Mock() - mock_watcher.MetadataWatcher.return_value = mock_watcher_instance - mock_watcher_instance.GetMetadata.return_value = self.project_id - - boto_config.BotoConfig() - mock_watcher_instance.GetMetadata.assert_called_once_with( - metadata_key='project/numeric-project-id', recursive=False) - expected_calls = [ - mock.call('GSUtil', 'default_project_id', self.project_id), - ] - mock_config_instance.SetOption.assert_has_calls(expected_calls) - - @mock.patch('google_compute_engine.boto.boto_config.metadata_watcher') - @mock.patch('google_compute_engine.boto.boto_config.config_manager') - def testCreateConfigExit(self, mock_config, mock_watcher): - mock_config_instance = mock.Mock() - mock_config.ConfigManager.return_value = mock_config_instance - mock_watcher_instance = mock.Mock() - mock_watcher.MetadataWatcher.return_value = mock_watcher_instance - mock_watcher_instance.GetMetadata.return_value = None - - boto_config.BotoConfig() - mock_watcher_instance.GetMetadata.assert_called_once_with( - metadata_key='project/numeric-project-id', recursive=False) - mock_config.SetOption.assert_not_called() - - -if __name__ == '__main__': - unittest.main() diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/boto/tests/compute_auth_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/boto/tests/compute_auth_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/boto/tests/compute_auth_test.py 2019-08-01 23:32:44.000000000 +0000 +++ 
gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/boto/tests/compute_auth_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,107 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unittest for compute_auth.py module.""" - -import sys - -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - -if sys.version_info < (3, 0): - from google_compute_engine.boto import compute_auth - - -@unittest.skipIf(sys.version_info > (3, 0), 'Skipping for python3.') -class ComputeAuthTest(unittest.TestCase): - - def setUp(self): - self.metadata_key = 'instance/service-accounts' - self.service_account = 'service_account' - self.mock_config = mock.Mock() - self.mock_config.get.return_value = self.service_account - self.mock_provider = mock.Mock() - self.mock_provider.name = 'google' - - @mock.patch('google_compute_engine.boto.compute_auth.metadata_watcher') - @mock.patch('google_compute_engine.boto.compute_auth.logger') - def testCreateConfig(self, mock_logger, mock_watcher): - scopes = list(compute_auth.GS_SCOPES)[1:2] - service_accounts = {self.service_account: {'scopes': scopes}} - mock_watcher.GetMetadata.return_value = service_accounts - mock_watcher.MetadataWatcher.return_value = mock_watcher - mocks = mock.Mock() - mocks.attach_mock(mock_watcher, 'watcher') - mocks.attach_mock(mock_logger, 'logger') - 
mocks.attach_mock(self.mock_config, 'config') - mock_logger_instance = mock.Mock() - mock_logger.Logger.return_value = mock_logger_instance - - mock_compute_auth = compute_auth.ComputeAuth( - None, self.mock_config, self.mock_provider) - expected_calls = [ - mock.call.logger.Logger(name=mock.ANY), - mock.call.watcher.MetadataWatcher(logger=mock_logger_instance), - mock.call.config.get('GoogleCompute', 'service_account', ''), - mock.call.watcher.GetMetadata(metadata_key=self.metadata_key) - ] - self.assertEqual(mocks.mock_calls, expected_calls) - self.assertEqual(mock_compute_auth.scopes, scopes) - - @mock.patch('google_compute_engine.boto.compute_auth.metadata_watcher') - def testCreateConfigNoScopes(self, mock_watcher): - mock_watcher.GetMetadata.return_value = {} - mock_watcher.MetadataWatcher.return_value = mock_watcher - - with self.assertRaises(compute_auth.auth_handler.NotReadyToAuthenticate): - compute_auth.ComputeAuth(None, self.mock_config, self.mock_provider) - - def testCreateConfigNoServiceAccount(self): - self.mock_config.get.return_value = None - - with self.assertRaises(compute_auth.auth_handler.NotReadyToAuthenticate): - compute_auth.ComputeAuth(None, self.mock_config, self.mock_provider) - - @mock.patch('google_compute_engine.boto.compute_auth.metadata_watcher') - def testGetAccessToken(self, mock_watcher): - mock_auth = mock.create_autospec(compute_auth.ComputeAuth) - mock_auth.watcher = mock_watcher - mock_auth.metadata_key = self.metadata_key - mock_auth.service_account = self.service_account - mock_watcher.GetMetadata.side_effect = [ - {self.service_account: {'token': {'access_token': 'test'}}}, - {}, - ] - - self.assertEqual( - compute_auth.ComputeAuth._GetAccessToken(mock_auth), 'test') - self.assertEqual( - compute_auth.ComputeAuth._GetAccessToken(mock_auth), None) - expected_calls = [mock.call(metadata_key=self.metadata_key)] * 2 - self.assertEqual(mock_watcher.GetMetadata.mock_calls, expected_calls) - - 
@mock.patch('google_compute_engine.boto.compute_auth.metadata_watcher') - def testAddAuth(self, mock_watcher): - mock_auth = mock.create_autospec(compute_auth.ComputeAuth) - mock_auth._GetAccessToken.return_value = 'token' - mock_request = mock.Mock() - mock_request.headers = {} - - compute_auth.ComputeAuth.add_auth(mock_auth, mock_request) - self.assertEqual(mock_request.headers['Authorization'], 'OAuth token') - - -if __name__ == '__main__': - unittest.main() diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/clock_skew/clock_skew_daemon.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/clock_skew/clock_skew_daemon.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/clock_skew/clock_skew_daemon.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/clock_skew/clock_skew_daemon.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,78 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Manage clock skew after migration on a Google Compute Engine instance.""" - -import logging.handlers -import optparse - -from google_compute_engine import config_manager -from google_compute_engine import constants -from google_compute_engine import file_utils -from google_compute_engine import logger -from google_compute_engine import metadata_watcher -from google_compute_engine.compat import distro_utils - -LOCKFILE = constants.LOCALSTATEDIR + '/lock/google_clock_skew.lock' - - -class ClockSkewDaemon(object): - """Responds to drift-token changes.""" - - drift_token = 'instance/virtual-clock/drift-token' - - def __init__(self, debug=False): - """Constructor. - - Args: - debug: bool, True if debug output should write to the console. - """ - facility = logging.handlers.SysLogHandler.LOG_DAEMON - self.logger = logger.Logger( - name='google-clock-skew', debug=debug, facility=facility) - self.distro_utils = distro_utils.Utils(debug=debug) - self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger) - try: - with file_utils.LockFile(LOCKFILE): - self.logger.info('Starting Google Clock Skew daemon.') - self.watcher.WatchMetadata( - self.HandleClockSync, metadata_key=self.drift_token, - recursive=False) - except (IOError, OSError) as e: - self.logger.warning(str(e)) - - def HandleClockSync(self, response): - """Called when clock drift token changes. - - Args: - response: string, the metadata response with the new drift token value. 
- """ - self.logger.info('Clock drift token has changed: %s.', response) - self.distro_utils.HandleClockSync(self.logger) - - -def main(): - parser = optparse.OptionParser() - parser.add_option( - '-d', '--debug', action='store_true', dest='debug', - help='print debug output to the console.') - (options, _) = parser.parse_args() - instance_config = config_manager.ConfigManager() - if instance_config.GetOptionBool('Daemons', 'clock_skew_daemon'): - ClockSkewDaemon(debug=bool(options.debug)) - - -if __name__ == '__main__': - main() diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/clock_skew/tests/clock_skew_daemon_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/clock_skew/tests/clock_skew_daemon_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/clock_skew/tests/clock_skew_daemon_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/clock_skew/tests/clock_skew_daemon_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,87 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unittest for clock_skew_daemon.py module.""" - -from google_compute_engine.clock_skew import clock_skew_daemon -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -class ClockSkewDaemonTest(unittest.TestCase): - - @mock.patch('google_compute_engine.clock_skew.clock_skew_daemon.metadata_watcher') - @mock.patch('google_compute_engine.clock_skew.clock_skew_daemon.logger.Logger') - @mock.patch('google_compute_engine.clock_skew.clock_skew_daemon.file_utils.LockFile') - def testClockSkewDaemon(self, mock_lock, mock_logger, mock_watcher): - mocks = mock.Mock() - mocks.attach_mock(mock_lock, 'lock') - mocks.attach_mock(mock_logger, 'logger') - mocks.attach_mock(mock_watcher, 'watcher') - metadata_key = clock_skew_daemon.ClockSkewDaemon.drift_token - mock_logger.return_value = mock_logger - mock_watcher.MetadataWatcher.return_value = mock_watcher - with mock.patch.object( - clock_skew_daemon.ClockSkewDaemon, 'HandleClockSync') as mock_handle: - clock_skew_daemon.ClockSkewDaemon() - expected_calls = [ - mock.call.logger(name=mock.ANY, debug=False, facility=mock.ANY), - mock.call.watcher.MetadataWatcher(logger=mock_logger), - mock.call.lock(clock_skew_daemon.LOCKFILE), - mock.call.lock().__enter__(), - mock.call.logger.info(mock.ANY), - mock.call.watcher.WatchMetadata( - mock_handle, metadata_key=metadata_key, recursive=False), - mock.call.lock().__exit__(None, None, None), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.clock_skew.clock_skew_daemon.metadata_watcher') - @mock.patch('google_compute_engine.clock_skew.clock_skew_daemon.logger.Logger') - @mock.patch('google_compute_engine.clock_skew.clock_skew_daemon.file_utils.LockFile') - def testClockSkewDaemonError(self, mock_lock, mock_logger, mock_watcher): - mocks = mock.Mock() - mocks.attach_mock(mock_lock, 'lock') - mocks.attach_mock(mock_logger, 'logger') - mocks.attach_mock(mock_watcher, 'watcher') - 
mock_lock.side_effect = IOError('Test Error') - mock_logger.return_value = mock_logger - with mock.patch.object( - clock_skew_daemon.ClockSkewDaemon, 'HandleClockSync'): - clock_skew_daemon.ClockSkewDaemon(debug=True) - expected_calls = [ - mock.call.logger(name=mock.ANY, debug=True, facility=mock.ANY), - mock.call.watcher.MetadataWatcher(logger=mock_logger), - mock.call.lock(clock_skew_daemon.LOCKFILE), - mock.call.logger.warning('Test Error'), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.clock_skew.clock_skew_daemon.distro_utils') - def testHandleClockSync(self, mock_distro_utils): - mock_sync = mock.create_autospec(clock_skew_daemon.ClockSkewDaemon) - mock_logger = mock.Mock() - mock_sync.logger = mock_logger - mock_sync.distro_utils = mock_distro_utils - - clock_skew_daemon.ClockSkewDaemon.HandleClockSync(mock_sync, 'Response') - expected_calls = [mock.call.info(mock.ANY, 'Response')] - self.assertEqual(mock_logger.mock_calls, expected_calls) - expected_calls = [mock.call.HandleClockSync(mock_logger)] - self.assertEqual(mock_distro_utils.mock_calls, expected_calls) - - -if __name__ == '__main__': - unittest.main() diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/compat.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/compat.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/compat.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/compat.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,118 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""A module for resolving compatibility issues between Python 2 and Python 3.""" - -import logging -import subprocess -import sys - -if sys.version_info >= (3, 7): - import distro -else: - import platform as distro - -if 'freebsd' in sys.platform: - # Note: Do not use .version() method which is from either platform or distro. - # platform.version() and distro.version() return different values. - # platform.version() returns 'FreeBSD 11.2-RELEASE-p9.....'. - # distro.version() returns '11.2'. - distro_name = 'freebsd' - # distro_version is not used for FreeBSD later in this code. 
- distro_version = None -else: - distribution = distro.linux_distribution() - distro_name = distribution[0].lower() - distro_version = distribution[1].split('.')[0] -distro_utils = None - -if 'centos' in distro_name and distro_version == '6': - import google_compute_engine.distro_lib.el_6.utils as distro_utils -elif 'centos' in distro_name: - import google_compute_engine.distro_lib.el_7.utils as distro_utils -elif 'red hat enterprise linux' in distro_name and distro_version == '6': - import google_compute_engine.distro_lib.el_6.utils as distro_utils -elif 'red hat enterprise linux' in distro_name: - import google_compute_engine.distro_lib.el_7.utils as distro_utils -elif 'fedora' in distro_name: - import google_compute_engine.distro_lib.el_7.utils as distro_utils -elif 'debian' in distro_name and distro_version == '8': - import google_compute_engine.distro_lib.debian_8.utils as distro_utils -elif 'debian' in distro_name: - import google_compute_engine.distro_lib.debian_9.utils as distro_utils -elif 'suse' in distro_name and distro_version == '11': - import google_compute_engine.distro_lib.sles_11.utils as distro_utils -elif 'suse' in distro_name: - import google_compute_engine.distro_lib.sles_12.utils as distro_utils -elif 'freebsd' in distro_name: - import google_compute_engine.distro_lib.freebsd_11.utils as distro_utils -else: - # Default to Debian 9. - import google_compute_engine.distro_lib.debian_9.utils as distro_utils - -RETRY_LIMIT = 3 -TIMEOUT = 10 - -if sys.version_info >= (3, 0): - # Python 3 imports. - import configparser as parser - import http.client as httpclient - import io as stringio - import urllib.error as urlerror - import urllib.parse as urlparse - import urllib.request as urlrequest - import urllib.request as urlretrieve -else: - # Python 2 imports. 
- import ConfigParser as parser - import httplib as httpclient - import StringIO as stringio - import urllib as urlparse - import urllib as urlretrieve - import urllib2 as urlrequest - import urllib2 as urlerror - -if sys.version_info < (2, 7): - - class NullHandler(logging.Handler): - - def emit(self, record): - pass - - def handle(self, record): - pass - - def createLock(self): - pass - - logging.NullHandler = NullHandler - -if sys.version_info < (2, 7, 9): - - # Native Python libraries do not check SSL certificates. - def curlretrieve(url, filename=None, *args, **kwargs): - command = ['curl', '--max-time', str(TIMEOUT), '--retry', str(RETRY_LIMIT)] - if filename: - command += ['-o', filename] - command += ['--', url] - subprocess.check_call(command) - - urlretrieve.urlretrieve = curlretrieve - -if sys.version_info < (3, 2): - parser.SafeConfigParser.read_file = parser.SafeConfigParser.readfp - parser.Parser = parser.SafeConfigParser -else: - parser.Parser = parser.ConfigParser diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/config_manager.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/config_manager.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/config_manager.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/config_manager.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,111 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""A library for retrieving and modifying configuration settings.""" - -import os -import textwrap - -from google_compute_engine import constants -from google_compute_engine import file_utils -from google_compute_engine.compat import parser - -CONFIG = constants.SYSCONFDIR + '/instance_configs.cfg' - - -class ConfigManager(object): - """Process the configuration defaults.""" - - def __init__(self, config_file=None, config_header=None): - """Constructor. - - Args: - config_file: string, the location of the config file. - config_header: string, the message to write at the top of the config. - """ - self.config_file = config_file or CONFIG - self.config_header = config_header - self.config = parser.Parser() - self.config.read(self.config_file) - - def _AddHeader(self, fp): - """Create a file header in the config. - - Args: - fp: int, a file pointer for writing the header. - """ - text = textwrap.wrap( - textwrap.dedent(self.config_header), break_on_hyphens=False) - fp.write('\n'.join(['# ' + line for line in text])) - fp.write('\n\n') - - def GetOptionString(self, section, option): - """Get the value of an option in the config file. - - Args: - section: string, the section of the config file to check. - option: string, the option to retrieve the value of. - - Returns: - string, the value of the option or None if the option doesn't exist. - """ - if self.config.has_option(section, option): - return self.config.get(section, option) - else: - return None - - def GetOptionBool(self, section, option): - """Get the value of an option in the config file. 
- - Args: - section: string, the section of the config file to check. - option: string, the option to retrieve the value of. - - Returns: - bool, True if the option is enabled or not set. - """ - return (not self.config.has_option(section, option) - or self.config.getboolean(section, option)) - - def SetOption(self, section, option, value, overwrite=True): - """Set the value of an option in the config file. - - Args: - section: string, the section of the config file to check. - option: string, the option to set the value of. - value: string, the value to set the option. - overwrite: bool, True to overwrite an existing value in the config file. - """ - if not overwrite and self.config.has_option(section, option): - return - if not self.config.has_section(section): - self.config.add_section(section) - self.config.set(section, option, str(value)) - - def WriteConfig(self, config_file=None): - """Write the config values to a given file. - - Args: - config_file: string, the file location of the config file to write. 
- """ - config_file = config_file or self.config_file - config_name = os.path.splitext(os.path.basename(config_file))[0] - config_lock = ( - '%s/lock/google_%s.lock' % (constants.LOCALSTATEDIR, config_name)) - with file_utils.LockFile(config_lock): - with open(config_file, 'w') as config_fp: - if self.config_header: - self._AddHeader(config_fp) - self.config.write(config_fp) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/constants.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/constants.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/constants.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/constants.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,43 +0,0 @@ -#!/usr/bin/python -# Copyright 2017 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""A module for global constants.""" - -import platform - -OSLOGIN_CONTROL_SCRIPT = 'google_oslogin_control' -OSLOGIN_NSS_CACHE_SCRIPT = 'google_oslogin_nss_cache' - -if platform.system() == 'FreeBSD': - BOTOCONFDIR = '/usr/local' - LOCALBASE = '/usr/local' - LOCALSTATEDIR = '/var/spool' - OSLOGIN_NSS_CACHE = '/usr/local/etc/oslogin_passwd.cache' - SYSCONFDIR = '/usr/local/etc' - SYSLOG_SOCKET = '/var/run/log' -elif platform.system() == 'OpenBSD': - BOTOCONFDIR = '' - LOCALBASE = '/usr/local' - LOCALSTATEDIR = '/var/spool' - OSLOGIN_NSS_CACHE = '/usr/local/etc/oslogin_passwd.cache' - SYSCONFDIR = '/usr/local/etc' - SYSLOG_SOCKET = '/dev/log' -else: - BOTOCONFDIR = '' - LOCALBASE = '' - LOCALSTATEDIR = '/var' - OSLOGIN_NSS_CACHE = '/etc/oslogin_passwd.cache' - SYSCONFDIR = '/etc/default' - SYSLOG_SOCKET = '/dev/log' diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/debian_8/tests/utils_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/debian_8/tests/utils_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/debian_8/tests/utils_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/debian_8/tests/utils_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,80 +0,0 @@ -#!/usr/bin/python -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unittest for utils.py module.""" - -from google_compute_engine.distro_lib.debian_8 import utils -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -class UtilsTest(unittest.TestCase): - - def setUp(self): - self.mock_logger = mock.Mock() - self.mock_setup = mock.create_autospec(utils.Utils) - - @mock.patch('google_compute_engine.distro_lib.helpers.CallDhclientIpv6') - @mock.patch('google_compute_engine.distro_lib.helpers.CallEnableRouteAdvertisements') - def testEnableIpv6(self, mock_call_enable_ra, mock_call_dhclient): - mocks = mock.Mock() - mocks.attach_mock(mock_call_dhclient, 'dhclient') - mocks.attach_mock(mock_call_enable_ra, 'enable_ra') - - utils.Utils.EnableIpv6(self.mock_setup, ['A', 'B'], self.mock_logger) - expected_calls = [ - mock.call.enable_ra(['A', 'B'], mock.ANY), - mock.call.dhclient(['A', 'B'], mock.ANY), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.helpers.CallDhclientIpv6') - def testDisableIpv6(self, mock_call_dhclient): - mocks = mock.Mock() - mocks.attach_mock(mock_call_dhclient, 'dhclient') - - utils.Utils.DisableIpv6(self.mock_setup, ['A', 'B'], self.mock_logger) - expected_calls = [ - mock.call.dhclient(['A', 'B'], mock.ANY, None, release_lease=True), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.helpers.CallDhclient') - def testEnableNetworkInterfaces(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - - 
utils.Utils.EnableNetworkInterfaces( - self.mock_setup, ['A', 'B'], self.mock_logger) - expected_calls = [mock.call.call(['A', 'B'], mock.ANY)] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.helpers.CallHwclock') - def testHandleClockSync(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - - utils.Utils.HandleClockSync(self.mock_setup, self.mock_logger) - expected_calls = [mock.call.call(mock.ANY)] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.ip_forwarding_utils.IpForwardingUtilsIproute') - def testIpForwardingUtils(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - - utils.Utils.IpForwardingUtils(self.mock_setup, self.mock_logger, '66') - expected_calls = [mock.call.call(mock.ANY, '66')] - self.assertEqual(mocks.mock_calls, expected_calls) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/debian_8/utils.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/debian_8/utils.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/debian_8/utils.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/debian_8/utils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,71 +0,0 @@ -#!/usr/bin/python -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Utilities that are distro specific for use on Debian 8.""" - -from google_compute_engine.distro_lib import helpers -from google_compute_engine.distro_lib import ip_forwarding_utils -from google_compute_engine.distro_lib import utils - - -class Utils(utils.Utils): - """Utilities used by Linux guest services on Debian 8.""" - - def EnableIpv6(self, interfaces, logger, dhclient_script=None): - """Configure the network interfaces for IPv6 using dhclient. - - Args: - interface: string, the output device names for enabling IPv6. - logger: logger object, used to write to SysLog and serial port. - dhclient_script: string, the path to a dhclient script used by dhclient. - """ - helpers.CallEnableRouteAdvertisements(interfaces, logger) - helpers.CallDhclientIpv6(interfaces, logger) - - def DisableIpv6(self, interfaces, logger): - """Disable Ipv6 by giving up the DHCP lease using dhclient. - - Args: - interface: string, the output device names for enabling IPv6. - logger: logger object, used to write to SysLog and serial port. - """ - helpers.CallDhclientIpv6(interfaces, logger, None, release_lease=True) - - def EnableNetworkInterfaces(self, interfaces, logger, dhclient_script=None): - """Enable the list of network interfaces. - - Args: - interfaces: list of string, the output device names to enable. - logger: logger object, used to write to SysLog and serial port. - dhclient_script: string, the path to a dhclient script used by dhclient. 
- """ - helpers.CallDhclient(interfaces, logger) - - def HandleClockSync(self, logger): - """Sync the software clock with the hypervisor clock. - - Args: - logger: logger object, used to write to SysLog and serial port. - """ - helpers.CallHwclock(logger) - - def IpForwardingUtils(self, logger, proto_id=None): - """Get system IP address configuration utilities. - - Args: - logger: logger object, used to write to SysLog and serial port. - proto_id: string, the routing protocol identifier for Google IP changes. - """ - return ip_forwarding_utils.IpForwardingUtilsIproute(logger, proto_id) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/debian_9/tests/utils_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/debian_9/tests/utils_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/debian_9/tests/utils_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/debian_9/tests/utils_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,80 +0,0 @@ -#!/usr/bin/python -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unittest for utils.py module.""" - -from google_compute_engine.distro_lib.debian_9 import utils -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -class UtilsTest(unittest.TestCase): - - def setUp(self): - self.mock_logger = mock.Mock() - self.mock_setup = mock.create_autospec(utils.Utils) - - @mock.patch('google_compute_engine.distro_lib.helpers.CallDhclientIpv6') - @mock.patch('google_compute_engine.distro_lib.helpers.CallEnableRouteAdvertisements') - def testEnableIpv6(self, mock_call_enable_ra, mock_call_dhclient): - mocks = mock.Mock() - mocks.attach_mock(mock_call_dhclient, 'dhclient') - mocks.attach_mock(mock_call_enable_ra, 'enable_ra') - - utils.Utils.EnableIpv6(self.mock_setup, ['A', 'B'], self.mock_logger) - expected_calls = [ - mock.call.enable_ra(['A', 'B'], mock.ANY), - mock.call.dhclient(['A', 'B'], mock.ANY), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.helpers.CallDhclientIpv6') - def testDisableIpv6(self, mock_call_dhclient): - mocks = mock.Mock() - mocks.attach_mock(mock_call_dhclient, 'dhclient') - - utils.Utils.DisableIpv6(self.mock_setup, ['A', 'B'], self.mock_logger) - expected_calls = [ - mock.call.dhclient(['A', 'B'], mock.ANY, None, release_lease=True), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.helpers.CallDhclient') - def testEnableNetworkInterfaces(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - - utils.Utils.EnableNetworkInterfaces( - self.mock_setup, ['A', 'B'], self.mock_logger) - expected_calls = [mock.call.call(['A', 'B'], mock.ANY)] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.helpers.CallHwclock') - def testHandleClockSync(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - - utils.Utils.HandleClockSync(self.mock_setup, 
self.mock_logger) - expected_calls = [mock.call.call(mock.ANY)] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.ip_forwarding_utils.IpForwardingUtilsIproute') - def testIpForwardingUtils(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - - utils.Utils.IpForwardingUtils(self.mock_setup, self.mock_logger, '66') - expected_calls = [mock.call.call(mock.ANY, '66')] - self.assertEqual(mocks.mock_calls, expected_calls) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/debian_9/utils.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/debian_9/utils.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/debian_9/utils.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/debian_9/utils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,71 +0,0 @@ -#!/usr/bin/python -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Utilities that are distro specific for use on Debian 9.""" - -from google_compute_engine.distro_lib import helpers -from google_compute_engine.distro_lib import ip_forwarding_utils -from google_compute_engine.distro_lib import utils - - -class Utils(utils.Utils): - """Utilities used by Linux guest services on Debian 9.""" - - def EnableIpv6(self, interfaces, logger, dhclient_script=None): - """Configure the network interfaces for IPv6 using dhclient. - - Args: - interface: string, the output device names for enabling IPv6. - logger: logger object, used to write to SysLog and serial port. - dhclient_script: string, the path to a dhclient script used by dhclient. - """ - helpers.CallEnableRouteAdvertisements(interfaces, logger) - helpers.CallDhclientIpv6(interfaces, logger) - - def DisableIpv6(self, interfaces, logger): - """Disable Ipv6 by giving up the DHCP lease using dhclient. - - Args: - interface: string, the output device names for enabling IPv6. - logger: logger object, used to write to SysLog and serial port. - """ - helpers.CallDhclientIpv6(interfaces, logger, None, release_lease=True) - - def EnableNetworkInterfaces(self, interfaces, logger, dhclient_script=None): - """Enable the list of network interfaces. - - Args: - interfaces: list of string, the output device names to enable. - logger: logger object, used to write to SysLog and serial port. - dhclient_script: string, the path to a dhclient script used by dhclient. - """ - helpers.CallDhclient(interfaces, logger) - - def HandleClockSync(self, logger): - """Sync the software clock with the hypervisor clock. - - Args: - logger: logger object, used to write to SysLog and serial port. - """ - helpers.CallHwclock(logger) - - def IpForwardingUtils(self, logger, proto_id=None): - """Get system IP address configuration utilities. - - Args: - logger: logger object, used to write to SysLog and serial port. - proto_id: string, the routing protocol identifier for Google IP changes. 
- """ - return ip_forwarding_utils.IpForwardingUtilsIproute(logger, proto_id) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/el_6/tests/utils_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/el_6/tests/utils_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/el_6/tests/utils_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/el_6/tests/utils_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,86 +0,0 @@ -#!/usr/bin/python -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unittest for utils.py module.""" - -from google_compute_engine.distro_lib.el_6 import utils -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -class UtilsTest(unittest.TestCase): - - def setUp(self): - self.mock_logger = mock.Mock() - self.mock_setup = mock.create_autospec(utils.Utils) - - def tearDown(self): - pass - - @mock.patch('google_compute_engine.distro_lib.helpers.CallDhclientIpv6') - @mock.patch('google_compute_engine.distro_lib.helpers.CallEnableRouteAdvertisements') - def testEnableIpv6(self, mock_call_enable_ra, mock_call_dhclient): - mocks = mock.Mock() - mocks.attach_mock(mock_call_dhclient, 'dhclient') - mocks.attach_mock(mock_call_enable_ra, 'enable_ra') - - utils.Utils.EnableIpv6(self.mock_setup, ['A', 'B'], self.mock_logger) - expected_calls = [ - mock.call.enable_ra(['A', 'B'], mock.ANY), - mock.call.dhclient(['A', 'B'], mock.ANY, dhclient_script=None), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.helpers.CallDhclientIpv6') - def testDisableIpv6(self, mock_call_dhclient): - mocks = mock.Mock() - mocks.attach_mock(mock_call_dhclient, 'dhclient') - - utils.Utils.DisableIpv6(self.mock_setup, ['A', 'B'], self.mock_logger) - expected_calls = [ - mock.call.dhclient(['A', 'B'], mock.ANY, None, release_lease=True), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.helpers.CallDhclient') - def testEnableNetworkInterfaces(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - - utils.Utils.EnableNetworkInterfaces( - self.mock_setup, ['A', 'B'], self.mock_logger, - dhclient_script='test_script') - expected_calls = [ - mock.call.call(['A', 'B'], mock.ANY, dhclient_script='test_script'), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.helpers.CallHwclock') - def testHandleClockSync(self, 
mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - - utils.Utils.HandleClockSync(self.mock_setup, self.mock_logger) - expected_calls = [mock.call.call(mock.ANY)] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.ip_forwarding_utils.IpForwardingUtilsIproute') - def testIpForwardingUtils(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - - utils.Utils.IpForwardingUtils(self.mock_setup, self.mock_logger, '66') - expected_calls = [mock.call.call(mock.ANY, '66')] - self.assertEqual(mocks.mock_calls, expected_calls) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/el_6/utils.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/el_6/utils.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/el_6/utils.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/el_6/utils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,72 +0,0 @@ -#!/usr/bin/python -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Utilities that are distro specific for use on EL 6.""" - -from google_compute_engine.distro_lib import helpers -from google_compute_engine.distro_lib import ip_forwarding_utils -from google_compute_engine.distro_lib import utils - - -class Utils(utils.Utils): - """Utilities used by Linux guest services on EL 6.""" - - def EnableIpv6(self, interfaces, logger, dhclient_script=None): - """Configure the network interfaces for IPv6 using dhclient. - - Args: - interface: string, the output device names for enabling IPv6. - logger: logger object, used to write to SysLog and serial port. - dhclient_script: string, the path to a dhclient script used by dhclient. - """ - helpers.CallEnableRouteAdvertisements(interfaces, logger) - helpers.CallDhclientIpv6( - interfaces, logger, dhclient_script=dhclient_script) - - def DisableIpv6(self, interfaces, logger): - """Disable Ipv6 by giving up the DHCP lease using dhclient. - - Args: - interface: string, the output device names for enabling IPv6. - logger: logger object, used to write to SysLog and serial port. - """ - helpers.CallDhclientIpv6(interfaces, logger, None, release_lease=True) - - def EnableNetworkInterfaces(self, interfaces, logger, dhclient_script=None): - """Enable the list of network interfaces. - - Args: - interfaces: list of string, the output device names to enable. - logger: logger object, used to write to SysLog and serial port. - dhclient_script: string, the path to a dhclient script used by dhclient. - """ - helpers.CallDhclient(interfaces, logger, dhclient_script=dhclient_script) - - def HandleClockSync(self, logger): - """Sync the software clock with the hypervisor clock. - - Args: - logger: logger object, used to write to SysLog and serial port. - """ - helpers.CallHwclock(logger) - - def IpForwardingUtils(self, logger, proto_id=None): - """Get system IP address configuration utilities. - - Args: - logger: logger object, used to write to SysLog and serial port. 
- proto_id: string, the routing protocol identifier for Google IP changes. - """ - return ip_forwarding_utils.IpForwardingUtilsIproute(logger, proto_id) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/el_7/tests/utils_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/el_7/tests/utils_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/el_7/tests/utils_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/el_7/tests/utils_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,165 +0,0 @@ -#!/usr/bin/python -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unittest for utils.py module.""" - -import os -import shutil -import tempfile - -from google_compute_engine.distro_lib.el_7 import utils -from google_compute_engine.test_compat import builtin -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -class UtilsTest(unittest.TestCase): - - def setUp(self): - # Create a temporary directory. 
- self.test_dir = tempfile.mkdtemp() - self.mock_logger = mock.Mock() - self.mock_setup = mock.create_autospec(utils.Utils) - self.mock_setup.network_path = '/etc/sysconfig/network-scripts' - - def tearDown(self): - # Remove the directory after the test. - shutil.rmtree(self.test_dir) - - def testModifyInterface(self): - config_file = os.path.join(self.test_dir, 'config.cfg') - config_content = [ - '# File comment.\n', - 'A="apple"\n', - 'B=banana\n', - 'B=banana\n', - ] - with open(config_file, 'w') as config: - for line in config_content: - config.write(line) - - # Write a value for an existing config without overriding it. - utils.Utils._ModifyInterface( - self.mock_setup, config_file, 'A', 'aardvark', replace=False) - self.assertEqual(open(config_file).readlines(), config_content) - # Write a value for a config that is not already set. - utils.Utils._ModifyInterface( - self.mock_setup, config_file, 'C', 'none', replace=False) - config_content.append('C=none\n') - self.assertEqual(open(config_file).readlines(), config_content) - # Write a value for an existing config with replacement. - utils.Utils._ModifyInterface( - self.mock_setup, config_file, 'A', 'aardvark', replace=True) - config_content[1] = 'A=aardvark\n' - self.assertEqual(open(config_file).readlines(), config_content) - # Write a value for an existing config with multiple occurrences. 
- utils.Utils._ModifyInterface( - self.mock_setup, config_file, 'B', '"banana"', replace=True) - config_content[2] = config_content[3] = 'B="banana"\n' - self.assertEqual(open(config_file).readlines(), config_content) - - @mock.patch('google_compute_engine.distro_lib.el_7.utils.os.path.exists') - def testDisableNetworkManager(self, mock_exists): - mock_open = mock.mock_open() - mocks = mock.Mock() - mocks.attach_mock(mock_exists, 'exists') - mocks.attach_mock(mock_open, 'open') - mocks.attach_mock(self.mock_logger, 'logger') - mocks.attach_mock(self.mock_setup._ModifyInterface, 'modify') - mock_exists.side_effect = [True, False] - - with mock.patch('%s.open' % builtin, mock_open, create=False): - utils.Utils._DisableNetworkManager( - self.mock_setup, ['eth0', 'eth1'], self.mock_logger) - expected_calls = [ - mock.call.exists('/etc/sysconfig/network-scripts/ifcfg-eth0'), - mock.call.modify(mock.ANY, 'DEVICE', 'eth0', replace=False), - mock.call.modify(mock.ANY, 'NM_CONTROLLED', 'no', replace=True), - mock.call.exists('/etc/sysconfig/network-scripts/ifcfg-eth1'), - mock.call.open('/etc/sysconfig/network-scripts/ifcfg-eth1', 'w'), - mock.call.open().__enter__(), - mock.call.open().write(mock.ANY), - mock.call.open().__exit__(None, None, None), - mock.call.logger.info(mock.ANY, 'eth1'), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.helpers.CallDhclientIpv6') - @mock.patch('google_compute_engine.distro_lib.helpers.CallEnableRouteAdvertisements') - def testEnableIpv6(self, mock_call_enable_ra, mock_call_dhclient): - mocks = mock.Mock() - mocks.attach_mock(mock_call_dhclient, 'dhclient') - mocks.attach_mock(mock_call_enable_ra, 'enable_ra') - - utils.Utils.EnableIpv6(self.mock_setup, ['A', 'B'], self.mock_logger) - expected_calls = [ - mock.call.enable_ra(['A', 'B'], mock.ANY), - mock.call.dhclient(['A', 'B'], mock.ANY), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - 
@mock.patch('google_compute_engine.distro_lib.helpers.CallDhclientIpv6') - def testDisableIpv6(self, mock_call_dhclient): - mocks = mock.Mock() - mocks.attach_mock(mock_call_dhclient, 'dhclient') - - utils.Utils.DisableIpv6(self.mock_setup, ['A', 'B'], self.mock_logger) - expected_calls = [ - mock.call.dhclient(['A', 'B'], mock.ANY, None, release_lease=True), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.el_7.utils.os.path.exists') - @mock.patch('google_compute_engine.distro_lib.helpers.CallDhclient') - def testEnableNetworkInterfaces(self, mock_call, mock_exists): - mocks = mock.Mock() - mocks.attach_mock(mock_exists, 'exists') - mocks.attach_mock(self.mock_logger, 'logger') - mocks.attach_mock(self.mock_setup._DisableNetworkManager, 'disable') - mocks.attach_mock(mock_call, 'call_dhclient') - mock_exists.side_effect = [True, False] - - # Enable interfaces with network manager enabled. - utils.Utils.EnableNetworkInterfaces( - self.mock_setup, ['A', 'B'], self.mock_logger) - # Enable interfaces with network manager is not present. 
- utils.Utils.EnableNetworkInterfaces( - self.mock_setup, ['C', 'D'], self.mock_logger) - expected_calls = [ - mock.call.exists('/etc/sysconfig/network-scripts'), - mock.call.disable(['A', 'B'], mock.ANY), - mock.call.call_dhclient(['A', 'B'], mock.ANY), - mock.call.exists('/etc/sysconfig/network-scripts'), - mock.call.call_dhclient(['C', 'D'], mock.ANY), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.helpers.CallHwclock') - def testHandleClockSync(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - - utils.Utils.HandleClockSync(self.mock_setup, self.mock_logger) - expected_calls = [mock.call.call(mock.ANY)] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.ip_forwarding_utils.IpForwardingUtilsIproute') - def testIpForwardingUtils(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - - utils.Utils.IpForwardingUtils(self.mock_setup, self.mock_logger, '66') - expected_calls = [mock.call.call(mock.ANY, '66')] - self.assertEqual(mocks.mock_calls, expected_calls) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/el_7/utils.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/el_7/utils.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/el_7/utils.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/el_7/utils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,129 +0,0 @@ -#!/usr/bin/python -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Utilities that are distro specific for use on EL 7.""" - -import fileinput -import os -import re - -from google_compute_engine import constants -from google_compute_engine.distro_lib import helpers -from google_compute_engine.distro_lib import ip_forwarding_utils -from google_compute_engine.distro_lib import utils - - -class Utils(utils.Utils): - """Utilities used by Linux guest services on EL 7.""" - - network_path = constants.LOCALBASE + '/etc/sysconfig/network-scripts' - - def EnableIpv6(self, interfaces, logger, dhclient_script=None): - """Configure the network interfaces for IPv6 using dhclient. - - Args: - interface: string, the output device names for enabling IPv6. - logger: logger object, used to write to SysLog and serial port. - dhclient_script: string, the path to a dhclient script used by dhclient. - """ - helpers.CallEnableRouteAdvertisements(interfaces, logger) - helpers.CallDhclientIpv6(interfaces, logger) - - def DisableIpv6(self, interfaces, logger): - """Disable Ipv6 by giving up the DHCP lease using dhclient. - - Args: - interface: string, the output device names for enabling IPv6. - logger: logger object, used to write to SysLog and serial port. - """ - helpers.CallDhclientIpv6(interfaces, logger, None, release_lease=True) - - def EnableNetworkInterfaces(self, interfaces, logger, dhclient_script=None): - """Enable the list of network interfaces. - - Args: - interfaces: list of string, the output device names to enable. - logger: logger object, used to write to SysLog and serial port. 
- dhclient_script: string, the path to a dhclient script used by dhclient. - """ - # Should always exist in EL 7. - if os.path.exists(self.network_path): - self._DisableNetworkManager(interfaces, logger) - helpers.CallDhclient(interfaces, logger) - - def _DisableNetworkManager(self, interfaces, logger): - """Disable network manager management on a list of network interfaces. - - Args: - interfaces: list of string, the output device names enable. - logger: logger object, used to write to SysLog and serial port. - """ - for interface in interfaces: - interface_config = os.path.join( - self.network_path, 'ifcfg-%s' % interface) - if os.path.exists(interface_config): - self._ModifyInterface( - interface_config, 'DEVICE', interface, replace=False) - self._ModifyInterface( - interface_config, 'NM_CONTROLLED', 'no', replace=True) - else: - with open(interface_config, 'w') as interface_file: - interface_content = [ - '# Added by Google.', - 'BOOTPROTO=none', - 'DEFROUTE=no', - 'DEVICE=%s' % interface, - 'IPV6INIT=no', - 'NM_CONTROLLED=no', - 'NOZEROCONF=yes', - '', - ] - interface_file.write('\n'.join(interface_content)) - logger.info('Created config file for interface %s.', interface) - - def _ModifyInterface( - self, interface_config, config_key, config_value, replace=False): - """Write a value to a config file if not already present. - - Args: - interface_config: string, the path to a config file. - config_key: string, the configuration key to set. - config_value: string, the value to set for the configuration key. - replace: bool, replace the configuration option if already present. 
- """ - config_entry = '%s=%s' % (config_key, config_value) - if not open(interface_config).read().count(config_key): - with open(interface_config, 'a') as config: - config.write('%s\n' % config_entry) - elif replace: - for line in fileinput.input(interface_config, inplace=True): - print(re.sub(r'%s=.*' % config_key, config_entry, line.rstrip())) - - def HandleClockSync(self, logger): - """Sync the software clock with the hypervisor clock. - - Args: - logger: logger object, used to write to SysLog and serial port. - """ - helpers.CallHwclock(logger) - - def IpForwardingUtils(self, logger, proto_id=None): - """Get system IP address configuration utilities. - - Args: - logger: logger object, used to write to SysLog and serial port. - proto_id: string, the routing protocol identifier for Google IP changes. - """ - return ip_forwarding_utils.IpForwardingUtilsIproute(logger, proto_id) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/freebsd_11/tests/utils_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/freebsd_11/tests/utils_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/freebsd_11/tests/utils_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/freebsd_11/tests/utils_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,55 +0,0 @@ -#!/usr/bin/python -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unittest for utils.py module.""" - -from google_compute_engine.distro_lib.freebsd_11 import utils -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -class UtilsTest(unittest.TestCase): - - def setUp(self): - self.mock_logger = mock.Mock() - self.mock_setup = mock.create_autospec(utils.Utils) - - @mock.patch('google_compute_engine.distro_lib.helpers.CallDhclient') - def testEnableNetworkInterfaces(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - - utils.Utils.EnableNetworkInterfaces( - self.mock_setup, ['A', 'B'], self.mock_logger) - expected_calls = [mock.call.call(['A', 'B'], mock.ANY)] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.helpers.CallNtpdate') - def testHandleClockSync(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - - utils.Utils.HandleClockSync(self.mock_setup, self.mock_logger) - expected_calls = [mock.call.call(mock.ANY)] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.ip_forwarding_utils.IpForwardingUtilsIfconfig') - def testIpForwardingUtils(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - - utils.Utils.IpForwardingUtils(self.mock_setup, self.mock_logger, 66) - expected_calls = [mock.call.call(mock.ANY)] - self.assertEqual(mocks.mock_calls, expected_calls) diff -Nru 
gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/freebsd_11/utils.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/freebsd_11/utils.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/freebsd_11/utils.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/freebsd_11/utils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,61 +0,0 @@ -#!/usr/bin/python -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Utilities that are distro specific for use on FreeBSD 11.""" - -from google_compute_engine.distro_lib import helpers -from google_compute_engine.distro_lib import ip_forwarding_utils -from google_compute_engine.distro_lib import utils - - -class Utils(utils.Utils): - """Utilities used by Linux guest services on FreeBSD 11.""" - - def EnableIpv6(self, interfaces, logger, dhclient_script=None): - """Configure the network interfaces for IPv6 using dhclient. - - Args: - interface: string, the output device names for enabling IPv6. - logger: logger object, used to write to SysLog and serial port. - dhclient_script: string, the path to a dhclient script used by dhclient. 
- """ - pass - - def EnableNetworkInterfaces(self, interfaces, logger, dhclient_script=None): - """Enable the list of network interfaces. - - Args: - interfaces: list of string, the output device names to enable. - logger: logger object, used to write to SysLog and serial port. - dhclient_script: string, the path to a dhclient script used by dhclient. - """ - helpers.CallDhclient(interfaces, logger) - - def HandleClockSync(self, logger): - """Sync the software clock with the hypervisor clock. - - Args: - logger: logger object, used to write to SysLog and serial port. - """ - helpers.CallNtpdate(logger) - - def IpForwardingUtils(self, logger, proto_id=None): - """Get system IP address configuration utilities. - - Args: - logger: logger object, used to write to SysLog and serial port. - proto_id: string, the routing protocol identifier for Google IP changes. - """ - return ip_forwarding_utils.IpForwardingUtilsIfconfig(logger) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/helpers.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/helpers.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/helpers.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/helpers.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,142 +0,0 @@ -#!/usr/bin/python -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Distro helpers.""" - -import os -import subprocess - - -def CallDhclient( - interfaces, logger, dhclient_script=None): - """Configure the network interfaces using dhclient. - - Args: - interfaces: list of string, the output device names to enable. - logger: logger object, used to write to SysLog and serial port. - dhclient_script: string, the path to a dhclient script used by dhclient. - """ - logger.info('Enabling the Ethernet interfaces %s.', interfaces) - - dhclient_command = ['dhclient'] - - if dhclient_script and os.path.exists(dhclient_script): - dhclient_command += ['-sf', dhclient_script] - - try: - subprocess.check_call(dhclient_command + ['-x'] + interfaces) - subprocess.check_call(dhclient_command + interfaces) - except subprocess.CalledProcessError: - logger.warning('Could not enable interfaces %s.', interfaces) - - -def CallDhclientIpv6(interfaces, logger, dhclient_script=None, - release_lease=False): - """Configure the network interfaces for IPv6 using dhclient. - - Args: - interface: string, the output device names for enabling IPv6. - logger: logger object, used to write to SysLog and serial port. - dhclient_script: string, the path to a dhclient script used by dhclient. - release_lease: Release the IPv6 lease. 
- """ - logger.info('Calling Dhclient for IPv6 configuration ' - 'on the Ethernet interfaces %s.', interfaces) - - timeout_command = ['timeout', '5'] - dhclient_command = ['dhclient'] - - if release_lease: - try: - subprocess.check_call( - timeout_command + dhclient_command + [ - '-6', '-r', '-v'] + interfaces) - except subprocess.CalledProcessError: - logger.warning('Could not release IPv6 lease on interface %s.', - interfaces) - return - - if dhclient_script and os.path.exists(dhclient_script): - dhclient_command += ['-sf', dhclient_script] - - try: - subprocess.check_call( - timeout_command + dhclient_command + ['-1', '-6', '-v'] + interfaces) - except subprocess.CalledProcessError: - logger.warning('Could not enable IPv6 on interface %s.', interfaces) - - -def CallEnableRouteAdvertisements(interfaces, logger): - """Enable route advertisements. - Args: - interfaces: list of string, the output device names to enable. - logger: logger object, used to write to SysLog and serial port. - """ - for interface in interfaces: - accept_ra = ( - 'net.ipv6.conf.{interface}.accept_ra_rt_info_max_plen'.format( - interface=interface)) - CallSysctl(logger, accept_ra, 128) - -def CallHwclock(logger): - """Sync clock using hwclock. - - Args: - logger: logger object, used to write to SysLog and serial port. - """ - command = ['/sbin/hwclock', '--hctosys'] - try: - subprocess.check_call(command) - except subprocess.CalledProcessError: - logger.warning('Failed to sync system time with hardware clock.') - else: - logger.info('Synced system time with hardware clock.') - - -def CallNtpdate(logger): - """Sync clock using ntpdate. - - Args: - logger: logger object, used to write to SysLog and serial port. 
- """ - ntpd_inactive = subprocess.call(['service', 'ntpd', 'status']) - try: - if not ntpd_inactive: - subprocess.check_call(['service', 'ntpd', 'stop']) - subprocess.check_call( - 'ntpdate `awk \'$1=="server" {print $2}\' /etc/ntp.conf`', shell=True) - if not ntpd_inactive: - subprocess.check_call(['service', 'ntpd', 'start']) - except subprocess.CalledProcessError: - logger.warning('Failed to sync system time with ntp server.') - else: - logger.info('Synced system time with ntp server.') - -def CallSysctl(logger, name, value): - """Write a variable using sysctl. - - Args: - logger: logger object, used to write to SysLog and serial port. - name: string name of the sysctl variable. - value: value of the sysctl variable. - """ - logger.info('Configuring sysctl %s.', name) - - sysctl_command = [ - 'sysctl', '-w', '{name}={value}'.format(name=name, value=value)] - try: - subprocess.check_call(sysctl_command) - except subprocess.CalledProcessError: - logger.warning('Unable to configure sysctl %s.', name) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/ip_forwarding_utils.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/ip_forwarding_utils.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/ip_forwarding_utils.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/ip_forwarding_utils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,302 +0,0 @@ -#!/usr/bin/python -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Utilities for configuring IP address forwarding.""" - -import re -import subprocess -try: - # The following modules are required by IpForwardingUtilsIfconfig. - import netifaces - import netaddr -except ImportError: - netifaces = None - netaddr = None - - -IP_REGEX = re.compile(r'\A(\d{1,3}\.){3}\d{1,3}\Z') -IP_ALIAS_REGEX = re.compile(r'\A(\d{1,3}\.){3}\d{1,3}/\d{1,2}\Z') - - -class IpForwardingUtilsBase(object): - """System IP address configuration utilities.""" - - def ParseForwardedIps(self, forwarded_ips): - """Parse and validate forwarded IP addresses. - - Args: - forwarded_ips: list, the IP address strings to parse. - - Returns: - list, the valid IP address strings. - """ - pass - - def GetForwardedIps(self, interface, interface_ip=None): - """Retrieve the list of configured forwarded IP addresses. - - Args: - interface: string, the output device to query. - interface_ip: string, current interface ip address. - - Returns: - list, the IP address strings. - """ - pass - - def AddForwardedIp(self, address, interface): - """Configure a new IP address on the network interface. - - Args: - address: string, the IP address to configure. - interface: string, the output device to use. - """ - pass - - def RemoveForwardedIp(self, address, interface): - """Delete an IP address on the network interface. - - Args: - address: string, the IP address to configure. - interface: string, the output device to use. - """ - pass - - -class IpForwardingUtilsIproute(IpForwardingUtilsBase): - """System IP address configuration utilities. 
- - Command used to add IPs: - ip route add to local $IP/32 dev eth0 proto 66 - Command used to fetch list of configured IPs: - ip route ls table local type local dev eth0 scope host proto 66 - """ - - def __init__(self, logger, proto_id=None): - """Constructor. - - Args: - logger: logger object, used to write to SysLog and serial port. - proto_id: string, the routing protocol identifier for Google IP changes. - """ - self.logger = logger - self.proto_id = proto_id or '66' - - def _CreateRouteOptions(self, **kwargs): - """Create a dictionary of parameters to append to the ip route command. - - Args: - **kwargs: dict, the string parameters to update in the ip route command. - - Returns: - dict, the string parameters to append to the ip route command. - """ - options = { - 'proto': self.proto_id, - 'scope': 'host', - } - options.update(kwargs) - return options - - def _RunIpRoute(self, args=None, options=None): - """Run a command with ip route and return the response. - - Args: - args: list, the string ip route command args to execute. - options: dict, the string parameters to append to the ip route command. - - Returns: - string, the standard output from the ip route command execution. - """ - args = args or [] - options = options or {} - command = ['ip', 'route'] - command.extend(args) - for item in options.items(): - command.extend(item) - try: - process = subprocess.Popen( - command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - stdout, stderr = process.communicate() - except OSError as e: - self.logger.warning('Exception running %s. %s.', command, str(e)) - else: - if process.returncode: - message = 'Non-zero exit status running %s. %s.' - self.logger.warning(message, command, stderr.strip()) - else: - return stdout.decode('utf-8', 'replace') - return '' - - def ParseForwardedIps(self, forwarded_ips): - """Parse and validate forwarded IP addresses. - - Args: - forwarded_ips: list, the IP address strings to parse. 
- - Returns: - list, the valid IP address strings. - """ - addresses = [] - forwarded_ips = forwarded_ips or [] - for ip in forwarded_ips: - if ip and (IP_REGEX.match(ip) or IP_ALIAS_REGEX.match(ip)): - addresses.append(ip[:-3] if ip.endswith('/32') else ip) - else: - self.logger.warning('Could not parse IP address: "%s".', ip) - return addresses - - def GetForwardedIps(self, interface, interface_ip=None): - """Retrieve the list of configured forwarded IP addresses. - - Args: - interface: string, the output device to query. - interface_ip: string, current interface ip address. - - Returns: - list, the IP address strings. - """ - args = ['ls', 'table', 'local', 'type', 'local'] - options = self._CreateRouteOptions(dev=interface) - result = self._RunIpRoute(args=args, options=options) - result = re.sub(r'local\s', r'', result) - return self.ParseForwardedIps(result.split()) - - def AddForwardedIp(self, address, interface): - """Configure a new IP address on the network interface. - - Args: - address: string, the IP address to configure. - interface: string, the output device to use. - """ - address = address if IP_ALIAS_REGEX.match(address) else '%s/32' % address - args = ['add', 'to', 'local', address] - options = self._CreateRouteOptions(dev=interface) - self._RunIpRoute(args=args, options=options) - - def RemoveForwardedIp(self, address, interface): - """Delete an IP address on the network interface. - - Args: - address: string, the IP address to configure. - interface: string, the output device to use. - """ - address = address if IP_ALIAS_REGEX.match(address) else '%s/32' % address - args = ['delete', 'to', 'local', address] - options = self._CreateRouteOptions(dev=interface) - self._RunIpRoute(args=args, options=options) - - -class IpForwardingUtilsIfconfig(IpForwardingUtilsBase): - """System IP address configuration utilities.""" - - def __init__(self, logger): - """Constructor. - - Args: - logger: logger object, used to write to SysLog and serial port. 
- """ - - self.logger = logger - - def _RunIfconfig(self, args=None, options=None): - """Run a command with ifconfig and return the response. - - Args: - args: list, the string ip route command args to execute. - options: dict, the string parameters to append to the ip route command. - - Returns: - string, the standard output from the ip route command execution. - """ - args = args or [] - options = options or {} - command = ['ifconfig'] - command.extend(args) - for item in options.items(): - command.extend(item) - try: - process = subprocess.Popen( - command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - stdout, stderr = process.communicate() - except OSError as e: - self.logger.warning('Exception running %s. %s.', command, str(e)) - else: - if process.returncode: - message = 'Non-zero exit status running %s. %s.' - self.logger.warning(message, command, stderr.strip()) - else: - return stdout.decode('utf-8', 'replace') - return '' - - def ParseForwardedIps(self, forwarded_ips): - """Parse and validate forwarded IP addresses. - - Args: - forwarded_ips: list, the IP address strings to parse. - - Returns: - list, the valid IP address strings. - """ - addresses = [] - forwarded_ips = forwarded_ips or [] - for ip in forwarded_ips: - if ip and (IP_REGEX.match(ip) or IP_ALIAS_REGEX.match(ip)): - addresses.extend([str(addr) for addr in list(netaddr.IPNetwork(ip))]) - else: - self.logger.warning('Could not parse IP address: "%s".', ip) - return addresses - - def GetForwardedIps(self, interface, interface_ip=None): - """Retrieve the list of configured forwarded IP addresses. - - Args: - interface: string, the output device to query. - interface_ip: string, current interface ip address. - - Returns: - list, the IP address strings. 
- """ - try: - ips = netifaces.ifaddresses(interface) - ips = ips[netifaces.AF_INET] - except (ValueError, IndexError): - return [] - forwarded_ips = [] - for ip in ips: - if ip['addr'] != interface_ip: - full_addr = '%s/%d' % (ip['addr'], netaddr.IPAddress(ip['netmask']).netmask_bits()) - forwarded_ips.append(full_addr) - return self.ParseForwardedIps(forwarded_ips) - - def AddForwardedIp(self, address, interface): - """Configure a new IP address on the network interface. - - Args: - address: string, the IP address to configure. - interface: string, the output device to use. - """ - for ip in list(netaddr.IPNetwork(address)): - self._RunIfconfig(args=[interface, 'alias', '%s/32' % str(ip)]) - - def RemoveForwardedIp(self, address, interface): - """Delete an IP address on the network interface. - - Args: - address: string, the IP address to configure. - interface: string, the output device to use. - """ - ip = netaddr.IPNetwork(address) - self._RunIfconfig(args=[interface, '-alias', str(ip.ip)]) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/sles_11/tests/utils_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/sles_11/tests/utils_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/sles_11/tests/utils_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/sles_11/tests/utils_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,91 +0,0 @@ -#!/usr/bin/python -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Unittest for utils.py module.""" - -import subprocess - -from google_compute_engine.distro_lib.sles_11 import utils -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -class UtilsTest(unittest.TestCase): - - def setUp(self): - self.mock_logger = mock.Mock() - self.mock_setup = mock.create_autospec(utils.Utils) - - def testEnableNetworkInterfacesWithSingleNic(self): - mocks = mock.Mock() - - utils.Utils.EnableNetworkInterfaces( - self.mock_setup, ['eth0'], self.mock_logger) - expected_calls = [] - self.assertEqual(mocks.mock_calls, expected_calls) - - def testEnableNetworkInterfacesWithMultipleNics(self): - mocks = mock.Mock() - mocks.attach_mock(self.mock_setup._Dhcpcd, 'dhcpcd') - - utils.Utils.EnableNetworkInterfaces( - self.mock_setup, ['eth0', 'eth1', 'eth2'], self.mock_logger) - expected_calls = [ - mock.call.dhcpcd(['eth1', 'eth2'], mock.ANY), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch( - 'google_compute_engine.distro_lib.sles_11.utils.subprocess.check_call') - def testDhcpcd(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - mocks.attach_mock(self.mock_logger, 'logger') - mock_call.side_effect = [ - None, None, None, None, - subprocess.CalledProcessError(1, 'Test'), - subprocess.CalledProcessError(1, 'Test'), - ] - - utils.Utils._Dhcpcd( - self.mock_setup, ['eth1', 'eth2', 'eth3'], self.mock_logger) - expected_calls = [ - mock.call.call(['/sbin/dhcpcd', '-x', 'eth1']), - mock.call.call(['/sbin/dhcpcd', 'eth1']), - 
mock.call.call(['/sbin/dhcpcd', '-x', 'eth2']), - mock.call.call(['/sbin/dhcpcd', 'eth2']), - mock.call.call(['/sbin/dhcpcd', '-x', 'eth3']), - mock.call.logger.info(mock.ANY, 'eth3'), - mock.call.call(['/sbin/dhcpcd','eth3']), - mock.call.logger.warning(mock.ANY, 'eth3'), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.helpers.CallHwclock') - def testHandleClockSync(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - - utils.Utils.HandleClockSync(self.mock_setup, self.mock_logger) - expected_calls = [mock.call.call(mock.ANY)] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.ip_forwarding_utils.IpForwardingUtilsIproute') - def testIpForwardingUtils(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - - utils.Utils.IpForwardingUtils(self.mock_setup, self.mock_logger, '66') - expected_calls = [mock.call.call(mock.ANY, '66')] - self.assertEqual(mocks.mock_calls, expected_calls) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/sles_11/utils.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/sles_11/utils.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/sles_11/utils.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/sles_11/utils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,88 +0,0 @@ -#!/usr/bin/python -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Utilities that are distro specific for use on SUSE 11.""" - -import os -import subprocess - -from google_compute_engine import constants -from google_compute_engine.distro_lib import helpers -from google_compute_engine.distro_lib import ip_forwarding_utils -from google_compute_engine.distro_lib import utils - - -class Utils(utils.Utils): - """Utilities used by Linux guest services on SUSE 11.""" - - def EnableIpv6(self, interfaces, logger, dhclient_script=None): - """Configure the network interfaces for IPv6 using dhclient. - - Args: - interface: string, the output device names for enabling IPv6. - logger: logger object, used to write to SysLog and serial port. - dhclient_script: string, the path to a dhclient script used by dhclient. - """ - pass - - def EnableNetworkInterfaces(self, interfaces, logger, dhclient_script=None): - """Enable the list of network interfaces. - - Args: - interfaces: list of string, the output device names to enable. - logger: logger object, used to write to SysLog and serial port. - dhclient_script: string, the path to a dhclient script used by dhclient. - """ - interfaces_to_up = [i for i in interfaces if i != 'eth0'] - if interfaces_to_up: - logger.info('Enabling the Ethernet interfaces %s.', interfaces_to_up) - self._Dhcpcd(interfaces_to_up, logger) - - def _Dhcpcd(self, interfaces, logger): - """Use dhcpcd to activate the interfaces. - - Args: - interfaces: list of string, the output device names to enable. - logger: logger object, used to write to SysLog and serial port. 
- """ - for interface in interfaces: - dhcpcd = ['/sbin/dhcpcd'] - try: - subprocess.check_call(dhcpcd + ['-x', interface]) - except subprocess.CalledProcessError: - # Dhcpcd not yet running for this device. - logger.info('Dhcpcd not yet running for interface %s.', interface) - try: - subprocess.check_call(dhcpcd + [interface]) - except subprocess.CalledProcessError: - # The interface is already active. - logger.warning('Could not activate interface %s.', interface) - - def HandleClockSync(self, logger): - """Sync the software clock with the hypervisor clock. - - Args: - logger: logger object, used to write to SysLog and serial port. - """ - helpers.CallHwclock(logger) - - def IpForwardingUtils(self, logger, proto_id=None): - """Get system IP address configuration utilities. - - Args: - logger: logger object, used to write to SysLog and serial port. - proto_id: string, the routing protocol identifier for Google IP changes. - """ - return ip_forwarding_utils.IpForwardingUtilsIproute(logger, proto_id) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/sles_12/tests/utils_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/sles_12/tests/utils_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/sles_12/tests/utils_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/sles_12/tests/utils_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,111 +0,0 @@ -#!/usr/bin/python -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Unittest for utils.py module.""" - -import subprocess - -from google_compute_engine.distro_lib.sles_12 import utils -from google_compute_engine.test_compat import builtin -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -class UtilsTest(unittest.TestCase): - - def setUp(self): - self.mock_logger = mock.Mock() - self.mock_setup = mock.create_autospec(utils.Utils) - self.mock_setup.network_path = '/etc/sysconfig/network' - - def testEnableNetworkInterfacesWithSingleNic(self): - mocks = mock.Mock() - - utils.Utils.EnableNetworkInterfaces( - self.mock_setup, ['eth0'], self.mock_logger) - expected_calls = [] - self.assertEqual(mocks.mock_calls, expected_calls) - - def testEnableNetworkInterfacesWithMultipleNics(self): - mocks = mock.Mock() - mocks.attach_mock(self.mock_setup._WriteIfcfg, 'writeIfcfg') - mocks.attach_mock(self.mock_setup._Ifup, 'ifup') - - utils.Utils.EnableNetworkInterfaces( - self.mock_setup, ['eth0', 'eth1', 'eth2'], self.mock_logger) - expected_calls = [ - mock.call.writeIfcfg(['eth1', 'eth2'], mock.ANY), - mock.call.ifup(['eth1', 'eth2'], mock.ANY), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - def testWriteIfcfg(self): - mocks = mock.Mock() - mock_open = mock.mock_open() - mocks.attach_mock(mock_open, 'open') - with mock.patch('%s.open' % builtin, mock_open, create=False): - - utils.Utils._WriteIfcfg( - self.mock_setup, ['eth1', 'eth2'], self.mock_logger) - expected_calls = [ - mock.call.open('/etc/sysconfig/network/ifcfg-eth1', 'w'), - 
mock.call.open().__enter__(), - mock.call.open().write(mock.ANY), - mock.call.open().__exit__(None, None, None), - mock.call.open('/etc/sysconfig/network/ifcfg-eth2', 'w'), - mock.call.open().__enter__(), - mock.call.open().write(mock.ANY), - mock.call.open().__exit__(None, None, None), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch( - 'google_compute_engine.distro_lib.sles_12.utils.subprocess.check_call') - def testIfup(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - mocks.attach_mock(self.mock_logger, 'logger') - mock_call.side_effect = [ - None, subprocess.CalledProcessError(1, 'Test'), - ] - - utils.Utils._Ifup(self.mock_setup, ['eth1', 'eth2'], self.mock_logger) - utils.Utils._Ifup(self.mock_setup, ['eth1', 'eth2'], self.mock_logger) - expectedIfupCall = [ - '/usr/sbin/wicked', 'ifup', '--timeout', '1', 'eth1', 'eth2', - ] - expected_calls = [ - mock.call.call(expectedIfupCall), - mock.call.call(expectedIfupCall), - mock.call.logger.warning(mock.ANY, ['eth1', 'eth2']), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.helpers.CallHwclock') - def testHandleClockSync(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - - utils.Utils.HandleClockSync(self.mock_setup, self.mock_logger) - expected_calls = [mock.call.call(mock.ANY)] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.ip_forwarding_utils.IpForwardingUtilsIproute') - def testIpForwardingUtils(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - - utils.Utils.IpForwardingUtils(self.mock_setup, self.mock_logger, '66') - expected_calls = [mock.call.call(mock.ANY, '66')] - self.assertEqual(mocks.mock_calls, expected_calls) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/sles_12/utils.py 
gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/sles_12/utils.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/sles_12/utils.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/sles_12/utils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,110 +0,0 @@ -#!/usr/bin/python -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Utilities that are distro specific for use on SUSE 12.""" - -import os -import subprocess - -from google_compute_engine import constants -from google_compute_engine.distro_lib import helpers -from google_compute_engine.distro_lib import ip_forwarding_utils -from google_compute_engine.distro_lib import utils - - -class Utils(utils.Utils): - """Utilities used by Linux guest services on SUSE 12.""" - - network_path = constants.LOCALBASE + '/etc/sysconfig/network' - - def EnableIpv6(self, interfaces, logger, dhclient_script=None): - """Configure the network interfaces for IPv6 using dhclient. - - Args: - interface: string, the output device names for enabling IPv6. - logger: logger object, used to write to SysLog and serial port. - dhclient_script: string, the path to a dhclient script used by dhclient. 
- """ - pass - - def EnableNetworkInterfaces(self, interfaces, logger, dhclient_script=None): - """Enable the list of network interfaces. - - Args: - interfaces: list of string, the output device names to enable. - logger: logger object, used to write to SysLog and serial port. - dhclient_script: string, the path to a dhclient script used by dhclient. - """ - interfaces_to_up = [i for i in interfaces if i != 'eth0'] - if interfaces_to_up: - logger.info('Enabling the Ethernet interfaces %s.', interfaces_to_up) - self._WriteIfcfg(interfaces_to_up, logger) - self._Ifup(interfaces_to_up, logger) - - def _WriteIfcfg(self, interfaces, logger): - """Write ifcfg files for multi-NIC support. - - Overwrites the files. This allows us to update ifcfg-* in the future. - Disable the network setup to override this behavior and customize the - configurations. - - Args: - interfaces: list of string, the output device names to enable. - logger: logger object, used to write to SysLog and serial port. - """ - for interface in interfaces: - interface_config = os.path.join( - self.network_path, 'ifcfg-%s' % interface) - interface_content = [ - '# Added by Google.', - 'STARTMODE=hotplug', - 'BOOTPROTO=dhcp', - 'DHCLIENT_SET_DEFAULT_ROUTE=yes', - 'DHCLIENT_ROUTE_PRIORITY=10%s00' % interface, - '', - ] - with open(interface_config, 'w') as interface_file: - interface_file.write('\n'.join(interface_content)) - logger.info('Created ifcfg file for interface %s.', interface) - - def _Ifup(self, interfaces, logger): - """Activate network interfaces. - - Args: - interfaces: list of string, the output device names to enable. - logger: logger object, used to write to SysLog and serial port. - """ - ifup = ['/usr/sbin/wicked', 'ifup', '--timeout', '1'] - try: - subprocess.check_call(ifup + interfaces) - except subprocess.CalledProcessError: - logger.warning('Could not activate interfaces %s.', interfaces) - - def HandleClockSync(self, logger): - """Sync the software clock with the hypervisor clock. 
- - Args: - logger: logger object, used to write to SysLog and serial port. - """ - helpers.CallHwclock(logger) - - def IpForwardingUtils(self, logger, proto_id=None): - """Get system IP address configuration utilities. - - Args: - logger: logger object, used to write to SysLog and serial port. - proto_id: string, the routing protocol identifier for Google IP changes. - """ - return ip_forwarding_utils.IpForwardingUtilsIproute(logger, proto_id) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/tests/helpers_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/tests/helpers_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/tests/helpers_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/tests/helpers_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,240 +0,0 @@ -#!/usr/bin/python -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unittest for helpers.py module.""" - -import subprocess - -from google_compute_engine.distro_lib import helpers -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -class HelpersTest(unittest.TestCase): - - def setUp(self): - self.mock_logger = mock.Mock() - - @mock.patch('google_compute_engine.distro_lib.helpers.os.path.exists') - @mock.patch('google_compute_engine.distro_lib.helpers.subprocess.check_call') - def testCallDhclient(self, mock_call, mock_exists): - mocks = mock.Mock() - mocks.attach_mock(mock_exists, 'exists') - mocks.attach_mock(mock_call, 'call') - mocks.attach_mock(self.mock_logger, 'logger') - - mock_exists.side_effect = [False, True] - mock_call.side_effect = [ - None, None, None, None, None, None, - subprocess.CalledProcessError(1, 'Test'), - ] - - helpers.CallDhclient(['a', 'b'], self.mock_logger, 'test_script') - helpers.CallDhclient(['c', 'd'], self.mock_logger, 'test_script') - helpers.CallDhclient(['e', 'f'], self.mock_logger, None) - helpers.CallDhclient(['g', 'h'], self.mock_logger, None) - - expected_calls = [ - mock.call.logger.info(mock.ANY, ['a', 'b']), - mock.call.exists('test_script'), - mock.call.call(['dhclient', '-x', 'a', 'b']), - mock.call.call(['dhclient', 'a', 'b']), - mock.call.logger.info(mock.ANY, ['c', 'd']), - mock.call.exists('test_script'), - mock.call.call(['dhclient', '-sf', 'test_script', '-x', 'c', 'd']), - mock.call.call(['dhclient', '-sf', 'test_script', 'c', 'd']), - mock.call.logger.info(mock.ANY, ['e', 'f']), - mock.call.call(['dhclient', '-x', 'e', 'f']), - mock.call.call(['dhclient', 'e', 'f']), - mock.call.logger.info(mock.ANY, ['g', 'h']), - mock.call.call(['dhclient', '-x', 'g', 'h']), - mock.call.logger.warning(mock.ANY, ['g', 'h']), - ] - - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.helpers.os.path.exists') - 
@mock.patch('google_compute_engine.distro_lib.helpers.subprocess.check_call') - def testCallDhclientIpv6NonExistentScript(self, mock_call, mock_exists): - mock_logger = mock.Mock() - - mock_exists.side_effect = [False] - helpers.CallDhclientIpv6(['a', 'b'], mock_logger, 'test_script') - mock_call.assert_has_calls( - [ - mock.call.call( - ['timeout', '5', 'dhclient', '-1', '-6', '-v', 'a', 'b']), - ]) - - @mock.patch('google_compute_engine.distro_lib.helpers.os.path.exists') - @mock.patch('google_compute_engine.distro_lib.helpers.subprocess.check_call') - def testCallDhclientIpv6(self, mock_call, mock_exists): - mock_logger = mock.Mock() - mock_exists.side_effect = [True] - mock_call.side_effect = [ - None, - None, - subprocess.CalledProcessError(1, 'Test'), - None, - None, - subprocess.CalledProcessError(1, 'Test'), - ] - - helpers.CallDhclientIpv6(['a', 'b'], mock_logger, 'test_script') - helpers.CallDhclientIpv6(['c', 'd'], mock_logger, None) - helpers.CallDhclientIpv6(['e', 'f'], mock_logger, None) - helpers.CallDhclientIpv6( - ['g', 'h'], mock_logger, 'test_script', release_lease=True) - helpers.CallDhclientIpv6(['i', 'j'], mock_logger, None, release_lease=True) - helpers.CallDhclientIpv6(['k', 'l'], mock_logger, None, release_lease=True) - - expected_calls = [ - mock.call.call( - [ - 'timeout', '5','dhclient', '-sf', 'test_script', '-1', '-6', - '-v', 'a', 'b', - ]), - mock.call.call( - [ - 'timeout', '5', 'dhclient', '-1', '-6', '-v', 'c', 'd', - ]), - mock.call.call( - [ - 'timeout', '5', 'dhclient', '-1', '-6', '-v', 'e', 'f', - ]), - mock.call.call( - [ - 'timeout', '5', 'dhclient', '-6', '-r', '-v', 'g', 'h', - ]), - mock.call.call( - [ - 'timeout', '5', 'dhclient', '-6', '-r', '-v', 'i', 'j', - ]), - mock.call.call( - [ - 'timeout', '5', 'dhclient', '-6', '-r', '-v', 'k', 'l', - ]), - ] - - self.assertEqual(mock_call.mock_calls, expected_calls) - mock_logger.assert_has_calls( - [ - mock.call.warning(mock.ANY, ['e', 'f']), - ]) - 
mock_logger.assert_has_calls( - [ - mock.call.warning(mock.ANY, ['k', 'l']), - ]) - - @mock.patch('google_compute_engine.distro_lib.helpers.subprocess.check_call') - def testEnableRouteAdvertisements(self, mock_call): - mock_logger = mock.Mock() - interfaces = ['foo', 'bar', 'baz'] - helpers.CallEnableRouteAdvertisements(interfaces, mock_logger) - mock_call.assert_has_calls([ - mock.call( - [ - 'sysctl', '-w', - 'net.ipv6.conf.%s.accept_ra_rt_info_max_plen=128' % interface, - ]) - for interface in interfaces]) - - @mock.patch('google_compute_engine.distro_lib.helpers.subprocess.check_call') - def testCallHwclock(self, mock_call): - command = ['/sbin/hwclock', '--hctosys'] - mock_logger = mock.Mock() - - helpers.CallHwclock(mock_logger) - mock_call.assert_called_once_with(command) - expected_calls = [mock.call.info(mock.ANY)] - self.assertEqual(mock_logger.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.helpers.subprocess.check_call') - def testCallHwclockError(self, mock_call): - command = ['/sbin/hwclock', '--hctosys'] - mock_logger = mock.Mock() - mock_call.side_effect = subprocess.CalledProcessError(1, 'Test') - - helpers.CallHwclock(mock_logger) - mock_call.assert_called_once_with(command) - expected_calls = [mock.call.warning(mock.ANY)] - self.assertEqual(mock_logger.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.helpers.subprocess.check_call') - @mock.patch('google_compute_engine.distro_lib.helpers.subprocess.call') - def testCallNtpdateActive(self, mock_call, mock_check_call): - command_status = ['service', 'ntpd', 'status'] - command_stop = ['service', 'ntpd', 'stop'] - command_start = ['service', 'ntpd', 'start'] - command_ntpdate = 'ntpdate `awk \'$1=="server" {print $2}\' /etc/ntp.conf`' - mock_logger = mock.Mock() - mock_call.return_value = 0 - mock_check_call.return_value = True - - helpers.CallNtpdate(mock_logger) - mock_call.assert_called_once_with(command_status) - expected_calls = [ - 
mock.call(command_stop), - mock.call(command_ntpdate, shell=True), - mock.call(command_start), - ] - self.assertEqual(mock_check_call.mock_calls, expected_calls) - expected_calls = [mock.call.info(mock.ANY)] - self.assertEqual(mock_logger.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.helpers.subprocess.check_call') - @mock.patch('google_compute_engine.distro_lib.helpers.subprocess.call') - def testCallNtpdateInactive(self, mock_call, mock_check_call): - command_status = ['service', 'ntpd', 'status'] - command_ntpdate = 'ntpdate `awk \'$1=="server" {print $2}\' /etc/ntp.conf`' - mock_logger = mock.Mock() - mock_call.return_value = 1 - - helpers.CallNtpdate(mock_logger) - mock_call.assert_called_once_with(command_status) - mock_check_call.assert_called_once_with(command_ntpdate, shell=True) - expected_calls = [mock.call.info(mock.ANY)] - self.assertEqual(mock_logger.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.helpers.subprocess.check_call') - @mock.patch('google_compute_engine.distro_lib.helpers.subprocess.call') - def testCallNtpdateError(self, mock_call, mock_check_call): - command_status = ['service', 'ntpd', 'status'] - command_ntpdate = 'ntpdate `awk \'$1=="server" {print $2}\' /etc/ntp.conf`' - mock_logger = mock.Mock() - mock_check_call.side_effect = subprocess.CalledProcessError(1, 'Test') - - helpers.CallNtpdate(mock_logger) - mock_call.assert_called_once_with(command_status) - mock_check_call.assert_called_once_with(command_ntpdate, shell=True) - expected_calls = [mock.call.warning(mock.ANY)] - self.assertEqual(mock_logger.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.helpers.subprocess.check_call') - def testCallSysctl(self, mock_call): - command = ['sysctl', '-w'] - mock_logger = mock.Mock() - expected_log_calls = [] - for name in ['foo', 'bar', 'baz']: - for value in ['foo', 'bar', 'baz']: - params = ['{name}={value}'.format(name=name, value=value)] - 
helpers.CallSysctl(mock_logger, name, value) - mock_call.assert_called_with(command + params) - expected_log_calls.append(mock.call.info(mock.ANY, name)) - self.assertEqual(mock_logger.mock_calls, expected_log_calls) - - mock_call.side_effect = subprocess.CalledProcessError(1, 'Test') - helpers.CallSysctl(mock_logger, 'fail', 1) - mock_logger.assert_has_calls([mock.call.warning(mock.ANY, 'fail')]) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/tests/ip_forwarding_utils_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/tests/ip_forwarding_utils_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/tests/ip_forwarding_utils_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/tests/ip_forwarding_utils_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,435 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unittest for ip_forwarding_utils.py module.""" - -from google_compute_engine.distro_lib import ip_forwarding_utils -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -def _CreateMockProcess(returncode, stdout, stderr): - mock_process = mock.Mock() - mock_process.returncode = returncode - mock_process.communicate.return_value = (stdout, stderr) - return mock_process - - -class IpForwardingUtilsIprouteTest(unittest.TestCase): - - def setUp(self): - self.mock_logger = mock.Mock() - self.options = {'hello': 'world'} - with mock.patch( - 'google_compute_engine.distro_lib.ip_forwarding_utils' - '.subprocess') as mock_subprocess: - mock_subprocess.Popen.return_value = _CreateMockProcess( - 0, b'out', b'') - self.mock_utils = ip_forwarding_utils.IpForwardingUtilsIproute( - self.mock_logger) - self.mock_utils.proto_id = 'proto' - - def testCreateRouteOptions(self): - # Default options. - expected_options = { - 'proto': 'proto', - 'scope': 'host', - } - self.assertEqual(self.mock_utils._CreateRouteOptions(), expected_options) - - # Update dictionary when arguments are specified. - expected_options = { - 'proto': 'proto', - 'scope': 'host', - 'num': 1, - 'string': 'hello world', - } - self.assertEqual( - self.mock_utils._CreateRouteOptions(num=1, string='hello world'), - expected_options) - - # Update the default options. 
- expected_options = { - 'proto': 'test 1', - 'scope': 'test 2', - } - self.assertEqual( - self.mock_utils._CreateRouteOptions(proto='test 1', scope='test 2'), - expected_options) - - @mock.patch('google_compute_engine.distro_lib.ip_forwarding_utils.subprocess') - def testRunIpRoute(self, mock_subprocess): - mock_process = _CreateMockProcess(0, b'out', b'') - mock_subprocess.Popen.return_value = mock_process - args = ['foo', 'bar'] - options = {'one': 'two'} - - self.assertEqual( - self.mock_utils._RunIpRoute(args=args, options=options), 'out') - command = ['ip', 'route', 'foo', 'bar', 'one', 'two'] - mock_subprocess.Popen.assert_called_once_with( - command, stdout=mock_subprocess.PIPE, stderr=mock_subprocess.PIPE) - mock_process.communicate.assert_called_once_with() - self.mock_logger.warning.assert_not_called() - - @mock.patch('google_compute_engine.distro_lib.ip_forwarding_utils.subprocess') - def testRunIpRouteReturnCode(self, mock_subprocess): - mock_process = _CreateMockProcess(1, b'out', b'error\n') - mock_subprocess.Popen.return_value = mock_process - - self.assertEqual( - self.mock_utils._RunIpRoute(args=['foo', 'bar'], options=self.options), - '') - command = ['ip', 'route', 'foo', 'bar', 'hello', 'world'] - self.mock_logger.warning.assert_called_once_with( - mock.ANY, command, b'error') - - @mock.patch('google_compute_engine.distro_lib.ip_forwarding_utils.subprocess') - def testRunIpRouteException(self, mock_subprocess): - mock_subprocess.Popen.side_effect = OSError('Test Error') - - self.assertEqual( - self.mock_utils._RunIpRoute(args=['foo', 'bar'], options=self.options), - '') - command = ['ip', 'route', 'foo', 'bar', 'hello', 'world'] - self.mock_logger.warning.assert_called_once_with( - mock.ANY, command, 'Test Error') - - def testParseForwardedIps(self): - self.assertEqual(self.mock_utils.ParseForwardedIps(None), []) - self.assertEqual(self.mock_utils.ParseForwardedIps([]), []) - self.assertEqual(self.mock_utils.ParseForwardedIps([None]), []) - 
self.assertEqual(self.mock_utils.ParseForwardedIps(['invalid']), []) - self.assertEqual(self.mock_utils.ParseForwardedIps(['1a1a1a1']), []) - self.assertEqual(self.mock_utils.ParseForwardedIps(['1.1.1.1.1']), []) - self.assertEqual(self.mock_utils.ParseForwardedIps(['1111.1.1.1']), []) - self.assertEqual(self.mock_utils.ParseForwardedIps(['1.1.1.1111']), []) - expected_calls = [ - mock.call.warning(mock.ANY, None), - mock.call.warning(mock.ANY, 'invalid'), - mock.call.warning(mock.ANY, '1a1a1a1'), - mock.call.warning(mock.ANY, '1.1.1.1.1'), - mock.call.warning(mock.ANY, '1111.1.1.1'), - mock.call.warning(mock.ANY, '1.1.1.1111'), - ] - self.assertEqual(self.mock_logger.mock_calls, expected_calls) - - def testParseForwardedIpsComplex(self): - forwarded_ips = { - '{{}}\n\"hello\"\n!@#$%^&*()\n\n': False, - '1111.1.1.1': False, - '1.1.1.1': True, - 'hello': False, - '123.123.123.123': True, - '1.1.1.': False, - '1.1.1.a': False, - None: False, - '1.0.0.0': True, - '1.1.1.1/1': True, - '1.1.1.1/11': True, - '123.123.123.123/1': True, - '123.123.123.123/123': False, - '123.123.123.123/a': False, - '123.123.123.123/': False, - } - input_ips = forwarded_ips.keys() - valid_ips = [ip for ip, valid in forwarded_ips.items() if valid] - invalid_ips = [ip for ip, valid in forwarded_ips.items() if not valid] - - self.assertEqual(self.mock_utils.ParseForwardedIps(input_ips), valid_ips) - expected_calls = [mock.call.warning(mock.ANY, ip) for ip in invalid_ips] - self.assertEqual(self.mock_logger.mock_calls, expected_calls) - - def testParseForwardedIpsSubnet(self): - forwarded_ips = { - '1.1.1.1': '1.1.1.1', - '1.1.1.1/32': '1.1.1.1', - '1.1.1.1/1': '1.1.1.1/1', - '1.1.1.1/10': '1.1.1.1/10', - '1.1.1.1/24': '1.1.1.1/24', - } - for ip, value in forwarded_ips.items(): - self.assertEqual(self.mock_utils.ParseForwardedIps([ip]), [value]) - - def testGetForwardedIps(self): - mock_options = mock.Mock() - mock_options.return_value = self.options - mock_run = mock.Mock() - 
mock_run.return_value = 'a\n b \nlocal c' - mock_parse = mock.Mock() - mock_parse.return_value = ['Test'] - self.mock_utils._CreateRouteOptions = mock_options - self.mock_utils._RunIpRoute = mock_run - self.mock_utils.ParseForwardedIps = mock_parse - - self.assertEqual( - self.mock_utils.GetForwardedIps('interface', 'ip'), ['Test']) - mock_options.assert_called_once_with(dev='interface') - mock_run.assert_called_once_with( - args=['ls', 'table', 'local', 'type', 'local'], options=self.options) - mock_parse.assert_called_once_with(['a', 'b', 'c']) - - def testAddForwardedIp(self): - mock_options = mock.Mock() - mock_options.return_value = self.options - mock_run = mock.Mock() - self.mock_utils._CreateRouteOptions = mock_options - self.mock_utils._RunIpRoute = mock_run - - self.mock_utils.AddForwardedIp('1.1.1.1', 'interface') - mock_options.assert_called_once_with(dev='interface') - mock_run.assert_called_once_with( - args=['add', 'to', 'local', '1.1.1.1/32'], options=self.options) - - def testAddIpAlias(self): - mock_options = mock.Mock() - mock_options.return_value = self.options - mock_run = mock.Mock() - self.mock_utils._CreateRouteOptions = mock_options - self.mock_utils._RunIpRoute = mock_run - - self.mock_utils.AddForwardedIp('1.1.1.1/24', 'interface') - mock_options.assert_called_once_with(dev='interface') - mock_run.assert_called_once_with( - args=['add', 'to', 'local', '1.1.1.1/24'], options=self.options) - - def testRemoveForwardedIp(self): - mock_options = mock.Mock() - mock_options.return_value = self.options - mock_run = mock.Mock() - self.mock_utils._CreateRouteOptions = mock_options - self.mock_utils._RunIpRoute = mock_run - - self.mock_utils.RemoveForwardedIp('1.1.1.1', 'interface') - mock_options.assert_called_once_with(dev='interface') - mock_run.assert_called_once_with( - args=['delete', 'to', 'local', '1.1.1.1/32'], options=self.options) - - def testRemoveAliasIp(self): - mock_options = mock.Mock() - mock_options.return_value = self.options - 
mock_run = mock.Mock() - self.mock_utils._CreateRouteOptions = mock_options - self.mock_utils._RunIpRoute = mock_run - - self.mock_utils.RemoveForwardedIp('1.1.1.1/24', 'interface') - mock_options.assert_called_once_with(dev='interface') - mock_run.assert_called_once_with( - args=['delete', 'to', 'local', '1.1.1.1/24'], options=self.options) - - -class IpForwardingUtilsIfconfigTest(unittest.TestCase): - - def setUp(self): - self.mock_logger = mock.Mock() - with mock.patch( - 'google_compute_engine.distro_lib.ip_forwarding_utils' - '.subprocess') as mock_subprocess: - mock_subprocess.Popen.return_value = _CreateMockProcess( - 0, b'out', b'') - self.mock_utils = ip_forwarding_utils.IpForwardingUtilsIfconfig( - self.mock_logger) - - @mock.patch('google_compute_engine.distro_lib.ip_forwarding_utils.subprocess') - def testRunIfconfig(self, mock_subprocess): - mock_process = _CreateMockProcess(0, b'out', b'') - mock_subprocess.Popen.return_value = mock_process - args = ['foo', 'bar'] - options = {'one': 'two'} - - self.assertEqual( - self.mock_utils._RunIfconfig(args=args, options=options), 'out') - command = ['ifconfig', 'foo', 'bar', 'one', 'two'] - mock_subprocess.Popen.assert_called_once_with( - command, stdout=mock_subprocess.PIPE, stderr=mock_subprocess.PIPE) - mock_process.communicate.assert_called_once_with() - self.mock_logger.warning.assert_not_called() - - @mock.patch('google_compute_engine.distro_lib.ip_forwarding_utils.subprocess') - def testRunIfconfigReturnCode(self, mock_subprocess): - mock_process = _CreateMockProcess(1, b'out', b'error\n') - mock_subprocess.Popen.return_value = mock_process - - self.assertEqual( - self.mock_utils._RunIfconfig(args=['foo', 'bar']), - '') - command = ['ifconfig', 'foo', 'bar'] - self.mock_logger.warning.assert_called_once_with( - mock.ANY, command, b'error') - - @mock.patch('google_compute_engine.distro_lib.ip_forwarding_utils.subprocess') - def testRunIfconfigException(self, mock_subprocess): - 
mock_subprocess.Popen.side_effect = OSError('Test Error') - - self.assertEqual( - self.mock_utils._RunIfconfig(args=['foo', 'bar']), - '') - command = ['ifconfig', 'foo', 'bar'] - self.mock_logger.warning.assert_called_once_with( - mock.ANY, command, 'Test Error') - - @mock.patch('google_compute_engine.distro_lib.ip_forwarding_utils.netaddr') - def testParseForwardedIps(self, mock_netaddr): - self.assertEqual(self.mock_utils.ParseForwardedIps(None), []) - self.assertEqual(self.mock_utils.ParseForwardedIps([]), []) - self.assertEqual(self.mock_utils.ParseForwardedIps([None]), []) - self.assertEqual(self.mock_utils.ParseForwardedIps(['invalid']), []) - self.assertEqual(self.mock_utils.ParseForwardedIps(['1a1a1a1']), []) - self.assertEqual(self.mock_utils.ParseForwardedIps(['1.1.1.1.1']), []) - self.assertEqual(self.mock_utils.ParseForwardedIps(['1111.1.1.1']), []) - self.assertEqual(self.mock_utils.ParseForwardedIps(['1.1.1.1111']), []) - expected_calls = [ - mock.call.warning(mock.ANY, None), - mock.call.warning(mock.ANY, 'invalid'), - mock.call.warning(mock.ANY, '1a1a1a1'), - mock.call.warning(mock.ANY, '1.1.1.1.1'), - mock.call.warning(mock.ANY, '1111.1.1.1'), - mock.call.warning(mock.ANY, '1.1.1.1111'), - ] - self.assertEqual(self.mock_logger.mock_calls, expected_calls) - self.assertEqual(mock_netaddr.IPNetwork.mock_calls, []) - - @mock.patch('google_compute_engine.distro_lib.ip_forwarding_utils.netaddr') - def testParseForwardedIpsComplex(self, mock_netaddr): - def side_effect(arg): - return [arg] - mock_netaddr.IPNetwork.side_effect = side_effect - forwarded_ips = { - '{{}}\n\"hello\"\n!@#$%^&*()\n\n': False, - '1111.1.1.1': False, - '1.1.1.1': True, - 'hello': False, - '123.123.123.123': True, - '1.1.1.': False, - '1.1.1.a': False, - None: False, - '1.0.0.0': True, - '1.1.1.1/1': True, - '1.1.1.1/11': True, - '123.123.123.123/1': True, - '123.123.123.123/123': False, - '123.123.123.123/a': False, - '123.123.123.123/': False, - } - input_ips = 
forwarded_ips.keys() - valid_ips = [ip for ip, valid in forwarded_ips.items() if valid] - invalid_ips = [ip for ip, valid in forwarded_ips.items() if not valid] - - self.assertEqual(self.mock_utils.ParseForwardedIps(input_ips), valid_ips) - expected_calls = [mock.call.warning(mock.ANY, ip) for ip in invalid_ips] - self.assertEqual(self.mock_logger.mock_calls, expected_calls) - expected_calls = [mock.call.IPNetwork(ip) for ip in valid_ips] - self.assertEqual(mock_netaddr.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.ip_forwarding_utils.netaddr') - def testParseForwardedIpsSubnet(self, mock_netaddr): - mock_netaddr.IPNetwork.return_value = ['1.1.1.1'] - forwarded_ips = [ - '1.1.1.1', - '1.1.1.1/32', - '1.1.1.1/1', - '1.1.1.1/10', - '1.1.1.1/24' - ] - for ip in forwarded_ips: - self.assertEqual(self.mock_utils.ParseForwardedIps([ip]), ['1.1.1.1']) - expected_calls = [mock.call.IPNetwork(ip) for ip in forwarded_ips] - self.assertEqual(mock_netaddr.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.ip_forwarding_utils.netifaces') - @mock.patch('google_compute_engine.distro_lib.ip_forwarding_utils.netaddr') - def testGetForwardedIps(self, mock_netaddr, mock_netifaces): - mock_netifaces.AF_INET = 0 - mock_netifaces.ifaddresses.return_value = [[ - {'addr': 'a', 'netmask': 'a mask'}, - {'addr': 'b', 'netmask': 'b mask'}, - {'addr': 'c', 'netmask': 'c mask'}, - ]] - mock_netaddr.IPAddress().netmask_bits.return_value = 32 - mock_parse = mock.Mock() - mock_parse.return_value = ['Test'] - self.mock_utils.ParseForwardedIps = mock_parse - - self.assertEqual( - self.mock_utils.GetForwardedIps('interface', 'ip'), ['Test']) - mock_netifaces.ifaddresses.assert_called_once_with('interface') - mock_parse.assert_called_once_with(['a/32', 'b/32', 'c/32']) - - @mock.patch('google_compute_engine.distro_lib.ip_forwarding_utils.netifaces') - def testGetForwardedIpsEmpty(self, mock_netifaces): - mock_netifaces.AF_INET = 0 - 
mock_netifaces.ifaddresses.return_value = [] - - self.assertEqual( - self.mock_utils.GetForwardedIps('interface', 'ip'), []) - - @mock.patch('google_compute_engine.distro_lib.ip_forwarding_utils.netaddr') - def testAddForwardedIp(self, mock_netaddr): - mock_netaddr.IPNetwork.return_value = ['1.1.1.1'] - mock_run = mock.Mock() - self.mock_utils._RunIfconfig = mock_run - - self.mock_utils.AddForwardedIp('1.1.1.1', 'interface') - mock_netaddr.IPNetwork.assert_called_once_with('1.1.1.1') - mock_run.assert_called_once_with( - args=['interface', 'alias', '1.1.1.1/32']) - - @mock.patch('google_compute_engine.distro_lib.ip_forwarding_utils.netaddr') - def testAddIpAlias(self, mock_netaddr): - mock_netaddr.IPNetwork.return_value = [ - '1.1.1.0', '1.1.1.1', '1.1.1.2', '1.1.1.3' - ] - mock_run = mock.Mock() - self.mock_utils._RunIfconfig = mock_run - - self.mock_utils.AddForwardedIp('1.1.1.1/30', 'interface') - mock_netaddr.IPNetwork.assert_called_once_with('1.1.1.1/30') - expected_calls = [ - mock.call(args=['interface', 'alias', '1.1.1.0/32']), - mock.call(args=['interface', 'alias', '1.1.1.1/32']), - mock.call(args=['interface', 'alias', '1.1.1.2/32']), - mock.call(args=['interface', 'alias', '1.1.1.3/32']) - ] - self.assertEqual(mock_run.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.distro_lib.ip_forwarding_utils.netaddr') - def testRemoveForwardedIp(self, mock_netaddr): - mock_ip = mock.Mock() - mock_ip.ip = '1.1.1.1' - mock_netaddr.IPNetwork.return_value = mock_ip - mock_run = mock.Mock() - self.mock_utils._RunIfconfig = mock_run - - self.mock_utils.RemoveForwardedIp('1.1.1.1', 'interface') - mock_netaddr.IPNetwork.assert_called_once_with('1.1.1.1') - mock_run.assert_called_once_with( - args=['interface', '-alias', '1.1.1.1']) - - @mock.patch('google_compute_engine.distro_lib.ip_forwarding_utils.netaddr') - def testRemoveAliasIp(self, mock_netaddr): - mock_ip = mock.Mock() - mock_ip.ip = '1.1.1.1' - mock_netaddr.IPNetwork.return_value = mock_ip - 
mock_run = mock.Mock() - self.mock_utils._RunIfconfig = mock_run - - self.mock_utils.RemoveForwardedIp('1.1.1.1/24', 'interface') - mock_netaddr.IPNetwork.assert_called_once_with('1.1.1.1/24') - mock_run.assert_called_once_with( - args=['interface', '-alias', '1.1.1.1']) - - -if __name__ == '__main__': - unittest.main() diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/utils.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/utils.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/distro_lib/utils.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/distro_lib/utils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,65 +0,0 @@ -#!/usr/bin/python -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Utilities that are distro specific.""" - - -class Utils(object): - """Utilities used by Linux guest services.""" - - def __init__(self, debug=False): - """Constructor. - - Args: - debug: bool, True if debug output should write to the console. - """ - self.debug = debug - - def EnableIpv6(self, interfaces, logger, dhclient_script=None): - """Enable IPv6 on the list of network interfaces. 
- - Args: - interfaces: list of string, the output device names for enabling IPv6. - logger: logger object, used to write to SysLog and serial port. - dhclient_script: string, the path to a dhclient script used by dhclient. - """ - pass - - def EnableNetworkInterfaces(self, interfaces, logger, dhclient_script=None): - """Enable the list of network interfaces. - - Args: - interfaces: list of string, the output device names to enable. - logger: logger object, used to write to SysLog and serial port. - dhclient_script: string, the path to a dhclient script used by dhclient. - """ - pass - - def HandleClockSync(self, logger): - """Sync the software clock with the hypervisor clock. - - Args: - logger: logger object, used to write to SysLog and serial port. - """ - pass - - def IpForwardingUtils(self, logger, proto_id=None): - """Get system IP address configuration utilities. - - Args: - logger: logger object, used to write to SysLog and serial port. - proto_id: string, the routing protocol identifier for Google IP changes. - """ - pass diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/file_utils.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/file_utils.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/file_utils.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/file_utils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,124 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""A library providing file utilities for setting permissions and locking.""" - -import contextlib -import errno -import fcntl -import os -import subprocess - - -def _SetSELinuxContext(path): - """Set the appropriate SELinux context, if SELinux tools are installed. - - Calls /sbin/restorecon on the provided path to set the SELinux context as - specified by policy. This call does not operate recursively. - - Only some OS configurations use SELinux. It is therefore acceptable for - restorecon to be missing, in which case we do nothing. - - Args: - path: string, the path on which to fix the SELinux context. - """ - restorecon = '/sbin/restorecon' - if os.path.isfile(restorecon) and os.access(restorecon, os.X_OK): - subprocess.call([restorecon, path]) - - -def SetPermissions(path, mode=None, uid=None, gid=None, mkdir=False): - """Set the permissions and ownership of a path. - - Args: - path: string, the path for which owner ID and group ID needs to be setup. - mode: octal string, the permissions to set on the path. - uid: int, the owner ID to be set for the path. - gid: int, the group ID to be set for the path. - mkdir: bool, True if the directory needs to be created. - """ - if mkdir and not os.path.exists(path): - os.mkdir(path, mode or 0o777) - elif mode: - os.chmod(path, mode) - if uid and gid: - os.chown(path, uid, gid) - _SetSELinuxContext(path) - - -def Lock(fd, path, blocking): - """Lock the provided file descriptor. - - Args: - fd: int, the file descriptor of the file to lock. - path: string, the name of the file to lock. 
- blocking: bool, whether the function should return immediately. - - Raises: - IOError, raised from flock while attempting to lock a file. - """ - operation = fcntl.LOCK_EX if blocking else fcntl.LOCK_EX | fcntl.LOCK_NB - try: - fcntl.flock(fd, operation) - except IOError as e: - if e.errno == errno.EWOULDBLOCK: - raise IOError('Exception locking %s. File already locked.' % path) - else: - raise IOError('Exception locking %s. %s.' % (path, str(e))) - - -def Unlock(fd, path): - """Release the lock on the file. - - Args: - fd: int, the file descriptor of the file to unlock. - path: string, the name of the file to lock. - - Raises: - IOError, raised from flock while attempting to release a file lock. - """ - try: - fcntl.flock(fd, fcntl.LOCK_UN | fcntl.LOCK_NB) - except IOError as e: - if e.errno == errno.EWOULDBLOCK: - raise IOError('Exception unlocking %s. Locked by another process.' % path) - else: - raise IOError('Exception unlocking %s. %s.' % (path, str(e))) - - -@contextlib.contextmanager -def LockFile(path, blocking=False): - """Interface to flock-based file locking to prevent concurrent executions. - - Args: - path: string, the name of the file to lock. - blocking: bool, whether the function should return immediately. - - Yields: - None, yields when a lock on the file is obtained. - - Raises: - IOError, raised from flock locking operations on a file. - OSError, raised from file operations. 
- """ - fd = os.open(path, os.O_CREAT) - try: - Lock(fd, path, blocking) - yield - finally: - try: - Unlock(fd, path) - finally: - os.close(fd) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/instance_setup/instance_config.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/instance_setup/instance_config.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/instance_setup/instance_config.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/instance_setup/instance_config.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,161 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""A library used to set up the instance's default configurations file. - -Note that the configurations in -/etc/default/instance_configs.cfg.template override the values set in -/etc/default/instance_configs.cfg. The system instance_configs.cfg may be -overridden during package upgrade. 
-""" - -import logging -import os - -from google_compute_engine import config_manager -from google_compute_engine import constants -from google_compute_engine.compat import parser -from google_compute_engine.compat import stringio - - -class InstanceConfig(config_manager.ConfigManager): - """Creates a defaults config file for instance configuration.""" - - instance_config = constants.SYSCONFDIR + '/instance_configs.cfg' - instance_config_distro = '%s.distro' % instance_config - instance_config_template = '%s.template' % instance_config - instance_config_script = os.path.abspath(__file__) - instance_config_header = ( - 'This file is automatically created at boot time by the %s script. Do ' - 'not edit this file directly. If you need to add items to this file, ' - 'create or edit %s instead and then run ' - '/usr/bin/google_instance_setup.') - instance_config_options = { - 'Accounts': { - 'deprovision_remove': 'false', - 'groups': 'adm,dip,docker,lxd,plugdev,video', - - # The encrypted password is set to '*' for SSH on Linux systems - # without PAM. - # - # SSH uses '!' as its locked account token: - # https://github.com/openssh/openssh-portable/blob/master/configure.ac - # - # When the token is specified, SSH denies login: - # https://github.com/openssh/openssh-portable/blob/master/auth.c - # - # To solve the issue, make the password '*' which is also recognized - # as locked but does not prevent SSH login. - 'gpasswd_add_cmd': 'gpasswd -a {user} {group}', - 'gpasswd_remove_cmd': 'gpasswd -d {user} {group}', - 'groupadd_cmd': 'groupadd {group}', - 'useradd_cmd': 'useradd -m -s /bin/bash -p * {user}', - 'userdel_cmd': 'userdel -r {user}', - 'usermod_cmd': 'usermod -G {groups} {user}', - }, - 'Daemons': { - 'accounts_daemon': 'true', - 'clock_skew_daemon': 'true', - 'ip_forwarding_daemon': 'true', # Deprecated. 
- 'network_daemon': 'true', - }, - 'Instance': { - 'instance_id': '0', - }, - 'InstanceSetup': { - 'host_key_types': 'ecdsa,ed25519,rsa', - 'optimize_local_ssd': 'true', - 'network_enabled': 'true', - # WARNING: Do not change the value of 'set_boto_config' without first - # consulting the gsutil team (GoogleCloudPlatform/gsutil). - 'set_boto_config': 'true', - 'set_host_keys': 'true', - 'set_multiqueue': 'true', - }, - 'IpForwarding': { - 'ethernet_proto_id': '66', - 'ip_aliases': 'true', - 'target_instance_ips': 'true', - }, - 'MetadataScripts': { - 'run_dir': '', - 'startup': 'true', - 'shutdown': 'true', - 'default_shell': '/bin/bash', - }, - 'NetworkInterfaces': { - 'setup': 'true', - 'ip_forwarding': 'true', - 'dhcp_command': '', - 'dhclient_script': '/sbin/google-dhclient-script', - }, - } - - def __init__(self, logger=logging, instance_config_metadata=None): - """Constructor. - - Inherit from the ConfigManager class. Read the template for instance - defaults and write new sections and options. This prevents package - updates from overriding user set defaults. - - Args: - logger: logger object, used to write to SysLog and serial port. - instance_config_metadata: string, a config file specified in metadata. - """ - self.logger = logger - self.instance_config_metadata = instance_config_metadata - self.instance_config_header %= ( - self.instance_config_script, self.instance_config_template) - # User provided instance configs should always take precedence. - super(InstanceConfig, self).__init__( - config_file=self.instance_config_template, - config_header=self.instance_config_header) - - # Use the instance config settings from metadata if specified. Then use - # settings in an instance config file if one exists. If a config - # file does not already exist, try to use the distro provided defaults. If - # no file exists, use the default configuration settings. 
- config_files = [self.instance_config, self.instance_config_distro] - config_defaults = [] - if self.instance_config_metadata: - config = parser.Parser() - try: - config.read_file(stringio.StringIO(self.instance_config_metadata)) - except parser.Error as e: - self.logger.error('Error parsing metadata configs: %s', str(e)) - else: - config_defaults.append( - dict((s, dict(config.items(s))) for s in config.sections())) - for config_file in config_files: - if os.path.exists(config_file): - config = parser.Parser() - try: - config.read(config_file) - except parser.Error as e: - self.logger.error('Error parsing config file: %s', str(e)) - else: - config_defaults.append( - dict((s, dict(config.items(s))) for s in config.sections())) - config_defaults.append(self.instance_config_options) - - for defaults in config_defaults: - for section, options in sorted(defaults.items()): - for option, value in sorted(options.items()): - super(InstanceConfig, self).SetOption( - section, option, value, overwrite=False) - - def WriteConfig(self): - """Write the config values to the instance defaults file.""" - super(InstanceConfig, self).WriteConfig(config_file=self.instance_config) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/instance_setup/instance_setup.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/instance_setup/instance_setup.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/instance_setup/instance_setup.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/instance_setup/instance_setup.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,258 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Run initialization code the first time the instance boots.""" - -import logging.handlers -import optparse -import os -import re -import shutil -import subprocess -import tempfile - -from google_compute_engine import constants -from google_compute_engine import file_utils -from google_compute_engine import logger -from google_compute_engine import metadata_watcher -from google_compute_engine.boto import boto_config -from google_compute_engine.compat import urlerror -from google_compute_engine.compat import urlrequest -from google_compute_engine.instance_setup import instance_config - - -class PutRequest(urlrequest.Request): - def get_method(self): - return 'PUT' - - -GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/v1/' - 'instance/guest-attributes') -HOSTKEY_NAMESPACE = 'hostkeys' - - -class InstanceSetup(object): - """Initialize the instance the first time it boots.""" - - def __init__(self, debug=False): - """Constructor. - - Args: - debug: bool, True if debug output should write to the console. 
- """ - self.debug = debug - facility = logging.handlers.SysLogHandler.LOG_DAEMON - self.logger = logger.Logger( - name='instance-setup', debug=self.debug, facility=facility) - self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger) - self.metadata_dict = None - self.instance_config = instance_config.InstanceConfig(logger=self.logger) - - if self.instance_config.GetOptionBool('InstanceSetup', 'network_enabled'): - self.metadata_dict = self.watcher.GetMetadata() - instance_config_metadata = self._GetInstanceConfig() - self.instance_config = instance_config.InstanceConfig( - logger=self.logger, instance_config_metadata=instance_config_metadata) - if self.instance_config.GetOptionBool('InstanceSetup', 'set_host_keys'): - host_key_types = self.instance_config.GetOptionString( - 'InstanceSetup', 'host_key_types') - self._SetSshHostKeys(host_key_types=host_key_types) - if self.instance_config.GetOptionBool('InstanceSetup', 'set_boto_config'): - self._SetupBotoConfig() - if self.instance_config.GetOptionBool( - 'InstanceSetup', 'optimize_local_ssd'): - self._RunScript('google_optimize_local_ssd') - if self.instance_config.GetOptionBool('InstanceSetup', 'set_multiqueue'): - self._RunScript('google_set_multiqueue') - try: - self.instance_config.WriteConfig() - except (IOError, OSError) as e: - self.logger.warning(str(e)) - - def _GetInstanceConfig(self): - """Get the instance configuration specified in metadata. - - Returns: - string, the instance configuration data. 
- """ - try: - instance_data = self.metadata_dict['instance']['attributes'] - except KeyError: - instance_data = {} - self.logger.warning('Instance attributes were not found.') - - try: - project_data = self.metadata_dict['project']['attributes'] - except KeyError: - project_data = {} - self.logger.warning('Project attributes were not found.') - - return (instance_data.get('google-instance-configs') - or project_data.get('google-instance-configs')) - - def _RunScript(self, script): - """Run a script and log the streamed script output. - - Args: - script: string, the file location of an executable script. - """ - process = subprocess.Popen( - script, shell=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE) - while True: - for line in iter(process.stdout.readline, b''): - self.logger.info(line.decode('utf-8').rstrip('\n')) - if process.poll() is not None: - break - - def _GetInstanceId(self): - """Get the instance ID for this VM. - - Returns: - string, the instance ID for the VM. - """ - try: - return str(self.metadata_dict['instance']['id']) - except KeyError: - self.logger.warning('Instance ID was not found in metadata.') - return None - - def _GenerateSshKey(self, key_type, key_dest): - """Generate a new SSH key. - - Args: - key_type: string, the type of the SSH key. - key_dest: string, a file location to store the SSH key. - - Returns: - tuple, key_type and public key string. - """ - # Create a temporary file to save the created RSA keys. 
- with tempfile.NamedTemporaryFile(prefix=key_type, delete=True) as temp: - temp_key = temp.name - - command = ['ssh-keygen', '-t', key_type, '-f', temp_key, '-N', '', '-q'] - try: - self.logger.info('Generating SSH key %s.', key_dest) - subprocess.check_call(command) - except subprocess.CalledProcessError: - self.logger.warning('Could not create SSH key %s.', key_dest) - return - - shutil.move(temp_key, key_dest) - shutil.move('%s.pub' % temp_key, '%s.pub' % key_dest) - - file_utils.SetPermissions(key_dest, mode=0o600) - file_utils.SetPermissions('%s.pub' % key_dest, mode=0o644) - with open('%s.pub' % key_dest, 'r') as pk: - key_data = pk.read() - - key_values = key_data.split() - if len(key_values) < 2: - self.logger.warning('Could not read host key from %s.pub.', key_dest) - return - else: - return key_values[0], key_values[1] - - def _WriteHostKeyToGuestAttributes(self, key_type, key_value): - """Write a host key to guest attributes, ignoring errors.""" - headers = {'Metadata-Flavor': 'Google'} - url = '%s/%s/%s' % (GUEST_ATTRIBUTES_URL, HOSTKEY_NAMESPACE, key_type) - key_value = key_value.encode('utf-8') - req = PutRequest(url, key_value, headers) - try: - response = urlrequest.urlopen(req) - self.logger.debug(response) - self.logger.info('Wrote %s host key to guest attributes.', key_type) - except urlerror.HTTPError: - self.logger.info('Unable to write %s host key to guest attributes.', - key_type) - - def _StartSshd(self): - """Initialize the SSH daemon.""" - # Exit as early as possible. - # Instance setup systemd scripts block sshd from starting. 
- if os.path.exists(constants.LOCALBASE + '/bin/systemctl'): - return - elif (os.path.exists('/etc/init.d/ssh') - or os.path.exists('/etc/init/ssh.conf')): - subprocess.call(['service', 'ssh', 'start']) - subprocess.call(['service', 'ssh', 'reload']) - elif (os.path.exists('/etc/init.d/sshd') - or os.path.exists('/etc/init/sshd.conf')): - subprocess.call(['service', 'sshd', 'start']) - subprocess.call(['service', 'sshd', 'reload']) - - def _SetSshHostKeys(self, host_key_types=None): - """Regenerates SSH host keys when the VM is restarted with a new IP address. - - Booting a VM from an image with a known SSH key allows a number of attacks. - This function will regenerating the host key whenever the IP address - changes. This applies the first time the instance is booted, and each time - the disk is used to boot a new instance. - - Args: - host_key_types: string, a comma separated list of host key types. - """ - section = 'Instance' - instance_id = self._GetInstanceId() - if instance_id != self.instance_config.GetOptionString( - section, 'instance_id'): - self.logger.info('Generating SSH host keys for instance %s.', instance_id) - file_regex = re.compile(r'ssh_host_(?P[a-z0-9]*)_key\Z') - key_dir = '/etc/ssh' - key_files = [f for f in os.listdir(key_dir) if file_regex.match(f)] - key_types = host_key_types.split(',') if host_key_types else [] - key_types_files = ['ssh_host_%s_key' % key_type for key_type in key_types] - for key_file in set(key_files) | set(key_types_files): - key_type = file_regex.match(key_file).group('type') - key_dest = os.path.join(key_dir, key_file) - key_data = self._GenerateSshKey(key_type, key_dest) - if key_data: - self._WriteHostKeyToGuestAttributes(key_data[0], key_data[1]) - self._StartSshd() - self.instance_config.SetOption(section, 'instance_id', str(instance_id)) - - def _GetNumericProjectId(self): - """Get the numeric project ID. - - Returns: - string, the numeric project ID. 
- """ - try: - return str(self.metadata_dict['project']['numericProjectId']) - except KeyError: - self.logger.warning('Numeric project ID was not found in metadata.') - return None - - def _SetupBotoConfig(self): - """Set the boto config so GSUtil works with provisioned service accounts.""" - project_id = self._GetNumericProjectId() - try: - boto_config.BotoConfig(project_id, debug=self.debug) - except (IOError, OSError) as e: - self.logger.warning(str(e)) - - -def main(): - parser = optparse.OptionParser() - parser.add_option( - '-d', '--debug', action='store_true', dest='debug', - help='print debug output to the console.') - (options, _) = parser.parse_args() - InstanceSetup(debug=bool(options.debug)) - - -if __name__ == '__main__': - main() diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/instance_setup/tests/instance_config_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/instance_setup/tests/instance_config_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/instance_setup/tests/instance_config_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/instance_setup/tests/instance_config_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,165 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unittest for instance_config.py module.""" - -from google_compute_engine.instance_setup import instance_config -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -class InstanceConfigTest(unittest.TestCase): - - def setUp(self): - self.mock_logger = mock.Mock() - instance_config.InstanceConfig.instance_config = 'config' - instance_config.InstanceConfig.instance_config_distro = 'distro' - instance_config.InstanceConfig.instance_config_template = 'template' - instance_config.InstanceConfig.instance_config_script = '/tmp/test.py' - instance_config.InstanceConfig.instance_config_header = '%s %s' - instance_config.InstanceConfig.instance_config_options = { - 'third': { - 'e': '3', - 'c': '1', - 'd': '2', - }, - 'first': { - 'a': 'false', - }, - 'second': { - 'b': 'true', - }, - } - - @mock.patch('google_compute_engine.instance_setup.instance_config.os.path.exists') - @mock.patch('google_compute_engine.instance_setup.instance_config.config_manager.ConfigManager.SetOption') - @mock.patch('google_compute_engine.instance_setup.instance_config.config_manager.ConfigManager.__init__') - def testInstanceConfig(self, mock_init, mock_set, mock_exists): - mocks = mock.Mock() - mocks.attach_mock(mock_init, 'init') - mocks.attach_mock(mock_set, 'set') - mocks.attach_mock(mock_exists, 'exists') - mocks.attach_mock(self.mock_logger, 'logger') - mock_exists.return_value = False - - instance_config.InstanceConfig(logger=self.mock_logger) - expected_calls = [ - mock.call.init( - config_file='template', config_header='/tmp/test.py template'), - mock.call.exists('config'), - mock.call.exists('distro'), - mock.call.set('first', 'a', 'false', overwrite=False), - mock.call.set('second', 'b', 'true', overwrite=False), - mock.call.set('third', 'c', '1', overwrite=False), - mock.call.set('third', 'd', '2', overwrite=False), - 
mock.call.set('third', 'e', '3', overwrite=False), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.instance_setup.instance_config.os.path.exists') - @mock.patch('google_compute_engine.instance_setup.instance_config.config_manager.ConfigManager.SetOption') - @mock.patch('google_compute_engine.instance_setup.instance_config.config_manager.ConfigManager.__init__') - def testInstanceConfigExists(self, mock_init, mock_set, mock_exists): - config_parser = instance_config.parser.Parser() - config_metadata = '[first]\na = true' - mock_config = mock.create_autospec(instance_config.parser.Parser) - with mock.patch( - 'google_compute_engine.instance_setup.instance_config' - '.parser') as mock_parser: - mock_config.read = mock.Mock() - mock_config.sections = mock.Mock() - mock_config.sections.return_value = ['a', 'b'] - mock_config.items = lambda key: {'key: %s' % key: 'value: %s' % key} - mock_parser.Parser.side_effect = [ - config_parser, mock_config, mock_config] - mocks = mock.Mock() - mocks.attach_mock(mock_init, 'init') - mocks.attach_mock(mock_set, 'set') - mocks.attach_mock(mock_parser, 'parser') - mocks.attach_mock(mock_config, 'config') - mocks.attach_mock(mock_exists, 'exists') - mocks.attach_mock(self.mock_logger, 'logger') - mock_exists.return_value = True - - instance_config.InstanceConfig( - logger=self.mock_logger, instance_config_metadata=config_metadata) - expected_calls = [ - mock.call.init( - config_file='template', config_header='/tmp/test.py template'), - mock.call.parser.Parser(), - mock.call.exists('config'), - mock.call.parser.Parser(), - mock.call.config.read('config'), - mock.call.config.sections(), - mock.call.exists('distro'), - mock.call.parser.Parser(), - mock.call.config.read('distro'), - mock.call.config.sections(), - mock.call.set('first', 'a', 'true', overwrite=False), - mock.call.set('a', 'key: a', 'value: a', overwrite=False), - mock.call.set('b', 'key: b', 'value: b', overwrite=False), - 
mock.call.set('a', 'key: a', 'value: a', overwrite=False), - mock.call.set('b', 'key: b', 'value: b', overwrite=False), - mock.call.set('first', 'a', 'false', overwrite=False), - mock.call.set('second', 'b', 'true', overwrite=False), - mock.call.set('third', 'c', '1', overwrite=False), - mock.call.set('third', 'd', '2', overwrite=False), - mock.call.set('third', 'e', '3', overwrite=False) - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.instance_setup.instance_config.parser.Parser.read') - @mock.patch('google_compute_engine.instance_setup.instance_config.os.path.exists') - @mock.patch('google_compute_engine.instance_setup.instance_config.config_manager.ConfigManager.SetOption') - @mock.patch('google_compute_engine.instance_setup.instance_config.config_manager.ConfigManager.__init__') - def testInstanceConfigError(self, mock_init, mock_set, mock_exists, mock_read): - mock_read.side_effect = instance_config.parser.ParsingError('Error') - mocks = mock.Mock() - mocks.attach_mock(mock_init, 'init') - mocks.attach_mock(mock_set, 'set') - mocks.attach_mock(mock_read, 'read') - mocks.attach_mock(mock_exists, 'exists') - mocks.attach_mock(self.mock_logger, 'logger') - mock_exists.return_value = True - - instance_config.InstanceConfig( - logger=self.mock_logger, instance_config_metadata='Error') - expected_calls = [ - mock.call.init( - config_file='template', config_header='/tmp/test.py template'), - mock.call.logger.error(mock.ANY, mock.ANY), - mock.call.exists('config'), - mock.call.read('config'), - mock.call.logger.error(mock.ANY, mock.ANY), - mock.call.exists('distro'), - mock.call.read('distro'), - mock.call.logger.error(mock.ANY, mock.ANY), - mock.call.set('first', 'a', 'false', overwrite=False), - mock.call.set('second', 'b', 'true', overwrite=False), - mock.call.set('third', 'c', '1', overwrite=False), - mock.call.set('third', 'd', '2', overwrite=False), - mock.call.set('third', 'e', '3', overwrite=False) - ] - 
self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.instance_setup.instance_config.config_manager.ConfigManager.WriteConfig') - def testWriteConfig(self, mock_write): - mock_config = instance_config.InstanceConfig() - instance_config.InstanceConfig.WriteConfig(mock_config) - mock_write.assert_called_once_with(config_file='config') - - -if __name__ == '__main__': - unittest.main() diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/instance_setup/tests/instance_setup_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/instance_setup/tests/instance_setup_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/instance_setup/tests/instance_setup_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/instance_setup/tests/instance_setup_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,426 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unittest for instance_setup.py module.""" - -import subprocess - -from google_compute_engine.instance_setup import instance_setup -from google_compute_engine.test_compat import builtin -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -class InstanceSetupTest(unittest.TestCase): - - def setUp(self): - self.mock_instance_config = mock.Mock() - self.mock_logger = mock.Mock() - self.mock_setup = mock.create_autospec(instance_setup.InstanceSetup) - self.mock_setup.debug = False - self.mock_setup.instance_config = self.mock_instance_config - self.mock_setup.logger = self.mock_logger - - @mock.patch('google_compute_engine.instance_setup.instance_setup.instance_config') - @mock.patch('google_compute_engine.instance_setup.instance_setup.metadata_watcher') - @mock.patch('google_compute_engine.instance_setup.instance_setup.logger') - def testInstanceSetup(self, mock_logger, mock_watcher, mock_config): - mock_setup = mock.create_autospec(instance_setup.InstanceSetup) - mocks = mock.Mock() - mocks.attach_mock(mock_logger, 'logger') - mocks.attach_mock(mock_watcher, 'watcher') - mocks.attach_mock(mock_config, 'config') - mocks.attach_mock(mock_setup, 'setup') - mock_logger_instance = mock.Mock() - mock_logger.Logger.return_value = mock_logger_instance - mock_watcher_instance = mock.Mock() - mock_watcher_instance.GetMetadata.return_value = {'hello': 'world'} - mock_watcher.MetadataWatcher.return_value = mock_watcher_instance - mock_config_instance = mock.Mock() - mock_config_instance.GetOptionBool.return_value = True - mock_config_instance.GetOptionString.return_value = 'type' - mock_config.InstanceConfig.return_value = mock_config_instance - mock_setup._GetInstanceConfig.return_value = 'config' - - instance_setup.InstanceSetup.__init__(mock_setup) - expected_calls = [ - # Setup and reading the configuration file. 
- mock.call.logger.Logger( - name=mock.ANY, debug=False, facility=mock.ANY), - mock.call.watcher.MetadataWatcher(logger=mock_logger_instance), - mock.call.config.InstanceConfig(logger=mock_logger_instance), - # Check network access for reaching the metadata server. - mock.call.config.InstanceConfig().GetOptionBool( - 'InstanceSetup', 'network_enabled'), - mock.call.watcher.MetadataWatcher().GetMetadata(), - # Get the instance config if specified in metadata. - mock.call.setup._GetInstanceConfig(), - mock.call.config.InstanceConfig( - logger=mock_logger_instance, instance_config_metadata='config'), - # Setup for SSH host keys if necessary. - mock.call.config.InstanceConfig().GetOptionBool( - 'InstanceSetup', 'set_host_keys'), - mock.call.config.InstanceConfig().GetOptionString( - 'InstanceSetup', 'host_key_types'), - mock.call.setup._SetSshHostKeys(host_key_types='type'), - # Setup for the boto config if necessary. - mock.call.config.InstanceConfig().GetOptionBool( - 'InstanceSetup', 'set_boto_config'), - mock.call.setup._SetupBotoConfig(), - # Setup for local SSD. - mock.call.config.InstanceConfig().GetOptionBool( - 'InstanceSetup', 'optimize_local_ssd'), - mock.call.setup._RunScript('google_optimize_local_ssd'), - # Setup for multiqueue virtio driver. - mock.call.config.InstanceConfig().GetOptionBool( - 'InstanceSetup', 'set_multiqueue'), - mock.call.setup._RunScript('google_set_multiqueue'), - # Write the updated config file. 
- mock.call.config.InstanceConfig().WriteConfig(), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - self.assertEqual(mock_setup.metadata_dict, {'hello': 'world'}) - - @mock.patch('google_compute_engine.instance_setup.instance_setup.instance_config') - @mock.patch('google_compute_engine.instance_setup.instance_setup.metadata_watcher') - @mock.patch('google_compute_engine.instance_setup.instance_setup.logger') - def testInstanceSetupException(self, mock_logger, mock_watcher, mock_config): - mock_setup = mock.create_autospec(instance_setup.InstanceSetup) - mocks = mock.Mock() - mocks.attach_mock(mock_logger, 'logger') - mocks.attach_mock(mock_watcher, 'watcher') - mocks.attach_mock(mock_config, 'config') - mocks.attach_mock(mock_setup, 'setup') - mock_logger_instance = mock.Mock() - mock_logger.Logger.return_value = mock_logger_instance - mock_config_instance = mock.Mock() - mock_config_instance.GetOptionBool.return_value = False - mock_config_instance.WriteConfig.side_effect = IOError('Test Error') - mock_config.InstanceConfig.return_value = mock_config_instance - - instance_setup.InstanceSetup.__init__(mock_setup) - expected_calls = [ - mock.call.logger.Logger( - name=mock.ANY, debug=False, facility=mock.ANY), - mock.call.watcher.MetadataWatcher(logger=mock_logger_instance), - mock.call.config.InstanceConfig(logger=mock_logger_instance), - mock.call.config.InstanceConfig().GetOptionBool( - 'InstanceSetup', 'network_enabled'), - mock.call.config.InstanceConfig().GetOptionBool( - 'InstanceSetup', 'optimize_local_ssd'), - mock.call.config.InstanceConfig().GetOptionBool( - 'InstanceSetup', 'set_multiqueue'), - mock.call.config.InstanceConfig().WriteConfig(), - mock.call.logger.Logger().warning('Test Error'), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - self.assertIsNone(mock_setup.metadata_dict) - - def testGetInstanceConfig(self): - instance_config = 'test' - self.mock_setup.metadata_dict = { - 'instance': { - 'attributes': { - 
'google-instance-configs': instance_config, - } - }, - 'project': { - 'attributes': { - 'google-instance-configs': 'Unused config.', - } - } - } - self.assertEqual( - instance_setup.InstanceSetup._GetInstanceConfig(self.mock_setup), - instance_config) - self.mock_logger.warning.assert_not_called() - - def testGetInstanceConfigProject(self): - instance_config = 'test' - self.mock_setup.metadata_dict = { - 'instance': { - 'attributes': {} - }, - 'project': { - 'attributes': { - 'google-instance-configs': instance_config, - } - } - } - self.assertEqual( - instance_setup.InstanceSetup._GetInstanceConfig(self.mock_setup), - instance_config) - self.mock_logger.warning.assert_not_called() - - def testGetInstanceConfigNone(self): - self.mock_setup.metadata_dict = { - 'instance': { - 'attributes': {} - }, - 'project': { - 'attributes': {} - } - } - self.assertIsNone( - instance_setup.InstanceSetup._GetInstanceConfig(self.mock_setup)) - self.mock_logger.warning.assert_not_called() - - def testGetInstanceConfigNoMetadata(self): - self.mock_setup.metadata_dict = {} - self.assertIsNone( - instance_setup.InstanceSetup._GetInstanceConfig(self.mock_setup)) - self.assertEqual(self.mock_logger.warning.call_count, 2) - - @mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess') - def testRunScript(self, mock_subprocess): - mock_readline = mock.Mock() - mock_readline.side_effect = [bytes(b'a\n'), bytes(b'b\n'), bytes(b'')] - mock_stdout = mock.Mock() - mock_stdout.readline = mock_readline - mock_process = mock.Mock() - mock_process.poll.return_value = 0 - mock_process.stdout = mock_stdout - mock_subprocess.Popen.return_value = mock_process - script = '/tmp/script.py' - - instance_setup.InstanceSetup._RunScript(self.mock_setup, script) - expected_calls = [mock.call('a'), mock.call('b')] - self.assertEqual(self.mock_logger.info.mock_calls, expected_calls) - mock_subprocess.Popen.assert_called_once_with( - script, shell=True, stderr=mock_subprocess.STDOUT, - 
stdout=mock_subprocess.PIPE) - mock_process.poll.assert_called_once_with() - - def testGetInstanceId(self): - self.mock_setup.metadata_dict = {'instance': {'attributes': {}, 'id': 123}} - self.assertEqual( - instance_setup.InstanceSetup._GetInstanceId(self.mock_setup), '123') - self.mock_logger.warning.assert_not_called() - - def testGetInstanceIdNotFound(self): - self.mock_setup.metadata_dict = {'instance': {'attributes': {}}} - self.assertIsNone( - instance_setup.InstanceSetup._GetInstanceId(self.mock_setup)) - self.assertEqual(self.mock_logger.warning.call_count, 1) - - @mock.patch('google_compute_engine.instance_setup.instance_setup.file_utils.SetPermissions') - @mock.patch('google_compute_engine.instance_setup.instance_setup.shutil.move') - @mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess.check_call') - @mock.patch('google_compute_engine.instance_setup.instance_setup.tempfile.NamedTemporaryFile') - def testGenerateSshKey( - self, mock_tempfile, mock_call, mock_move, mock_permissions): - mocks = mock.Mock() - mocks.attach_mock(mock_tempfile, 'tempfile') - mocks.attach_mock(mock_call, 'call') - mocks.attach_mock(mock_move, 'move') - mocks.attach_mock(mock_permissions, 'permissions') - mocks.attach_mock(self.mock_logger, 'logger') - key_type = 'key-type' - key_dest = '/key/dest' - temp_dest = '/tmp/dest' - mock_tempfile.return_value = mock_tempfile - mock_tempfile.__enter__.return_value.name = temp_dest - mock_open = mock.mock_open() - key_file_contents = 'ssh-rsa asdfasdf' - expected_key_data = ('ssh-rsa', 'asdfasdf') - - with mock.patch('%s.open' % builtin, mock_open, create=False): - mock_open().read.return_value = key_file_contents - key_data = instance_setup.InstanceSetup._GenerateSshKey( - self.mock_setup, key_type, key_dest) - expected_calls = [ - mock.call.tempfile(prefix=key_type, delete=True), - mock.call.tempfile.__enter__(), - mock.call.tempfile.__exit__(None, None, None), - mock.call.logger.info(mock.ANY, key_dest), - 
mock.call.call( - ['ssh-keygen', '-t', key_type, '-f', temp_dest, '-N', '', '-q']), - mock.call.move(temp_dest, key_dest), - mock.call.move('%s.pub' % temp_dest, '%s.pub' % key_dest), - mock.call.permissions(key_dest, mode=0o600), - mock.call.permissions('%s.pub' % key_dest, mode=0o644), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - self.assertEqual(key_data, expected_key_data) - - mock_open().read.return_value = '' - key_data = instance_setup.InstanceSetup._GenerateSshKey( - self.mock_setup, key_type, key_dest) - self.assertEqual(key_data, None) - - @mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess.check_call') - def testGenerateSshKeyProcessError(self, mock_call): - key_type = 'key-type' - key_dest = '/key/dest' - mock_call.side_effect = subprocess.CalledProcessError(1, 'Test') - - instance_setup.InstanceSetup._GenerateSshKey( - self.mock_setup, key_type, key_dest) - self.mock_logger.info.assert_called_once_with(mock.ANY, key_dest) - self.mock_logger.warning.assert_called_once_with(mock.ANY, key_dest) - - @mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess.call') - @mock.patch('google_compute_engine.instance_setup.instance_setup.os.path.exists') - def testStartSshdSysVinit(self, mock_exists, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_exists, 'exists') - mocks.attach_mock(mock_call, 'call') - mock_exists.side_effect = [False, False, True] - - instance_setup.InstanceSetup._StartSshd(self.mock_setup) - expected_calls = [ - mock.call.exists('/bin/systemctl'), - mock.call.exists('/etc/init.d/ssh'), - mock.call.exists('/etc/init/ssh.conf'), - mock.call.call(['service', 'ssh', 'start']), - mock.call.call(['service', 'ssh', 'reload']), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess.call') - @mock.patch('google_compute_engine.instance_setup.instance_setup.os.path.exists') - def testStartSshdUpstart(self, 
mock_exists, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_exists, 'exists') - mocks.attach_mock(mock_call, 'call') - mock_exists.side_effect = [False, False, False, False, True] - - instance_setup.InstanceSetup._StartSshd(self.mock_setup) - expected_calls = [ - mock.call.exists('/bin/systemctl'), - mock.call.exists('/etc/init.d/ssh'), - mock.call.exists('/etc/init/ssh.conf'), - mock.call.exists('/etc/init.d/sshd'), - mock.call.exists('/etc/init/sshd.conf'), - mock.call.call(['service', 'sshd', 'start']), - mock.call.call(['service', 'sshd', 'reload']), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess.call') - @mock.patch('google_compute_engine.instance_setup.instance_setup.os.path.exists') - def testStartSshdSystemd(self, mock_exists, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_exists, 'exists') - mocks.attach_mock(mock_call, 'call') - mock_exists.return_value = True - - instance_setup.InstanceSetup._StartSshd(self.mock_setup) - expected_calls = [mock.call.exists('/bin/systemctl')] - self.assertEqual(mocks.mock_calls, expected_calls) - - def testSetSshHostKeys(self): - self.mock_instance_config.GetOptionString.return_value = '123' - mock_instance_id = mock.Mock() - mock_instance_id.return_value = '123' - self.mock_setup._GetInstanceId = mock_instance_id - - instance_setup.InstanceSetup._SetSshHostKeys(self.mock_setup) - self.mock_instance_config.SetOption.assert_not_called() - - @mock.patch('google_compute_engine.instance_setup.instance_setup.urlrequest.urlopen') - @mock.patch('google_compute_engine.instance_setup.instance_setup.PutRequest') - def testWriteHostKeyToGuestAttributes(self, mock_put, mock_urlopen): - key_type = 'ssh-rsa' - key_value = 'asdfasdf' - encoded_key_value = key_value.encode('utf-8') - expected_url = ('http://metadata.google.internal/computeMetadata/v1/' - 'instance/guest-attributes/hostkeys/%s' % key_type) - headers = 
{'Metadata-Flavor': 'Google'} - - instance_setup.InstanceSetup._WriteHostKeyToGuestAttributes( - self.mock_setup, key_type, key_value) - self.mock_logger.info.assert_called_with( - 'Wrote %s host key to guest attributes.', key_type) - mock_put.assert_called_with(expected_url, encoded_key_value, headers) - - mock_urlopen.side_effect = instance_setup.urlerror.HTTPError( - 'http://foo', 403, 'Forbidden', {}, None) - instance_setup.InstanceSetup._WriteHostKeyToGuestAttributes( - self.mock_setup, key_type, key_value) - self.mock_logger.info.assert_called_with( - 'Unable to write %s host key to guest attributes.', key_type) - - def testPutRequest(self): - put_request = instance_setup.PutRequest('http://example.com/') - self.assertEqual(put_request.get_method(), 'PUT') - - @mock.patch('google_compute_engine.instance_setup.instance_setup.os.listdir') - def testSetSshHostKeysFirstBoot(self, mock_listdir): - self.mock_instance_config.GetOptionString.return_value = None - mock_instance_id = mock.Mock() - mock_instance_id.return_value = '123' - self.mock_setup._GetInstanceId = mock_instance_id - mock_generate_key = mock.Mock() - mock_generate_key.return_value = ('ssh-rsa', 'asdfasdf') - self.mock_setup._GenerateSshKey = mock_generate_key - mock_listdir.return_value = [ - 'ssh_config', - 'ssh_host_dsa_key', - 'ssh_host_dsa_key.pub', - 'ssh_host_ed25519_key', - 'ssh_host_ed25519_key.pub', - 'ssh_host_rsa_key', - 'ssh_host_rsa_key.pub', - ] - - instance_setup.InstanceSetup._SetSshHostKeys( - self.mock_setup, host_key_types='rsa,dsa,abc') - expected_calls = [ - mock.call('abc', '/etc/ssh/ssh_host_abc_key'), - mock.call('dsa', '/etc/ssh/ssh_host_dsa_key'), - mock.call('ed25519', '/etc/ssh/ssh_host_ed25519_key'), - mock.call('rsa', '/etc/ssh/ssh_host_rsa_key'), - ] - - self.assertEqual(sorted(mock_generate_key.mock_calls), expected_calls) - self.mock_instance_config.SetOption.assert_called_once_with( - 'Instance', 'instance_id', '123') - - def testGetNumericProjectId(self): - 
self.mock_setup.metadata_dict = { - 'project': { - 'attributes': {}, - 'numericProjectId': 123, - } - } - self.assertEqual( - instance_setup.InstanceSetup._GetNumericProjectId(self.mock_setup), - '123') - self.mock_logger.warning.assert_not_called() - - def testGetNumericProjectIdNotFound(self): - self.mock_setup.metadata_dict = {'project': {'attributes': {}}} - self.assertIsNone( - instance_setup.InstanceSetup._GetNumericProjectId(self.mock_setup)) - self.assertEqual(self.mock_logger.warning.call_count, 1) - - @mock.patch('google_compute_engine.instance_setup.instance_setup.boto_config.BotoConfig') - def testSetupBotoConfig(self, mock_boto): - mock_project_id = mock.Mock() - mock_project_id.return_value = '123' - self.mock_setup._GetNumericProjectId = mock_project_id - instance_setup.InstanceSetup._SetupBotoConfig(self.mock_setup) - mock_boto.assert_called_once_with('123', debug=False) - - @mock.patch('google_compute_engine.instance_setup.instance_setup.boto_config.BotoConfig') - def testSetupBotoConfigLocked(self, mock_boto): - mock_boto.side_effect = IOError('Test Error') - instance_setup.InstanceSetup._SetupBotoConfig(self.mock_setup) - self.mock_logger.warning.assert_called_once_with('Test Error') - - -if __name__ == '__main__': - unittest.main() diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/logger.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/logger.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/logger.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/logger.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,55 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""A library for logging text to SysLog and the serial console.""" - -from google_compute_engine import constants -from google_compute_engine.compat import logging - - -def Logger(name, debug=False, facility=None): - """Get a logging object with handlers for sending logs to SysLog. - - Args: - name: string, the name of the logger which will be added to log entries. - debug: bool, True if debug output should write to the console. - facility: int, an encoding of the SysLog handler's facility and priority. - - Returns: - logging object, an object for logging entries. - """ - logger = logging.getLogger(name) - logger.handlers = [] - logger.addHandler(logging.NullHandler()) - logger.propagate = False - logger.setLevel(logging.DEBUG) - formatter = logging.Formatter(name + ': %(levelname)s %(message)s') - - if debug: - # Create a handler for console logging. - console_handler = logging.StreamHandler() - console_handler.setLevel(logging.DEBUG) - console_handler.setFormatter(formatter) - logger.addHandler(console_handler) - - if facility: - # Create a handler for sending logs to SysLog. 
- syslog_handler = logging.handlers.SysLogHandler( - address=constants.SYSLOG_SOCKET, facility=facility) - syslog_handler.setLevel(logging.INFO) - syslog_handler.setFormatter(formatter) - logger.addHandler(syslog_handler) - - return logger diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/script_executor.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/script_executor.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/script_executor.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/script_executor.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,81 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Execute user provided metadata scripts.""" - -import os -import stat -import subprocess - - -class ScriptExecutor(object): - """A class for executing user provided metadata scripts.""" - - def __init__(self, logger, script_type, default_shell=None): - """Constructor. - - Args: - logger: logger object, used to write to SysLog and serial port. - script_type: string, the type of the script we are running. - default_shell: string, the default shell to execute the script. 
- """ - self.logger = logger - self.script_type = script_type - self.default_shell = default_shell or '/bin/bash' - - def _MakeExecutable(self, metadata_script): - """Add executable permissions to a file. - - Args: - metadata_script: string, the path to the executable file. - """ - mode = os.stat(metadata_script).st_mode - os.chmod(metadata_script, mode | stat.S_IEXEC) - - def _RunScript(self, metadata_key, metadata_script): - """Run a script and log the streamed script output. - - Args: - metadata_key: string, the key specifing the metadata script. - metadata_script: string, the file location of an executable script. - """ - process = subprocess.Popen( - metadata_script, shell=True, - executable=self.default_shell, - stderr=subprocess.STDOUT, stdout=subprocess.PIPE) - while True: - for line in iter(process.stdout.readline, b''): - message = line.decode('utf-8', 'replace').rstrip('\n') - if message: - self.logger.info('%s: %s', metadata_key, message) - if process.poll() is not None: - break - self.logger.info('%s: Return code %s.', metadata_key, process.returncode) - - def RunScripts(self, script_dict): - """Run the metadata scripts; execute a URL script first if one is provided. - - Args: - script_dict: a dictionary mapping metadata keys to script files. 
- """ - metadata_types = ['%s-script-url', '%s-script'] - metadata_keys = [key % self.script_type for key in metadata_types] - metadata_keys = [key for key in metadata_keys if script_dict.get(key)] - if not metadata_keys: - self.logger.info('No %s scripts found in metadata.', self.script_type) - for metadata_key in metadata_keys: - metadata_script = script_dict.get(metadata_key) - self._MakeExecutable(metadata_script) - self._RunScript(metadata_key, metadata_script) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/script_manager.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/script_manager.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/script_manager.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/script_manager.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,113 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Manage the retrieval and excution of metadata scripts.""" - -import contextlib -import logging.handlers -import optparse -import shutil -import tempfile - -from google_compute_engine import config_manager -from google_compute_engine import logger -from google_compute_engine.metadata_scripts import script_executor -from google_compute_engine.metadata_scripts import script_retriever - - -@contextlib.contextmanager -def _CreateTempDir(prefix, run_dir=None): - """Context manager for creating a temporary directory. - - Args: - prefix: string, the prefix for the temporary directory. - run_dir: string, the base directory location of the temporary directory. - - Yields: - string, the temporary directory created. - """ - temp_dir = tempfile.mkdtemp(prefix=prefix + '-', dir=run_dir) - try: - yield temp_dir - finally: - shutil.rmtree(temp_dir) - - -class ScriptManager(object): - """A class for retrieving and executing metadata scripts.""" - - def __init__( - self, script_type, default_shell=None, run_dir=None, debug=False): - """Constructor. - - Args: - script_type: string, the metadata script type to run. - default_shell: string, the default shell to execute the script. - run_dir: string, the base directory location of the temporary directory. - debug: bool, True if debug output should write to the console. - """ - self.script_type = script_type - self.default_shell = default_shell - name = '%s-script' % self.script_type - facility = logging.handlers.SysLogHandler.LOG_DAEMON - self.logger = logger.Logger(name=name, debug=debug, facility=facility) - self.retriever = script_retriever.ScriptRetriever(self.logger, script_type) - self.executor = script_executor.ScriptExecutor( - self.logger, script_type, default_shell=default_shell) - self._RunScripts(run_dir=run_dir) - - def _RunScripts(self, run_dir=None): - """Retrieve metadata scripts and execute them. - - Args: - run_dir: string, the base directory location of the temporary directory. 
- """ - with _CreateTempDir(self.script_type, run_dir=run_dir) as dest_dir: - try: - self.logger.info('Starting %s scripts.', self.script_type) - script_dict = self.retriever.GetScripts(dest_dir) - self.executor.RunScripts(script_dict) - finally: - self.logger.info('Finished running %s scripts.', self.script_type) - - -def main(): - script_types = ('startup', 'shutdown') - parser = optparse.OptionParser() - parser.add_option( - '-d', '--debug', action='store_true', dest='debug', - help='print debug output to the console.') - parser.add_option( - '--script-type', dest='script_type', help='metadata script type.') - (options, _) = parser.parse_args() - if options.script_type and options.script_type.lower() in script_types: - script_type = options.script_type.lower() - else: - valid_args = ', '.join(script_types) - message = 'No valid argument specified. Options: [%s].' % valid_args - raise ValueError(message) - - instance_config = config_manager.ConfigManager() - if instance_config.GetOptionBool('MetadataScripts', script_type): - ScriptManager( - script_type, - default_shell=instance_config.GetOptionString( - 'MetadataScripts', 'default_shell'), - run_dir=instance_config.GetOptionString('MetadataScripts', 'run_dir'), - debug=bool(options.debug)) - - -if __name__ == '__main__': - main() diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/script_retriever.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/script_retriever.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/script_retriever.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/script_retriever.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,257 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. 
All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Retrieve and store user provided metadata scripts.""" - -import functools -import re -import socket -import tempfile -import time - -from google_compute_engine import metadata_watcher -from google_compute_engine.compat import httpclient -from google_compute_engine.compat import urlerror -from google_compute_engine.compat import urlrequest -from google_compute_engine.compat import urlretrieve - - -def _RetryOnUnavailable(func): - """Function decorator template to retry on a service unavailable exception.""" - - @functools.wraps(func) - def Wrapper(*args, **kwargs): - final_exception = None - for _ in range(3): - try: - response = func(*args, **kwargs) - except (httpclient.HTTPException, socket.error, urlerror.URLError) as e: - final_exception = e - time.sleep(5) - continue - else: - return response - raise final_exception - return Wrapper - - -@_RetryOnUnavailable -def _UrlOpenWithRetry(request): - """Call urlopen with retry.""" - return urlrequest.urlopen(request) - - -@_RetryOnUnavailable -def _UrlRetrieveWithRetry(url, dest): - """Call urlretrieve with retry.""" - return urlretrieve.urlretrieve(url, dest) - - -class ScriptRetriever(object): - """A class for retrieving and storing user provided metadata scripts.""" - token_metadata_key = 'instance/service-accounts/default/token' - # Cached authentication token to be used when downloading from bucket. 
- token = None - - def __init__(self, logger, script_type): - """Constructor. - - Args: - logger: logger object, used to write to SysLog and serial port. - script_type: string, the metadata script type to run. - """ - self.logger = logger - self.script_type = script_type - self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger) - - def _DownloadAuthUrl(self, url, dest_dir): - """Download a Google Storage URL using an authentication token. - - If the token cannot be fetched, fallback to unauthenticated download. - - Args: - url: string, the URL to download. - dest_dir: string, the path to a directory for storing metadata scripts. - - Returns: - string, the path to the file storing the metadata script. - """ - dest_file = tempfile.NamedTemporaryFile(dir=dest_dir, delete=False) - dest_file.close() - dest = dest_file.name - - self.logger.info( - 'Downloading url from %s to %s using authentication token.', url, dest) - - if not self.token: - response = self.watcher.GetMetadata( - self.token_metadata_key, recursive=False, retry=False) - - if not response: - self.logger.info( - 'Authentication token not found. Attempting unauthenticated ' - 'download.') - return self._DownloadUrl(url, dest_dir) - - self.token = '%s %s' % ( - response.get('token_type', ''), response.get('access_token', '')) - - try: - request = urlrequest.Request(url) - request.add_unredirected_header('Metadata-Flavor', 'Google') - request.add_unredirected_header('Authorization', self.token) - content = _UrlOpenWithRetry(request).read() - except Exception as e: - self.logger.warning('Could not download %s. %s.', url, str(e)) - return None - - with open(dest, 'wb') as f: - f.write(content) - - return dest - - def _DownloadUrl(self, url, dest_dir): - """Download a script from a given URL. - - Args: - url: string, the URL to download. - dest_dir: string, the path to a directory for storing metadata scripts. - - Returns: - string, the path to the file storing the metadata script. 
- """ - dest_file = tempfile.NamedTemporaryFile(dir=dest_dir, delete=False) - dest_file.close() - dest = dest_file.name - - self.logger.info('Downloading url from %s to %s.', url, dest) - try: - _UrlRetrieveWithRetry(url, dest) - return dest - except (httpclient.HTTPException, socket.error, urlerror.URLError) as e: - self.logger.warning('Could not download %s. %s.', url, str(e)) - except Exception as e: - self.logger.warning('Exception downloading %s. %s.', url, str(e)) - return None - - def _DownloadScript(self, url, dest_dir): - """Download the contents of the URL to the destination. - - Args: - url: string, the URL to download. - dest_dir: string, the path to a directory for storing metadata scripts. - - Returns: - string, the path to the file storing the metadata script. - """ - # Check for the preferred Google Storage URL format: - # gs:/// - if url.startswith(r'gs://'): - # Convert the string into a standard URL. - url = re.sub('^gs://', 'https://storage.googleapis.com/', url) - return self._DownloadAuthUrl(url, dest_dir) - - header = r'http[s]?://' - domain = r'storage\.googleapis\.com' - - # Many of the Google Storage URLs are supported below. - # It is prefered that customers specify their object using - # its gs:/// url. 
- bucket = r'(?P[a-z0-9][-_.a-z0-9]*[a-z0-9])' - - # Accept any non-empty string that doesn't contain a wildcard character - obj = r'(?P[^\*\?]+)' - - # Check for the Google Storage URLs: - # http://.storage.googleapis.com/ - # https://.storage.googleapis.com/ - gs_regex = re.compile(r'\A%s%s\.%s/%s\Z' % (header, bucket, domain, obj)) - match = gs_regex.match(url) - if match: - return self._DownloadAuthUrl(url, dest_dir) - - # Check for the other possible Google Storage URLs: - # http://storage.googleapis.com// - # https://storage.googleapis.com// - # - # The following are deprecated but checked: - # http://commondatastorage.googleapis.com// - # https://commondatastorage.googleapis.com// - gs_regex = re.compile( - r'\A%s(commondata)?%s/%s/%s\Z' % (header, domain, bucket, obj)) - match = gs_regex.match(url) - if match: - return self._DownloadAuthUrl(url, dest_dir) - - # Unauthenticated download of the object. - return self._DownloadUrl(url, dest_dir) - - def _GetAttributeScripts(self, attribute_data, dest_dir): - """Retrieve the scripts from attribute metadata. - - Args: - attribute_data: dict, the contents of the attributes metadata. - dest_dir: string, the path to a directory for storing metadata scripts. - - Returns: - dict, a dictionary mapping metadata keys to files storing scripts. 
- """ - script_dict = {} - attribute_data = attribute_data or {} - metadata_key = '%s-script' % self.script_type - metadata_value = attribute_data.get(metadata_key) - if metadata_value: - self.logger.info('Found %s in metadata.', metadata_key) - with tempfile.NamedTemporaryFile( - mode='w', dir=dest_dir, delete=False) as dest: - dest.write(metadata_value.lstrip()) - script_dict[metadata_key] = dest.name - - metadata_key = '%s-script-url' % self.script_type - metadata_value = attribute_data.get(metadata_key) - if metadata_value: - self.logger.info('Found %s in metadata.', metadata_key) - downloaded_dest = self._DownloadScript(metadata_value, dest_dir) - if downloaded_dest is None: - self.logger.warning('Failed to download metadata script.') - script_dict[metadata_key] = downloaded_dest - - return script_dict - - def GetScripts(self, dest_dir): - """Retrieve the scripts to execute. - - Args: - dest_dir: string, the path to a directory for storing metadata scripts. - - Returns: - dict, a dictionary mapping set metadata keys with associated scripts. 
- """ - metadata_dict = self.watcher.GetMetadata() or {} - - try: - instance_data = metadata_dict['instance']['attributes'] - except KeyError: - instance_data = None - self.logger.warning('Instance attributes were not found.') - - try: - project_data = metadata_dict['project']['attributes'] - except KeyError: - project_data = None - self.logger.warning('Project attributes were not found.') - - return (self._GetAttributeScripts(instance_data, dest_dir) - or self._GetAttributeScripts(project_data, dest_dir)) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/tests/script_executor_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/tests/script_executor_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/tests/script_executor_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/tests/script_executor_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,110 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unittest for script_executor.py module.""" - -import stat - -from google_compute_engine.metadata_scripts import script_executor -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -class ScriptExecutorTest(unittest.TestCase): - - def setUp(self): - self.script_type = 'test' - self.metadata_script = '/tmp/script' - self.mock_logger = mock.Mock() - self.executor = script_executor.ScriptExecutor( - self.mock_logger, self.script_type) - - @mock.patch('google_compute_engine.metadata_scripts.script_executor.os') - def testMakeExecutable(self, mock_os): - st_mode = 1 - chmod_mode = st_mode + stat.S_IEXEC - mock_os_stat = mock.Mock() - mock_os_stat.st_mode = st_mode - mock_os.stat.return_value = mock_os_stat - self.executor._MakeExecutable(self.metadata_script) - mock_os.chmod.assert_called_once_with(self.metadata_script, chmod_mode) - - @mock.patch('google_compute_engine.metadata_scripts.script_executor.subprocess') - def testRunScript(self, mock_subprocess): - mock_readline = mock.Mock() - mock_readline.side_effect = [bytes(b'a\n'), bytes(b'b\n'), bytes(b'')] - mock_stdout = mock.Mock() - mock_stdout.readline = mock_readline - mock_process = mock.Mock() - mock_process.poll.return_value = 0 - mock_process.stdout = mock_stdout - mock_process.returncode = 1 - mock_subprocess.Popen.return_value = mock_process - metadata_key = '%s-script' % self.script_type - - self.executor._RunScript(metadata_key, self.metadata_script) - expected_calls = [ - mock.call('%s: %s', metadata_key, 'a'), - mock.call('%s: %s', metadata_key, 'b'), - mock.call('%s: Return code %s.', metadata_key, 1), - ] - self.assertEqual(self.mock_logger.info.mock_calls, expected_calls) - mock_subprocess.Popen.assert_called_once_with( - self.metadata_script, shell=True, executable='/bin/bash', - stderr=mock_subprocess.STDOUT, stdout=mock_subprocess.PIPE) - mock_process.poll.assert_called_once_with() - - def testRunScripts(self): - 
self.executor._MakeExecutable = mock.Mock() - self.executor._RunScript = mock.Mock() - mocks = mock.Mock() - mocks.attach_mock(self.executor._MakeExecutable, 'make_executable') - mocks.attach_mock(self.executor._RunScript, 'run_script') - mocks.attach_mock(self.mock_logger, 'logger') - script_dict = { - '%s-script' % self.script_type: 'a', - '%s-script-key' % self.script_type: 'b', - '%s-script-url' % self.script_type: 'c', - } - - self.executor.RunScripts(script_dict) - expected_calls = [ - mock.call.make_executable('c'), - mock.call.run_script('%s-script-url' % self.script_type, 'c'), - mock.call.make_executable('a'), - mock.call.run_script('%s-script' % self.script_type, 'a'), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - def testRunScriptsEmpty(self): - self.executor._MakeExecutable = mock.Mock() - self.executor._RunScript = mock.Mock() - mocks = mock.Mock() - mocks.attach_mock(self.executor._MakeExecutable, 'make_executable') - mocks.attach_mock(self.executor._RunScript, 'run_script') - mocks.attach_mock(self.mock_logger, 'logger') - script_dict = { - '%s-invalid' % self.script_type: 'script', - } - - self.executor.RunScripts(script_dict) - expected_calls = [ - mock.call.logger.info(mock.ANY, self.script_type), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - -if __name__ == '__main__': - unittest.main() diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/tests/script_manager_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/tests/script_manager_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/tests/script_manager_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/tests/script_manager_test.py 1970-01-01 
00:00:00.000000000 +0000 @@ -1,70 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unittest for script_manager.py module.""" - -from google_compute_engine.metadata_scripts import script_manager -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -class ScriptManagerTest(unittest.TestCase): - - @mock.patch('google_compute_engine.metadata_scripts.script_manager.script_retriever') - @mock.patch('google_compute_engine.metadata_scripts.script_manager.logger') - @mock.patch('google_compute_engine.metadata_scripts.script_manager.script_executor') - @mock.patch('google_compute_engine.metadata_scripts.script_manager.shutil.rmtree') - @mock.patch('google_compute_engine.metadata_scripts.script_manager.tempfile.mkdtemp') - def testRunScripts( - self, mock_mkdir, mock_rmtree, mock_executor, mock_logger, - mock_retriever): - mock_logger_instance = mock.Mock() - mock_logger.Logger.return_value = mock_logger_instance - mock_retriever_instance = mock.Mock() - mock_retriever.ScriptRetriever.return_value = mock_retriever_instance - mocks = mock.Mock() - mocks.attach_mock(mock_mkdir, 'mkdir') - mocks.attach_mock(mock_rmtree, 'rmtree') - mocks.attach_mock(mock_executor, 'executor') - mocks.attach_mock(mock_logger, 'logger') - mocks.attach_mock(mock_retriever, 'retriever') - run_dir = '/var/run' - script_type = 'test' - script_name = '%s-script' % 
script_type - script_prefix = '%s-' % script_type - test_dir = 'test-dir' - test_dict = {'test': 'dict'} - mock_mkdir.return_value = test_dir - mock_retriever_instance.GetScripts.return_value = test_dict - - script_manager.ScriptManager(script_type, run_dir=run_dir) - expected_calls = [ - mock.call.logger.Logger( - name=script_name, debug=False, facility=mock.ANY), - mock.call.retriever.ScriptRetriever(mock_logger_instance, script_type), - mock.call.executor.ScriptExecutor( - mock_logger_instance, script_type, default_shell=None), - mock.call.mkdir(prefix=script_prefix, dir=run_dir), - mock.call.logger.Logger().info(mock.ANY, script_type), - mock.call.retriever.ScriptRetriever().GetScripts(test_dir), - mock.call.executor.ScriptExecutor().RunScripts(test_dict), - mock.call.logger.Logger().info(mock.ANY, script_type), - mock.call.rmtree(test_dir), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - -if __name__ == '__main__': - unittest.main() diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/tests/script_retriever_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/tests/script_retriever_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/tests/script_retriever_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/tests/script_retriever_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,421 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unittest for script_retriever.py module.""" - -import subprocess - -from google_compute_engine.compat import urlerror -from google_compute_engine.metadata_scripts import script_retriever -from google_compute_engine.test_compat import builtin -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -class ScriptRetrieverTest(unittest.TestCase): - - def setUp(self): - self.script_type = 'test' - self.dest_dir = '/tmp' - self.dest = '/tmp/file' - self.mock_logger = mock.Mock() - self.mock_watcher = mock.Mock() - self.retriever = script_retriever.ScriptRetriever( - self.mock_logger, self.script_type) - - @mock.patch('google_compute_engine.metadata_scripts.script_retriever.tempfile.NamedTemporaryFile') - @mock.patch('google_compute_engine.metadata_scripts.script_retriever.urlrequest.Request') - @mock.patch('google_compute_engine.metadata_scripts.script_retriever.urlrequest.urlopen') - def testDownloadAuthUrl(self, mock_urlopen, mock_request, mock_tempfile): - auth_url = 'https://storage.googleapis.com/fake/url' - mock_tempfile.return_value = mock_tempfile - mock_tempfile.name = self.dest - self.retriever.token = 'bar' - - mock_open = mock.mock_open() - with mock.patch('%s.open' % builtin, mock_open): - self.assertEqual( - self.retriever._DownloadAuthUrl(auth_url, self.dest_dir), self.dest) - - mock_tempfile.assert_called_once_with(dir=self.dest_dir, delete=False) - mock_tempfile.close.assert_called_once_with() - - self.mock_logger.info.assert_called_once_with( - mock.ANY, auth_url, self.dest) - 
mock_request.assert_called_with(auth_url) - mocked_request = mock_request() - mocked_request.add_unredirected_header.assert_called_with( - 'Authorization', 'bar') - mock_urlopen.assert_called_with(mocked_request) - urlopen_read = mock_urlopen().read(return_value=b'foo') - self.mock_logger.warning.assert_not_called() - - mock_open.assert_called_once_with(self.dest, 'wb') - handle = mock_open() - handle.write.assert_called_once_with(urlopen_read) - - @mock.patch('google_compute_engine.metadata_scripts.script_retriever.tempfile.NamedTemporaryFile') - @mock.patch('google_compute_engine.metadata_scripts.script_retriever.urlrequest.Request') - @mock.patch('google_compute_engine.metadata_watcher.MetadataWatcher.GetMetadata') - def testDownloadAuthUrlExceptionAndToken( - self, mock_get_metadata, mock_request, mock_tempfile): - auth_url = 'https://storage.googleapis.com/fake/url' - metadata_prefix = 'http://metadata.google.internal/computeMetadata/v1/' - token_url = metadata_prefix + 'instance/service-accounts/default/token' - mock_tempfile.return_value = mock_tempfile - mock_tempfile.name = self.dest - self.retriever.token = None - - mock_get_metadata.return_value = { - 'token_type': 'foo', 'access_token': 'bar'} - mock_request.return_value = mock_request - mock_request.side_effect = urlerror.URLError('Error.') - - self.assertIsNone(self.retriever._DownloadAuthUrl(auth_url, self.dest_dir)) - - mock_get_metadata.return_value = mock_get_metadata - # GetMetadata includes a prefix, so remove it. 
- stripped_url = token_url.replace(metadata_prefix, '') - mock_get_metadata.assert_called_once_with( - stripped_url, recursive=False, retry=False) - - self.assertEqual(self.retriever.token, 'foo bar') - - self.mock_logger.info.assert_called_once_with( - mock.ANY, auth_url, self.dest) - self.assertEqual(self.mock_logger.warning.call_count, 1) - - @mock.patch('google_compute_engine.metadata_scripts.script_retriever.tempfile.NamedTemporaryFile') - @mock.patch('google_compute_engine.metadata_scripts.script_retriever.ScriptRetriever._DownloadUrl') - @mock.patch('google_compute_engine.metadata_watcher.MetadataWatcher.GetMetadata') - def testDownloadAuthUrlFallback( - self, mock_get_metadata, mock_download_url, mock_tempfile): - auth_url = 'https://storage.googleapis.com/fake/url' - metadata_prefix = 'http://metadata.google.internal/computeMetadata/v1/' - token_url = metadata_prefix + 'instance/service-accounts/default/token' - mock_tempfile.return_value = mock_tempfile - mock_tempfile.name = self.dest - self.retriever.token = None - - mock_get_metadata.return_value = None - mock_download_url.return_value = None - - self.assertIsNone(self.retriever._DownloadAuthUrl(auth_url, self.dest_dir)) - - mock_get_metadata.return_value = mock_get_metadata - # GetMetadata includes a prefix, so remove it. 
- prefix = 'http://metadata.google.internal/computeMetadata/v1/' - stripped_url = token_url.replace(prefix, '') - mock_get_metadata.assert_called_once_with( - stripped_url, recursive=False, retry=False) - mock_download_url.assert_called_once_with(auth_url, self.dest_dir) - - self.assertIsNone(self.retriever.token) - - expected_calls = [ - mock.call(mock.ANY, auth_url, self.dest), - mock.call(mock.ANY), - ] - self.assertEqual(self.mock_logger.info.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.metadata_scripts.script_retriever.tempfile.NamedTemporaryFile') - @mock.patch('google_compute_engine.metadata_scripts.script_retriever.urlretrieve.urlretrieve') - def testDownloadUrl(self, mock_retrieve, mock_tempfile): - url = 'http://www.google.com/fake/url' - mock_tempfile.return_value = mock_tempfile - mock_tempfile.name = self.dest - self.assertEqual( - self.retriever._DownloadUrl(url, self.dest_dir), self.dest) - mock_tempfile.assert_called_once_with(dir=self.dest_dir, delete=False) - mock_tempfile.close.assert_called_once_with() - self.mock_logger.info.assert_called_once_with(mock.ANY, url, self.dest) - mock_retrieve.assert_called_once_with(url, self.dest) - self.mock_logger.warning.assert_not_called() - - @mock.patch('google_compute_engine.metadata_scripts.script_retriever.time') - @mock.patch('google_compute_engine.metadata_scripts.script_retriever.tempfile.NamedTemporaryFile') - @mock.patch('google_compute_engine.metadata_scripts.script_retriever.urlretrieve.urlretrieve') - def testDownloadUrlProcessError(self, mock_retrieve, mock_tempfile, mock_time): - url = 'http://www.google.com/fake/url' - mock_tempfile.return_value = mock_tempfile - mock_tempfile.name = self.dest - mock_success = mock.Mock() - mock_success.getcode.return_value = script_retriever.httpclient.OK - # Success after 3 timeout. Since max_retry = 3, the final result is fail. 
- mock_retrieve.side_effect = [ - script_retriever.socket.timeout(), - script_retriever.socket.timeout(), - script_retriever.socket.timeout(), - mock_success, - ] - self.assertIsNone(self.retriever._DownloadUrl(url, self.dest_dir)) - self.assertEqual(self.mock_logger.warning.call_count, 1) - - @mock.patch('google_compute_engine.metadata_scripts.script_retriever.time') - @mock.patch('google_compute_engine.metadata_scripts.script_retriever.tempfile.NamedTemporaryFile') - @mock.patch('google_compute_engine.metadata_scripts.script_retriever.urlretrieve.urlretrieve') - def testDownloadUrlWithRetry(self, mock_retrieve, mock_tempfile, mock_time): - url = 'http://www.google.com/fake/url' - mock_tempfile.return_value = mock_tempfile - mock_tempfile.name = self.dest - mock_success = mock.Mock() - mock_success.getcode.return_value = script_retriever.httpclient.OK - # Success after 2 timeout. Since max_retry = 3, the final result is success. - mock_retrieve.side_effect = [ - script_retriever.socket.timeout(), - script_retriever.socket.timeout(), - mock_success, - ] - self.assertIsNotNone(self.retriever._DownloadUrl(url, self.dest_dir)) - - @mock.patch('google_compute_engine.metadata_scripts.script_retriever.tempfile.NamedTemporaryFile') - @mock.patch('google_compute_engine.metadata_scripts.script_retriever.urlretrieve.urlretrieve') - def testDownloadUrlException(self, mock_retrieve, mock_tempfile): - url = 'http://www.google.com/fake/url' - mock_tempfile.return_value = mock_tempfile - mock_tempfile.name = self.dest - mock_retrieve.side_effect = Exception('Error.') - self.assertIsNone(self.retriever._DownloadUrl(url, self.dest_dir)) - self.assertEqual(self.mock_logger.warning.call_count, 1) - - def _CreateUrls(self, bucket, obj, gs_match=True): - """Creates a URL for each of the supported Google Storage URL formats. - - Args: - bucket: string, the Google Storage bucket name. - obj: string, the object name in the bucket. 
- gs_match: bool, True if the bucket and object names are valid. - - Returns: - (list, dict): - list, the URLs to download. - dict, a Google Storage URL mapped to the expected 'gs://' format. - """ - gs_url = 'gs://%s/%s' % (bucket, obj) - gs_urls = {gs_url: gs_url} - url_formats = [ - 'http://%s.storage.googleapis.com/%s', - 'https://%s.storage.googleapis.com/%s', - 'http://storage.googleapis.com/%s/%s', - 'https://storage.googleapis.com/%s/%s', - 'http://commondatastorage.googleapis.com/%s/%s', - 'https://commondatastorage.googleapis.com/%s/%s', - ] - url_formats = [url % (bucket, obj) for url in url_formats] - if gs_match: - gs_urls.update(dict((url, gs_url) for url in url_formats)) - return ([], gs_urls) - else: - return (url_formats, gs_urls) - - def testDownloadScript(self): - mock_auth_download = mock.Mock() - self.retriever._DownloadAuthUrl = mock_auth_download - mock_download = mock.Mock() - self.retriever._DownloadUrl = mock_download - download_urls = [] - download_gs_urls = {} - - component_urls = [ - ('@#$%^', '\n\n\n\n', False), - ('///////', '///////', False), - ('Abc', 'xyz', False), - (' abc', 'xyz', False), - ('abc', 'xyz?', False), - ('abc', 'xyz*', False), - ('', 'xyz', False), - ('a', 'xyz', False), - ('abc', '', False), - ('hello', 'world', True), - ('hello', 'world!', True), - ('hello', 'world !', True), - ('hello', 'w o r l d ', True), - ('hello', 'w\no\nr\nl\nd ', True), - ('123_hello', '1!@#$%^', True), - ('123456', 'hello.world', True), - ] - - for bucket, obj, gs_match in component_urls: - urls, gs_urls = self._CreateUrls(bucket, obj, gs_match=gs_match) - download_urls.extend(urls) - download_gs_urls.update(gs_urls) - - # All Google Storage URLs are downloaded with an authentication token. 
- for url, gs_url in download_gs_urls.items(): - mock_download.reset_mock() - mock_auth_download.reset_mock() - self.retriever._DownloadScript(gs_url, self.dest_dir) - new_gs_url = gs_url.replace('gs://', 'https://storage.googleapis.com/') - mock_auth_download.assert_called_once_with(new_gs_url, self.dest_dir) - mock_download.assert_not_called() - - for url in download_urls: - mock_download.reset_mock() - self.retriever._DownloadScript(url, self.dest_dir) - mock_download.assert_called_once_with(url, self.dest_dir) - - for url, gs_url in download_gs_urls.items(): - if url.startswith('gs://'): - continue - mock_auth_download.reset_mock() - mock_auth_download.return_value = None - mock_download.reset_mock() - self.retriever._DownloadScript(url, self.dest_dir) - mock_auth_download.assert_called_once_with(url, self.dest_dir) - - @mock.patch('google_compute_engine.metadata_scripts.script_retriever.tempfile.NamedTemporaryFile') - def testGetAttributeScripts(self, mock_tempfile): - script = 'echo Hello World.\n' - script_dest = '/tmp/script' - script_url = 'gs://fake/url' - script_url_dest = '/tmp/script_url' - attribute_data = { - '%s-script' % self.script_type: '\n%s' % script, - '%s-script-url' % self.script_type: script_url, - } - expected_data = { - '%s-script' % self.script_type: script_dest, - '%s-script-url' % self.script_type: script_url_dest, - } - # Mock saving a script to a file. - mock_dest = mock.Mock() - mock_dest.name = script_dest - mock_tempfile.__enter__.return_value = mock_dest - mock_tempfile.return_value = mock_tempfile - # Mock downloading a script from a URL. 
- mock_download = mock.Mock() - mock_download.return_value = script_url_dest - self.retriever._DownloadScript = mock_download - - self.assertEqual( - self.retriever._GetAttributeScripts(attribute_data, self.dest_dir), - expected_data) - self.assertEqual(self.mock_logger.info.call_count, 2) - mock_dest.write.assert_called_once_with(script) - mock_download.assert_called_once_with(script_url, self.dest_dir) - - def testGetAttributeScriptsNone(self): - attribute_data = {} - expected_data = {} - self.assertEqual( - self.retriever._GetAttributeScripts(attribute_data, self.dest_dir), - expected_data) - self.mock_logger.info.assert_not_called() - - @mock.patch('google_compute_engine.metadata_scripts.script_retriever.tempfile.NamedTemporaryFile') - def testGetScripts(self, mock_tempfile): - script_dest = '/tmp/script' - script_url_dest = '/tmp/script_url' - metadata = { - 'instance': { - 'attributes': { - '%s-script' % self.script_type: 'a', - '%s-script-url' % self.script_type: 'b', - }, - }, - 'project': { - 'attributes': { - '%s-script' % self.script_type: 'c', - '%s-script-url' % self.script_type: 'd', - }, - }, - } - expected_data = { - '%s-script' % self.script_type: script_dest, - '%s-script-url' % self.script_type: script_url_dest, - } - self.mock_watcher.GetMetadata.return_value = metadata - self.retriever.watcher = self.mock_watcher - # Mock saving a script to a file. - mock_dest = mock.Mock() - mock_dest.name = script_dest - mock_tempfile.__enter__.return_value = mock_dest - mock_tempfile.return_value = mock_tempfile - # Mock downloading a script from a URL. 
- mock_download = mock.Mock() - mock_download.return_value = script_url_dest - self.retriever._DownloadScript = mock_download - - self.assertEqual(self.retriever.GetScripts(self.dest_dir), expected_data) - self.assertEqual(self.mock_logger.info.call_count, 2) - self.assertEqual(self.mock_logger.warning.call_count, 0) - mock_dest.write.assert_called_once_with('a') - mock_download.assert_called_once_with('b', self.dest_dir) - - def testGetScriptsNone(self): - metadata = { - 'instance': { - 'attributes': None, - }, - 'project': { - 'attributes': None, - }, - } - expected_data = {} - self.mock_watcher.GetMetadata.return_value = metadata - self.retriever.watcher = self.mock_watcher - self.assertEqual(self.retriever.GetScripts(self.dest_dir), expected_data) - self.mock_logger.info.assert_not_called() - - def testGetScriptsNoMetadata(self): - metadata = None - expected_data = {} - self.mock_watcher.GetMetadata.return_value = metadata - self.retriever.watcher = self.mock_watcher - self.assertEqual(self.retriever.GetScripts(self.dest_dir), expected_data) - self.mock_logger.info.assert_not_called() - self.assertEqual(self.mock_logger.warning.call_count, 2) - - @mock.patch('google_compute_engine.metadata_scripts.script_retriever.tempfile.NamedTemporaryFile') - def testGetScriptsFailed(self, mock_tempfile): - script_dest = '/tmp/script' - script_url_dest = None - metadata = { - 'instance': { - 'attributes': { - '%s-script' % self.script_type: 'a', - '%s-script-url' % self.script_type: 'b', - }, - }, - 'project': { - 'attributes': { - '%s-script' % self.script_type: 'c', - '%s-script-url' % self.script_type: 'd', - }, - }, - } - expected_data = { - '%s-script' % self.script_type: script_dest, - '%s-script-url' % self.script_type: script_url_dest, - } - self.mock_watcher.GetMetadata.return_value = metadata - self.retriever.watcher = self.mock_watcher - # Mock saving a script to a file. 
- mock_dest = mock.Mock() - mock_dest.name = script_dest - mock_tempfile.__enter__.return_value = mock_dest - mock_tempfile.return_value = mock_tempfile - # Mock downloading a script from a URL. - mock_download = mock.Mock() - mock_download.return_value = None - self.retriever._DownloadScript = mock_download - - self.assertEqual(self.retriever.GetScripts(self.dest_dir), expected_data) - self.assertEqual(self.mock_logger.info.call_count, 2) - self.assertEqual(self.mock_logger.warning.call_count, 1) - - -if __name__ == '__main__': - unittest.main() diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/metadata_watcher.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/metadata_watcher.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/metadata_watcher.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/metadata_watcher.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,219 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""A library for watching changes in the metadata server.""" - -import functools -import json -import logging -import os -import socket -import time - -from google_compute_engine.compat import httpclient -from google_compute_engine.compat import urlerror -from google_compute_engine.compat import urlparse -from google_compute_engine.compat import urlrequest - -METADATA_SERVER = 'http://metadata.google.internal/computeMetadata/v1' - - -class StatusException(urlerror.HTTPError): - - def __init__(self, response): - url = response.geturl() - code = response.getcode() - message = httpclient.responses.get(code) - headers = response.headers - super(StatusException, self).__init__(url, code, message, headers, response) - - -def RetryOnUnavailable(func): - """Function decorator to retry on a service unavailable exception.""" - - @functools.wraps(func) - def Wrapper(*args, **kwargs): - while True: - try: - response = func(*args, **kwargs) - except (httpclient.HTTPException, socket.error, urlerror.URLError) as e: - time.sleep(5) - if (isinstance(e, urlerror.HTTPError) - and e.getcode() == httpclient.SERVICE_UNAVAILABLE): - continue - elif isinstance(e, socket.timeout): - continue - raise - else: - if response.getcode() == httpclient.OK: - return response - else: - raise StatusException(response) - return Wrapper - - -class MetadataWatcher(object): - """Watches for changes in metadata.""" - - def __init__(self, logger=None, timeout=60): - """Constructor. - - Args: - logger: logger object, used to write to SysLog and serial port. - timeout: int, timeout in seconds for metadata requests. - """ - self.etag = 0 - self.logger = logger or logging - self.timeout = timeout - - @RetryOnUnavailable - def _GetMetadataRequest(self, metadata_url, params=None, timeout=None): - """Performs a GET request with the metadata headers. - - Args: - metadata_url: string, the URL to perform a GET request on. - params: dictionary, the query parameters in the GET request. 
- timeout: int, timeout in seconds for metadata requests. - - Returns: - HTTP response from the GET request. - - Raises: - urlerror.HTTPError: raises when the GET request fails. - """ - headers = {'Metadata-Flavor': 'Google'} - params = urlparse.urlencode(params or {}) - url = '%s?%s' % (metadata_url, params) - request = urlrequest.Request(url, headers=headers) - request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({})) - timeout = timeout or self.timeout - return request_opener.open(request, timeout=timeout*1.1) - - def _UpdateEtag(self, response): - """Update the etag from an API response. - - Args: - response: HTTP response with a header field. - - Returns: - bool, True if the etag in the response header updated. - """ - etag = response.headers.get('etag', self.etag) - etag_updated = self.etag != etag - self.etag = etag - return etag_updated - - def _GetMetadataUpdate( - self, metadata_key='', recursive=True, wait=True, timeout=None): - """Request the contents of metadata server and deserialize the response. - - Args: - metadata_key: string, the metadata key to watch for changes. - recursive: bool, True if we should recursively watch for metadata changes. - wait: bool, True if we should wait for a metadata change. - timeout: int, timeout in seconds for returning metadata output. - - Returns: - json, the deserialized contents of the metadata server. - """ - metadata_key = os.path.join(metadata_key, '') if recursive else metadata_key - metadata_url = os.path.join(METADATA_SERVER, metadata_key) - params = { - 'alt': 'json', - 'last_etag': self.etag, - 'recursive': recursive, - 'timeout_sec': timeout or self.timeout, - 'wait_for_change': wait, - } - while True: - response = self._GetMetadataRequest( - metadata_url, params=params, timeout=timeout) - etag_updated = self._UpdateEtag(response) - if wait and not etag_updated and not timeout: - # Retry until the etag is updated. 
- continue - else: - # One of the following are true: - # - Waiting for change is not required. - # - The etag is updated. - # - The user specified a request timeout. - break - return json.loads(response.read().decode('utf-8')) - - def _HandleMetadataUpdate( - self, metadata_key='', recursive=True, wait=True, timeout=None, - retry=True): - """Wait for a successful metadata response. - - Args: - metadata_key: string, the metadata key to watch for changes. - recursive: bool, True if we should recursively watch for metadata changes. - wait: bool, True if we should wait for a metadata change. - timeout: int, timeout in seconds for returning metadata output. - retry: bool, True if we should retry on failure. - - Returns: - json, the deserialized contents of the metadata server. - """ - exception = None - while True: - try: - return self._GetMetadataUpdate( - metadata_key=metadata_key, recursive=recursive, wait=wait, - timeout=timeout) - except (httpclient.HTTPException, socket.error, urlerror.URLError) as e: - if not isinstance(e, type(exception)): - exception = e - self.logger.error('GET request error retrieving metadata. %s.', e) - if retry: - continue - else: - break - - def WatchMetadata( - self, handler, metadata_key='', recursive=True, timeout=None): - """Watch for changes to the contents of the metadata server. - - Args: - handler: callable, a function to call with the updated metadata contents. - metadata_key: string, the metadata key to watch for changes. - recursive: bool, True if we should recursively watch for metadata changes. - timeout: int, timeout in seconds for returning metadata output. - """ - while True: - response = self._HandleMetadataUpdate( - metadata_key=metadata_key, recursive=recursive, wait=True, - timeout=timeout) - try: - handler(response) - except Exception as e: - self.logger.exception('Exception calling the response handler. 
%s.', e) - - def GetMetadata( - self, metadata_key='', recursive=True, timeout=None, retry=True): - """Retrieve the contents of metadata server for a metadata key. - - Args: - metadata_key: string, the metadata key to watch for changes. - recursive: bool, True if we should recursively watch for metadata changes. - timeout: int, timeout in seconds for returning metadata output. - retry: bool, True if we should retry on failure. - - Returns: - json, the deserialized contents of the metadata server or None if error. - """ - return self._HandleMetadataUpdate( - metadata_key=metadata_key, recursive=recursive, wait=False, - timeout=timeout, retry=retry) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/networking/ip_forwarding/ip_forwarding.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/networking/ip_forwarding/ip_forwarding.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/networking/ip_forwarding/ip_forwarding.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/networking/ip_forwarding/ip_forwarding.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,99 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Manage IP forwarding on a Google Compute Engine instance. - -When given a list of public endpoint IPs, compare it with the IPs configured -for the associated interfaces, and add or remove addresses from the interfaces -to make them match. -""" - -import logging.handlers - -from google_compute_engine import logger -from google_compute_engine.networking.ip_forwarding import ip_forwarding_utils - - -class IpForwarding(object): - """Manage IP forwarding based on changes to forwarded IPs metadata.""" - - def __init__(self, proto_id=None, debug=False): - """Constructor. - - Args: - proto_id: string, the routing protocol identifier for Google IP changes. - debug: bool, True if debug output should write to the console. - """ - facility = logging.handlers.SysLogHandler.LOG_DAEMON - self.logger = logger.Logger( - name='google-ip-forwarding', debug=debug, facility=facility) - self.ip_forwarding_utils = ip_forwarding_utils.IpForwardingUtils( - logger=self.logger, proto_id=proto_id) - - def _LogForwardedIpChanges( - self, configured, desired, to_add, to_remove, interface): - """Log the planned IP address changes. - - Args: - configured: list, the IP address strings already configured. - desired: list, the IP address strings that will be configured. - to_add: list, the forwarded IP address strings to configure. - to_remove: list, the forwarded IP address strings to delete. - interface: string, the output device to modify. - """ - if not to_add and not to_remove: - return - self.logger.info( - 'Changing %s IPs from %s to %s by adding %s and removing %s.', - interface, configured or None, desired or None, to_add or None, - to_remove or None) - - def _AddForwardedIps(self, forwarded_ips, interface): - """Configure the forwarded IP address on the network interface. - - Args: - forwarded_ips: list, the forwarded IP address strings to configure. - interface: string, the output device to use. 
- """ - for address in forwarded_ips: - self.ip_forwarding_utils.AddForwardedIp(address, interface) - - def _RemoveForwardedIps(self, forwarded_ips, interface): - """Remove the forwarded IP addresses from the network interface. - - Args: - forwarded_ips: list, the forwarded IP address strings to delete. - interface: string, the output device to use. - """ - for address in forwarded_ips: - self.ip_forwarding_utils.RemoveForwardedIp(address, interface) - - def HandleForwardedIps(self, interface, forwarded_ips, interface_ip=None): - """Handle changes to the forwarded IPs on a network interface. - - Args: - interface: string, the output device to configure. - forwarded_ips: list, the forwarded IP address strings desired. - interface_ip: string, current interface ip address. - """ - desired = self.ip_forwarding_utils.ParseForwardedIps(forwarded_ips) - configured = self.ip_forwarding_utils.GetForwardedIps( - interface, interface_ip) - to_add = sorted(set(desired) - set(configured)) - to_remove = sorted(set(configured) - set(desired)) - self._LogForwardedIpChanges( - configured, desired, to_add, to_remove, interface) - self._AddForwardedIps(to_add, interface) - self._RemoveForwardedIps(to_remove, interface) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/networking/ip_forwarding/ip_forwarding_utils.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/networking/ip_forwarding/ip_forwarding_utils.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/networking/ip_forwarding/ip_forwarding_utils.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/networking/ip_forwarding/ip_forwarding_utils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Utilities for configuring IP address forwarding.""" - -from google_compute_engine.compat import distro_utils - - -class IpForwardingUtils(object): - """Deprecated. Overridden for backwards compatibility.""" - - def __new__(self, logger, proto_id=None): - return distro_utils.Utils().IpForwardingUtils(logger, proto_id) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/networking/ip_forwarding/tests/ip_forwarding_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/networking/ip_forwarding/tests/ip_forwarding_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/networking/ip_forwarding/tests/ip_forwarding_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/networking/ip_forwarding/tests/ip_forwarding_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,121 +0,0 @@ -#!/usr/bin/python -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unittest for ip_forwarding.py module.""" - -from google_compute_engine.networking.ip_forwarding import ip_forwarding -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -class IpForwardingTest(unittest.TestCase): - - def setUp(self): - self.mock_logger = mock.Mock() - self.mock_watcher = mock.Mock() - self.mock_ip_forwarding_utils = mock.Mock() - self.mock_setup = mock.create_autospec(ip_forwarding.IpForwarding) - self.mock_setup.logger = self.mock_logger - self.mock_setup.ip_forwarding_utils = self.mock_ip_forwarding_utils - - @mock.patch('google_compute_engine.networking.ip_forwarding.ip_forwarding.ip_forwarding_utils') - @mock.patch('google_compute_engine.networking.ip_forwarding.ip_forwarding.logger') - def testIpForwarding(self, mock_logger, mock_ip_forwarding_utils): - mock_logger_instance = mock.Mock() - mock_logger.Logger.return_value = mock_logger_instance - mocks = mock.Mock() - mocks.attach_mock(mock_logger, 'logger') - mocks.attach_mock(mock_ip_forwarding_utils, 'forwarding') - with mock.patch.object(ip_forwarding.IpForwarding, 'HandleForwardedIps'): - - ip_forwarding.IpForwarding(proto_id='66', debug=True) - expected_calls = [ - mock.call.logger.Logger(name=mock.ANY, debug=True, facility=mock.ANY), - mock.call.forwarding.IpForwardingUtils( - logger=mock_logger_instance, proto_id='66'), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - def testLogForwardedIpChanges(self): - ip_forwarding.IpForwarding._LogForwardedIpChanges( - self.mock_setup, [], [], [], [], '1') - 
ip_forwarding.IpForwarding._LogForwardedIpChanges( - self.mock_setup, ['a'], ['a'], [], [], '2') - ip_forwarding.IpForwarding._LogForwardedIpChanges( - self.mock_setup, ['a'], [], [], ['a'], '3') - ip_forwarding.IpForwarding._LogForwardedIpChanges( - self.mock_setup, ['a', 'b'], ['b'], [], ['a'], '4') - ip_forwarding.IpForwarding._LogForwardedIpChanges( - self.mock_setup, ['a'], ['b'], ['b'], ['a'], '5') - expected_calls = [ - mock.call.info(mock.ANY, '3', ['a'], None, None, ['a']), - mock.call.info(mock.ANY, '4', ['a', 'b'], ['b'], None, ['a']), - mock.call.info(mock.ANY, '5', ['a'], ['b'], ['b'], ['a']), - ] - self.assertEqual(self.mock_logger.mock_calls, expected_calls) - - def testAddForwardedIp(self): - ip_forwarding.IpForwarding._AddForwardedIps( - self.mock_setup, [], 'interface') - self.assertEqual(self.mock_ip_forwarding_utils.mock_calls, []) - - ip_forwarding.IpForwarding._AddForwardedIps( - self.mock_setup, ['a', 'b', 'c'], 'interface') - expected_calls = [ - mock.call.AddForwardedIp('a', 'interface'), - mock.call.AddForwardedIp('b', 'interface'), - mock.call.AddForwardedIp('c', 'interface'), - ] - self.assertEqual(self.mock_ip_forwarding_utils.mock_calls, expected_calls) - - def testRemoveForwardedIp(self): - ip_forwarding.IpForwarding._RemoveForwardedIps( - self.mock_setup, [], 'interface') - self.assertEqual(self.mock_ip_forwarding_utils.mock_calls, []) - - ip_forwarding.IpForwarding._RemoveForwardedIps( - self.mock_setup, ['a', 'b', 'c'], 'interface') - expected_calls = [ - mock.call.RemoveForwardedIp('a', 'interface'), - mock.call.RemoveForwardedIp('b', 'interface'), - mock.call.RemoveForwardedIp('c', 'interface'), - ] - self.assertEqual(self.mock_ip_forwarding_utils.mock_calls, expected_calls) - - def testHandleForwardedIps(self): - configured = ['c', 'c', 'b', 'b', 'a', 'a'] - desired = ['d', 'd', 'c'] - mocks = mock.Mock() - mocks.attach_mock(self.mock_ip_forwarding_utils, 'forwarding') - mocks.attach_mock(self.mock_setup, 'setup') - 
self.mock_ip_forwarding_utils.ParseForwardedIps.return_value = desired - self.mock_ip_forwarding_utils.GetForwardedIps.return_value = configured - forwarded_ips = 'forwarded ips' - interface_ip = 'interface ip' - interface = 'interface' - expected_add = ['d'] - expected_remove = ['a', 'b'] - - ip_forwarding.IpForwarding.HandleForwardedIps( - self.mock_setup, interface, forwarded_ips, interface_ip) - expected_calls = [ - mock.call.forwarding.ParseForwardedIps(forwarded_ips), - mock.call.forwarding.GetForwardedIps(interface, interface_ip), - mock.call.setup._LogForwardedIpChanges( - configured, desired, expected_add, expected_remove, interface), - mock.call.setup._AddForwardedIps(expected_add, interface), - mock.call.setup._RemoveForwardedIps(expected_remove, interface), - ] - self.assertEqual(mocks.mock_calls, expected_calls) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/networking/network_daemon.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/networking/network_daemon.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/networking/network_daemon.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/networking/network_daemon.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,185 +0,0 @@ -#!/usr/bin/python -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -"""Manage networking on a Google Compute Engine instance. - -Run network setup to enable multiple network interfaces on startup. -Update IP forwarding when metadata changes. -""" - -import logging.handlers -import optparse -import random - -from google_compute_engine import config_manager -from google_compute_engine import constants -from google_compute_engine import file_utils -from google_compute_engine import logger -from google_compute_engine import metadata_watcher -from google_compute_engine import network_utils -from google_compute_engine.networking.ip_forwarding import ip_forwarding -from google_compute_engine.networking.network_setup import network_setup - -LOCKFILE = constants.LOCALSTATEDIR + '/lock/google_networking.lock' - - -class NetworkDaemon(object): - """Manage networking based on changes to network metadata.""" - - network_interface_metadata_key = 'instance/network-interfaces' - - def __init__( - self, ip_forwarding_enabled, proto_id, ip_aliases, target_instance_ips, - dhclient_script, dhcp_command, network_setup_enabled, debug=False): - """Constructor. - - Args: - ip_forwarding_enabled: bool, True if ip forwarding is enabled. - proto_id: string, the routing protocol identifier for Google IP changes. - ip_aliases: bool, True if the guest should configure IP alias routes. - target_instance_ips: bool, True supports internal IP load balancing. - dhclient_script: string, the path to a dhclient script used by dhclient. - dhcp_command: string, a command to enable Ethernet interfaces. - network_setup_enabled: bool, True if network setup is enabled. - debug: bool, True if debug output should write to the console. 
- """ - facility = logging.handlers.SysLogHandler.LOG_DAEMON - self.logger = logger.Logger( - name='google-networking', debug=debug, facility=facility) - self.ip_aliases = ip_aliases - self.ip_forwarding_enabled = ip_forwarding_enabled - self.network_setup_enabled = network_setup_enabled - self.target_instance_ips = target_instance_ips - - self.ip_forwarding = ip_forwarding.IpForwarding( - proto_id=proto_id, debug=debug) - self.network_setup = network_setup.NetworkSetup( - dhclient_script=dhclient_script, dhcp_command=dhcp_command, debug=debug) - self.network_utils = network_utils.NetworkUtils(logger=self.logger) - self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger) - - try: - with file_utils.LockFile(LOCKFILE): - self.logger.info('Starting Google Networking daemon.') - timeout = 60 + random.randint(0, 30) - self.watcher.WatchMetadata( - self.HandleNetworkInterfaces, - metadata_key=self.network_interface_metadata_key, recursive=True, - timeout=timeout) - except (IOError, OSError) as e: - self.logger.warning(str(e)) - - def HandleNetworkInterfaces(self, result): - """Called when network interface metadata changes. - - Args: - result: dict, the metadata response with the network interfaces. - """ - network_interfaces = self._ExtractInterfaceMetadata(result) - - if self.network_setup_enabled: - default_interface = network_interfaces[0] - if default_interface.ipv6: - self.network_setup.EnableIpv6([default_interface.name]) - else: - self.network_setup.DisableIpv6([default_interface.name]) - self.network_setup.EnableNetworkInterfaces( - [interface.name for interface in network_interfaces[1:]]) - - for interface in network_interfaces: - if self.ip_forwarding_enabled: - self.ip_forwarding.HandleForwardedIps( - interface.name, interface.forwarded_ips, interface.ip) - - def _ExtractInterfaceMetadata(self, metadata): - """Extracts network interface metadata. - - Args: - metadata: dict, the metadata response with the new network interfaces. 
- - Returns: - list, a list of NetworkInterface objects. - """ - interfaces = [] - for network_interface in metadata: - mac_address = network_interface.get('mac') - interface = self.network_utils.GetNetworkInterface(mac_address) - ip_addresses = [] - if interface: - ip_addresses.extend(network_interface.get('forwardedIps', [])) - if self.ip_aliases: - ip_addresses.extend(network_interface.get('ipAliases', [])) - if self.target_instance_ips: - ip_addresses.extend(network_interface.get('targetInstanceIps', [])) - interfaces.append(NetworkDaemon.NetworkInterface( - interface, forwarded_ips=ip_addresses, - ip=network_interface.get('ip', None), - ipv6='dhcpv6Refresh' in network_interface.keys())) - else: - message = 'Network interface not found for MAC address: %s.' - self.logger.warning(message, mac_address) - return interfaces - - class NetworkInterface(object): - """Network interface information extracted from metadata.""" - - def __init__(self, name, forwarded_ips=None, ip=None, ipv6=False): - self.name = name - self.forwarded_ips = forwarded_ips - self.ip = ip - self.ipv6 = ipv6 - - -def main(): - parser = optparse.OptionParser() - parser.add_option( - '-d', '--debug', action='store_true', dest='debug', - help='print debug output to the console.') - (options, _) = parser.parse_args() - debug = bool(options.debug) - instance_config = config_manager.ConfigManager() - ip_forwarding_daemon_enabled = instance_config.GetOptionBool( - 'Daemons', 'ip_forwarding_daemon') - ip_forwarding_enabled = instance_config.GetOptionBool( - 'NetworkInterfaces', 'ip_forwarding') or ip_forwarding_daemon_enabled - network_setup_enabled = instance_config.GetOptionBool( - 'NetworkInterfaces', 'setup') - network_daemon_enabled = instance_config.GetOptionBool( - 'Daemons', 'network_daemon') - proto_id = instance_config.GetOptionString( - 'IpForwarding', 'ethernet_proto_id') - ip_aliases = instance_config.GetOptionBool( - 'IpForwarding', 'ip_aliases') - target_instance_ips = 
instance_config.GetOptionBool( - 'IpForwarding', 'target_instance_ips') - dhclient_script = instance_config.GetOptionString( - 'NetworkInterfaces', 'dhclient_script') - dhcp_command = instance_config.GetOptionString( - 'NetworkInterfaces', 'dhcp_command') - - if network_daemon_enabled: - NetworkDaemon( - ip_forwarding_enabled=ip_forwarding_enabled, - proto_id=proto_id, - ip_aliases=ip_aliases, - target_instance_ips=target_instance_ips, - dhclient_script=dhclient_script, - dhcp_command=dhcp_command, - network_setup_enabled=network_setup_enabled, - debug=debug) - - -if __name__ == '__main__': - main() diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/networking/network_setup/network_setup.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/networking/network_setup/network_setup.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/networking/network_setup/network_setup.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/networking/network_setup/network_setup.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,108 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Enables the network interfaces for multi-nic support.""" - -import logging.handlers -import subprocess - -from google_compute_engine import logger -from google_compute_engine.compat import distro_utils - - -class NetworkSetup(object): - """Enable network interfaces.""" - - interfaces = set() - network_interfaces = 'instance/network-interfaces' - - def __init__(self, dhclient_script=None, dhcp_command=None, debug=False): - """Constructor. - - Args: - dhclient_script: string, the path to a dhclient script used by dhclient. - dhcp_command: string, a command to enable Ethernet interfaces. - debug: bool, True if debug output should write to the console. - """ - self.dhclient_script = dhclient_script or '/sbin/google-dhclient-script' - self.dhcp_command = dhcp_command - facility = logging.handlers.SysLogHandler.LOG_DAEMON - self.logger = logger.Logger( - name='network-setup', debug=debug, facility=facility) - self.distro_utils = distro_utils.Utils(debug=debug) - self.ipv6_initialized = False - self.ipv6_interfaces = set() - - def EnableIpv6(self, interfaces): - """Enable IPv6 on the list of network interfaces. - - Args: - interfaces: list of string, the output device names for enabling IPv6. - """ - if not interfaces or self.ipv6_interfaces == set(interfaces): - return - - self.logger.info('Enabling IPv6 on Ethernet interface: %s.', interfaces) - self.ipv6_interfaces = self.ipv6_interfaces.union(set(interfaces)) - self.ipv6_initialized = True - - # Distro-specific setup for enabling IPv6 on network interfaces. - self.distro_utils.EnableIpv6( - interfaces, self.logger, dhclient_script=self.dhclient_script) - - def DisableIpv6(self, interfaces): - """Disable IPv6 on the list of network interfaces. - - Args: - interfaces: list of string, the output device names for disabling IPv6. - """ - # Allow to run once during Initialization and after that only when an - # interface is found in the ipv6_interfaces set. 
- if not interfaces or ( - self.ipv6_initialized and not self.ipv6_interfaces.intersection( - set(interfaces))): - return - - self.logger.info('Disabling IPv6 on Ethernet interface: %s.', interfaces) - self.ipv6_interfaces.difference_update(interfaces) - self.ipv6_initialized = True - - # Distro-specific setup for disabling IPv6 on network interfaces. - self.distro_utils.DisableIpv6(interfaces, self.logger) - - def EnableNetworkInterfaces(self, interfaces): - """Enable the list of network interfaces. - - Args: - interfaces: list of string, the output device names to enable. - """ - # The default Ethernet interface is enabled by default. Do not attempt to - # enable interfaces if only one interface is specified in metadata. - if not interfaces or set(interfaces) == self.interfaces: - return - - self.logger.info('Ethernet interfaces: %s.', interfaces) - self.interfaces = set(interfaces) - - if self.dhcp_command: - try: - subprocess.check_call([self.dhcp_command]) - except subprocess.CalledProcessError: - self.logger.warning('Could not enable Ethernet interfaces.') - return - - # Distro-specific setup for network interfaces. 
- self.distro_utils.EnableNetworkInterfaces( - interfaces, self.logger, dhclient_script=self.dhclient_script) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/networking/network_setup/tests/network_setup_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/networking/network_setup/tests/network_setup_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/networking/network_setup/tests/network_setup_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/networking/network_setup/tests/network_setup_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,190 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unittest for network_setup.py module.""" - -import subprocess - -from google_compute_engine.networking.network_setup import network_setup -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -class NetworkSetupTest(unittest.TestCase): - - def setUp(self): - self.mock_logger = mock.Mock() - self.mock_distro_utils = mock.Mock() - self.dhclient_script = '/bin/script' - self.dhcp_command = '' - self.setup = network_setup.NetworkSetup( - dhclient_script=self.dhclient_script, dhcp_command=self.dhcp_command, - debug=False) - self.setup.distro_utils = self.mock_distro_utils - self.setup.logger = self.mock_logger - - @mock.patch('google_compute_engine.networking.network_setup.network_setup.subprocess.check_call') - def testEnableIpv6(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - mocks.attach_mock(self.mock_logger, 'logger') - mocks.attach_mock(self.mock_distro_utils.EnableIpv6, 'enable') - mock_call.side_effect = [None, subprocess.CalledProcessError(1, 'Test')] - - # Return immediately with no interfaces. - network_setup.NetworkSetup.EnableIpv6(self.setup, None) - network_setup.NetworkSetup.EnableIpv6(self.setup, []) - # Enable interfaces. - network_setup.NetworkSetup.EnableIpv6(self.setup, ['A', 'B']) - self.assertEqual(self.setup.ipv6_interfaces, set(['A', 'B'])) - # Add a new interface. - network_setup.NetworkSetup.EnableIpv6(self.setup, ['A', 'B', 'C']) - self.assertEqual(self.setup.ipv6_interfaces, set(['A', 'B', 'C'])) - # Interfaces are already enabled, do nothing. 
- network_setup.NetworkSetup.EnableIpv6(self.setup, ['A', 'B', 'C']) - self.assertEqual(self.setup.ipv6_interfaces, set(['A', 'B', 'C'])) - expected_calls = [ - mock.call.logger.info(mock.ANY, ['A', 'B']), - mock.call.enable(['A', 'B'], mock.ANY, dhclient_script='/bin/script'), - mock.call.logger.info(mock.ANY, ['A', 'B', 'C']), - mock.call.enable( - ['A', 'B', 'C'], mock.ANY, dhclient_script='/bin/script'), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.networking.network_setup.network_setup.subprocess.check_call') - def testDisableIpv6(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - mocks.attach_mock(self.mock_logger, 'logger') - mocks.attach_mock(self.mock_distro_utils.EnableIpv6, 'enable') - mocks.attach_mock(self.mock_distro_utils.DisableIpv6, 'disable') - expected_calls = [] - - # Clean run, run disable once e.g. at boot. - network_setup.NetworkSetup.DisableIpv6(self.setup, ['A']) - self.assertEqual(self.setup.ipv6_interfaces, set([])) - # No more disables allowed, have to follow the contract of Enable and then - # Disable. - network_setup.NetworkSetup.DisableIpv6(self.setup, ['A']) - expected_calls.extend( - [ - mock.call.logger.info(mock.ANY, ['A']), - mock.call.disable(['A'], mock.ANY), - ]) - # Enable interfaces. - network_setup.NetworkSetup.EnableIpv6(self.setup, ['A', 'B', 'C']) - expected_calls.extend( - [ - mock.call.logger.info(mock.ANY, ['A', 'B', 'C']), - mock.call.enable( - ['A', 'B', 'C'], mock.ANY, dhclient_script='/bin/script'), - ]) - # Remove interface. - network_setup.NetworkSetup.DisableIpv6(self.setup, ['A']) - self.assertEqual(self.setup.ipv6_interfaces, set(['B', 'C'])) - expected_calls.extend( - [ - mock.call.logger.info(mock.ANY, ['A']), - mock.call.disable(['A'], mock.ANY), - ]) - - # Add it back. 
- network_setup.NetworkSetup.EnableIpv6(self.setup, ['A']) - self.assertEqual(self.setup.ipv6_interfaces, set(['A', 'B', 'C'])) - expected_calls.extend( - [ - mock.call.logger.info(mock.ANY, ['A']), - mock.call.enable(['A'], mock.ANY, dhclient_script='/bin/script'), - ]) - - # Remove list. - network_setup.NetworkSetup.DisableIpv6(self.setup, ['A', 'B']) - self.assertEqual(self.setup.ipv6_interfaces, set(['C'])) - expected_calls.extend( - [ - mock.call.logger.info(mock.ANY, ['A', 'B']), - mock.call.disable(['A', 'B'], mock.ANY), - ]) - - # Try removing again, these are no ops. - network_setup.NetworkSetup.DisableIpv6(self.setup, ['A']) - network_setup.NetworkSetup.DisableIpv6(self.setup, ['A', 'B']) - - # Remove the last element. - network_setup.NetworkSetup.DisableIpv6(self.setup, ['C']) - self.assertEqual(self.setup.ipv6_interfaces, set([])) - expected_calls.extend( - [ - mock.call.logger.info(mock.ANY, ['C']), - mock.call.disable(['C'], mock.ANY), - ]) - - # Empty list, allow adds back again. - network_setup.NetworkSetup.EnableIpv6(self.setup, ['A']) - self.assertEqual(self.setup.ipv6_interfaces, set(['A'])) - expected_calls.extend( - [ - mock.call.logger.info(mock.ANY, ['A']), - mock.call.enable(['A'], mock.ANY, dhclient_script='/bin/script'), - ]) - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.networking.network_setup.network_setup.subprocess.check_call') - def testEnableNetworkInterfaces(self, mock_call): - mocks = mock.Mock() - mocks.attach_mock(mock_call, 'call') - mocks.attach_mock(self.mock_logger, 'logger') - mocks.attach_mock(self.mock_distro_utils.EnableNetworkInterfaces, 'enable') - mock_call.side_effect = [None, subprocess.CalledProcessError(1, 'Test')] - - # Return immediately with no interfaces. - network_setup.NetworkSetup.EnableNetworkInterfaces(self.setup, None) - network_setup.NetworkSetup.EnableNetworkInterfaces(self.setup, []) - # Enable interfaces. 
- network_setup.NetworkSetup.EnableNetworkInterfaces( - self.setup, ['A', 'B']) - self.assertEqual(self.setup.interfaces, set(['A', 'B'])) - # Add a new interface. - network_setup.NetworkSetup.EnableNetworkInterfaces( - self.setup, ['A', 'B', 'C']) - self.assertEqual(self.setup.interfaces, set(['A', 'B', 'C'])) - # Interfaces are already enabled. - network_setup.NetworkSetup.EnableNetworkInterfaces( - self.setup, ['A', 'B', 'C']) - self.assertEqual(self.setup.interfaces, set(['A', 'B', 'C'])) - # Run a user supplied command successfully. - self.setup.dhcp_command = 'success' - network_setup.NetworkSetup.EnableNetworkInterfaces( - self.setup, ['D', 'E']) - self.assertEqual(self.setup.interfaces, set(['D', 'E'])) - # Run a user supplied command and logger error messages. - self.setup.dhcp_command = 'failure' - network_setup.NetworkSetup.EnableNetworkInterfaces( - self.setup, ['F', 'G']) - self.assertEqual(self.setup.interfaces, set(['F', 'G'])) - expected_calls = [ - mock.call.logger.info(mock.ANY, ['A', 'B']), - mock.call.enable(['A', 'B'], mock.ANY, dhclient_script='/bin/script'), - mock.call.logger.info(mock.ANY, ['A', 'B', 'C']), - mock.call.enable( - ['A', 'B', 'C'], mock.ANY, dhclient_script='/bin/script'), - mock.call.logger.info(mock.ANY, ['D', 'E']), - mock.call.call(['success']), - mock.call.logger.info(mock.ANY, ['F', 'G']), - mock.call.call(['failure']), - mock.call.logger.warning(mock.ANY), - ] - self.assertEqual(mocks.mock_calls, expected_calls) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/networking/tests/network_daemon_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/networking/tests/network_daemon_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/networking/tests/network_daemon_test.py 2019-08-01 23:32:44.000000000 +0000 +++ 
gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/networking/tests/network_daemon_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,329 +0,0 @@ -#!/usr/bin/python -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unittest for network_daemon.py module.""" - -from google_compute_engine import network_utils -from google_compute_engine.networking import network_daemon -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -class NetworkDaemonTest(unittest.TestCase): - - def setUp(self): - self.mock_logger = mock.Mock() - self.mock_watcher = mock.Mock() - self.mock_setup = mock.create_autospec(network_daemon.NetworkDaemon) - self.mock_setup.logger = self.mock_logger - self.mock_setup.watcher = self.mock_watcher - self.mock_ip_forwarding = mock.Mock() - self.mock_network_setup = mock.Mock() - self.mock_network_utils = mock.Mock() - self.mock_setup.ip_forwarding = self.mock_ip_forwarding - self.mock_setup.network_setup = self.mock_network_setup - self.mock_setup.network_utils = self.mock_network_utils - - @mock.patch('google_compute_engine.networking.network_daemon.ip_forwarding') - @mock.patch('google_compute_engine.networking.network_daemon.network_setup') - @mock.patch('google_compute_engine.networking.network_daemon.network_utils') - @mock.patch('google_compute_engine.networking.network_daemon.metadata_watcher') - 
@mock.patch('google_compute_engine.networking.network_daemon.logger') - @mock.patch('google_compute_engine.networking.network_daemon.file_utils') - def testNetworkDaemon( - self, mock_lock, mock_logger, mock_watcher, mock_network_utils, - mock_network_setup, mock_ip_forwarding): - mock_logger_instance = mock.Mock() - mock_logger.Logger.return_value = mock_logger_instance - mocks = mock.Mock() - mocks.attach_mock(mock_lock, 'lock') - mocks.attach_mock(mock_logger, 'logger') - mocks.attach_mock(mock_network_utils, 'network') - mocks.attach_mock(mock_ip_forwarding, 'forwarding') - mocks.attach_mock(mock_network_setup, 'network_setup') - mocks.attach_mock(mock_watcher, 'watcher') - metadata_key = network_daemon.NetworkDaemon.network_interface_metadata_key - - with mock.patch.object( - network_daemon.NetworkDaemon, 'HandleNetworkInterfaces' - ) as mock_handle: - network_daemon.NetworkDaemon( - ip_forwarding_enabled=True, - proto_id='66', - ip_aliases=None, - target_instance_ips=None, - dhclient_script='x', - dhcp_command='y', - network_setup_enabled=True, - debug=True) - expected_calls = [ - mock.call.logger.Logger(name=mock.ANY, debug=True, facility=mock.ANY), - mock.call.forwarding.IpForwarding(proto_id='66', debug=True), - mock.call.network_setup.NetworkSetup( - debug=True, dhclient_script='x', dhcp_command='y'), - mock.call.network.NetworkUtils(logger=mock_logger_instance), - mock.call.watcher.MetadataWatcher(logger=mock_logger_instance), - mock.call.lock.LockFile(network_daemon.LOCKFILE), - mock.call.lock.LockFile().__enter__(), - mock.call.logger.Logger().info(mock.ANY), - mock.call.watcher.MetadataWatcher().WatchMetadata( - mock_handle, metadata_key=metadata_key, recursive=True, - timeout=mock.ANY), - mock.call.lock.LockFile().__exit__(None, None, None), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.networking.network_daemon.ip_forwarding') - 
@mock.patch('google_compute_engine.networking.network_daemon.network_setup') - @mock.patch('google_compute_engine.networking.network_daemon.network_utils') - @mock.patch('google_compute_engine.networking.network_daemon.metadata_watcher') - @mock.patch('google_compute_engine.networking.network_daemon.logger') - @mock.patch('google_compute_engine.networking.network_daemon.file_utils') - def testNetworkDaemonError( - self, mock_lock, mock_logger, mock_watcher, mock_network_utils, - mock_network_setup, mock_ip_forwarding): - mock_logger_instance = mock.Mock() - mock_logger.Logger.return_value = mock_logger_instance - mocks = mock.Mock() - mocks.attach_mock(mock_lock, 'lock') - mocks.attach_mock(mock_logger, 'logger') - mocks.attach_mock(mock_ip_forwarding, 'forwarding') - mocks.attach_mock(mock_network_setup, 'network_setup') - mocks.attach_mock(mock_network_utils, 'network') - mocks.attach_mock(mock_watcher, 'watcher') - self.mock_setup._ExtractInterfaceMetadata.return_value = [] - mock_lock.LockFile.side_effect = IOError('Test Error') - - with mock.patch.object( - network_daemon.NetworkDaemon, 'HandleNetworkInterfaces'): - network_daemon.NetworkDaemon( - ip_forwarding_enabled=False, - proto_id='66', - ip_aliases=None, - target_instance_ips=None, - dhclient_script='x', - dhcp_command='y', - network_setup_enabled=False, - debug=True) - expected_calls = [ - mock.call.logger.Logger(name=mock.ANY, debug=True, facility=mock.ANY), - mock.call.forwarding.IpForwarding(proto_id='66', debug=True), - mock.call.network_setup.NetworkSetup( - debug=True, dhclient_script='x', dhcp_command='y'), - mock.call.network.NetworkUtils(logger=mock_logger_instance), - mock.call.watcher.MetadataWatcher(logger=mock_logger_instance), - mock.call.lock.LockFile(network_daemon.LOCKFILE), - mock.call.logger.Logger().warning('Test Error'), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - def testHandleNetworkInterfaces(self): - mocks = mock.Mock() - 
mocks.attach_mock(self.mock_ip_forwarding, 'forwarding') - mocks.attach_mock(self.mock_network_setup, 'network_setup') - mocks.attach_mock(self.mock_setup, 'setup') - self.mock_setup.ip_aliases = None - self.mock_setup.target_instance_ips = None - self.mock_setup.ip_forwarding_enabled = True - self.mock_setup.network_setup_enabled = True - self.mock_setup._ExtractInterfaceMetadata.return_value = [ - network_daemon.NetworkDaemon.NetworkInterface( - 'eth0', forwarded_ips=['a'], ip='1.1.1.1', ipv6=False), - network_daemon.NetworkDaemon.NetworkInterface('eth1'), - ] - result = mock.Mock() - - network_daemon.NetworkDaemon.HandleNetworkInterfaces( - self.mock_setup, result) - expected_calls = [ - mock.call.setup._ExtractInterfaceMetadata(result), - mock.call.network_setup.DisableIpv6(['eth0']), - mock.call.network_setup.EnableNetworkInterfaces(['eth1']), - mock.call.forwarding.HandleForwardedIps( - 'eth0', ['a'], '1.1.1.1'), - mock.call.forwarding.HandleForwardedIps('eth1', None, None), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - def testHandleNetworkInterfacesIpv6(self): - mocks = mock.Mock() - mocks.attach_mock(self.mock_ip_forwarding, 'forwarding') - mocks.attach_mock(self.mock_network_setup, 'network_setup') - mocks.attach_mock(self.mock_setup, 'setup') - self.mock_setup.ip_aliases = None - self.mock_setup.target_instance_ips = None - self.mock_setup.ip_forwarding_enabled = True - self.mock_setup.network_setup_enabled = True - self.mock_setup._ExtractInterfaceMetadata.return_value = [ - network_daemon.NetworkDaemon.NetworkInterface( - 'eth0', forwarded_ips=['a'], ip='1.1.1.1', ipv6=True), - ] - result = mock.Mock() - - network_daemon.NetworkDaemon.HandleNetworkInterfaces( - self.mock_setup, result) - expected_calls = [ - mock.call.setup._ExtractInterfaceMetadata(result), - mock.call.network_setup.EnableIpv6(['eth0']), - mock.call.network_setup.EnableNetworkInterfaces([]), - mock.call.forwarding.HandleForwardedIps( - 'eth0', ['a'], '1.1.1.1'), - ] - 
self.assertEqual(mocks.mock_calls, expected_calls) - - def testHandleNetworkInterfacesIpv6Disabled(self): - mocks = mock.Mock() - mocks.attach_mock(self.mock_ip_forwarding, 'forwarding') - mocks.attach_mock(self.mock_network_setup, 'network_setup') - mocks.attach_mock(self.mock_setup, 'setup') - self.mock_setup.ip_aliases = None - self.mock_setup.target_instance_ips = None - self.mock_setup.ip_forwarding_enabled = True - self.mock_setup.network_setup_enabled = True - self.mock_setup._ExtractInterfaceMetadata.return_value = [ - network_daemon.NetworkDaemon.NetworkInterface( - 'eth0', forwarded_ips=['a'], ip='1.1.1.1', ipv6=False), - ] - result = mock.Mock() - - network_daemon.NetworkDaemon.HandleNetworkInterfaces( - self.mock_setup, result) - expected_calls = [ - mock.call.setup._ExtractInterfaceMetadata(result), - mock.call.network_setup.DisableIpv6(['eth0']), - mock.call.network_setup.EnableNetworkInterfaces([]), - mock.call.forwarding.HandleForwardedIps( - 'eth0', ['a'], '1.1.1.1'), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - def testHandleNetworkInterfacesDisabled(self): - mocks = mock.Mock() - mocks.attach_mock(self.mock_ip_forwarding, 'forwarding') - mocks.attach_mock(self.mock_network_setup, 'network_setup') - mocks.attach_mock(self.mock_setup, 'setup') - self.mock_setup.ip_aliases = None - self.mock_setup.target_instance_ips = None - self.mock_setup.ip_forwarding_enabled = False - self.mock_setup.network_setup_enabled = False - self.mock_setup._ExtractInterfaceMetadata.return_value = [ - network_daemon.NetworkDaemon.NetworkInterface('a'), - network_daemon.NetworkDaemon.NetworkInterface('b'), - ] - result = mock.Mock() - - network_daemon.NetworkDaemon.HandleNetworkInterfaces( - self.mock_setup, result) - expected_calls = [ - mock.call.setup._ExtractInterfaceMetadata(result), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - def testExtractInterfaceMetadata(self): - self.mock_setup.ip_aliases = True - 
self.mock_setup.target_instance_ips = True - self.mock_setup.network_utils = network_utils.NetworkUtils() - self.mock_setup.network_utils.interfaces = { - '1': 'eth0', '2': 'eth1', '3': 'eth2', - } - metadata = [ - { - 'mac': '1', - 'forwardedIps': ['a'], - 'dhcpv6Refresh': 1, - }, - { - 'mac': '2', - 'forwardedIps': ['b'], - 'ipAliases': ['banana'], - 'targetInstanceIps': ['baklava'], - 'ip': '2.2.2.2', - 'dhcpv6Refresh': 2, - }, - { - 'mac': '3', - 'ipAliases': ['cherry'], - 'targetInstanceIps': ['cake'], - }, - { - 'mac': '4', - }, - { - 'forwardedIps': ['d'], - 'ipAliases': ['date'], - 'targetInstanceIps': ['doughnuts'], - }, - ] - expected_interfaces = [ - network_daemon.NetworkDaemon.NetworkInterface( - 'eth0', forwarded_ips=['a'], ip=None, ipv6=True), - network_daemon.NetworkDaemon.NetworkInterface( - 'eth1', forwarded_ips=['b', 'banana', 'baklava'], ip='2.2.2.2', - ipv6=True), - network_daemon.NetworkDaemon.NetworkInterface( - 'eth2', forwarded_ips=['cherry', 'cake'], ip=None), - ] - - actual_interfaces = network_daemon.NetworkDaemon._ExtractInterfaceMetadata( - self.mock_setup, metadata) - for actual, expected in zip(actual_interfaces, expected_interfaces): - self.assertEqual(actual.name, expected.name) - self.assertEqual(actual.forwarded_ips, expected.forwarded_ips) - self.assertEqual(actual.ip, expected.ip) - self.assertEqual(actual.ipv6, expected.ipv6) - - def testExtractInterfaceMetadataWithoutOptions(self): - self.mock_setup.ip_aliases = None - self.mock_setup.target_instance_ips = None - self.mock_setup.network_utils = network_utils.NetworkUtils() - self.mock_setup.network_utils.interfaces = { - '1': 'eth0', '2': 'eth1', '3': 'eth2', - } - metadata = [ - { - 'mac': '1', - 'forwardedIps': ['a'], - 'dhcpv6Refresh': 1, - }, - { - 'mac': '2', - 'forwardedIps': ['b'], - 'ipAliases': ['banana'], - 'targetInstanceIps': ['baklava'], - 'ip': '2.2.2.2', - 'dhcpv6Refresh': 2, - }, - { - 'mac': '3', - 'ipAliases': ['cherry'], - 'targetInstanceIps': ['cake'], - 
}, - ] - expected_interfaces = [ - network_daemon.NetworkDaemon.NetworkInterface( - 'eth0', forwarded_ips=['a'], ip=None, ipv6=True), - network_daemon.NetworkDaemon.NetworkInterface( - 'eth1', forwarded_ips=['b'], ip='2.2.2.2', ipv6=True), - network_daemon.NetworkDaemon.NetworkInterface( - 'eth2', forwarded_ips=[], ip=None, ipv6=False), - ] - - actual_interfaces = network_daemon.NetworkDaemon._ExtractInterfaceMetadata( - self.mock_setup, metadata) - for actual, expected in zip(actual_interfaces, expected_interfaces): - self.assertEqual(actual.name, expected.name) - self.assertEqual(actual.forwarded_ips, expected.forwarded_ips) - self.assertEqual(actual.ip, expected.ip) - self.assertEqual(actual.ipv6, expected.ipv6) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/network_utils.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/network_utils.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/network_utils.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/network_utils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,101 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Utilities for configuring IP address forwarding.""" - -import logging -import os -import re -try: - import netifaces -except ImportError: - netifaces = None - - -MAC_REGEX = re.compile(r'\A([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2})\Z') - - -class NetworkUtils(object): - """System network Ethernet interface utilities.""" - - def __init__(self, logger=logging): - """Constructor. - - Args: - logger: logger object, used to write to SysLog and serial port. - """ - self.logger = logger - self.interfaces = self._CreateInterfaceMap() - - def _CreateInterfaceMap(self): - """Generate a dictionary mapping MAC address to Ethernet interfaces. - - Returns: - dict, string MAC addresses mapped to the string network interface name. - """ - if netifaces: - return self._CreateInterfaceMapNetifaces() - else: - return self._CreateInterfaceMapSysfs() - - def _CreateInterfaceMapSysfs(self): - """Generate a dictionary mapping MAC address to Ethernet interfaces. - - Returns: - dict, string MAC addresses mapped to the string network interface name. - """ - interfaces = {} - for interface in os.listdir('/sys/class/net'): - try: - mac_address = open( - '/sys/class/net/%s/address' % interface).read().strip() - except (IOError, OSError) as e: - message = 'Unable to determine MAC address for %s. %s.' - self.logger.warning(message, interface, str(e)) - else: - interfaces[mac_address] = interface - return interfaces - - def _CreateInterfaceMapNetifaces(self): - """Generate a dictionary mapping MAC address to Ethernet interfaces. - - Returns: - dict, string MAC addresses mapped to the string network interface name. - """ - - interfaces = {} - for interface in netifaces.interfaces(): - af_link = netifaces.ifaddresses(interface).get(netifaces.AF_LINK, []) - mac_address = next(iter(af_link), {}).get('addr', '') - # In some systems this field can come with an empty string or with the - # name of the interface when there is no MAC address associated with it. - # Check the regex to be sure. 
- if MAC_REGEX.match(mac_address): - interfaces[mac_address] = interface - else: - message = 'Unable to determine MAC address for %s.' - self.logger.warning(message, interface) - return interfaces - - def GetNetworkInterface(self, mac_address): - """Get the name of the network interface associated with a MAC address. - - Args: - mac_address: string, the hardware address of the network interface. - - Returns: - string, the network interface associated with a MAC address or None. - """ - return self.interfaces.get(mac_address) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/test_compat.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/test_compat.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/test_compat.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/test_compat.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,45 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""A module for resolving compatibility issues between Python 2 and Python 3.""" - -import sys - -from google_compute_engine.compat import httpclient -from google_compute_engine.compat import parser -from google_compute_engine.compat import urlerror -from google_compute_engine.compat import urlparse -from google_compute_engine.compat import urlrequest -from google_compute_engine.compat import urlretrieve - -# Import the mock module in Python 3.2. -if sys.version_info >= (3, 3): - import unittest.mock as mock -else: - import mock - -# Import the unittest2 module to backport testing features to Python 2.6. -if sys.version_info >= (2, 7): - import unittest -else: - import unittest2 as unittest - -builtin = 'builtins' if sys.version_info >= (3,) else '__builtin__' - -# Import the reload module to re-import modules for testing compat. -if sys.version_info >= (3, 4): - from importlib import reload as reload_import -else: - from imp import reload as reload_import diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/tests/compat_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/tests/compat_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/tests/compat_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/tests/compat_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,121 +0,0 @@ -#!/usr/bin/python -# Copyright 2017 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unittest for compat.py module.""" - -import sys - -import google_compute_engine.compat -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import reload_import -from google_compute_engine.test_compat import unittest -from google_compute_engine.test_compat import urlretrieve - - -class CompatTest(unittest.TestCase): - - @mock.patch('google_compute_engine.compat.subprocess.check_call') - def testCurlRetrieve(self, mock_call): - url = 'http://www.example.com/script.sh' - filename = None - expected = ['curl', '--max-time', mock.ANY, '--retry', mock.ANY, '--', url] - - if sys.version_info < (2, 7, 9): - urlretrieve.urlretrieve(url, filename) - mock_call.assert_called_with(expected) - else: - pass - - @mock.patch('google_compute_engine.compat.subprocess.check_call') - def testCurlRetrieveFilename(self, mock_call): - url = 'http://www.example.com/script.sh' - filename = '/tmp/filename.txt' - expected = [ - 'curl', '--max-time', mock.ANY, '--retry', mock.ANY, '-o', filename, - '--', url, - ] - - if sys.version_info < (2, 7, 9): - urlretrieve.urlretrieve(url, filename) - mock_call.assert_called_with(expected) - else: - pass - - @mock.patch('google_compute_engine.compat.subprocess.check_call') - @mock.patch('google_compute_engine.compat.urlretrieve.urlretrieve') - def testUrlRetrieve(self, mock_retrieve, mock_call): - url = 'http://www.example.com/script.sh' - filename = '/tmp/filename.txt' - args = ['arg1', 'arg2', 'arg3'] - kwargs = {'kwarg1': 1, 'kwarg2': 2} - - if sys.version_info >= (2, 7, 9): - 
urlretrieve.urlretrieve(url, filename, *args, **kwargs) - mock_retrieve.assert_called_once_with(url, filename, *args, **kwargs) - mock_call.assert_not_called() - else: - pass - - @mock.patch('google_compute_engine.compat.distro.linux_distribution') - def testDistroCompatLinux(self, mock_call): - test_cases = { - ('Fedora', '28', ''): - google_compute_engine.distro_lib.el_7.utils, - ('debian', '8.10', ''): - google_compute_engine.distro_lib.debian_8.utils, - ('debian', '9.3', ''): - google_compute_engine.distro_lib.debian_9.utils, - ('debian', '10.3', ''): - google_compute_engine.distro_lib.debian_9.utils, - ('SUSE Linux Enterprise Server', '11', 'x86_64'): - google_compute_engine.distro_lib.sles_11.utils, - ('SUSE Linux Enterprise Server', '12', 'x86_64'): - google_compute_engine.distro_lib.sles_12.utils, - ('SUSE Linux Enterprise Server', '13', 'x86_64'): - google_compute_engine.distro_lib.sles_12.utils, - ('CentOS Linux', '6.4.3', 'Core'): - google_compute_engine.distro_lib.el_6.utils, - ('CentOS Linux', '7.4.1708', 'Core'): - google_compute_engine.distro_lib.el_7.utils, - ('CentOS Linux', '8.4.3', 'Core'): - google_compute_engine.distro_lib.el_7.utils, - ('Red Hat Enterprise Linux Server', '6.3.2', ''): - google_compute_engine.distro_lib.el_6.utils, - ('Red Hat Enterprise Linux Server', '7.4', ''): - google_compute_engine.distro_lib.el_7.utils, - ('Red Hat Enterprise Linux Server', '8.5.1', ''): - google_compute_engine.distro_lib.el_7.utils, - ('', '', ''): - google_compute_engine.distro_lib.debian_9.utils, - ('xxxx', 'xxxx', 'xxxx'): - google_compute_engine.distro_lib.debian_9.utils, - } - - for distro in test_cases: - mock_call.return_value = distro - reload_import(google_compute_engine.compat) - self.assertEqual( - test_cases[distro], google_compute_engine.compat.distro_utils) - - @mock.patch('google_compute_engine.compat.sys.platform', 'freebsd11') - def testDistroCompatFreeBSD(self): - reload_import(google_compute_engine.compat) - self.assertEqual( - 
google_compute_engine.distro_lib.freebsd_11.utils, - google_compute_engine.compat.distro_utils) - - -if __name__ == '__main__': - unittest.main() diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/tests/config_manager_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/tests/config_manager_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/tests/config_manager_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/tests/config_manager_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,177 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unittest for config_manager.py module.""" - -from google_compute_engine import config_manager -from google_compute_engine.test_compat import builtin -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -def _HasOption(_, option): - """Validate the option exists in the config file. - - Args: - option: string, the config option to check. - - Returns: - bool, True if test is not in the option name. - """ - return 'test' not in option - - -def _HasSection(section): - """Validate the section exists in the config file. 
- - Args: - section: string, the config section to check. - - Returns: - bool, True if test is not in the section name. - """ - return 'test' not in section - - -class ConfigManagerTest(unittest.TestCase): - - option = 'option' - section = 'section' - value = 'value' - - def setUp(self): - self.mock_config = mock.Mock() - self.mock_config.has_option.side_effect = _HasOption - self.mock_config.has_section.side_effect = _HasSection - config_manager.parser.Parser = mock.Mock() - config_manager.parser.Parser.return_value = self.mock_config - - self.config_file = 'test.cfg' - self.config_header = 'Config file header.' - - self.mock_config_manager = config_manager.ConfigManager( - config_file=self.config_file, config_header=self.config_header) - - def testAddHeader(self): - mock_fp = mock.Mock() - self.mock_config_manager._AddHeader(mock_fp) - expected_calls = [ - mock.call('# %s' % self.config_header), - mock.call('\n\n'), - ] - self.assertEqual(mock_fp.write.mock_calls, expected_calls) - - def testGetOptionString(self): - self.mock_config_manager.GetOptionString(self.section, self.option) - expected_calls = [ - mock.call.read(self.config_file), - mock.call.has_option(self.section, self.option), - mock.call.get(self.section, self.option), - ] - self.assertEqual(self.mock_config.mock_calls, expected_calls) - - def testGetOptionStringNoOption(self): - option = 'test-option' - self.assertIsNone( - self.mock_config_manager.GetOptionString(self.section, option)) - expected_calls = [ - mock.call.read(self.config_file), - mock.call.has_option(self.section, option), - ] - self.assertEqual(self.mock_config.mock_calls, expected_calls) - - def testGetOptionBool(self): - self.mock_config_manager.GetOptionBool(self.section, self.option) - expected_calls = [ - mock.call.read(self.config_file), - mock.call.has_option(self.section, self.option), - mock.call.getboolean(self.section, self.option), - ] - self.assertEqual(self.mock_config.mock_calls, expected_calls) - - def 
testGetOptionBoolNoOption(self): - option = 'test-option' - self.assertTrue( - self.mock_config_manager.GetOptionBool(self.section, option)) - expected_calls = [ - mock.call.read(self.config_file), - mock.call.has_option(self.section, option), - ] - self.assertEqual(self.mock_config.mock_calls, expected_calls) - - def testSetOption(self): - self.mock_config_manager.SetOption(self.section, self.option, self.value) - expected_calls = [ - mock.call.read(self.config_file), - mock.call.has_section(self.section), - mock.call.set(self.section, self.option, self.value), - ] - self.assertEqual(self.mock_config.mock_calls, expected_calls) - - def testSetOptionNoOverwrite(self): - self.mock_config_manager.SetOption( - self.section, self.option, self.value, overwrite=False) - expected_calls = [ - mock.call.read(self.config_file), - mock.call.has_option(self.section, self.option), - ] - self.assertEqual(self.mock_config.mock_calls, expected_calls) - - def testSetOptionNewSection(self): - section = 'test-section' - self.mock_config_manager.SetOption(section, self.option, self.value) - expected_calls = [ - mock.call.read(self.config_file), - mock.call.has_section(section), - mock.call.add_section(section), - mock.call.set(section, self.option, self.value), - ] - self.assertEqual(self.mock_config.mock_calls, expected_calls) - - def testWriteConfig(self): - mock_open = mock.mock_open() - with mock.patch('%s.open' % builtin, mock_open, create=False): - self.mock_config_manager.WriteConfig() - expected_calls = [ - mock.call('# %s' % self.config_header), - mock.call('\n\n'), - ] - self.assertEqual(mock_open().write.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.config_manager.file_utils') - def testWriteConfigNoHeader(self, mock_lock): - self.mock_config_manager = config_manager.ConfigManager( - config_file='/tmp/file.cfg') - mock_open = mock.mock_open() - with mock.patch('%s.open' % builtin, mock_open, create=False): - self.mock_config_manager.WriteConfig() - 
mock_open().write.assert_not_called() - mock_lock.LockFile.assert_called_once_with('/var/lock/google_file.lock') - - @mock.patch('google_compute_engine.config_manager.file_utils') - def testWriteConfigLocked(self, mock_lock): - ioerror = IOError('Test Error') - mock_lock.LockFile.side_effect = ioerror - mock_open = mock.mock_open() - with mock.patch('%s.open' % builtin, mock_open, create=False): - with self.assertRaises(IOError) as error: - self.mock_config_manager.WriteConfig() - self.assertEqual(error.exception, ioerror) - mock_open().write.assert_not_called() - mock_lock.LockFile.assert_called_once_with('/var/lock/google_test.lock') - - -if __name__ == '__main__': - unittest.main() diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/tests/file_utils_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/tests/file_utils_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/tests/file_utils_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/tests/file_utils_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,198 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unittest for file_utils.py module.""" - -from google_compute_engine import file_utils -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -class FileUtilsTest(unittest.TestCase): - - def setUp(self): - self.fd = 1 - self.path = '/tmp/path' - - @mock.patch('google_compute_engine.file_utils.subprocess.call') - @mock.patch('google_compute_engine.file_utils.os.access') - @mock.patch('google_compute_engine.file_utils.os.path.isfile') - def testSetSELinuxContext(self, mock_isfile, mock_access, mock_call): - restorecon = '/sbin/restorecon' - path = 'path' - mock_isfile.return_value = True - mock_access.return_value = True - file_utils._SetSELinuxContext(path) - mock_isfile.assert_called_once_with(restorecon) - mock_access.assert_called_once_with(restorecon, file_utils.os.X_OK) - mock_call.assert_called_once_with([restorecon, path]) - - @mock.patch('google_compute_engine.file_utils.subprocess.call') - @mock.patch('google_compute_engine.file_utils.os.access') - @mock.patch('google_compute_engine.file_utils.os.path.isfile') - def testSetSELinuxContextSkip(self, mock_isfile, mock_access, mock_call): - mock_isfile.side_effect = [True, False, False] - mock_access.side_effect = [False, True, False] - file_utils._SetSELinuxContext('1') - file_utils._SetSELinuxContext('2') - file_utils._SetSELinuxContext('3') - mock_call.assert_not_called() - - @mock.patch('google_compute_engine.file_utils._SetSELinuxContext') - @mock.patch('google_compute_engine.file_utils.os.path.exists') - @mock.patch('google_compute_engine.file_utils.os.mkdir') - @mock.patch('google_compute_engine.file_utils.os.chown') - @mock.patch('google_compute_engine.file_utils.os.chmod') - def testSetPermissions( - self, mock_chmod, mock_chown, mock_mkdir, mock_exists, mock_context): - mocks = mock.Mock() - mocks.attach_mock(mock_chmod, 'chmod') - mocks.attach_mock(mock_chown, 'chown') - mocks.attach_mock(mock_mkdir, 'mkdir') - 
mocks.attach_mock(mock_exists, 'exists') - mocks.attach_mock(mock_context, 'context') - path = 'path' - mode = 'mode' - uid = 'uid' - gid = 'gid' - mock_exists.side_effect = [False, True, False] - - # Create a new directory. - file_utils.SetPermissions(path, mode=mode, uid=uid, gid=gid, mkdir=True) - # The path exists, so do not create a new directory. - file_utils.SetPermissions(path, mode=mode, uid=uid, gid=gid, mkdir=True) - # Create a new directory without a mode specified. - file_utils.SetPermissions(path, uid=uid, gid=gid, mkdir=True) - # Do not create the path even though it does not exist. - file_utils.SetPermissions(path, mode=mode, uid=uid, gid=gid, mkdir=False) - # Do not set an owner when a UID or GID is not specified. - file_utils.SetPermissions(path, mode=mode, mkdir=False) - # Set the SELinux context when no parameters are specified. - file_utils.SetPermissions(path) - expected_calls = [ - # Create a new directory. - mock.call.exists(path), - mock.call.mkdir(path, mode), - mock.call.chown(path, uid, gid), - mock.call.context(path), - # Attempt to create a new path but reuse existing path. - mock.call.exists(path), - mock.call.chmod(path, mode), - mock.call.chown(path, uid, gid), - mock.call.context(path), - # Create a new directory with default mode. - mock.call.exists(path), - mock.call.mkdir(path, 0o777), - mock.call.chown(path, uid, gid), - mock.call.context(path), - # Set permissions and owner on an existing path. - mock.call.chmod(path, mode), - mock.call.chown(path, uid, gid), - mock.call.context(path), - # Set permissions, without changing ownership, of an existing path. - mock.call.chmod(path, mode), - mock.call.context(path), - # Set SELinux context on an existing path. 
- mock.call.context(path), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.file_utils.fcntl.flock') - def testLock(self, mock_flock): - operation = file_utils.fcntl.LOCK_EX | file_utils.fcntl.LOCK_NB - file_utils.Lock(self.fd, self.path, False) - mock_flock.assert_called_once_with(self.fd, operation) - - @mock.patch('google_compute_engine.file_utils.fcntl.flock') - def testLockBlocking(self, mock_flock): - operation = file_utils.fcntl.LOCK_EX - file_utils.Lock(self.fd, self.path, True) - mock_flock.assert_called_once_with(self.fd, operation) - - @mock.patch('google_compute_engine.file_utils.fcntl.flock') - def testLockTakenException(self, mock_flock): - error = IOError('Test Error') - error.errno = file_utils.errno.EWOULDBLOCK - mock_flock.side_effect = error - try: - file_utils.Lock(self.fd, self.path, False) - except IOError as e: - self.assertTrue(self.path in str(e)) - - @mock.patch('google_compute_engine.file_utils.fcntl.flock') - def testLockException(self, mock_flock): - error = IOError('Test Error') - mock_flock.side_effect = error - try: - file_utils.Lock(self.fd, self.path, False) - except IOError as e: - self.assertTrue(self.path in str(e)) - self.assertTrue('Test Error' in str(e)) - - @mock.patch('google_compute_engine.file_utils.fcntl.flock') - def testUnlock(self, mock_flock): - operation = file_utils.fcntl.LOCK_UN | file_utils.fcntl.LOCK_NB - file_utils.Unlock(self.fd, self.path) - mock_flock.assert_called_once_with(self.fd, operation) - - @mock.patch('google_compute_engine.file_utils.fcntl.flock') - def testUnlockTakenException(self, mock_flock): - error = IOError('Test Error') - error.errno = file_utils.errno.EWOULDBLOCK - mock_flock.side_effect = error - try: - file_utils.Unlock(self.fd, self.path) - except IOError as e: - self.assertTrue(self.path in str(e)) - - @mock.patch('google_compute_engine.file_utils.fcntl.flock') - def testUnlockException(self, mock_flock): - error = IOError('Test Error') - 
mock_flock.side_effect = error - try: - file_utils.Unlock(self.fd, self.path) - except IOError as e: - self.assertTrue(self.path in str(e)) - self.assertTrue('Test Error' in str(e)) - - @mock.patch('google_compute_engine.file_utils.Unlock') - @mock.patch('google_compute_engine.file_utils.Lock') - @mock.patch('google_compute_engine.file_utils.os') - def testLockFile(self, mock_os, mock_lock, mock_unlock): - mock_callable = mock.Mock() - mock_os.open.return_value = self.fd - mock_os.O_CREAT = 1 - mocks = mock.Mock() - mocks.attach_mock(mock_callable, 'callable') - mocks.attach_mock(mock_lock, 'lock') - mocks.attach_mock(mock_unlock, 'unlock') - mocks.attach_mock(mock_os.open, 'open') - mocks.attach_mock(mock_os.close, 'close') - - with file_utils.LockFile(self.path, blocking=True): - mock_callable('test') - - expected_calls = [ - mock.call.open(self.path, 1), - mock.call.lock(self.fd, self.path, True), - mock.call.callable('test'), - mock.call.unlock(self.fd, self.path), - mock.call.close(1), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - -if __name__ == '__main__': - unittest.main() diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/tests/logger_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/tests/logger_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/tests/logger_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/tests/logger_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,54 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unittest for logger.py module.""" - -from google_compute_engine import logger -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -class LoggerTest(unittest.TestCase): - - @mock.patch('google_compute_engine.logger.logging.handlers.SysLogHandler') - @mock.patch('google_compute_engine.logger.logging.StreamHandler') - @mock.patch('google_compute_engine.logger.logging.NullHandler') - def testLogger(self, mock_null, mock_stream, mock_syslog): - mock_null.return_value = mock_null - mock_stream.return_value = mock_stream - mock_syslog.return_value = mock_syslog - name = 'test' - - # Verify basic logger setup. - named_logger = logger.Logger(name=name, debug=True) - mock_stream.setLevel.assert_called_once_with(logger.logging.DEBUG) - self.assertEqual(named_logger.handlers, [mock_null, mock_stream]) - - # Verify logger setup with a facility. - address = '/dev/log' - facility = 1 - named_logger = logger.Logger(name=name, debug=True, facility=facility) - mock_syslog.assert_called_once_with(address=address, facility=facility) - mock_syslog.setLevel.assert_called_once_with(logger.logging.INFO) - self.assertEqual( - named_logger.handlers, [mock_null, mock_stream, mock_syslog]) - - # Verify the handlers are reset during repeated calls. 
- named_logger = logger.Logger(name=name, debug=False) - self.assertEqual(named_logger.handlers, [mock_null]) - - -if __name__ == '__main__': - unittest.main() diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/tests/metadata_watcher_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/tests/metadata_watcher_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/tests/metadata_watcher_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/tests/metadata_watcher_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,358 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unittest for metadata_watcher.py module.""" - -import os - -from google_compute_engine import metadata_watcher -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -class MetadataWatcherTest(unittest.TestCase): - - def setUp(self): - self.mock_logger = mock.Mock() - self.timeout = 60 - self.url = 'http://metadata.google.internal/computeMetadata/v1' - self.params = { - 'alt': 'json', - 'last_etag': 0, - 'recursive': True, - 'timeout_sec': self.timeout, - 'wait_for_change': True, - } - self.mock_watcher = metadata_watcher.MetadataWatcher( - logger=self.mock_logger, timeout=self.timeout) - - @mock.patch('google_compute_engine.metadata_watcher.urlrequest.build_opener') - @mock.patch('google_compute_engine.metadata_watcher.urlrequest.ProxyHandler') - @mock.patch('google_compute_engine.metadata_watcher.urlrequest.Request') - def testGetMetadataRequest(self, mock_request, mock_proxy, mock_opener): - mock_open = mock.Mock() - mock_handler = mock.Mock() - mock_response = mock.Mock() - mocks = mock.Mock() - mocks.attach_mock(mock_request, 'request') - mocks.attach_mock(mock_proxy, 'proxy') - mocks.attach_mock(mock_handler, 'handler') - mocks.attach_mock(mock_opener, 'opener') - mocks.attach_mock(mock_open, 'open') - mocks.attach_mock(mock_response, 'response') - mock_request.return_value = mock_request - mock_proxy.return_value = mock_handler - mock_opener.return_value = mock_open - mock_response.getcode.return_value = metadata_watcher.httpclient.OK - mock_open.open.return_value = mock_response - params = {'hello': 'world'} - request_url = '%s?hello=world' % self.url - headers = {'Metadata-Flavor': 'Google'} - timeout = self.timeout * 1.1 - - self.mock_watcher._GetMetadataRequest(self.url, params=params) - expected_calls = [ - mock.call.request(request_url, headers=headers), - mock.call.proxy({}), - mock.call.opener(mock_handler), - mock.call.open.open(mock_request, timeout=timeout), - 
mock.call.response.getcode(), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.metadata_watcher.time') - @mock.patch('google_compute_engine.metadata_watcher.urlrequest.build_opener') - @mock.patch('google_compute_engine.metadata_watcher.urlrequest.ProxyHandler') - @mock.patch('google_compute_engine.metadata_watcher.urlrequest.Request') - def testGetMetadataRequestRetry( - self, mock_request, mock_proxy, mock_opener, mock_time): - mock_open = mock.Mock() - mock_handler = mock.Mock() - mocks = mock.Mock() - mocks.attach_mock(mock_request, 'request') - mocks.attach_mock(mock_proxy, 'proxy') - mocks.attach_mock(mock_handler, 'handler') - mocks.attach_mock(mock_opener, 'opener') - mocks.attach_mock(mock_open, 'open') - mocks.attach_mock(mock_time, 'time') - mock_request.return_value = mock_request - mock_proxy.return_value = mock_handler - mock_opener.return_value = mock_open - - mock_unavailable = mock.Mock() - mock_unavailable.getcode.return_value = ( - metadata_watcher.httpclient.SERVICE_UNAVAILABLE) - mock_timeout = metadata_watcher.socket.timeout('Test timeout') - mock_success = mock.Mock() - mock_success.getcode.return_value = metadata_watcher.httpclient.OK - - # Retry after a service unavailable error response. - mock_open.open.side_effect = [ - metadata_watcher.StatusException(mock_unavailable), - mock_timeout, - mock_success, - ] - request_url = '%s?' 
% self.url - headers = {'Metadata-Flavor': 'Google'} - timeout = self.timeout * 1.1 - - self.mock_watcher._GetMetadataRequest(self.url) - expected_calls = [ - mock.call.request(request_url, headers=headers), - mock.call.proxy({}), - mock.call.opener(mock_handler), - mock.call.open.open(mock_request, timeout=timeout), - mock.call.time.sleep(mock.ANY), - mock.call.request(request_url, headers=headers), - mock.call.proxy({}), - mock.call.opener(mock_handler), - mock.call.open.open(mock_request, timeout=timeout), - mock.call.time.sleep(mock.ANY), - mock.call.request(request_url, headers=headers), - mock.call.proxy({}), - mock.call.opener(mock_handler), - mock.call.open.open(mock_request, timeout=timeout), - ] - self.assertEqual(mocks.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.metadata_watcher.time') - @mock.patch('google_compute_engine.metadata_watcher.urlrequest.build_opener') - @mock.patch('google_compute_engine.metadata_watcher.urlrequest.ProxyHandler') - @mock.patch('google_compute_engine.metadata_watcher.urlrequest.Request') - def testGetMetadataRequestHttpException( - self, mock_request, mock_proxy, mock_opener, mock_time): - mock_open = mock.Mock() - mock_handler = mock.Mock() - mock_response = mock.Mock() - mock_request.return_value = mock_request - mock_proxy.return_value = mock_handler - mock_opener.return_value = mock_open - mock_response.getcode.return_value = metadata_watcher.httpclient.NOT_FOUND - mock_open.open.side_effect = metadata_watcher.StatusException(mock_response) - - with self.assertRaises(metadata_watcher.StatusException): - self.mock_watcher._GetMetadataRequest(self.url) - self.assertEqual(mock_request.call_count, 1) - self.assertEqual(mock_proxy.call_count, 1) - self.assertEqual(mock_opener.call_count, 1) - self.assertEqual(mock_open.open.call_count, 1) - self.assertEqual(mock_response.getcode.call_count, 1) - - @mock.patch('google_compute_engine.metadata_watcher.urlrequest.build_opener') - 
@mock.patch('google_compute_engine.metadata_watcher.urlrequest.ProxyHandler') - @mock.patch('google_compute_engine.metadata_watcher.urlrequest.Request') - def testGetMetadataRequestException( - self, mock_request, mock_proxy, mock_opener): - mock_open = mock.Mock() - mock_handler = mock.Mock() - mock_response = mock.Mock() - mock_request.return_value = mock_request - mock_proxy.return_value = mock_handler - mock_opener.return_value = mock_open - mock_response.getcode.return_value = metadata_watcher.httpclient.NOT_FOUND - mock_open.open.side_effect = mock_response - - with self.assertRaises(metadata_watcher.StatusException): - self.mock_watcher._GetMetadataRequest(self.url) - self.assertEqual(mock_request.call_count, 1) - self.assertEqual(mock_proxy.call_count, 1) - self.assertEqual(mock_opener.call_count, 1) - self.assertEqual(mock_open.open.call_count, 1) - - def testUpdateEtag(self): - mock_response = mock.Mock() - mock_response.headers = {'etag': 1} - self.assertEqual(self.mock_watcher.etag, 0) - - # Update the etag if the etag is set. - self.assertTrue(self.mock_watcher._UpdateEtag(mock_response)) - self.assertEqual(self.mock_watcher.etag, 1) - - # Do not update the etag if the etag is unchanged. - self.assertFalse(self.mock_watcher._UpdateEtag(mock_response)) - self.assertEqual(self.mock_watcher.etag, 1) - - # Do not update the etag if the etag is not set. 
- mock_response.headers = {} - self.assertFalse(self.mock_watcher._UpdateEtag(mock_response)) - self.assertEqual(self.mock_watcher.etag, 1) - - def testGetMetadataUpdate(self): - mock_response = mock.Mock() - mock_response.return_value = mock_response - mock_response.headers = {'etag': 1} - mock_response.read.return_value = bytes(b'{}') - self.mock_watcher._GetMetadataRequest = mock_response - request_url = os.path.join(self.url, '') - - self.assertEqual(self.mock_watcher._GetMetadataUpdate(), {}) - self.assertEqual(self.mock_watcher.etag, 1) - mock_response.assert_called_once_with( - request_url, params=self.params, timeout=None) - - def testGetMetadataUpdateArgs(self): - mock_response = mock.Mock() - mock_response.return_value = mock_response - mock_response.headers = {'etag': 0} - mock_response.read.return_value = bytes(b'{}') - self.mock_watcher._GetMetadataRequest = mock_response - metadata_key = 'instance/id' - self.params['recursive'] = False - self.params['wait_for_change'] = False - request_url = os.path.join(self.url, metadata_key) - - self.mock_watcher._GetMetadataUpdate( - metadata_key=metadata_key, recursive=False, wait=False, timeout=60) - self.assertEqual(self.mock_watcher.etag, 0) - mock_response.assert_called_once_with( - request_url, params=self.params, timeout=60) - - def testGetMetadataUpdateWait(self): - self.params['last_etag'] = 1 - self.mock_watcher.etag = 1 - mock_unchanged = mock.Mock() - mock_unchanged.headers = {'etag': 1} - mock_unchanged.read.return_value = bytes(b'{}') - mock_changed = mock.Mock() - mock_changed.headers = {'etag': 2} - mock_changed.read.return_value = bytes(b'{}') - mock_response = mock.Mock() - mock_response.side_effect = [mock_unchanged, mock_unchanged, mock_changed] - self.mock_watcher._GetMetadataRequest = mock_response - request_url = os.path.join(self.url, '') - - self.mock_watcher._GetMetadataUpdate() - self.assertEqual(self.mock_watcher.etag, 2) - expected_calls = [ - mock.call(request_url, params=self.params, 
timeout=None), - ] * 3 - self.assertEqual(mock_response.mock_calls, expected_calls) - - def testHandleMetadataUpdate(self): - mock_response = mock.Mock() - mock_response.return_value = {} - self.mock_watcher._GetMetadataUpdate = mock_response - - self.assertEqual(self.mock_watcher.GetMetadata(), {}) - mock_response.assert_called_once_with( - metadata_key='', recursive=True, wait=False, timeout=None) - self.mock_watcher.logger.exception.assert_not_called() - - def testHandleMetadataUpdateException(self): - mock_response = mock.Mock() - first = metadata_watcher.socket.timeout() - second = metadata_watcher.urlerror.URLError('Test') - mock_response.side_effect = [first, first, second, {}] - self.mock_watcher._GetMetadataUpdate = mock_response - metadata_key = 'instance/id' - recursive = False - wait = False - retry = True - - self.assertEqual( - self.mock_watcher._HandleMetadataUpdate( - metadata_key=metadata_key, recursive=recursive, wait=wait, - timeout=None, retry=retry), - {}) - expected_calls = [ - mock.call( - metadata_key=metadata_key, recursive=recursive, wait=wait, - timeout=None), - ] * 4 - self.assertEqual(mock_response.mock_calls, expected_calls) - expected_calls = [mock.call.error(mock.ANY, mock.ANY)] * 2 - self.assertEqual(self.mock_logger.mock_calls, expected_calls) - - def testHandleMetadataUpdateExceptionNoRetry(self): - mock_response = mock.Mock() - mock_response.side_effect = metadata_watcher.socket.timeout() - self.mock_watcher._GetMetadataUpdate = mock_response - metadata_key = 'instance/id' - recursive = False - wait = False - retry = False - - self.assertIsNone( - self.mock_watcher._HandleMetadataUpdate( - metadata_key=metadata_key, recursive=recursive, wait=wait, - timeout=None, retry=retry)) - expected_calls = [ - mock.call( - metadata_key=metadata_key, recursive=recursive, wait=wait, - timeout=None), - ] - self.assertEqual(mock_response.mock_calls, expected_calls) - expected_calls = [mock.call.error(mock.ANY, mock.ANY)] - 
self.assertEqual(self.mock_logger.mock_calls, expected_calls) - - def testWatchMetadata(self): - mock_response = mock.Mock() - mock_response.return_value = {} - self.mock_watcher._HandleMetadataUpdate = mock_response - mock_handler = mock.Mock() - mock_handler.side_effect = Exception() - self.mock_logger.exception.side_effect = RuntimeError() - recursive = True - - with self.assertRaises(RuntimeError): - self.mock_watcher.WatchMetadata(mock_handler, recursive=recursive) - mock_handler.assert_called_once_with({}) - mock_response.assert_called_once_with( - metadata_key='', recursive=recursive, wait=True, timeout=None) - - def testWatchMetadataException(self): - mock_response = mock.Mock() - mock_response.side_effect = metadata_watcher.socket.timeout() - self.mock_watcher._GetMetadataUpdate = mock_response - self.mock_logger.error.side_effect = RuntimeError() - metadata_key = 'instance/id' - recursive = False - - with self.assertRaises(RuntimeError): - self.mock_watcher.WatchMetadata( - None, metadata_key=metadata_key, recursive=recursive) - mock_response.assert_called_once_with( - metadata_key=metadata_key, recursive=recursive, wait=True, timeout=None) - - def testGetMetadata(self): - mock_response = mock.Mock() - mock_response.return_value = {} - self.mock_watcher._HandleMetadataUpdate = mock_response - - self.assertEqual(self.mock_watcher.GetMetadata(), {}) - mock_response.assert_called_once_with( - metadata_key='', recursive=True, wait=False, timeout=None, retry=True) - self.mock_watcher.logger.exception.assert_not_called() - - def testGetMetadataArgs(self): - mock_response = mock.Mock() - mock_response.return_value = {} - self.mock_watcher._HandleMetadataUpdate = mock_response - metadata_key = 'instance/id' - recursive = False - retry = False - - response = self.mock_watcher.GetMetadata( - metadata_key=metadata_key, recursive=recursive, timeout=60, - retry=retry) - self.assertEqual(response, {}) - mock_response.assert_called_once_with( - 
metadata_key=metadata_key, recursive=False, wait=False, timeout=60, - retry=False) - self.mock_watcher.logger.exception.assert_not_called() - - -if __name__ == '__main__': - unittest.main() diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/tests/network_utils_test.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/tests/network_utils_test.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/google_compute_engine/tests/network_utils_test.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/google_compute_engine/tests/network_utils_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,106 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unittest for network_utils.py module.""" - -from google_compute_engine import network_utils -from google_compute_engine.test_compat import builtin -from google_compute_engine.test_compat import mock -from google_compute_engine.test_compat import unittest - - -class NetworkUtilsTest(unittest.TestCase): - - def setUp(self): - self.mock_logger = mock.Mock() - self.interfaces = {'address': 'interface'} - self.mock_utils = network_utils.NetworkUtils(self.mock_logger) - self.mock_utils.interfaces = self.interfaces - - @mock.patch('google_compute_engine.network_utils.netifaces', False) - @mock.patch('google_compute_engine.network_utils.os.listdir') - def testCreateInterfaceMapSysfs(self, mock_listdir): - mock_open = mock.mock_open() - interface_map = { - '1': 'a', - '2': 'b', - '3': 'c', - } - mock_listdir.return_value = interface_map.values() - - with mock.patch('%s.open' % builtin, mock_open, create=False): - addresses = interface_map.keys() - addresses = ['%s\n' % address for address in addresses] - mock_open().read.side_effect = interface_map.keys() - self.assertEqual(self.mock_utils._CreateInterfaceMap(), interface_map) - - @mock.patch('google_compute_engine.network_utils.netifaces', False) - @mock.patch('google_compute_engine.network_utils.os.listdir') - def testCreateInterfaceMapSysfsError(self, mock_listdir): - mock_open = mock.mock_open() - mock_listdir.return_value = ['a', 'b', 'c'] - - with mock.patch('%s.open' % builtin, mock_open, create=False): - mock_open().read.side_effect = [ - '1', OSError('OSError'), IOError('IOError')] - self.assertEqual(self.mock_utils._CreateInterfaceMap(), {'1': 'a'}) - expected_calls = [ - mock.call.warning(mock.ANY, 'b', 'OSError'), - mock.call.warning(mock.ANY, 'c', 'IOError'), - ] - self.assertEqual(self.mock_logger.mock_calls, expected_calls) - - @mock.patch('google_compute_engine.network_utils.netifaces.AF_LINK', 88) - @mock.patch('google_compute_engine.network_utils.netifaces') - def 
testCreateInterfaceMapNetifaces(self, mock_netifaces): - interface_map = { - '11:11:11:11:11:11': 'a', - '22:22:22:22:22:22': 'b', - '33:33:33:33:33:33': 'c', - } - ifaddress_map = { - 'a': {mock_netifaces.AF_LINK: [{'addr': '11:11:11:11:11:11'}]}, - 'b': {mock_netifaces.AF_LINK: [{'addr': '22:22:22:22:22:22'}]}, - 'c': {mock_netifaces.AF_LINK: [{'addr': '33:33:33:33:33:33'}]}, - } - mock_netifaces.interfaces.return_value = interface_map.values() - mock_netifaces.ifaddresses.side_effect = ( - lambda interface: ifaddress_map[interface]) - self.assertEqual(self.mock_utils._CreateInterfaceMap(), interface_map) - - @mock.patch('google_compute_engine.network_utils.netifaces.AF_LINK', 88) - @mock.patch('google_compute_engine.network_utils.netifaces') - def testCreateInterfaceMapNetifacesError(self, mock_netifaces): - ifaddress_map = { - 'a': {mock_netifaces.AF_LINK: [{'addr': '11:11:11:11:11:11'}]}, - 'b': {}, - 'c': {mock_netifaces.AF_LINK: [{'addr': ''}]}, - } - mock_netifaces.interfaces.return_value = ['a', 'b', 'c'] - mock_netifaces.ifaddresses.side_effect = ( - lambda interface: ifaddress_map[interface]) - - self.assertEqual( - self.mock_utils._CreateInterfaceMap(), {'11:11:11:11:11:11': 'a'}) - expected_calls = [ - mock.call.warning(mock.ANY, 'b'), - mock.call.warning(mock.ANY, 'c'), - ] - self.assertEqual(self.mock_logger.mock_calls, expected_calls) - - def testGetNetworkInterface(self): - self.assertIsNone(self.mock_utils.GetNetworkInterface('invalid')) - self.assertEqual( - self.mock_utils.GetNetworkInterface('address'), 'interface') diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/LICENSE gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/LICENSE --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/LICENSE 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,201 
+0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/MANIFEST.in gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/MANIFEST.in --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/MANIFEST.in 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/MANIFEST.in 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -# Include the license file. -include LICENSE - -# Include init configuration files. 
-graft package diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/packaging/debian/changelog gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/packaging/debian/changelog --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/packaging/debian/changelog 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/packaging/debian/changelog 1970-01-01 00:00:00.000000000 +0000 @@ -1,237 +0,0 @@ -python-google-compute-engine (1:20190801.00-g1) stable; urgency=medium - - * Re-enable boto config and drop writing plugin directory. - * Fix metadata script retrieval. - - -- Google Cloud Team Thu, 01 Aug 2019 13:55:46 -0700 - -python-google-compute-engine (1:20190729.00-g1) stable; urgency=medium - - * Support for Google Private Access over IPv6. - - -- Google Cloud Team Mon, 29 Jul 2019 10:09:24 -0700 - -python-google-compute-engine (1:20190708.00-g1) stable; urgency=medium - - * Move Debian entry point scripts to python3. - * Update Debian build dependencies. - * Drop unnecessary build and package dependencies. - - -- Google Cloud Team Mon, 08 Jul 2019 10:20:44 -0700 - -python-google-compute-engine (2.8.16-1) stable; urgency=low - - * Fix guest attributes flow in Python 3. - - -- Google Cloud Team Wed, 22 May 2019 12:00:00 -0700 - -python-google-compute-engine (2.8.15-1) stable; urgency=low - - * Retry download for metadata scripts. - * Fix script retrieval in python3. - * Disable boto config in python3. - * Update SSH host keys in guest attributes. - - -- Google Cloud Team Tue, 21 May 2019 12:00:00 -0700 - -python-google-compute-engine (2.8.14-1) stable; urgency=low - - * FreeBSD fixes: syslog socket location and OS detection. - - -- Google Cloud Team Tue, 16 Apr 2019 12:00:00 -0700 - -python-google-compute-engine (2.8.13-1) stable; urgency=low - - * Fix metadata script retrieval to support Python 3. 
- - -- Google Cloud Team Thu, 24 Jan 2019 12:00:00 -0700 - -python-google-compute-engine (2.8.12-1) stable; urgency=low - - * Fix two factor enablement on change. - - -- Google Cloud Team Wed, 05 Dec 2018 12:00:00 -0700 - -google-compute-image-packages (2.8.11-1) stable; urgency=low - - * Split up the gpasswd command into two commands. - * Update two factor enablement on change. - - -- Google Cloud Team Tue, 04 Dec 2018 12:00:00 -0700 - -google-compute-image-packages (2.8.10-1) stable; urgency=low - - * Fix the gpasswd command default. - - -- Google Cloud Team Fri, 30 Nov 2018 12:00:00 -0700 - -google-compute-image-packages (2.8.9-1) stable; urgency=low - - * Support enabling OS Login two factor authentication. - * Improve accounts support for FreeBSD. - * Improve SELinux support. - - -- Google Cloud Team Wed, 28 Nov 2018 12:00:00 -0700 - -google-compute-image-packages (2.8.8-1) stable; urgency=low - - * Update sudoer group membership without overriding local groups. - - -- Google Cloud Team Tue, 23 Oct 2018 12:00:00 -0700 - -google-compute-image-packages (2.8.7-1) stable; urgency=low - - * Remove users from sudoers group on removal (fixed). - - -- Google Cloud Team Thu, 18 Oct 2018 12:00:00 -0700 - -google-compute-image-packages (2.8.6-1) stable; urgency=low - - * Revert PR: Remove users from sudoers group on removal. - - -- Google Cloud Team Thu, 11 Oct 2018 12:00:00 -0700 - -google-compute-image-packages (2.8.5-1) stable; urgency=low - - * Remove users from sudoers group on removal. - * Remove gsutil dependency for metadata scripts. - - -- Google Cloud Team Thu, 05 Oct 2018 12:00:00 -0700 - -google-compute-image-packages (2.8.4-1) stable; urgency=low - - * Remove ntp dependency. - * Support Debian 10 Buster. - * Restart the network daemon if networking is restarted. - * Prevent setup of the default ethernet interface. - * Accounts daemon can now verify username is 32 characters or less. 
- - -- Google Cloud Team Wed, 05 Sep 2018 12:00:00 -0700 - -google-compute-image-packages (2.8.3-1) stable; urgency=low - - * Prevent IP forwarding daemon log spam. - * Make default shell configurable when executing metadata scripts. - * Rename distro directory to distro_lib. - - -- Google Cloud Team Mon, 11 June 2018 12:00:00 -0700 - -google-compute-image-packages (2.8.2-1) stable; urgency=low - - * Prevent delay in configuring IP forwarding routes. - * Improve instance setup support for FreeBSD. - - -- Google Cloud Team Thu, 10 May 2018 12:00:00 -0700 - -google-compute-image-packages (2.8.1-1) stable; urgency=low - - * Improve OS Login disablement. - - -- Google Cloud Team Fri, 04 May 2018 12:00:00 -0700 - -google-compute-image-packages (2.8.0-1) stable; urgency=low - - * Create a new network daemon. - * Refactor the IP forwarding daemon and network setup. - * Improvements for using NSS cache in the accounts daemon. - - -- Google Cloud Team Tue, 01 May 2018 12:00:00 -0700 - -google-compute-image-packages (2.7.7-1) stable; urgency=low - - * Add support for NSS cache in OS Login. - - -- Google Cloud Team Thu, 08 Mar 2018 12:00:00 -0700 - -google-compute-image-packages (2.7.6-1) stable; urgency=low - - * Add distro specific logic. - - -- Google Cloud Team Wed, 21 Feb 2018 12:00:00 -0700 - -google-compute-image-packages (2.7.5-2) stable; urgency=low - - * Fix dependencies for syslog. - - -- Google Cloud Team Tue, 06 Feb 2018 12:00:00 -0700 - -google-compute-image-packages (2.7.5-1) stable; urgency=low - - * Revert hostname setting change in Debian. - - -- Google Cloud Team Mon, 29 Jan 2018 12:00:00 -0700 - -google-compute-image-packages (2.7.4-1) stable; urgency=low - - * Fix hostname setting in Debian. - - -- Google Cloud Team Mon, 29 Jan 2018 12:00:00 -0700 - -google-compute-image-packages (2.7.3-1) stable; urgency=low - - * Improve hostname setting and correctly restart rsyslog. 
- - -- Google Cloud Team Thu, 25 Jan 2018 12:00:00 -0700 - -google-compute-image-packages (2.7.2-2) stable; urgency=low - - * Force IPv4 for apt. - - -- Google Cloud Team Wed, 13 Dec 2017 12:00:00 -0700 - -google-compute-image-packages (2.7.2-1) stable; urgency=low - - * Generate SSH host keys when none are present. - * Improve logging when activating OS Login. - - -- Google Cloud Team Wed, 29 Nov 2017 12:00:00 -0700 - -google-compute-image-packages (2.7.1-1) stable; urgency=low - - * Update set_hostname file name to prevent conflict. - * Add apt config to prevent auto-removal of google-compute-engine. - - -- Google Cloud Team Wed, 25 Oct 2017 12:00:00 -0700 - -google-compute-image-packages (2.7.0-6) stable; urgency=low - - * Linux guest environment support for OS Login. - - -- Google Cloud Team Tue, 17 Oct 2017 12:00:00 -0700 - -google-compute-image-packages (2.6.2-1) stable; urgency=low - - * Fix system hang during VM shutdown. - - -- Google Cloud Team Fri, 06 Oct 2017 12:00:00 -0700 - -google-compute-image-packages (2.6.1-1) stable; urgency=low - - * Use curl to download metadata script files for SSL certificate validation. - * Use netifaces for retrieving MAC address names if the import exists. - - -- Google Cloud Team Thurs, 14 Sep 2017 12:00:00 -0700 - -google-compute-image-packages (2.6.0-4) stable; urgency=low - - * Fix DHCP exit hook install. - - -- Google Cloud Team Mon, 28 Aug 2017 12:00:00 -0700 - -google-compute-image-packages (2.6.0-3) stable; urgency=low - - * Add systemd preset. - - -- Google Cloud Team Fri, 25 Aug 2017 14:00:00 -0700 - -google-compute-image-packages (2.6.0-2) stable; urgency=low - - * Add DHCP exit hook script back into package. - - -- Google Cloud Team Fri, 25 Aug 2017 12:00:00 -0700 - -google-compute-image-packages (2.6.0-1) stable; urgency=low - - * New packaging. 
- - -- Google Cloud Team Mon, 27 Jun 2017 12:00:00 -0700 diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/packaging/debian/compat gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/packaging/debian/compat --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/packaging/debian/compat 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/packaging/debian/compat 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -10 diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/packaging/debian/control gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/packaging/debian/control --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/packaging/debian/control 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/packaging/debian/control 1970-01-01 00:00:00.000000000 +0000 @@ -1,38 +0,0 @@ -Source: python-google-compute-engine -Section: admin -Priority: optional -Maintainer: Google Cloud Team -Build-Depends: debhelper (>= 10), - dh-python, - python-all, - python-boto, - python-mock, - python-pytest, - python-setuptools, - python3-all, - python3-distro, - python3-pytest, - python3-setuptools -Standards-Version: 3.9.8 -Homepage: https://github.com/GoogleCloudPlatform/compute-image-packages - -Package: python-google-compute-engine -Section: python -Architecture: all -Depends: ${python:Depends}, ${misc:Depends} -Conflicts: google-compute-engine-jessie, - google-compute-engine-stretch, - google-compute-daemon, - google-startup-scripts -Description: Google Compute Engine python library for Python 2.x. - Python libraries used for interacting with Google Compute Engine instance - metadata to provide platform integration. 
- -Package: python3-google-compute-engine -Section: python -Architecture: all -Depends: ${python3:Depends}, ${misc:Depends}, - python3-distro, python3-pkg-resources, python3:any -Description: Google Compute Engine python library for Python 3.x. - Python libraries used for interacting with Google Compute Engine instance - metadata to provide platform integration. diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/packaging/debian/copyright gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/packaging/debian/copyright --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/packaging/debian/copyright 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/packaging/debian/copyright 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: python-google-compute-engine -Upstream-Contact: gc-team@google.com - -Files: * -Copyright: Copyright 2017 Google Inc. -License: Apache-2.0 - -Files: debian/* -Copyright: Copyright 2017 Google Inc. -License: Apache-2.0 - -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - On Debian systems, the complete text of the Apache version 2.0 license - can be found in "/usr/share/common-licenses/Apache-2.0". 
diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/packaging/debian/rules gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/packaging/debian/rules --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/packaging/debian/rules 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/packaging/debian/rules 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ -#!/usr/bin/make -f - -export PYBUILD_NAME=google-compute-engine -export PYBUILD_TEST_PYTEST=1 -export PYBUILD_TEST_ARGS={dir}/google_compute_engine/ -export PYBUILD_SYSTEM=distutils - -%: - dh $@ --with python2,python3 --buildsystem=pybuild - -override_dh_clean: - rm -rf google_compute_engine.egg-info - dh_clean - -override_dh_auto_install: - dh_auto_install - # Remove python2.7 entry points. - rm -Rf debian/python-google-compute-engine/usr/bin diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/packaging/python3-google-compute-engine.spec gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/packaging/python3-google-compute-engine.spec --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/packaging/python3-google-compute-engine.spec 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/packaging/python3-google-compute-engine.spec 1970-01-01 00:00:00.000000000 +0000 @@ -1,48 +0,0 @@ -# Copyright 2019 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -Name: python3-google-compute-engine -Epoch: 1 -Version: %{_version} -Release: g1%{?dist} -Summary: Google Compute Engine python3 library -License: ASL 2.0 -Url: https://github.com/GoogleCloudPlatform/compute-image-packages -Source0: %{name}_%{version}.orig.tar.gz - -BuildArch: noarch -BuildRequires: python36-devel python3-setuptools - -Requires: python3-setuptools - -%description -Google Compute Engine python library for Python 3.x. - -%prep -%autosetup - -%build -%py3_build - -%install -%py3_install - -%files -%{python3_sitelib}/google_compute_engine/ -%{python3_sitelib}/google_compute_engine*.egg-info/ -%{_bindir}/google_accounts_daemon -%{_bindir}/google_clock_skew_daemon -%{_bindir}/google_instance_setup -%{_bindir}/google_metadata_script_runner -%{_bindir}/google_network_daemon diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/packaging/python-google-compute-engine.spec gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/packaging/python-google-compute-engine.spec --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/packaging/python-google-compute-engine.spec 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/packaging/python-google-compute-engine.spec 1970-01-01 00:00:00.000000000 +0000 @@ -1,55 +0,0 @@ -# Copyright 2017 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Force the dist to be el7 to avoid el7.centos. -%if 0%{?rhel} == 7 - %define dist .el7 -%endif - -Name: python-google-compute-engine -Epoch: 1 -Version: %{_version} -Release: g1%{?dist} -Summary: Google Compute Engine python library -License: ASL 2.0 -Url: https://github.com/GoogleCloudPlatform/compute-image-packages -Source0: %{name}_%{version}.orig.tar.gz - -BuildArch: noarch -BuildRequires: python2-devel python-setuptools python-boto - -Requires: python-boto python-setuptools - -Provides: python2-google-compute-engine - -Obsoletes: google-compute-daemon -Obsoletes: google-startup-scripts -Conflicts: google-compute-daemon -Conflicts: google-startup-scripts - -%description -Google Compute Engine python library for Python 2.x. - -%prep -%autosetup - -%build -python setup.py build - -%install -python setup.py install --prefix=%{_prefix} --root %{buildroot} - -%files -%{python_sitelib}/* -%attr(0755,-,-) %{_bindir}/* diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/packaging/setup_deb.sh gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/packaging/setup_deb.sh --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/packaging/setup_deb.sh 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/packaging/setup_deb.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,44 +0,0 @@ -#!/bin/bash -# Copyright 2018 Google Inc. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -NAME="python-google-compute-engine" -VERSION="20190801.00" - -working_dir=${PWD} -if [[ $(basename "$working_dir") != $NAME ]]; then - echo "Packaging scripts must be run from top of package dir." - exit 1 -fi - -# Build dependencies. -sudo apt-get -y install python-all python-setuptools python3-all \ - python3-setuptools python-pytest python3-pytest python-mock python-boto - -# DEB creation tools. -sudo apt-get -y install debhelper devscripts build-essential - -rm -rf /tmp/debpackage -mkdir /tmp/debpackage -tar czvf /tmp/debpackage/${NAME}_${VERSION}.orig.tar.gz --exclude .git \ - --exclude packaging --transform "s/^\./${NAME}-${VERSION}/" . - -pushd /tmp/debpackage -tar xzvf ${NAME}_${VERSION}.orig.tar.gz - -cd ${NAME}-${VERSION} - -cp -r ${working_dir}/packaging/debian ./ - -debuild -us -uc diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/packaging/setup_rpm.sh gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/packaging/setup_rpm.sh --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/packaging/setup_rpm.sh 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/packaging/setup_rpm.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,46 +0,0 @@ -#!/bin/bash -# Copyright 2018 Google Inc. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -NAME="python-google-compute-engine" -VERSION="20190801.00" - -rpm_working_dir=/tmp/rpmpackage/${NAME}-${VERSION} -working_dir=${PWD} -if [[ $(basename "$working_dir") != $NAME ]]; then - echo "Packaging scripts must be run from top of package dir." - exit 1 -fi - -sudo yum -y install rpmdevtools - -# RHEL/CentOS 8 uses python3. -if grep -q '^\(CentOS\|Red Hat\)[^0-9]*8\..' /etc/redhat-release; then - NAME="python3-google-compute-engine" - rpm_working_dir=/tmp/rpmpackage/${NAME}-${VERSION} - sudo yum -y install python36-devel python3-setuptools python36-rpm-macros -else - sudo yum -y install python2-devel python-setuptools python-boto -fi - -rm -rf /tmp/rpmpackage -mkdir -p ${rpm_working_dir}/{SOURCES,SPECS} - -cp packaging/${NAME}.spec ${rpm_working_dir}/SPECS/ - -tar czvf ${rpm_working_dir}/SOURCES/${NAME}_${VERSION}.orig.tar.gz \ - --exclude .git --exclude packaging --transform "s/^\./${NAME}-${VERSION}/" . 
- -rpmbuild --define "_topdir ${rpm_working_dir}/" --define "_version ${VERSION}" \ - -ba ${rpm_working_dir}/SPECS/${NAME}.spec diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/README.md gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/README.md --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/README.md 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,246 +0,0 @@ -## Python Linux Guest Environment for Google Compute Engine - -This package contains the Python guest environment installed on Google supported -Compute Engine Linux [images](https://cloud.google.com/compute/docs/images). - -**Table of Contents** - -* [Overview](#overview) -* [Common Libraries](#common-libraries) - * [Metadata Watcher](#metadata-watcher) - * [Logging](#logging) - * [Configuration Management](#configuration-management) - * [File Management](#file-management) - * [Network Utilities](#network-utilities) -* [Daemons](#daemons) - * [Accounts](#accounts) - * [Clock Skew](#clock-skew) - * [Network](#network) -* [Instance Setup](#instance-setup) -* [Metadata Scripts](#metadata-scripts) -* [Configuration](#configuration) - -## Overview - -The Linux guest environment is made up of the following components: - -* **Accounts** daemon to setup and manage user accounts, and to enable SSH key - based authentication. -* **Clock skew** daemon to keep the system clock in sync after VM start and - stop events. -* **Instance setup** scripts to execute VM configuration scripts during boot. -* **Network** daemon that handles network setup for multiple network interfaces - on boot and integrates network load balancing with - forwarding rule changes into the guest. -* **Metadata scripts** to run user-provided scripts at VM startup and - shutdown. 
- -The Linux guest environment is written in Python and is version agnostic -between Python 2.6 and 3.7. There is complete unittest coverage for every Python -library and script. The design of various guest libraries, daemons, and scripts, -are detailed in the sections below. - -## Common Libraries - -The Python libraries are shared with each of the daemons and the instance setup -tools. - -#### Metadata Watcher - -The guest environment relies upon retrieving content from the metadata server to -configure the VM environment. A metadata watching library handles all -communication with the metadata server. - -The library exposes two functions: - -* **GetMetadata** immediately retrieves the contents of the metadata server - for a given metadata key. The function catches and logs any connection - related exceptions. The metadata server content is returned as a - deserialized JSON object. -* **WatchMetadata** continuously makes a hanging GET, watching for changes to - the specified contents of the metadata server. When the request closes, the - watcher verifies the etag was updated. In case of an update, the etag is - updated and a provided handler function is called with the deserialized JSON - metadata content. The WatchMetadata function should never terminate; it - catches and logs any connection related exceptions, and catches and logs any - exception generated from calling the handler. - -Metadata server requests have custom retry logic for metadata server -unavailability; by default, any request has one minute to complete before the -request is cancelled. In case of a brief network outage where the metadata -server is unavailable, there is a short delay between retries. - -#### Logging - -The Google added daemons and scripts write to the serial port for added -transparency. A common logging library is a thin wrapper around the Python -logging module. 
The library configures appropriate SysLog handlers, sets the -logging formatter, and provides a debug options for added logging and console -output. - -#### Configuration Management - -A configuration file allows users to disable daemons and modify instance setup -behaviors from a single location. Guest environment daemons and scripts need a -mechanism to integrate user settings into the guest. A configuration management -library retrieves and modifies these settings. - -The library exposes the following functions: - -* **GetOptionString** retrieves the value for a configuration option. The type - of the value is a string if set. -* **GetOptionBool** retrieves the value for a configuration option. The type - of the value is a boolean if set. -* **SetOption** sets the value of an option in the config file. An overwrite - flag specifies whether to replace an existing value. -* **WriteConfig** writes the configuration values to a file. The function is - responsible for locking the file, preventing concurrent writes, and writing - a file header if one is provided. - -#### File Management - -Guest environment daemons and scripts use a common library for file management. -The library provides the following functions: - -* **SetPermissions** unifies the logic to set permissions and simplify file - creation across the various Linux distributions. The function sets the mode, - UID, and GID, of a provided path. On supported OS configurations that user - SELinux, the SELinux context is automatically set. -* **LockFile** is a context manager that simplifies the process of file - locking in Python. The function sets up an `flock` and releases the lock on - exit. - -#### Network Utilities - -A network-utilities library retrieves information about a network interface. The -library is used for IP forwarding and for setting up an Ethernet interface on -boot. 
The library exposes a `GetNetworkInterface` function that retrieves the -network interface name associated with a MAC address. - -## Daemons - -The guest environment daemons import and use the common libraries described -above. Each daemon reads the configuration file before execution. This allows a -user to easily disable undesired functionality. Additional daemon behaviors are -detailed below. - -#### Accounts - -The accounts daemon is responsible for provisioning and deprovisioning user -accounts. The daemon grants permissions to user accounts, and updates the list -of authorized keys that have access to accounts based on metadata SSH key -updates. User account creation is based on -[adding and remove SSH Keys](https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys) -stored in metadata. - -The accounts management daemon has the following behaviors. - -* Administrator permissions are managed with a `google-sudoers` Linux group. -* All users provisioned by the account daemon are added to the - `google-sudoers` group. -* The daemon stores a file in the guest to preserve state for the user - accounts managed by Google. -* The authorized keys file for a Google managed user is deleted when all SSH - keys for the user are removed from metadata. -* User accounts not managed by Google are not modified by the accounts daemon. - -#### Clock Skew - -The clock skew daemon is responsible for syncing the software clock with the -hypervisor clock after a stop/start event or after a migration. Preventing clock -skew may result in `system time has changed` messages in VM logs. - -#### Network - -The network daemon uses network interface metadata to manage the network -interfaces in the guest by performing the following tasks: - -* Enabled all associated network interfaces on boot. Network interfaces are - specified by MAC address in instance metadata. -* Uses IP forwarding metadata to setup or remove IP routes in the guest. 
- * Only IPv4 IP addresses are currently supported. - * Routes are set on the default Ethernet interface determined dynamically. - * Google routes are configured, by default, with the routing protocol ID - `66`. This ID is a namespace for daemon configured IP addresses. - -## Instance Setup - -Instance setup runs during VM boot. The script configures the Linux guest -environment by performing the following tasks. - -* Optimize for local SSD. -* Enable multi-queue on all the virtionet devices. -* Wait for network availability. -* Set SSH host keys the first time the instance is booted. -* Set the `boto` config for using Google Cloud Storage. -* Create the defaults configuration file. - -The defaults configuration file incorporates any user provided setting in -`/etc/default/instance_configs.cfg.template` and does not override other -conflicting settings. This allows package updates without overriding user -configuration. - -## Metadata Scripts - -Metadata scripts implement support for running user provided -[startup scripts](https://cloud.google.com/compute/docs/startupscript) and -[shutdown scripts](https://cloud.google.com/compute/docs/shutdownscript). The -guest support for metadata scripts is implemented in Python with the following -design details. - -* Metadata scripts are executed in a shell. -* If multiple metadata keys are specified (e.g. `startup-script` and - `startup-script-url`) both are executed. -* If multiple metadata keys are specified (e.g. `startup-script` and - `startup-script-url`) a URL is executed first. -* The exit status of a metadata script is logged after completed execution. - -## Configuration - -Users of Google provided images may configure the guest environment behaviors -using a configuration file. To make configuration changes, add settings to -`/etc/default/instance_configs.cfg.template`. If you are attempting to change -the behavior of a running instance, run `/usr/bin/google_instance_setup` before -reloading the affected daemons. 
- -Linux distributions looking to include their own defaults can specify settings -in `/etc/default/instance_configs.cfg.distro`. These settings will not override -`/etc/default/instance_configs.cfg.template`. This enables distribution settings -that do not override user configuration during package update. - -The following are valid user configuration options. - -Section | Option | Value ------------------ | ---------------------- | ----- -Accounts | deprovision\_remove | `true` makes deprovisioning a user destructive. -Accounts | groups | Comma separated list of groups for newly provisioned users. -Accounts | useradd\_cmd | Command string to create a new user. -Accounts | userdel\_cmd | Command string to delete a user. -Accounts | usermod\_cmd | Command string to modify a user's groups. -Accounts | gpasswd\_add\_cmd | Command string to add a user to a group. -Accounts | gpasswd\_remove\_cmd | Command string to remove a user from a group. -Accounts | groupadd\_cmd | Command string to create a new group. -Daemons | accounts\_daemon | `false` disables the accounts daemon. -Daemons | clock\_skew\_daemon | `false` disables the clock skew daemon. -Daemons | ip\_forwarding\_daemon | `false` (deprecated) skips IP forwarding. -Daemons | network\_daemon | `false` disables the network daemon. -InstanceSetup | host\_key\_types | Comma separated list of host key types to generate. -InstanceSetup | optimize\_local\_ssd | `false` prevents optimizing for local SSD. -InstanceSetup | network\_enabled | `false` skips instance setup functions that require metadata. -InstanceSetup | set\_boto\_config | `false` skips setting up a `boto` config. -InstanceSetup | set\_host\_keys | `false` skips generating host keys on first boot. -InstanceSetup | set\_multiqueue | `false` skips multiqueue driver support. -IpForwarding | ethernet\_proto\_id | Protocol ID string for daemon added routes. -IpForwarding | ip\_aliases | `false` disables setting up alias IP routes. 
-IpForwarding | target\_instance\_ips | `false` disables internal IP address load balancing. -MetadataScripts | default\_shell | String with the default shell to execute scripts. -MetadataScripts | run\_dir | String base directory where metadata scripts are executed. -MetadataScripts | startup | `false` disables startup script execution. -MetadataScripts | shutdown | `false` disables shutdown script execution. -NetworkInterfaces | setup | `false` skips network interface setup. -NetworkInterfaces | ip\_forwarding | `false` skips IP forwarding. -NetworkInterfaces | dhcp\_command | String path for alternate dhcp executable used to enable network interfaces. - -Setting `network_enabled` to `false` will skip setting up host keys and the -`boto` config in the guest. The setting may also prevent startup and shutdown -script execution. diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/setup.cfg gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/setup.cfg --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/setup.cfg 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/setup.cfg 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -[bdist_wheel] -universal=1 diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/setup.py gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/setup.py --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/setup.py 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/setup.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,74 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Create a Python package of the Linux guest environment.""" - -import glob -import sys - -import setuptools - -install_requires = ['setuptools'] -if sys.version_info < (3, 0): - install_requires += ['boto'] -if sys.version_info >= (3, 7): - install_requires += ['distro'] - -setuptools.setup( - author='Google Compute Engine Team', - author_email='gc-team@google.com', - description='Google Compute Engine', - include_package_data=True, - install_requires=install_requires, - license='Apache Software License', - long_description='Google Compute Engine guest environment.', - name='google-compute-engine', - packages=setuptools.find_packages(), - url='https://github.com/GoogleCloudPlatform/compute-image-packages', - version='20190801.00', - # Entry points create scripts in /usr/bin that call a function. 
- entry_points={ - 'console_scripts': [ - 'google_accounts_daemon=google_compute_engine.accounts.accounts_daemon:main', - 'google_clock_skew_daemon=google_compute_engine.clock_skew.clock_skew_daemon:main', - 'google_instance_setup=google_compute_engine.instance_setup.instance_setup:main', - 'google_network_daemon=google_compute_engine.networking.network_daemon:main', - 'google_metadata_script_runner=google_compute_engine.metadata_scripts.script_manager:main', - ], - }, - classifiers=[ - 'Development Status :: 5 - Production/Stable', - 'Environment :: No Input/Output (Daemon)', - 'Intended Audience :: Developers', - 'Intended Audience :: System Administrators', - 'License :: OSI Approved :: Apache Software License', - 'Operating System :: POSIX :: Linux', - 'Programming Language :: Python', - 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 2.6', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3.2', - 'Programming Language :: Python :: 3.3', - 'Programming Language :: Python :: 3.4', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Topic :: Internet', - 'Topic :: Software Development :: Libraries :: Python Modules', - 'Topic :: System :: Installation/Setup', - 'Topic :: System :: Systems Administration', - ], -) diff -Nru gce-compute-image-packages-20190801/packages/python-google-compute-engine/tox.ini gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/tox.ini --- gce-compute-image-packages-20190801/packages/python-google-compute-engine/tox.ini 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packages/python-google-compute-engine/tox.ini 1970-01-01 00:00:00.000000000 +0000 @@ -1,67 +0,0 @@ -[tox] -envlist = py26,py27,py32,py33,py34,py35,py36,py37,pypy,pypy3 - -[testenv] -deps = - py{35,py36,py37}: distro - setuptools>=20 - pytest - 
pytest-cov - mock - unittest2 -commands = - py.test \ - -v \ - --cov \ - --cov-config=.coveragerc \ - {posargs:.} -setenv = - # Global configs cause unit tests to break. - # Issue: travis-ci/travis-ci#5246 - BOTO_CONFIG=/tmp/fake - -[testenv:py32] -# Coverage doesn't support py32, so run the tests without coverage reporting. -deps = - pytest - mock -commands = - py.test \ - -v \ - {posargs:.} - -[testenv:lint] -deps = - flake8 - flake8-import-order - setuptools>=20 -commands = - flake8 --import-order-style=google - -[flake8] -# Temporarly disabling warnings until code is flake8 compliant. -# E111 indentation is not a multiple of four -# E114 indentation is not a multiple of four (comment) -# E121 continuation line under-indented for hanging indent -# E125 continuation line with same indent as next logical line -# E128 continuation line under-indented for visual indent -# E129 visually indented line with same indent as next logical line -# E226 missing whitespace around arithmetic operator -# E231 missing whitespace after ',' -# E261 at least two spaces before inline comment -# E302 expected 2 blank lines, found 1 -# E501 line too long -# F401 imported but unused -ignore = E111,E114,E121,E125,E128,E129,E226,E231,E261,E302,E501,F401,W503 -exclude = - .git, - .tox, - __pycache__, - dist, - env - -# This section configures tox-travis. -# See https://github.com/ryanhiebert/tox-travis#advanced-configuration -[travis] -python = - 3.6: py36, lint diff -Nru gce-compute-image-packages-20190801/packaging/debian/changelog gce-compute-image-packages-20201222.00/packaging/debian/changelog --- gce-compute-image-packages-20190801/packaging/debian/changelog 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packaging/debian/changelog 2020-12-07 19:55:14.000000000 +0000 @@ -0,0 +1,260 @@ +google-compute-engine (1:20200113.00-g1) stable; urgency=medium + + * Remove python daemon configs. + * Add dependency on guest-agent package. 
+
+ -- Google Cloud Team  Mon, 13 Jan 2020 13:27:55 -0700
+
+google-compute-engine (1:20190916.00-g2) stable; urgency=medium
+
+  * Update dependencies.
+
+ -- Google Cloud Team  Mon, 16 Sep 2019 15:51:24 -0700
+
+google-compute-engine (1:20190916.00-g1) stable; urgency=medium
+
+  * Bump version to match python package release.
+
+ -- Google Cloud Team  Mon, 16 Sep 2019 15:51:24 -0700
+
+google-compute-engine (1:20190905.00-g1) stable; urgency=medium
+
+  * Bump version to match python package release.
+
+ -- Google Cloud Team  Thu, 05 Sep 2019 12:00:22 -0700
+
+google-compute-engine (1:20190801.00-g1) stable; urgency=medium
+
+  * Re-enable boto config and drop writing plugin directory.
+  * Fix metadata script retrieval.
+
+ -- Google Cloud Team  Thu, 01 Aug 2019 14:06:02 -0700
+
+google-compute-engine (1:20190729.00-g1) stable; urgency=medium
+
+  * Support Google Private Access over IPv6.
+  * Switch to v1 guest attributes URL.
+
+ -- Google Cloud Team  Mon, 29 Jul 2019 10:07:29 -0700
+
+google-compute-engine (1:20190708.00-g1) stable; urgency=medium
+
+  * Drop unnecessary build and package dependencies.
+  * Log to journal and console directly from systemd service files.
+  * Update Debian build dependencies.
+
+ -- Google Cloud Team  Mon, 08 Jul 2019 10:20:15 -0700
+
+google-compute-engine (2.8.16-1) stable; urgency=low
+
+  * Fix instance setup in Python 3 environments.
+
+ -- Google Cloud Team  Wed, 22 May 2019 12:00:00 -0700
+
+google-compute-engine (2.8.15-1) stable; urgency=low
+
+  * Fix XPS settings with more than 64 vCPUs.
+
+ -- Google Cloud Team  Tue, 21 May 2019 12:00:00 -0700
+
+google-compute-engine (2.8.14-1) stable; urgency=low
+
+  * Upstart systems: only run startup scripts at boot.
+
+ -- Google Cloud Team  Tue, 16 Apr 2019 12:00:00 -0700
+
+google-compute-engine (2.8.13-1) stable; urgency=low
+
+  * Fix metadata script retrieval to support Python 3.
+ + -- Google Cloud Team Thu, 24 Jan 2019 12:00:00 -0700 + +google-compute-engine (2.8.12-1) stable; urgency=low + + * Fix two factor enablement on change. + + -- Google Cloud Team Wed, 05 Dec 2018 12:00:00 -0700 + +google-compute-image-packages (2.8.11-1) stable; urgency=low + + * Split up the gpasswd command into two commands. + * Update two factor enablement on change. + + -- Google Cloud Team Tue, 04 Dec 2018 12:00:00 -0700 + +google-compute-image-packages (2.8.10-1) stable; urgency=low + + * Fix the gpasswd command default. + + -- Google Cloud Team Fri, 30 Nov 2018 12:00:00 -0700 + +google-compute-image-packages (2.8.9-1) stable; urgency=low + + * Support enabling OS Login two factor authentication. + * Improve accounts support for FreeBSD. + * Improve SELinux support. + + -- Google Cloud Team Wed, 28 Nov 2018 12:00:00 -0700 + +google-compute-image-packages (2.8.8-1) stable; urgency=low + + * Update sudoer group membership without overriding local groups. + + -- Google Cloud Team Tue, 23 Oct 2018 12:00:00 -0700 + +google-compute-image-packages (2.8.7-1) stable; urgency=low + + * Remove users from sudoers group on removal (fixed). + + -- Google Cloud Team Thu, 18 Oct 2018 12:00:00 -0700 + +google-compute-image-packages (2.8.6-1) stable; urgency=low + + * Revert PR: Remove users from sudoers group on removal. + + -- Google Cloud Team Thu, 11 Oct 2018 12:00:00 -0700 + +google-compute-image-packages (2.8.5-1) stable; urgency=low + + * Remove users from sudoers group on removal. + * Remove gsutil dependency for metadata scripts. + + -- Google Cloud Team Thu, 05 Oct 2018 12:00:00 -0700 + +google-compute-image-packages (2.8.4-1) stable; urgency=low + + * Remove ntp dependency. + * Support Debian 10 Buster. + * Restart the network daemon if networking is restarted. + * Prevent setup of the default ethernet interface. + * Accounts daemon can now verify username is 32 characters or less. 
+
+ -- Google Cloud Team  Wed, 05 Sep 2018 12:00:00 -0700
+
+google-compute-image-packages (2.8.3-1) stable; urgency=low
+
+  * Prevent IP forwarding daemon log spam.
+  * Make default shell configurable when executing metadata scripts.
+  * Rename distro directory to distro_lib.
+
+ -- Google Cloud Team  Mon, 11 Jun 2018 12:00:00 -0700
+
+google-compute-image-packages (2.8.2-1) stable; urgency=low
+
+  * Prevent delay in configuring IP forwarding routes.
+  * Improve instance setup support for FreeBSD.
+
+ -- Google Cloud Team  Thu, 10 May 2018 12:00:00 -0700
+
+google-compute-image-packages (2.8.1-1) stable; urgency=low
+
+  * Improve OS Login disablement.
+
+ -- Google Cloud Team  Fri, 04 May 2018 12:00:00 -0700
+
+google-compute-image-packages (2.8.0-1) stable; urgency=low
+
+  * Create a new network daemon.
+  * Refactor the IP forwarding daemon and network setup.
+  * Improvements for using NSS cache in the accounts daemon.
+
+ -- Google Cloud Team  Tue, 01 May 2018 12:00:00 -0700
+
+google-compute-image-packages (2.7.7-1) stable; urgency=low
+
+  * Add support for NSS cache in OS Login.
+
+ -- Google Cloud Team  Thu, 08 Mar 2018 12:00:00 -0700
+
+google-compute-image-packages (2.7.6-1) stable; urgency=low
+
+  * Add distro specific logic.
+
+ -- Google Cloud Team  Wed, 21 Feb 2018 12:00:00 -0700
+
+google-compute-image-packages (2.7.5-2) stable; urgency=low
+
+  * Fix dependencies for syslog.
+
+ -- Google Cloud Team  Tue, 06 Feb 2018 12:00:00 -0700
+
+google-compute-image-packages (2.7.5-1) stable; urgency=low
+
+  * Revert hostname setting change in Debian.
+
+ -- Google Cloud Team  Mon, 29 Jan 2018 12:00:00 -0700
+
+google-compute-image-packages (2.7.4-1) stable; urgency=low
+
+  * Fix hostname setting in Debian.
+
+ -- Google Cloud Team  Mon, 29 Jan 2018 12:00:00 -0700
+
+google-compute-image-packages (2.7.3-1) stable; urgency=low
+
+  * Improve hostname setting and correctly restart rsyslog.
+
+ -- Google Cloud Team  Thu, 25 Jan 2018 12:00:00 -0700
+
+google-compute-image-packages (2.7.2-2) stable; urgency=low
+
+  * Force IPv4 for apt.
+
+ -- Google Cloud Team  Wed, 13 Dec 2017 12:00:00 -0700
+
+google-compute-image-packages (2.7.2-1) stable; urgency=low
+
+  * Generate SSH host keys when none are present.
+  * Improve logging when activating OS Login.
+
+ -- Google Cloud Team  Wed, 29 Nov 2017 12:00:00 -0700
+
+google-compute-image-packages (2.7.1-1) stable; urgency=low
+
+  * Update set_hostname file name to prevent conflict.
+  * Add apt config to prevent auto-removal of google-compute-engine.
+
+ -- Google Cloud Team  Wed, 25 Oct 2017 12:00:00 -0700
+
+google-compute-image-packages (2.7.0-6) stable; urgency=low
+
+  * Linux guest environment support for OS Login.
+
+ -- Google Cloud Team  Tue, 17 Oct 2017 12:00:00 -0700
+
+google-compute-image-packages (2.6.2-1) stable; urgency=low
+
+  * Fix system hang during VM shutdown.
+
+ -- Google Cloud Team  Fri, 06 Oct 2017 12:00:00 -0700
+
+google-compute-image-packages (2.6.1-1) stable; urgency=low
+
+  * Use curl to download metadata script files for SSL certificate validation.
+  * Use netifaces for retrieving MAC address names if the import exists.
+
+ -- Google Cloud Team  Thu, 14 Sep 2017 12:00:00 -0700
+
+google-compute-image-packages (2.6.0-4) stable; urgency=low
+
+  * Fix DHCP exit hook install.
+
+ -- Google Cloud Team  Mon, 28 Aug 2017 12:00:00 -0700
+
+google-compute-image-packages (2.6.0-3) stable; urgency=low
+
+  * Add systemd preset.
+
+ -- Google Cloud Team  Fri, 25 Aug 2017 14:00:00 -0700
+
+google-compute-image-packages (2.6.0-2) stable; urgency=low
+
+  * Add DHCP exit hook script back into package.
+
+ -- Google Cloud Team  Fri, 25 Aug 2017 12:00:00 -0700
+
+google-compute-image-packages (2.6.0-1) stable; urgency=low
+
+  * New packaging.
+ + -- Google Cloud Team Mon, 27 Jun 2017 12:00:00 -0700 diff -Nru gce-compute-image-packages-20190801/packaging/debian/compat gce-compute-image-packages-20201222.00/packaging/debian/compat --- gce-compute-image-packages-20190801/packaging/debian/compat 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packaging/debian/compat 2020-12-07 19:55:14.000000000 +0000 @@ -0,0 +1 @@ +10 diff -Nru gce-compute-image-packages-20190801/packaging/debian/control gce-compute-image-packages-20201222.00/packaging/debian/control --- gce-compute-image-packages-20190801/packaging/debian/control 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packaging/debian/control 2020-12-07 19:55:14.000000000 +0000 @@ -0,0 +1,36 @@ +Source: google-compute-engine +Section: admin +Priority: optional +Maintainer: Google Cloud Team +Build-Depends: debhelper (>= 10) +Standards-Version: 3.9.8 +Homepage: https://github.com/GoogleCloudPlatform/compute-image-packages + +Package: google-compute-engine +Architecture: all +Depends: google-compute-engine-oslogin, + google-guest-agent, + nvme-cli, + ${misc:Depends} +Recommends: rsyslog | system-log-daemon +Provides: irqbalance +Conflicts: google-compute-engine-jessie, + google-compute-engine-init-jessie, + google-config-jessie, + google-compute-engine-stretch, + google-compute-engine-init-stretch, + google-config-stretch, + google-compute-daemon, + google-startup-scripts, + irqbalance +Replaces: google-compute-engine-jessie, + google-compute-engine-init-jessie, + google-config-jessie, + google-compute-engine-stretch, + google-compute-engine-init-stretch, + google-config-stretch, + google-compute-daemon, + google-startup-scripts +Description: Google Compute Engine guest environment. + This package contains scripts and configuration files for + features specific to the Google Compute Engine cloud environment. 
diff -Nru gce-compute-image-packages-20190801/packaging/debian/copyright gce-compute-image-packages-20201222.00/packaging/debian/copyright
--- gce-compute-image-packages-20190801/packaging/debian/copyright	1970-01-01 00:00:00.000000000 +0000
+++ gce-compute-image-packages-20201222.00/packaging/debian/copyright	2020-12-07 19:55:14.000000000 +0000
@@ -0,0 +1,27 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: google-compute-engine
+Upstream-Contact: gc-team@google.com
+
+Files: *
+Copyright: Copyright 2020 Google Inc.
+License: Apache-2.0
+
+Files: debian/*
+Copyright: Copyright 2020 Google Inc.
+License: Apache-2.0
+
+License: Apache-2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ .
+ On Debian systems, the complete text of the Apache version 2.0 license
+ can be found in "/usr/share/common-licenses/Apache-2.0".
diff -Nru gce-compute-image-packages-20190801/packaging/debian/google-compute-engine.links gce-compute-image-packages-20201222.00/packaging/debian/google-compute-engine.links --- gce-compute-image-packages-20190801/packaging/debian/google-compute-engine.links 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packaging/debian/google-compute-engine.links 2020-12-07 19:55:14.000000000 +0000 @@ -0,0 +1 @@ +usr/bin/google_set_hostname etc/dhcp/dhclient-exit-hooks.d/google_set_hostname diff -Nru gce-compute-image-packages-20190801/packaging/debian/install gce-compute-image-packages-20201222.00/packaging/debian/install --- gce-compute-image-packages-20190801/packaging/debian/install 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packaging/debian/install 2020-12-07 19:55:14.000000000 +0000 @@ -0,0 +1,6 @@ +etc/apt/apt.conf.d/* +etc/modprobe.d/* +etc/rsyslog.d/* +etc/sysctl.d/* +lib/udev/rules.d/* +usr/bin/* diff -Nru gce-compute-image-packages-20190801/packaging/debian/preinst gce-compute-image-packages-20201222.00/packaging/debian/preinst --- gce-compute-image-packages-20190801/packaging/debian/preinst 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/packaging/debian/preinst 2020-12-07 19:55:14.000000000 +0000 @@ -0,0 +1,30 @@ +#!/bin/sh +# Copyright 2018 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+set -e
+if [ "$1" = upgrade ]; then
+  # Remove old services if they exist on upgrade.
+  for svc in google-ip-forwarding-daemon google-network-setup \
+    google-network-daemon google-accounts-daemon google-clock-skew-daemon \
+    google-instance-setup; do
+    if systemctl is-enabled ${svc}.service >/dev/null 2>&1; then
+      systemctl --no-reload disable ${svc}.service >/dev/null 2>&1 || :
+      if [ -d /run/systemd/system ]; then
+        systemctl stop ${svc}.service >/dev/null 2>&1 || :
+      fi
+    fi
+  done
+  systemctl daemon-reload >/dev/null 2>&1 || :
+fi
diff -Nru gce-compute-image-packages-20190801/packaging/debian/rules gce-compute-image-packages-20201222.00/packaging/debian/rules
--- gce-compute-image-packages-20190801/packaging/debian/rules	1970-01-01 00:00:00.000000000 +0000
+++ gce-compute-image-packages-20201222.00/packaging/debian/rules	2020-12-07 19:55:14.000000000 +0000
@@ -0,0 +1,7 @@
+#!/usr/bin/make -f
+
+%:
+	dh $@
+
+override_dh_install:
+	dh_install --sourcedir=src
diff -Nru gce-compute-image-packages-20190801/packaging/debian/source/format gce-compute-image-packages-20201222.00/packaging/debian/source/format
--- gce-compute-image-packages-20190801/packaging/debian/source/format	1970-01-01 00:00:00.000000000 +0000
+++ gce-compute-image-packages-20201222.00/packaging/debian/source/format	2020-12-07 19:55:14.000000000 +0000
@@ -0,0 +1 @@
+3.0 (quilt)
diff -Nru gce-compute-image-packages-20190801/packaging/google-compute-engine.spec gce-compute-image-packages-20201222.00/packaging/google-compute-engine.spec
--- gce-compute-image-packages-20190801/packaging/google-compute-engine.spec	1970-01-01 00:00:00.000000000 +0000
+++ gce-compute-image-packages-20201222.00/packaging/google-compute-engine.spec	2020-12-07 19:55:14.000000000 +0000
@@ -0,0 +1,80 @@
+# Copyright 2020 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# For EL7, if building on CentOS, override dist to be el7. +%if 0%{?rhel} == 7 + %define dist .el7 +%endif + +Name: google-compute-engine +Epoch: 1 +Version: %{_version} +Release: g1%{?dist} +Summary: Google Compute Engine guest environment. +License: ASL 2.0 +Url: https://github.com/GoogleCloudPlatform/compute-image-packages +Source0: %{name}_%{version}.orig.tar.gz +Requires: curl +Requires: dracut +Requires: google-compute-engine-oslogin +Requires: google-guest-agent +Requires: rsyslog +Requires: nvme-cli + +BuildArch: noarch + +# Allow other files in the source that don't end up in the package. +%define _unpackaged_files_terminate_build 0 + +%description +This package contains scripts, configuration, and init files for features +specific to the Google Compute Engine cloud environment. + +%prep +%autosetup + +%install +cp -a src/{etc,usr} %{buildroot} +install -d %{buildroot}/%{_udevrulesdir} +cp -a src/lib/udev/rules.d/* %{buildroot}/%{_udevrulesdir} + +%files +%defattr(0644,root,root,0755) +%attr(0755,-,-) %{_bindir}/* +%attr(0755,-,-) /etc/dhcp/dhclient.d/google_hostname.sh +%{_udevrulesdir}/* +%config /etc/dracut.conf.d/* +%config /etc/modprobe.d/* +%config /etc/rsyslog.d/* +%config /etc/sysctl.d/* + +%pre +if [ $1 -gt 1 ] ; then + # This is an upgrade. Stop and disable services previously owned by this + # package, if any. 
+ for svc in google-ip-forwarding-daemon google-network-setup \ + google-network-daemon google-accounts-daemon google-clock-skew-daemon \ + google-instance-setup; do + if systemctl is-enabled ${svc}.service >/dev/null 2>&1; then + systemctl --no-reload disable ${svc}.service >/dev/null 2>&1 || : + if [ -d /run/systemd/system ]; then + systemctl stop ${svc}.service >/dev/null 2>&1 || : + fi + fi + done + systemctl daemon-reload >/dev/null 2>&1 || : +fi + +%post +dracut --force diff -Nru gce-compute-image-packages-20190801/README.md gce-compute-image-packages-20201222.00/README.md --- gce-compute-image-packages-20190801/README.md 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/README.md 2020-12-07 19:55:14.000000000 +0000 @@ -1,213 +1,6 @@ ## Linux Guest Environment for Google Compute Engine -[![Build Status](https://travis-ci.org/GoogleCloudPlatform/compute-image-packages.svg?branch=master)](https://travis-ci.org/GoogleCloudPlatform/compute-image-packages) [![codecov](https://codecov.io/gh/GoogleCloudPlatform/compute-image-packages/branch/master/graph/badge.svg)](https://codecov.io/gh/GoogleCloudPlatform/compute-image-packages) - -This repository stores the collection of packages installed on Google supported -Compute Engine [images](https://cloud.google.com/compute/docs/images). - -**Table of Contents** - -* [Background](#background) -* [Packaging](#packaging) - * [Version Updates](#version-updates) - * [Package Distribution](#package-distribution) -* [Troubleshooting](#troubleshooting) -* [Contributing](#contributing) -* [License](#license) - -## Background - -The Linux guest environment denotes the Google provided configuration and -tooling inside of a [Google Compute Engine](https://cloud.google.com/compute/) -(GCE) virtual machine. The -[metadata server](https://cloud.google.com/compute/docs/metadata) is a -communication channel for transferring information from a client into the guest. 
-The Linux guest environment includes a set of scripts and daemons (long-running -processes) that read the content of the metadata server to make a virtual -machine run properly on our platform. - -## Packaging - -The guest Python code is packaged as a -[compliant PyPI Python package](https://packaging.python.org/) -that can be used as a library or run independently. In addition to the Python -package, deb and rpm packages are created with appropriate init configuration -for supported GCE distros. The packages are targeted towards distribution -provided Python versions. - -Distro | Package Type | Python Version | Init System ------------- | ------------ | -------------- | ----------- -SLES 12 | rpm | 2.7 | systemd -SLES 15 | rpm | 3.6 | systemd -CentOS 6 | rpm | 2.6 | upstart -CentOS 7 | rpm | 2.7 | systemd -RHEL 6 | rpm | 2.6 | upstart -RHEL 7 | rpm | 2.7 | systemd -RHEL 8 | rpm | 3.6 | systemd -Ubuntu 14.04 | deb | 2.7 | upstart -Ubuntu 16.04 | deb | 3.5 or 2.7 | systemd -Ubuntu 18.04 | deb | 3.6 | systemd -Ubuntu 19.04 | deb | 3.7 | systemd -Debian 9 | deb | 3.5 or 2.7 | systemd -Debian 10 | deb | 3.7 | systemd - -We build the following packages for the Linux guest environment. - -* `google-compute-engine` - * System init scripts (systemd, upstart, or sysvinit). - * Includes udev rules, sysctl rules, rsyslog configs, dhcp configs for - hostname setting. - * Entry point scripts created by the Python package located in `/usr/bin`. - * Includes bash scripts used by `instance_setup`. -* `python-google-compute-engine` - * The Python 2 package for Linux daemons and libraries. -* `python3-google-compute-engine` - * The Python 3 package for Linux daemons and libraries. -* `google-compute-engine-oslogin` - * The PAM and NSS modules for [OS Login](https://cloud.google.com/compute/docs/oslogin/) -* `gce-disk-expand` - * The on-boot resize scripts for root partition. - -The package sources (RPM spec files and Debian packaging directories) are also -included in this project. 
There are also [Daisy](https://github.com/GoogleCloudPlatform/compute-image-tools/tree/master/daisy) -workflows for spinning up GCE VM's to automatically build the packages for -Debian, Red Hat, and CentOS. See the [README](packaging/README.md) in the -packaging directory for more details. - -#### Version Updates - -Versions are described as 1:YYYYMMDD.NN-gN, meaning epoch 1 to denote from a -distro maintained package which will be 0, a date string formatted as year, -month, day, an incrementing minor release, and gN representing the Google -package release. Debian, Ubuntu, and SUSE maintain distro packages which may be -out of date, have different versioning, or naming. - -The method for making version updates differs by package. - -* All packages need the `VERSION` variable set in the `setup_{deb,rpm}.sh` build - scripts. -* All packages need the `debian/changelog` file updated. Please use `dch(1)` to - update it. -* `python-google-compute-engine` additionally needs the version specified in - `setup.py`. This is used for entry points through the Python egg and PyPI. -* `google-compute-engine-oslogin` needs the version also updated in the - `Makefile`. - -#### Package Distribution - -The deb and rpm packages are published to Google Cloud repositories. Debian, -CentOS, and RHEL use these repositories to install and update the -`google-compute-engine`, `google-compute-engine-oslogin` and -`python-google-compute-engine` (and `python3-google-compute-engine` for Python -3) packages. If you are creating a custom image, you can also use these -repositories in your image. 
- -**For Debian, run the following commands as root:** - -Add the public repo key to your system: -``` -curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - -``` - -Add a source list file `/etc/apt/sources.list.d/google-cloud.list` and change -`DIST` to either `stretch` for Debian 9 or `buster` for Debian 10: -``` -DIST=stretch -sudo tee /etc/apt/sources.list.d/google-cloud.list << EOM -deb http://packages.cloud.google.com/apt google-compute-engine-${DIST}-stable main -deb http://packages.cloud.google.com/apt google-cloud-packages-archive-keyring-${DIST} main -EOM -``` - -Install the packages to maintain the public key over time: -``` -sudo apt update; sudo apt install -y google-cloud-packages-archive-keyring -``` - -You are then able to install any of the packages from this repo. - -**For RedHat based distributions, run the following commands as root:** - -Add the yum repo to a repo file `/etc/yum.repos.d/google-cloud.repo` for EL6, -EL7, or EL8. Change `DIST` to either 6, 7, or 8 respectively: -``` -DIST=7 -tee /etc/yum.repos.d/google-cloud.repo << EOM -[google-compute-engine] -name=Google Compute Engine -baseurl=https://packages.cloud.google.com/yum/repos/google-compute-engine-el${DIST}-x86_64-stable -enabled=1 -gpgcheck=1 -repo_gpgcheck=1 -gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg - https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg -EOM -``` - -You are then able to install any of the packages from this repo. 
- -## Troubleshooting - -**Deprecated Packages** - -Deprecated Package | Replacement ------------------------------------- | --------------------------------------------------------- -`google-compute-engine-jessie` | `google-compute-engine` and `python-google-compute-engine` -`google-compute-engine-stretch` | `google-compute-engine` and `python-google-compute-engine` -`google-compute-engine-init` | `google-compute-engine` -`google-compute-engine-init-jessie` | `google-compute-engine` -`google-compute-engine-init-stretch` | `google-compute-engine` -`google-config` | `google-compute-engine` -`google-config-jessie` | `google-compute-engine` -`google-config-stretch` | `google-compute-engine` -`google-compute-daemon` | `python-google-compute-engine` -`google-startup-scripts` | `google-compute-engine` - -**An old CentOS 6 image fails to install the packages with an error on SCL** - -CentOS 6 images prior to `v20160526` may fail to install the package with -the error: -``` -http://mirror.centos.org/centos/6/SCL/x86_64/repodata/repomd.xml: [Errno 14] PYCURL ERROR 22 - "The requested URL returned error: 404 Not Found" -``` - -Remove the stale repository file: -`sudo rm -f /etc/yum.repos.d/CentOS-SCL.repo` - -**On some CentOS or RHEL 6 systems, extraneous python egg directories can cause -the python daemons to fail.** - -In `/usr/lib/python2.6/site-packages` look for -`google_compute_engine-2.4.1-py27.egg-info` directories and -`google_compute_engine-2.5.2.egg-info` directories and delete them if you run -into this problem. - -**Using boto with virtualenv** - -Specific to running `boto` inside of a Python -[`virtualenv`](http://docs.python-guide.org/en/latest/dev/virtualenvs/), -virtual environments are isolated from system site-packages. This includes the -installed Linux guest environment libraries that are used to configure `boto` -credentials. There are two recommended solutions: - -* Create a virtual environment with `virtualenv venv --system-site-packages`. 
-* Install `boto` via the Linux guest environment PyPI package using - `pip install google-compute-engine`. - -## Contributing - -Have a patch that will benefit this project? Awesome! Follow these steps to have -it accepted. - -1. Please sign our [Contributor License Agreement](CONTRIB.md). -1. Fork this Git repository and make your changes. -1. Create a Pull Request against the - [development](https://github.com/GoogleCloudPlatform/compute-image-packages/tree/development) - branch. -1. Incorporate review feedback to your changes. -1. Accepted! - -## License - -All files in this repository are under the -[Apache License, Version 2.0](LICENSE) unless noted otherwise. +This repository holds the sources and packaging artifacts for the +`google-compute-engine` package. This package contains configuration files and +scripts used to support the Google Compute Engine guest environment, and also +depends on the other packages needed to provide all guest functionality. diff -Nru gce-compute-image-packages-20190801/src/etc/apt/apt.conf.d/01autoremove-gce gce-compute-image-packages-20201222.00/src/etc/apt/apt.conf.d/01autoremove-gce --- gce-compute-image-packages-20190801/src/etc/apt/apt.conf.d/01autoremove-gce 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/src/etc/apt/apt.conf.d/01autoremove-gce 2020-12-07 19:55:14.000000000 +0000 @@ -0,0 +1,10 @@ +APT +{ + NeverAutoRemove + { + "gce-compute-image-packages.*"; + "google-compute-engine.*"; + "python-google-compute-engine.*"; + "python3-google-compute-engine.*"; + }; +}; diff -Nru gce-compute-image-packages-20190801/src/etc/apt/apt.conf.d/99ipv4-only gce-compute-image-packages-20201222.00/src/etc/apt/apt.conf.d/99ipv4-only --- gce-compute-image-packages-20190801/src/etc/apt/apt.conf.d/99ipv4-only 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/src/etc/apt/apt.conf.d/99ipv4-only 2020-12-07 19:55:14.000000000 +0000 @@ -0,0 +1,2 @@ +# Force IPv4 for Apt. 
+Acquire::ForceIPv4 "true"; diff -Nru gce-compute-image-packages-20190801/src/etc/dhcp/dhclient.d/google_hostname.sh gce-compute-image-packages-20201222.00/src/etc/dhcp/dhclient.d/google_hostname.sh --- gce-compute-image-packages-20190801/src/etc/dhcp/dhclient.d/google_hostname.sh 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/src/etc/dhcp/dhclient.d/google_hostname.sh 2020-12-07 19:55:14.000000000 +0000 @@ -0,0 +1,21 @@ +#!/bin/bash +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +google_hostname_config() { + google_set_hostname +} +google_hostname_restore() { + : +} diff -Nru gce-compute-image-packages-20190801/src/etc/dracut.conf.d/gce.conf gce-compute-image-packages-20201222.00/src/etc/dracut.conf.d/gce.conf --- gce-compute-image-packages-20190801/src/etc/dracut.conf.d/gce.conf 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/src/etc/dracut.conf.d/gce.conf 2020-12-07 19:55:14.000000000 +0000 @@ -0,0 +1,2 @@ +# Include NVMe driver in initrd to boot on NVMe devices. 
+force_drivers+="nvme" diff -Nru gce-compute-image-packages-20190801/src/etc/modprobe.d/gce-blacklist.conf gce-compute-image-packages-20201222.00/src/etc/modprobe.d/gce-blacklist.conf --- gce-compute-image-packages-20190801/src/etc/modprobe.d/gce-blacklist.conf 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/src/etc/modprobe.d/gce-blacklist.conf 2020-12-07 19:55:14.000000000 +0000 @@ -0,0 +1,5 @@ +# nouveau does not work with GCE GPU's. +blacklist nouveau + +# GCE does not have a floppy device. +blacklist floppy diff -Nru gce-compute-image-packages-20190801/src/etc/rsyslog.d/90-google.conf gce-compute-image-packages-20201222.00/src/etc/rsyslog.d/90-google.conf --- gce-compute-image-packages-20190801/src/etc/rsyslog.d/90-google.conf 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/src/etc/rsyslog.d/90-google.conf 2020-12-07 19:55:14.000000000 +0000 @@ -0,0 +1,6 @@ +# Google Compute Engine default console logging. +# +# daemon: logging from Google provided daemons. +# kern: logging information in case of an unexpected crash during boot. +# +daemon,kern.* /dev/console diff -Nru gce-compute-image-packages-20190801/src/etc/sysctl.d/60-gce-network-security.conf gce-compute-image-packages-20201222.00/src/etc/sysctl.d/60-gce-network-security.conf --- gce-compute-image-packages-20190801/src/etc/sysctl.d/60-gce-network-security.conf 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/src/etc/sysctl.d/60-gce-network-security.conf 2020-12-07 19:55:14.000000000 +0000 @@ -0,0 +1,58 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Google-recommended kernel parameters + +# Turn on SYN-flood protections. Starting with 2.6.26, there is no loss +# of TCP functionality/features under normal conditions. When flood +# protections kick in under high unanswered-SYN load, the system +# should remain more stable, with a trade off of some loss of TCP +# functionality/features (e.g. TCP Window scaling). +net.ipv4.tcp_syncookies=1 + +# Ignore source-routed packets +net.ipv4.conf.all.accept_source_route=0 +net.ipv4.conf.default.accept_source_route=0 + +# Ignore ICMP redirects from non-GW hosts +net.ipv4.conf.all.accept_redirects=0 +net.ipv4.conf.default.accept_redirects=0 +net.ipv4.conf.all.secure_redirects=1 +net.ipv4.conf.default.secure_redirects=1 + +# Don't pass traffic between networks or act as a router +net.ipv4.ip_forward=0 +net.ipv4.conf.all.send_redirects=0 +net.ipv4.conf.default.send_redirects=0 + +# Turn on Source Address Verification in all interfaces to +# prevent some spoofing attacks. +net.ipv4.conf.all.rp_filter=1 +net.ipv4.conf.default.rp_filter=1 + +# Ignore ICMP broadcasts to avoid participating in Smurf attacks +net.ipv4.icmp_echo_ignore_broadcasts=1 + +# Ignore bad ICMP errors +net.ipv4.icmp_ignore_bogus_error_responses=1 + +# Log spoofed, source-routed, and redirect packets +net.ipv4.conf.all.log_martians=1 +net.ipv4.conf.default.log_martians=1 + +# Addresses of mmap base, heap, stack and VDSO page are randomized +kernel.randomize_va_space=2 + +# Reboot the machine soon after a kernel panic. 
+kernel.panic=10 diff -Nru gce-compute-image-packages-20190801/src/lib/udev/google_nvme_id gce-compute-image-packages-20201222.00/src/lib/udev/google_nvme_id --- gce-compute-image-packages-20190801/src/lib/udev/google_nvme_id 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/src/lib/udev/google_nvme_id 2020-12-07 19:55:14.000000000 +0000 @@ -0,0 +1,245 @@ +#!/bin/bash
+# Copyright 2020 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Used to generate symlinks for PD-NVMe devices using the disk names reported by
+# the metadata server
+
+# Locations of the script's dependencies
+readonly nvme_cli_bin=/usr/sbin/nvme
+
+# Bash regex to parse device paths and controller identification
+readonly NAMESPACE_NUMBER_REGEX="/dev/nvme[[:digit:]]+n([[:digit:]]+).*"
+readonly PARTITION_NUMBER_REGEX="/dev/nvme[[:digit:]]+n[[:digit:]]+p([[:digit:]]+)"
+readonly PD_NVME_REGEX="sn[[:space:]]+:[[:space:]]+nvme_card-pd"
+
+# Globals used to generate the symlinks for a PD-NVMe disk. These are populated
+# by the identify_pd_disk function and exported for consumption by udev rules.
+ID_SERIAL=''
+ID_SERIAL_SHORT=''
+
+#######################################
+# Helper function to log an error message to stderr.
+# Globals: +# None +# Arguments: +# String to print as the log message +# Outputs: +# Writes error to STDERR +####################################### +function err() { + echo "[$(date +'%Y-%m-%dT%H:%M:%S%z')]: $*" >&2 +} + +####################################### +# Retrieves the device name for an NVMe namespace using nvme-cli. +# Globals: +# Uses nvme_cli_bin +# Arguments: +# The path to the nvme namespace (/dev/nvme0n?) +# Outputs: +# The device name parsed from the JSON in the vendor ext of the ns-id command. +# Returns: +# 0 if the device name for the namespace could be retrieved, 1 otherwise +####################################### +function get_namespace_device_name() { + local nvme_json + nvme_json="$("$nvme_cli_bin" id-ns -b "$1" | xxd -p -seek 384 | xxd -p -r)" + if [[ $? -ne 0 ]]; then + return 1 + fi + + if [[ -z "$nvme_json" ]]; then + err "NVMe Vendor Extension disk information not present" + return 1 + fi + + local device_name + device_name="$(echo "$nvme_json" | grep device_name | sed -e 's/.*"device_name":[ \t]*"\([a-zA-Z0-9_-]\+\)".*/\1/')" + + # Error if our device name is empty + if [[ -z "$device_name" ]]; then + err "Empty name" + return 1 + fi + + echo "$device_name" + return 0 +} + +####################################### +# Retrieves the nsid for an NVMe namespace +# Globals: +# None +# Arguments: +# The path to the nvme namespace (/dev/nvme0n*) +# Outputs: +# The namespace number/id +# Returns: +# 0 if the namespace id could be retrieved, 1 otherwise +####################################### +function get_namespace_number() { + local dev_path="$1" + local namespace_number + if [[ "$dev_path" =~ $NAMESPACE_NUMBER_REGEX ]]; then + namespace_number="${BASH_REMATCH[1]}" + else + return 1 + fi + + echo "$namespace_number" + return 0 +} + +####################################### +# Retrieves the partition number for a device path if it exists +# Globals: +# None +# Arguments: +# The path to the device partition (/dev/nvme0n*p*) +# Outputs: +# 
The value after 'p' in the device path, or an empty string if the path has +# no partition. +####################################### +function get_partition_number() { + local dev_path="$1" + local partition_number + if [[ "$dev_path" =~ $PARTITION_NUMBER_REGEX ]]; then + partition_number="${BASH_REMATCH[1]}" + echo "$partition_number" + else + echo '' + fi + return 0 +} + +####################################### +# Generates a symlink for a PD-NVMe device using the metadata's disk name. +# Primarily used for testing but can be used if the script is directly invoked. +# Globals: +# Uses ID_SERIAL_SHORT (can be populated by identify_pd_disk) +# Arguments: +# The device path for the disk +####################################### +function gen_symlink() { + local dev_path="$1" + local partition_number="$(get_partition_number "$dev_path")" + + if [[ -n "$partition_number" ]]; then + ln -s "$dev_path" /dev/disk/by-id/google-"$ID_SERIAL_SHORT"-part"$partition_number" > /dev/null 2>&1 + else + ln -s "$dev_path" /dev/disk/by-id/google-"$ID_SERIAL_SHORT" > /dev/null 2>&1 + fi + + return 0 +} + +####################################### +# Populates the ID_* global variables with a disk's device name and namespace +# Globals: +# Populates ID_SERIAL_SHORT, and ID_SERIAL +# Arguments: +# The device path for the disk +# Returns: +# 0 on success and 1 if an error occurrs +####################################### +function identify_pd_disk() { + local dev_path="$1" + local dev_name + dev_name="$(get_namespace_device_name "$dev_path")" + if [[ $? -ne 0 ]]; then + return 1 + fi + + ID_SERIAL_SHORT="$dev_name" + ID_SERIAL="Google_PersistentDisk_${ID_SERIAL_SHORT}" + return 0 +} + +function print_help_message() { + echo "Usage: google_nvme_id [-s] [-h] -d device_path" + echo " -d (Required): Specifies the path to generate a name" + echo " for. This needs to be a path to an nvme device or namespace" + echo " -s: Create symbolic link for the disk under /dev/disk/by-id." 
+ echo " Otherwise, the disk name will be printed to STDOUT" + echo " -h: Print this help message" +} + +function main() { + local opt_gen_symlink='false' + local device_path='' + + while getopts :d:sh flag; do + case "$flag" in + d) device_path="$OPTARG";; + s) opt_gen_symlink='true';; + h) print_help_message + return 0 + ;; + :) echo "Invalid option: ${OPTARG} requires an argument" 1>&2 + return 1 + ;; + *) return 1 + esac + done + + if [[ -z "$device_path" ]]; then + echo "Device path (-d) argument required. Use -h for full usage." 1>&2 + exit 1 + fi + + # Ensure the nvme-cli command is installed + command -v "$nvme_cli_bin" > /dev/null 2>&1 + if [[ $? -ne 0 ]]; then + err "The nvme utility (/usr/sbin/nvme) was not found. You may need to run \ +with sudo or install nvme-cli." + return 1 + fi + + # Ensure the passed device is actually an NVMe device + "$nvme_cli_bin" id-ctrl "$device_path" &>/dev/null + if [[ $? -ne 0 ]]; then + err "Passed device was not an NVMe device. (You may need to run this \ +script as root/with sudo)." + return 1 + fi + + # Detect the type of attached nvme device + local controller_id + controller_id=$("$nvme_cli_bin" id-ctrl "$device_path") + if [[ ! "$controller_id" =~ nvme_card-pd ]] ; then + err "Device is not a PD-NVMe device" + return 1 + fi + + # Fill the global variables for the id command for the given disk type + # Error messages will be printed closer to error, no need to reprint here + identify_pd_disk "$device_path" + if [[ $? -ne 0 ]]; then + return $? + fi + + # Gen symlinks or print out the globals set by the identify command + if [[ "$opt_gen_symlink" == 'true' ]]; then + gen_symlink "$device_path" + else + # These will be consumed by udev + echo "ID_SERIAL_SHORT=${ID_SERIAL_SHORT}" + echo "ID_SERIAL=${ID_SERIAL}" + fi + + return $? 
+ +} +main "$@" diff -Nru gce-compute-image-packages-20190801/src/lib/udev/rules.d/64-gce-disk-removal.rules gce-compute-image-packages-20201222.00/src/lib/udev/rules.d/64-gce-disk-removal.rules --- gce-compute-image-packages-20190801/src/lib/udev/rules.d/64-gce-disk-removal.rules 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/src/lib/udev/rules.d/64-gce-disk-removal.rules 2020-12-07 19:55:14.000000000 +0000 @@ -0,0 +1,17 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# When a disk is removed, unmount any remaining attached volumes. + +ACTION=="remove", SUBSYSTEM=="block", KERNEL=="sd*|vd*|nvme*", RUN+="/bin/sh -c '/bin/umount -fl /dev/$name && /usr/bin/logger -p daemon.warn -s WARNING: hot-removed /dev/$name that was still mounted, data may have been corrupted'" diff -Nru gce-compute-image-packages-20190801/src/lib/udev/rules.d/65-gce-disk-naming.rules gce-compute-image-packages-20201222.00/src/lib/udev/rules.d/65-gce-disk-naming.rules --- gce-compute-image-packages-20190801/src/lib/udev/rules.d/65-gce-disk-naming.rules 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/src/lib/udev/rules.d/65-gce-disk-naming.rules 2020-12-07 19:55:14.000000000 +0000 @@ -0,0 +1,34 @@ +# Copyright 2016 Google Inc. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Name the attached disks as the specified by deviceName. + +ACTION!="add|change", GOTO="gce_disk_naming_end" +SUBSYSTEM!="block", GOTO="gce_disk_naming_end" + +# SCSI naming +KERNEL=="sd*|vd*", IMPORT{program}="scsi_id --export --whitelisted -d $tempnode" + +# NVME Local SSD naming +KERNEL=="nvme*n*", ATTRS{model}=="nvme_card", PROGRAM="/bin/sh -c 'echo $((%n-1))'", ENV{ID_SERIAL_SHORT}="local-nvme-ssd-%c" +KERNEL=="nvme*", ATTRS{model}=="nvme_card", ENV{ID_SERIAL}="Google_EphemeralDisk_$env{ID_SERIAL_SHORT}" + +# NVME Persistent Disk Naming +KERNEL=="nvme*n*", ATTRS{model}=="nvme_card-pd", IMPORT{program}="google_nvme_id -d $tempnode" + +# Symlinks +KERNEL=="sd*|vd*|nvme*", ENV{DEVTYPE}=="disk", SYMLINK+="disk/by-id/google-$env{ID_SERIAL_SHORT}" +KERNEL=="sd*|vd*|nvme*", ENV{DEVTYPE}=="partition", SYMLINK+="disk/by-id/google-$env{ID_SERIAL_SHORT}-part%n" + +LABEL="gce_disk_naming_end" diff -Nru gce-compute-image-packages-20190801/src/sbin/google-dhclient-script gce-compute-image-packages-20201222.00/src/sbin/google-dhclient-script --- gce-compute-image-packages-20190801/src/sbin/google-dhclient-script 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/src/sbin/google-dhclient-script 2020-12-07 19:55:14.000000000 +0000 @@ -0,0 +1,806 @@ +#!/bin/bash +# +# dhclient-script: Network interface configuration script run by +# dhclient based on DHCP client communication +# +# Copyright (C) 
2008-2014 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +# Author(s): David Cantrell +# Jiri Popelka +# +# ---------- +# This script is a rewrite/reworking on dhclient-script originally +# included as part of dhcp-970306: +# dhclient-script for Linux. Dan Halbert, March, 1997. +# Updated for Linux 2.[12] by Brian J. Murrell, January 1999. +# Modified by David Cantrell for Fedora and RHEL +# +# This script is found in EL 7 and used to fix local routing in EL 6. +# ---------- + +PATH=/bin:/usr/bin:/sbin +# scripts in dhclient.d/ use $SAVEDIR (#833054) +SAVEDIR=/var/lib/dhclient + +LOGFACILITY="local7" +LOGLEVEL="notice" + +ETCDIR="/etc/dhcp" + +logmessage() { + msg="${1}" + logger -p ${LOGFACILITY}.${LOGLEVEL} -t "NET" "dhclient: ${msg}" +} + +eventually_add_hostnames_domain_to_search() { +# For the case when hostname for this machine has a domain that is not in domain_search list +# 1) get a hostname with `ipcalc --hostname` or `hostname` +# 2) get the domain from this hostname +# 3) add this domain to search line in resolv.conf if it's not already +# there (domain list that we have recently added there is a parameter of this function) +# We can't do this directly when generating resolv.conf in make_resolv_conf(), because +# we need to first save the resolv.conf with obtained values before we can call `ipcalc --hostname`. 
+# See bug 637763 + search="${1}" + if need_hostname; then + status=1 + if [ -n "${new_ip_address}" ]; then + eval $(/bin/ipcalc --silent --hostname ${new_ip_address} ; echo "status=$?") + elif [ -n "${new_ip6_address}" ]; then + eval $(/bin/ipcalc --silent --hostname ${new_ip6_address} ; echo "status=$?") + fi + + if [ ${status} -eq 0 ]; then + domain=$(echo $HOSTNAME | cut -s -d "." -f 2-) + fi + else + domain=$(hostname 2>/dev/null | cut -s -d "." -f 2-) + fi + + if [ -n "${domain}" ] && + [ ! "${domain}" = "localdomain" ] && + [ ! "${domain}" = "localdomain6" ] && + [ ! "${domain}" = "(none)" ] && + [[ ! "${domain}" = *\ * ]]; then + is_in="false" + for s in ${search}; do + if [ "${s}" = "${domain}" ] || + [ "${s}" = "${domain}." ]; then + is_in="true" + fi + done + + if [ "${is_in}" = "false" ]; then + # Add domain name to search list (#637763) + sed -i"" -e "s/${search}/${search} ${domain}/" /etc/resolv.conf + fi + fi +} + +make_resolv_conf() { + [ "${PEERDNS}" = "no" ] && return + + if [ "${reason}" = "RENEW" ] && + [ "${new_domain_name}" = "${old_domain_name}" ] && + [ "${new_domain_name_servers}" = "${old_domain_name_servers}" ]; then + return + fi + + if [ -n "${new_domain_name}" ] || + [ -n "${new_domain_name_servers}" ] || + [ -n "${new_domain_search}" ]; then + rscf="$(mktemp ${TMPDIR:-/tmp}/XXXXXX)" + [[ -z "${rscf}" ]] && return + echo "; generated by /usr/sbin/dhclient-script" > ${rscf} + + if [ -n "${SEARCH}" ]; then + search="${SEARCH}" + else + if [ -n "${new_domain_search}" ]; then + # Remove instaces of \032 (#450042) + search="${new_domain_search//\\032/ }" + elif [ -n "${new_domain_name}" ]; then + # Note that the DHCP 'Domain Name Option' is really just a domain + # name, and that this practice of using the domain name option as + # a search path is both nonstandard and deprecated. 
+ search="${new_domain_name}" + fi + fi + + if [ -n "${search}" ]; then + echo "search ${search}" >> $rscf + fi + + if [ -n "${RES_OPTIONS}" ]; then + echo "options ${RES_OPTIONS}" >> ${rscf} + fi + + for nameserver in ${new_domain_name_servers} ; do + echo "nameserver ${nameserver}" >> ${rscf} + done + + change_resolv_conf ${rscf} + rm -f ${rscf} + + if [ -n "${search}" ]; then + eventually_add_hostnames_domain_to_search "${search}" + fi + elif [ -n "${new_dhcp6_name_servers}" ] || + [ -n "${new_dhcp6_domain_search}" ]; then + rscf="$(mktemp ${TMPDIR:-/tmp}/XXXXXX)" + [[ -z "${rscf}" ]] && return + echo "; generated by /usr/sbin/dhclient-script" > ${rscf} + + if [ -n "${SEARCH}" ]; then + search="${SEARCH}" + else + if [ -n "${new_dhcp6_domain_search}" ]; then + search="${new_dhcp6_domain_search//\\032/ }" + fi + fi + + if [ -n "${search}" ]; then + echo "search ${search}" >> $rscf + fi + + if [ -n "${RES_OPTIONS}" ]; then + echo "options ${RES_OPTIONS}" >> ${rscf} + fi + + shopt -s nocasematch + for nameserver in ${new_dhcp6_name_servers} ; do + # If the nameserver has a link-local address + # add a (interface name) to it. + if [[ "$nameserver" =~ ^fe80:: ]] + then + zone_id="%${interface}" + else + zone_id= + fi + echo "nameserver ${nameserver}$zone_id" >> ${rscf} + done + shopt -u nocasematch + + change_resolv_conf ${rscf} + rm -f ${rscf} + + if [ -n "${search}" ]; then + eventually_add_hostnames_domain_to_search "${search}" + fi + fi +} + +exit_with_hooks() { + exit_status="${1}" + + if [ -x ${ETCDIR}/dhclient-exit-hooks ]; then + . ${ETCDIR}/dhclient-exit-hooks + fi + + exit ${exit_status} +} + +quad2num() { + if [ $# -eq 4 ]; then + let n="${1} << 24 | ${2} << 16 | ${3} << 8 | ${4}" + echo "${n}" + return 0 + else + echo "0" + return 1 + fi +} + +ip2num() { + IFS="." 
quad2num ${1} +} + +num2ip() { + let n="${1}" + let o1="(n >> 24) & 0xff" + let o2="(n >> 16) & 0xff" + let o3="(n >> 8) & 0xff" + let o4="n & 0xff" + echo "${o1}.${o2}.${o3}.${o4}" +} + +get_network_address() { +# get network address for the given IP address and (netmask or prefix) + ip="${1}" + nm="${2}" + + if [ -n "${ip}" -a -n "${nm}" ]; then + if [[ "${nm}" = *.* ]]; then + ipcalc -s -n ${ip} ${nm} | cut -d '=' -f 2 + else + ipcalc -s -n ${ip}/${nm} | cut -d '=' -f 2 + fi + fi +} + +get_prefix() { +# get prefix for the given IP address and mask + ip="${1}" + nm="${2}" + + if [ -n "${ip}" -a -n "${nm}" ]; then + ipcalc -s -p ${ip} ${nm} | cut -d '=' -f 2 + fi +} + +class_bits() { + let ip=$(IFS='.' ip2num $1) + let bits=32 + let mask='255' + for ((i=0; i <= 3; i++, 'mask<<=8')); do + let v='ip&mask' + if [ "$v" -eq 0 ] ; then + let bits-=8 + else + break + fi + done + echo $bits +} + +is_router_reachable() { + # handle DHCP servers that give us a router not on our subnet + router="${1}" + routersubnet="$(get_network_address ${router} ${new_subnet_mask})" + mysubnet="$(get_network_address ${new_ip_address} ${new_subnet_mask})" + + if [ ! "${routersubnet}" = "${mysubnet}" ]; then + ip -4 route replace ${router}/32 dev ${interface} + if [ "$?" -ne 0 ]; then + logmessage "failed to create host route for ${router}" + return 1 + fi + fi + + return 0 +} + +add_default_gateway() { + router="${1}" + + if is_router_reachable ${router} ; then + metric="" + if [ $# -gt 1 ] && [ ${2} -gt 0 ]; then + metric="metric ${2}" + fi + ip -4 route replace default via ${router} dev ${interface} ${metric} + if [ $? 
-ne 0 ]; then + logmessage "failed to create default route: ${router} dev ${interface} ${metric}" + return 1 + else + return 0 + fi + fi + + return 1 +} + +execute_client_side_configuration_scripts() { +# execute any additional client side configuration scripts we have + if [ "${1}" == "config" ] || [ "${1}" == "restore" ]; then + for f in ${ETCDIR}/dhclient.d/*.sh ; do + if [ -x ${f} ]; then + subsystem="${f%.sh}" + subsystem="${subsystem##*/}" + . ${f} + "${subsystem}_${1}" + fi + done + fi +} + +flush_dev() { +# Instead of bringing the interface down (#574568) +# explicitly clear the ARP cache and flush all addresses & routes. + ip -4 addr flush dev ${1} >/dev/null 2>&1 + ip -4 route flush dev ${1} >/dev/null 2>&1 + ip -4 neigh flush dev ${1} >/dev/null 2>&1 +} + +dhconfig() { + if [ -n "${old_ip_address}" ] && [ -n "${alias_ip_address}" ] && + [ ! "${alias_ip_address}" = "${old_ip_address}" ]; then + # possible new alias, remove old alias first + ip -4 addr del ${old_ip_address} dev ${interface} label ${interface}:0 + fi + + if [ -n "${old_ip_address}" ] && + [ ! "${old_ip_address}" = "${new_ip_address}" ]; then + # IP address changed. Delete all routes, and clear the ARP cache. + flush_dev ${interface} + fi + + if [ "${reason}" = "BOUND" ] || [ "${reason}" = "REBOOT" ] || + [ ! "${old_ip_address}" = "${new_ip_address}" ] || + [ ! "${old_subnet_mask}" = "${new_subnet_mask}" ] || + [ ! "${old_network_number}" = "${new_network_number}" ] || + [ ! "${old_broadcast_address}" = "${new_broadcast_address}" ] || + [ ! "${old_routers}" = "${new_routers}" ] || + [ ! "${old_interface_mtu}" = "${new_interface_mtu}" ]; then + ip -4 addr add ${new_ip_address}/${new_prefix} broadcast ${new_broadcast_address} dev ${interface} \ + valid_lft ${new_dhcp_lease_time} preferred_lft ${new_dhcp_lease_time} >/dev/null 2>&1 + ip link set dev ${interface} up + + # The 576 MTU is only used for X.25 and dialup connections + # where the admin wants low latency. 
Such a low MTU can cause + # problems with UDP traffic, among other things. As such, + # disallow MTUs from 576 and below by default, so that broken + # MTUs are ignored, but higher stuff is allowed (1492, 1500, etc). + if [ -n "${new_interface_mtu}" ] && [ ${new_interface_mtu} -gt 576 ]; then + ip link set dev ${interface} mtu ${new_interface_mtu} + fi + + # static routes + if [ -n "${new_classless_static_routes}" ] || + [ -n "${new_static_routes}" ]; then + if [ -n "${new_classless_static_routes}" ]; then + IFS=', |' static_routes=(${new_classless_static_routes}) + else + IFS=', |' static_routes=(${new_static_routes}) + fi + route_targets=() + + for((i=0; i<${#static_routes[@]}; i+=2)); do + target=${static_routes[$i]} + if [ -n "${new_classless_static_routes}" ]; then + if [ ${target} = "0" ]; then + # If the DHCP server returns both a Classless Static Routes option and + # a Router option, the DHCP client MUST ignore the Router option. (RFC3442) + new_routers="" + prefix="0" + else + prefix=${target%%.*} + target=${target#*.} + IFS="." target_arr=(${target}) + unset IFS + ((pads=4-${#target_arr[@]})) + for j in $(seq $pads); do + target="${target}.0" + done + + # Client MUST zero any bits in the subnet number where the corresponding bit in the mask is zero. + # In other words, the subnet number installed in the routing table is the logical AND of + # the subnet number and subnet mask given in the Classless Static Routes option. (RFC3442) + target="$(get_network_address ${target} ${prefix})" + fi + else + prefix=$(class_bits ${target}) + fi + gateway=${static_routes[$i+1]} + + # special case 0.0.0.0 to allow static routing for link-local addresses + # (including IPv4 multicast) which will not have a next-hop (#769463, #787318) + if [ "${gateway}" = "0.0.0.0" ]; then + valid_gateway=0 + scope='scope link' + else + is_router_reachable ${gateway} + valid_gateway=$? 
+ scope='' + fi + if [ ${valid_gateway} -eq 0 ]; then + metric='' + for t in ${route_targets[@]}; do + if [ ${t} = ${target} ]; then + if [ -z "${metric}" ]; then + metric=1 + else + ((metric=metric+1)) + fi + fi + done + + if [ -n "${metric}" ]; then + metric="metric ${metric}" + fi + + ip -4 route replace ${target}/${prefix} proto static via ${gateway} dev ${interface} ${metric} ${scope} + + if [ $? -ne 0 ]; then + logmessage "failed to create static route: ${target}/${prefix} via ${gateway} dev ${interface} ${metric}" + else + route_targets=(${route_targets[@]} ${target}) + fi + fi + done + fi + + # gateways + if [[ ( "${DEFROUTE}" != "no" ) && + (( -z "${GATEWAYDEV}" ) || ( "${GATEWAYDEV}" = "${interface}" )) ]]; then + if [[ ( -z "$GATEWAY" ) || + (( -n "$DHCLIENT_IGNORE_GATEWAY" ) && ( "$DHCLIENT_IGNORE_GATEWAY" = [Yy]* )) ]]; then + metric="${METRIC:-}" + let i="${METRIC:-0}" + default_routers=() + + for router in ${new_routers} ; do + added_router=- + + for r in ${default_routers[@]} ; do + if [ "${r}" = "${router}" ]; then + added_router=1 + fi + done + + if [ -z "${router}" ] || + [ "${added_router}" = "1" ] || + [ $(IFS=. 
ip2num ${router}) -le 0 ] || + [[ ( "${router}" = "${new_broadcast_address}" ) && + ( "${new_subnet_mask}" != "255.255.255.255" ) ]]; then + continue + fi + + default_routers=(${default_routers[@]} ${router}) + add_default_gateway ${router} ${metric} + let i=i+1 + metric=${i} + done + elif [ -n "${GATEWAY}" ]; then + routersubnet=$(get_network_address ${GATEWAY} ${new_subnet_mask}) + mysubnet=$(get_network_address ${new_ip_address} ${new_subnet_mask}) + + if [ "${routersubnet}" = "${mysubnet}" ]; then + ip -4 route replace default via ${GATEWAY} dev ${interface} + fi + fi + fi + + else # RENEW||REBIND - only update address lifetimes + ip -4 addr change ${new_ip_address}/${new_prefix} broadcast ${new_broadcast_address} dev ${interface} \ + valid_lft ${new_dhcp_lease_time} preferred_lft ${new_dhcp_lease_time} >/dev/null 2>&1 + fi + + if [ ! "${new_ip_address}" = "${alias_ip_address}" ] && + [ -n "${alias_ip_address}" ]; then + # Reset the alias address (fix: this should really only do this on changes) + ip -4 addr flush dev ${interface} label ${interface}:0 >/dev/null 2>&1 + ip -4 addr add ${alias_ip_address}/${alias_prefix} broadcast ${alias_broadcast_address} dev ${interface} label ${interface}:0 + ip -4 route replace ${alias_ip_address}/32 dev ${interface} + fi + + # After dhclient brings an interface UP with a new IP address, subnet mask, + # and routes, in the REBOOT/BOUND states -> search for "dhclient-up-hooks". + if [ "${reason}" = "BOUND" ] || [ "${reason}" = "REBOOT" ] || + [ ! "${old_ip_address}" = "${new_ip_address}" ] || + [ ! "${old_subnet_mask}" = "${new_subnet_mask}" ] || + [ ! "${old_network_number}" = "${new_network_number}" ] || + [ ! "${old_broadcast_address}" = "${new_broadcast_address}" ] || + [ ! "${old_routers}" = "${new_routers}" ] || + [ ! "${old_interface_mtu}" = "${new_interface_mtu}" ]; then + + if [ -x ${ETCDIR}/dhclient-${interface}-up-hooks ]; then + . 
${ETCDIR}/dhclient-${interface}-up-hooks + elif [ -x ${ETCDIR}/dhclient-up-hooks ]; then + . ${ETCDIR}/dhclient-up-hooks + fi + fi + + make_resolv_conf + + if [ -n "${new_host_name}" ] && need_hostname; then + hostname ${new_host_name} || echo "See -nc option in dhclient(8) man page." + fi + + if [[ ( "${DHCP_TIME_OFFSET_SETS_TIMEZONE}" = [yY1]* ) && + ( -n "${new_time_offset}" ) ]]; then + # DHCP option "time-offset" is requested by default and should be + # handled. The geographical zone abbreviation cannot be determined + # from the GMT offset, but the $ZONEINFO/Etc/GMT$offset file can be + # used - note: this disables DST. + ((z=new_time_offset/3600)) + ((hoursWest=$(printf '%+d' $z))) + + if (( $hoursWest < 0 )); then + # tzdata treats negative 'hours west' as positive 'gmtoff'! + ((hoursWest*=-1)) + fi + + tzfile=/usr/share/zoneinfo/Etc/GMT$(printf '%+d' ${hoursWest}) + if [ -e ${tzfile} ]; then + cp -fp ${tzfile} /etc/localtime + touch /etc/localtime + fi + fi + + execute_client_side_configuration_scripts "config" +} + +# Section 18.1.8. (Receipt of Reply Messages) of RFC 3315 says: +# The client SHOULD perform duplicate address detection on each of +# the addresses in any IAs it receives in the Reply message before +# using that address for traffic. 
+add_ipv6_addr_with_DAD() { + ip -6 addr add ${new_ip6_address}/${new_ip6_prefixlen} \ + dev ${interface} scope global valid_lft ${new_max_life} \ + preferred_lft ${new_preferred_life} + + # repeatedly test whether newly added address passed + # duplicate address detection (DAD) + for i in $(seq 5); do + sleep 1 # give the DAD some time + + addr=$(ip -6 addr show dev ${interface} \ + | grep ${new_ip6_address}/${new_ip6_prefixlen}) + + # tentative flag == DAD is still not complete + tentative=$(echo "${addr}" | grep tentative) + # dadfailed flag == address is already in use somewhere else + dadfailed=$(echo "${addr}" | grep dadfailed) + + if [ -n "${dadfailed}" ] ; then + # address was added with valid_lft/preferred_lft 'forever', remove it + ip -6 addr del ${new_ip6_address}/${new_ip6_prefixlen} dev ${interface} + exit_with_hooks 3 + fi + if [ -z "${tentative}" ] ; then + if [ -n "${addr}" ]; then + # DAD is over + return 0 + else + # address was auto-removed (or not added at all) + exit_with_hooks 3 + fi + fi + done + return 0 +} + +dh6config() { + if [ -n "${old_ip6_prefix}" ] || + [ -n "${new_ip6_prefix}" ]; then + echo Prefix ${reason} old=${old_ip6_prefix} new=${new_ip6_prefix} + exit_with_hooks 0 + fi + + case "${reason}" in + BOUND6) + if [ -z "${new_ip6_address}" ] || + [ -z "${new_ip6_prefixlen}" ]; then + exit_with_hooks 2 + fi + + add_ipv6_addr_with_DAD + + make_resolv_conf + ;; + + RENEW6|REBIND6) + if [[ -n "${new_ip6_address}" ]] && + [[ -n "${new_ip6_prefixlen}" ]]; then + if [[ ! "${new_ip6_address}" = "${old_ip6_address}" ]]; then + add_ipv6_addr_with_DAD + else # only update address lifetimes + ip -6 addr change ${new_ip6_address}/${new_ip6_prefixlen} \ + dev ${interface} scope global valid_lft ${new_max_life} \ + preferred_lft ${new_preferred_life} + fi + fi + + if [ ! "${new_dhcp6_name_servers}" = "${old_dhcp6_name_servers}" ] || + [ ! 
"${new_dhcp6_domain_search}" = "${old_dhcp6_domain_search}" ]; then + make_resolv_conf + fi + ;; + + DEPREF6) + if [ -z "${new_ip6_prefixlen}" ]; then + exit_with_hooks 2 + fi + + ip -6 addr change ${new_ip6_address}/${new_ip6_prefixlen} \ + dev ${interface} scope global preferred_lft 0 + ;; + esac + + execute_client_side_configuration_scripts "config" +} + + +# +# ### MAIN +# + +if [ -x ${ETCDIR}/dhclient-enter-hooks ]; then + exit_status=0 + + # dhclient-enter-hooks can abort dhclient-script by setting + # the exit_status variable to a non-zero value + . ${ETCDIR}/dhclient-enter-hooks + if [ ${exit_status} -ne 0 ]; then + exit ${exit_status} + fi +fi + +if [ ! -r /etc/sysconfig/network-scripts/network-functions ]; then + echo "Missing /etc/sysconfig/network-scripts/network-functions, exiting." >&2 + exit 1 +fi + +if [ ! -r /etc/rc.d/init.d/functions ]; then + echo "Missing /etc/rc.d/init.d/functions, exiting." >&2 + exit 1 +fi + +. /etc/sysconfig/network-scripts/network-functions +. /etc/rc.d/init.d/functions + +if [ -f /etc/sysconfig/network ]; then + . /etc/sysconfig/network +fi + +if [ -f /etc/sysconfig/networking/network ]; then + . /etc/sysconfig/networking/network +fi + +cd /etc/sysconfig/network-scripts +CONFIG="${interface}" +need_config ${CONFIG} +source_config >/dev/null 2>&1 + +new_prefix="$(get_prefix ${new_ip_address} ${new_subnet_mask})" +old_prefix="$(get_prefix ${old_ip_address} ${old_subnet_mask})" +alias_prefix="$(get_prefix ${alias_ip_address} ${alias_subnet_mask})" + +case "${reason}" in + MEDIUM|ARPCHECK|ARPSEND) + # Do nothing + exit_with_hooks 0 + ;; + + PREINIT) + if [ -n "${alias_ip_address}" ]; then + # Flush alias, its routes will disappear too. 
+ ip -4 addr flush dev ${interface} label ${interface}:0 >/dev/null 2>&1 + fi + + # upstream dhclient-script removes (ifconfig $interface 0 up) old adresses in PREINIT, + # but we sometimes (#125298) need (for iSCSI/nfs root to have a dhcp interface) to keep the existing ip + # flush_dev ${interface} + ip link set dev ${interface} up + if [ -n "${DHCLIENT_DELAY}" ] && [ ${DHCLIENT_DELAY} -gt 0 ]; then + # We need to give the kernel some time to get the interface up. + sleep ${DHCLIENT_DELAY} + fi + + exit_with_hooks 0 + ;; + + PREINIT6) + # ensure interface is up + ip link set dev ${interface} up + + # remove any stale addresses from aborted clients + ip -6 addr flush dev ${interface} scope global permanent + + # we need a link-local address to be ready (not tentative) + for i in $(seq 50); do + linklocal=$(ip -6 addr show dev ${interface} scope link) + # tentative flag means DAD is still not complete + tentative=$(echo "${linklocal}" | grep tentative) + [[ -n "${linklocal}" && -z "${tentative}" ]] && exit_with_hooks 0 + sleep 0.1 + done + + exit_with_hooks 0 + ;; + + BOUND|RENEW|REBIND|REBOOT) + if [ -z "${interface}" ] || [ -z "${new_ip_address}" ]; then + exit_with_hooks 2 + fi + if arping -D -q -c2 -I ${interface} ${new_ip_address}; then + dhconfig + exit_with_hooks 0 + else # DAD failed, i.e. 
address is already in use + ARP_REPLY=$(arping -D -c2 -I ${interface} ${new_ip_address} | grep reply | awk '{print toupper($5)}' | cut -d "[" -f2 | cut -d "]" -f1) + OUR_MACS=$(ip link show | grep link | awk '{print toupper($2)}' | uniq) + if [[ "${OUR_MACS}" = *"${ARP_REPLY}"* ]]; then + # in RENEW the reply can come from our system, that's OK + dhconfig + exit_with_hooks 0 + else + exit_with_hooks 1 + fi + fi + ;; + + BOUND6|RENEW6|REBIND6|DEPREF6) + dh6config + exit_with_hooks 0 + ;; + + EXPIRE6|RELEASE6|STOP6) + if [ -z "${old_ip6_address}" ] || [ -z "${old_ip6_prefixlen}" ]; then + exit_with_hooks 2 + fi + + ip -6 addr del ${old_ip6_address}/${old_ip6_prefixlen} \ + dev ${interface} + + execute_client_side_configuration_scripts "restore" + + if [ -x ${ETCDIR}/dhclient-${interface}-down-hooks ]; then + . ${ETCDIR}/dhclient-${interface}-down-hooks + elif [ -x ${ETCDIR}/dhclient-down-hooks ]; then + . ${ETCDIR}/dhclient-down-hooks + fi + + exit_with_hooks 0 + ;; + + EXPIRE|FAIL|RELEASE|STOP) + execute_client_side_configuration_scripts "restore" + + if [ -x ${ETCDIR}/dhclient-${interface}-down-hooks ]; then + . ${ETCDIR}/dhclient-${interface}-down-hooks + elif [ -x ${ETCDIR}/dhclient-down-hooks ]; then + . ${ETCDIR}/dhclient-down-hooks + fi + + if [ -n "${alias_ip_address}" ]; then + # Flush alias + ip -4 addr flush dev ${interface} label ${interface}:0 >/dev/null 2>&1 + fi + + if [ -n "${old_ip_address}" ]; then + # Delete addresses/routes/arp cache. 
+ flush_dev ${interface} + fi + + if [ -n "${alias_ip_address}" ]; then + ip -4 addr add ${alias_ip_address}/${alias_prefix} broadcast ${alias_broadcast_address} dev ${interface} label ${interface}:0 + ip -4 route replace ${alias_ip_address}/32 dev ${interface} + fi + + exit_with_hooks 0 + ;; + + TIMEOUT) + if [ -n "${new_routers}" ]; then + if [ -n "${alias_ip_address}" ]; then + ip -4 addr flush dev ${interface} label ${interface}:0 >/dev/null 2>&1 + fi + + ip -4 addr add ${new_ip_address}/${new_prefix} \ + broadcast ${new_broadcast_address} dev ${interface} \ + valid_lft ${new_dhcp_lease_time} preferred_lft ${new_dhcp_lease_time} + set ${new_routers} + + if ping -q -c 1 -w 10 -I ${interface} ${1}; then + dhconfig + exit_with_hooks 0 + fi + + flush_dev ${interface} + exit_with_hooks 1 + else + exit_with_hooks 1 + fi + ;; + + *) + logmessage "unhandled state: ${reason}" + exit_with_hooks 1 + ;; +esac + +exit_with_hooks 0 diff -Nru gce-compute-image-packages-20190801/src/usr/bin/google_optimize_local_ssd gce-compute-image-packages-20201222.00/src/usr/bin/google_optimize_local_ssd --- gce-compute-image-packages-20190801/src/usr/bin/google_optimize_local_ssd 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/src/usr/bin/google_optimize_local_ssd 2020-12-07 19:55:14.000000000 +0000 @@ -0,0 +1,95 @@ +#!/bin/bash +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +total_cpus=$(nproc) + +config_nvme() +{ + current_cpu=0 + for dev in /sys/bus/pci/drivers/nvme/* + do + if [ ! -d "$dev" ] + then + continue + fi + for irq_info in $dev/msi_irqs/* + do + if [ ! -f "$irq_info" ] + then + continue + fi + current_cpu=$((current_cpu % total_cpus)) + cpu_mask=$(printf "%x" $((1< "/proc/irq/$irq/smp_affinity" + current_cpu=$((current_cpu+1)) + done + done +} + +config_scsi() +{ + irqs=() + for device in /sys/bus/virtio/drivers/virtio_scsi/virtio* + do + ssd=0 + for target_path in $device/host*/target*/* + do + if [ ! -f "$target_path/model" ] + then + continue + fi + model=$(cat "$target_path/model") + if [[ $model =~ .*EphemeralDisk.* ]] + then + ssd=1 + for queue_path in $target_path/block/sd*/queue + do + echo noop > "$queue_path/scheduler" + echo 0 > "$queue_path/add_random" + echo 512 > "$queue_path/nr_requests" + echo 0 > "$queue_path/rotational" + echo 0 > "$queue_path/rq_affinity" + echo 1 > "$queue_path/nomerges" + done + fi + done + if [[ $ssd == 1 ]] + then + request_queue=$(basename "$device")-request + irq=$(cat /proc/interrupts | grep "$request_queue" | awk '{print $1}'| sed 's/://') + irqs+=($irq) + fi + done + irq_count=${#irqs[@]} + if [ "$irq_count" != 0 ] + then + stride=$((total_cpus / irq_count)) + stride=$((stride < 1 ? 1 : stride)) + current_cpu=0 + for irq in "${irqs[@]}" + do + current_cpu=$(($current_cpu % $total_cpus)) + cpu_mask=$(printf "%x" $((1<<$current_cpu))) + echo "Setting IRQ $irq smp_affinity to $cpu_mask." 
+ echo "$cpu_mask" > "/proc/irq/$irq/smp_affinity" + current_cpu=$((current_cpu+stride)) + done + fi +} + +config_nvme +config_scsi diff -Nru gce-compute-image-packages-20190801/src/usr/bin/google_set_hostname gce-compute-image-packages-20201222.00/src/usr/bin/google_set_hostname --- gce-compute-image-packages-20190801/src/usr/bin/google_set_hostname 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/src/usr/bin/google_set_hostname 2020-12-07 19:55:14.000000000 +0000 @@ -0,0 +1,60 @@ +#!/bin/bash +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Deal with a new hostname assignment. + +if [ -n "$new_host_name" ] && [ -n "$new_ip_address" ]; then + # Delete entries with new_host_name or new_ip_address in /etc/hosts. + sed -i"" '/Added by Google/d' /etc/hosts + + # Add an entry for our new_host_name/new_ip_address in /etc/hosts. + echo "${new_ip_address} ${new_host_name} ${new_host_name%%.*} # Added by Google" >> /etc/hosts + + # Add an entry for reaching the metadata server in /etc/hosts. + echo "169.254.169.254 metadata.google.internal # Added by Google" >> /etc/hosts +fi + +# /sbin/dhclient-scripts in both ubuntu and centos have some problems for us: +# 1) BOUND doesn't always set hostname (e.g. if old_host_name is unset in +# precise pangolin) +# 2) Using too long of a FQDN as a hostname causes some tools to break in +# some distros (e.g. 
ssh-keygen) and hostname tool complains when given +# a FQDN that is > 64 bytes. +# +# As a result, we set the host name in all circumstances here, to the truncated +# unqualified domain name. + +if [ -n "$new_host_name" ]; then + hostname "${new_host_name%%.*}" + + # If NetworkManager is installed set the hostname with nmcli. + # to resolve issues with NetworkManager resetting the hostname + # to the FQDN on DHCP renew. + nmcli=$(which nmcli 2> /dev/null) + if [ -x "$nmcli" ]; then + nmcli general hostname "${new_host_name%%.*}" + fi + + # Restart rsyslog to update the hostname. + systemctl=$(which systemctl 2> /dev/null) + if [ -x "$systemctl" ]; then + hasrsyslog=$($systemctl | grep rsyslog | cut -f1 -d' ') + if [ ! -z "$hasrsyslog" ]; then + $systemctl -q --no-block restart "$hasrsyslog" + fi + else + pkill -HUP syslogd + fi +fi diff -Nru gce-compute-image-packages-20190801/src/usr/bin/google_set_multiqueue gce-compute-image-packages-20201222.00/src/usr/bin/google_set_multiqueue --- gce-compute-image-packages-20190801/src/usr/bin/google_set_multiqueue 1970-01-01 00:00:00.000000000 +0000 +++ gce-compute-image-packages-20201222.00/src/usr/bin/google_set_multiqueue 2020-12-07 19:55:14.000000000 +0000 @@ -0,0 +1,162 @@ +#!/bin/bash +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# For a single-queue / no MSI-X virtionet device, sets the IRQ affinities to +# processor 0. 
For this virtionet configuration, distributing IRQs to all +# processors results in comparatively high cpu utilization and comparatively +# low network bandwidth. +# +# For a multi-queue / MSI-X virtionet device, sets the IRQ affinities to the +# per-IRQ affinity hint. The virtionet driver maps each virtionet TX (RX) queue +# MSI-X interrupt to a unique single CPU if the number of TX (RX) queues equals +# the number of online CPUs. The mapping of network MSI-X interrupt vector to +# CPUs is stored in the virtionet MSI-X interrupt vector affinity hint. This +# configuration allows network traffic to be spread across the CPUs, giving +# each CPU a dedicated TX and RX network queue, while ensuring that all packets +# from a single flow are delivered to the same CPU. +# +# For a gvnic device, set the IRQ affinities to the per-IRQ affinity hint. +# The google virtual ethernet driver maps each queue MSI-X interrupt to a +# unique single CPU, which is stored in the affinity_hint for each MSI-X +# vector. In older versions of the kernel, irqblanace is expected to copy the +# affinity_hint to smp_affinity; however, GCE instances disable irqbalance by +# default. This script copies over the affinity_hint to smp_affinity on boot to +# replicate the behavior of irqbalance. + +function is_decimal_int() { + [ "${1}" -eq "${1}" ] > /dev/null 2>&1 +} + +function set_channels() { + ethtool -L "${1}" combined "${2}" > /dev/null 2>&1 +} + +echo "Running $(basename $0)." +VIRTIO_NET_DEVS=/sys/bus/virtio/drivers/virtio_net/virtio* + +# Loop through all the virtionet devices and enable multi-queue +if [ -x "$(command -v ethtool)" ]; then + for dev in $VIRTIO_NET_DEVS; do + ETH_DEVS=${dev}/net/* + for eth_dev in $ETH_DEVS; do + eth_dev=$(basename "$eth_dev") + if ! errormsg=$(ethtool -l "$eth_dev" 2>&1); then + echo "ethtool says that $eth_dev does not support virtionet multiqueue: $errormsg." 
+ continue + fi + num_max_channels=$(ethtool -l "$eth_dev" | grep -m 1 Combined | cut -f2) + [ "${num_max_channels}" -eq "1" ] && continue + if is_decimal_int "$num_max_channels" && \ + set_channels "$eth_dev" "$num_max_channels"; then + echo "Set channels for $eth_dev to $num_max_channels." + else + echo "Could not set channels for $eth_dev to $num_max_channels." + fi + done + done +else + echo "ethtool not found: cannot configure virtionet multiqueue." +fi + +for dev in $VIRTIO_NET_DEVS +do + dev=$(basename "$dev") + irq_dir=/proc/irq/* + for irq in $irq_dir + do + smp_affinity="${irq}/smp_affinity_list" + [ ! -f "${smp_affinity}" ] && continue + # Classify this IRQ as virtionet intx, virtionet MSI-X, or non-virtionet + # If the IRQ type is virtionet intx, a subdirectory with the same name as + # the device will be present. If the IRQ type is virtionet MSI-X, then + # a subdirectory of the form -.N will exist. + # In this case, N is the input (output) queue number, and is specified as + # a decimal integer ranging from 0 to K - 1 where K is the number of + # input (output) queues in the virtionet device. + virtionet_intx_dir="${irq}/${dev}" + virtionet_msix_dir_regex=".*/${dev}-(input|output)\.([0-9]+)$" + if [ -d "${virtionet_intx_dir}" ]; then + # All virtionet intx IRQs are delivered to CPU 0 + echo "Setting ${smp_affinity} to 01 for device ${dev}." + echo "01" > "${smp_affinity}" + continue + fi + # Not virtionet intx, probe for MSI-X + virtionet_msix_found=0 + for entry in ${irq}/${dev}*; do + if [[ "$entry" =~ ${virtionet_msix_dir_regex} ]]; then + virtionet_msix_found=1 + queue_num=${BASH_REMATCH[2]} + fi + done + affinity_hint="${irq}/affinity_hint" + [ "$virtionet_msix_found" -eq 0 -o ! -f "${affinity_hint}" ] && continue + + # Set the IRQ CPU affinity to the virtionet-initialized affinity hint + echo "Setting ${smp_affinity} to ${queue_num} for device ${dev}." 
+ echo "${queue_num}" > "${smp_affinity}" + real_affinity=`cat ${smp_affinity}` + echo "${smp_affinity}: real affinity ${real_affinity}" + done +done + +# Set smp_affinity properly for gvnic queues. '-ntfy-block.' is unique to gve +# and will not affect virtio queues. +for i in /proc/irq/*; do + if ls ${i}/*-ntfy-block.* 1> /dev/null 2>&1; then + if [ -f ${i}/affinity_hint ]; then + echo Setting smp_affinity on ${i} to $(cat ${i}/affinity_hint) + cp ${i}/affinity_hint ${i}/smp_affinity + fi + fi +done + +XPS=/sys/class/net/e*/queues/tx*/xps_cpus +num_cpus=$(nproc) +[[ $num_cpus -gt 63 ]] && num_cpus=63 + +num_queues=0 +for q in $XPS; do + num_queues=$((num_queues + 1)) +done + +# If we have more CPUs than queues, then stripe CPUs across tx affinity +# as CPUNumber % queue_count. +for q in $XPS; do + queue_re=".*tx-([0-9]+).*$" + if [[ "$q" =~ ${queue_re} ]]; then + queue_num=${BASH_REMATCH[1]} + fi + + xps=0 + for cpu in `seq $queue_num $num_queues $((num_cpus - 1))`; do + xps=$((xps | (1 << cpu))) + done + + # Linux xps_cpus requires a hex number with commas every 32 bits. It ignores + # all bits above # cpus, so write a list of comma separated 32 bit hex values + # with a comma between dwords. 
+ xps_dwords=() + for i in $(seq 0 $(((num_cpus - 1) / 32))) + do + xps_dwords=(`printf "%08x" $((xps & 0xffffffff))` "${xps_dwords[@]}") + xps=$((xps >> 32)) + done + xps_string=$(IFS=, ; echo "${xps_dwords[*]}") + + + echo ${xps_string} > $q + printf "Queue %d XPS=%s for %s\n" $queue_num `cat $q` $q +done | sort -n -k2 diff -Nru gce-compute-image-packages-20190801/.travis.yml gce-compute-image-packages-20201222.00/.travis.yml --- gce-compute-image-packages-20190801/.travis.yml 2019-08-01 23:32:44.000000000 +0000 +++ gce-compute-image-packages-20201222.00/.travis.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,36 +0,0 @@ -language: python -sudo: true -python: -- 2.7 -- 3.4 -- 3.5 -- 3.6 -matrix: - include: - - python: 3.7 - dist: xenial -os: -- linux -install: -- pip install "virtualenv<14.0.0" -- pip install tox tox-travis codecov -- pip install "setuptools>20.0.0" -script: -- cd packages/python-google-compute-engine -- tox -after_success: -- codecov - -before_deploy: - cd packages/python-google-compute-engine -deploy: - provider: pypi - user: gc-team - on: - branch: master - python: 3.6 - distributions: sdist bdist_wheel - repo: GoogleCloudPlatform/compute-image-packages - tags: true - password: - secure: fS5d9Uot9d5pXsOmIoeo2Fl/FzPWeQ5z6GwMj4tWemrP2FJSgKLTLlU+dT1p2ylAEh9XI02gLQO9H5P/SwF8Xvo3Dnrg4KXQ3m6jPo2ggit2B2F+dbUd+IJSpmqI17i0AQY7Ey0UlQKeZ9NI0iMJpSkTBI/UNl67FOJ7epcglhE=