diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/agent.py walinuxagent-2.2.45/azurelinuxagent/agent.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/agent.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/agent.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # """ @@ -27,6 +27,7 @@ import sys import re import subprocess +import threading import traceback import azurelinuxagent.common.logger as logger @@ -37,6 +38,8 @@ PY_VERSION_MAJOR, PY_VERSION_MINOR, \ PY_VERSION_MICRO, GOAL_STATE_AGENT_VERSION from azurelinuxagent.common.osutil import get_osutil +from azurelinuxagent.common.utils import fileutil + class Agent(object): def __init__(self, verbose, conf_file_path=None): @@ -61,15 +64,20 @@ level = logger.LogLevel.VERBOSE if verbose else logger.LogLevel.INFO logger.add_logger_appender(logger.AppenderType.FILE, level, path="/var/log/waagent.log") - logger.add_logger_appender(logger.AppenderType.CONSOLE, level, - path="/dev/console") + if conf.get_logs_console(): + logger.add_logger_appender(logger.AppenderType.CONSOLE, level, + path="/dev/console") + # See issue #1035 + # logger.add_logger_appender(logger.AppenderType.TELEMETRY, + # logger.LogLevel.WARNING, + # path=event.add_log_event) ext_log_dir = conf.get_ext_log_dir() try: if os.path.isfile(ext_log_dir): raise Exception("{0} is a file".format(ext_log_dir)) if not os.path.isdir(ext_log_dir): - os.makedirs(ext_log_dir) + fileutil.mkdir(ext_log_dir, mode=0o755, owner="root") except Exception as e: logger.error( 
"Exception occurred while creating extension " @@ -85,6 +93,8 @@ """ Run agent daemon """ + logger.set_prefix("Daemon") + threading.current_thread().setName("Daemon") child_args = None \ if self.conf_file_path is None \ else "-configuration-path:{0}".format(self.conf_file_path) @@ -120,19 +130,22 @@ print("Start {0} service".format(AGENT_NAME)) self.osutil.start_agent_service() - def run_exthandlers(self): + def run_exthandlers(self, debug=False): """ Run the update and extension handler """ + logger.set_prefix("ExtHandler") + threading.current_thread().setName("ExtHandler") from azurelinuxagent.ga.update import get_update_handler update_handler = get_update_handler() - update_handler.run() + update_handler.run(debug) def show_configuration(self): configuration = conf.get_configuration() for k in sorted(configuration.keys()): print("{0} = {1}".format(k, configuration[k])) + def main(args=[]): """ Parse command line arguments, exit with usage() on error. @@ -140,11 +153,11 @@ """ if len(args) <= 0: args = sys.argv[1:] - command, force, verbose, conf_file_path = parse_args(args) + command, force, verbose, debug, conf_file_path = parse_args(args) if command == "version": version() elif command == "help": - usage() + print(usage()) elif command == "start": start(conf_file_path=conf_file_path) else: @@ -161,7 +174,7 @@ elif command == "daemon": agent.daemon() elif command == "run-exthandlers": - agent.run_exthandlers() + agent.run_exthandlers(debug) elif command == "show-configuration": agent.show_configuration() except Exception: @@ -176,6 +189,7 @@ cmd = "help" force = False verbose = False + debug = False conf_file_path = None for a in sys_args: m = re.match("^(?:[-/]*)configuration-path:([\w/\.\-_]+)", a) @@ -203,6 +217,8 @@ cmd = "version" elif re.match("^([-/]*)verbose", a): verbose = True + elif re.match("^([-/]*)debug", a): + debug = True elif re.match("^([-/]*)force", a): force = True elif re.match("^([-/]*)show-configuration", a): @@ -212,7 +228,9 @@ else: cmd 
= "help" break - return cmd, force, verbose, conf_file_path + + return cmd, force, verbose, debug, conf_file_path + def version(): """ @@ -228,15 +246,16 @@ def usage(): """ - Show agent usage + Return agent usage message """ - print("") - print((("usage: {0} [-verbose] [-force] [-help] " + s = "\n" + s += ("usage: {0} [-verbose] [-force] [-help] " "-configuration-path:" "-deprovision[+user]|-register-service|-version|-daemon|-start|" - "-run-exthandlers]" - "").format(sys.argv[0]))) - print("") + "-run-exthandlers|-show-configuration]" + "").format(sys.argv[0]) + s += "\n" + return s def start(conf_file_path=None): """ diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/cgroupapi.py walinuxagent-2.2.45/azurelinuxagent/common/cgroupapi.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/cgroupapi.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/cgroupapi.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,566 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ + +import errno +import os +import shutil +import subprocess +import uuid + +from azurelinuxagent.common import logger +from azurelinuxagent.common.cgroup import CGroup +from azurelinuxagent.common.cgroupstelemetry import CGroupsTelemetry +from azurelinuxagent.common.conf import get_agent_pid_file_path +from azurelinuxagent.common.event import add_event, WALAEventOperation +from azurelinuxagent.common.exception import CGroupsException, ExtensionErrorCodes, ExtensionError, \ + ExtensionOperationError +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.utils import fileutil, shellutil +from azurelinuxagent.common.utils.extensionprocessutil import handle_process_completion, read_output +from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION + +CGROUPS_FILE_SYSTEM_ROOT = '/sys/fs/cgroup' +CGROUP_CONTROLLERS = ["cpu", "memory"] +VM_AGENT_CGROUP_NAME = "walinuxagent.service" +EXTENSIONS_ROOT_CGROUP_NAME = "walinuxagent.extensions" +UNIT_FILES_FILE_SYSTEM_PATH = "/etc/systemd/system" + + +class CGroupsApi(object): + """ + Interface for the cgroups API + """ + def create_agent_cgroups(self): + raise NotImplementedError() + + def create_extension_cgroups_root(self): + raise NotImplementedError() + + def create_extension_cgroups(self, extension_name): + raise NotImplementedError() + + def remove_extension_cgroups(self, extension_name): + raise NotImplementedError() + + def get_extension_cgroups(self, extension_name): + raise NotImplementedError() + + def start_extension_command(self, extension_name, command, timeout, shell, cwd, env, stdout, stderr, error_code): + raise NotImplementedError() + + def cleanup_legacy_cgroups(self): + raise NotImplementedError() + + @staticmethod + def track_cgroups(extension_cgroups): + try: + for cgroup in extension_cgroups: + CGroupsTelemetry.track_cgroup(cgroup) + except Exception as e: + logger.warn("Cannot add cgroup '{0}' to tracking list; resource usage 
will not be tracked. " + "Error: {1}".format(cgroup.path, ustr(e))) + + @staticmethod + def _get_extension_cgroup_name(extension_name): + # Since '-' is used as a separator in systemd unit names, we replace it with '_' to prevent side-effects. + return extension_name.replace('-', '_') + + @staticmethod + def create(): + """ + Factory method to create the correct API for the current platform + """ + return SystemdCgroupsApi() if CGroupsApi._is_systemd() else FileSystemCgroupsApi() + + @staticmethod + def _is_systemd(): + """ + Determine if systemd is managing system services; the implementation follows the same strategy as, for example, + sd_booted() in libsystemd, or /usr/sbin/service + """ + return os.path.exists('/run/systemd/system/') + + @staticmethod + def _foreach_controller(operation, message): + """ + Executes the given operation on all controllers that need to be tracked; outputs 'message' if the controller + is not mounted or if an error occurs in the operation + :return: Returns a list of error messages or an empty list if no errors occurred + """ + mounted_controllers = os.listdir(CGROUPS_FILE_SYSTEM_ROOT) + + for controller in CGROUP_CONTROLLERS: + try: + if controller not in mounted_controllers: + logger.warn('Cgroup controller "{0}" is not mounted. {1}', controller, message) + else: + operation(controller) + except Exception as e: + logger.warn('Error in cgroup controller "{0}": {1}. {2}', controller, ustr(e), message) + + @staticmethod + def _foreach_legacy_cgroup(operation): + """ + Previous versions of the daemon (2.2.31-2.2.40) wrote their PID to /sys/fs/cgroup/{cpu,memory}/WALinuxAgent/WALinuxAgent; + starting from version 2.2.41 we track the agent service in walinuxagent.service instead of WALinuxAgent/WALinuxAgent. Also, + when running under systemd, the PIDs should not be explicitly moved to the cgroup filesystem. The older daemons would + incorrectly do that under certain conditions. 
+ + This method checks for the existence of the legacy cgroups and, if the daemon's PID has been added to them, executes the + given operation on the cgroups. After this check, the method attempts to remove the legacy cgroups. + + :param operation: + The function to execute on each legacy cgroup. It must take 2 arguments: the controller and the daemon's PID + """ + legacy_cgroups = [] + for controller in ['cpu', 'memory']: + cgroup = os.path.join(CGROUPS_FILE_SYSTEM_ROOT, controller, "WALinuxAgent", "WALinuxAgent") + if os.path.exists(cgroup): + logger.info('Found legacy cgroup {0}', cgroup) + legacy_cgroups.append((controller, cgroup)) + + try: + for controller, cgroup in legacy_cgroups: + procs_file = os.path.join(cgroup, "cgroup.procs") + + if os.path.exists(procs_file): + procs_file_contents = fileutil.read_file(procs_file).strip() + daemon_pid = fileutil.read_file(get_agent_pid_file_path()).strip() + + if daemon_pid in procs_file_contents: + operation(controller, daemon_pid) + finally: + for _, cgroup in legacy_cgroups: + logger.info('Removing {0}', cgroup) + shutil.rmtree(cgroup, ignore_errors=True) + + +class FileSystemCgroupsApi(CGroupsApi): + """ + Cgroups interface using the cgroups file system directly + """ + @staticmethod + def _try_mkdir(path): + """ + Try to create a directory, recursively. If it already exists as such, do nothing. Raise the appropriate + exception should an error occur. 
+ + :param path: str + """ + if not os.path.isdir(path): + try: + os.makedirs(path, 0o755) + except OSError as e: + if e.errno == errno.EEXIST: + if not os.path.isdir(path): + raise CGroupsException("Create directory for cgroup {0}: normal file already exists with that name".format(path)) + else: + pass # There was a race to create the directory, but it's there now, and that's fine + elif e.errno == errno.EACCES: + # This is unexpected, as the agent runs as root + raise CGroupsException("Create directory for cgroup {0}: permission denied".format(path)) + else: + raise + + @staticmethod + def _get_agent_cgroup_path(controller): + return os.path.join(CGROUPS_FILE_SYSTEM_ROOT, controller, VM_AGENT_CGROUP_NAME) + + @staticmethod + def _get_extension_cgroups_root_path(controller): + return os.path.join(CGROUPS_FILE_SYSTEM_ROOT, controller, EXTENSIONS_ROOT_CGROUP_NAME) + + def _get_extension_cgroup_path(self, controller, extension_name): + extensions_root = self._get_extension_cgroups_root_path(controller) + + if not os.path.exists(extensions_root): + logger.warn("Root directory {0} does not exist.".format(extensions_root)) + + cgroup_name = self._get_extension_cgroup_name(extension_name) + + return os.path.join(extensions_root, cgroup_name) + + def _create_extension_cgroup(self, controller, extension_name): + return CGroup.create(self._get_extension_cgroup_path(controller, extension_name), controller, extension_name) + + @staticmethod + def _add_process_to_cgroup(pid, cgroup_path): + tasks_file = os.path.join(cgroup_path, 'cgroup.procs') + fileutil.append_file(tasks_file, "{0}\n".format(pid)) + logger.info("Added PID {0} to cgroup {1}".format(pid, cgroup_path)) + + def cleanup_legacy_cgroups(self): + """ + Previous versions of the daemon (2.2.31-2.2.40) wrote their PID to /sys/fs/cgroup/{cpu,memory}/WALinuxAgent/WALinuxAgent; + starting from version 2.2.41 we track the agent service in walinuxagent.service instead of WALinuxAgent/WALinuxAgent. 
This + method moves the daemon's PID from the legacy cgroups to the newer cgroups. + """ + def move_daemon_pid(controller, daemon_pid): + new_path = FileSystemCgroupsApi._get_agent_cgroup_path(controller) + logger.info("Writing daemon's PID ({0}) to {1}", daemon_pid, new_path) + fileutil.append_file(os.path.join(new_path, "cgroup.procs"), daemon_pid) + msg = "Moved daemon's PID from legacy cgroup to {0}".format(new_path) + add_event(AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.CGroupsCleanUp, is_success=True, message=msg) + + CGroupsApi._foreach_legacy_cgroup(move_daemon_pid) + + def create_agent_cgroups(self): + """ + Creates a cgroup for the VM Agent in each of the controllers we are tracking; returns the created cgroups. + """ + cgroups = [] + + pid = int(os.getpid()) + + def create_cgroup(controller): + path = FileSystemCgroupsApi._get_agent_cgroup_path(controller) + + if not os.path.isdir(path): + FileSystemCgroupsApi._try_mkdir(path) + logger.info("Created cgroup {0}".format(path)) + + self._add_process_to_cgroup(pid, path) + + cgroups.append(CGroup.create(path, controller, VM_AGENT_CGROUP_NAME)) + + self._foreach_controller(create_cgroup, 'Failed to create a cgroup for the VM Agent; resource usage will not be tracked') + + if len(cgroups) == 0: + raise CGroupsException("Failed to create any cgroup for the VM Agent") + + return cgroups + + def create_extension_cgroups_root(self): + """ + Creates the directory within the cgroups file system that will contain the cgroups for the extensions. 
+ """ + def create_cgroup(controller): + path = self._get_extension_cgroups_root_path(controller) + + if not os.path.isdir(path): + FileSystemCgroupsApi._try_mkdir(path) + logger.info("Created {0}".format(path)) + + self._foreach_controller(create_cgroup, 'Failed to create a root cgroup for extensions') + + def create_extension_cgroups(self, extension_name): + """ + Creates a cgroup for the given extension in each of the controllers we are tracking; returns the created cgroups. + """ + cgroups = [] + + def create_cgroup(controller): + cgroup = self._create_extension_cgroup(controller, extension_name) + + if not os.path.isdir(cgroup.path): + FileSystemCgroupsApi._try_mkdir(cgroup.path) + logger.info("Created cgroup {0}".format(cgroup.path)) + + cgroups.append(cgroup) + + self._foreach_controller(create_cgroup, 'Failed to create a cgroup for extension {0}'.format(extension_name)) + + return cgroups + + def remove_extension_cgroups(self, extension_name): + """ + Deletes the cgroups for the given extension. + """ + def remove_cgroup(controller): + path = self._get_extension_cgroup_path(controller, extension_name) + + if os.path.exists(path): + try: + os.rmdir(path) + logger.info('Deleted cgroup "{0}".'.format(path)) + except OSError as exception: + if exception.errno == 16: # [Errno 16] Device or resource busy + logger.warn('CGroup "{0}" still has active tasks; will not remove it.'.format(path)) + + self._foreach_controller(remove_cgroup, 'Failed to delete cgroups for extension {0}'.format(extension_name)) + + def get_extension_cgroups(self, extension_name): + """ + Returns the cgroups for the given extension. 
+ """ + + cgroups = [] + + def get_cgroup(controller): + cgroup = self._create_extension_cgroup(controller, extension_name) + cgroups.append(cgroup) + + self._foreach_controller(get_cgroup, 'Failed to retrieve cgroups for extension {0}'.format(extension_name)) + + return cgroups + + def start_extension_command(self, extension_name, command, timeout, shell, cwd, env, stdout, stderr, + error_code=ExtensionErrorCodes.PluginUnknownFailure): + """ + Starts a command (install/enable/etc) for an extension and adds the command's PID to the extension's cgroup + :param extension_name: The extension executing the command + :param command: The command to invoke + :param timeout: Number of seconds to wait for command completion + :param cwd: The working directory for the command + :param env: The environment to pass to the command's process + :param stdout: File object to redirect stdout to + :param stderr: File object to redirect stderr to + :param error_code: Extension error code to raise in case of error + """ + try: + extension_cgroups = self.create_extension_cgroups(extension_name) + except Exception as exception: + extension_cgroups = [] + logger.warn("Failed to create cgroups for extension '{0}'; resource usage will not be tracked. " + "Error: {1}".format(extension_name, ustr(exception))) + + def pre_exec_function(): + os.setsid() + + try: + pid = os.getpid() + + for cgroup in extension_cgroups: + try: + self._add_process_to_cgroup(pid, cgroup.path) + except Exception as exception: + logger.warn("Failed to add PID {0} to the cgroups for extension '{1}'. " + "Resource usage will not be tracked. Error: {2}".format(pid, + extension_name, + ustr(exception))) + except Exception as e: + logger.warn("Failed to add extension {0} to its cgroup. Resource usage will not be tracked. 
" + "Error: {1}".format(extension_name, ustr(e))) + + process = subprocess.Popen(command, + shell=shell, + cwd=cwd, + env=env, + stdout=stdout, + stderr=stderr, + preexec_fn=pre_exec_function) + + self.track_cgroups(extension_cgroups) + process_output = handle_process_completion(process=process, + command=command, + timeout=timeout, + stdout=stdout, + stderr=stderr, + error_code=error_code) + + return extension_cgroups, process_output + + +class SystemdCgroupsApi(CGroupsApi): + """ + Cgroups interface via systemd + """ + + @staticmethod + def create_and_start_unit(unit_filename, unit_contents): + try: + unit_path = os.path.join(UNIT_FILES_FILE_SYSTEM_PATH, unit_filename) + fileutil.write_file(unit_path, unit_contents) + shellutil.run_command(["systemctl", "daemon-reload"]) + shellutil.run_command(["systemctl", "start", unit_filename]) + except Exception as e: + raise CGroupsException("Failed to create and start {0}. Error: {1}".format(unit_filename, ustr(e))) + + @staticmethod + def _get_extensions_slice_root_name(): + return "system-{0}.slice".format(EXTENSIONS_ROOT_CGROUP_NAME) + + def _get_extension_slice_name(self, extension_name): + return "system-{0}-{1}.slice".format(EXTENSIONS_ROOT_CGROUP_NAME, self._get_extension_cgroup_name(extension_name)) + + def create_agent_cgroups(self): + try: + cgroup_unit = None + cgroup_paths = fileutil.read_file("/proc/self/cgroup") + for entry in cgroup_paths.splitlines(): + fields = entry.split(':') + if fields[1] == "name=systemd": + cgroup_unit = fields[2].lstrip(os.path.sep) + + cpu_cgroup_path = os.path.join(CGROUPS_FILE_SYSTEM_ROOT, 'cpu', cgroup_unit) + memory_cgroup_path = os.path.join(CGROUPS_FILE_SYSTEM_ROOT, 'memory', cgroup_unit) + + return [CGroup.create(cpu_cgroup_path, 'cpu', VM_AGENT_CGROUP_NAME), + CGroup.create(memory_cgroup_path, 'memory', VM_AGENT_CGROUP_NAME)] + except Exception as e: + raise CGroupsException("Failed to get paths of agent's cgroups. 
Error: {0}".format(ustr(e))) + + def create_extension_cgroups_root(self): + unit_contents = """ +[Unit] +Description=Slice for walinuxagent extensions +DefaultDependencies=no +Before=slices.target +Requires=system.slice +After=system.slice""" + unit_filename = self._get_extensions_slice_root_name() + self.create_and_start_unit(unit_filename, unit_contents) + logger.info("Created slice for walinuxagent extensions {0}".format(unit_filename)) + + def create_extension_cgroups(self, extension_name): + # TODO: The slice created by this function is not used currently. We need to create the extension scopes within + # this slice and use the slice to monitor the cgroups. Also see comment in get_extension_cgroups. + # the slice. + unit_contents = """ +[Unit] +Description=Slice for extension {0} +DefaultDependencies=no +Before=slices.target +Requires=system-{1}.slice +After=system-{1}.slice""".format(extension_name, EXTENSIONS_ROOT_CGROUP_NAME) + unit_filename = self._get_extension_slice_name(extension_name) + self.create_and_start_unit(unit_filename, unit_contents) + logger.info("Created slice for {0}".format(unit_filename)) + + return self.get_extension_cgroups(extension_name) + + def remove_extension_cgroups(self, extension_name): + # For transient units, cgroups are released automatically when the unit stops, so it is sufficient + # to call stop on them. Persistent cgroups are released when the unit is disabled and its configuration + # file is deleted. + # The assumption is that this method is called after the extension has been uninstalled. For now, since + # we're running extensions within transient scopes which clean up after they finish running, no removal + # of units is needed. In the future, when the extension is running under its own slice, + # the following clean up is needed. 
+ unit_filename = self._get_extension_slice_name(extension_name) + try: + unit_path = os.path.join(UNIT_FILES_FILE_SYSTEM_PATH, unit_filename) + shellutil.run_command(["systemctl", "stop", unit_filename]) + fileutil.rm_files(unit_path) + shellutil.run_command(["systemctl", "daemon-reload"]) + except Exception as e: + raise CGroupsException("Failed to remove {0}. Error: {1}".format(unit_filename, ustr(e))) + + def get_extension_cgroups(self, extension_name): + # TODO: The slice returned by this function is not used currently. We need to create the extension scopes within + # this slice and use the slice to monitor the cgroups. Also see comment in create_extension_cgroups. + slice_name = self._get_extension_cgroup_name(extension_name) + + cgroups = [] + + def create_cgroup(controller): + cpu_cgroup_path = os.path.join(CGROUPS_FILE_SYSTEM_ROOT, controller, 'system.slice', slice_name) + cgroups.append(CGroup.create(cpu_cgroup_path, controller, extension_name)) + + self._foreach_controller(create_cgroup, 'Cannot retrieve cgroup for extension {0}; resource usage will not be tracked.'.format(extension_name)) + + return cgroups + + @staticmethod + def _is_systemd_failure(scope_name, process_output): + unit_not_found = "Unit {0} not found.".format(scope_name) + return unit_not_found in process_output or scope_name not in process_output + + def start_extension_command(self, extension_name, command, timeout, shell, cwd, env, stdout, stderr, + error_code=ExtensionErrorCodes.PluginUnknownFailure): + scope_name = "{0}_{1}".format(self._get_extension_cgroup_name(extension_name), uuid.uuid4()) + + process = subprocess.Popen( + "systemd-run --unit={0} --scope {1}".format(scope_name, command), + shell=shell, + cwd=cwd, + stdout=stdout, + stderr=stderr, + env=env, + preexec_fn=os.setsid) + + logger.info("Started extension using scope '{0}'", scope_name) + extension_cgroups = [] + + def create_cgroup(controller): + cgroup_path = os.path.join(CGROUPS_FILE_SYSTEM_ROOT, controller, 
'system.slice', scope_name + ".scope") + extension_cgroups.append(CGroup.create(cgroup_path, controller, extension_name)) + + self._foreach_controller(create_cgroup, 'Cannot create cgroup for extension {0}; ' + 'resource usage will not be tracked.'.format(extension_name)) + self.track_cgroups(extension_cgroups) + + # Wait for process completion or timeout + try: + process_output = handle_process_completion(process=process, + command=command, + timeout=timeout, + stdout=stdout, + stderr=stderr, + error_code=error_code) + except ExtensionError as e: + # The extension didn't terminate successfully. Determine whether it was due to systemd errors or + # extension errors. + process_output = read_output(stdout, stderr) + systemd_failure = self._is_systemd_failure(scope_name, process_output) + + if not systemd_failure: + # There was an extension error; it either timed out or returned a non-zero exit code. Re-raise the error + raise + else: + # There was an issue with systemd-run. We need to log it and retry the extension without systemd. + err_msg = 'Systemd process exited with code %s and output %s' % (e.exit_code, process_output) \ + if isinstance(e, ExtensionOperationError) else "Systemd timed-out, output: %s" % process_output + event_msg = 'Failed to run systemd-run for unit {0}.scope. ' \ + 'Will retry invoking the extension without systemd. 
' \ + 'Systemd-run error: {1}'.format(scope_name, err_msg) + add_event(AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.InvokeCommandUsingSystemd, + is_success=False, + log_event=False, + message=event_msg) + logger.warn(event_msg) + + # Reset the stdout and stderr + stdout.truncate(0) + stderr.truncate(0) + + # Try invoking the process again, this time without systemd-run + logger.info('Extension invocation using systemd failed, falling back to regular invocation ' + 'without cgroups tracking.') + process = subprocess.Popen(command, + shell=shell, + cwd=cwd, + env=env, + stdout=stdout, + stderr=stderr, + preexec_fn=os.setsid) + + process_output = handle_process_completion(process=process, + command=command, + timeout=timeout, + stdout=stdout, + stderr=stderr, + error_code=error_code) + + return [], process_output + + # The process terminated in time and successfully + return extension_cgroups, process_output + + def cleanup_legacy_cgroups(self): + """ + Previous versions of the daemon (2.2.31-2.2.40) wrote their PID to /sys/fs/cgroup/{cpu,memory}/WALinuxAgent/WALinuxAgent; + starting from version 2.2.41 we track the agent service in walinuxagent.service instead of WALinuxAgent/WALinuxAgent. 
If + we find that any of the legacy groups include the PID of the daemon then we disable data collection for this instance + (under systemd, moving PIDs across the cgroup file system can produce unpredictable results) + """ + def report_error(_, daemon_pid): + raise CGroupsException( + "The daemon's PID ({0}) was already added to the legacy cgroup; this invalidates resource usage data.".format(daemon_pid)) + + CGroupsApi._foreach_legacy_cgroup(report_error) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/cgroupconfigurator.py walinuxagent-2.2.45/azurelinuxagent/common/cgroupconfigurator.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/cgroupconfigurator.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/cgroupconfigurator.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,210 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ + +import os +import subprocess + +from azurelinuxagent.common import logger +from azurelinuxagent.common.cgroupapi import CGroupsApi +from azurelinuxagent.common.cgroupstelemetry import CGroupsTelemetry +from azurelinuxagent.common.exception import CGroupsException, ExtensionErrorCodes +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.osutil import get_osutil +from azurelinuxagent.common.utils.extensionprocessutil import handle_process_completion +from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION +from azurelinuxagent.common.event import add_event, WALAEventOperation + + +class CGroupConfigurator(object): + """ + This class implements the high-level operations on CGroups (e.g. initialization, creation, etc) + + NOTE: with the exception of start_extension_command, none of the methods in this class raise exceptions (cgroup operations should not block extensions) + """ + class __impl(object): + def __init__(self): + """ + Ensures the cgroups file system is mounted and selects the correct API to interact with it + """ + osutil = get_osutil() + + self._cgroups_supported = osutil.is_cgroups_supported() + + if self._cgroups_supported: + self._enabled = True + try: + osutil.mount_cgroups() + self._cgroups_api = CGroupsApi.create() + status = "The cgroup filesystem is ready to use" + except Exception as e: + status = ustr(e) + self._enabled = False + else: + self._enabled = False + self._cgroups_api = None + status = "Cgroups are not supported by the platform" + + logger.info("CGroups Status: {0}".format(status)) + + add_event( + AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.InitializeCGroups, + is_success=self._enabled, + message=status, + log_event=False) + + def enabled(self): + return self._enabled + + def enable(self): + if not self._cgroups_supported: + raise CGroupsException("cgroups are not supported on the current platform") + + self._enabled = True + + def 
disable(self): + self._enabled = False + + def _invoke_cgroup_operation(self, operation, error_message, on_error=None): + """ + Ensures the given operation is invoked only if cgroups are enabled and traps any errors on the operation. + """ + if not self.enabled(): + return + + try: + return operation() + except Exception as e: + logger.warn("{0} Error: {1}".format(error_message, ustr(e))) + if on_error is not None: + try: + on_error(e) + except Exception as ex: + logger.warn("CGroupConfigurator._invoke_cgroup_operation: {0}".format(ustr(e))) + + def create_agent_cgroups(self, track_cgroups): + """ + Creates and returns the cgroups needed to track the VM Agent + """ + def __impl(): + cgroups = self._cgroups_api.create_agent_cgroups() + + if track_cgroups: + for cgroup in cgroups: + CGroupsTelemetry.track_cgroup(cgroup) + + return cgroups + + self._invoke_cgroup_operation(__impl, "Failed to create a cgroup for the VM Agent; resource usage for the Agent will not be tracked.") + + def cleanup_legacy_cgroups(self): + def __impl(): + self._cgroups_api.cleanup_legacy_cgroups() + + message = 'Failed to process legacy cgroups. Collection of resource usage data will be disabled.' 
+ + def disable_cgroups(exception): + self.disable() + CGroupsTelemetry.reset() + add_event( + AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.CGroupsCleanUp, + is_success=False, + log_event=False, + message='{0} {1}'.format(message, ustr(exception))) + + self._invoke_cgroup_operation(__impl, message, on_error=disable_cgroups) + + def create_extension_cgroups_root(self): + """ + Creates the container (directory/cgroup) that includes the cgroups for all extensions (/sys/fs/cgroup/*/walinuxagent.extensions) + """ + def __impl(): + self._cgroups_api.create_extension_cgroups_root() + + self._invoke_cgroup_operation(__impl, "Failed to create a root cgroup for extensions; resource usage for extensions will not be tracked.") + + def create_extension_cgroups(self, name): + """ + Creates and returns the cgroups for the given extension + """ + def __impl(): + return self._cgroups_api.create_extension_cgroups(name) + + return self._invoke_cgroup_operation(__impl, "Failed to create a cgroup for extension '{0}'; resource usage will not be tracked.".format(name)) + + def remove_extension_cgroups(self, name): + """ + Deletes the cgroup for the given extension + """ + def __impl(): + cgroups = self._cgroups_api.remove_extension_cgroups(name) + return cgroups + + self._invoke_cgroup_operation(__impl, "Failed to delete cgroups for extension '{0}'.".format(name)) + + def start_extension_command(self, extension_name, command, timeout, shell, cwd, env, stdout, stderr, + error_code=ExtensionErrorCodes.PluginUnknownFailure): + """ + Starts a command (install/enable/etc) for an extension and adds the command's PID to the extension's cgroup + :param extension_name: The extension executing the command + :param command: The command to invoke + :param timeout: Number of seconds to wait for command completion + :param cwd: The working directory for the command + :param env: The environment to pass to the command's process + :param stdout: File object to redirect stdout to + 
:param stderr: File object to redirect stderr to + :param shell: True if the command should be executed through the shell + :param error_code: Extension error code to raise in case of error + """ + if not self.enabled(): + process = subprocess.Popen(command, + shell=shell, + cwd=cwd, + env=env, + stdout=stdout, + stderr=stderr, + preexec_fn=os.setsid) + + process_output = handle_process_completion(process=process, + command=command, + timeout=timeout, + stdout=stdout, + stderr=stderr, + error_code=error_code) + else: + extension_cgroups, process_output = self._cgroups_api.start_extension_command(extension_name, + command, + timeout, + shell=shell, + cwd=cwd, + env=env, + stdout=stdout, + stderr=stderr, + error_code=error_code) + + return process_output + + # unique instance for the singleton (TODO: find a better pattern for a singleton) + _instance = None + + @staticmethod + def get_instance(): + if CGroupConfigurator._instance is None: + CGroupConfigurator._instance = CGroupConfigurator.__impl() + return CGroupConfigurator._instance diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/cgroup.py walinuxagent-2.2.45/azurelinuxagent/common/cgroup.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/cgroup.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/cgroup.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,247 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ +import errno +import os +import re + +from azurelinuxagent.common import logger +from azurelinuxagent.common.exception import CGroupsException +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.osutil import get_osutil +from azurelinuxagent.common.utils import fileutil + +re_user_system_times = re.compile(r'user (\d+)\nsystem (\d+)\n') + + +class CGroup(object): + @staticmethod + def create(cgroup_path, controller, extension_name): + """ + Factory method to create the correct CGroup. + """ + if controller == "cpu": + return CpuCgroup(extension_name, cgroup_path) + if controller == "memory": + return MemoryCgroup(extension_name, cgroup_path) + raise CGroupsException('CGroup controller {0} is not supported'.format(controller)) + + def __init__(self, name, cgroup_path, controller_type): + """ + Initialize _data collection for the Memory controller + :param: name: Name of the CGroup + :param: cgroup_path: Path of the controller + :param: controller_type: + :return: + """ + self.name = name + self.path = cgroup_path + self.controller = controller_type + + def _get_cgroup_file(self, file_name): + return os.path.join(self.path, file_name) + + def _get_file_contents(self, file_name): + """ + Retrieve the contents to file. + + :param str file_name: Name of file within that metric controller + :return: Entire contents of the file + :rtype: str + """ + + parameter_file = self._get_cgroup_file(file_name) + + try: + return fileutil.read_file(parameter_file) + except Exception: + raise + + def _get_parameters(self, parameter_name, first_line_only=False): + """ + Retrieve the values of a parameter from a controller. + Returns a list of values in the file. + + :param first_line_only: return only the first line. 
+ :param str parameter_name: Name of file within that metric controller + :return: The first line of the file, without line terminator + :rtype: [str] + """ + result = [] + try: + values = self._get_file_contents(parameter_name).splitlines() + result = values[0] if first_line_only else values + except IndexError: + parameter_filename = self._get_cgroup_file(parameter_name) + logger.error("File {0} is empty but should not be".format(parameter_filename)) + raise CGroupsException("File {0} is empty but should not be".format(parameter_filename)) + except Exception as e: + if isinstance(e, (IOError, OSError)) and e.errno == errno.ENOENT: + raise e + parameter_filename = self._get_cgroup_file(parameter_name) + raise CGroupsException("Exception while attempting to read {0}".format(parameter_filename), e) + return result + + def is_active(self): + try: + tasks = self._get_parameters("tasks") + if tasks: + return len(tasks) != 0 + except (IOError, OSError) as e: + if e.errno == errno.ENOENT: + # only suppressing file not found exceptions. + pass + else: + logger.periodic_warn(logger.EVERY_HALF_HOUR, + 'Could not get list of tasks from "tasks" file in the cgroup: {0}.' + ' Internal error: {1}'.format(self.path, ustr(e))) + except CGroupsException as e: + logger.periodic_warn(logger.EVERY_HALF_HOUR, + 'Could not get list of tasks from "tasks" file in the cgroup: {0}.' + ' Internal error: {1}'.format(self.path, ustr(e))) + return False + + return False + + +class CpuCgroup(CGroup): + def __init__(self, name, cgroup_path): + """ + Initialize _data collection for the Cpu controller. User must call update() before attempting to get + any useful metrics. 
+ + :return: CpuCgroup + """ + super(CpuCgroup, self).__init__(name, cgroup_path, "cpu") + + self._osutil = get_osutil() + self._current_cpu_total = 0 + self._previous_cpu_total = 0 + self._current_system_cpu = self._osutil.get_total_cpu_ticks_since_boot() + self._previous_system_cpu = 0 + + def __str__(self): + return "cgroup: Name: {0}, cgroup_path: {1}; Controller: {2}".format( + self.name, self.path, self.controller + ) + + def _get_current_cpu_total(self): + """ + Compute the number of USER_HZ of CPU time (user and system) consumed by this cgroup since boot. + + :return: int + """ + cpu_total = 0 + try: + cpu_stat = self._get_file_contents('cpuacct.stat') + except Exception as e: + if isinstance(e, (IOError, OSError)) and e.errno == errno.ENOENT: + raise e + raise CGroupsException("Exception while attempting to read {0}".format("cpuacct.stat"), e) + + if cpu_stat: + m = re_user_system_times.match(cpu_stat) + if m: + cpu_total = int(m.groups()[0]) + int(m.groups()[1]) + return cpu_total + + def _update_cpu_data(self): + """ + Update all raw _data required to compute metrics of interest. The intent is to call update() once, then + call the various get_*() methods which use this _data, which we've collected exactly once. + """ + self._previous_cpu_total = self._current_cpu_total + self._previous_system_cpu = self._current_system_cpu + self._current_cpu_total = self._get_current_cpu_total() + self._current_system_cpu = self._osutil.get_total_cpu_ticks_since_boot() + + def _get_cpu_percent(self): + """ + Compute the percent CPU time used by this cgroup over the elapsed time since the last time this instance was + update()ed. If the cgroup fully consumed 2 cores on a 4 core system, return 200. 
+ + :return: CPU usage in percent of a single core + :rtype: float + """ + cpu_delta = self._current_cpu_total - self._previous_cpu_total + system_delta = max(1, self._current_system_cpu - self._previous_system_cpu) + + return round(float(cpu_delta * self._osutil.get_processor_cores() * 100) / float(system_delta), 3) + + def get_cpu_usage(self): + """ + Collects and return the cpu usage. + + :rtype: float + """ + self._update_cpu_data() + return self._get_cpu_percent() + + + class MemoryCgroup(CGroup): + def __init__(self, name, cgroup_path): + """ + Initialize _data collection for the Memory controller + + :return: MemoryCgroup + """ + super(MemoryCgroup, self).__init__(name, cgroup_path, "memory") + + def __str__(self): + return "cgroup: Name: {0}, cgroup_path: {1}; Controller: {2}".format( + self.name, self.path, self.controller + ) + + def get_memory_usage(self): + """ + Collect memory.usage_in_bytes from the cgroup. + + :return: Memory usage in bytes + :rtype: int + """ + usage = None + + try: + usage = self._get_parameters('memory.usage_in_bytes', first_line_only=True) + except (IOError, OSError) as e: + if e.errno == errno.ENOENT: + # only suppressing file not found exceptions. + pass + else: + raise e + + if not usage: + usage = "0" + return int(usage) + + def get_max_memory_usage(self): + """ + Collect memory.max_usage_in_bytes from the cgroup. + + :return: Max memory usage in bytes + :rtype: int + """ + usage = None + try: + usage = self._get_parameters('memory.max_usage_in_bytes', first_line_only=True) + except (IOError, OSError) as e: + if e.errno == errno.ENOENT: + # only suppressing file not found exceptions.
+ pass + else: + raise e + if not usage: + usage = "0" + return int(usage) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/cgroupstelemetry.py walinuxagent-2.2.45/azurelinuxagent/common/cgroupstelemetry.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/cgroupstelemetry.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/cgroupstelemetry.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,222 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ +import errno +import threading +from datetime import datetime as dt + +from azurelinuxagent.common import logger +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.exception import CGroupsException + + +class CGroupsTelemetry(object): + """ + """ + _tracked = [] + _cgroup_metrics = {} + _rlock = threading.RLock() + + @staticmethod + def _get_metrics_list(metric): + return [metric.average(), metric.min(), metric.max(), metric.median(), metric.count(), + metric.first_poll_time(), metric.last_poll_time()] + + @staticmethod + def _process_cgroup_metric(cgroup_metrics): + memory_usage = cgroup_metrics.get_memory_usage() + max_memory_usage = cgroup_metrics.get_max_memory_usage() + cpu_usage = cgroup_metrics.get_cpu_usage() + + processed_extension = {} + + if cpu_usage.count() > 0: + processed_extension["cpu"] = {"cur_cpu": CGroupsTelemetry._get_metrics_list(cpu_usage)} + + if memory_usage.count() > 0: + if "memory" in processed_extension: + processed_extension["memory"]["cur_mem"] = CGroupsTelemetry._get_metrics_list(memory_usage) + else: + processed_extension["memory"] = {"cur_mem": CGroupsTelemetry._get_metrics_list(memory_usage)} + + if max_memory_usage.count() > 0: + if "memory" in processed_extension: + processed_extension["memory"]["max_mem"] = CGroupsTelemetry._get_metrics_list(max_memory_usage) + else: + processed_extension["memory"] = {"max_mem": CGroupsTelemetry._get_metrics_list(max_memory_usage)} + + return processed_extension + + @staticmethod + def track_cgroup(cgroup): + """ + Adds the given item to the dictionary of tracked cgroups + """ + with CGroupsTelemetry._rlock: + if not CGroupsTelemetry.is_tracked(cgroup.path): + CGroupsTelemetry._tracked.append(cgroup) + logger.info("Started tracking new cgroup: {0}, path: {1}".format(cgroup.name, cgroup.path)) + + @staticmethod + def is_tracked(path): + """ + Returns true if the given item is in the list of tracked items + O(n) operation. 
But limited to few cgroup objects we have. + """ + with CGroupsTelemetry._rlock: + for cgroup in CGroupsTelemetry._tracked: + if path == cgroup.path: + return True + + return False + + @staticmethod + def stop_tracking(cgroup): + """ + Stop tracking the cgroups for the given name + """ + with CGroupsTelemetry._rlock: + CGroupsTelemetry._tracked.remove(cgroup) + logger.info("Stopped tracking cgroup: {0}, path: {1}".format(cgroup.name, cgroup.path)) + + @staticmethod + def report_all_tracked(): + collected_metrics = {} + + for name, cgroup_metrics in CGroupsTelemetry._cgroup_metrics.items(): + perf_metric = CGroupsTelemetry._process_cgroup_metric(cgroup_metrics) + + if perf_metric: + collected_metrics[name] = perf_metric + + cgroup_metrics.clear() + + # Doing cleanup after the metrics have already been collected. + for key in [key for key in CGroupsTelemetry._cgroup_metrics if + CGroupsTelemetry._cgroup_metrics[key].marked_for_delete]: + del CGroupsTelemetry._cgroup_metrics[key] + + return collected_metrics + + @staticmethod + def poll_all_tracked(): + with CGroupsTelemetry._rlock: + for cgroup in CGroupsTelemetry._tracked[:]: + + if cgroup.name not in CGroupsTelemetry._cgroup_metrics: + CGroupsTelemetry._cgroup_metrics[cgroup.name] = CgroupMetrics() + + CGroupsTelemetry._cgroup_metrics[cgroup.name].collect_data(cgroup) + + if not cgroup.is_active(): + CGroupsTelemetry.stop_tracking(cgroup) + CGroupsTelemetry._cgroup_metrics[cgroup.name].marked_for_delete = True + + @staticmethod + def prune_all_tracked(): + with CGroupsTelemetry._rlock: + for cgroup in CGroupsTelemetry._tracked[:]: + if not cgroup.is_active(): + CGroupsTelemetry.stop_tracking(cgroup) + + @staticmethod + def reset(): + with CGroupsTelemetry._rlock: + CGroupsTelemetry._tracked *= 0 # emptying the list + CGroupsTelemetry._cgroup_metrics = {} + + +class CgroupMetrics(object): + def __init__(self): + self._memory_usage = Metric() + self._max_memory_usage = Metric() + self._cpu_usage = Metric() + 
self.marked_for_delete = False + + def collect_data(self, cgroup): + # noinspection PyBroadException + try: + if cgroup.controller == "cpu": + self._cpu_usage.append(cgroup.get_cpu_usage()) + elif cgroup.controller == "memory": + self._memory_usage.append(cgroup.get_memory_usage()) + self._max_memory_usage.append(cgroup.get_max_memory_usage()) + else: + raise CGroupsException('CGroup controller {0} is not supported'.format(cgroup.controller)) + except Exception as e: + if not isinstance(e, (IOError, OSError)) or e.errno != errno.ENOENT: + logger.periodic_warn(logger.EVERY_HALF_HOUR, 'Could not collect metrics for cgroup {0}. Error : {1}'.format(cgroup.path, ustr(e))) + + def get_memory_usage(self): + return self._memory_usage + + def get_max_memory_usage(self): + return self._max_memory_usage + + def get_cpu_usage(self): + return self._cpu_usage + + def clear(self): + self._memory_usage.clear() + self._max_memory_usage.clear() + self._cpu_usage.clear() + + + class Metric(object): + def __init__(self): + self._data = [] + self._first_poll_time = None + self._last_poll_time = None + + def append(self, data): + if not self._first_poll_time: + # We only want to do it first time.
+ self._first_poll_time = dt.utcnow() + + self._data.append(data) + self._last_poll_time = dt.utcnow() + + def clear(self): + self._first_poll_time = None + self._last_poll_time = None + self._data *= 0 + + def average(self): + return float(sum(self._data)) / float(len(self._data)) if self._data else None + + def max(self): + return max(self._data) if self._data else None + + def min(self): + return min(self._data) if self._data else None + + def median(self): + data = sorted(self._data) + l_len = len(data) + if l_len < 1: + return None + if l_len % 2 == 0: + return (data[int((l_len - 1) / 2)] + data[int((l_len + 1) / 2)]) / 2.0 + else: + return data[int((l_len - 1) / 2)] + + def count(self): + return len(self._data) + + def first_poll_time(self): + return str(self._first_poll_time) + + def last_poll_time(self): + return str(self._last_poll_time) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/conf.py walinuxagent-2.2.45/azurelinuxagent/common/conf.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/conf.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/conf.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # """ @@ -26,6 +26,8 @@ import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.common.exception import AgentConfigError +DISABLE_AGENT_FILE = 'disable_agent' + class ConfigurationProvider(object): """ @@ -40,11 +42,11 @@ raise AgentConfigError("Can't not parse empty configuration") for line in content.split('\n'): if not line.startswith("#") and "=" in line: - parts = line.split('=') + parts = line.split('=', 1) if len(parts) < 2: continue key = parts[0].strip() - value = parts[1].split('#')[0].strip("\" ") + value = parts[1].split('#')[0].strip("\" ").strip() self.values[key] = value if value != "None" else None def get(self, key, default_val): @@ -85,59 +87,65 @@ raise AgentConfigError(("Failed to load conf file:{0}, {1}" "").format(conf_file_path, err)) + __SWITCH_OPTIONS__ = { - "OS.AllowHTTP" : False, - "OS.EnableFirewall" : False, - "OS.EnableFIPS" : False, - "OS.EnableRDMA" : False, - "OS.UpdateRdmaDriver" : False, - "OS.CheckRdmaDriver" : False, - "Logs.Verbose" : False, - "Provisioning.Enabled" : True, - "Provisioning.UseCloudInit" : False, - "Provisioning.AllowResetSysUser" : False, - "Provisioning.RegenerateSshHostKeyPair" : False, - "Provisioning.DeleteRootPassword" : False, - "Provisioning.DecodeCustomData" : False, - "Provisioning.ExecuteCustomData" : False, - "Provisioning.MonitorHostName" : False, - "DetectScvmmEnv" : False, - "ResourceDisk.Format" : False, - "DetectScvmmEnv" : False, - "ResourceDisk.Format" : False, - "ResourceDisk.EnableSwap" : False, - "AutoUpdate.Enabled" : True, - "EnableOverProvisioning" : False + "OS.AllowHTTP": False, + "OS.EnableFirewall": False, + "OS.EnableFIPS": False, + "OS.EnableRDMA": False, + "OS.UpdateRdmaDriver": False, + "OS.CheckRdmaDriver": False, + "Logs.Verbose": False, + "Logs.Console": True, + "Extensions.Enabled": True, + "Provisioning.AllowResetSysUser": False, + 
"Provisioning.RegenerateSshHostKeyPair": False, + "Provisioning.DeleteRootPassword": False, + "Provisioning.DecodeCustomData": False, + "Provisioning.ExecuteCustomData": False, + "Provisioning.MonitorHostName": False, + "DetectScvmmEnv": False, + "ResourceDisk.Format": False, + "ResourceDisk.EnableSwap": False, + "ResourceDisk.EnableSwapEncryption": False, + "AutoUpdate.Enabled": True, + "EnableOverProvisioning": True, + "CGroups.EnforceLimits": False, } + __STRING_OPTIONS__ = { - "Lib.Dir" : "/var/lib/waagent", - "DVD.MountPoint" : "/mnt/cdrom/secure", - "Pid.File" : "/var/run/waagent.pid", - "Extension.LogDir" : "/var/log/azure", - "OS.OpensslPath" : "/usr/bin/openssl", - "OS.SshDir" : "/etc/ssh", - "OS.HomeDir" : "/home", - "OS.PasswordPath" : "/etc/shadow", - "OS.SudoersDir" : "/etc/sudoers.d", - "OS.RootDeviceScsiTimeout" : None, - "Provisioning.SshHostKeyPairType" : "rsa", - "Provisioning.PasswordCryptId" : "6", - "HttpProxy.Host" : None, - "ResourceDisk.MountPoint" : "/mnt/resource", - "ResourceDisk.MountOptions" : None, - "ResourceDisk.Filesystem" : "ext3", - "AutoUpdate.GAFamily" : "Prod" + "Lib.Dir": "/var/lib/waagent", + "DVD.MountPoint": "/mnt/cdrom/secure", + "Pid.File": "/var/run/waagent.pid", + "Extension.LogDir": "/var/log/azure", + "OS.OpensslPath": "/usr/bin/openssl", + "OS.SshDir": "/etc/ssh", + "OS.HomeDir": "/home", + "OS.PasswordPath": "/etc/shadow", + "OS.SudoersDir": "/etc/sudoers.d", + "OS.RootDeviceScsiTimeout": None, + "Provisioning.Agent": "auto", + "Provisioning.SshHostKeyPairType": "rsa", + "Provisioning.PasswordCryptId": "6", + "HttpProxy.Host": None, + "ResourceDisk.MountPoint": "/mnt/resource", + "ResourceDisk.MountOptions": None, + "ResourceDisk.Filesystem": "ext3", + "AutoUpdate.GAFamily": "Prod", + "CGroups.Excluded": "customscript,runcommand", } + __INTEGER_OPTIONS__ = { - "OS.SshClientAliveInterval" : 180, - "Provisioning.PasswordCryptSaltLength" : 10, - "HttpProxy.Port" : None, - "ResourceDisk.SwapSizeMB" : 0, - 
"Autoupdate.Frequency" : 3600 + "OS.SshClientAliveInterval": 180, + "Provisioning.PasswordCryptSaltLength": 10, + "HttpProxy.Port": None, + "ResourceDisk.SwapSizeMB": 0, + "Autoupdate.Frequency": 3600 } + def get_configuration(conf=__conf__): options = {} for option in __SWITCH_OPTIONS__: @@ -151,20 +159,28 @@ return options + def enable_firewall(conf=__conf__): return conf.get_switch("OS.EnableFirewall", False) + def enable_rdma(conf=__conf__): return conf.get_switch("OS.EnableRDMA", False) or \ conf.get_switch("OS.UpdateRdmaDriver", False) or \ conf.get_switch("OS.CheckRdmaDriver", False) + def enable_rdma_update(conf=__conf__): return conf.get_switch("OS.UpdateRdmaDriver", False) +def enable_check_rdma_driver(conf=__conf__): + return conf.get_switch("OS.CheckRdmaDriver", True) + def get_logs_verbose(conf=__conf__): return conf.get_switch("Logs.Verbose", False) +def get_logs_console(conf=__conf__): + return conf.get_switch("Logs.Console", True) def get_lib_dir(conf=__conf__): return conf.get("Lib.Dir", "/var/lib/waagent") @@ -185,44 +201,57 @@ def get_ext_log_dir(conf=__conf__): return conf.get("Extension.LogDir", "/var/log/azure") + def get_fips_enabled(conf=__conf__): return conf.get_switch("OS.EnableFIPS", False) + def get_openssl_cmd(conf=__conf__): return conf.get("OS.OpensslPath", "/usr/bin/openssl") + def get_ssh_client_alive_interval(conf=__conf__): return conf.get("OS.SshClientAliveInterval", 180) + def get_ssh_dir(conf=__conf__): return conf.get("OS.SshDir", "/etc/ssh") + def get_home_dir(conf=__conf__): return conf.get("OS.HomeDir", "/home") + def get_passwd_file_path(conf=__conf__): return conf.get("OS.PasswordPath", "/etc/shadow") + def get_sudoers_dir(conf=__conf__): return conf.get("OS.SudoersDir", "/etc/sudoers.d") + def get_sshd_conf_file_path(conf=__conf__): return os.path.join(get_ssh_dir(conf), "sshd_config") + def get_ssh_key_glob(conf=__conf__): return os.path.join(get_ssh_dir(conf), 'ssh_host_*key*') + def 
get_ssh_key_private_path(conf=__conf__): return os.path.join(get_ssh_dir(conf), 'ssh_host_{0}_key'.format(get_ssh_host_keypair_type(conf))) + def get_ssh_key_public_path(conf=__conf__): return os.path.join(get_ssh_dir(conf), 'ssh_host_{0}_key.pub'.format(get_ssh_host_keypair_type(conf))) + def get_root_device_scsi_timeout(conf=__conf__): return conf.get("OS.RootDeviceScsiTimeout", None) + def get_ssh_host_keypair_type(conf=__conf__): keypair_type = conf.get("Provisioning.SshHostKeyPairType", "rsa") if keypair_type == "auto": @@ -233,14 +262,14 @@ return "rsa" return keypair_type + def get_ssh_host_keypair_mode(conf=__conf__): return conf.get("Provisioning.SshHostKeyPairType", "rsa") -def get_provision_enabled(conf=__conf__): - return conf.get_switch("Provisioning.Enabled", True) -def get_provision_cloudinit(conf=__conf__): - return conf.get_switch("Provisioning.UseCloudInit", False) +def get_extensions_enabled(conf=__conf__): + return conf.get_switch("Extensions.Enabled", True) + def get_allow_reset_sys_user(conf=__conf__): return conf.get_switch("Provisioning.AllowResetSysUser", False) @@ -266,6 +295,21 @@ return conf.get("Provisioning.PasswordCryptId", "6") +def get_provisioning_agent(conf=__conf__): + return conf.get("Provisioning.Agent", "auto") + + +def get_provision_enabled(conf=__conf__): + """ + Provisioning (as far as waagent is concerned) is enabled if either the + agent is set to 'auto' or 'waagent'. This wraps logic that was introduced + for flexible provisioning agent configuration and detection. The replaces + the older bool setting to turn provisioning on or off. 
+ """ + + return get_provisioning_agent(conf) in ("auto", "waagent") + + def get_password_crypt_salt_len(conf=__conf__): return conf.get_int("Provisioning.PasswordCryptSaltLength", 10) @@ -292,7 +336,9 @@ def get_resourcedisk_enable_swap(conf=__conf__): return conf.get_switch("ResourceDisk.EnableSwap", False) - + +def get_resourcedisk_enable_swap_encryption(conf=__conf__): + return conf.get_switch("ResourceDisk.EnableSwapEncryption", False) def get_resourcedisk_mountpoint(conf=__conf__): return conf.get("ResourceDisk.MountPoint", "/mnt/resource") @@ -321,8 +367,23 @@ def get_autoupdate_frequency(conf=__conf__): return conf.get_int("Autoupdate.Frequency", 3600) + def get_enable_overprovisioning(conf=__conf__): - return conf.get_switch("EnableOverProvisioning", False) + return conf.get_switch("EnableOverProvisioning", True) + def get_allow_http(conf=__conf__): return conf.get_switch("OS.AllowHTTP", False) + + +def get_disable_agent_file_path(conf=__conf__): + return os.path.join(get_lib_dir(conf), DISABLE_AGENT_FILE) + + +def get_cgroups_enforce_limits(conf=__conf__): + return conf.get_switch("CGroups.EnforceLimits", False) + + +def get_cgroups_excluded(conf=__conf__): + excluded_value = conf.get("CGroups.Excluded", "customscript, runcommand") + return [s for s in [i.strip().lower() for i in excluded_value.split(',')] if len(s) > 0] if excluded_value else [] diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/datacontract.py walinuxagent-2.2.45/azurelinuxagent/common/datacontract.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/datacontract.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/datacontract.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,83 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2019 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.6+ and Openssl 1.0+ +# + +from azurelinuxagent.common.exception import ProtocolError +import azurelinuxagent.common.logger as logger + +""" +Base class for data contracts between guest and host and utilities to manipulate the properties in those contracts +""" + + +class DataContract(object): + pass + + +class DataContractList(list): + def __init__(self, item_cls): + self.item_cls = item_cls + + +def validate_param(name, val, expected_type): + if val is None: + raise ProtocolError("{0} is None".format(name)) + if not isinstance(val, expected_type): + raise ProtocolError(("{0} type should be {1} not {2}" + "").format(name, expected_type, type(val))) + + +def set_properties(name, obj, data): + if isinstance(obj, DataContract): + validate_param("Property '{0}'".format(name), data, dict) + for prob_name, prob_val in data.items(): + prob_full_name = "{0}.{1}".format(name, prob_name) + try: + prob = getattr(obj, prob_name) + except AttributeError: + logger.warn("Unknown property: {0}", prob_full_name) + continue + prob = set_properties(prob_full_name, prob, prob_val) + setattr(obj, prob_name, prob) + return obj + elif isinstance(obj, DataContractList): + validate_param("List '{0}'".format(name), data, list) + for item_data in data: + item = obj.item_cls() + item = set_properties(name, item, item_data) + obj.append(item) + return obj + else: + return data + + +def get_properties(obj): + if isinstance(obj, DataContract): + data = {} + props = vars(obj) + for prob_name, prob in list(props.items()): + data[prob_name] = get_properties(prob) + 
return data + elif isinstance(obj, DataContractList): + data = [] + for item in obj: + item_data = get_properties(item) + data.append(item_data) + return data + else: + return obj diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/dhcp.py walinuxagent-2.2.45/azurelinuxagent/common/dhcp.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/dhcp.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/dhcp.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ import os import socket @@ -86,9 +86,8 @@ route_exists = False logger.info("Test for route to {0}".format(KNOWN_WIRESERVER_IP)) try: - route_file = '/proc/net/route' - if os.path.exists(route_file) and \ - KNOWN_WIRESERVER_IP_ENTRY in open(route_file).read(): + route_table = self.osutil.read_route_table() + if any([(KNOWN_WIRESERVER_IP_ENTRY in route) for route in route_table]): # reset self.gateway and self.routes # we do not need to alter the routing table self.endpoint = KNOWN_WIRESERVER_IP @@ -102,7 +101,7 @@ logger.error( "Could not determine whether route exists to {0}: {1}".format( KNOWN_WIRESERVER_IP, e)) - + return route_exists @property @@ -151,6 +150,15 @@ def send_dhcp_req(self): """ + Check if DHCP is available + """ + (dhcp_available, endpoint) = self.osutil.is_dhcp_available() + if not dhcp_available: + logger.info("send_dhcp_req: DHCP not available") + self.endpoint = endpoint + return + + """ Build dhcp request with mac addr Configure route to allow dhcp traffic Stop dhcp service if necessary @@ -376,7 +384,7 @@ 
unpack_big_endian(request, 4, 4))) if request_broadcast: - # set broadcast flag to true to request the dhcp sever + # set broadcast flag to true to request the dhcp server # to respond to a boradcast address, # this is useful when user dhclient fails. request[0x0A] = 0x80; diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/errorstate.py walinuxagent-2.2.45/azurelinuxagent/common/errorstate.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/errorstate.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/errorstate.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,45 @@ +from datetime import datetime, timedelta + +ERROR_STATE_DELTA_DEFAULT = timedelta(minutes=15) +ERROR_STATE_DELTA_INSTALL = timedelta(minutes=5) +ERROR_STATE_HOST_PLUGIN_FAILURE = timedelta(minutes=5) + + +class ErrorState(object): + def __init__(self, min_timedelta=ERROR_STATE_DELTA_DEFAULT): + self.min_timedelta = min_timedelta + + self.count = 0 + self.timestamp = None + + def incr(self): + if self.count == 0: + self.timestamp = datetime.utcnow() + + self.count += 1 + + def reset(self): + self.count = 0 + self.timestamp = None + + def is_triggered(self): + if self.timestamp is None: + return False + + delta = datetime.utcnow() - self.timestamp + if delta >= self.min_timedelta: + return True + + return False + + @property + def fail_time(self): + if self.timestamp is None: + return 'unknown' + + delta = round((datetime.utcnow() - self.timestamp).seconds / 60.0, 2) + if delta < 60: + return '{0} min'.format(delta) + + delta_hr = round(delta / 60.0, 2) + return '{0} hr'.format(delta_hr) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/event.py walinuxagent-2.2.45/azurelinuxagent/common/event.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/event.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/event.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 
Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,59 +12,81 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # -import os -import sys -import traceback import atexit import json -import time -import datetime +import os +import sys import threading -import platform - -from datetime import datetime, timedelta +import time +import traceback +from datetime import datetime import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger - -from azurelinuxagent.common.exception import EventError, ProtocolError +from azurelinuxagent.common.exception import EventError from azurelinuxagent.common.future import ustr -from azurelinuxagent.common.protocol.restapi import TelemetryEventParam, \ - TelemetryEventList, \ - TelemetryEvent, \ - set_properties, get_properties -from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ - DISTRO_CODE_NAME, AGENT_VERSION, \ - CURRENT_AGENT, CURRENT_VERSION +from azurelinuxagent.common.datacontract import get_properties +from azurelinuxagent.common.telemetryevent import TelemetryEventParam, TelemetryEvent +from azurelinuxagent.common.utils import fileutil, textutil +from azurelinuxagent.common.version import CURRENT_VERSION, CURRENT_AGENT _EVENT_MSG = "Event: name={0}, op={1}, message={2}, duration={3}" +TELEMETRY_EVENT_PROVIDER_ID = "69B669B9-4AF8-4C50-BDC4-6006FA76E975" + +# Store the last retrieved container id as an environment variable to be shared between threads for telemetry purposes +CONTAINER_ID_ENV_VARIABLE = "AZURE_GUEST_AGENT_CONTAINER_ID" + + +def get_container_id_from_env(): + return os.environ.get(CONTAINER_ID_ENV_VARIABLE, "UNINITIALIZED") + class WALAEventOperation: ActivateResourceDisk = 
"ActivateResourceDisk" AgentBlacklisted = "AgentBlacklisted" AgentEnabled = "AgentEnabled" + ArtifactsProfileBlob = "ArtifactsProfileBlob" AutoUpdate = "AutoUpdate" CustomData = "CustomData" + CGroupsCleanUp = "CGroupsCleanUp" + CGroupsLimitsCrossed = "CGroupsLimitsCrossed" + ExtensionMetricsData = "ExtensionMetricsData" Deploy = "Deploy" Disable = "Disable" + Downgrade = "Downgrade" Download = "Download" Enable = "Enable" ExtensionProcessing = "ExtensionProcessing" Firewall = "Firewall" + GetArtifactExtended = "GetArtifactExtended" HealthCheck = "HealthCheck" + HealthObservation = "HealthObservation" HeartBeat = "HeartBeat" HostPlugin = "HostPlugin" + HostPluginHeartbeat = "HostPluginHeartbeat" + HostPluginHeartbeatExtended = "HostPluginHeartbeatExtended" HttpErrors = "HttpErrors" + ImdsHeartbeat = "ImdsHeartbeat" Install = "Install" + InitializeCGroups = "InitializeCGroups" InitializeHostPlugin = "InitializeHostPlugin" + InvokeCommandUsingSystemd = "InvokeCommandUsingSystemd" + Log = "Log" + OSInfo = "OSInfo" Partition = "Partition" ProcessGoalState = "ProcessGoalState" Provision = "Provision" + ProvisionGuestAgent = "ProvisionGuestAgent" + RemoteAccessHandling = "RemoteAccessHandling" ReportStatus = "ReportStatus" + ReportStatusExtended = "ReportStatusExtended" Restart = "Restart" + SequenceNumberMismatch = "SequenceNumberMismatch" + SetCGroupsLimits = "SetCGroupsLimits" + SkipUpdate = "SkipUpdate" UnhandledError = "UnhandledError" UnInstall = "UnInstall" Unknown = "Unknown" @@ -72,10 +94,19 @@ Update = "Update" +SHOULD_ENCODE_MESSAGE_LEN = 80 +SHOULD_ENCODE_MESSAGE_OP = [ + WALAEventOperation.Disable, + WALAEventOperation.Enable, + WALAEventOperation.Install, + WALAEventOperation.UnInstall, +] + + class EventStatus(object): EVENT_STATUS_FILE = "event_status.json" - def __init__(self, status_dir=conf.get_lib_dir()): + def __init__(self): self._path = None self._status = {} @@ -90,7 +121,7 @@ event = self._event_name(name, version, op) if event not in 
self._status: return True - return self._status[event] == True + return self._status[event] is True def initialize(self, status_dir=conf.get_lib_dir()): self._path = os.path.join(status_dir, EventStatus.EVENT_STATUS_FILE) @@ -98,7 +129,7 @@ def mark_event_status(self, name, version, op, status): event = self._event_name(name, version, op) - self._status[event] = (status == True) + self._status[event] = (status is True) self._save() def _event_name(self, name, version, op): @@ -121,6 +152,7 @@ except Exception as e: logger.warn("Exception occurred saving event status: {0}".format(e)) + __event_status__ = EventStatus() __event_status_operations__ = [ WALAEventOperation.AutoUpdate, @@ -128,9 +160,45 @@ ] +def _encode_message(op, message): + """ + Gzip and base64 encode a message based on the operation. + + The intent of this message is to make the logs human readable and include the + stdout/stderr from extension operations. Extension operations tend to generate + a lot of noise, which makes it difficult to parse the line-oriented waagent.log. + The compromise is to encode the stdout/stderr so we preserve the data and do + not destroy the line oriented nature. + + The data can be recovered using the following command: + + $ echo '' | base64 -d | pigz -zd + + You may need to install the pigz command. + + :param op: Operation, e.g. Enable or Install + :param message: Message to encode + :return: gzip'ed and base64 encoded message, or the original message + """ + + if len(message) == 0: + return message + + if op not in SHOULD_ENCODE_MESSAGE_OP: + return message + + try: + return textutil.compress(message) + except Exception: + # If the message could not be encoded a dummy message ('<>') is returned. + # The original message was still sent via telemetry, so all is not lost. 
+ return "<>" + + def _log_event(name, op, message, duration, is_success=True): global _EVENT_MSG + message = _encode_message(op, message) if not is_success: logger.error(_EVENT_MSG, name, op, message, duration) else: @@ -147,9 +215,11 @@ logger.warn("Cannot save event -- Event reporter is not initialized.") return - if not os.path.exists(self.event_dir): - os.mkdir(self.event_dir) - os.chmod(self.event_dir, 0o700) + try: + fileutil.mkdir(self.event_dir, mode=0o700) + except (IOError, OSError) as e: + msg = "Failed to create events folder {0}. Error: {1}".format(self.event_dir, ustr(e)) + raise EventError(msg) existing_events = os.listdir(self.event_dir) if len(existing_events) >= 1000: @@ -169,7 +239,8 @@ hfile.write(data.encode("utf-8")) os.rename(filename + ".tmp", filename + ".tld") except IOError as e: - raise EventError("Failed to write events to file:{0}", e) + msg = "Failed to write events to file: {0}".format(e) + raise EventError(msg) def reset_periodic(self): self.periodic_events = {} @@ -178,80 +249,174 @@ return h not in self.periodic_events or \ (self.periodic_events[h] + delta) <= datetime.now() - def add_periodic(self, - delta, name, op=WALAEventOperation.Unknown, is_success=True, duration=0, - version=CURRENT_VERSION, message="", evt_type="", - is_internal=False, log_event=True, force=False): + def add_periodic(self, delta, name, op=WALAEventOperation.Unknown, is_success=True, duration=0, + version=str(CURRENT_VERSION), message="", evt_type="", is_internal=False, log_event=True, + force=False): + h = hash(name + op + ustr(is_success) + message) - h = hash(name+op+ustr(is_success)+message) - if force or self.is_period_elapsed(delta, h): - self.add_event(name, - op=op, is_success=is_success, duration=duration, - version=version, message=message, evt_type=evt_type, - is_internal=is_internal, log_event=log_event) + self.add_event(name, op=op, is_success=is_success, duration=duration, + version=version, message=message, evt_type=evt_type, + 
is_internal=is_internal, log_event=log_event) self.periodic_events[h] = datetime.now() - def add_event(self, - name, - op=WALAEventOperation.Unknown, - is_success=True, - duration=0, - version=CURRENT_VERSION, - message="", - evt_type="", - is_internal=False, - log_event=True): + def add_event(self, name, op=WALAEventOperation.Unknown, is_success=True, duration=0, version=str(CURRENT_VERSION), + message="", evt_type="", is_internal=False, log_event=True): - if not is_success or log_event: + if (not is_success) and log_event: _log_event(name, op, message, duration, is_success=is_success) - event = TelemetryEvent(1, "69B669B9-4AF8-4C50-BDC4-6006FA76E975") + self._add_event(duration, evt_type, is_internal, is_success, message, name, op, version, event_id=1) + + def _add_event(self, duration, evt_type, is_internal, is_success, message, name, op, version, event_id): + event = TelemetryEvent(event_id, TELEMETRY_EVENT_PROVIDER_ID) event.parameters.append(TelemetryEventParam('Name', name)) event.parameters.append(TelemetryEventParam('Version', str(version))) event.parameters.append(TelemetryEventParam('IsInternal', is_internal)) event.parameters.append(TelemetryEventParam('Operation', op)) - event.parameters.append(TelemetryEventParam('OperationSuccess', - is_success)) + event.parameters.append(TelemetryEventParam('OperationSuccess', is_success)) event.parameters.append(TelemetryEventParam('Message', message)) event.parameters.append(TelemetryEventParam('Duration', duration)) event.parameters.append(TelemetryEventParam('ExtensionType', evt_type)) + self.add_default_parameters_to_event(event) + data = get_properties(event) + try: + self.save_event(json.dumps(data)) + except EventError as e: + logger.periodic_error(logger.EVERY_FIFTEEN_MINUTES, "[PERIODIC] {0}".format(ustr(e))) + + def add_log_event(self, level, message): + # By the time the message has gotten to this point it is formatted as + # + # YYYY/MM/DD HH:mm:ss.fffffff LEVEL . 
+ # + # The timestamp and the level are redundant, and should be stripped. + # The logging library does not schematize this data, so I am forced + # to parse the message. The format is regular, so the burden is low. + + parts = message.split(' ', 3) + msg = parts[3] if len(parts) == 4 \ + else message + + event = TelemetryEvent(7, "FFF0196F-EE4C-4EAF-9AA5-776F622DEB4F") + event.parameters.append(TelemetryEventParam('EventName', WALAEventOperation.Log)) + event.parameters.append(TelemetryEventParam('CapabilityUsed', logger.LogLevel.STRINGS[level])) + event.parameters.append(TelemetryEventParam('Context1', msg)) + event.parameters.append(TelemetryEventParam('Context2', '')) + event.parameters.append(TelemetryEventParam('Context3', '')) + + self.add_default_parameters_to_event(event) + data = get_properties(event) + try: + self.save_event(json.dumps(data)) + except EventError: + pass + + def add_metric(self, category, counter, instance, value, log_event=False): + """ + Create and save an event which contains a telemetry event. + + :param str category: The category of metric (e.g. "cpu", "memory") + :param str counter: The specific metric within the category (e.g. "%idle") + :param str instance: For instanced metrics, the instance identifier (filesystem name, cpu core#, etc.) 
+ :param value: Value of the metric + :param bool log_event: If true, log the collected metric in the agent log + """ + if log_event: + from azurelinuxagent.common.version import AGENT_NAME + message = "Metric {0}/{1} [{2}] = {3}".format(category, counter, instance, value) + _log_event(AGENT_NAME, "METRIC", message, 0) + + event = TelemetryEvent(4, "69B669B9-4AF8-4C50-BDC4-6006FA76E975") + event.parameters.append(TelemetryEventParam('Category', category)) + event.parameters.append(TelemetryEventParam('Counter', counter)) + event.parameters.append(TelemetryEventParam('Instance', instance)) + event.parameters.append(TelemetryEventParam('Value', value)) + + self.add_default_parameters_to_event(event) data = get_properties(event) try: self.save_event(json.dumps(data)) except EventError as e: logger.error("{0}", e) + @staticmethod + def add_default_parameters_to_event(event, set_default_values=False): + # We write the GAVersion here rather than add it in azurelinuxagent.ga.monitor.MonitorHandler.add_sysinfo + # as there could be a possibility of events being sent with newer version of the agent, rather than the agent + # version generating the event. + # Old behavior example: V1 writes the event on the disk and finds an update immediately, and updates. Now the + # new monitor thread would pick up the events from the disk and send it with the CURRENT_AGENT, which would have + # newer version of the agent. This causes confusion. + # + # ContainerId can change due to live migration and we want to preserve the container Id of the container writing + # the event, rather than sending the event. + # OpcodeName: This is used as the actual time of event generation. 
+ + default_parameters = [("GAVersion", CURRENT_AGENT), ('ContainerId', get_container_id_from_env()), + ('OpcodeName', datetime.utcnow().__str__()), + ('EventTid', threading.current_thread().ident), + ('EventPid', os.getpid()), ("TaskName", threading.current_thread().getName()), + ("KeywordName", '')] + + for param in default_parameters: + event.parameters.append(TelemetryEventParam(param[0], param[1])) + __event_logger__ = EventLogger() def elapsed_milliseconds(utc_start): - d = datetime.utcnow() - utc_start + now = datetime.utcnow() + if now < utc_start: + return 0 + + d = now - utc_start return int(((d.days * 24 * 60 * 60 + d.seconds) * 1000) + \ (d.microseconds / 1000.0)) -def report_event(op, is_success=True, message=''): + +def report_event(op, is_success=True, message='', log_event=True): from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION add_event(AGENT_NAME, - version=CURRENT_VERSION, + version=str(CURRENT_VERSION), is_success=is_success, message=message, - op=op) + op=op, + log_event=log_event) + def report_periodic(delta, op, is_success=True, message=''): from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION add_periodic(delta, AGENT_NAME, - version=CURRENT_VERSION, - is_success=is_success, - message=message, - op=op) + version=str(CURRENT_VERSION), + is_success=is_success, + message=message, + op=op) + + +def report_metric(category, counter, instance, value, log_event=False, reporter=__event_logger__): + """ + Send a telemetry event reporting a single instance of a performance counter. + :param str category: The category of the metric (cpu, memory, etc) + :param str counter: The name of the metric ("%idle", etc) + :param str instance: For instanced metrics, the identifier of the instance. E.g. 
a disk drive name, a cpu core# + :param value: The value of the metric + :param bool log_event: If True, log the metric in the agent log as well + :param EventLogger reporter: The EventLogger instance to which metric events should be sent + """ + if reporter.event_dir is None: + from azurelinuxagent.common.version import AGENT_NAME + logger.warn("Cannot report metric event -- Event reporter is not initialized.") + message = "Metric {0}/{1} [{2}] = {3}".format(category, counter, instance, value) + _log_event(AGENT_NAME, "METRIC", message, 0) + return + reporter.add_metric(category, counter, instance, value, log_event) -def add_event(name, op=WALAEventOperation.Unknown, is_success=True, duration=0, - version=CURRENT_VERSION, - message="", evt_type="", is_internal=False, log_event=True, - reporter=__event_logger__): + +def add_event(name, op=WALAEventOperation.Unknown, is_success=True, duration=0, version=str(CURRENT_VERSION), message="", + evt_type="", is_internal=False, log_event=True, reporter=__event_logger__): if reporter.event_dir is None: logger.warn("Cannot add event -- Event reporter is not initialized.") _log_event(name, op, message, duration, is_success=is_success) @@ -259,30 +424,34 @@ if should_emit_event(name, version, op, is_success): mark_event_status(name, version, op, is_success) - reporter.add_event( - name, op=op, is_success=is_success, duration=duration, - version=str(version), message=message, evt_type=evt_type, - is_internal=is_internal, log_event=log_event) - -def add_periodic( - delta, name, op=WALAEventOperation.Unknown, is_success=True, duration=0, - version=CURRENT_VERSION, - message="", evt_type="", is_internal=False, log_event=True, force=False, - reporter=__event_logger__): + reporter.add_event(name, op=op, is_success=is_success, duration=duration, version=str(version), message=message, + evt_type=evt_type, is_internal=is_internal, log_event=log_event) + + +def add_log_event(level, message, reporter=__event_logger__): + if 
reporter.event_dir is None: + return + + reporter.add_log_event(level, message) + + +def add_periodic(delta, name, op=WALAEventOperation.Unknown, is_success=True, duration=0, + version=str(CURRENT_VERSION), message="", evt_type="", is_internal=False, log_event=True, force=False, + reporter=__event_logger__): if reporter.event_dir is None: logger.warn("Cannot add periodic event -- Event reporter is not initialized.") _log_event(name, op, message, duration, is_success=is_success) return - reporter.add_periodic( - delta, name, op=op, is_success=is_success, duration=duration, - version=str(version), message=message, evt_type=evt_type, - is_internal=is_internal, log_event=log_event, force=force) + reporter.add_periodic(delta, name, op=op, is_success=is_success, duration=duration, version=str(version), + message=message, evt_type=evt_type, is_internal=is_internal, log_event=log_event, force=force) + def mark_event_status(name, version, op, status): if op in __event_status_operations__: __event_status__.mark_event_status(name, version, op, status) + def should_emit_event(name, version, op, status): return \ op not in __event_status_operations__ or \ @@ -290,12 +459,15 @@ not __event_status__.event_marked(name, version, op) or \ __event_status__.event_succeeded(name, version, op) != status + def init_event_logger(event_dir): __event_logger__.event_dir = event_dir + def init_event_status(status_dir): __event_status__.initialize(status_dir) + def dump_unhandled_err(name): if hasattr(sys, 'last_type') and hasattr(sys, 'last_value') and \ hasattr(sys, 'last_traceback'): diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/exception.py walinuxagent-2.2.45/azurelinuxagent/common/exception.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/exception.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/exception.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 
Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # """ Defines all exceptions @@ -44,20 +44,60 @@ class AgentNetworkError(AgentError): """ - When network is not avaiable. + When network is not available. """ def __init__(self, msg=None, inner=None): super(AgentNetworkError, self).__init__(msg, inner) +class CGroupsException(AgentError): + + def __init__(self, msg, inner=None): + super(AgentError, self).__init__(msg, inner) + # TODO: AgentError should set the message - investigate whether doing it there would break anything + self.message = msg + + def __str__(self): + return self.message + class ExtensionError(AgentError): """ When failed to execute an extension """ - def __init__(self, msg=None, inner=None): + def __init__(self, msg=None, inner=None, code=-1): super(ExtensionError, self).__init__(msg, inner) + self.code = code + + +class ExtensionOperationError(ExtensionError): + """ + When the command times out or returns with a non-zero exit_code + """ + + def __init__(self, msg=None, inner=None, code=-1, exit_code=-1): + super(ExtensionOperationError, self).__init__(msg, inner) + self.code = code + self.exit_code = exit_code + + +class ExtensionUpdateError(ExtensionError): + """ + When failed to update an extension + """ + + def __init__(self, msg=None, inner=None, code=-1): + super(ExtensionUpdateError, self).__init__(msg, inner, code) + + +class ExtensionDownloadError(ExtensionError): + """ + When failed to download and setup an extension + """ + + def __init__(self, msg=None, inner=None, code=-1): + super(ExtensionDownloadError, self).__init__(msg, inner, code) class ProvisionError(AgentError): @@ -86,6 +126,7
@@ def __init__(self, msg=None, inner=None): super(DhcpError, self).__init__(msg, inner) + class OSUtilError(AgentError): """ Failed to perform operation to OS configuration @@ -122,6 +163,15 @@ super(HttpError, self).__init__(msg, inner) +class InvalidContainerError(HttpError): + """ + Container id sent in the header is invalid + """ + + def __init__(self, msg=None, inner=None): + super(InvalidContainerError, self).__init__(msg, inner) + + class EventError(AgentError): """ Event reporting error @@ -158,3 +208,64 @@ if msg is None: msg = "Resource is gone" super(ResourceGoneError, self).__init__(msg, inner) + + +class RemoteAccessError(AgentError): + """ + Remote Access Error + """ + + def __init__(self, msg=None, inner=None): + super(RemoteAccessError, self).__init__(msg, inner) + + +class ExtensionErrorCodes(object): + """ + Common Error codes used across by Compute RP for better understanding + the cause and clarify common occurring errors + """ + + # Unknown Failures + PluginUnknownFailure = -1 + + # Success + PluginSuccess = 0 + + # Catch all error code. + PluginProcessingError = 1000 + + # Plugin failed to download + PluginManifestDownloadError = 1001 + + # Cannot find or load successfully the HandlerManifest.json + PluginHandlerManifestNotFound = 1002 + + # Cannot successfully serialize the HandlerManifest.json + PluginHandlerManifestDeserializationError = 1003 + + # Cannot download the plugin package + PluginPackageDownloadFailed = 1004 + + # Cannot extract the plugin form package + PluginPackageExtractionFailed = 1005 + + # Install failed + PluginInstallProcessingFailed = 1007 + + # Update failed + PluginUpdateProcessingFailed = 1008 + + # Enable failed + PluginEnableProcessingFailed = 1009 + + # Disable failed + PluginDisableProcessingFailed = 1010 + + # Extension script timed out + PluginHandlerScriptTimedout = 1011 + + # Invalid status file of the extension. 
+ PluginSettingsStatusInvalid = 1012 + + def __init__(self): + pass diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/future.py walinuxagent-2.2.45/azurelinuxagent/common/future.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/future.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/future.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,14 @@ +import platform import sys +import os +import re + +# Note broken dependency handling to avoid potential backward +# compatibility issues on different distributions +try: + import distro +except Exception: + pass """ Add alias for python2 and python3 libs and functions. @@ -24,3 +34,66 @@ else: raise ImportError("Unknown python version: {0}".format(sys.version_info)) + + +def get_linux_distribution(get_full_name, supported_dists): + """Abstract platform.linux_distribution() call which is deprecated as of + Python 3.5 and removed in Python 3.7""" + try: + supported = platform._supported_dists + (supported_dists,) + osinfo = list( + platform.linux_distribution( + full_distribution_name=get_full_name, + supported_dists=supported + ) + ) + + # The platform.linux_distribution() lib has issue with detecting OpenWRT linux distribution. + # Merge the following patch provided by OpenWRT as a temporary fix. 
+ if os.path.exists("/etc/openwrt_release"): + osinfo = get_openwrt_platform() + + if not osinfo or osinfo == ['', '', '']: + return get_linux_distribution_from_distro(get_full_name) + full_name = platform.linux_distribution()[0].strip() + osinfo.append(full_name) + except AttributeError: + return get_linux_distribution_from_distro(get_full_name) + + return osinfo + + +def get_linux_distribution_from_distro(get_full_name): + """Get the distribution information from the distro Python module.""" + # If we get here we have to have the distro module, thus we do + # not wrap the call in a try-except block as it would mask the problem + # and result in a broken agent installation + osinfo = list( + distro.linux_distribution( + full_distribution_name=get_full_name + ) + ) + full_name = distro.linux_distribution()[0].strip() + osinfo.append(full_name) + return osinfo + +def get_openwrt_platform(): + """ + Add this workaround for detecting OpenWRT products because + the version and product information is contained in the /etc/openwrt_release file. 
+ """ + result = [None, None, None] + openwrt_version = re.compile(r"^DISTRIB_RELEASE=['\"](\d+\.\d+.\d+)['\"]") + openwrt_product = re.compile(r"^DISTRIB_ID=['\"]([\w-]+)['\"]") + + with open('/etc/openwrt_release', 'r') as fh: + content = fh.readlines() + for line in content: + version_matches = openwrt_version.match(line) + product_matches = openwrt_product.match(line) + if version_matches: + result[1] = version_matches.group(1) + elif product_matches: + if product_matches.group(1) == "OpenWrt": + result[0] = "openwrt" + return result \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/__init__.py walinuxagent-2.2.45/azurelinuxagent/common/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/__init__.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,6 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/logger.py walinuxagent-2.2.45/azurelinuxagent/common/logger.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/logger.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/logger.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -12,22 +12,24 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and openssl_bin 1.0+ +# Requires Python 2.6+ and openssl_bin 1.0+ # """ Log utils """ -import os import sys + from azurelinuxagent.common.future import ustr from datetime import datetime, timedelta EVERY_DAY = timedelta(days=1) EVERY_HALF_DAY = timedelta(hours=12) +EVERY_SIX_HOURS = timedelta(hours=6) EVERY_HOUR = timedelta(hours=1) EVERY_HALF_HOUR = timedelta(minutes=30) EVERY_FIFTEEN_MINUTES = timedelta(minutes=15) + class Logger(object): """ Logger class @@ -41,16 +43,31 @@ def reset_periodic(self): self.logger.periodic_messages = {} - def is_period_elapsed(self, delta, h): + def set_prefix(self, prefix): + self.prefix = prefix + + def _is_period_elapsed(self, delta, h): return h not in self.logger.periodic_messages or \ (self.logger.periodic_messages[h] + delta) <= datetime.now() - def periodic(self, delta, msg_format, *args): + def _periodic(self, delta, log_level_op, msg_format, *args): h = hash(msg_format) - if self.is_period_elapsed(delta, h): - self.info(msg_format, *args) + if self._is_period_elapsed(delta, h): + log_level_op(msg_format, *args) self.logger.periodic_messages[h] = datetime.now() + def periodic_info(self, delta, msg_format, *args): + self._periodic(delta, self.info, msg_format, *args) + + def periodic_verbose(self, delta, msg_format, *args): + self._periodic(delta, self.verbose, msg_format, *args) + + def periodic_warn(self, delta, msg_format, *args): + self._periodic(delta, self.warn, msg_format, *args) + + def periodic_error(self, delta, msg_format, *args): + self._periodic(delta, self.error, msg_format, *args) + def verbose(self, msg_format, *args): self.log(LogLevel.VERBOSE, msg_format, *args) @@ -64,7 +81,7 @@ self.log(LogLevel.ERROR, msg_format, *args) def log(self, level, msg_format, *args): - #if msg_format is not unicode convert it to unicode + # if msg_format is not unicode convert it 
to unicode if type(msg_format) is not ustr: msg_format = ustr(msg_format, errors="backslashreplace") if len(args) > 0: @@ -92,6 +109,7 @@ appender = _create_logger_appender(appender_type, level, path) self.appenders.append(appender) + class ConsoleAppender(object): def __init__(self, level, path): self.level = level @@ -105,6 +123,7 @@ except IOError: pass + class FileAppender(object): def __init__(self, level, path): self.level = level @@ -118,6 +137,7 @@ except IOError: pass + class StdoutAppender(object): def __init__(self, level): self.level = level @@ -129,9 +149,24 @@ except IOError: pass -#Initialize logger instance + +class TelemetryAppender(object): + def __init__(self, level, event_func): + self.level = level + self.event_func = event_func + + def write(self, level, msg): + if self.level <= level: + try: + self.event_func(level, msg) + except IOError: + pass + + +# Initialize logger instance DEFAULT_LOGGER = Logger() + class LogLevel(object): VERBOSE = 0 INFO = 1 @@ -144,35 +179,78 @@ "ERROR" ] + class AppenderType(object): FILE = 0 CONSOLE = 1 STDOUT = 2 + TELEMETRY = 3 + def add_logger_appender(appender_type, level=LogLevel.INFO, path=None): DEFAULT_LOGGER.add_appender(appender_type, level, path) + def reset_periodic(): DEFAULT_LOGGER.reset_periodic() -def periodic(delta, msg_format, *args): - DEFAULT_LOGGER.periodic(delta, msg_format, *args) + +def set_prefix(prefix): + DEFAULT_LOGGER.set_prefix(prefix) + + +def periodic_info(delta, msg_format, *args): + """ + The hash-map maintaining the state of the logs gets reset here - + azurelinuxagent.ga.monitor.MonitorHandler.reset_loggers. The current time period is defined by RESET_LOGGERS_PERIOD. + """ + DEFAULT_LOGGER.periodic_info(delta, msg_format, *args) + + +def periodic_verbose(delta, msg_format, *args): + """ + The hash-map maintaining the state of the logs gets reset here - + azurelinuxagent.ga.monitor.MonitorHandler.reset_loggers. The current time period is defined by RESET_LOGGERS_PERIOD. 
+ """ + DEFAULT_LOGGER.periodic_verbose(delta, msg_format, *args) + + +def periodic_error(delta, msg_format, *args): + """ + The hash-map maintaining the state of the logs gets reset here - + azurelinuxagent.ga.monitor.MonitorHandler.reset_loggers. The current time period is defined by RESET_LOGGERS_PERIOD. + """ + DEFAULT_LOGGER.periodic_error(delta, msg_format, *args) + + +def periodic_warn(delta, msg_format, *args): + """ + The hash-map maintaining the state of the logs gets reset here - + azurelinuxagent.ga.monitor.MonitorHandler.reset_loggers. The current time period is defined by RESET_LOGGERS_PERIOD. + """ + DEFAULT_LOGGER.periodic_warn(delta, msg_format, *args) + def verbose(msg_format, *args): DEFAULT_LOGGER.verbose(msg_format, *args) + def info(msg_format, *args): DEFAULT_LOGGER.info(msg_format, *args) + def warn(msg_format, *args): DEFAULT_LOGGER.warn(msg_format, *args) + def error(msg_format, *args): DEFAULT_LOGGER.error(msg_format, *args) + def log(level, msg_format, *args): DEFAULT_LOGGER.log(level, msg_format, args) + def _create_logger_appender(appender_type, level=LogLevel.INFO, path=None): if appender_type == AppenderType.CONSOLE: return ConsoleAppender(level, path) @@ -180,6 +258,8 @@ return FileAppender(level, path) elif appender_type == AppenderType.STDOUT: return StdoutAppender(level) + elif appender_type == AppenderType.TELEMETRY: + return TelemetryAppender(level, path) else: raise ValueError("Unknown appender type") diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/alpine.py walinuxagent-2.2.45/azurelinuxagent/common/osutil/alpine.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/alpine.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/osutil/alpine.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 
(the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.logger as logger @@ -22,19 +22,17 @@ from azurelinuxagent.common.osutil.default import DefaultOSUtil class AlpineOSUtil(DefaultOSUtil): + def __init__(self): super(AlpineOSUtil, self).__init__() self.agent_conf_file_path = '/etc/waagent.conf' + self.jit_enabled = True def is_dhcp_enabled(self): return True def get_dhcp_pid(self): - ret = shellutil.run_get_output('pidof dhcpcd', chk_err=False) - if ret[0] == 0: - logger.info('dhcpcd is pid {}'.format(ret[1])) - return ret[1].strip() - return None + return self._get_dhcp_pid(["pidof", "dhcpcd"]) def restart_if(self, ifname): logger.info('restarting {} (sort of, actually SIGHUPing dhcpcd)'.format(ifname)) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/arch.py walinuxagent-2.2.45/azurelinuxagent/common/osutil/arch.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/arch.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/osutil/arch.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,5 +1,5 @@ # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,14 +13,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import os import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.osutil.default import DefaultOSUtil + class ArchUtil(DefaultOSUtil): + def __init__(self): + super(ArchUtil, self).__init__() + self.jit_enabled = True + def is_dhcp_enabled(self): return True @@ -41,15 +46,14 @@ return shellutil.run("systemctl start systemd-networkd", chk_err=False) def start_agent_service(self): - return shellutil.run("systemctl start waagent", chk_err=False) + return shellutil.run("systemctl start {0}".format(self.service_name), chk_err=False) def stop_agent_service(self): - return shellutil.run("systemctl stop waagent", chk_err=False) + return shellutil.run("systemctl stop {0}".format(self.service_name), chk_err=False) def get_dhcp_pid(self): - ret= shellutil.run_get_output("pidof systemd-networkd") - return ret[1] if ret[0] == 0 else None + return self._get_dhcp_pid(["pidof", "systemd-networkd"]) def conf_sshd(self, disable_password): # Don't whack the system default sshd conf - pass \ No newline at end of file + pass diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/bigip.py walinuxagent-2.2.45/azurelinuxagent/common/osutil/bigip.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/bigip.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/osutil/bigip.py 2019-11-07 00:36:56.000000000 +0000 @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import array @@ -41,6 +41,7 @@ class BigIpOSUtil(DefaultOSUtil): + def __init__(self): super(BigIpOSUtil, self).__init__() @@ -84,20 +85,19 @@ return shellutil.run("/usr/bin/bigstart restart sshd", chk_err=False) def stop_agent_service(self): - return shellutil.run("/sbin/service waagent stop", chk_err=False) + return shellutil.run("/sbin/service {0} stop".format(self.service_name), chk_err=False) def start_agent_service(self): - return shellutil.run("/sbin/service waagent start", chk_err=False) + return shellutil.run("/sbin/service {0} start".format(self.service_name), chk_err=False) def register_agent_service(self): - return shellutil.run("/sbin/chkconfig --add waagent", chk_err=False) + return shellutil.run("/sbin/chkconfig --add {0}".format(self.service_name), chk_err=False) def unregister_agent_service(self): - return shellutil.run("/sbin/chkconfig --del waagent", chk_err=False) + return shellutil.run("/sbin/chkconfig --del {0}".format(self.service_name), chk_err=False) def get_dhcp_pid(self): - ret = shellutil.run_get_output("/sbin/pidof dhclient") - return ret[1] if ret[0] == 0 else None + return self._get_dhcp_pid(["/sbin/pidof", "dhclient"]) def set_hostname(self, hostname): """Set the static hostname of the device @@ -132,7 +132,7 @@ """ return None - def useradd(self, username, expiration=None): + def useradd(self, username, expiration=None, comment=None): """Create user account using tmsh Our policy is to create two accounts when booting a BIG-IP instance. @@ -143,6 +143,7 @@ :param username: The username that you want to add to the system :param expiration: The expiration date to use. We do not use this value. + :param comment: description of the account. We do not use this value. 
""" if self.get_userentry(username): logger.info("User {0} already exists, skip useradd", username) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/clearlinux.py walinuxagent-2.2.45/azurelinuxagent/common/osutil/clearlinux.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/clearlinux.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/osutil/clearlinux.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,5 +1,5 @@ # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import os @@ -34,9 +34,11 @@ from azurelinuxagent.common.osutil.default import DefaultOSUtil class ClearLinuxUtil(DefaultOSUtil): + def __init__(self): super(ClearLinuxUtil, self).__init__() self.agent_conf_file_path = '/usr/share/defaults/waagent/waagent.conf' + self.jit_enabled = True def is_dhcp_enabled(self): return True @@ -58,14 +60,13 @@ return shellutil.run("systemctl start systemd-networkd", chk_err=False) def start_agent_service(self): - return shellutil.run("systemctl start waagent", chk_err=False) + return shellutil.run("systemctl start {0}".format(self.service_name), chk_err=False) def stop_agent_service(self): - return shellutil.run("systemctl stop waagent", chk_err=False) + return shellutil.run("systemctl stop {0}".format(self.service_name), chk_err=False) def get_dhcp_pid(self): - ret= shellutil.run_get_output("pidof systemd-networkd") - return ret[1] if ret[0] == 0 else None + return self._get_dhcp_pid(["pidof", "systemd-networkd"]) def conf_sshd(self, disable_password): # Don't whack the system default sshd conf diff -Nru 
walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/coreos.py walinuxagent-2.2.45/azurelinuxagent/common/osutil/coreos.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/coreos.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/osutil/coreos.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,5 +1,5 @@ # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,19 +13,22 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import os import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.osutil.default import DefaultOSUtil + class CoreOSUtil(DefaultOSUtil): + def __init__(self): super(CoreOSUtil, self).__init__() self.agent_conf_file_path = '/usr/share/oem/waagent.conf' self.waagent_path = '/usr/share/oem/bin/waagent' self.python_path = '/usr/share/oem/python/bin' + self.jit_enabled = True if 'PATH' in os.environ: path = "{0}:{1}".format(os.environ['PATH'], self.python_path) else: @@ -65,16 +68,13 @@ return shellutil.run("systemctl start systemd-networkd", chk_err=False) def start_agent_service(self): - return shellutil.run("systemctl start waagent", chk_err=False) + return shellutil.run("systemctl start {0}".format(self.service_name), chk_err=False) def stop_agent_service(self): - return shellutil.run("systemctl stop waagent", chk_err=False) + return shellutil.run("systemctl stop {0}".format(self.service_name), chk_err=False) def get_dhcp_pid(self): - ret = shellutil.run_get_output("systemctl show -p MainPID " - "systemd-networkd", chk_err=False) - pid = ret[1].split('=', 1)[-1].strip() if ret[0] == 0 else None - return pid if pid != '0' else None + return 
self._get_dhcp_pid(["systemctl", "show", "-p", "MainPID", "systemd-networkd"]) def conf_sshd(self, disable_password): # In CoreOS, /etc/sshd_config is mount readonly. Skip the setting. diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/debian.py walinuxagent-2.2.45/azurelinuxagent/common/osutil/debian.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/debian.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/osutil/debian.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,5 +1,5 @@ # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import os @@ -32,9 +32,12 @@ import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.osutil.default import DefaultOSUtil -class DebianOSUtil(DefaultOSUtil): + +class DebianOSBaseUtil(DefaultOSUtil): + def __init__(self): - super(DebianOSUtil, self).__init__() + super(DebianOSBaseUtil, self).__init__() + self.jit_enabled = True def restart_ssh_service(self): return shellutil.run("systemctl --job-mode=ignore-dependencies try-reload-or-restart ssh", chk_err=False) @@ -56,3 +59,21 @@ def get_dhcp_lease_endpoint(self): return self.get_endpoint_from_leases_path('/var/lib/dhcp/dhclient.*.leases') + + +class DebianOSModernUtil(DebianOSBaseUtil): + + def __init__(self): + super(DebianOSModernUtil, self).__init__() + self.jit_enabled = True + self.service_name = self.get_service_name() + + @staticmethod + def get_service_name(): + return "walinuxagent" + + def stop_agent_service(self): + return shellutil.run("systemctl stop {0}".format(self.service_name), chk_err=False) + + def 
start_agent_service(self): + return shellutil.run("systemctl start {0}".format(self.service_name), chk_err=False) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/default.py walinuxagent-2.2.45/azurelinuxagent/common/osutil/default.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/default.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/osutil/default.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,5 +1,5 @@ # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,12 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # -import array import base64 import datetime +import errno import fcntl import glob import multiprocessing @@ -31,17 +31,21 @@ import struct import sys import time +from pwd import getpwall + +import array -import azurelinuxagent.common.logger as logger import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil - from azurelinuxagent.common.exception import OSUtilError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.utils.cryptutil import CryptUtil from azurelinuxagent.common.utils.flexible_version import FlexibleVersion +from azurelinuxagent.common.utils.networkutil import RouteEntry, NetworkInterfaceCard +from azurelinuxagent.common.utils.shellutil import CommandError __RULES_FILES__ = [ "/lib/udev/rules.d/75-persistent-net-generator.rules", "/etc/udev/rules.d/70-persistent-net.rules" ] @@ -60,13 +64,21 @@ # Note: # -- Initially "flight" 
the change to ACCEPT packets and develop a metric baseline # A subsequent release will convert the ACCEPT to DROP -FIREWALL_DROP = "iptables {0} -t security -{1} OUTPUT -d {2} -p tcp -m conntrack --ctstate INVALID,NEW -j ACCEPT" -# FIREWALL_DROP = "iptables {0} -t security -{1} OUTPUT -d {2} -p tcp -m conntrack --ctstate INVALID,NEW -j DROP" +# FIREWALL_DROP = "iptables {0} -t security -{1} OUTPUT -d {2} -p tcp -m conntrack --ctstate INVALID,NEW -j ACCEPT" +FIREWALL_DROP = "iptables {0} -t security -{1} OUTPUT -d {2} -p tcp -m conntrack --ctstate INVALID,NEW -j DROP" FIREWALL_LIST = "iptables {0} -t security -L -nxv" FIREWALL_PACKETS = "iptables {0} -t security -L OUTPUT --zero OUTPUT -nxv" FIREWALL_FLUSH = "iptables {0} -t security --flush" +# Precisely delete the rules created by the agent. +# this rule was used <= 2.2.25. This rule helped to validate our change, and determine impact. +FIREWALL_DELETE_CONNTRACK_ACCEPT = "iptables {0} -t security -D OUTPUT -d {1} -p tcp -m conntrack --ctstate INVALID,NEW -j ACCEPT" +FIREWALL_DELETE_OWNER_ACCEPT = "iptables {0} -t security -D OUTPUT -d {1} -p tcp -m owner --uid-owner {2} -j ACCEPT" +FIREWALL_DELETE_CONNTRACK_DROP = "iptables {0} -t security -D OUTPUT -d {1} -p tcp -m conntrack --ctstate INVALID,NEW -j DROP" + PACKET_PATTERN = "^\s*(\d+)\s+(\d+)\s+DROP\s+.*{0}[^\d]*$" +ALL_CPUS_REGEX = re.compile('^cpu .*') + _enable_firewall = True @@ -76,12 +88,29 @@ r'^\s*[A-F0-9]{8}(?:\-[A-F0-9]{4}){3}\-[A-F0-9]{12}\s*$', re.IGNORECASE) -class DefaultOSUtil(object): +IOCTL_SIOCGIFCONF = 0x8912 +IOCTL_SIOCGIFFLAGS = 0x8913 +IOCTL_SIOCGIFHWADDR = 0x8927 +IFNAMSIZ = 16 + +IP_COMMAND_OUTPUT = re.compile('^\d+:\s+(\w+):\s+(.*)$') + +BASE_CGROUPS = '/sys/fs/cgroup' +STORAGE_DEVICE_PATH = '/sys/bus/vmbus/devices/' +GEN2_DEVICE_ID = 'f8b3781a-1e82-4818-a1c3-63d806ec15bb' + +class DefaultOSUtil(object): def __init__(self): self.agent_conf_file_path = '/etc/waagent.conf' self.selinux = None self.disable_route_warning = False + 
self.jit_enabled = False + self.service_name = self.get_service_name() + + @staticmethod + def get_service_name(): + return "waagent" def get_firewall_dropped_packets(self, dst_ip=None): # If a previous attempt failed, do not retry @@ -92,7 +121,12 @@ try: wait = self.get_firewall_will_wait() - rc, output = shellutil.run_get_output(FIREWALL_PACKETS.format(wait)) + rc, output = shellutil.run_get_output(FIREWALL_PACKETS.format(wait), log_cmd=False, expected_errors=[3]) + if rc == 3: + # Transient error that we ignore. This code fires every loop + # of the daemon (60m), so we will get the value eventually. + return 0 + if rc != 0: return -1 @@ -129,24 +163,44 @@ else "" return wait - def remove_firewall(self): + def _delete_rule(self, rule): + """ + Continually execute the delete operation until the return + code is non-zero or the limit has been reached. + """ + for i in range(1, 100): + rc = shellutil.run(rule, chk_err=False) + if rc == 1: + return + elif rc == 2: + raise Exception("invalid firewall deletion rule '{0}'".format(rule)) + + def remove_firewall(self, dst_ip=None, uid=None): # If a previous attempt failed, do not retry global _enable_firewall if not _enable_firewall: return False try: + if dst_ip is None or uid is None: + msg = "Missing arguments to enable_firewall" + logger.warn(msg) + raise Exception(msg) + wait = self.get_firewall_will_wait() - flush_rule = FIREWALL_FLUSH.format(wait) - if shellutil.run(flush_rule, chk_err=True) != 0: - raise Exception("non-zero return code") + # This rule was <= 2.2.25 only, and may still exist on some VMs. Until 2.2.25 + # has aged out, keep this cleanup in place. 
+ self._delete_rule(FIREWALL_DELETE_CONNTRACK_ACCEPT.format(wait, dst_ip)) + + self._delete_rule(FIREWALL_DELETE_OWNER_ACCEPT.format(wait, dst_ip, uid)) + self._delete_rule(FIREWALL_DELETE_CONNTRACK_DROP.format(wait, dst_ip)) return True except Exception as e: _enable_firewall = False - logger.info("Unable to flush firewall -- " + logger.info("Unable to remove firewall -- " "no further attempts will be made: " "{0}".format(ustr(e))) return False @@ -167,10 +221,15 @@ # If the DROP rule exists, make no changes drop_rule = FIREWALL_DROP.format(wait, "C", dst_ip) - - if shellutil.run(drop_rule, chk_err=False) == 0: + rc = shellutil.run(drop_rule, chk_err=False) + if rc == 0: logger.verbose("Firewall appears established") return True + elif rc == 2: + self.remove_firewall(dst_ip, uid) + msg = "please upgrade iptables to a version that supports the -C option" + logger.warn(msg) + raise Exception(msg) # Otherwise, append both rules accept_rule = FIREWALL_ACCEPT.format(wait, "A", dst_ip, uid) @@ -205,7 +264,8 @@ "{0}".format(ustr(e))) return False - def _correct_instance_id(self, id): + @staticmethod + def _correct_instance_id(id): ''' Azure stores the instance ID with an incorrect byte ordering for the first parts. For example, the ID returned by the metadata service: @@ -238,9 +298,68 @@ may have been persisted using the incorrect byte ordering. ''' id_this = self.get_instance_id() - return id_that == id_this or \ - id_that == self._correct_instance_id(id_this) + logger.verbose("current instance id: {0}".format(id_this)) + logger.verbose(" former instance id: {0}".format(id_that)) + return id_this.lower() == id_that.lower() or \ + id_this.lower() == self._correct_instance_id(id_that).lower() + + @staticmethod + def is_cgroups_supported(): + """ + Enabled by default; disabled if the base path of cgroups doesn't exist. 
+ """ + return os.path.exists(BASE_CGROUPS) + @staticmethod + def _cgroup_path(tail=""): + return os.path.join(BASE_CGROUPS, tail).rstrip(os.path.sep) + + def mount_cgroups(self): + try: + path = self._cgroup_path() + if not os.path.exists(path): + fileutil.mkdir(path) + self.mount(device='cgroup_root', + mount_point=path, + option="-t tmpfs", + chk_err=False) + elif not os.path.isdir(self._cgroup_path()): + logger.error("Could not mount cgroups: ordinary file at {0}", path) + return + + controllers_to_mount = ['cpu,cpuacct', 'memory'] + errors = 0 + cpu_mounted = False + for controller in controllers_to_mount: + try: + target_path = self._cgroup_path(controller) + if not os.path.exists(target_path): + fileutil.mkdir(target_path) + self.mount(device=controller, + mount_point=target_path, + option="-t cgroup -o {0}".format(controller), + chk_err=False) + if controller == 'cpu,cpuacct': + cpu_mounted = True + except Exception as exception: + errors += 1 + if errors == len(controllers_to_mount): + raise + logger.warn("Could not mount cgroup controller {0}: {1}", controller, ustr(exception)) + + if cpu_mounted: + for controller in ['cpu', 'cpuacct']: + target_path = self._cgroup_path(controller) + if not os.path.exists(target_path): + os.symlink(self._cgroup_path('cpu,cpuacct'), target_path) + + except OSError as oe: + # log a warning for read-only file systems + logger.warn("Could not mount cgroups: {0}", ustr(oe)) + raise + except Exception as e: + logger.error("Could not mount cgroups: {0}", ustr(e)) + raise def get_agent_conf_file_path(self): return self.agent_conf_file_path @@ -254,15 +373,16 @@ ''' if os.path.isfile(PRODUCT_ID_FILE): s = fileutil.read_file(PRODUCT_ID_FILE).strip() - + else: rc, s = shellutil.run_get_output(DMIDECODE_CMD) if rc != 0 or UUID_PATTERN.match(s) is None: return "" - + return self._correct_instance_id(s.strip()) - def get_userentry(self, username): + @staticmethod + def get_userentry(username): try: return pwd.getpwnam(username) except 
KeyError: @@ -293,7 +413,7 @@ else: return False - def useradd(self, username, expiration=None): + def useradd(self, username, expiration=None, comment=None): """ Create user account with 'username' """ @@ -306,6 +426,9 @@ cmd = "useradd -m {0} -e {1}".format(username, expiration) else: cmd = "useradd -m {0}".format(username) + + if comment is not None: + cmd += " -c {0}".format(comment) retcode, out = shellutil.run_get_output(cmd) if retcode != 0: raise OSUtilError(("Failed to create user account:{0}, " @@ -322,6 +445,9 @@ if ret != 0: raise OSUtilError(("Failed to set password for {0}: {1}" "").format(username, output)) + + def get_users(self): + return getpwall() def conf_sudoer(self, username, nopasswd=False, remove=False): sudoers_dir = conf.get_sudoers_dir() @@ -330,12 +456,12 @@ if not remove: # for older distros create sudoers.d if not os.path.isdir(sudoers_dir): - sudoers_file = os.path.join(sudoers_dir, '../sudoers') # create the sudoers.d directory - os.mkdir(sudoers_dir) + fileutil.mkdir(sudoers_dir) # add the include of sudoers.d to the /etc/sudoers - sudoers = '\n#includedir ' + sudoers_dir + '\n' - fileutil.append_file(sudoers_file, sudoers) + sudoers_file = os.path.join(sudoers_dir, os.pardir, 'sudoers') + include_sudoers_dir = "\n#includedir {0}\n".format(sudoers_dir) + fileutil.append_file(sudoers_file, include_sudoers_dir) sudoer = None if nopasswd: sudoer = "{0} ALL=(ALL) NOPASSWD: ALL".format(username) @@ -367,7 +493,8 @@ except IOError as e: raise OSUtilError("Failed to delete root password:{0}".format(e)) - def _norm_path(self, filepath): + @staticmethod + def _norm_path(filepath): home = conf.get_home_dir() # Expand HOME variable if present in path path = os.path.normpath(filepath.replace("$HOME", home)) @@ -415,6 +542,8 @@ if value is not None: if not value.startswith("ssh-"): raise OSUtilError("Bad public key: {0}".format(value)) + if not value.endswith("\n"): + value += "\n" fileutil.write_file(path, value) elif thumbprint is not None: 
lib_dir = conf.get_lib_dir() @@ -579,8 +708,8 @@ time.sleep(1) return False - def mount(self, dvd, mount_point, option="", chk_err=True): - cmd = "mount {0} {1} {2}".format(option, dvd, mount_point) + def mount(self, device, mount_point, option="", chk_err=True): + cmd = "mount {0} {1} {2}".format(option, device, mount_point) retcode, err = shellutil.run_get_output(cmd, chk_err) if retcode != 0: detail = "[{0}] returned {1}: {2}".format(cmd, retcode, err) @@ -591,14 +720,13 @@ return shellutil.run("umount {0}".format(mount_point), chk_err=chk_err) def allow_dhcp_broadcast(self): - #Open DHCP port if iptables is enabled. + # Open DHCP port if iptables is enabled. # We supress error logging on error. shellutil.run("iptables -D INPUT -p udp --dport 68 -j ACCEPT", chk_err=False) shellutil.run("iptables -I INPUT -p udp --dport 68 -j ACCEPT", chk_err=False) - def remove_rules_files(self, rules_files=__RULES_FILES__): lib_dir = conf.get_lib_dir() for src in rules_files: @@ -623,12 +751,10 @@ def get_mac_addr(self): """ - Convienience function, returns mac addr bound to + Convenience function, returns mac addr bound to first non-loopback interface. """ - ifname='' - while len(ifname) < 2 : - ifname=self.get_first_if()[0] + ifname = self.get_if_name() addr = self.get_if_mac(ifname) return textutil.hexstr_to_bytearray(addr) @@ -640,49 +766,141 @@ socket.SOCK_DGRAM, socket.IPPROTO_UDP) param = struct.pack('256s', (ifname[:15]+('\0'*241)).encode('latin-1')) - info = fcntl.ioctl(sock.fileno(), 0x8927, param) + info = fcntl.ioctl(sock.fileno(), IOCTL_SIOCGIFHWADDR, param) + sock.close() return ''.join(['%02X' % textutil.str_to_ord(char) for char in info[18:24]]) - def get_first_if(self): + @staticmethod + def _get_struct_ifconf_size(): """ - Return the interface name, and ip addr of the - first active non-loopback interface. + Return the sizeof struct ifinfo. On 64-bit platforms the size is 40 bytes; + on 32-bit platforms the size is 32 bytes. 
""" - iface='' - expected=16 # how many devices should I expect... - - # for 64bit the size is 40 bytes - # for 32bit the size is 32 bytes python_arc = platform.architecture()[0] struct_size = 32 if python_arc == '32bit' else 40 + return struct_size - sock = socket.socket(socket.AF_INET, - socket.SOCK_DGRAM, - socket.IPPROTO_UDP) - buff=array.array('B', b'\0' * (expected * struct_size)) - param = struct.pack('iL', - expected*struct_size, - buff.buffer_info()[0]) - ret = fcntl.ioctl(sock.fileno(), 0x8912, param) - retsize=(struct.unpack('iL', ret)[0]) - if retsize == (expected * struct_size): + def _get_all_interfaces(self): + """ + Return a dictionary mapping from interface name to IPv4 address. + Interfaces without a name are ignored. + """ + expected=16 # how many devices should I expect... + struct_size = DefaultOSUtil._get_struct_ifconf_size() + array_size = expected * struct_size + + buff = array.array('B', b'\0' * array_size) + param = struct.pack('iL', array_size, buff.buffer_info()[0]) + + sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) + ret = fcntl.ioctl(sock.fileno(), IOCTL_SIOCGIFCONF, param) + retsize = (struct.unpack('iL', ret)[0]) + sock.close() + + if retsize == array_size: logger.warn(('SIOCGIFCONF returned more than {0} up ' 'network interfaces.'), expected) - sock = buff.tostring() - primary = bytearray(self.get_primary_interface(), encoding='utf-8') - for i in range(0, struct_size * expected, struct_size): - iface=sock[i:i+16].split(b'\0', 1)[0] - if len(iface) == 0 or self.is_loopback(iface) or iface != primary: - # test the next one - if len(iface) != 0 and not self.disable_route_warning: - logger.info('Interface [{0}] skipped'.format(iface)) - continue - else: - # use this one - logger.info('Interface [{0}] selected'.format(iface)) - break - return iface.decode('latin-1'), socket.inet_ntoa(sock[i+20:i+24]) + ifconf_buff = buff.tostring() + + ifaces = {} + for i in range(0, array_size, struct_size): + iface = 
ifconf_buff[i:i+IFNAMSIZ].split(b'\0', 1)[0] + if len(iface) > 0: + iface_name = iface.decode('latin-1') + if iface_name not in ifaces: + ifaces[iface_name] = socket.inet_ntoa(ifconf_buff[i+20:i+24]) + return ifaces + + def get_first_if(self): + """ + Return the interface name, and IPv4 addr of the "primary" interface or, + failing that, any active non-loopback interface. + """ + primary = self.get_primary_interface() + ifaces = self._get_all_interfaces() + + if primary in ifaces: + return primary, ifaces[primary] + + for iface_name in ifaces.keys(): + if not self.is_loopback(iface_name): + logger.info("Choosing non-primary [{0}]".format(iface_name)) + return iface_name, ifaces[iface_name] + + return '', '' + + @staticmethod + def _build_route_list(proc_net_route): + """ + Construct a list of network route entries + :param list(str) proc_net_route: Route table lines, including headers, containing at least one route + :return: List of network route objects + :rtype: list(RouteEntry) + """ + idx = 0 + column_index = {} + header_line = proc_net_route[0] + for header in filter(lambda h: len(h) > 0, header_line.split("\t")): + column_index[header.strip()] = idx + idx += 1 + try: + idx_iface = column_index["Iface"] + idx_dest = column_index["Destination"] + idx_gw = column_index["Gateway"] + idx_flags = column_index["Flags"] + idx_metric = column_index["Metric"] + idx_mask = column_index["Mask"] + except KeyError: + msg = "/proc/net/route is missing key information; headers are [{0}]".format(header_line) + logger.error(msg) + return [] + + route_list = [] + for entry in proc_net_route[1:]: + route = entry.split("\t") + if len(route) > 0: + route_obj = RouteEntry(route[idx_iface], route[idx_dest], route[idx_gw], route[idx_mask], + route[idx_flags], route[idx_metric]) + route_list.append(route_obj) + return route_list + + @staticmethod + def read_route_table(): + """ + Return a list of strings comprising the route table, including column headers. 
Each line is stripped of leading + or trailing whitespace but is otherwise unmolested. + + :return: Entries in the text route table + :rtype: list(str) + """ + try: + with open('/proc/net/route') as routing_table: + return list(map(str.strip, routing_table.readlines())) + except Exception as e: + logger.error("Cannot read route table [{0}]", ustr(e)) + + return [] + + @staticmethod + def get_list_of_routes(route_table): + """ + Construct a list of all network routes known to this system. + + :param list(str) route_table: List of text entries from route table, including headers + :return: a list of network routes + :rtype: list(RouteEntry) + """ + route_list = [] + count = len(route_table) + + if count < 1: + logger.error("/proc/net/route is missing headers") + elif count == 1: + logger.error("/proc/net/route contains no routes") + else: + route_list = DefaultOSUtil._build_route_list(route_table) + return route_list def get_primary_interface(self): """ @@ -695,43 +913,26 @@ RTF_GATEWAY = 0x02 DEFAULT_DEST = "00000000" - hdr_iface = "Iface" - hdr_dest = "Destination" - hdr_flags = "Flags" - hdr_metric = "Metric" - - idx_iface = -1 - idx_dest = -1 - idx_flags = -1 - idx_metric = -1 - primary = None - primary_metric = None + primary_interface = None if not self.disable_route_warning: logger.info("Examine /proc/net/route for primary interface") - with open('/proc/net/route') as routing_table: - idx = 0 - for header in filter(lambda h: len(h) > 0, routing_table.readline().strip(" \n").split("\t")): - if header == hdr_iface: - idx_iface = idx - elif header == hdr_dest: - idx_dest = idx - elif header == hdr_flags: - idx_flags = idx - elif header == hdr_metric: - idx_metric = idx - idx = idx + 1 - for entry in routing_table.readlines(): - route = entry.strip(" \n").split("\t") - if route[idx_dest] == DEFAULT_DEST and int(route[idx_flags]) & RTF_GATEWAY == RTF_GATEWAY: - metric = int(route[idx_metric]) - iface = route[idx_iface] - if primary is None or metric < 
primary_metric: - primary = iface - primary_metric = metric - if primary is None: - primary = '' + route_table = DefaultOSUtil.read_route_table() + + def is_default(route): + return route.destination == DEFAULT_DEST and int(route.flags) & RTF_GATEWAY == RTF_GATEWAY + + candidates = list(filter(is_default, DefaultOSUtil.get_list_of_routes(route_table))) + + if len(candidates) > 0: + def get_metric(route): + return int(route.metric) + primary_route = min(candidates, key=get_metric) + primary_interface = primary_route.interface + + if primary_interface is None: + primary_interface = '' if not self.disable_route_warning: with open('/proc/net/route') as routing_table_fh: routing_table_text = routing_table_fh.read() @@ -741,9 +942,9 @@ logger.warn('Primary interface examination will retry silently') self.disable_route_warning = True else: - logger.info('Primary interface is [{0}]'.format(primary)) + logger.info('Primary interface is [{0}]'.format(primary_interface)) self.disable_route_warning = False - return primary + return primary_interface def is_primary_interface(self, ifname): """ @@ -754,13 +955,18 @@ return self.get_primary_interface() == ifname def is_loopback(self, ifname): + """ + Determine if a named interface is loopback. 
+ """ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) - result = fcntl.ioctl(s.fileno(), 0x8913, struct.pack('256s', ifname[:15])) + ifname_buff = ifname + ('\0'*256) + result = fcntl.ioctl(s.fileno(), IOCTL_SIOCGIFFLAGS, ifname_buff) flags, = struct.unpack('H', result[16:18]) isloopback = flags & 8 == 8 if not self.disable_route_warning: logger.info('interface [{0}] has flags [{1}], ' 'is loopback [{2}]'.format(ifname, flags, isloopback)) + s.close() return isloopback def get_dhcp_lease_endpoint(self): @@ -783,28 +989,23 @@ endpoint = None HEADER_LEASE = "lease" - HEADER_OPTION = "option unknown-245" - HEADER_DNS = "option domain-name-servers" + HEADER_OPTION_245 = "option unknown-245" HEADER_EXPIRE = "expire" FOOTER_LEASE = "}" FORMAT_DATETIME = "%Y/%m/%d %H:%M:%S" + option_245_re = re.compile(r'\s*option\s+unknown-245\s+([0-9a-fA-F]+):([0-9a-fA-F]+):([0-9a-fA-F]+):([0-9a-fA-F]+);') logger.info("looking for leases in path [{0}]".format(pathglob)) for lease_file in glob.glob(pathglob): leases = open(lease_file).read() - if HEADER_OPTION in leases: + if HEADER_OPTION_245 in leases: cached_endpoint = None - has_option_245 = False + option_245_match = None expired = True # assume expired for line in leases.splitlines(): if line.startswith(HEADER_LEASE): cached_endpoint = None - has_option_245 = False expired = True - elif HEADER_DNS in line: - cached_endpoint = line.replace(HEADER_DNS, '').strip(" ;") - elif HEADER_OPTION in line: - has_option_245 = True elif HEADER_EXPIRE in line: if "never" in line: expired = False @@ -818,12 +1019,20 @@ logger.error("could not parse expiry token '{0}'".format(line)) elif FOOTER_LEASE in line: logger.info("dhcp entry:{0}, 245:{1}, expired:{2}".format( - cached_endpoint, has_option_245, expired)) - if not expired and cached_endpoint is not None and has_option_245: + cached_endpoint, option_245_match is not None, expired)) + if not expired and cached_endpoint is not None: endpoint = cached_endpoint 
logger.info("found endpoint [{0}]".format(endpoint)) # we want to return the last valid entry, so # keep searching + else: + option_245_match = option_245_re.match(line) + if option_245_match is not None: + cached_endpoint = '{0}.{1}.{2}.{3}'.format( + int(option_245_match.group(1), 16), + int(option_245_match.group(2), 16), + int(option_245_match.group(3), 16), + int(option_245_match.group(4), 16)) if endpoint is not None: logger.info("cached endpoint found [{0}]".format(endpoint)) else: @@ -831,26 +1040,40 @@ return endpoint def is_missing_default_route(self): - routes = shellutil.run_get_output("route -n")[1] + route_cmd = "ip route show" + routes = shellutil.run_get_output(route_cmd)[1] for route in routes.split("\n"): if route.startswith("0.0.0.0 ") or route.startswith("default "): return False return True def get_if_name(self): - return self.get_first_if()[0] + if_name = '' + if_found = False + while not if_found: + if_name = self.get_first_if()[0] + if_found = len(if_name) >= 2 + if not if_found: + time.sleep(2) + return if_name def get_ip4_addr(self): return self.get_first_if()[1] def set_route_for_dhcp_broadcast(self, ifname): - return shellutil.run("route add 255.255.255.255 dev {0}".format(ifname), + route_cmd = "ip route add" + return shellutil.run("{0} 255.255.255.255 dev {1}".format( + route_cmd, ifname), chk_err=False) def remove_route_for_dhcp_broadcast(self, ifname): - shellutil.run("route del 255.255.255.255 dev {0}".format(ifname), + route_cmd = "ip route del" + shellutil.run("{0} 255.255.255.255 dev {1}".format(route_cmd, ifname), chk_err=False) + def is_dhcp_available(self): + return (True, '') + def is_dhcp_enabled(self): return False @@ -880,15 +1103,24 @@ def route_add(self, net, mask, gateway): """ - Add specified route using /sbin/route add -net. 
+ Add specified route """ - cmd = ("/sbin/route add -net " - "{0} netmask {1} gw {2}").format(net, mask, gateway) + cmd = "ip route add {0} via {1}".format(net, gateway) return shellutil.run(cmd, chk_err=False) + @staticmethod + def _text_to_pid_list(text): + return [int(n) for n in text.split()] + + @staticmethod + def _get_dhcp_pid(command): + try: + return DefaultOSUtil._text_to_pid_list(shellutil.run_command(command)) + except CommandError as exception: + return [] + def get_dhcp_pid(self): - ret = shellutil.run_get_output("pidof dhclient", chk_err=False) - return ret[1] if ret[0] == 0 else None + return self._get_dhcp_pid(["pidof", "dhclient"]) def set_hostname(self, hostname): fileutil.write_file('/etc/hostname', hostname) @@ -910,7 +1142,7 @@ def restart_if(self, ifname, retries=3, wait=5): retry_limit=retries+1 for attempt in range(1, retry_limit): - return_code=shellutil.run("ifdown {0} && ifup {0}".format(ifname)) + return_code=shellutil.run("ifdown {0} && ifup {0}".format(ifname), expected_errors=[1] if attempt < retries else []) if return_code == 0: return logger.warn("failed to restart {0}: return code {1}".format(ifname, return_code)) @@ -961,6 +1193,67 @@ return tokens[2] if len(tokens) > 2 else None return None + @staticmethod + def _enumerate_device_id(): + """ + Enumerate all storage device IDs. + + Args: + None + + Returns: + Iterator[Tuple[str, str]]: VmBus and storage devices. + """ + + if os.path.exists(STORAGE_DEVICE_PATH): + for vmbus in os.listdir(STORAGE_DEVICE_PATH): + deviceid = fileutil.read_file(os.path.join(STORAGE_DEVICE_PATH, vmbus, "device_id")) + guid = deviceid.strip('{}\n') + yield vmbus, guid + + @staticmethod + def search_for_resource_disk(gen1_device_prefix, gen2_device_id): + """ + Search the filesystem for a device by ID or prefix. + + Args: + gen1_device_prefix (str): Gen1 resource disk prefix. + gen2_device_id (str): Gen2 resource device ID. + + Returns: + str: The found device. 
+ """ + + device = None + # We have to try device IDs for both Gen1 and Gen2 VMs. + logger.info('Searching gen1 prefix {0} or gen2 {1}'.format(gen1_device_prefix, gen2_device_id)) + try: + for vmbus, guid in DefaultOSUtil._enumerate_device_id(): + if guid.startswith(gen1_device_prefix) or guid == gen2_device_id: + for root, dirs, files in os.walk(STORAGE_DEVICE_PATH + vmbus): + root_path_parts = root.split('/') + # For Gen1 VMs we only have to check for the block dir in the + # current device. But for Gen2 VMs all of the disks (sda, sdb, + # sr0) are presented in this device on the same SCSI controller. + # Because of that we need to also read the LUN. It will be: + # 0 - OS disk + # 1 - Resource disk + # 2 - CDROM + if root_path_parts[-1] == 'block' and ( + guid != gen2_device_id or + root_path_parts[-2].split(':')[-1] == '1'): + device = dirs[0] + return device + else: + # older distros + for d in dirs: + if ':' in d and "block" == d.split(':')[0]: + device = d.split(':')[1] + return device + except (OSError, IOError) as exc: + logger.warn('Error getting device for {0} or {1}: {2}', gen1_device_prefix, gen2_device_id, ustr(exc)) + return None + def device_for_ide_port(self, port_id): """ Return device name attached to ide port 'n'. 
@@ -971,23 +1264,13 @@ if port_id > 1: g0 = "00000001" port_id = port_id - 2 - device = None - path = "/sys/bus/vmbus/devices/" - if os.path.exists(path): - for vmbus in os.listdir(path): - deviceid = fileutil.read_file(os.path.join(path, vmbus, "device_id")) - guid = deviceid.lstrip('{').split('-') - if guid[0] == g0 and guid[1] == "000" + ustr(port_id): - for root, dirs, files in os.walk(path + vmbus): - if root.endswith("/block"): - device = dirs[0] - break - else : #older distros - for d in dirs: - if ':' in d and "block" == d.split(':')[0]: - device = d.split(':')[1] - break - break + + gen1_device_prefix = '{0}-000{1}'.format(g0, port_id) + device = DefaultOSUtil.search_for_resource_disk( + gen1_device_prefix=gen1_device_prefix, + gen2_device_id=GEN2_DEVICE_ID + ) + logger.info('Found device: {0}'.format(device)) return device def set_hostname_record(self, hostname): @@ -998,7 +1281,7 @@ if not os.path.exists(hostname_record): # this file is created at provisioning time with agents >= 2.2.3 hostname = socket.gethostname() - logger.warn('Hostname record does not exist, ' + logger.info('Hostname record does not exist, ' 'creating [{0}] with hostname [{1}]', hostname_record, hostname) @@ -1024,8 +1307,116 @@ return multiprocessing.cpu_count() def check_pid_alive(self, pid): - return pid is not None and os.path.isdir(os.path.join('/proc', pid)) + try: + pid = int(pid) + os.kill(pid, 0) + except (ValueError, TypeError): + return False + except OSError as e: + if e.errno == errno.EPERM: + return True + return False + return True @property def is_64bit(self): return sys.maxsize > 2**32 + + @staticmethod + def _get_proc_stat(): + """ + Get the contents of /proc/stat. 
+ # cpu 813599 3940 909253 154538746 874851 0 6589 0 0 0 + # cpu0 401094 1516 453006 77276738 452939 0 3312 0 0 0 + # cpu1 412505 2423 456246 77262007 421912 0 3276 0 0 0 + + :return: A single string with the contents of /proc/stat + :rtype: str + """ + results = None + try: + results = fileutil.read_file('/proc/stat') + except (OSError, IOError) as ex: + logger.warn("Couldn't read /proc/stat: {0}".format(ex.strerror)) + raise + + return results + + @staticmethod + def get_total_cpu_ticks_since_boot(): + """ + Compute the number of USER_HZ units of time that have elapsed in all categories, across all cores, since boot. + + :return: int + """ + system_cpu = 0 + proc_stat = DefaultOSUtil._get_proc_stat() + if proc_stat is not None: + for line in proc_stat.splitlines(): + if ALL_CPUS_REGEX.match(line): + system_cpu = sum(int(i) for i in line.split()[1:7]) + break + return system_cpu + + def get_nic_state(self): + """ + Capture NIC state (IPv4 and IPv6 addresses plus link state). + + :return: Dictionary of NIC state objects, with the NIC name as key + :rtype: dict(str,NetworkInformationCard) + """ + state = {} + + status, output = shellutil.run_get_output("ip -a -o link", chk_err=False, log_cmd=False) + """ + 1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000\ link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 promiscuity 0 addrgenmode eui64 + 2: eth0: mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000\ link/ether 00:0d:3a:30:c3:5a brd ff:ff:ff:ff:ff:ff promiscuity 0 addrgenmode eui64 + 3: docker0: mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default \ link/ether 02:42:b5:d5:00:1d brd ff:ff:ff:ff:ff:ff promiscuity 0 \ bridge forward_delay 1500 hello_time 200 max_age 2000 ageing_time 30000 stp_state 0 priority 32768 vlan_filtering 0 vlan_protocol 802.1Q addrgenmode eui64 + + """ + if status != 0: + logger.verbose("Could not fetch NIC link info; status {0}, {1}".format(status, output)) + return {} + + for entry in 
output.splitlines(): + result = IP_COMMAND_OUTPUT.match(entry) + if result: + name = result.group(1) + state[name] = NetworkInterfaceCard(name, result.group(2)) + + self._update_nic_state(state, "ip -4 -a -o address", NetworkInterfaceCard.add_ipv4, "an IPv4 address") + """ + 1: lo inet 127.0.0.1/8 scope host lo\ valid_lft forever preferred_lft forever + 2: eth0 inet 10.145.187.220/26 brd 10.145.187.255 scope global eth0\ valid_lft forever preferred_lft forever + 3: docker0 inet 192.168.43.1/24 brd 192.168.43.255 scope global docker0\ valid_lft forever preferred_lft forever + """ + + self._update_nic_state(state, "ip -6 -a -o address", NetworkInterfaceCard.add_ipv6, "an IPv6 address") + """ + 1: lo inet6 ::1/128 scope host \ valid_lft forever preferred_lft forever + 2: eth0 inet6 fe80::20d:3aff:fe30:c35a/64 scope link \ valid_lft forever preferred_lft forever + """ + + return state + + def _update_nic_state(self, state, ip_command, handler, description): + """ + Update the state of NICs based on the output of a specified ip subcommand. 
+ + :param dict(str, NetworkInterfaceCard) state: Dictionary of NIC state objects + :param str ip_command: The ip command to run + :param handler: A method on the NetworkInterfaceCard class + :param str description: Description of the particular information being added to the state + """ + status, output = shellutil.run_get_output(ip_command, chk_err=True) + if status != 0: + return + + for entry in output.splitlines(): + result = IP_COMMAND_OUTPUT.match(entry) + if result: + interface_name = result.group(1) + if interface_name in state: + handler(state[interface_name], result.group(2)) + else: + logger.error("Interface {0} has {1} but no link state".format(interface_name, description)) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/factory.py walinuxagent-2.2.45/azurelinuxagent/common/osutil/factory.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/factory.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/osutil/factory.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,25 +12,31 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # + import azurelinuxagent.common.logger as logger -from azurelinuxagent.common.utils.textutil import Version from azurelinuxagent.common.version import * from .default import DefaultOSUtil from .arch import ArchUtil from .clearlinux import ClearLinuxUtil from .coreos import CoreOSUtil -from .debian import DebianOSUtil +from .debian import DebianOSBaseUtil, DebianOSModernUtil from .freebsd import FreeBSDOSUtil from .openbsd import OpenBSDOSUtil from .redhat import RedhatOSUtil, Redhat6xOSUtil from .suse import SUSEOSUtil, SUSE11OSUtil -from .ubuntu import UbuntuOSUtil, Ubuntu12OSUtil, Ubuntu14OSUtil, UbuntuSnappyOSUtil +from .ubuntu import UbuntuOSUtil, Ubuntu12OSUtil, Ubuntu14OSUtil, \ + UbuntuSnappyOSUtil, Ubuntu16OSUtil, Ubuntu18OSUtil from .alpine import AlpineOSUtil from .bigip import BigIpOSUtil from .gaia import GaiaOSUtil +from .iosxe import IosxeOSUtil +from .nsbsd import NSBSDOSUtil +from .openwrt import OpenWRTOSUtil + +from distutils.version import LooseVersion as Version def get_osutil(distro_name=DISTRO_NAME, @@ -38,18 +44,29 @@ distro_version=DISTRO_VERSION, distro_full_name=DISTRO_FULL_NAME): + # We are adding another layer of abstraction here since we want to be able to mock the final result of the + # function call. Since the get_osutil function is imported in various places in our tests, we can't mock + # it globally. Instead, we add _get_osutil function and mock it in the test base class, AgentTestCase. 
+ return _get_osutil(distro_name, distro_code_name, distro_version, distro_full_name) + + +def _get_osutil(distro_name, distro_code_name, distro_version, distro_full_name): + if distro_name == "arch": return ArchUtil() - if distro_name == "clear linux os for intel architecture" \ - or distro_name == "clear linux software for intel architecture": + if "Clear Linux" in distro_full_name: return ClearLinuxUtil() if distro_name == "ubuntu": - if Version(distro_version) == Version("12.04") or Version(distro_version) == Version("12.10"): + if Version(distro_version) in [Version("12.04"), Version("12.10")]: return Ubuntu12OSUtil() - elif Version(distro_version) == Version("14.04") or Version(distro_version) == Version("14.10"): + elif Version(distro_version) in [Version("14.04"), Version("14.10")]: return Ubuntu14OSUtil() + elif Version(distro_version) in [Version('16.04'), Version('16.10'), Version('17.04')]: + return Ubuntu16OSUtil() + elif Version(distro_version) in [Version('18.04')]: + return Ubuntu18OSUtil() elif distro_full_name == "Snappy Ubuntu Core": return UbuntuSnappyOSUtil() else: @@ -59,12 +76,12 @@ return AlpineOSUtil() if distro_name == "kali": - return DebianOSUtil() + return DebianOSBaseUtil() if distro_name == "coreos" or distro_code_name == "coreos": return CoreOSUtil() - if distro_name == "suse": + if distro_name in ("suse", "sles", "opensuse"): if distro_full_name == 'SUSE Linux Enterprise Server' \ and Version(distro_version) < Version('12') \ or distro_full_name == 'openSUSE' and Version(distro_version) < Version('13.2'): @@ -72,10 +89,13 @@ else: return SUSEOSUtil() - elif distro_name == "debian": - return DebianOSUtil() + if distro_name == "debian": + if "sid" in distro_version or Version(distro_version) > Version("7"): + return DebianOSModernUtil() + else: + return DebianOSBaseUtil() - elif distro_name == "redhat" \ + if distro_name == "redhat" \ or distro_name == "centos" \ or distro_name == "oracle": if Version(distro_version) < Version("7"): 
@@ -83,21 +103,30 @@ else: return RedhatOSUtil() - elif distro_name == "euleros": + if distro_name == "euleros": return RedhatOSUtil() - elif distro_name == "freebsd": + if distro_name == "freebsd": return FreeBSDOSUtil() - elif distro_name == "openbsd": + if distro_name == "openbsd": return OpenBSDOSUtil() - elif distro_name == "bigip": + if distro_name == "bigip": return BigIpOSUtil() - elif distro_name == "gaia": + if distro_name == "gaia": return GaiaOSUtil() + if distro_name == "iosxe": + return IosxeOSUtil() + + if distro_name == "nsbsd": + return NSBSDOSUtil() + + if distro_name == "openwrt": + return OpenWRTOSUtil() + else: logger.warn("Unable to load distro implementation for {0}. Using " "default distro implementation instead.", diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/freebsd.py walinuxagent-2.2.45/azurelinuxagent/common/osutil/freebsd.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/freebsd.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/osutil/freebsd.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,20 +14,26 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ +import socket +import struct +import binascii import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil +from azurelinuxagent.common.utils.networkutil import RouteEntry import azurelinuxagent.common.logger as logger from azurelinuxagent.common.exception import OSUtilError from azurelinuxagent.common.osutil.default import DefaultOSUtil from azurelinuxagent.common.future import ustr class FreeBSDOSUtil(DefaultOSUtil): + def __init__(self): super(FreeBSDOSUtil, self).__init__() self._scsi_disks_timeout_set = False + self.jit_enabled = True def set_hostname(self, hostname): rc_file_path = '/etc/rc.conf' @@ -39,7 +45,7 @@ def restart_ssh_service(self): return shellutil.run('service sshd restart', chk_err=False) - def useradd(self, username, expiration=None): + def useradd(self, username, expiration=None, comment=None): """ Create user account with 'username' """ @@ -47,11 +53,12 @@ if userentry is not None: logger.warn("User {0} already exists, skip useradd", username) return - if expiration is not None: cmd = "pw useradd {0} -e {1} -m".format(username, expiration) else: cmd = "pw useradd {0} -m".format(username) + if comment is not None: + cmd += " -c {0}".format(comment) retcode, out = shellutil.run_get_output(cmd) if retcode != 0: raise OSUtilError(("Failed to create user account:{0}, " @@ -90,6 +97,304 @@ def get_first_if(self): return self._get_net_info()[:2] + @staticmethod + def read_route_table(): + """ + Return a list of strings comprising the route table as in the Linux /proc/net/route format. The input taken is from FreeBSDs + `netstat -rn -f inet` command. Here is what the function does in detail: + + 1. 
Runs `netstat -rn -f inet` which outputs a column formatted list of ipv4 routes in priority order like so: + + > Routing tables + > + > Internet: + > Destination Gateway Flags Refs Use Netif Expire + > default 61.221.xx.yy UGS 0 247 em1 + > 10 10.10.110.5 UGS 0 50 em0 + > 10.10.110/26 link#1 UC 0 0 em0 + > 10.10.110.5 00:1b:0d:e6:58:40 UHLW 2 0 em0 1145 + > 61.221.xx.yy/29 link#2 UC 0 0 em1 + > 61.221.xx.yy 00:1b:0d:e6:57:c0 UHLW 2 0 em1 1055 + > 61.221.xx/24 link#2 UC 0 0 em1 + > 127.0.0.1 127.0.0.1 UH 0 0 lo0 + + 2. Convert it to an array of lines that resemble an equivalent /proc/net/route content on a Linux system like so: + + > Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT + > gre828 00000000 00000000 0001 0 0 0 000000F8 0 0 0 + > ens160 00000000 FE04700A 0003 0 0 100 00000000 0 0 0 + > gre828 00000008 00000000 0001 0 0 0 000000FE 0 0 0 + > ens160 0004700A 00000000 0001 0 0 100 00FFFFFF 0 0 0 + > gre828 2504700A 00000000 0005 0 0 0 FFFFFFFF 0 0 0 + > gre828 3704700A 00000000 0005 0 0 0 FFFFFFFF 0 0 0 + > gre828 4104700A 00000000 0005 0 0 0 FFFFFFFF 0 0 0 + + :return: Entries in the ipv4 route priority list from `netstat -rn -f inet` in the linux `/proc/net/route` style + :rtype: list(str) + """ + + def _get_netstat_rn_ipv4_routes(): + """ + Runs `netstat -rn -f inet` and parses its output and returns a list of routes where the key is the column name + and the value is the value in the column, stripped of leading and trailing whitespace. 
+ + :return: List of dictionaries representing routes in the ipv4 route priority list from `netstat -rn -f inet` + :rtype: list(dict) + """ + cmd = [ "netstat", "-rn", "-f", "inet" ] + output = shellutil.run_command(cmd, log_error=True) + output_lines = output.split("\n") + if len(output_lines) < 3: + raise OSUtilError("`netstat -rn -f inet` output seems to be empty") + output_lines = [ line.strip() for line in output_lines if line ] + if "Internet:" not in output_lines: + raise OSUtilError("`netstat -rn -f inet` output seems to contain no ipv4 routes") + route_header_line = output_lines.index("Internet:") + 1 + # Parse the file structure and left justify the routes + route_start_line = route_header_line + 1 + route_line_length = max([len(line) for line in output_lines[route_header_line:]]) + netstat_route_list = [line.ljust(route_line_length) for line in output_lines[route_start_line:]] + # Parse the headers + _route_headers = output_lines[route_header_line].split() + n_route_headers = len(_route_headers) + route_columns = {} + for i in range(0, n_route_headers - 1): + route_columns[_route_headers[i]] = ( + output_lines[route_header_line].index(_route_headers[i]), + (output_lines[route_header_line].index(_route_headers[i+1]) - 1) + ) + route_columns[_route_headers[n_route_headers - 1]] = ( + output_lines[route_header_line].index(_route_headers[n_route_headers - 1]), + None + ) + # Parse the routes + netstat_routes = [] + n_netstat_routes = len(netstat_route_list) + for i in range(0, n_netstat_routes): + netstat_route = {} + for column in route_columns: + netstat_route[column] = netstat_route_list[i][route_columns[column][0]:route_columns[column][1]].strip() + netstat_route["Metric"] = n_netstat_routes - i + netstat_routes.append(netstat_route) + # Return the Sections + return netstat_routes + + def _ipv4_ascii_address_to_hex(ipv4_ascii_address): + """ + Converts an IPv4 32bit address from its ASCII notation (ie. 127.0.0.1) to an 8 digit padded hex notation + (ie. 
"0100007F") string. + + :return: 8 character long hex string representation of the IP + :rtype: string + """ + # Raises socket.error if the IP is not a valid IPv4 + return "%08X" % int(binascii.hexlify(struct.pack("!I", struct.unpack("=I", socket.inet_pton(socket.AF_INET, ipv4_ascii_address))[0])), 16) + + def _ipv4_cidr_mask_to_hex(ipv4_cidr_mask): + """ + Converts an subnet mask from its CIDR integer notation (ie. 32) to an 8 digit padded hex notation + (ie. "FFFFFFFF") string representing its bitmask form. + + :return: 8 character long hex string representation of the IP + :rtype: string + """ + return "{0:08x}".format(struct.unpack("=I", struct.pack("!I", (0xffffffff << (32 - ipv4_cidr_mask)) & 0xffffffff))[0]).upper() + + def _ipv4_cidr_destination_to_hex(destination): + """ + Converts an destination address from its CIDR notation (ie. 127.0.0.1/32 or default or localhost) to an 8 + digit padded hex notation (ie. "0100007F" or "00000000" or "0100007F") string and its subnet bitmask + also in hex (FFFFFFFF). 
+ + :return: tuple of 8 character long hex string representation of the IP and 8 character long hex string representation of the subnet mask + :rtype: tuple(string, int) + """ + destination_ip = "0.0.0.0" + destination_subnetmask = 32 + if destination != "default": + if destination == "localhost": + destination_ip = "127.0.0.1" + else: + destination_ip = destination.split("/") + if len(destination_ip) > 1: + destination_subnetmask = int(destination_ip[1]) + destination_ip = destination_ip[0] + hex_destination_ip = _ipv4_ascii_address_to_hex(destination_ip) + hex_destination_subnetmask = _ipv4_cidr_mask_to_hex(destination_subnetmask) + return hex_destination_ip, hex_destination_subnetmask + + def _try_ipv4_gateway_to_hex(gateway): + """ + If the gateway is an IPv4 address, return its IP in hex, else, return "00000000" + + :return: 8 character long hex string representation of the IP of the gateway + :rtype: string + """ + try: + return _ipv4_ascii_address_to_hex(gateway) + except socket.error: + return "00000000" + + def _ascii_route_flags_to_bitmask(ascii_route_flags): + """ + Converts route flags to a bitmask of their equivalent linux/route.h values. + + :return: integer representation of a 16 bit mask + :rtype: int + """ + bitmask_flags = 0 + RTF_UP = 0x0001 + RTF_GATEWAY = 0x0002 + RTF_HOST = 0x0004 + RTF_DYNAMIC = 0x0010 + if "U" in ascii_route_flags: + bitmask_flags |= RTF_UP + if "G" in ascii_route_flags: + bitmask_flags |= RTF_GATEWAY + if "H" in ascii_route_flags: + bitmask_flags |= RTF_HOST + if "S" not in ascii_route_flags: + bitmask_flags |= RTF_DYNAMIC + return bitmask_flags + + def _freebsd_netstat_rn_route_to_linux_proc_net_route(netstat_route): + """ + Converts a single FreeBSD `netstat -rn -f inet` route to its equivalent /proc/net/route line. 
ie: + > default 0.0.0.0 UGS 0 247 em1 + to + > em1 00000000 00000000 0003 0 0 0 FFFFFFFF 0 0 0 + + :return: string representation of the equivalent /proc/net/route line + :rtype: string + """ + network_interface = netstat_route["Netif"] + hex_destination_ip, hex_destination_subnetmask = _ipv4_cidr_destination_to_hex(netstat_route["Destination"]) + hex_gateway = _try_ipv4_gateway_to_hex(netstat_route["Gateway"]) + bitmask_flags = _ascii_route_flags_to_bitmask(netstat_route["Flags"]) + dummy_refcount = 0 + dummy_use = 0 + route_metric = netstat_route["Metric"] + dummy_mtu = 0 + dummy_window = 0 + dummy_irtt = 0 + return "{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\t{9}\t{10}".format( + network_interface, + hex_destination_ip, + hex_gateway, + bitmask_flags, + dummy_refcount, + dummy_use, + route_metric, + hex_destination_subnetmask, + dummy_mtu, + dummy_window, + dummy_irtt + ) + + linux_style_route_file = [ "Iface\tDestination\tGateway\tFlags\tRefCnt\tUse\tMetric\tMask\tMTU\tWindow\tIRTT" ] + + try: + netstat_routes = _get_netstat_rn_ipv4_routes() + # Make sure the `netstat -rn -f inet` contains columns for Netif, Destination, Gateway and Flags which are needed to convert + # to the Linux Format + if len(netstat_routes) > 0: + missing_headers = [] + if "Netif" not in netstat_routes[0]: + missing_headers.append("Netif") + if "Destination" not in netstat_routes[0]: + missing_headers.append("Destination") + if "Gateway" not in netstat_routes[0]: + missing_headers.append("Gateway") + if "Flags" not in netstat_routes[0]: + missing_headers.append("Flags") + if missing_headers: + raise KeyError("`netstat -rn -f inet` output is missing columns required to convert to the Linux /proc/net/route format; columns are [{0}]".format(missing_headers)) + # Parse the Netstat IPv4 Routes + for netstat_route in netstat_routes: + try: + linux_style_route = _freebsd_netstat_rn_route_to_linux_proc_net_route(netstat_route) + linux_style_route_file.append(linux_style_route) + except 
Exception: + # Skip the route + continue + except Exception as e: + logger.error("Cannot read route table [{0}]", ustr(e)) + return linux_style_route_file + + @staticmethod + def get_list_of_routes(route_table): + """ + Construct a list of all network routes known to this system. + + :param list(str) route_table: List of text entries from route table, including headers + :return: a list of network routes + :rtype: list(RouteEntry) + """ + route_list = [] + count = len(route_table) + + if count < 1: + logger.error("netstat -rn -f inet is missing headers") + elif count == 1: + logger.error("netstat -rn -f inet contains no routes") + else: + route_list = DefaultOSUtil._build_route_list(route_table) + return route_list + + def get_primary_interface(self): + """ + Get the name of the primary interface, which is the one with the + default route attached to it; if there are multiple default routes, + the primary has the lowest Metric. + :return: the interface which has the default route + """ + RTF_GATEWAY = 0x0002 + DEFAULT_DEST = "00000000" + + primary_interface = None + + if not self.disable_route_warning: + logger.info("Examine `netstat -rn -f inet` for primary interface") + + route_table = self.read_route_table() + + def is_default(route): + return (route.destination == DEFAULT_DEST) and (RTF_GATEWAY & route.flags) + + candidates = list(filter(is_default, self.get_list_of_routes(route_table))) + + if len(candidates) > 0: + def get_metric(route): + return int(route.metric) + primary_route = min(candidates, key=get_metric) + primary_interface = primary_route.interface + + if primary_interface is None: + primary_interface = '' + if not self.disable_route_warning: + logger.warn('Could not determine primary interface, ' + 'please ensure routes are correct') + logger.warn('Primary interface examination will retry silently') + self.disable_route_warning = True + else: + logger.info('Primary interface is [{0}]'.format(primary_interface)) + self.disable_route_warning = False 
+ return primary_interface + + def is_primary_interface(self, ifname): + """ + Indicate whether the specified interface is the primary. + :param ifname: the name of the interface - eth0, lo, etc. + :return: True if this interface binds the default route + """ + return self.get_primary_interface() == ifname + + def is_loopback(self, ifname): + """ + Determine if a named interface is loopback. + """ + return ifname.startswith("lo") + def route_add(self, net, mask, gateway): cmd = 'route add {0} {1} {2}'.format(net, gateway, mask) return shellutil.run(cmd, chk_err=False) @@ -100,6 +405,14 @@ specify the route manually to get it work in a VNET environment. SEE ALSO: man ip(4) IP_ONESBCAST, """ + RTF_GATEWAY = 0x0002 + DEFAULT_DEST = "00000000" + + route_table = self.read_route_table() + routes = self.get_list_of_routes(route_table) + for route in routes: + if (route.destination == DEFAULT_DEST) and (RTF_GATEWAY & route.flags): + return False return True def is_dhcp_enabled(self): @@ -118,8 +431,7 @@ shellutil.run("route delete 255.255.255.255 -iface {0}".format(ifname), chk_err=False) def get_dhcp_pid(self): - ret = shellutil.run_get_output("pgrep -n dhclient", chk_err=False) - return ret[1] if ret[0] == 0 else None + return self._get_dhcp_pid(["pgrep", "-n", "dhclient"]) def eject_dvd(self, chk_err=True): dvd = self.get_dvd_device() @@ -247,3 +559,7 @@ if not possible.startswith('pass'): return possible return None + + @staticmethod + def get_total_cpu_ticks_since_boot(): + return 0 diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/gaia.py walinuxagent-2.2.45/azurelinuxagent/common/osutil/gaia.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/gaia.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/osutil/gaia.py 2019-11-07 00:36:56.000000000 +0000 @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import base64 @@ -33,6 +33,7 @@ class GaiaOSUtil(DefaultOSUtil): + def __init__(self): super(GaiaOSUtil, self).__init__() @@ -139,16 +140,16 @@ def eject_dvd(self, chk_err=True): logger.warn('eject is not supported on GAiA') - def mount(self, dvd, mount_point, option="", chk_err=True): - logger.info('mount {0} {1} {2}', dvd, mount_point, option) + def mount(self, device, mount_point, option="", chk_err=True): + logger.info('mount {0} {1} {2}', device, mount_point, option) if 'udf,iso9660' in option: ret, out = super(GaiaOSUtil, self).mount( - dvd, mount_point, option=option.replace('udf,iso9660', 'udf'), + device, mount_point, option=option.replace('udf,iso9660', 'udf'), chk_err=chk_err) if not ret: return ret, out return super(GaiaOSUtil, self).mount( - dvd, mount_point, option=option, chk_err=chk_err) + device, mount_point, option=option, chk_err=chk_err) def allow_dhcp_broadcast(self): logger.info('allow_dhcp_broadcast is ignored on GAiA') diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/__init__.py walinuxagent-2.2.45/azurelinuxagent/common/osutil/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/osutil/__init__.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.osutil.factory import get_osutil diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/iosxe.py walinuxagent-2.2.45/azurelinuxagent/common/osutil/iosxe.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/iosxe.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/osutil/iosxe.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,84 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.6+ and Openssl 1.0+ +# + +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.shellutil as shellutil +from azurelinuxagent.common.osutil.default import DefaultOSUtil +from azurelinuxagent.common.osutil.redhat import Redhat6xOSUtil + +''' +The IOSXE distribution is a variant of the Centos distribution, +version 7.1. 
+The primary difference is that IOSXE makes some assumptions about
+the waagent environment:
+ - only the waagent daemon is executed
+ - no provisioning is performed
+ - no DHCP-based services are available
+'''
+class IosxeOSUtil(DefaultOSUtil):
+    def __init__(self):
+        super(IosxeOSUtil, self).__init__()
+
+    def set_hostname(self, hostname):
+        """
+        Unlike redhat 6.x, redhat 7.x will set hostname via hostnamectl
+        Due to a bug in systemd in Centos-7.0, if this call fails, fallback
+        to hostname.
+        """
+        hostnamectl_cmd = "hostnamectl set-hostname {0} --static".format(hostname)
+        if shellutil.run(hostnamectl_cmd, chk_err=False) != 0:
+            logger.warn("[{0}] failed, attempting fallback".format(hostnamectl_cmd))
+            DefaultOSUtil.set_hostname(self, hostname)
+
+    def publish_hostname(self, hostname):
+        """
+        Restart NetworkManager first before publishing hostname
+        """
+        shellutil.run("service NetworkManager restart")
+        super(IosxeOSUtil, self).publish_hostname(hostname)
+
+    def register_agent_service(self):
+        return shellutil.run("systemctl enable waagent", chk_err=False)
+
+    def unregister_agent_service(self):
+        return shellutil.run("systemctl disable waagent", chk_err=False)
+
+    def openssl_to_openssh(self, input_file, output_file):
+        DefaultOSUtil.openssl_to_openssh(self, input_file, output_file)
+
+    def is_dhcp_available(self):
+        return (False, '168.63.129.16')
+
+    def get_instance_id(self):
+        '''
+        Azure records a UUID as the instance ID
+        First check /sys/class/dmi/id/product_uuid.
+ If that is missing, then extracts from dmidecode + If nothing works (for old VMs), return the empty string + ''' + if os.path.isfile(PRODUCT_ID_FILE): + try: + s = fileutil.read_file(PRODUCT_ID_FILE).strip() + return self._correct_instance_id(s.strip()) + except IOError: + pass + rc, s = shellutil.run_get_output(DMIDECODE_CMD) + if rc != 0 or UUID_PATTERN.match(s) is None: + return "" + return self._correct_instance_id(s.strip()) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/nsbsd.py walinuxagent-2.2.45/azurelinuxagent/common/osutil/nsbsd.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/nsbsd.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/osutil/nsbsd.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,155 @@ +# +# Copyright 2018 Stormshield +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.shellutil as shellutil +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.exception import OSUtilError +from azurelinuxagent.common.osutil.freebsd import FreeBSDOSUtil +import os + +class NSBSDOSUtil(FreeBSDOSUtil): + + resolver = None + + def __init__(self): + super(NSBSDOSUtil, self).__init__() + + if self.resolver is None: + # NSBSD doesn't have a system resolver, configure a python one + + try: + import dns.resolver + except ImportError: + raise OSUtilError("Python DNS resolver not available. Cannot proceed!") + + self.resolver = dns.resolver.Resolver() + servers = [] + cmd = "getconf /usr/Firewall/ConfigFiles/dns Servers | tail -n +2" + ret, output = shellutil.run_get_output(cmd) + for server in output.split("\n"): + if server == '': + break + server = server[:-1] # remove last '=' + cmd = "grep '{}' /etc/hosts".format(server) + " | awk '{print $1}'" + ret, ip = shellutil.run_get_output(cmd) + servers.append(ip) + self.resolver.nameservers = servers + dns.resolver.override_system_resolver(self.resolver) + + def set_hostname(self, hostname): + shellutil.run("/usr/Firewall/sbin/setconf /usr/Firewall/System/global SystemName {0}".format(hostname)) + shellutil.run("/usr/Firewall/sbin/enlog") + shellutil.run("/usr/Firewall/sbin/enproxy -u") + shellutil.run("/usr/Firewall/sbin/ensl -u") + shellutil.run("/usr/Firewall/sbin/ennetwork -f") + + def restart_ssh_service(self): + return shellutil.run('/usr/Firewall/sbin/enservice', chk_err=False) + + def conf_sshd(self, disable_password): + option = "0" if disable_password else "1" + + shellutil.run('setconf /usr/Firewall/ConfigFiles/system SSH State 1', + chk_err=False) + shellutil.run('setconf /usr/Firewall/ConfigFiles/system SSH Password {}'.format(option), + chk_err=False) + shellutil.run('enservice', chk_err=False) + + logger.info("{0} SSH password-based authentication methods." 
+ .format("Disabled" if disable_password else "Enabled")) + + def useradd(self, username, expiration=None): + """ + Create user account with 'username' + """ + logger.warn("User creation disabled") + return + + def del_account(self, username): + logger.warn("User deletion disabled") + + def conf_sudoer(self, username, nopasswd=False, remove=False): + logger.warn("Sudo is not enabled") + + def chpasswd(self, username, password, crypt_id=6, salt_len=10): + cmd = "/usr/Firewall/sbin/fwpasswd -p {0}".format(password) + ret, output = shellutil.run_get_output(cmd, log_cmd=False) + if ret != 0: + raise OSUtilError(("Failed to set password for admin: {0}" + "").format(output)) + + # password set, activate webadmin and ssh access + shellutil.run('setconf /usr/Firewall/ConfigFiles/webadmin ACL any && ensl', + chk_err=False) + + def deploy_ssh_pubkey(self, username, pubkey): + """ + Deploy authorized_key + """ + path, thumbprint, value = pubkey + + #overide parameters + super(NSBSDOSUtil, self).deploy_ssh_pubkey('admin', + ["/usr/Firewall/.ssh/authorized_keys", thumbprint, value]) + + def del_root_password(self): + logger.warn("Root password deletion disabled") + + def start_dhcp_service(self): + shellutil.run("/usr/Firewall/sbin/nstart dhclient", chk_err=False) + + def stop_dhcp_service(self): + shellutil.run("/usr/Firewall/sbin/nstop dhclient", chk_err=False) + + def get_dhcp_pid(self): + ret = "" + pidfile = "/var/run/dhclient.pid" + + if os.path.isfile(pidfile): + ret = fileutil.read_file(pidfile, encoding='ascii') + return self._text_to_pid_list(ret) + + def eject_dvd(self, chk_err=True): + pass + + def restart_if(self, ifname): + # Restart dhclient only to publish hostname + shellutil.run("ennetwork", chk_err=False) + + def set_dhcp_hostname(self, hostname): + #already done by the dhcp client + pass + + def get_firewall_dropped_packets(self, dst_ip=None): + # disable iptables methods + return 0 + + def get_firewall_will_wait(self): + # disable iptables methods + return 
"" + + def _delete_rule(self, rule): + # disable iptables methods + return + + def remove_firewall(self, dst_ip=None, uid=None): + # disable iptables methods + return True + + def enable_firewall(self, dst_ip=None, uid=None): + # disable iptables methods + return True diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/openbsd.py walinuxagent-2.2.45/azurelinuxagent/common/osutil/openbsd.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/openbsd.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/osutil/openbsd.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # Copyright 2017 Reyk Floeter # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and OpenSSL 1.0+ +# Requires Python 2.6+ and OpenSSL 1.0+ import os import re @@ -35,9 +35,12 @@ r'^\s*[A-F0-9]{8}(?:\-[A-F0-9]{4}){3}\-[A-F0-9]{12}\s*$', re.IGNORECASE) + class OpenBSDOSUtil(DefaultOSUtil): + def __init__(self): super(OpenBSDOSUtil, self).__init__() + self.jit_enabled = True self._scsi_disks_timeout_set = False def get_instance_id(self): @@ -54,17 +57,17 @@ return shellutil.run('rcctl restart sshd', chk_err=False) def start_agent_service(self): - return shellutil.run('rcctl start waagent', chk_err=False) + return shellutil.run('rcctl start {0}'.format(self.service_name), chk_err=False) def stop_agent_service(self): - return shellutil.run('rcctl stop waagent', chk_err=False) + return shellutil.run('rcctl stop {0}'.format(self.service_name), chk_err=False) def register_agent_service(self): - shellutil.run('chmod 0555 /etc/rc.d/waagent', chk_err=False) - return shellutil.run('rcctl enable waagent', chk_err=False) + shellutil.run('chmod 0555 
/etc/rc.d/{0}'.format(self.service_name), chk_err=False) + return shellutil.run('rcctl enable {0}'.format(self.service_name), chk_err=False) def unregister_agent_service(self): - return shellutil.run('rcctl disable waagent', chk_err=False) + return shellutil.run('rcctl disable {0}'.format(self.service_name), chk_err=False) def del_account(self, username): if self.is_sys_user(username): @@ -223,9 +226,7 @@ "{0}".format(ifname), chk_err=False) def get_dhcp_pid(self): - ret, output = shellutil.run_get_output("pgrep -n dhclient", - chk_err=False) - return output if ret == 0 else None + return self._get_dhcp_pid(["pgrep", "-n", "dhclient"]) def get_dvd_device(self, dev_dir='/dev'): pattern = r'cd[0-9]c' @@ -345,3 +346,7 @@ Return device name attached to ide port 'n'. """ return "wd{0}".format(port_id) + + @staticmethod + def get_total_cpu_ticks_since_boot(): + return 0 diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/openwrt.py walinuxagent-2.2.45/azurelinuxagent/common/osutil/openwrt.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/openwrt.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/osutil/openwrt.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,152 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2018 Microsoft Corporation +# Copyright 2018 Sonus Networks, Inc. (d.b.a. Ribbon Communications Operating Company) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ +# +import os +import re +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.shellutil as shellutil +import azurelinuxagent.common.utils.fileutil as fileutil +from azurelinuxagent.common.osutil.default import DefaultOSUtil +from azurelinuxagent.common.utils.networkutil import NetworkInterfaceCard + +class OpenWRTOSUtil(DefaultOSUtil): + + def __init__(self): + super(OpenWRTOSUtil, self).__init__() + self.agent_conf_file_path = '/etc/waagent.conf' + self.dhclient_name = 'udhcpc' + self.ip_command_output = re.compile('^\d+:\s+(\w+):\s+(.*)$') + self.jit_enabled = True + + def eject_dvd(self, chk_err=True): + logger.warn('eject is not supported on OpenWRT') + + def useradd(self, username, expiration=None, comment=None): + """ + Create user account with 'username' + """ + userentry = self.get_userentry(username) + if userentry is not None: + logger.info("User {0} already exists, skip useradd", username) + return + + if expiration is not None: + cmd = "useradd -m {0} -s /bin/ash -e {1}".format(username, expiration) + else: + cmd = "useradd -m {0} -s /bin/ash".format(username) + + if not os.path.exists("/home"): + os.mkdir("/home") + + if comment is not None: + cmd += " -c {0}".format(comment) + retcode, out = shellutil.run_get_output(cmd) + if retcode != 0: + raise OSUtilError(("Failed to create user account:{0}, " + "retcode:{1}, " + "output:{2}").format(username, retcode, out)) + + def get_dhcp_pid(self): + return self._get_dhcp_pid(["pidof", self.dhclient_name]) + + def get_nic_state(self): + """ + Capture NIC state (IPv4 and IPv6 addresses plus link state). 
+ + :return: Dictionary of NIC state objects, with the NIC name as key + :rtype: dict(str,NetworkInformationCard) + """ + state = {} + status, output = shellutil.run_get_output("ip -o link", chk_err=False, log_cmd=False) + + if status != 0: + logger.verbose("Could not fetch NIC link info; status {0}, {1}".format(status, output)) + return {} + + for entry in output.splitlines(): + result = self.ip_command_output.match(entry) + if result: + name = result.group(1) + state[name] = NetworkInterfaceCard(name, result.group(2)) + + + self._update_nic_state(state, "ip -o -f inet address", NetworkInterfaceCard.add_ipv4, "an IPv4 address") + self._update_nic_state(state, "ip -o -f inet6 address", NetworkInterfaceCard.add_ipv6, "an IPv6 address") + + return state + + def _update_nic_state(self, state, ip_command, handler, description): + """ + Update the state of NICs based on the output of a specified ip subcommand. + + :param dict(str, NetworkInterfaceCard) state: Dictionary of NIC state objects + :param str ip_command: The ip command to run + :param handler: A method on the NetworkInterfaceCard class + :param str description: Description of the particular information being added to the state + """ + status, output = shellutil.run_get_output(ip_command, chk_err=True) + if status != 0: + return + + for entry in output.splitlines(): + result = self.ip_command_output.match(entry) + if result: + interface_name = result.group(1) + if interface_name in state: + handler(state[interface_name], result.group(2)) + else: + logger.error("Interface {0} has {1} but no link state".format(interface_name, description)) + + def is_dhcp_enabled(self): + pass + + def start_dhcp_service(self): + pass + + def stop_dhcp_service(self): + pass + + def start_network(self) : + return shellutil.run("/etc/init.d/network start", chk_err=True) + + def restart_ssh_service(self): + # Since Dropbear is the default ssh server on OpenWRt, lets do a sanity check + if os.path.exists("/etc/init.d/sshd"): + return 
shellutil.run("/etc/init.d/sshd restart", chk_err=True)
+        else:
+            logger.warn("sshd service does not exist")
+
+    def stop_agent_service(self):
+        return shellutil.run("/etc/init.d/{0} stop".format(self.service_name), chk_err=True)
+
+    def start_agent_service(self):
+        return shellutil.run("/etc/init.d/{0} start".format(self.service_name), chk_err=True)
+
+    def register_agent_service(self):
+        return shellutil.run("/etc/init.d/{0} enable".format(self.service_name), chk_err=True)
+
+    def unregister_agent_service(self):
+        return shellutil.run("/etc/init.d/{0} disable".format(self.service_name), chk_err=True)
+
+    def set_hostname(self, hostname):
+        fileutil.write_file('/etc/hostname', hostname)
+        shellutil.run("uci set system.@system[0].hostname='{0}' && uci commit system && /etc/init.d/system reload".format(hostname), chk_err=False)
+
+    def remove_rules_files(self, rules_files=""):
+        pass
diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/redhat.py walinuxagent-2.2.45/azurelinuxagent/common/osutil/redhat.py
--- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/redhat.py	2018-01-08 22:57:56.000000000 +0000
+++ walinuxagent-2.2.45/azurelinuxagent/common/osutil/redhat.py	2019-11-07 00:36:56.000000000 +0000
@@ -1,5 +1,5 @@
 #
-# Copyright 2014 Microsoft Corporation
+# Copyright 2018 Microsoft Corporation
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import os @@ -38,8 +38,10 @@ class Redhat6xOSUtil(DefaultOSUtil): + def __init__(self): super(Redhat6xOSUtil, self).__init__() + self.jit_enabled = True def start_network(self): return shellutil.run("/sbin/service networking start", chk_err=False) @@ -48,16 +50,16 @@ return shellutil.run("/sbin/service sshd condrestart", chk_err=False) def stop_agent_service(self): - return shellutil.run("/sbin/service waagent stop", chk_err=False) + return shellutil.run("/sbin/service {0} stop".format(self.service_name), chk_err=False) def start_agent_service(self): - return shellutil.run("/sbin/service waagent start", chk_err=False) + return shellutil.run("/sbin/service {0} start".format(self.service_name), chk_err=False) def register_agent_service(self): - return shellutil.run("chkconfig --add waagent", chk_err=False) + return shellutil.run("chkconfig --add {0}".format(self.service_name), chk_err=False) def unregister_agent_service(self): - return shellutil.run("chkconfig --del waagent", chk_err=False) + return shellutil.run("chkconfig --del {0}".format(self.service_name), chk_err=False) def openssl_to_openssh(self, input_file, output_file): pubkey = fileutil.read_file(input_file) @@ -66,12 +68,11 @@ ssh_rsa_pubkey = cryptutil.asn1_to_ssh(pubkey) except CryptError as e: raise OSUtilError(ustr(e)) - fileutil.write_file(output_file, ssh_rsa_pubkey) + fileutil.append_file(output_file, ssh_rsa_pubkey) # Override def get_dhcp_pid(self): - ret = shellutil.run_get_output("pidof dhclient", chk_err=False) - return ret[1] if ret[0] == 0 else None + return self._get_dhcp_pid(["pidof", "dhclient"]) def set_hostname(self, hostname): """ @@ -96,6 +97,7 @@ class RedhatOSUtil(Redhat6xOSUtil): def __init__(self): super(RedhatOSUtil, self).__init__() + self.service_name = self.get_service_name() def set_hostname(self, hostname): """ @@ -116,10 +118,10 @@ super(RedhatOSUtil, self).publish_hostname(hostname) def 
register_agent_service(self): - return shellutil.run("systemctl enable waagent", chk_err=False) + return shellutil.run("systemctl enable {0}".format(self.service_name), chk_err=False) def unregister_agent_service(self): - return shellutil.run("systemctl disable waagent", chk_err=False) + return shellutil.run("systemctl disable {0}".format(self.service_name), chk_err=False) def openssl_to_openssh(self, input_file, output_file): DefaultOSUtil.openssl_to_openssh(self, input_file, output_file) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/suse.py walinuxagent-2.2.45/azurelinuxagent/common/osutil/suse.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/suse.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/osutil/suse.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,5 +1,5 @@ # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import os @@ -32,9 +32,12 @@ from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, DISTRO_FULL_NAME from azurelinuxagent.common.osutil.default import DefaultOSUtil + class SUSE11OSUtil(DefaultOSUtil): + def __init__(self): super(SUSE11OSUtil, self).__init__() + self.jit_enabled = True self.dhclient_name='dhcpcd' def set_hostname(self, hostname): @@ -42,9 +45,7 @@ shellutil.run("hostname {0}".format(hostname), chk_err=False) def get_dhcp_pid(self): - ret = shellutil.run_get_output("pidof {0}".format(self.dhclient_name), - chk_err=False) - return ret[1] if ret[0] == 0 else None + return self._get_dhcp_pid(["pidof", self.dhclient_name]) def is_dhcp_enabled(self): return True @@ -64,16 +65,17 @@ return shellutil.run("/sbin/service sshd restart", chk_err=False) def stop_agent_service(self): - return shellutil.run("/sbin/service waagent stop", chk_err=False) + return shellutil.run("/sbin/service {0} stop".format(self.service_name), chk_err=False) def start_agent_service(self): - return shellutil.run("/sbin/service waagent start", chk_err=False) + return shellutil.run("/sbin/service {0} start".format(self.service_name), chk_err=False) def register_agent_service(self): - return shellutil.run("/sbin/insserv waagent", chk_err=False) + return shellutil.run("/sbin/insserv {0}".format(self.service_name), chk_err=False) def unregister_agent_service(self): - return shellutil.run("/sbin/insserv -r waagent", chk_err=False) + return shellutil.run("/sbin/insserv -r {0}".format(self.service_name), chk_err=False) + class SUSEOSUtil(SUSE11OSUtil): def __init__(self): @@ -95,15 +97,13 @@ return shellutil.run("systemctl restart sshd", chk_err=False) def stop_agent_service(self): - return shellutil.run("systemctl stop waagent", chk_err=False) + return shellutil.run("systemctl stop {0}".format(self.service_name), chk_err=False) def start_agent_service(self): - return 
shellutil.run("systemctl start waagent", chk_err=False) + return shellutil.run("systemctl start {0}".format(self.service_name), chk_err=False) def register_agent_service(self): - return shellutil.run("systemctl enable waagent", chk_err=False) + return shellutil.run("systemctl enable {0}".format(self.service_name), chk_err=False) def unregister_agent_service(self): - return shellutil.run("systemctl disable waagent", chk_err=False) - - + return shellutil.run("systemctl disable {0}".format(self.service_name), chk_err=False) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/ubuntu.py walinuxagent-2.2.45/azurelinuxagent/common/osutil/ubuntu.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/ubuntu.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/osutil/ubuntu.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,5 +1,5 @@ # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,24 +13,36 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # +import time + +import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil + from azurelinuxagent.common.osutil.default import DefaultOSUtil + class Ubuntu14OSUtil(DefaultOSUtil): + def __init__(self): super(Ubuntu14OSUtil, self).__init__() + self.jit_enabled = True + self.service_name = self.get_service_name() + + @staticmethod + def get_service_name(): + return "walinuxagent" def start_network(self): return shellutil.run("service networking start", chk_err=False) def stop_agent_service(self): - return shellutil.run("service walinuxagent stop", chk_err=False) + return shellutil.run("service {0} stop".format(self.service_name), chk_err=False) def start_agent_service(self): - return shellutil.run("service walinuxagent start", chk_err=False) + return shellutil.run("service {0} start".format(self.service_name), chk_err=False) def remove_rules_files(self, rules_files=""): pass @@ -41,24 +53,91 @@ def get_dhcp_lease_endpoint(self): return self.get_endpoint_from_leases_path('/var/lib/dhcp/dhclient.*.leases') + class Ubuntu12OSUtil(Ubuntu14OSUtil): def __init__(self): super(Ubuntu12OSUtil, self).__init__() # Override def get_dhcp_pid(self): - ret = shellutil.run_get_output("pidof dhclient3", chk_err=False) - return ret[1] if ret[0] == 0 else None + return self._get_dhcp_pid(["pidof", "dhclient3"]) + + def mount_cgroups(self): + pass -class UbuntuOSUtil(Ubuntu14OSUtil): + +class Ubuntu16OSUtil(Ubuntu14OSUtil): + """ + Ubuntu 16.04, 16.10, and 17.04. 
+ """ def __init__(self): - super(UbuntuOSUtil, self).__init__() + super(Ubuntu16OSUtil, self).__init__() + self.service_name = self.get_service_name() def register_agent_service(self): - return shellutil.run("systemctl unmask walinuxagent", chk_err=False) + return shellutil.run("systemctl unmask {0}".format(self.service_name), chk_err=False) def unregister_agent_service(self): - return shellutil.run("systemctl mask walinuxagent", chk_err=False) + return shellutil.run("systemctl mask {0}".format(self.service_name), chk_err=False) + + def mount_cgroups(self): + """ + Mounted by default in Ubuntu 16.04 + """ + pass + + +class Ubuntu18OSUtil(Ubuntu16OSUtil): + """ + Ubuntu 18.04 + """ + def __init__(self): + super(Ubuntu18OSUtil, self).__init__() + self.service_name = self.get_service_name() + + def get_dhcp_pid(self): + return self._get_dhcp_pid(["pidof", "systemd-networkd"]) + + def start_network(self): + return shellutil.run("systemctl start systemd-networkd", chk_err=False) + + def stop_network(self): + return shellutil.run("systemctl stop systemd-networkd", chk_err=False) + + def start_dhcp_service(self): + return self.start_network() + + def stop_dhcp_service(self): + return self.stop_network() + + def start_agent_service(self): + return shellutil.run("systemctl start {0}".format(self.service_name), chk_err=False) + + def stop_agent_service(self): + return shellutil.run("systemctl stop {0}".format(self.service_name), chk_err=False) + + +class UbuntuOSUtil(Ubuntu16OSUtil): + def __init__(self): + super(UbuntuOSUtil, self).__init__() + + def restart_if(self, ifname, retries=3, wait=5): + """ + Restart an interface by bouncing the link. systemd-networkd observes + this event, and forces a renew of DHCP. 
+ """ + retry_limit=retries+1 + for attempt in range(1, retry_limit): + return_code=shellutil.run("ip link set {0} down && ip link set {0} up".format(ifname)) + if return_code == 0: + return + logger.warn("failed to restart {0}: return code {1}".format(ifname, return_code)) + if attempt < retry_limit: + logger.info("retrying in {0} seconds".format(wait)) + time.sleep(wait) + else: + logger.warn("exceeded restart retries") + class UbuntuSnappyOSUtil(Ubuntu14OSUtil): def __init__(self): diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/healthservice.py walinuxagent-2.2.45/azurelinuxagent/common/protocol/healthservice.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/healthservice.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/protocol/healthservice.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,177 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ +# + +import json + +from azurelinuxagent.common import logger +from azurelinuxagent.common.exception import HttpError +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.utils import restutil +from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION + + +class Observation(object): + def __init__(self, name, is_healthy, description='', value=''): + if name is None: + raise ValueError("Observation name must be provided") + + if is_healthy is None: + raise ValueError("Observation health must be provided") + + if value is None: + value = '' + + if description is None: + description = '' + + self.name = name + self.is_healthy = is_healthy + self.description = description + self.value = value + + @property + def as_obj(self): + return { + "ObservationName": self.name[:64], + "IsHealthy": self.is_healthy, + "Description": self.description[:128], + "Value": self.value[:128] + } + + +class HealthService(object): + + ENDPOINT = 'http://{0}:80/HealthService' + API = 'reporttargethealth' + VERSION = "1.0" + OBSERVER_NAME = 'WALinuxAgent' + HOST_PLUGIN_HEARTBEAT_OBSERVATION_NAME = 'GuestAgentPluginHeartbeat' + HOST_PLUGIN_STATUS_OBSERVATION_NAME = 'GuestAgentPluginStatus' + HOST_PLUGIN_VERSIONS_OBSERVATION_NAME = 'GuestAgentPluginVersions' + HOST_PLUGIN_ARTIFACT_OBSERVATION_NAME = 'GuestAgentPluginArtifact' + IMDS_OBSERVATION_NAME = 'InstanceMetadataHeartbeat' + MAX_OBSERVATIONS = 10 + + def __init__(self, endpoint): + self.endpoint = HealthService.ENDPOINT.format(endpoint) + self.api = HealthService.API + self.version = HealthService.VERSION + self.source = HealthService.OBSERVER_NAME + self.observations = list() + + @property + def as_json(self): + data = { + "Api": self.api, + "Version": self.version, + "Source": self.source, + "Observations": [o.as_obj for o in self.observations] + } + return json.dumps(data) + + def report_host_plugin_heartbeat(self, is_healthy): + """ + Reports a signal 
for /health + :param is_healthy: whether the call succeeded + """ + self._observe(name=HealthService.HOST_PLUGIN_HEARTBEAT_OBSERVATION_NAME, + is_healthy=is_healthy) + self._report() + + def report_host_plugin_versions(self, is_healthy, response): + """ + Reports a signal for /versions + :param is_healthy: whether the api call succeeded + :param response: debugging information for failures + """ + self._observe(name=HealthService.HOST_PLUGIN_VERSIONS_OBSERVATION_NAME, + is_healthy=is_healthy, + value=response) + self._report() + + def report_host_plugin_extension_artifact(self, is_healthy, source, response): + """ + Reports a signal for /extensionArtifact + :param is_healthy: whether the api call succeeded + :param source: specifies the api caller for debugging failures + :param response: debugging information for failures + """ + self._observe(name=HealthService.HOST_PLUGIN_ARTIFACT_OBSERVATION_NAME, + is_healthy=is_healthy, + description=source, + value=response) + self._report() + + def report_host_plugin_status(self, is_healthy, response): + """ + Reports a signal for /status + :param is_healthy: whether the api call succeeded + :param response: debugging information for failures + """ + self._observe(name=HealthService.HOST_PLUGIN_STATUS_OBSERVATION_NAME, + is_healthy=is_healthy, + value=response) + self._report() + + def report_imds_status(self, is_healthy, response): + """ + Reports a signal for /metadata/instance + :param is_healthy: whether the api call succeeded and returned valid data + :param response: debugging information for failures + """ + self._observe(name=HealthService.IMDS_OBSERVATION_NAME, + is_healthy=is_healthy, + value=response) + self._report() + + def _observe(self, name, is_healthy, value='', description=''): + # ensure we keep the list size within bounds + if len(self.observations) >= HealthService.MAX_OBSERVATIONS: + del self.observations[:HealthService.MAX_OBSERVATIONS-1] + self.observations.append(Observation(name=name, + 
is_healthy=is_healthy, + value=value, + description=description)) + + def _report(self): + logger.verbose('HealthService: report observations') + try: + restutil.http_post(self.endpoint, self.as_json, headers={'Content-Type': 'application/json'}) + logger.verbose('HealthService: Reported observations to {0}: {1}', self.endpoint, self.as_json) + except HttpError as e: + logger.warn("HealthService: could not report observations: {0}", ustr(e)) + finally: + # report any failures via telemetry + self._report_failures() + # these signals are not timestamped, so there is no value in persisting data + del self.observations[:] + + def _report_failures(self): + try: + logger.verbose("HealthService: report failures as telemetry") + from azurelinuxagent.common.event import add_event, WALAEventOperation + for o in self.observations: + if not o.is_healthy: + add_event(AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.HealthObservation, + is_success=False, + message=json.dumps(o.as_obj)) + except Exception as e: + logger.verbose("HealthService: could not report failures: {0}".format(ustr(e))) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/hostplugin.py walinuxagent-2.2.45/azurelinuxagent/common/protocol/hostplugin.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/hostplugin.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/protocol/hostplugin.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,17 +14,18 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import base64 +import datetime import json -import traceback from azurelinuxagent.common import logger -from azurelinuxagent.common.exception import HttpError, ProtocolError, \ - ResourceGoneError -from azurelinuxagent.common.future import ustr, httpclient +from azurelinuxagent.common.errorstate import ErrorState, ERROR_STATE_HOST_PLUGIN_FAILURE +from azurelinuxagent.common.exception import HttpError, ProtocolError +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.protocol.healthservice import HealthService from azurelinuxagent.common.utils import restutil from azurelinuxagent.common.utils import textutil from azurelinuxagent.common.utils.textutil import remove_bom @@ -35,6 +36,7 @@ URI_FORMAT_GET_EXTENSION_ARTIFACT = "http://{0}:{1}/extensionArtifact" URI_FORMAT_PUT_VM_STATUS = "http://{0}:{1}/status" URI_FORMAT_PUT_LOG = "http://{0}:{1}/vmAgentLog" +URI_FORMAT_HEALTH = "http://{0}:{1}/health" API_VERSION = "2015-09-01" HEADER_CONTAINER_ID = "x-ms-containerid" HEADER_VERSION = "x-ms-version" @@ -47,6 +49,9 @@ class HostPluginProtocol(object): _is_default_channel = False + FETCH_REPORTING_PERIOD = datetime.timedelta(minutes=1) + STATUS_REPORTING_PERIOD = datetime.timedelta(minutes=1) + def __init__(self, endpoint, container_id, role_config_name): if endpoint is None: raise ProtocolError("HostGAPlugin: Endpoint not provided") @@ -58,6 +63,11 @@ self.deployment_id = None self.role_config_name = role_config_name self.manifest_uri = None + self.health_service = HealthService(endpoint) + self.fetch_error_state = ErrorState(min_timedelta=ERROR_STATE_HOST_PLUGIN_FAILURE) + self.status_error_state = ErrorState(min_timedelta=ERROR_STATE_HOST_PLUGIN_FAILURE) + self.fetch_last_timestamp = None + self.status_last_timestamp = None @staticmethod def is_default_channel(): @@ -77,25 +87,41 @@ is_success=self.is_available) return self.is_available + def get_health(self): 
+ """ + Call the /health endpoint + :return: True if 200 received, False otherwise + """ + url = URI_FORMAT_HEALTH.format(self.endpoint, + HOST_PLUGIN_PORT) + logger.verbose("HostGAPlugin: Getting health from [{0}]", url) + response = restutil.http_get(url, max_retry=1) + return restutil.request_succeeded(response) + def get_api_versions(self): url = URI_FORMAT_GET_API_VERSIONS.format(self.endpoint, HOST_PLUGIN_PORT) - logger.verbose("HostGAPlugin: Getting API versions at [{0}]".format( - url)) + logger.verbose("HostGAPlugin: Getting API versions at [{0}]" + .format(url)) return_val = [] + error_response = '' + is_healthy = False try: headers = {HEADER_CONTAINER_ID: self.container_id} response = restutil.http_get(url, headers) + if restutil.request_failed(response): - logger.error( - "HostGAPlugin: Failed Get API versions: {0}".format( - restutil.read_response_error(response))) + error_response = restutil.read_response_error(response) + logger.error("HostGAPlugin: Failed Get API versions: {0}".format(error_response)) + is_healthy = not restutil.request_failed_at_hostplugin(response) else: return_val = ustr(remove_bom(response.read()), encoding='utf-8') - + is_healthy = True except HttpError as e: logger.error("HostGAPlugin: Exception Get API versions: {0}".format(e)) + self.health_service.report_host_plugin_versions(is_healthy=is_healthy, response=error_response) + return return_val def get_artifact_request(self, artifact_url, artifact_manifest_url=None): @@ -117,6 +143,55 @@ return url, headers + def report_fetch_health(self, uri, is_healthy=True, source='', response=''): + + if uri != URI_FORMAT_GET_EXTENSION_ARTIFACT.format(self.endpoint, HOST_PLUGIN_PORT): + return + + if self.should_report(is_healthy, + self.fetch_error_state, + self.fetch_last_timestamp, + HostPluginProtocol.FETCH_REPORTING_PERIOD): + self.fetch_last_timestamp = datetime.datetime.utcnow() + health_signal = self.fetch_error_state.is_triggered() is False + 
self.health_service.report_host_plugin_extension_artifact(is_healthy=health_signal, + source=source, + response=response) + + def report_status_health(self, is_healthy, response=''): + if self.should_report(is_healthy, + self.status_error_state, + self.status_last_timestamp, + HostPluginProtocol.STATUS_REPORTING_PERIOD): + self.status_last_timestamp = datetime.datetime.utcnow() + health_signal = self.status_error_state.is_triggered() is False + self.health_service.report_host_plugin_status(is_healthy=health_signal, + response=response) + + @staticmethod + def should_report(is_healthy, error_state, last_timestamp, period): + """ + Determine whether a health signal should be reported + :param is_healthy: whether the current measurement is healthy + :param error_state: the error state which is tracking time since failure + :param last_timestamp: the last measurement time stamp + :param period: the reporting period + :return: True if the signal should be reported, False otherwise + """ + + if is_healthy: + # we only reset the error state upon success, since we want to keep + # reporting the failure; this is different to other uses of error states + # which do not have a separate periodicity + error_state.reset() + else: + error_state.incr() + + if last_timestamp is None: + last_timestamp = datetime.datetime.utcnow() - period + + return datetime.datetime.utcnow() >= (last_timestamp + period) + def put_vm_log(self, content): raise NotImplementedError("Unimplemented") @@ -134,38 +209,31 @@ raise ProtocolError("HostGAPlugin: Status blob was not provided") logger.verbose("HostGAPlugin: Posting VM status") - try: - - blob_type = status_blob.type if status_blob.type else config_blob_type - - if blob_type == "BlockBlob": - self._put_block_blob_status(sas_url, status_blob) - else: - self._put_page_blob_status(sas_url, status_blob) - - except Exception as e: - # If the HostPlugin rejects the request, - # let the error continue, but set to use the HostPlugin - if isinstance(e, 
ResourceGoneError): - logger.verbose("HostGAPlugin: Setting host plugin as default channel") - HostPluginProtocol.set_default_channel(True) + blob_type = status_blob.type if status_blob.type else config_blob_type - raise + if blob_type == "BlockBlob": + self._put_block_blob_status(sas_url, status_blob) + else: + self._put_page_blob_status(sas_url, status_blob) def _put_block_blob_status(self, sas_url, status_blob): url = URI_FORMAT_PUT_VM_STATUS.format(self.endpoint, HOST_PLUGIN_PORT) response = restutil.http_put(url, - data=self._build_status_data( - sas_url, - status_blob.get_block_blob_headers(len(status_blob.data)), - bytearray(status_blob.data, encoding='utf-8')), - headers=self._build_status_headers()) + data=self._build_status_data( + sas_url, + status_blob.get_block_blob_headers(len(status_blob.data)), + bytearray(status_blob.data, encoding='utf-8')), + headers=self._build_status_headers()) if restutil.request_failed(response): - raise HttpError("HostGAPlugin: Put BlockBlob failed: {0}".format( - restutil.read_response_error(response))) + error_response = restutil.read_response_error(response) + is_healthy = not restutil.request_failed_at_hostplugin(response) + self.report_status_health(is_healthy=is_healthy, response=error_response) + raise HttpError("HostGAPlugin: Put BlockBlob failed: {0}" + .format(error_response)) else: + self.report_status_health(is_healthy=True) logger.verbose("HostGAPlugin: Put BlockBlob status succeeded") def _put_page_blob_status(self, sas_url, status_blob): @@ -178,16 +246,19 @@ # First, initialize an empty blob response = restutil.http_put(url, - data=self._build_status_data( - sas_url, - status_blob.get_page_blob_create_headers(status_size)), - headers=self._build_status_headers()) + data=self._build_status_data( + sas_url, + status_blob.get_page_blob_create_headers(status_size)), + headers=self._build_status_headers()) if restutil.request_failed(response): - raise HttpError( - "HostGAPlugin: Failed PageBlob clean-up: 
{0}".format( - restutil.read_response_error(response))) + error_response = restutil.read_response_error(response) + is_healthy = not restutil.request_failed_at_hostplugin(response) + self.report_status_health(is_healthy=is_healthy, response=error_response) + raise HttpError("HostGAPlugin: Failed PageBlob clean-up: {0}" + .format(error_response)) else: + self.report_status_health(is_healthy=True) logger.verbose("HostGAPlugin: PageBlob clean-up succeeded") # Then, upload the blob in pages @@ -207,17 +278,19 @@ # Send the page response = restutil.http_put(url, - data=self._build_status_data( - sas_url, - status_blob.get_page_blob_page_headers(start, end), - buf), - headers=self._build_status_headers()) + data=self._build_status_data( + sas_url, + status_blob.get_page_blob_page_headers(start, end), + buf), + headers=self._build_status_headers()) if restutil.request_failed(response): + error_response = restutil.read_response_error(response) + is_healthy = not restutil.request_failed_at_hostplugin(response) + self.report_status_health(is_healthy=is_healthy, response=error_response) raise HttpError( - "HostGAPlugin Error: Put PageBlob bytes [{0},{1}]: " \ - "{2}".format( - start, end, restutil.read_response_error(response))) + "HostGAPlugin Error: Put PageBlob bytes " + "[{0},{1}]: {2}".format(start, end, error_response)) # Advance to the next page (if any) start = end diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/imds.py walinuxagent-2.2.45/azurelinuxagent/common/protocol/imds.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/imds.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/protocol/imds.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,387 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +import json +import re +from collections import namedtuple + +import azurelinuxagent.common.utils.restutil as restutil +from azurelinuxagent.common.exception import HttpError, ResourceGoneError +from azurelinuxagent.common.future import ustr +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.datacontract import DataContract, set_properties +from azurelinuxagent.common.protocol.util import get_protocol_util +from azurelinuxagent.common.utils.flexible_version import FlexibleVersion + +IMDS_ENDPOINT = '169.254.169.254' +APIVERSION = '2018-02-01' +BASE_METADATA_URI = "http://{0}/metadata/{1}?api-version={2}" + +IMDS_IMAGE_ORIGIN_UNKNOWN = 0 +IMDS_IMAGE_ORIGIN_CUSTOM = 1 +IMDS_IMAGE_ORIGIN_ENDORSED = 2 +IMDS_IMAGE_ORIGIN_PLATFORM = 3 + +MetadataResult = namedtuple('MetadataResult', ['success', 'service_error', 'response']) +IMDS_RESPONSE_SUCCESS = 0 +IMDS_RESPONSE_ERROR = 1 +IMDS_CONNECTION_ERROR = 2 +IMDS_INTERNAL_SERVER_ERROR = 3 + + +def get_imds_client(): + return ImdsClient() + + +# A *slightly* future proof list of endorsed distros. +# -> e.g. I have predicted the future and said that 20.04-LTS will exist +# and is endorsed. +# +# See https://docs.microsoft.com/en-us/azure/virtual-machines/linux/endorsed-distros for +# more details. +# +# This is not an exhaustive list. This is a best attempt to mark images as +# endorsed or not. Image publishers do not encode all of the requisite information +# in their publisher, offer, sku, and version to definitively mark something as +# endorsed or not. This is not perfect, but it is approximately 98% perfect. 
+ENDORSED_IMAGE_INFO_MATCHER_JSON = """{ + "CANONICAL": { + "UBUNTUSERVER": { + "List": [ + "14.04.0-LTS", + "14.04.1-LTS", + "14.04.2-LTS", + "14.04.3-LTS", + "14.04.4-LTS", + "14.04.5-LTS", + "14.04.6-LTS", + "14.04.7-LTS", + "14.04.8-LTS", + + "16.04-LTS", + "16.04.0-LTS", + "18.04-LTS", + "20.04-LTS", + "22.04-LTS" + ] + } + }, + "COREOS": { + "COREOS": { + "STABLE": { "Minimum": "494.4.0" } + } + }, + "CREDATIV": { + "DEBIAN": { "Minimum": "7" } + }, + "OPENLOGIC": { + "CENTOS": { + "Minimum": "6.3", + "List": [ + "7-LVM", + "7-RAW" + ] + }, + "CENTOS-HPC": { "Minimum": "6.3" } + }, + "REDHAT": { + "RHEL": { + "Minimum": "6.7", + "List": [ + "7-LVM", + "7-RAW" + ] + }, + "RHEL-HANA": { "Minimum": "6.7" }, + "RHEL-SAP": { "Minimum": "6.7" }, + "RHEL-SAP-APPS": { "Minimum": "6.7" }, + "RHEL-SAP-HANA": { "Minimum": "6.7" } + }, + "SUSE": { + "SLES": { + "List": [ + "11-SP4", + "11-SP5", + "11-SP6", + "12-SP1", + "12-SP2", + "12-SP3", + "12-SP4", + "12-SP5", + "12-SP6" + ] + }, + "SLES-BYOS": { + "List": [ + "11-SP4", + "11-SP5", + "11-SP6", + "12-SP1", + "12-SP2", + "12-SP3", + "12-SP4", + "12-SP5", + "12-SP6" + ] + }, + "SLES-SAP": { + "List": [ + "11-SP4", + "11-SP5", + "11-SP6", + "12-SP1", + "12-SP2", + "12-SP3", + "12-SP4", + "12-SP5", + "12-SP6" + ] + } + } +}""" + + +class ImageInfoMatcher(object): + def __init__(self, doc): + self.doc = json.loads(doc) + + def is_match(self, publisher, offer, sku, version): + def _is_match_walk(doci, keys): + key = keys.pop(0).upper() + if key is None: + return False + + if key not in doci: + return False + + if 'List' in doci[key] and keys[0] in doci[key]['List']: + return True + + if 'Match' in doci[key] and re.match(doci[key]['Match'], keys[0]): + return True + + if 'Minimum' in doci[key]: + try: + return FlexibleVersion(keys[0]) >= FlexibleVersion(doci[key]['Minimum']) + except ValueError: + pass + + return _is_match_walk(doci[key], keys) + + return _is_match_walk(self.doc, [ publisher, offer, sku, version ]) + + 
+class ComputeInfo(DataContract): + __matcher = ImageInfoMatcher(ENDORSED_IMAGE_INFO_MATCHER_JSON) + + def __init__(self, + location=None, + name=None, + offer=None, + osType=None, + placementGroupId=None, + platformFaultDomain=None, + placementUpdateDomain=None, + publisher=None, + resourceGroupName=None, + sku=None, + subscriptionId=None, + tags=None, + version=None, + vmId=None, + vmSize=None, + vmScaleSetName=None, + zone=None): + self.location = location + self.name = name + self.offer = offer + self.osType = osType + self.placementGroupId = placementGroupId + self.platformFaultDomain = platformFaultDomain + self.platformUpdateDomain = placementUpdateDomain + self.publisher = publisher + self.resourceGroupName = resourceGroupName + self.sku = sku + self.subscriptionId = subscriptionId + self.tags = tags + self.version = version + self.vmId = vmId + self.vmSize = vmSize + self.vmScaleSetName = vmScaleSetName + self.zone = zone + + @property + def image_info(self): + return "{0}:{1}:{2}:{3}".format(self.publisher, self.offer, self.sku, self.version) + + @property + def image_origin(self): + """ + An integer value describing the origin of the image. + + 0 -> unknown + 1 -> custom - user created image + 2 -> endorsed - See https://docs.microsoft.com/en-us/azure/virtual-machines/linux/endorsed-distros + 3 -> platform - non-endorsed image that is available in the Azure Marketplace. 
+ """ + + try: + if self.publisher == "": + return IMDS_IMAGE_ORIGIN_CUSTOM + + if ComputeInfo.__matcher.is_match(self.publisher, self.offer, self.sku, self.version): + return IMDS_IMAGE_ORIGIN_ENDORSED + else: + return IMDS_IMAGE_ORIGIN_PLATFORM + + except Exception as e: + logger.periodic_warn(logger.EVERY_FIFTEEN_MINUTES, + "[PERIODIC] Could not determine the image origin from IMDS: {0}".format(ustr(e))) + return IMDS_IMAGE_ORIGIN_UNKNOWN + + +class ImdsClient(object): + def __init__(self, version=APIVERSION): + self._api_version = version + self._headers = { + 'User-Agent': restutil.HTTP_USER_AGENT, + 'Metadata': True, + } + self._health_headers = { + 'User-Agent': restutil.HTTP_USER_AGENT_HEALTH, + 'Metadata': True, + } + self._regex_ioerror = re.compile(r".*HTTP Failed. GET http://[^ ]+ -- IOError .*") + self._regex_throttled = re.compile(r".*HTTP Retry. GET http://[^ ]+ -- Status Code 429 .*") + self._protocol_util = get_protocol_util() + + def _get_metadata_url(self, endpoint, resource_path): + return BASE_METADATA_URI.format(endpoint, resource_path, self._api_version) + + def _http_get(self, endpoint, resource_path, headers): + url = self._get_metadata_url(endpoint, resource_path) + return restutil.http_get(url, headers=headers, use_proxy=False) + + def _get_metadata_from_endpoint(self, endpoint, resource_path, headers): + """ + Get metadata from one of the IMDS endpoints. 
+ + :param str endpoint: IMDS endpoint to call + :param str resource_path: path of IMDS resource + :param bool headers: headers to send in the request + :return: Tuple + status: one of the following response status codes: IMDS_RESPONSE_SUCCESS, IMDS_RESPONSE_ERROR, + IMDS_CONNECTION_ERROR, IMDS_INTERNAL_SERVER_ERROR + response: IMDS response on IMDS_RESPONSE_SUCCESS, failure message otherwise + """ + try: + resp = self._http_get(endpoint=endpoint, resource_path=resource_path, headers=headers) + except ResourceGoneError: + return IMDS_INTERNAL_SERVER_ERROR, "IMDS error in /metadata/{0}: HTTP Failed with Status Code 410: Gone".format(resource_path) + except HttpError as e: + msg = str(e) + if self._regex_throttled.match(msg): + return IMDS_RESPONSE_ERROR, "IMDS error in /metadata/{0}: Throttled".format(resource_path) + if self._regex_ioerror.match(msg): + logger.periodic_warn(logger.EVERY_FIFTEEN_MINUTES, + "[PERIODIC] [IMDS_CONNECTION_ERROR] Unable to connect to IMDS endpoint {0}".format(endpoint)) + return IMDS_CONNECTION_ERROR, "IMDS error in /metadata/{0}: Unable to connect to endpoint".format(resource_path) + return IMDS_INTERNAL_SERVER_ERROR, "IMDS error in /metadata/{0}: {1}".format(resource_path, msg) + + if resp.status >= 500: + return IMDS_INTERNAL_SERVER_ERROR, "IMDS error in /metadata/{0}: {1}".format( + resource_path, restutil.read_response_error(resp)) + + if restutil.request_failed(resp): + return IMDS_RESPONSE_ERROR, "IMDS error in /metadata/{0}: {1}".format( + resource_path, restutil.read_response_error(resp)) + + return IMDS_RESPONSE_SUCCESS, resp.read() + + def get_metadata(self, resource_path, is_health): + """ + Get metadata from IMDS, falling back to Wireserver endpoint if necessary. 
+ + :param str resource_path: path of IMDS resource + :param bool is_health: True if for health/heartbeat, False otherwise + :return: instance of MetadataResult + :rtype: MetadataResult + """ + headers = self._health_headers if is_health else self._headers + endpoint = IMDS_ENDPOINT + + status, resp = self._get_metadata_from_endpoint(endpoint, resource_path, headers) + if status == IMDS_CONNECTION_ERROR: + endpoint = self._protocol_util.get_wireserver_endpoint() + status, resp = self._get_metadata_from_endpoint(endpoint, resource_path, headers) + + if status == IMDS_RESPONSE_SUCCESS: + return MetadataResult(True, False, resp) + elif status == IMDS_INTERNAL_SERVER_ERROR: + return MetadataResult(False, True, resp) + return MetadataResult(False, False, resp) + + def get_compute(self): + """ + Fetch compute information. + + :return: instance of a ComputeInfo + :rtype: ComputeInfo + """ + + # ensure we get a 200 + result = self.get_metadata('instance/compute', is_health=False) + if not result.success: + raise HttpError(result.response) + + data = json.loads(ustr(result.response, encoding="utf-8")) + + compute_info = ComputeInfo() + set_properties('compute', compute_info, data) + + return compute_info + + def validate(self): + """ + Determines whether the metadata instance api returns 200, and the response + is valid: compute should contain location, name, subscription id, and vm size + and network should contain mac address and private ip address. + :return: Tuple + is_healthy: False when service returns an error, True on successful + response and connection failures. 
+ error_response: validation failure details to assist with debugging + """ + + # ensure we get a 200 + result = self.get_metadata('instance', is_health=True) + if not result.success: + # we should only return False when the service is unhealthy + return (not result.service_error), result.response + + # ensure the response is valid json + try: + json_data = json.loads(ustr(result.response, encoding="utf-8")) + except Exception as e: + return False, "JSON parsing failed: {0}".format(ustr(e)) + + # ensure all expected fields are present and have a value + try: + # TODO: compute fields cannot be verified yet since we need to exclude rdfe vms (#1249) + + self.check_field(json_data, 'network') + self.check_field(json_data['network'], 'interface') + self.check_field(json_data['network']['interface'][0], 'macAddress') + self.check_field(json_data['network']['interface'][0], 'ipv4') + self.check_field(json_data['network']['interface'][0]['ipv4'], 'ipAddress') + self.check_field(json_data['network']['interface'][0]['ipv4']['ipAddress'][0], 'privateIpAddress') + except ValueError as v: + return False, ustr(v) + + return True, '' + + @staticmethod + def check_field(dict_obj, field): + if field not in dict_obj or dict_obj[field] is None: + raise ValueError('Missing field: [{0}]'.format(field)) + + if len(dict_obj[field]) == 0: + raise ValueError('Empty field: [{0}]'.format(field)) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/__init__.py walinuxagent-2.2.45/azurelinuxagent/common/protocol/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/protocol/__init__.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.protocol.util import get_protocol_util, \ diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/metadata.py walinuxagent-2.2.45/azurelinuxagent/common/protocol/metadata.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/metadata.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/protocol/metadata.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,18 +14,25 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ import base64 import json import os import shutil import re +import sys +import traceback import azurelinuxagent.common.conf as conf +from azurelinuxagent.common.datacontract import get_properties, set_properties, validate_param +from azurelinuxagent.common.exception import HttpError, ProtocolError +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.utils import restutil import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil +from azurelinuxagent.common.telemetryevent import TelemetryEventList from azurelinuxagent.common.future import httpclient from azurelinuxagent.common.protocol.restapi import * @@ -41,6 +48,8 @@ P7B_FILE_NAME = "Certificates.p7b" PEM_FILE_NAME = "Certificates.pem" +IF_NONE_MATCH_HEADER = "If-None-Match" + KEY_AGENT_VERSION_URIS = "versionsManifestUris" KEY_URI = "uri" @@ -49,6 +58,14 @@ RETRY_PING_INTERVAL = 10 +def get_traceback(e): + if sys.version_info[0] == 3: + return e.__traceback__ + elif sys.version_info[0] == 2: + ex_type, ex, tb = sys.exc_info() + return tb + + def _add_content_type(headers): if headers is None: headers = {} @@ -81,6 +98,7 @@ self.certs = None self.agent_manifests = None self.agent_etag = None + self.cert_etag = None def _get_data(self, url, headers=None): try: @@ -88,13 +106,21 @@ except HttpError as e: raise ProtocolError(ustr(e)) - if restutil.request_failed(resp): + # NOT_MODIFIED (304) response means the call was successful, so allow that to proceed. 
+ is_not_modified = restutil.request_not_modified(resp) + if restutil.request_failed(resp) and not is_not_modified: raise ProtocolError("{0} - GET: {1}".format(resp.status, url)) data = resp.read() etag = resp.getheader('ETag') + + # If the response was 304, then explicilty set data to None + if is_not_modified: + data = None + if data is not None: data = json.loads(ustr(data, encoding="utf-8")) + return data, etag def _put_data(self, url, data, headers=None): @@ -123,6 +149,10 @@ content = fileutil.read_file(trans_crt_file) return textutil.get_bytes_from_pem(content) + def supports_overprovisioning(self): + # Metadata protocol does not support overprovisioning + return False + def detect(self): self.get_vminfo() trans_prv_file = os.path.join(conf.get_lib_dir(), @@ -151,23 +181,27 @@ def get_certs(self): certlist = CertList() certificatedata = CertificateData() - data, etag = self._get_data(self.cert_uri) + headers = None if self.cert_etag is None else {IF_NONE_MATCH_HEADER: self.cert_etag} + data, etag = self._get_data(self.cert_uri, headers=headers) - set_properties("certlist", certlist, data) + if self.cert_etag is None or self.cert_etag != etag: + self.cert_etag = etag - cert_list = get_properties(certlist) + set_properties("certlist", certlist, data) - headers = { - "x-ms-vmagent-public-x509-cert": self._get_trans_cert() - } + cert_list = get_properties(certlist) + + headers = { + "x-ms-vmagent-public-x509-cert": self._get_trans_cert() + } - for cert_i in cert_list["certificates"]: - certificate_data_uri = cert_i['certificateDataUri'] - data, etag = self._get_data(certificate_data_uri, headers=headers) - set_properties("certificatedata", certificatedata, data) - json_certificate_data = get_properties(certificatedata) + for cert_i in cert_list["certificates"]: + certificate_data_uri = cert_i['certificateDataUri'] + data, etag = self._get_data(certificate_data_uri, headers=headers) + set_properties("certificatedata", certificatedata, data) + 
json_certificate_data = get_properties(certificatedata) - self.certs = Certificates(self, json_certificate_data) + self.certs = Certificates(self, json_certificate_data) if self.certs is None: return None @@ -181,8 +215,10 @@ def get_vmagent_manifests(self): self.update_goal_state() - data, etag = self._get_data(self.vmagent_uri) - if self.agent_etag is None or self.agent_etag < etag: + headers = None if self.agent_etag is None else {IF_NONE_MATCH_HEADER: self.agent_etag} + + data, etag = self._get_data(self.vmagent_uri, headers=headers) + if self.agent_etag is None or self.agent_etag != etag: self.agent_etag = etag # Create a list with a single manifest @@ -236,7 +272,7 @@ } ext_list = ExtHandlerList() data, etag = self._get_data(self.ext_uri, headers=headers) - if last_etag is None or last_etag < etag: + if last_etag is None or last_etag != etag: set_properties("extensionHandlers", ext_list.extHandlers, data) return ext_list, etag @@ -300,10 +336,25 @@ try: self.update_certs() return - except: + except Exception as e: logger.verbose("Incarnation is out of date. 
Update goalstate.") + msg = u"Exception updating certs: {0}".format(ustr(e)) + logger.warn(msg) + detailed_msg = '{0} {1}'.format(msg, traceback.extract_tb(get_traceback(e))) + logger.verbose(detailed_msg) raise ProtocolError("Exceeded max retry updating goal state") + def download_ext_handler_pkg(self, uri, destination, headers=None, use_proxy=True): + success = False + try: + resp = restutil.http_get(uri, headers=headers, use_proxy=use_proxy) + if restutil.request_succeeded(resp): + fileutil.write_file(destination, bytearray(resp.read()), asbin=True) + success = True + except Exception as e: + logger.warn("Failed to download from: {0}".format(uri), e) + return success + class Certificates(object): """ diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/ovfenv.py walinuxagent-2.2.45/azurelinuxagent/common/protocol/ovfenv.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/ovfenv.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/protocol/ovfenv.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # """ Copy and parse ovf-env.xml from provisioning ISO and local cache @@ -52,6 +52,7 @@ self.disable_ssh_password_auth = True self.ssh_pubkeys = [] self.ssh_keypairs = [] + self.provision_guest_agent = None self.parse(xml_text) def parse(self, xml_text): @@ -111,3 +112,11 @@ fingerprint = findtext(keypair, "Fingerprint", namespace=wans) self.ssh_keypairs.append((path, fingerprint)) + platform_settings_section = find(environment, "PlatformSettingsSection", namespace=wans) + _validate_ovf(platform_settings_section, "PlatformSettingsSection not found") + + platform_settings = find(platform_settings_section, "PlatformSettings", namespace=wans) + _validate_ovf(platform_settings, "PlatformSettings not found") + + self.provision_guest_agent = findtext(platform_settings, "ProvisionGuestAgent", namespace=wans) + _validate_ovf(self.provision_guest_agent, "ProvisionGuestAgent not found") diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/restapi.py walinuxagent-2.2.45/azurelinuxagent/common/protocol/restapi.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/restapi.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/protocol/restapi.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,77 +14,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # + import socket -import azurelinuxagent.common.logger as logger -import azurelinuxagent.common.utils.restutil as restutil -from azurelinuxagent.common.exception import ProtocolError, HttpError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.version import DISTRO_VERSION, DISTRO_NAME, CURRENT_VERSION - - -def validate_param(name, val, expected_type): - if val is None: - raise ProtocolError("{0} is None".format(name)) - if not isinstance(val, expected_type): - raise ProtocolError(("{0} type should be {1} not {2}" - "").format(name, expected_type, type(val))) - - -def set_properties(name, obj, data): - if isinstance(obj, DataContract): - validate_param("Property '{0}'".format(name), data, dict) - for prob_name, prob_val in data.items(): - prob_full_name = "{0}.{1}".format(name, prob_name) - try: - prob = getattr(obj, prob_name) - except AttributeError: - logger.warn("Unknown property: {0}", prob_full_name) - continue - prob = set_properties(prob_full_name, prob, prob_val) - setattr(obj, prob_name, prob) - return obj - elif isinstance(obj, DataContractList): - validate_param("List '{0}'".format(name), data, list) - for item_data in data: - item = obj.item_cls() - item = set_properties(name, item, item_data) - obj.append(item) - return obj - else: - return data - - -def get_properties(obj): - if isinstance(obj, DataContract): - data = {} - props = vars(obj) - for prob_name, prob in list(props.items()): - data[prob_name] = get_properties(prob) - return data - elif isinstance(obj, DataContractList): - data = [] - for item in obj: - item_data = get_properties(item) - data.append(item_data) - return data - else: - return obj - - -class DataContract(object): - pass - - -class DataContractList(list): - def __init__(self, item_cls): - self.item_cls = item_cls - - -""" -Data contract between guest and host -""" +from azurelinuxagent.common.datacontract import 
DataContract, DataContractList class VMInfo(DataContract): @@ -150,19 +86,19 @@ sequenceNumber=None, publicSettings=None, protectedSettings=None, - certificateThumbprint=None): + certificateThumbprint=None, + dependencyLevel=0): self.name = name self.sequenceNumber = sequenceNumber self.publicSettings = publicSettings self.protectedSettings = protectedSettings self.certificateThumbprint = certificateThumbprint + self.dependencyLevel = dependencyLevel class ExtHandlerProperties(DataContract): def __init__(self): self.version = None - self.upgradePolicy = None - self.upgradeGuid = None self.state = None self.extensions = DataContractList(Extension) @@ -178,6 +114,18 @@ self.properties = ExtHandlerProperties() self.versionUris = DataContractList(ExtHandlerVersionUri) + def sort_key(self): + levels = [e.dependencyLevel for e in self.properties.extensions] + if len(levels) == 0: + level = 0 + else: + level = min(levels) + # Process uninstall or disabled before enabled, in reverse order + # remap 0 to -1, 1 to -2, 2 to -3, etc + if self.properties.state != u"enabled": + level = (0 - level) - 1 + return level + class ExtHandlerList(DataContract): def __init__(self): @@ -246,13 +194,11 @@ def __init__(self, name=None, version=None, - upgradeGuid=None, status=None, code=0, message=None): self.name = name self.version = version - self.upgradeGuid = upgradeGuid self.status = status self.code = code self.message = message @@ -275,22 +221,16 @@ self.vmAgent = VMAgentStatus(status=status, message=message) -class TelemetryEventParam(DataContract): - def __init__(self, name=None, value=None): +class RemoteAccessUser(DataContract): + def __init__(self, name, encrypted_password, expiration): self.name = name - self.value = value + self.encrypted_password = encrypted_password + self.expiration = expiration -class TelemetryEvent(DataContract): - def __init__(self, eventId=None, providerId=None): - self.eventId = eventId - self.providerId = providerId - self.parameters = 
DataContractList(TelemetryEventParam) - - -class TelemetryEventList(DataContract): +class RemoteAccessUsersList(DataContract): def __init__(self): - self.events = DataContractList(TelemetryEvent) + self.users = DataContractList(RemoteAccessUser) class Protocol(DataContract): @@ -321,13 +261,8 @@ def get_artifacts_profile(self): raise NotImplementedError() - def download_ext_handler_pkg(self, uri, headers=None): - try: - resp = restutil.http_get(uri, use_proxy=True, headers=headers) - if restutil.request_succeeded(resp): - return resp.read() - except Exception as e: - logger.warn("Failed to download from: {0}".format(uri), e) + def download_ext_handler_pkg(self, uri, destination, headers=None, use_proxy=True): + raise NotImplementedError() def report_provision_status(self, provision_status): raise NotImplementedError() @@ -340,3 +275,6 @@ def report_event(self, event): raise NotImplementedError() + + def supports_overprovisioning(self): + return True diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/util.py walinuxagent-2.2.45/azurelinuxagent/common/protocol/util.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/util.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/protocol/util.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,15 +14,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import errno import os import re import shutil -import time import threading +import time import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger @@ -49,13 +49,24 @@ PASSWORD_REPLACEMENT = "*<" +class _nameset(set): + def __getattr__(self, name): + if name in self: + return name + raise AttributeError("%s not a valid value" % name) + + +prots = _nameset(("WireProtocol", "MetadataProtocol")) + + def get_protocol_util(): return ProtocolUtil() class ProtocolUtil(object): + """ - ProtocolUtil handles initialization for protocol instance. 2 protocol types + ProtocolUtil handles initialization for protocol instance. 2 protocol types are invoked, wire protocol and metadata protocols. """ def __init__(self): @@ -73,7 +84,7 @@ ovf_file_path_on_dvd = os.path.join(dvd_mount_point, OVF_FILE_NAME) tag_file_path_on_dvd = os.path.join(dvd_mount_point, TAG_FILE_NAME) ovf_file_path = os.path.join(conf.get_lib_dir(), OVF_FILE_NAME) - tag_file_path = os.path.join(conf.get_lib_dir(), TAG_FILE_NAME) + tag_file_path = self._get_tag_file_path() try: self.osutil.mount_dvd() @@ -108,15 +119,17 @@ "{0} to {1}: {2}".format(tag_file_path, tag_file_path, ustr(e))) + self._cleanup_ovf_dvd() + + return ovfenv + def _cleanup_ovf_dvd(self): try: self.osutil.umount_dvd() self.osutil.eject_dvd() except OSUtilError as e: logger.warn(ustr(e)) - return ovfenv - def get_ovf_env(self): """ Load saved ovf-env.xml @@ -126,9 +139,20 @@ xml_text = fileutil.read_file(ovf_file_path) return OvfEnv(xml_text) else: - raise ProtocolError("ovf-env.xml is missing from {0}".format(ovf_file_path)) + raise ProtocolError( + "ovf-env.xml is missing from {0}".format(ovf_file_path)) - def _get_wireserver_endpoint(self): + def _get_protocol_file_path(self): + return os.path.join( + conf.get_lib_dir(), + PROTOCOL_FILE_NAME) + + def _get_tag_file_path(self): + return os.path.join( + conf.get_lib_dir(), + 
TAG_FILE_NAME) + + def get_wireserver_endpoint(self): try: file_path = os.path.join(conf.get_lib_dir(), ENDPOINT_FILE_NAME) return fileutil.read_file(file_path) @@ -141,22 +165,34 @@ fileutil.write_file(file_path, endpoint) except IOError as e: raise OSUtilError(ustr(e)) - + def _detect_wire_protocol(self): endpoint = self.dhcp_handler.endpoint if endpoint is None: - logger.info("WireServer endpoint is not found. Rerun dhcp handler") - try: - self.dhcp_handler.run() - except DhcpError as e: - raise ProtocolError(ustr(e)) - endpoint = self.dhcp_handler.endpoint - + ''' + Check if DHCP can be used to get the wire protocol endpoint + ''' + (dhcp_available, conf_endpoint) = self.osutil.is_dhcp_available() + if dhcp_available: + logger.info("WireServer endpoint is not found. Rerun dhcp handler") + try: + self.dhcp_handler.run() + except DhcpError as e: + raise ProtocolError(ustr(e)) + endpoint = self.dhcp_handler.endpoint + else: + logger.info("_detect_wire_protocol: DHCP not available") + endpoint = self.get_wireserver_endpoint() + if endpoint == None: + endpoint = conf_endpoint + logger.info("Using hardcoded WireServer endpoint {0}", endpoint) + else: + logger.info("WireServer endpoint {0} read from file", endpoint) + try: protocol = WireProtocol(endpoint) protocol.detect() self._set_wireserver_endpoint(endpoint) - self.save_protocol("WireProtocol") return protocol except ProtocolError as e: logger.info("WireServer is not responding. Reset endpoint") @@ -167,9 +203,8 @@ def _detect_metadata_protocol(self): protocol = MetadataProtocol() protocol.detect() - self.save_protocol("MetadataProtocol") return protocol - + def _detect_protocol(self, protocols): """ Probe protocol endpoints in turn. 
@@ -180,19 +215,16 @@ for protocol_name in protocols: try: protocol = self._detect_wire_protocol() \ - if protocol_name == "WireProtocol" \ + if protocol_name == prots.WireProtocol \ else self._detect_metadata_protocol() - IOErrorCounter.set_protocol_endpoint( - endpoint=protocol.endpoint) - - return protocol + return (protocol_name, protocol) except ProtocolError as e: - logger.info("Protocol endpoint not found: {0}, {1}", + logger.info("Protocol endpoint not found: {0}, {1}", protocol_name, e) - if retry < MAX_RETRY -1: + if retry < MAX_RETRY - 1: logger.info("Retry detect protocols: retry={0}", retry) time.sleep(PROBE_INTERVAL) raise ProtocolNotFoundError("No protocol found.") @@ -201,26 +233,25 @@ """ Get protocol instance based on previous detecting result. """ - protocol_file_path = os.path.join(conf.get_lib_dir(), - PROTOCOL_FILE_NAME) + protocol_file_path = self._get_protocol_file_path() if not os.path.isfile(protocol_file_path): raise ProtocolNotFoundError("No protocol found") protocol_name = fileutil.read_file(protocol_file_path) - if protocol_name == "WireProtocol": - endpoint = self._get_wireserver_endpoint() + if protocol_name == prots.WireProtocol: + endpoint = self.get_wireserver_endpoint() return WireProtocol(endpoint) - elif protocol_name == "MetadataProtocol": + elif protocol_name == prots.MetadataProtocol: return MetadataProtocol() else: raise ProtocolNotFoundError(("Unknown protocol: {0}" "").format(protocol_name)) - def save_protocol(self, protocol_name): + def _save_protocol(self, protocol_name): """ Save protocol endpoint """ - protocol_file_path = os.path.join(conf.get_lib_dir(), PROTOCOL_FILE_NAME) + protocol_file_path = self._get_protocol_file_path() try: fileutil.write_file(protocol_file_path, protocol_name) except IOError as e: @@ -232,9 +263,9 @@ """ logger.info("Clean protocol") self.protocol = None - protocol_file_path = os.path.join(conf.get_lib_dir(), PROTOCOL_FILE_NAME) + protocol_file_path = self._get_protocol_file_path() if not 
os.path.isfile(protocol_file_path): - return + return try: os.remove(protocol_file_path) @@ -244,14 +275,14 @@ return logger.error("Failed to clear protocol endpoint: {0}", e) - def get_protocol(self): + def get_protocol(self, by_file=False): """ - Detect protocol by endpoints - + Detect protocol by endpoints, if by_file is True, + detect MetadataProtocol in priority. :returns: protocol instance """ self.lock.acquire() - + try: if self.protocol is not None: return self.protocol @@ -261,45 +292,21 @@ return self.protocol except ProtocolNotFoundError: pass - logger.info("Detect protocol endpoints") - protocols = ["WireProtocol", "MetadataProtocol"] - self.protocol = self._detect_protocol(protocols) - - return self.protocol + protocols = [prots.WireProtocol] - finally: - self.lock.release() - - def get_protocol_by_file(self): - """ - Detect protocol by tag file. - - If a file "useMetadataEndpoint.tag" is found on provision iso, - metedata protocol will be used. No need to probe for wire protocol - - :returns: protocol instance - """ - self.lock.acquire() - - try: - if self.protocol is not None: - return self.protocol + if by_file: + tag_file_path = self._get_tag_file_path() + if os.path.isfile(tag_file_path): + protocols.insert(0, prots.MetadataProtocol) + else: + protocols.append(prots.MetadataProtocol) + protocol_name, protocol = self._detect_protocol(protocols) - try: - self.protocol = self._get_protocol() - return self.protocol - except ProtocolNotFoundError: - pass + IOErrorCounter.set_protocol_endpoint(endpoint=protocol.endpoint) + self._save_protocol(protocol_name) - logger.info("Detect protocol by file") - tag_file_path = os.path.join(conf.get_lib_dir(), TAG_FILE_NAME) - protocols = [] - if os.path.isfile(tag_file_path): - protocols.append("MetadataProtocol") - else: - protocols.append("WireProtocol") - self.protocol = self._detect_protocol(protocols) + self.protocol = protocol return self.protocol finally: diff -Nru 
walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/wire.py walinuxagent-2.2.45/azurelinuxagent/common/protocol/wire.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/wire.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/protocol/wire.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,26 +14,34 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ +import datetime import json import os +import random import re import time import xml.sax.saxutils as saxutils +from datetime import datetime import azurelinuxagent.common.conf as conf -import azurelinuxagent.common.utils.fileutil as fileutil +from azurelinuxagent.common.datacontract import validate_param, set_properties +from azurelinuxagent.common.event import add_event, add_periodic, WALAEventOperation, CONTAINER_ID_ENV_VARIABLE import azurelinuxagent.common.utils.textutil as textutil - from azurelinuxagent.common.exception import ProtocolNotFoundError, \ - ResourceGoneError + ResourceGoneError, ExtensionDownloadError, InvalidContainerError, ProtocolError, HttpError from azurelinuxagent.common.future import httpclient, bytebuffer +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.utils import fileutil, restutil from azurelinuxagent.common.protocol.hostplugin import HostPluginProtocol from azurelinuxagent.common.protocol.restapi import * +from azurelinuxagent.common.telemetryevent import TelemetryEventList +from azurelinuxagent.common.utils.archive import StateFlusher from azurelinuxagent.common.utils.cryptutil import CryptUtil from 
azurelinuxagent.common.utils.textutil import parse_doc, findall, find, \ findtext, getattrib, gettext, remove_bom, get_bytes_from_pem, parse_json +from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION VERSION_INFO_URI = "http://{0}/?comp=versions" GOAL_STATE_URI = "http://{0}/machine/?comp=goalstate" @@ -47,6 +55,7 @@ HOSTING_ENV_FILE_NAME = "HostingEnvironmentConfig.xml" SHARED_CONF_FILE_NAME = "SharedConfig.xml" CERTS_FILE_NAME = "Certificates.xml" +REMOTE_ACCESS_FILE_NAME = "RemoteAccess.{0}.xml" P7M_FILE_NAME = "Certificates.p7m" PEM_FILE_NAME = "Certificates.pem" EXT_CONF_FILE_NAME = "ExtensionsConfig.{0}.xml" @@ -54,12 +63,16 @@ AGENTS_MANIFEST_FILE_NAME = "{0}.{1}.agentsManifest" TRANSPORT_CERT_FILE_NAME = "TransportCert.pem" TRANSPORT_PRV_FILE_NAME = "TransportPrivate.pem" +# Store the last retrieved container id as an environment variable to be shared between threads for telemetry purposes +CONTAINER_ID_ENV_VARIABLE = "AZURE_GUEST_AGENT_CONTAINER_ID" PROTOCOL_VERSION = "2012-11-30" ENDPOINT_FINE_NAME = "WireServer" SHORT_WAITING_INTERVAL = 1 # 1 second +MAX_EVENT_BUFFER_SIZE = 2 ** 16 - 2 ** 10 + class UploadError(HttpError): pass @@ -108,7 +121,6 @@ vminfo.tenantName = hosting_env.deployment_name vminfo.roleName = hosting_env.role_name vminfo.roleInstanceName = goal_state.role_instance_id - vminfo.containerId = goal_state.container_id return vminfo def get_certs(self): @@ -154,17 +166,24 @@ logger.verbose("Get In-VM Artifacts Profile") return self.client.get_artifacts_profile() - def download_ext_handler_pkg(self, uri, headers=None): - package = super(WireProtocol, self).download_ext_handler_pkg(uri) + def download_ext_handler_pkg_through_host(self, uri, destination): + host = self.client.get_host_plugin() + uri, headers = host.get_artifact_request(uri, host.manifest_uri) + success = self.client.stream(uri, destination, headers=headers, use_proxy=False) + return success + + def download_ext_handler_pkg(self, uri, destination, 
headers=None, use_proxy=True): + direct_func = lambda: self.client.stream(uri, destination, headers=None, use_proxy=True) + # NOTE: the host_func may be called after refreshing the goal state, be careful about any goal state data + # in the lambda. + host_func = lambda: self.download_ext_handler_pkg_through_host(uri, destination) - if package is not None: - return package - else: - logger.warn("Download did not succeed, falling back to host plugin") - host = self.client.get_host_plugin() - uri, headers = host.get_artifact_request(uri, host.manifest_uri) - package = super(WireProtocol, self).download_ext_handler_pkg(uri, headers=headers) - return package + try: + success = self.client.send_request_using_appropriate_channel(direct_func, host_func) + except Exception: + success = False + + return success def report_provision_status(self, provision_status): validate_param("provision_status", provision_status, ProvisionStatus) @@ -247,17 +266,15 @@ return xml -""" -Convert VMStatus object to status blob format -""" - - def ga_status_to_guest_info(ga_status): + """ + Convert VMStatus object to status blob format + """ v1_ga_guest_info = { - "computerName" : ga_status.hostname, - "osName" : ga_status.osname, - "osVersion" : ga_status.osversion, - "version" : ga_status.version, + "computerName": ga_status.hostname, + "osName": ga_status.osname, + "osVersion": ga_status.osversion, + "version": ga_status.version, } return v1_ga_guest_info @@ -268,12 +285,13 @@ 'message': ga_status.message } v1_ga_status = { - "version" : ga_status.version, - "status" : ga_status.status, - "formattedMessage" : formatted_msg + "version": ga_status.version, + "status": ga_status.status, + "formattedMessage": formatted_msg } return v1_ga_status + def ext_substatus_to_v1(sub_status_list): status_list = [] for substatus in sub_status_list: @@ -320,7 +338,8 @@ 'handlerVersion': handler_status.version, 'handlerName': handler_status.name, 'status': handler_status.status, - 'code': handler_status.code 
+ 'code': handler_status.code, + 'useExactVersion': True } if handler_status.message is not None: v1_handler_status["formattedMessage"] = { @@ -328,9 +347,6 @@ "message": handler_status.message } - if handler_status.upgradeGuid is not None: - v1_handler_status["upgradeGuid"] = handler_status.upgradeGuid - if len(handler_status.extensions) > 0: # Currently, no more than one extension per handler ext_name = handler_status.extensions[0] @@ -364,7 +380,7 @@ 'version': '1.1', 'timestampUTC': timestamp, 'aggregateStatus': v1_agg_status, - 'guestOSInfo' : v1_ga_guest_info + 'guestOSInfo': v1_ga_guest_info } return v1_vm_status @@ -525,10 +541,12 @@ self.updated = None self.hosting_env = None self.shared_conf = None + self.remote_access = None self.certs = None self.ext_conf = None self.host_plugin = None self.status_blob = StatusBlob(self) + self.goal_state_flusher = StateFlusher(conf.get_lib_dir()) def call_wireserver(self, http_req, *args, **kwargs): try: @@ -577,8 +595,7 @@ try: fileutil.write_file(local_file, data) except IOError as e: - fileutil.clean_ioerror(e, - paths=[local_file]) + fileutil.clean_ioerror(e, paths=[local_file]) raise ProtocolError("Failed to write cache: {0}".format(e)) @staticmethod @@ -589,67 +606,98 @@ return http_req(*args, **kwargs) + def fetch_manifest_through_host(self, uri): + host = self.get_host_plugin() + uri, headers = host.get_artifact_request(uri) + response = self.fetch(uri, headers, use_proxy=False) + return response + def fetch_manifest(self, version_uris): logger.verbose("Fetch manifest") - for version in version_uris: - response = None - if not HostPluginProtocol.is_default_channel(): - response = self.fetch(version.uri) - - if not response: - if HostPluginProtocol.is_default_channel(): - logger.verbose("Using host plugin as default channel") - else: - logger.verbose("Failed to download manifest, " - "switching to host plugin") + version_uris_shuffled = version_uris + random.shuffle(version_uris_shuffled) - try: + for version in 
version_uris_shuffled: + # GA expects a location and failoverLocation in ExtensionsConfig, but + # this is not always the case. See #1147. + if version.uri is None: + logger.verbose('The specified manifest URL is empty, ignored.') + continue + + direct_func = lambda: self.fetch(version.uri) + # NOTE: the host_func may be called after refreshing the goal state, be careful about any goal state data + # in the lambda. + host_func = lambda: self.fetch_manifest_through_host(version.uri) + + try: + response = self.send_request_using_appropriate_channel(direct_func, host_func) + + if response: host = self.get_host_plugin() - uri, headers = host.get_artifact_request(version.uri) - response = self.fetch(uri, headers, use_proxy=False) + host.manifest_uri = version.uri + return response + except Exception as e: + logger.warn("Exception when fetching manifest. Error: {0}".format(ustr(e))) - # If the HostPlugin rejects the request, - # let the error continue, but set to use the HostPlugin - except ResourceGoneError: - HostPluginProtocol.set_default_channel(True) - raise - - host.manifest_uri = version.uri - logger.verbose("Manifest downloaded successfully from host plugin") - if not HostPluginProtocol.is_default_channel(): - logger.info("Setting host plugin as default channel") - HostPluginProtocol.set_default_channel(True) + raise ExtensionDownloadError("Failed to fetch manifest from all sources") - if response: - return response + def stream(self, uri, destination, headers=None, use_proxy=None): + success = False + logger.verbose("Fetch [{0}] with headers [{1}] to file [{2}]", uri, headers, destination) - raise ProtocolError("Failed to fetch manifest from all sources") + response = self._fetch_response(uri, headers, use_proxy) + if response is not None: + chunk_size = 1024 * 1024 # 1MB buffer + try: + with open(destination, 'wb', chunk_size) as destination_fh: + complete = False + while not complete: + chunk = response.read(chunk_size) + destination_fh.write(chunk) + complete 
= len(chunk) < chunk_size + success = True + except Exception as e: + logger.error('Error streaming {0} to {1}: {2}'.format(uri, destination, ustr(e))) - def fetch(self, uri, headers=None, use_proxy=None): + return success + + def fetch(self, uri, headers=None, use_proxy=None, decode=True): logger.verbose("Fetch [{0}] with headers [{1}]", uri, headers) + content = None + response = self._fetch_response(uri, headers, use_proxy) + if response is not None: + response_content = response.read() + content = self.decode_config(response_content) if decode else response_content + return content + + def _fetch_response(self, uri, headers=None, use_proxy=None): + resp = None try: resp = self.call_storage_service( - restutil.http_get, - uri, - headers=headers, - use_proxy=use_proxy) + restutil.http_get, + uri, + headers=headers, + use_proxy=use_proxy) if restutil.request_failed(resp): - msg = "[Storage Failed] URI {0} ".format(uri) - if resp is not None: - msg += restutil.read_response_error(resp) + error_response = restutil.read_response_error(resp) + msg = "Fetch failed from [{0}]: {1}".format(uri, error_response) logger.warn(msg) + if self.host_plugin is not None: + self.host_plugin.report_fetch_health(uri, + is_healthy=not restutil.request_failed_at_hostplugin(resp), + source='WireClient', + response=error_response) raise ProtocolError(msg) + else: + if self.host_plugin is not None: + self.host_plugin.report_fetch_health(uri, source='WireClient') - return self.decode_config(resp.read()) - - except (HttpError, ProtocolError) as e: + except (HttpError, ProtocolError, IOError) as e: logger.verbose("Fetch failed from [{0}]: {1}", uri, e) - - if isinstance(e, ResourceGoneError): + if isinstance(e, ResourceGoneError) or isinstance(e, InvalidContainerError): raise - - return None + return resp def update_hosting_env(self, goal_state): if goal_state.hosting_env_uri is None: @@ -678,6 +726,29 @@ self.save_cache(local_file, xml_text) self.certs = Certificates(self, xml_text) + def 
update_remote_access_conf(self, goal_state): + if goal_state.remote_access_uri is None: + # Nothing in accounts data. Just return, nothing to do. + return + xml_text = self.fetch_config(goal_state.remote_access_uri, + self.get_header_for_cert()) + self.remote_access = RemoteAccess(xml_text) + local_file = os.path.join(conf.get_lib_dir(), REMOTE_ACCESS_FILE_NAME.format(self.remote_access.incarnation)) + self.save_cache(local_file, xml_text) + + def get_remote_access(self): + incarnation_file = os.path.join(conf.get_lib_dir(), + INCARNATION_FILE_NAME) + incarnation = self.fetch_cache(incarnation_file) + file_name = REMOTE_ACCESS_FILE_NAME.format(incarnation) + remote_access_file = os.path.join(conf.get_lib_dir(), file_name) + if not os.path.isfile(remote_access_file): + # no remote access data. + return None + xml_text = self.fetch_cache(remote_access_file) + remote_access = RemoteAccess(xml_text) + return remote_access + def update_ext_conf(self, goal_state): if goal_state.ext_uri is None: logger.info("ExtensionsConfig.xml uri is empty") @@ -690,63 +761,78 @@ self.save_cache(local_file, xml_text) self.ext_conf = ExtensionsConfig(xml_text) + def save_or_update_goal_state_file(self, incarnation, xml_text): + # It should create a new file if the incarnation number is new. + # It should overwrite the existing file if the incarnation number is the same. 
+ file_name = GOAL_STATE_FILE_NAME.format(incarnation) + goal_state_file = os.path.join(conf.get_lib_dir(), file_name) + self.save_cache(goal_state_file, xml_text) + + def update_host_plugin(self, container_id, role_config_name): + if self.host_plugin is not None: + self.host_plugin.container_id = container_id + self.host_plugin.role_config_name = role_config_name + def update_goal_state(self, forced=False, max_retry=3): - incarnation_file = os.path.join(conf.get_lib_dir(), - INCARNATION_FILE_NAME) + incarnation_file = os.path.join(conf.get_lib_dir(), INCARNATION_FILE_NAME) uri = GOAL_STATE_URI.format(self.endpoint) - # Start updating goalstate, retry on 410 - fetch_goal_state = True + current_goal_state_from_configuration = None for retry in range(0, max_retry): try: - if fetch_goal_state: - fetch_goal_state = False - + if current_goal_state_from_configuration is None: xml_text = self.fetch_config(uri, self.get_header()) - goal_state = GoalState(xml_text) + current_goal_state_from_configuration = GoalState(xml_text) if not forced: last_incarnation = None if os.path.isfile(incarnation_file): - last_incarnation = fileutil.read_file( - incarnation_file) - new_incarnation = goal_state.incarnation - if last_incarnation is not None and \ - last_incarnation == new_incarnation: - # Goalstate is not updated. 
- return + last_incarnation = fileutil.read_file(incarnation_file) + new_incarnation = current_goal_state_from_configuration.incarnation - self.goal_state = goal_state - file_name = GOAL_STATE_FILE_NAME.format(goal_state.incarnation) - goal_state_file = os.path.join(conf.get_lib_dir(), file_name) - self.save_cache(goal_state_file, xml_text) - self.update_hosting_env(goal_state) - self.update_shared_conf(goal_state) - self.update_certs(goal_state) - self.update_ext_conf(goal_state) - self.save_cache(incarnation_file, goal_state.incarnation) + if last_incarnation is not None and last_incarnation == new_incarnation: + # Incarnation number is not updated, but role config file and container ID + # can change without the incarnation number changing. Ensure they are updated in + # the goal state file on disk, as well as in the HostGA plugin instance. + self.goal_state = current_goal_state_from_configuration + self.save_or_update_goal_state_file(new_incarnation, xml_text) + self.update_host_plugin(current_goal_state_from_configuration.container_id, + current_goal_state_from_configuration.role_config_name) - if self.host_plugin is not None: - self.host_plugin.container_id = goal_state.container_id - self.host_plugin.role_config_name = goal_state.role_config_name + return + self.goal_state_flusher.flush(datetime.utcnow()) + + self.goal_state = current_goal_state_from_configuration + self.save_or_update_goal_state_file(current_goal_state_from_configuration.incarnation, xml_text) + self.update_hosting_env(current_goal_state_from_configuration) + self.update_shared_conf(current_goal_state_from_configuration) + self.update_certs(current_goal_state_from_configuration) + self.update_ext_conf(current_goal_state_from_configuration) + self.update_remote_access_conf(current_goal_state_from_configuration) + self.save_cache(incarnation_file, current_goal_state_from_configuration.incarnation) + self.update_host_plugin(current_goal_state_from_configuration.container_id, + 
current_goal_state_from_configuration.role_config_name) return + except IOError as e: + logger.warn("IOError processing goal state, retrying [{0}]", ustr(e)) + except ResourceGoneError: - logger.info("GoalState is stale -- re-fetching") - fetch_goal_state = True + logger.info("Goal state is stale, re-fetching") + current_goal_state_from_configuration = None - except Exception as e: - log_method = logger.info \ - if type(e) is ProtocolError \ - else logger.warn - log_method( - "Exception processing GoalState-related files: {0}".format( - ustr(e))) + except ProtocolError as e: + if retry < max_retry - 1: + logger.verbose("ProtocolError processing goal state, retrying [{0}]", ustr(e)) + else: + logger.error("ProtocolError processing goal state, giving up [{0}]", ustr(e)) - if retry < max_retry-1: - continue - raise + except Exception as e: + if retry < max_retry - 1: + logger.verbose("Exception processing goal state, retrying: [{0}]", ustr(e)) + else: + logger.error("Exception processing goal state, giving up: [{0}]", ustr(e)) raise ProtocolError("Exceeded max retry updating goal state") @@ -787,6 +873,22 @@ return None return self.certs + def get_current_handlers(self): + handler_list = list() + try: + incarnation = self.fetch_cache(os.path.join(conf.get_lib_dir(), + INCARNATION_FILE_NAME)) + ext_conf = ExtensionsConfig(self.fetch_cache(os.path.join(conf.get_lib_dir(), + EXT_CONF_FILE_NAME.format(incarnation)))) + handler_list = ext_conf.ext_handlers.extHandlers + except ProtocolError as pe: + # cache file is missing, nothing to do + logger.verbose(ustr(pe)) + except Exception as e: + logger.error("Could not obtain current handlers: {0}", ustr(e)) + + return handler_list + def get_ext_conf(self): if self.ext_conf is None: goal_state = self.get_goal_state() @@ -800,24 +902,15 @@ return self.ext_conf def get_ext_manifest(self, ext_handler, goal_state): - for update_goal_state in [False, True]: - try: - if update_goal_state: - self.update_goal_state(forced=True) - 
goal_state = self.get_goal_state() - - local_file = MANIFEST_FILE_NAME.format( - ext_handler.name, - goal_state.incarnation) - local_file = os.path.join(conf.get_lib_dir(), local_file) - xml_text = self.fetch_manifest(ext_handler.versionUris) - self.save_cache(local_file, xml_text) - return ExtensionManifest(xml_text) + local_file = MANIFEST_FILE_NAME.format(ext_handler.name, goal_state.incarnation) + local_file = os.path.join(conf.get_lib_dir(), local_file) - except ResourceGoneError: - continue - - raise ProtocolError("Failed to retrieve extension manifest") + try: + xml_text = self.fetch_manifest(ext_handler.versionUris) + self.save_cache(local_file, xml_text) + return ExtensionManifest(xml_text) + except Exception as e: + raise ExtensionDownloadError("Failed to retrieve extension manifest. Error: {0}".format(ustr(e))) def filter_package_list(self, family, ga_manifest, goal_state): complete_list = ga_manifest.pkg_list @@ -855,25 +948,36 @@ return allowed_list def get_gafamily_manifest(self, vmagent_manifest, goal_state): - for update_goal_state in [False, True]: - try: - if update_goal_state: - self.update_goal_state(forced=True) - goal_state = self.get_goal_state() - - local_file = MANIFEST_FILE_NAME.format( - vmagent_manifest.family, - goal_state.incarnation) - local_file = os.path.join(conf.get_lib_dir(), local_file) - xml_text = self.fetch_manifest( - vmagent_manifest.versionsManifestUris) - fileutil.write_file(local_file, xml_text) - return ExtensionManifest(xml_text) + self._remove_stale_agent_manifest(vmagent_manifest.family, goal_state.incarnation) - except ResourceGoneError: - continue + local_file = MANIFEST_FILE_NAME.format(vmagent_manifest.family, goal_state.incarnation) + local_file = os.path.join(conf.get_lib_dir(), local_file) - raise ProtocolError("Failed to retrieve GAFamily manifest") + try: + xml_text = self.fetch_manifest(vmagent_manifest.versionsManifestUris) + fileutil.write_file(local_file, xml_text) + return ExtensionManifest(xml_text) + 
except Exception as e: + raise ProtocolError("Failed to retrieve GAFamily manifest. Error: {0}".format(ustr(e))) + + def _remove_stale_agent_manifest(self, family, incarnation): + """ + The incarnation number can reset at any time, which means there + could be a stale agentsManifest on disk. Stale files are cleaned + on demand as new goal states arrive from WireServer. If the stale + file is not removed agent upgrade may be delayed. + + :param family: GA family, e.g. Prod or Test + :param incarnation: incarnation of the current goal state + """ + fn = AGENTS_MANIFEST_FILE_NAME.format( + family, + incarnation) + + agent_manifest = os.path.join(conf.get_lib_dir(), fn) + + if os.path.exists(agent_manifest): + os.unlink(agent_manifest) def check_wire_protocol_version(self): uri = VERSION_INFO_URI.format(self.endpoint) @@ -885,61 +989,153 @@ logger.info("Wire protocol version:{0}", PROTOCOL_VERSION) elif PROTOCOL_VERSION in version_info.get_supported(): logger.info("Wire protocol version:{0}", PROTOCOL_VERSION) - logger.warn("Server preferred version:{0}", preferred) + logger.info("Server preferred version:{0}", preferred) else: error = ("Agent supported wire protocol version: {0} was not " "advised by Fabric.").format(PROTOCOL_VERSION) raise ProtocolNotFoundError(error) - def upload_status_blob(self): - for update_goal_state in [False, True]: + def send_request_using_appropriate_channel(self, direct_func, host_func): + # A wrapper method for all function calls that send HTTP requests. The purpose of the method is to + # define which channel to use, direct or through the host plugin. For the host plugin channel, + # also implement a retry mechanism. + + # By default, the direct channel is the default channel. If that is the case, try getting a response + # through that channel. On failure, fall back to the host plugin channel. + + # When using the host plugin channel, regardless if it's set as default or not, try sending the request first. 
+ # On specific failures that indicate a stale goal state (such as resource gone or invalid container parameter), + # refresh the goal state and try again. If successful, set the host plugin channel as default. If failed, + # raise the exception. + + # NOTE: direct_func and host_func are passed as lambdas. Be careful about capturing goal state data in them as + # they will not be refreshed even if a goal state refresh is called before retrying the host_func. + + if not HostPluginProtocol.is_default_channel(): + ret = None try: - if update_goal_state: - self.update_goal_state(forced=True) + ret = direct_func() - ext_conf = self.get_ext_conf() + # Different direct channel functions report failure in different ways: by returning None, False, + # or raising ResourceGone or InvalidContainer exceptions. + if not ret: + logger.periodic_info(logger.EVERY_HOUR, "[PERIODIC] Request failed using the direct channel, " + "switching to host plugin.") + except (ResourceGoneError, InvalidContainerError) as e: + logger.periodic_info(logger.EVERY_HOUR, "[PERIODIC] Request failed using the direct channel, " + "switching to host plugin. 
Error: {0}".format(ustr(e))) - blob_uri = ext_conf.status_upload_blob - blob_type = ext_conf.status_upload_blob_type + if ret: + return ret + else: + logger.periodic_info(logger.EVERY_HALF_DAY, "[PERIODIC] Using host plugin as default channel.") - if blob_uri is not None: - - if not blob_type in ["BlockBlob", "PageBlob"]: - blob_type = "BlockBlob" - logger.verbose("Status Blob type is unspecified " - "-- assuming it is a BlockBlob") - - try: - self.status_blob.prepare(blob_type) - except Exception as e: - self.report_status_event( - "Exception creating status blob: {0}", ustr(e)) - return - - if not HostPluginProtocol.is_default_channel(): - try: - if self.status_blob.upload(blob_uri): - return - except HttpError as e: - pass + try: + ret = host_func() + except (ResourceGoneError, InvalidContainerError) as e: + old_container_id = self.host_plugin.container_id + old_role_config_name = self.host_plugin.role_config_name + + msg = "[PERIODIC] Request failed with the current host plugin configuration. " \ + "ContainerId: {0}, role config file: {1}. Fetching new goal state and retrying the call." \ + "Error: {2}".format(old_container_id, old_role_config_name, ustr(e)) + logger.periodic_info(logger.EVERY_SIX_HOURS, msg) + + self.update_goal_state(forced=True) + + new_container_id = self.host_plugin.container_id + new_role_config_name = self.host_plugin.role_config_name + msg = "[PERIODIC] Host plugin reconfigured with new parameters. " \ + "ContainerId: {0}, role config file: {1}.".format(new_container_id, new_role_config_name) + logger.periodic_info(logger.EVERY_SIX_HOURS, msg) - host = self.get_host_plugin() - host.put_vm_status(self.status_blob, - ext_conf.status_upload_blob, - ext_conf.status_upload_blob_type) - HostPluginProtocol.set_default_channel(True) - return + try: + ret = host_func() + if ret: + msg = "[PERIODIC] Request succeeded using the host plugin channel after goal state refresh. 
" \ + "ContainerId changed from {0} to {1}, " \ + "role config file changed from {2} to {3}.".format(old_container_id, new_container_id, + old_role_config_name, new_role_config_name) + add_periodic(delta=logger.EVERY_SIX_HOURS, + name=AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.HostPlugin, + is_success=True, + message=msg, + log_event=True) + + except (ResourceGoneError, InvalidContainerError) as e: + msg = "[PERIODIC] Request failed using the host plugin channel after goal state refresh. " \ + "ContainerId changed from {0} to {1}, role config file changed from {2} to {3}. " \ + "Exception type: {4}.".format(old_container_id, new_container_id, old_role_config_name, + new_role_config_name, type(e).__name__) + add_periodic(delta=logger.EVERY_SIX_HOURS, + name=AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.HostPlugin, + is_success=False, + message=msg, + log_event=True) + raise - except Exception as e: - # If the HostPlugin rejects the request, - # let the error continue, but set to use the HostPlugin - if isinstance(e, ResourceGoneError): - HostPluginProtocol.set_default_channel(True) - continue + if not HostPluginProtocol.is_default_channel(): + logger.info("Setting host plugin as default channel from now on. 
" + "Restart the agent to reset the default channel.") + HostPluginProtocol.set_default_channel(True) - self.report_status_event( - "Exception uploading status blob: {0}", ustr(e)) + return ret + + def upload_status_blob(self): + self.update_goal_state() + ext_conf = self.get_ext_conf() + + if ext_conf.status_upload_blob is None: + self.update_goal_state(forced=True) + ext_conf = self.get_ext_conf() + + if ext_conf.status_upload_blob is None: + raise ProtocolNotFoundError("Status upload uri is missing") + + blob_type = ext_conf.status_upload_blob_type + if blob_type not in ["BlockBlob", "PageBlob"]: + blob_type = "BlockBlob" + logger.verbose("Status Blob type is unspecified, assuming BlockBlob") + + try: + self.status_blob.prepare(blob_type) + except Exception as e: + raise ProtocolError("Exception creating status blob: {0}", ustr(e)) + + # Swap the order of use for the HostPlugin vs. the "direct" route. + # Prefer the use of HostPlugin. If HostPlugin fails fall back to the + # direct route. + # + # The code previously preferred the "direct" route always, and only fell back + # to the HostPlugin *if* there was an error. We would like to move to + # the HostPlugin for all traffic, but this is a big change. We would like + # to see how this behaves at scale, and have a fallback should things go + # wrong. This is why we try HostPlugin then direct. 
+ try: + host = self.get_host_plugin() + host.put_vm_status(self.status_blob, ext_conf.status_upload_blob, ext_conf.status_upload_blob_type) + return + except ResourceGoneError: + # do not attempt direct, force goal state update and wait to try again + self.update_goal_state(forced=True) + return + except Exception as e: + # for all other errors, fall back to direct + msg = "Falling back to direct upload: {0}".format(ustr(e)) + self.report_status_event(msg, is_success=True) + + try: + if self.status_blob.upload(ext_conf.status_upload_blob): return + except Exception as e: + msg = "Exception uploading status blob: {0}".format(ustr(e)) + self.report_status_event(msg, is_success=False) + + raise ProtocolError("Failed to upload status blob via either channel") def report_role_prop(self, thumbprint): goal_state = self.get_goal_state() @@ -1001,6 +1197,8 @@ data = data_format.format(provider_id, event_str) try: header = self.get_header_for_xml_content() + # NOTE: The call to wireserver requests utf-8 encoding in the headers, but the body should not + # be encoded: some nodes in the telemetry pipeline do not support utf-8 encoding. 
resp = self.call_wireserver(restutil.http_post, uri, data, header) except HttpError as e: raise ProtocolError("Failed to send events:{0}".format(e)) @@ -1017,10 +1215,14 @@ if event.providerId not in buf: buf[event.providerId] = "" event_str = event_to_v1(event) - if len(event_str) >= 63 * 1024: - logger.warn("Single event too large: {0}", event_str[300:]) + if len(event_str) >= MAX_EVENT_BUFFER_SIZE: + details_of_event = [ustr(x.name) + ":" + ustr(x.value) for x in event.parameters if x.name in + ["Name", "Version", "Operation", "OperationSuccess"]] + logger.periodic_warn(logger.EVERY_HALF_HOUR, + "Single event too large: {0}, with the length: {1} more than the limit({2})" + .format(str(details_of_event), len(event_str), MAX_EVENT_BUFFER_SIZE)) continue - if len(buf[event.providerId] + event_str) >= 63 * 1024: + if len(buf[event.providerId] + event_str) >= MAX_EVENT_BUFFER_SIZE: self.send_event(event.providerId, buf[event.providerId]) buf[event.providerId] = "" buf[event.providerId] = buf[event.providerId] + event_str @@ -1030,15 +1232,14 @@ if len(buf[provider_id]) > 0: self.send_event(provider_id, buf[provider_id]) - def report_status_event(self, message, *args): + def report_status_event(self, message, is_success): from azurelinuxagent.common.event import report_event, \ - WALAEventOperation + WALAEventOperation - message = message.format(*args) - logger.warn(message) report_event(op=WALAEventOperation.ReportStatus, - is_success=False, - message=message) + is_success=is_success, + message=message, + log_event=not is_success) def get_header(self): return { @@ -1075,51 +1276,49 @@ def has_artifacts_profile_blob(self): return self.ext_conf and not \ - textutil.is_str_none_or_whitespace(self.ext_conf.artifacts_profile_blob) + textutil.is_str_none_or_whitespace(self.ext_conf.artifacts_profile_blob) + + def get_artifacts_profile_through_host(self, blob): + host = self.get_host_plugin() + uri, headers = host.get_artifact_request(blob) + profile = self.fetch(uri, 
headers, use_proxy=False) + return profile def get_artifacts_profile(self): artifacts_profile = None - for update_goal_state in [False, True]: - try: - if update_goal_state: - self.update_goal_state(forced=True) - if self.has_artifacts_profile_blob(): - blob = self.ext_conf.artifacts_profile_blob + if self.has_artifacts_profile_blob(): + blob = self.ext_conf.artifacts_profile_blob + direct_func = lambda: self.fetch(blob) + # NOTE: the host_func may be called after refreshing the goal state, be careful about any goal state data + # in the lambda. + host_func = lambda: self.get_artifacts_profile_through_host(blob) - profile = None - if not HostPluginProtocol.is_default_channel(): - logger.verbose("Retrieving the artifacts profile") - profile = self.fetch(blob) - - if profile is None: - if HostPluginProtocol.is_default_channel(): - logger.verbose("Using host plugin as default channel") - else: - logger.verbose("Failed to download artifacts profile, " - "switching to host plugin") - - host = self.get_host_plugin() - uri, headers = host.get_artifact_request(blob) - config = self.fetch(uri, headers, use_proxy=False) - profile = self.decode_config(config) - - if not textutil.is_str_none_or_whitespace(profile): - logger.verbose("Artifacts profile downloaded") - artifacts_profile = InVMArtifactsProfile(profile) + logger.verbose("Retrieving the artifacts profile") - return artifacts_profile + try: + profile = self.send_request_using_appropriate_channel(direct_func, host_func) + except Exception as e: + logger.warn("Exception retrieving artifacts profile: {0}".format(ustr(e))) + return None - except ResourceGoneError: - HostPluginProtocol.set_default_channel(True) - continue + if not textutil.is_str_empty(profile): + logger.verbose("Artifacts profile downloaded") + try: + artifacts_profile = InVMArtifactsProfile(profile) + except Exception: + logger.warn("Could not parse artifacts profile blob") + msg = "Content: [{0}]".format(profile) + logger.verbose(msg) + + from 
azurelinuxagent.common.event import report_event, WALAEventOperation + report_event(op=WALAEventOperation.ArtifactsProfileBlob, + is_success=False, + message=msg, + log_event=False) - except Exception as e: - logger.warn( - "Exception retrieving artifacts profile: {0}".format( - ustr(e))) + return artifacts_profile - return None class VersionInfo(object): def __init__(self, xml_text): @@ -1162,6 +1361,7 @@ self.expected_state = None self.hosting_env_uri = None self.shared_conf_uri = None + self.remote_access_uri = None self.certs_uri = None self.ext_uri = None self.role_instance_id = None @@ -1189,6 +1389,8 @@ self.role_config_name = findtext(role_config, "ConfigName") container = find(xml_doc, "Container") self.container_id = findtext(container, "ContainerId") + os.environ[CONTAINER_ID_ENV_VARIABLE] = self.container_id + self.remote_access_uri = findtext(container, "RemoteAccessInfo") lbprobe_ports = find(xml_doc, "LBProbePorts") self.load_balancer_probe_port = findtext(lbprobe_ports, "Port") return self @@ -1242,6 +1444,73 @@ return self +class RemoteAccess(object): + """ + Object containing information about user accounts + """ + + # + # + # + # + # + # + # + # + # + # + # + # + # + + def __init__(self, xml_text): + logger.verbose("Load RemoteAccess.xml") + self.version = None + self.incarnation = None + self.user_list = RemoteAccessUsersList() + + self.xml_text = None + self.parse(xml_text) + + def parse(self, xml_text): + """ + Parse xml document containing user account information + """ + if xml_text is None or len(xml_text) == 0: + return None + self.xml_text = xml_text + xml_doc = parse_doc(xml_text) + self.incarnation = findtext(xml_doc, "Incarnation") + self.version = findtext(xml_doc, "Version") + user_collection = find(xml_doc, "Users") + users = findall(user_collection, "User") + + for user in users: + remote_access_user = self.parse_user(user) + self.user_list.users.append(remote_access_user) + return self + + def parse_user(self, user): + name = 
findtext(user, "Name") + encrypted_password = findtext(user, "Password") + expiration = findtext(user, "Expiration") + remote_access_user = RemoteAccessUser(name, encrypted_password, expiration) + return remote_access_user + + +class UserAccount(object): + """ + Stores information about single user account + """ + + def __init__(self): + self.Name = None + self.EncryptedPassword = None + self.Password = None + self.Expiration = None + self.Groups = [] + + class Certificates(object): """ Object containing certificates of host and provisioned user. @@ -1262,6 +1531,12 @@ if data is None: return + # if the certificates format is not Pkcs7BlobWithPfxContents do not parse it + certificateFormat = findtext(xml_doc, "Format") + if certificateFormat and certificateFormat != "Pkcs7BlobWithPfxContents": + logger.warn("The Format is not Pkcs7BlobWithPfxContents. Format is " + certificateFormat) + return + cryptutil = CryptUtil(conf.get_openssl_cmd()) p7m_file = os.path.join(conf.get_lib_dir(), P7M_FILE_NAME) p7m = ("MIME-Version:1.0\n" @@ -1327,6 +1602,18 @@ tmp_file = prvs[pubkey] prv = "{0}.prv".format(thumbprint) os.rename(tmp_file, os.path.join(conf.get_lib_dir(), prv)) + logger.info("Found private key matching thumbprint {0}".format(thumbprint)) + else: + # Since private key has *no* matching certificate, + # it will not be named correctly + logger.warn("Found NO matching cert/thumbprint for private key!") + + # Log if any certificates were found without matching private keys + # This can happen (rarely), and is useful to know for debugging + for pubkey in thumbprints: + if not pubkey in prvs: + msg = "Certificate with thumbprint {0} has no matching private key." 
+ logger.info(msg.format(thumbprints[pubkey])) for v1_cert in v1_cert_list: cert = Cert() @@ -1401,15 +1688,6 @@ ext_handler.properties.version = getattrib(plugin, "version") ext_handler.properties.state = getattrib(plugin, "state") - ext_handler.properties.upgradeGuid = getattrib(plugin, "upgradeGuid") - if not ext_handler.properties.upgradeGuid: - ext_handler.properties.upgradeGuid = None - auto_upgrade = getattrib(plugin, "autoUpgrade") - if auto_upgrade is not None and auto_upgrade.lower() == "true": - ext_handler.properties.upgradePolicy = "auto" - else: - ext_handler.properties.upgradePolicy = "manual" - location = getattrib(plugin, "location") failover_location = getattrib(plugin, "failoverlocation") for uri in [location, failover_location]: @@ -1441,6 +1719,15 @@ logger.error("Invalid extension settings") return + depends_on_level = 0 + depends_on_node = find(settings[0], "DependsOn") + if depends_on_node != None: + try: + depends_on_level = int(getattrib(depends_on_node, "dependencyLevel")) + except (ValueError, TypeError): + logger.warn("Could not parse dependencyLevel for handler {0}. 
Setting it to 0".format(name)) + depends_on_level = 0 + for plugin_settings_list in runtime_settings["runtimeSettings"]: handler_settings = plugin_settings_list["handlerSettings"] ext = Extension() @@ -1450,6 +1737,7 @@ ext.sequenceNumber = seqNo ext.publicSettings = handler_settings.get("publicSettings") ext.protectedSettings = handler_settings.get("protectedSettings") + ext.dependencyLevel = depends_on_level thumbprint = handler_settings.get( "protectedSettingsCertThumbprint") ext.certificateThumbprint = thumbprint @@ -1513,12 +1801,13 @@ * encryptedHealthChecks (optional) * encryptedApplicationProfile (optional) """ + def __init__(self, artifacts_profile): - if not textutil.is_str_none_or_whitespace(artifacts_profile): + if not textutil.is_str_empty(artifacts_profile): self.__dict__.update(parse_json(artifacts_profile)) def is_on_hold(self): # hasattr() is not available in Python 2.6 if 'onHold' in self.__dict__: - return self.onHold.lower() == 'true' + return str(self.onHold).lower() == 'true' return False diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/rdma.py walinuxagent-2.2.45/azurelinuxagent/common/rdma.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/rdma.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/rdma.py 2019-11-07 00:36:56.000000000 +0000 @@ -22,7 +22,6 @@ import os import re import time -import threading import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger @@ -39,7 +38,7 @@ '/usr/local/etc/dat.conf' ] -def setup_rdma_device(): +def setup_rdma_device(nd_version): logger.verbose("Parsing SharedConfig XML contents for RDMA details") xml_doc = parse_doc( fileutil.read_file(os.path.join(conf.get_lib_dir(), SHARED_CONF_FILE_NAME))) @@ -71,19 +70,24 @@ rdma_ipv4_addr, rdma_mac_addr)) # Set up the RDMA device with collected informatino - RDMADeviceHandler(rdma_ipv4_addr, rdma_mac_addr).start() + RDMADeviceHandler(rdma_ipv4_addr, rdma_mac_addr, 
nd_version).start() logger.info("RDMA: device is set up") return class RDMAHandler(object): driver_module_name = 'hv_network_direct' + nd_version = None - @staticmethod - def get_rdma_version(): + def get_rdma_version(self): """Retrieve the firmware version information from the system. This depends on information provided by the Linux kernel.""" + if self.nd_version : + return self.nd_version + + kvp_key_size = 512 + kvp_value_size = 2048 driver_info_source = '/var/lib/hyperv/.kvp_pool_0' base_kernel_err_msg = 'Kernel does not provide the necessary ' base_kernel_err_msg += 'information or the kvp daemon is not running.' @@ -93,21 +97,24 @@ logger.error(error_msg % driver_info_source) return - lines = open(driver_info_source).read() - if not lines: - error_msg = 'RDMA: Source file "%s" is empty. ' - error_msg += base_kernel_err_msg - logger.error(error_msg % driver_info_source) - return + f = open(driver_info_source) + while True : + key = f.read(kvp_key_size) + value = f.read(kvp_value_size) + if key and value : + key_0 = key.split("\x00")[0] + value_0 = value.split("\x00")[0] + if key_0 == "NdDriverVersion" : + f.close() + self.nd_version = value_0 + return self.nd_version + else : + break + f.close() - r = re.search("NdDriverVersion\0+(\d\d\d\.\d)", lines) - if r: - NdDriverVersion = r.groups()[0] - return NdDriverVersion - else: - error_msg = 'RDMA: NdDriverVersion not found in "%s"' - logger.error(error_msg % driver_info_source) - return + error_msg = 'RDMA: NdDriverVersion not found in "%s"' + logger.error(error_msg % driver_info_source) + return @staticmethod def is_kvp_daemon_running(): @@ -144,6 +151,15 @@ logger.info('RDMA: Loaded the kernel driver successfully.') return True + def install_driver_if_needed(self): + if self.nd_version: + if conf.enable_check_rdma_driver(): + self.install_driver() + else: + logger.info('RDMA: check RDMA driver is disabled, skip installing driver') + else: + logger.info('RDMA: skip installing driver when ndversion not 
present\n') + def install_driver(self): """Install the driver. This is distribution specific and must be overwritten in the child implementation.""" @@ -180,61 +196,109 @@ """ rdma_dev = '/dev/hvnd_rdma' + sriov_dir = '/sys/class/infiniband' device_check_timeout_sec = 120 device_check_interval_sec = 1 + ipoib_check_timeout_sec = 60 + ipoib_check_interval_sec = 1 ipv4_addr = None mac_adr = None + nd_version = None - def __init__(self, ipv4_addr, mac_addr): + def __init__(self, ipv4_addr, mac_addr, nd_version): self.ipv4_addr = ipv4_addr self.mac_addr = mac_addr + self.nd_version = nd_version def start(self): - """ - Start a thread in the background to process the RDMA tasks and returns. - """ - logger.info("RDMA: starting device processing in the background.") - threading.Thread(target=self.process).start() + logger.info("RDMA: starting device processing.") + self.process() + logger.info("RDMA: completed device processing.") def process(self): try: - RDMADeviceHandler.update_dat_conf(dapl_config_paths, self.ipv4_addr) + if not self.nd_version : + logger.info("RDMA: provisioning SRIOV RDMA device.") + self.provision_sriov_rdma() + else : + logger.info("RDMA: provisioning Network Direct RDMA device.") + self.provision_network_direct_rdma() + except Exception as e: + logger.error("RDMA: device processing failed: {0}".format(e)) - skip_rdma_device = False - module_name = "hv_network_direct" - retcode,out = shellutil.run_get_output("modprobe -R %s" % module_name, chk_err=False) - if retcode == 0: - module_name = out.strip() - else: - logger.info("RDMA: failed to resolve module name. 
Use original name") - retcode,out = shellutil.run_get_output("modprobe %s" % module_name) - if retcode != 0: - logger.error("RDMA: failed to load module %s" % module_name) - return - retcode,out = shellutil.run_get_output("modinfo %s" % module_name) - if retcode == 0: - version = re.search("version:\s+(\d+)\.(\d+)\.(\d+)\D", out, re.IGNORECASE) - if version: - v1 = int(version.groups(0)[0]) - v2 = int(version.groups(0)[1]) - if v1>4 or v1==4 and v2>0: - logger.info("Skip setting /dev/hvnd_rdma on 4.1 or later") - skip_rdma_device = True - else: - logger.info("RDMA: hv_network_direct driver version not present, assuming 4.0.x or older.") + def provision_network_direct_rdma(self) : + RDMADeviceHandler.update_dat_conf(dapl_config_paths, self.ipv4_addr) + + if not conf.enable_check_rdma_driver(): + logger.info("RDMA: skip checking RDMA driver version") + RDMADeviceHandler.update_network_interface(self.mac_addr, self.ipv4_addr) + return + + skip_rdma_device = False + module_name = "hv_network_direct" + retcode,out = shellutil.run_get_output("modprobe -R %s" % module_name, chk_err=False) + if retcode == 0: + module_name = out.strip() + else: + logger.info("RDMA: failed to resolve module name. 
Use original name") + retcode,out = shellutil.run_get_output("modprobe %s" % module_name) + if retcode != 0: + logger.error("RDMA: failed to load module %s" % module_name) + return + retcode,out = shellutil.run_get_output("modinfo %s" % module_name) + if retcode == 0: + version = re.search("version:\s+(\d+)\.(\d+)\.(\d+)\D", out, re.IGNORECASE) + if version: + v1 = int(version.groups(0)[0]) + v2 = int(version.groups(0)[1]) + if v1>4 or v1==4 and v2>0: + logger.info("Skip setting /dev/hvnd_rdma on 4.1 or later") + skip_rdma_device = True else: - logger.warn("RDMA: failed to get module info on hv_network_direct.") + logger.info("RDMA: hv_network_direct driver version not present, assuming 4.0.x or older.") + else: + logger.warn("RDMA: failed to get module info on hv_network_direct.") - if not skip_rdma_device: - RDMADeviceHandler.wait_rdma_device( - self.rdma_dev, self.device_check_timeout_sec, self.device_check_interval_sec) - RDMADeviceHandler.write_rdma_config_to_device( - self.rdma_dev, self.ipv4_addr, self.mac_addr) + if not skip_rdma_device: + RDMADeviceHandler.wait_rdma_device( + self.rdma_dev, self.device_check_timeout_sec, self.device_check_interval_sec) + RDMADeviceHandler.write_rdma_config_to_device( + self.rdma_dev, self.ipv4_addr, self.mac_addr) + + RDMADeviceHandler.update_network_interface(self.mac_addr, self.ipv4_addr) + + def provision_sriov_rdma(self) : + RDMADeviceHandler.wait_any_rdma_device( + self.sriov_dir, self.device_check_timeout_sec, self.device_check_interval_sec) + RDMADeviceHandler.update_iboip_interface(self.ipv4_addr, self.ipoib_check_timeout_sec, self.ipoib_check_interval_sec) + return - RDMADeviceHandler.update_network_interface(self.mac_addr, self.ipv4_addr) - except Exception as e: - logger.error("RDMA: device processing failed: {0}".format(e)) + @staticmethod + def update_iboip_interface(ipv4_addr, timeout_sec, check_interval_sec) : + logger.info("Wait for ib0 become available") + total_retries = timeout_sec/check_interval_sec + n 
= 0 + found_ib0 = None + while not found_ib0 and n < total_retries: + ret, output = shellutil.run_get_output("ifconfig -a") + if ret != 0: + raise Exception("Failed to list network interfaces") + found_ib0 = re.search("ib0", output, re.IGNORECASE) + if found_ib0: + break + time.sleep(check_interval_sec) + n += 1 + + if not found_ib0: + raise Exception("ib0 is not available") + + netmask = 16 + logger.info("RDMA: configuring IPv4 addr and netmask on ipoib interface") + addr = '{0}/{1}'.format(ipv4_addr, netmask) + if shellutil.run("ifconfig ib0 {0}".format(addr)) != 0: + raise Exception("Could set addr to {0} on ib0".format(addr)) + logger.info("RDMA: ipoib address and netmask configured on interface") @staticmethod def update_dat_conf(paths, ipv4_addr): @@ -291,6 +355,26 @@ return logger.verbose( "RDMA: device not ready, sleep {0}s".format(check_interval_sec)) + time.sleep(check_interval_sec) + n += 1 + logger.error("RDMA device wait timed out") + raise Exception("The device did not show up in {0} seconds ({1} retries)".format( + timeout_sec, total_retries)) + + @staticmethod + def wait_any_rdma_device(dir, timeout_sec, check_interval_sec): + logger.info( + "RDMA: waiting for any Infiniband device at directory={0} timeout={1}s".format( + dir, timeout_sec)) + total_retries = timeout_sec/check_interval_sec + n = 0 + while n < total_retries: + r = os.listdir(dir) + if r: + logger.info("RDMA: device found in {0}".format(dir)) + return + logger.verbose( + "RDMA: device not ready, sleep {0}s".format(check_interval_sec)) time.sleep(check_interval_sec) n += 1 logger.error("RDMA device wait timed out") diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/telemetryevent.py walinuxagent-2.2.45/azurelinuxagent/common/telemetryevent.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/telemetryevent.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/telemetryevent.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,45 @@ 
+# Microsoft Azure Linux Agent +# +# Copyright 2019 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.6+ and Openssl 1.0+ +# + +from azurelinuxagent.common.datacontract import DataContract, DataContractList + + +class TelemetryEventParam(DataContract): + def __init__(self, name=None, value=None): + self.name = name + self.value = value + + def __eq__(self, other): + return isinstance(other, TelemetryEventParam) and other.name == self.name and other.value == self.value + + +class TelemetryEvent(DataContract): + def __init__(self, eventId=None, providerId=None): + self.eventId = eventId + self.providerId = providerId + self.parameters = DataContractList(TelemetryEventParam) + + # Checking if the particular param name is in the TelemetryEvent. + def __contains__(self, param_name): + return param_name in [param.name for param in self.parameters] + + +class TelemetryEventList(DataContract): + def __init__(self): + self.events = DataContractList(TelemetryEvent) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/archive.py walinuxagent-2.2.45/azurelinuxagent/common/utils/archive.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/archive.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/utils/archive.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,218 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the Apache License. 
+import errno +import os +import re +import shutil +import zipfile + +from azurelinuxagent.common.utils import fileutil + +import azurelinuxagent.common.logger as logger + + +""" +archive.py + +The module supports the archiving of guest agent state. Guest +agent state is flushed whenever there is a incarnation change. +The flush is archived periodically (once a day). + +The process works as follows whenever a new incarnation arrives. + + 1. Flush - move all state files to a new directory under + .../history/timestamp/. + 2. Archive - enumerate all directories under .../history/timestamp + and create a .zip file named timestamp.zip. Delete the archive + directory + 3. Purge - glob the list .zip files, sort by timestamp in descending + order, keep the first 50 results, and delete the rest. + +... is the directory where the agent's state resides, by default this +is /var/lib/waagent. + +The timestamp is an ISO8601 formatted value. +""" + +ARCHIVE_DIRECTORY_NAME = 'history' + +MAX_ARCHIVED_STATES = 50 + +CACHE_PATTERNS = [ + re.compile("^(.*)\.(\d+)\.(agentsManifest)$", re.IGNORECASE), + re.compile("^(.*)\.(\d+)\.(manifest\.xml)$", re.IGNORECASE), + re.compile("^(.*)\.(\d+)\.(xml)$", re.IGNORECASE) +] + +# 2018-04-06T08:21:37.142697 +# 2018-04-06T08:21:37.142697.zip +ARCHIVE_PATTERNS_DIRECTORY = re.compile('^\d{4}\-\d{2}\-\d{2}T\d{2}:\d{2}:\d{2}\.\d+$') +ARCHIVE_PATTERNS_ZIP = re.compile('^\d{4}\-\d{2}\-\d{2}T\d{2}:\d{2}:\d{2}\.\d+\.zip$') + + +class StateFlusher(object): + def __init__(self, lib_dir): + self._source = lib_dir + + d = os.path.join(self._source, ARCHIVE_DIRECTORY_NAME) + if not os.path.exists(d): + try: + fileutil.mkdir(d) + except OSError as e: + if e.errno != errno.EEXIST: + logger.error("{0} : {1}", self._source, e.strerror) + + def flush(self, timestamp): + files = self._get_files_to_archive() + if len(files) == 0: + return + + if self._mkdir(timestamp): + self._archive(files, timestamp) + else: + self._purge(files) + + def history_dir(self, 
timestamp): + return os.path.join(self._source, ARCHIVE_DIRECTORY_NAME, timestamp.isoformat()) + + def _get_files_to_archive(self): + files = [] + for f in os.listdir(self._source): + full_path = os.path.join(self._source, f) + for pattern in CACHE_PATTERNS: + m = pattern.match(f) + if m is not None: + files.append(full_path) + break + + return files + + def _archive(self, files, timestamp): + for f in files: + dst = os.path.join(self.history_dir(timestamp), os.path.basename(f)) + shutil.move(f, dst) + + def _purge(self, files): + for f in files: + os.remove(f) + + def _mkdir(self, timestamp): + d = self.history_dir(timestamp) + + try: + fileutil.mkdir(d, mode=0o700) + return True + except IOError as e: + logger.error("{0} : {1}".format(d, e.strerror)) + return False + + +# TODO: use @total_ordering once RHEL/CentOS and SLES 11 are EOL. +# @total_ordering first appeared in Python 2.7 and 3.2 +# If there are more use cases for @total_ordering, I will +# consider re-implementing it. +class State(object): + def __init__(self, path, timestamp): + self._path = path + self._timestamp = timestamp + + @property + def timestamp(self): + return self._timestamp + + def delete(self): + pass + + def archive(self): + pass + + def __eq__(self, other): + return self._timestamp == other.timestamp + + def __ne__(self, other): + return self._timestamp != other.timestamp + + def __lt__(self, other): + return self._timestamp < other.timestamp + + def __gt__(self, other): + return self._timestamp > other.timestamp + + def __le__(self, other): + return self._timestamp <= other.timestamp + + def __ge__(self, other): + return self._timestamp >= other.timestamp + + +class StateZip(State): + def __init__(self, path, timestamp): + super(StateZip,self).__init__(path, timestamp) + + def delete(self): + os.remove(self._path) + + +class StateDirectory(State): + def __init__(self, path, timestamp): + super(StateDirectory, self).__init__(path, timestamp) + + def delete(self): + 
shutil.rmtree(self._path) + + def archive(self): + fn_tmp = "{0}.zip.tmp".format(self._path) + fn = "{0}.zip".format(self._path) + + ziph = zipfile.ZipFile(fn_tmp, 'w') + for f in os.listdir(self._path): + full_path = os.path.join(self._path, f) + ziph.write(full_path, f, zipfile.ZIP_DEFLATED) + + ziph.close() + + os.rename(fn_tmp, fn) + shutil.rmtree(self._path) + + +class StateArchiver(object): + def __init__(self, lib_dir): + self._source = os.path.join(lib_dir, ARCHIVE_DIRECTORY_NAME) + + if not os.path.isdir(self._source): + try: + fileutil.mkdir(self._source, mode=0o700) + except IOError as e: + if e.errno != errno.EEXIST: + logger.error("{0} : {1}", self._source, e.strerror) + + def purge(self): + """ + Delete "old" archive directories and .zip archives. Old + is defined as any directories or files older than the X + newest ones. + """ + states = self._get_archive_states() + states.sort(reverse=True) + + for state in states[MAX_ARCHIVED_STATES:]: + state.delete() + + def archive(self): + states = self._get_archive_states() + for state in states: + state.archive() + + def _get_archive_states(self): + states = [] + for f in os.listdir(self._source): + full_path = os.path.join(self._source, f) + m = ARCHIVE_PATTERNS_DIRECTORY.match(f) + if m is not None: + states.append(StateDirectory(full_path, m.group(0))) + + m = ARCHIVE_PATTERNS_ZIP.match(f) + if m is not None: + states.append(StateZip(full_path, m.group(0))) + + return states diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/cryptutil.py walinuxagent-2.2.45/azurelinuxagent/common/utils/cryptutil.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/cryptutil.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/utils/cryptutil.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 
2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,11 +14,14 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import base64 +import errno import struct +import os.path +import subprocess from azurelinuxagent.common.future import ustr, bytebuffer from azurelinuxagent.common.exception import CryptError @@ -26,6 +29,10 @@ import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil + +DECRYPT_SECRET_CMD = "{0} cms -decrypt -inform DER -inkey {1} -in /dev/stdin" + + class CryptUtil(object): def __init__(self, openssl_cmd): self.openssl_cmd = openssl_cmd @@ -39,37 +46,47 @@ "-out {2}").format(self.openssl_cmd, prv_file, crt_file) rc = shellutil.run(cmd) if rc != 0: - logger.error("Failed to create {0} and {1} certificates".format( - prv_file, crt_file)) + logger.error("Failed to create {0} and {1} certificates".format(prv_file, crt_file)) def get_pubkey_from_prv(self, file_name): - cmd = "{0} rsa -in {1} -pubout 2>/dev/null".format(self.openssl_cmd, - file_name) - pub = shellutil.run_get_output(cmd)[1] - return pub + if not os.path.exists(file_name): + raise IOError(errno.ENOENT, "File not found", file_name) + else: + cmd = [self.openssl_cmd, "rsa", "-in", file_name, "-pubout"] + pub = shellutil.run_command(cmd, log_error=True) + return pub def get_pubkey_from_crt(self, file_name): - cmd = "{0} x509 -in {1} -pubkey -noout".format(self.openssl_cmd, - file_name) - pub = shellutil.run_get_output(cmd)[1] - return pub + if not os.path.exists(file_name): + raise IOError(errno.ENOENT, "File not found", file_name) + else: + cmd = [self.openssl_cmd, "x509", "-in", file_name, "-pubkey", "-noout"] + pub = shellutil.run_command(cmd, log_error=True) + return pub def get_thumbprint_from_crt(self, file_name): - cmd="{0} x509 -in {1} -fingerprint 
-noout".format(self.openssl_cmd, - file_name) - thumbprint = shellutil.run_get_output(cmd)[1] - thumbprint = thumbprint.rstrip().split('=')[1].replace(':', '').upper() - return thumbprint + if not os.path.exists(file_name): + raise IOError(errno.ENOENT, "File not found", file_name) + else: + cmd = [self.openssl_cmd, "x509", "-in", file_name, "-fingerprint", "-noout"] + thumbprint = shellutil.run_command(cmd) + thumbprint = thumbprint.rstrip().split('=')[1].replace(':', '').upper() + return thumbprint def decrypt_p7m(self, p7m_file, trans_prv_file, trans_cert_file, pem_file): - cmd = ("{0} cms -decrypt -in {1} -inkey {2} -recip {3} " - "| {4} pkcs12 -nodes -password pass: -out {5}" - "").format(self.openssl_cmd, p7m_file, trans_prv_file, - trans_cert_file, self.openssl_cmd, pem_file) - shellutil.run(cmd) - rc = shellutil.run(cmd) - if rc != 0: - logger.error("Failed to decrypt {0}".format(p7m_file)) + if not os.path.exists(p7m_file): + raise IOError(errno.ENOENT, "File not found", p7m_file) + elif not os.path.exists(trans_prv_file): + raise IOError(errno.ENOENT, "File not found", trans_prv_file) + else: + cmd = ("{0} cms -decrypt -in {1} -inkey {2} -recip {3} " + "| {4} pkcs12 -nodes -password pass: -out {5}" + "").format(self.openssl_cmd, p7m_file, trans_prv_file, + trans_cert_file, self.openssl_cmd, pem_file) + shellutil.run(cmd) + rc = shellutil.run(cmd) + if rc != 0: + logger.error("Failed to decrypt {0}".format(p7m_file)) def crt_to_ssh(self, input_file, output_file): shellutil.run("ssh-keygen -i -m PKCS8 -f {0} >> {1}".format(input_file, @@ -96,7 +113,7 @@ keydata.extend(b"\0") keydata.extend(self.num_to_bytes(n)) keydata_base64 = base64.b64encode(bytebuffer(keydata)) - return ustr(b"ssh-rsa " + keydata_base64 + b"\n", + return ustr(b"ssh-rsa " + keydata_base64 + b"\n", encoding='utf-8') except ImportError as e: raise CryptError("Failed to load pyasn1.codec.der") @@ -128,3 +145,16 @@ index = 7 return bytes(byte_array) + def decrypt_secret(self, 
encrypted_password, private_key): + try: + decoded = base64.b64decode(encrypted_password) + args = DECRYPT_SECRET_CMD.format(self.openssl_cmd, private_key).split(' ') + p = subprocess.Popen(args, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT) + p.stdin.write(decoded) + output = p.communicate()[0] + retcode = p.poll() + if retcode: + raise subprocess.CalledProcessError(retcode, "openssl cms -decrypt", output=output) + return output.decode('utf-16') + except Exception as e: + raise CryptError("Error decoding secret", e) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/extensionprocessutil.py walinuxagent-2.2.45/azurelinuxagent/common/utils/extensionprocessutil.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/extensionprocessutil.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/utils/extensionprocessutil.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,141 @@ +# Microsoft Azure Linux Agent +# +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# +# You may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ +# + +from azurelinuxagent.common.exception import ExtensionErrorCodes, ExtensionOperationError, ExtensionError +from azurelinuxagent.common.future import ustr +import os +import signal +import time + +TELEMETRY_MESSAGE_MAX_LEN = 3200 + + +def wait_for_process_completion_or_timeout(process, timeout): + """ + Utility function that waits for the process to complete within the given time frame. This function will terminate + the process if when the given time frame elapses. + :param process: Reference to a running process + :param timeout: Number of seconds to wait for the process to complete before killing it + :return: Two parameters: boolean for if the process timed out and the return code of the process (None if timed out) + """ + while timeout > 0 and process.poll() is None: + time.sleep(1) + timeout -= 1 + + return_code = None + + if timeout == 0: + os.killpg(os.getpgid(process.pid), signal.SIGKILL) + else: + # process completed or forked; sleep 1 sec to give the child process (if any) a chance to start + time.sleep(1) + return_code = process.wait() + + return timeout == 0, return_code + + +def handle_process_completion(process, command, timeout, stdout, stderr, error_code): + """ + Utility function that waits for process completion and retrieves its output (stdout and stderr) if it completed + before the timeout period. Otherwise, the process will get killed and an ExtensionError will be raised. + In case the return code is non-zero, ExtensionError will be raised. 
+ :param process: Reference to a running process + :param command: The extension command to run + :param timeout: Number of seconds to wait before killing the process + :param stdout: Must be a file since we seek on it when parsing the subprocess output + :param stderr: Must be a file since we seek on it when parsing the subprocess outputs + :param error_code: The error code to set if we raise an ExtensionError + :return: + """ + # Wait for process completion or timeout + timed_out, return_code = wait_for_process_completion_or_timeout(process, timeout) + process_output = read_output(stdout, stderr) + + if timed_out: + raise ExtensionError("Timeout({0}): {1}\n{2}".format(timeout, command, process_output), + code=ExtensionErrorCodes.PluginHandlerScriptTimedout) + + if return_code != 0: + raise ExtensionOperationError("Non-zero exit code: {0}, {1}\n{2}".format(return_code, command, process_output), + code=error_code, exit_code=return_code) + + return process_output + + +def read_output(stdout, stderr): + """ + Read the output of the process sent to stdout and stderr and trim them to the max appropriate length. + :param stdout: File containing the stdout of the process + :param stderr: File containing the stderr of the process + :return: Returns the formatted concatenated stdout and stderr of the process + """ + try: + stdout.seek(0) + stderr.seek(0) + + stdout = ustr(stdout.read(TELEMETRY_MESSAGE_MAX_LEN), encoding='utf-8', + errors='backslashreplace') + stderr = ustr(stderr.read(TELEMETRY_MESSAGE_MAX_LEN), encoding='utf-8', + errors='backslashreplace') + + return format_stdout_stderr(stdout, stderr) + except Exception as e: + return format_stdout_stderr("", "Cannot read stdout/stderr: {0}".format(ustr(e))) + + +def format_stdout_stderr(stdout, stderr, max_len=TELEMETRY_MESSAGE_MAX_LEN): + """ + Format stdout and stderr's output to make it suitable in telemetry. + The goal is to maximize the amount of output given the constraints + of telemetry. 
+ + For example, if there is more stderr output than stdout output give + more buffer space to stderr. + + :param str stdout: characters captured from stdout + :param str stderr: characters captured from stderr + :param int max_len: maximum length of the string to return + + :return: a string formatted with stdout and stderr that is less than + or equal to max_len. + :rtype: str + """ + template = "[stdout]\n{0}\n\n[stderr]\n{1}" + # +6 == len("{0}") + len("{1}") + max_len_each = int((max_len - len(template) + 6) / 2) + + if max_len_each <= 0: + return '' + + def to_s(captured_stdout, stdout_offset, captured_stderr, stderr_offset): + s = template.format(captured_stdout[stdout_offset:], captured_stderr[stderr_offset:]) + return s + + if len(stdout) + len(stderr) < max_len: + return to_s(stdout, 0, stderr, 0) + elif len(stdout) < max_len_each: + bonus = max_len_each - len(stdout) + stderr_len = min(max_len_each + bonus, len(stderr)) + return to_s(stdout, 0, stderr, -1*stderr_len) + elif len(stderr) < max_len_each: + bonus = max_len_each - len(stderr) + stdout_len = min(max_len_each + bonus, len(stdout)) + return to_s(stdout, -1*stdout_len, stderr, 0) + else: + return to_s(stdout, -1*max_len_each, stderr, -1*max_len_each) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/fileutil.py walinuxagent-2.2.45/azurelinuxagent/common/utils/fileutil.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/fileutil.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/utils/fileutil.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # """ @@ -27,7 +27,6 @@ import pwd import re import shutil -import string import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.textutil as textutil @@ -46,13 +45,6 @@ ] -def copy_file(from_path, to_path=None, to_dir=None): - if to_path is None: - to_path = os.path.join(to_dir, os.path.basename(from_path)) - shutil.copyfile(from_path, to_path) - return to_path - - def read_file(filepath, asbin=False, remove_bom=False, encoding='utf-8'): """ Read and return contents of 'filepath'. @@ -141,7 +133,7 @@ def rm_dirs(*args): """ - Remove the contents of each directry + Remove the contents of each directory """ for p in args: if not os.path.isdir(p): @@ -194,9 +186,10 @@ (Trailing whitespace is ignored.) """ try: - for line in (open(file_path, 'r')).readlines(): - if line_str == line.rstrip(): - return True + with open(file_path, 'r') as fh: + for line in fh.readlines(): + if line_str == line.rstrip(): + return True except Exception: # swallow exception pass @@ -208,11 +201,12 @@ Return match object if found in file. 
""" try: - pattern = re.compile(line_re) - for line in (open(file_path, 'r')).readlines(): - match = re.search(pattern, line) - if match: - return match + with open(file_path, 'r') as fh: + pattern = re.compile(line_re) + for line in fh.readlines(): + match = re.search(pattern, line) + if match: + return match except: pass diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/flexible_version.py walinuxagent-2.2.45/azurelinuxagent/common/utils/flexible_version.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/flexible_version.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/utils/flexible_version.py 2019-11-07 00:36:56.000000000 +0000 @@ -147,6 +147,18 @@ return True + def matches(self, that): + if self.sep != that.sep or len(self.version) > len(that.version): + return False + + for i in range(len(self.version)): + if self.version[i] != that.version[i]: + return False + if self.prerel_tags: + return self.prerel_tags == that.prerel_tags + + return True + def _assemble(self, version, sep, prerel_sep, prerelease): s = sep.join(map(str, version)) if prerelease is not None: diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/__init__.py walinuxagent-2.2.45/azurelinuxagent/common/utils/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/utils/__init__.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,6 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/networkutil.py walinuxagent-2.2.45/azurelinuxagent/common/utils/networkutil.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/networkutil.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/utils/networkutil.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,95 @@ +# +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.6+ and Openssl 1.0+ +# + + +class RouteEntry(object): + """ + Represents a single route. The destination, gateway, and mask members are hex representations of the IPv4 address in + network byte order. 
+ """ + def __init__(self, interface, destination, gateway, mask, flags, metric): + self.interface = interface + self.destination = destination + self.gateway = gateway + self.mask = mask + self.flags = int(flags, 16) + self.metric = int(metric) + + @staticmethod + def _net_hex_to_dotted_quad(value): + if len(value) != 8: + raise Exception("String to dotted quad conversion must be 8 characters") + octets = [] + for idx in range(6, -2, -2): + octets.append(str(int(value[idx:idx+2], 16))) + return ".".join(octets) + + def destination_quad(self): + return self._net_hex_to_dotted_quad(self.destination) + + def gateway_quad(self): + return self._net_hex_to_dotted_quad(self.gateway) + + def mask_quad(self): + return self._net_hex_to_dotted_quad(self.mask) + + def to_json(self): + f = '{{"Iface": "{0}", "Destination": "{1}", "Gateway": "{2}", "Mask": "{3}", "Flags": "{4:#06x}", "Metric": "{5}"}}' + return f.format(self.interface, self.destination_quad(), self.gateway_quad(), self.mask_quad(), + self.flags, self.metric) + + def __str__(self): + f = "Iface: {0}\tDestination: {1}\tGateway: {2}\tMask: {3}\tFlags: {4:#06x}\tMetric: {5}" + return f.format(self.interface, self.destination_quad(), self.gateway_quad(), self.mask_quad(), + self.flags, self.metric) + + def __repr__(self): + return 'RouteEntry("{0}", "{1}", "{2}", "{3}", "{4:#04x}", "{5}")'\ + .format(self.interface, self.destination, self.gateway, self.mask, self.flags, self.metric) + + +class NetworkInterfaceCard: + def __init__(self, name, link_info): + self.name = name + self.ipv4 = set() + self.ipv6 = set() + self.link = link_info + + def add_ipv4(self, info): + self.ipv4.add(info) + + def add_ipv6(self, info): + self.ipv6.add(info) + + def __eq__(self, other): + return self.link == other.link and \ + self.ipv4 == other.ipv4 and \ + self.ipv6 == other.ipv6 + + @staticmethod + def _json_array(items): + return "[{0}]".format(",".join(['"{0}"'.format(x) for x in sorted(items)])) + + def __str__(self): + entries = 
['"name": "{0}"'.format(self.name), + '"link": "{0}"'.format(self.link)] + if len(self.ipv4) > 0: + entries.append('"ipv4": {0}'.format(self._json_array(self.ipv4))) + if len(self.ipv6) > 0: + entries.append('"ipv6": {0}'.format(self._json_array(self.ipv6))) + return "{{ {0} }}".format(", ".join(entries)) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/restutil.py walinuxagent-2.2.45/azurelinuxagent/common/utils/restutil.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/restutil.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/utils/restutil.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,22 +14,24 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import os +import re import threading import time import traceback +import socket +import struct import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.textutil as textutil -from azurelinuxagent.common.exception import HttpError, ResourceGoneError +from azurelinuxagent.common.exception import HttpError, ResourceGoneError, InvalidContainerError from azurelinuxagent.common.future import httpclient, urlparse, ustr -from azurelinuxagent.common.version import PY_VERSION_MAJOR - +from azurelinuxagent.common.version import PY_VERSION_MAJOR, AGENT_NAME, GOAL_STATE_AGENT_VERSION SECURE_WARNING_EMITTED = False @@ -39,6 +41,9 @@ THROTTLE_RETRIES = 25 THROTTLE_DELAY_IN_SECONDS = 1 +REDACTED_TEXT = "" +SAS_TOKEN_RETRIEVAL_REGEX = re.compile(r'^(https?://[a-zA-Z0-9.].*sig=)([a-zA-Z0-9%-]*)(.*)$') + RETRY_CODES = [ httpclient.RESET_CONTENT, httpclient.PARTIAL_CONTENT, @@ -49,11 +54,10 @@ httpclient.SERVICE_UNAVAILABLE, httpclient.GATEWAY_TIMEOUT, httpclient.INSUFFICIENT_STORAGE, - 429, # Request Rate Limit Exceeded + 429, # Request Rate Limit Exceeded ] RESOURCE_GONE_CODES = [ - httpclient.BAD_REQUEST, httpclient.GONE ] @@ -63,6 +67,14 @@ httpclient.ACCEPTED ] +NOT_MODIFIED_CODES = [ + httpclient.NOT_MODIFIED +] + +HOSTPLUGIN_UPSTREAM_FAILURE_CODES = [ + 502 +] + THROTTLE_CODES = [ httpclient.FORBIDDEN, httpclient.SERVICE_UNAVAILABLE, @@ -76,10 +88,17 @@ httpclient.BadStatusLine ] +# http://www.gnu.org/software/wget/manual/html_node/Proxies.html HTTP_PROXY_ENV = "http_proxy" HTTPS_PROXY_ENV = "https_proxy" +NO_PROXY_ENV = "no_proxy" -DEFAULT_PROTOCOL_ENDPOINT='168.63.129.16' +HTTP_USER_AGENT = "{0}/{1}".format(AGENT_NAME, GOAL_STATE_AGENT_VERSION) +HTTP_USER_AGENT_HEALTH = "{0}+health".format(HTTP_USER_AGENT) +INVALID_CONTAINER_CONFIGURATION = "InvalidContainerConfiguration" +REQUEST_ROLE_CONFIG_FILE_NOT_FOUND = 
"RequestRoleConfigFileNotFound" + +DEFAULT_PROTOCOL_ENDPOINT = '168.63.129.16' HOST_PLUGIN_PORT = 32526 @@ -122,16 +141,24 @@ fib = (fib[1], fib[0]+fib[1]) return delay*fib[1] + def _is_retry_status(status, retry_codes=RETRY_CODES): return status in retry_codes + def _is_retry_exception(e): return len([x for x in RETRY_EXCEPTIONS if isinstance(e, x)]) > 0 + def _is_throttle_status(status): return status in THROTTLE_CODES + def _parse_url(url): + """ + Parse URL to get the components of the URL broken down to host, port + :rtype: string, int, bool, string + """ o = urlparse(url) rel_uri = o.path if o.fragment: @@ -144,6 +171,95 @@ return o.hostname, o.port, secure, rel_uri +def is_valid_cidr(string_network): + """ + Very simple check of the cidr format in no_proxy variable. + :rtype: bool + """ + if string_network.count('/') == 1: + try: + mask = int(string_network.split('/')[1]) + except ValueError: + return False + + if mask < 1 or mask > 32: + return False + + try: + socket.inet_aton(string_network.split('/')[0]) + except socket.error: + return False + else: + return False + return True + + +def dotted_netmask(mask): + """Converts mask from /xx format to xxx.xxx.xxx.xxx + Example: if mask is 24 function returns 255.255.255.0 + :rtype: str + """ + bits = 0xffffffff ^ (1 << 32 - mask) - 1 + return socket.inet_ntoa(struct.pack('>I', bits)) + + +def address_in_network(ip, net): + """This function allows you to check if an IP belongs to a network subnet + Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24 + returns False if ip = 192.168.1.1 and net = 192.168.100.0/24 + :rtype: bool + """ + ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0] + netaddr, bits = net.split('/') + netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0] + network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask + return (ipaddr & netmask) == (network & netmask) + + +def is_ipv4_address(string_ip): + """ + :rtype: bool + """ + try: + 
socket.inet_aton(string_ip) + except socket.error: + return False + return True + + +def get_no_proxy(): + no_proxy = os.environ.get(NO_PROXY_ENV) or os.environ.get(NO_PROXY_ENV.upper()) + + if no_proxy: + no_proxy = [host for host in no_proxy.replace(' ', '').split(',') if host] + + # no_proxy in the proxies argument takes precedence + return no_proxy + + +def bypass_proxy(host): + no_proxy = get_no_proxy() + + if no_proxy: + if is_ipv4_address(host): + for proxy_ip in no_proxy: + if is_valid_cidr(proxy_ip): + if address_in_network(host, proxy_ip): + return True + elif host == proxy_ip: + # If no_proxy ip was defined in plain IP notation instead of cidr notation & + # matches the IP of the index + return True + else: + for proxy_domain in no_proxy: + if host.lower().endswith(proxy_domain.lower()): + # The URL does match something in no_proxy, so we don't want + # to apply the proxies on this URL. + return True + + return False + + def _get_http_proxy(secure=False): # Prefer the configuration settings over environment variables host = conf.get_httpproxy_host() @@ -166,39 +282,46 @@ return host, port +def redact_sas_tokens_in_urls(url): + return SAS_TOKEN_RETRIEVAL_REGEX.sub(r"\1" + REDACTED_TEXT + r"\3", url) + + def _http_request(method, host, rel_uri, port=None, data=None, secure=False, headers=None, proxy_host=None, proxy_port=None): headers = {} if headers is None else headers + headers['Connection'] = 'close' + use_proxy = proxy_host is not None and proxy_port is not None if port is None: port = 443 if secure else 80 + if 'User-Agent' not in headers: + headers['User-Agent'] = HTTP_USER_AGENT + if use_proxy: conn_host, conn_port = proxy_host, proxy_port scheme = "https" if secure else "http" url = "{0}://{1}:{2}{3}".format(scheme, host, port, rel_uri) - else: conn_host, conn_port = host, port url = rel_uri if secure: conn = httpclient.HTTPSConnection(conn_host, - conn_port, - timeout=10) + conn_port, + timeout=10) if use_proxy: conn.set_tunnel(host, port) - 
else: conn = httpclient.HTTPConnection(conn_host, - conn_port, - timeout=10) + conn_port, + timeout=10) logger.verbose("HTTP connection [{0}] [{1}] [{2}] [{3}]", method, - url, + redact_sas_tokens_in_urls(url), data, headers) @@ -219,7 +342,7 @@ # Use the HTTP(S) proxy proxy_host, proxy_port = (None, None) - if use_proxy: + if use_proxy and not bypass_proxy(host): proxy_host, proxy_port = _get_http_proxy(secure=secure) if proxy_host or proxy_port: @@ -293,8 +416,7 @@ if request_failed(resp): if _is_retry_status(resp.status, retry_codes=retry_codes): - msg = '[HTTP Retry] {0} {1} -- Status Code {2}'.format( - method, url, resp.status) + msg = '[HTTP Retry] {0} {1} -- Status Code {2}'.format(method, url, resp.status) # Note if throttled and ensure a safe, minimum number of # retry attempts if _is_throttle_status(resp.status): @@ -302,31 +424,44 @@ max_retry = max(max_retry, THROTTLE_RETRIES) continue + # If we got a 410 (resource gone) for any reason, raise an exception. The caller will handle it by + # forcing a goal state refresh and retrying the call. if resp.status in RESOURCE_GONE_CODES: - raise ResourceGoneError() + response_error = read_response_error(resp) + raise ResourceGoneError(response_error) + + # If we got a 400 (bad request) because the container id is invalid, it could indicate a stale goal + # state. The caller will handle this exception by forcing a goal state refresh and retrying the call. 
+ if resp.status == httpclient.BAD_REQUEST: + response_error = read_response_error(resp) + if INVALID_CONTAINER_CONFIGURATION in response_error: + raise InvalidContainerError(response_error) return resp except httpclient.HTTPException as e: - msg = '[HTTP Failed] {0} {1} -- HttpException {2}'.format( - method, url, e) + clean_url = redact_sas_tokens_in_urls(url) + msg = '[HTTP Failed] {0} {1} -- HttpException {2}'.format(method, clean_url, e) if _is_retry_exception(e): continue break except IOError as e: IOErrorCounter.increment(host=host, port=port) - msg = '[HTTP Failed] {0} {1} -- IOError {2}'.format( - method, url, e) + clean_url = redact_sas_tokens_in_urls(url) + msg = '[HTTP Failed] {0} {1} -- IOError {2}'.format(method, clean_url, e) continue - raise HttpError("{0} -- {1} attempts made".format(msg,attempt)) + raise HttpError("{0} -- {1} attempts made".format(msg, attempt)) -def http_get(url, headers=None, use_proxy=False, - max_retry=DEFAULT_RETRIES, - retry_codes=RETRY_CODES, - retry_delay=DELAY_IN_SECONDS): +def http_get(url, + headers=None, + use_proxy=False, + max_retry=DEFAULT_RETRIES, + retry_codes=RETRY_CODES, + retry_delay=DELAY_IN_SECONDS): + return http_request("GET", url, None, headers=headers, use_proxy=use_proxy, @@ -335,10 +470,13 @@ retry_delay=retry_delay) -def http_head(url, headers=None, use_proxy=False, - max_retry=DEFAULT_RETRIES, - retry_codes=RETRY_CODES, - retry_delay=DELAY_IN_SECONDS): +def http_head(url, + headers=None, + use_proxy=False, + max_retry=DEFAULT_RETRIES, + retry_codes=RETRY_CODES, + retry_delay=DELAY_IN_SECONDS): + return http_request("HEAD", url, None, headers=headers, use_proxy=use_proxy, @@ -347,10 +485,14 @@ retry_delay=retry_delay) -def http_post(url, data, headers=None, use_proxy=False, - max_retry=DEFAULT_RETRIES, - retry_codes=RETRY_CODES, - retry_delay=DELAY_IN_SECONDS): +def http_post(url, + data, + headers=None, + use_proxy=False, + max_retry=DEFAULT_RETRIES, + retry_codes=RETRY_CODES, + 
retry_delay=DELAY_IN_SECONDS): + return http_request("POST", url, data, headers=headers, use_proxy=use_proxy, @@ -359,10 +501,14 @@ retry_delay=retry_delay) -def http_put(url, data, headers=None, use_proxy=False, - max_retry=DEFAULT_RETRIES, - retry_codes=RETRY_CODES, - retry_delay=DELAY_IN_SECONDS): +def http_put(url, + data, + headers=None, + use_proxy=False, + max_retry=DEFAULT_RETRIES, + retry_codes=RETRY_CODES, + retry_delay=DELAY_IN_SECONDS): + return http_request("PUT", url, data, headers=headers, use_proxy=use_proxy, @@ -371,10 +517,13 @@ retry_delay=retry_delay) -def http_delete(url, headers=None, use_proxy=False, +def http_delete(url, + headers=None, + use_proxy=False, max_retry=DEFAULT_RETRIES, retry_codes=RETRY_CODES, retry_delay=DELAY_IN_SECONDS): + return http_request("DELETE", url, None, headers=headers, use_proxy=use_proxy, @@ -382,12 +531,26 @@ retry_codes=retry_codes, retry_delay=retry_delay) + def request_failed(resp, ok_codes=OK_CODES): return not request_succeeded(resp, ok_codes=ok_codes) + def request_succeeded(resp, ok_codes=OK_CODES): return resp is not None and resp.status in ok_codes + +def request_not_modified(resp): + return resp is not None and resp.status in NOT_MODIFIED_CODES + + +def request_failed_at_hostplugin(resp, upstream_failure_codes=HOSTPLUGIN_UPSTREAM_FAILURE_CODES): + """ + Host plugin will return 502 for any upstream issue, so a failure is any 5xx except 502 + """ + return resp is not None and resp.status >= 500 and resp.status not in upstream_failure_codes + + def read_response_error(resp): result = '' if resp is not None: diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/shellutil.py walinuxagent-2.2.45/azurelinuxagent/common/utils/shellutil.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/shellutil.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/utils/shellutil.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux 
Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import subprocess @@ -57,52 +57,113 @@ Shell command util functions """ +def has_command(cmd): + """ + Return True if the given command is on the path + """ + return not run(cmd, False) -def run(cmd, chk_err=True): +def run(cmd, chk_err=True, expected_errors=[]): """ Calls run_get_output on 'cmd', returning only the return code. If chk_err=True then errors will be reported in the log. If chk_err=False then errors will be suppressed from the log. """ - retcode, out = run_get_output(cmd, chk_err) + retcode, out = run_get_output(cmd, chk_err=chk_err, expected_errors=expected_errors) return retcode -def run_get_output(cmd, chk_err=True, log_cmd=True): +def run_get_output(cmd, chk_err=True, log_cmd=True, expected_errors=[]): """ Wrapper for subprocess.check_output. Execute 'cmd'. Returns return code and STDOUT, trapping expected exceptions. Reports exceptions to Error if chk_err parameter is True + + For new callers, consider using run_command instead as it separates stdout from stderr, + returns only stdout on success, logs both outputs and return code on error and raises an exception. 
""" if log_cmd: - logger.verbose(u"Run '{0}'", cmd) + logger.verbose(u"Command: [{0}]", cmd) try: output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) - output = ustr(output, - encoding='utf-8', - errors="backslashreplace") + output = _encode_command_output(output) + except subprocess.CalledProcessError as e: + output = _encode_command_output(e.output) + + if chk_err: + msg = u"Command: [{0}], " \ + u"return code: [{1}], " \ + u"result: [{2}]".format(cmd, e.returncode, output) + if e.returncode in expected_errors: + logger.info(msg) + else: + logger.error(msg) + return e.returncode, output except Exception as e: - if type(e) is subprocess.CalledProcessError: - output = ustr(e.output, - encoding='utf-8', - errors="backslashreplace") - if chk_err: - if log_cmd: - logger.error(u"Command: '{0}'", e.cmd) - logger.error(u"Return code: {0}", e.returncode) - logger.error(u"Result: {0}", output) - return e.returncode, output - else: - logger.error( - u"'{0}' raised unexpected exception: '{1}'".format( - cmd, ustr(e))) - return -1, ustr(e) + if chk_err: + logger.error(u"Command [{0}] raised unexpected exception: [{1}]" + .format(cmd, ustr(e))) + return -1, ustr(e) return 0, output +def _encode_command_output(output): + return ustr(output, encoding='utf-8', errors="backslashreplace") + + +class CommandError(Exception): + """ + Exception raised by run_command when the command returns an error + """ + @staticmethod + def _get_message(command, returncode): + command_name = command[0] if isinstance(command, list) and len(command) > 0 else command + return "'{0}' failed: {1}".format(command_name, returncode) + + def __init__(self, command, returncode, stdout, stderr): + super(Exception, self).__init__(CommandError._get_message(command, returncode)) + self.command = command + self.returncode = returncode + self.stdout = stdout + self.stderr = stderr + + +def run_command(command, log_error=False): + """ + Executes the given command and returns its stdout as a 
string. + If there are any errors executing the command it logs details about the failure and raises a RunCommandException; + if 'log_error' is True, it also logs details about the error. + """ + def format_command(cmd): + return " ".join(cmd) if isinstance(cmd, list) else command + + try: + process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False) + stdout, stderr = process.communicate() + returncode = process.returncode + except Exception as e: + if log_error: + logger.error(u"Command [{0}] raised unexpected exception: [{1}]", format_command(command), ustr(e)) + raise + + if returncode != 0: + encoded_stdout = _encode_command_output(stdout) + encoded_stderr = _encode_command_output(stderr) + if log_error: + logger.error( + "Command: [{0}], return code: [{1}], stdout: [{2}] stderr: [{3}]", + format_command(command), + returncode, + encoded_stdout, + encoded_stderr) + raise CommandError(command=command, returncode=returncode, stdout=encoded_stdout, stderr=encoded_stderr) + + return _encode_command_output(stdout) + + def quote(word_list): """ Quote a list or tuple of strings for Unix Shell as words, using the diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/textutil.py walinuxagent-2.2.45/azurelinuxagent/common/utils/textutil.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/textutil.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/utils/textutil.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,19 +14,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ import base64 import crypt +import hashlib import random import re import string import struct import sys +import zlib import xml.dom.minidom as minidom -from distutils.version import LooseVersion as Version - def parse_doc(xml_text): """ @@ -272,9 +272,9 @@ def remove_bom(c): - ''' + """ bom is comprised of a sequence of three chars,0xef, 0xbb, 0xbf, in case of utf-8. - ''' + """ if not is_str_none_or_whitespace(c) and \ len(c) > 2 and \ str_to_ord(c[0]) > 128 and \ @@ -302,6 +302,21 @@ return base64_bytes +def compress(s): + """ + Compress a string, and return the base64 encoded result of the compression. + + This method returns a string instead of a byte array. It is expected + that this method is called to compress smallish strings, not to compress + the contents of a file. The output of this method is suitable for + embedding in log statements. + """ + from azurelinuxagent.common.version import PY_VERSION_MAJOR + if PY_VERSION_MAJOR > 2: + return base64.b64encode(zlib.compress(bytes(s, 'utf-8'))).decode('utf-8') + return base64.b64encode(zlib.compress(s)) + + def b64encode(s): from azurelinuxagent.common.version import PY_VERSION_MAJOR if PY_VERSION_MAJOR > 2: @@ -323,6 +338,7 @@ return shlex.split(s.encode('utf-8')) return shlex.split(s) + def swap_hexstring(s, width=2): r = len(s) % width if r != 0: @@ -334,17 +350,49 @@ s, re.IGNORECASE))) + def parse_json(json_str): """ Parse json string and return a resulting dictionary """ # trim null and whitespaces result = None - if not is_str_none_or_whitespace(json_str): + if not is_str_empty(json_str): import json result = json.loads(json_str.rstrip(' \t\r\n\0')) return result + def is_str_none_or_whitespace(s): return s is None or len(s) == 0 or s.isspace() + + +def is_str_empty(s): + return is_str_none_or_whitespace(s) or is_str_none_or_whitespace(s.rstrip(' \t\r\n\0')) + + +def hash_strings(string_list): + """ + Compute a 
cryptographic hash of a list of strings + + :param string_list: The strings to be hashed + :return: The cryptographic hash (digest) of the strings in the order provided + """ + sha1_hash = hashlib.sha1() + for item in string_list: + sha1_hash.update(item.encode()) + return sha1_hash.digest() + + +def format_memory_value(unit, value): + units = {'bytes': 1, 'kilobytes': 1024, 'megabytes': 1024*1024, 'gigabytes': 1024*1024*1024} + + if unit not in units: + raise ValueError("Unit must be one of {0}".format(units.keys())) + try: + value = float(value) + except TypeError: + raise TypeError('Value must be convertible to a float') + + return int(value * units[unit]) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/version.py walinuxagent-2.2.45/azurelinuxagent/common/version.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/version.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/common/version.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2019 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import os @@ -23,7 +23,7 @@ import azurelinuxagent.common.conf as conf import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.common.utils.flexible_version import FlexibleVersion -from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.future import ustr, get_linux_distribution def get_f5_platform(): @@ -80,14 +80,17 @@ elif 'OpenBSD' in platform.system(): release = re.sub('\-.*\Z', '', ustr(platform.release())) osinfo = ['openbsd', release, '', 'openbsd'] - elif 'linux_distribution' in dir(platform): - supported = platform._supported_dists + ('alpine',) - osinfo = list(platform.linux_distribution(full_distribution_name=0, - supported_dists=supported)) - full_name = platform.linux_distribution()[0].strip() - osinfo.append(full_name) + elif 'Linux' in platform.system(): + osinfo = get_linux_distribution(0, 'alpine') + elif 'NS-BSD' in platform.system(): + release = re.sub('\-.*\Z', '', ustr(platform.release())) + osinfo = ['nsbsd', release, '', 'nsbsd'] else: - osinfo = platform.dist() + try: + # dist() removed in Python 3.7 + osinfo = list(platform.dist()) + [''] + except: + osinfo = ['UNKNOWN', 'FFFF', '', ''] # The platform.py lib has issue with detecting oracle linux distribution. # Merge the following patch provided by oracle as a temporary fix. 
@@ -106,6 +109,9 @@ if os.path.exists("/etc/cp-release"): osinfo = get_checkpoint_platform() + if os.path.exists("/home/guestshell/azure"): + osinfo = ['iosxe', 'csr1000v', '', 'Cisco IOSXE Linux'] + # Remove trailing whitespace and quote in distro name osinfo[0] = osinfo[0].strip('"').strip(' ').lower() return osinfo @@ -113,7 +119,7 @@ AGENT_NAME = "WALinuxAgent" AGENT_LONG_NAME = "Azure Linux Agent" -AGENT_VERSION = '2.2.20' +AGENT_VERSION = '2.2.45' AGENT_LONG_VERSION = "{0}-{1}".format(AGENT_NAME, AGENT_VERSION) AGENT_DESCRIPTION = """ The Azure Linux Agent supports the provisioning and running of Linux @@ -129,6 +135,9 @@ AGENT_PKG_PATTERN = re.compile(AGENT_PATTERN+"\.zip") AGENT_DIR_PATTERN = re.compile(".*/{0}".format(AGENT_PATTERN)) +# The execution mode of the VM - IAAS or PAAS. Linux VMs are only executed in IAAS mode. +AGENT_EXECUTION_MODE = "IAAS" + EXT_HANDLER_PATTERN = b".*/WALinuxAgent-(\d+.\d+.\d+[.\d+]*).*-run-exthandlers" EXT_HANDLER_REGEX = re.compile(EXT_HANDLER_PATTERN) @@ -161,14 +170,17 @@ version = AGENT_VERSION return agent, FlexibleVersion(version) + def is_agent_package(path): path = os.path.basename(path) return not re.match(AGENT_PKG_PATTERN, path) is None + def is_agent_path(path): path = os.path.basename(path) return not re.match(AGENT_NAME_PATTERN, path) is None + CURRENT_AGENT, CURRENT_VERSION = set_current_agent() diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/__init__.py walinuxagent-2.2.45/azurelinuxagent/daemon/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/daemon/__init__.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.daemon.main import get_daemon_handler diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/main.py walinuxagent-2.2.45/azurelinuxagent/daemon/main.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/main.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/daemon/main.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import os @@ -26,6 +26,7 @@ import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil +from azurelinuxagent.common.cgroupconfigurator import CGroupConfigurator from azurelinuxagent.common.event import add_event, WALAEventOperation from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil @@ -65,6 +66,9 @@ PY_VERSION_MICRO) self.check_pid() + self.initialize_environment() + + CGroupConfigurator.get_instance().create_agent_cgroups(track_cgroups=False) # If FIPS is enabled, set the OpenSSL environment variable # Note: @@ -95,6 +99,23 @@ fileutil.write_file(pid_file, ustr(os.getpid())) + def sleep_if_disabled(self): + agent_disabled_file_path = conf.get_disable_agent_file_path() + if os.path.exists(agent_disabled_file_path): + import threading + logger.warn("Disabling the guest agent by sleeping forever; " + "to re-enable, remove {0} and restart" 
+ .format(agent_disabled_file_path)) + self.running = False + disable_event = threading.Event() + disable_event.wait() + + def initialize_environment(self): + # Create lib dir + if not os.path.isdir(conf.get_lib_dir()): + fileutil.mkdir(conf.get_lib_dir(), mode=0o700) + os.chdir(conf.get_lib_dir()) + def daemon(self, child_args=None): logger.info("Run daemon") @@ -105,11 +126,6 @@ self.provision_handler = get_provision_handler() self.update_handler = get_update_handler() - # Create lib dir - if not os.path.isdir(conf.get_lib_dir()): - fileutil.mkdir(conf.get_lib_dir(), mode=0o700) - os.chdir(conf.get_lib_dir()) - if conf.get_detect_scvmm_env(): self.scvmm_handler.run() @@ -117,14 +133,15 @@ self.resourcedisk_handler.run() # Always redetermine the protocol start (e.g., wireserver vs. - # on-premise) since a VHD can move between environments + # on-premise) since a VHD can move between environments self.protocol_util.clear_protocol() self.provision_handler.run() # Enable RDMA, continue in errors if conf.enable_rdma(): - self.rdma_handler.install_driver() + nd_version = self.rdma_handler.get_rdma_version() + self.rdma_handler.install_driver_if_needed() logger.info("RDMA capabilities are enabled in configuration") try: @@ -138,11 +155,13 @@ raise Exception("Attempt to setup RDMA without Wireserver") client.update_goal_state(forced=True) - setup_rdma_device() + setup_rdma_device(nd_version) except Exception as e: logger.error("Error setting up rdma device: %s" % e) else: logger.info("RDMA capabilities are not enabled, skipping") + self.sleep_if_disabled() + while self.running: self.update_handler.run_latest(child_args=child_args) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/resourcedisk/default.py walinuxagent-2.2.45/azurelinuxagent/daemon/resourcedisk/default.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/resourcedisk/default.py 2018-01-08 22:57:56.000000000 +0000 +++ 
walinuxagent-2.2.45/azurelinuxagent/daemon/resourcedisk/default.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,11 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import os import re +import stat import sys import threading from time import sleep @@ -124,12 +125,13 @@ force_option = 'F' if self.fs == 'xfs': force_option = 'f' - mkfs_string = "mkfs.{0} -{2} {1}".format(self.fs, partition, force_option) + mkfs_string = "mkfs.{0} -{2} {1}".format( + self.fs, partition, force_option) if "gpt" in ret[1]: logger.info("GPT detected, finding partitions") parts = [x for x in ret[1].split("\n") if - re.match("^\s*[0-9]+", x)] + re.match(r"^\s*[0-9]+", x)] logger.info("Found {0} GPT partition(s).", len(parts)) if len(parts) > 1: logger.info("Removing old GPT partitions") @@ -138,18 +140,23 @@ shellutil.run("parted {0} rm {1}".format(device, i)) logger.info("Creating new GPT partition") - shellutil.run("parted {0} mkpart primary 0% 100%".format(device)) + shellutil.run( + "parted {0} mkpart primary 0% 100%".format(device)) logger.info("Format partition [{0}]", mkfs_string) shellutil.run(mkfs_string) else: logger.info("GPT not detected, determining filesystem") - ret = self.change_partition_type(suppress_message=True, option_str="{0} 1 -n".format(device)) + ret = self.change_partition_type( + suppress_message=True, + option_str="{0} 1 -n".format(device)) ptype = ret[1].strip() if ptype == "7" and self.fs != "ntfs": logger.info("The partition is formatted with ntfs, updating " "partition type to 83") - self.change_partition_type(suppress_message=False, option_str="{0} 1 83".format(device)) + 
self.change_partition_type( + suppress_message=False, + option_str="{0} 1 83".format(device)) self.reread_partition_table(device) logger.info("Format partition [{0}]", mkfs_string) shellutil.run(mkfs_string) @@ -169,7 +176,8 @@ attempts -= 1 if not os.path.exists(partition): - raise ResourceDiskError("Partition was not created [{0}]".format(partition)) + raise ResourceDiskError( + "Partition was not created [{0}]".format(partition)) logger.info("Mount resource disk [{0}]", mount_string) ret, output = shellutil.run_get_output(mount_string, chk_err=False) @@ -187,7 +195,7 @@ self.reread_partition_table(device) - ret, output = shellutil.run_get_output(mount_string) + ret, output = shellutil.run_get_output(mount_string, chk_err=False) if ret: logger.warn("Failed to mount resource disk. " "Attempting to format and retry mount. [{0}]", @@ -215,14 +223,19 @@ """ command_to_use = '--part-type' - input = "sfdisk {0} {1} {2}".format(command_to_use, '-f' if suppress_message else '', option_str) - err_code, output = shellutil.run_get_output(input, chk_err=False, log_cmd=True) + input = "sfdisk {0} {1} {2}".format( + command_to_use, '-f' if suppress_message else '', option_str) + err_code, output = shellutil.run_get_output( + input, chk_err=False, log_cmd=True) # fall back to -c if err_code != 0: - logger.info("sfdisk with --part-type failed [{0}], retrying with -c", err_code) + logger.info( + "sfdisk with --part-type failed [{0}], retrying with -c", + err_code) command_to_use = '-c' - input = "sfdisk {0} {1} {2}".format(command_to_use, '-f' if suppress_message else '', option_str) + input = "sfdisk {0} {1} {2}".format( + command_to_use, '-f' if suppress_message else '', option_str) err_code, output = shellutil.run_get_output(input, log_cmd=True) if err_code == 0: @@ -245,16 +258,30 @@ else: return 'mount {0} {1}'.format(partition, mount_point) + @staticmethod + def check_existing_swap_file(swapfile, swaplist, size): + if swapfile in swaplist and os.path.isfile( + swapfile) and 
os.path.getsize(swapfile) == size: + logger.info("Swap already enabled") + # restrict access to owner (remove all access from group, others) + swapfile_mode = os.stat(swapfile).st_mode + if swapfile_mode & (stat.S_IRWXG | stat.S_IRWXO): + swapfile_mode = swapfile_mode & ~(stat.S_IRWXG | stat.S_IRWXO) + logger.info( + "Changing mode of {0} to {1:o}".format( + swapfile, swapfile_mode)) + os.chmod(swapfile, swapfile_mode) + return True + + return False + def create_swap_space(self, mount_point, size_mb): size_kb = size_mb * 1024 size = size_kb * 1024 swapfile = os.path.join(mount_point, 'swapfile') swaplist = shellutil.run_get_output("swapon -s")[1] - if swapfile in swaplist \ - and os.path.isfile(swapfile) \ - and os.path.getsize(swapfile) == size: - logger.info("Swap already enabled") + if self.check_existing_swap_file(swapfile, swaplist, size): return if os.path.isfile(swapfile) and os.path.getsize(swapfile) != size: @@ -296,7 +323,8 @@ os.remove(filename) # If file system is xfs, use dd right away as we have been reported that - # swap enabling fails in xfs fs when disk space is allocated with fallocate + # swap enabling fails in xfs fs when disk space is allocated with + # fallocate ret = 0 fn_sh = shellutil.quote((filename,)) if self.fs != 'xfs': @@ -305,13 +333,21 @@ # Probable errors: # - OSError: Seen on Cygwin, libc notimpl? # - AttributeError: What if someone runs this under... - with open(filename, 'w') as f: - try: - os.posix_fallocate(f.fileno(), 0, nbytes) - return 0 - except: - # Not confident with this thing, just keep trying... - pass + fd = None + + try: + fd = os.open( + filename, + os.O_CREAT | os.O_WRONLY | os.O_EXCL, + stat.S_IRUSR | stat.S_IWUSR) + os.posix_fallocate(fd, 0, nbytes) + return 0 + except BaseException: + # Not confident with this thing, just keep trying... 
+ pass + finally: + if fd is not None: + os.close(fd) # fallocate command ret = shellutil.run( diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/resourcedisk/factory.py walinuxagent-2.2.45/azurelinuxagent/daemon/resourcedisk/factory.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/resourcedisk/factory.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/daemon/resourcedisk/factory.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,17 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # -import azurelinuxagent.common.logger as logger -from azurelinuxagent.common.utils.textutil import Version from azurelinuxagent.common.version import DISTRO_NAME, \ DISTRO_VERSION, \ DISTRO_FULL_NAME from .default import ResourceDiskHandler from .freebsd import FreeBSDResourceDiskHandler from .openbsd import OpenBSDResourceDiskHandler +from .openwrt import OpenWRTResourceDiskHandler + +from distutils.version import LooseVersion as Version + def get_resourcedisk_handler(distro_name=DISTRO_NAME, distro_version=DISTRO_VERSION, @@ -33,5 +35,8 @@ if distro_name == "openbsd": return OpenBSDResourceDiskHandler() + if distro_name == "openwrt": + return OpenWRTResourceDiskHandler() + return ResourceDiskHandler() diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/resourcedisk/freebsd.py walinuxagent-2.2.45/azurelinuxagent/daemon/resourcedisk/freebsd.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/resourcedisk/freebsd.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/daemon/resourcedisk/freebsd.py 2019-11-07 
00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,14 +14,17 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # +import os import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil +import azurelinuxagent.common.conf as conf from azurelinuxagent.common.exception import ResourceDiskError from azurelinuxagent.daemon.resourcedisk.default import ResourceDiskHandler + class FreeBSDResourceDiskHandler(ResourceDiskHandler): """ This class handles resource disk mounting for FreeBSD. @@ -34,6 +37,7 @@ 1. MBR: The resource disk partition is /dev/da1s1 2. GPT: The resource disk partition is /dev/da1p2, /dev/da1p1 is for reserved usage. """ + def __init__(self): super(FreeBSDResourceDiskHandler, self).__init__() @@ -50,25 +54,30 @@ def mount_resource_disk(self, mount_point): fs = self.fs if fs != 'ufs': - raise ResourceDiskError("Unsupported filesystem type:{0}, only ufs is supported.".format(fs)) + raise ResourceDiskError( + "Unsupported filesystem type:{0}, only ufs is supported.".format(fs)) # 1. 
Detect device err, output = shellutil.run_get_output('gpart list') if err: - raise ResourceDiskError("Unable to detect resource disk device:{0}".format(output)) + raise ResourceDiskError( + "Unable to detect resource disk device:{0}".format(output)) disks = self.parse_gpart_list(output) device = self.osutil.device_for_ide_port(1) - if device is None or not device in disks: - # fallback logic to find device - err, output = shellutil.run_get_output('camcontrol periphlist 2:1:0') + if device is None or device not in disks: + # fallback logic to find device + err, output = shellutil.run_get_output( + 'camcontrol periphlist 2:1:0') if err: # try again on "3:1:0" - err, output = shellutil.run_get_output('camcontrol periphlist 3:1:0') + err, output = shellutil.run_get_output( + 'camcontrol periphlist 3:1:0') if err: - raise ResourceDiskError("Unable to detect resource disk device:{0}".format(output)) + raise ResourceDiskError( + "Unable to detect resource disk device:{0}".format(output)) - # 'da1: generation: 4 index: 1 status: MORE\npass2: generation: 4 index: 2 status: LAST\n' + # 'da1: generation: 4 index: 1 status: MORE\npass2: generation: 4 index: 2 status: LAST\n' for line in output.split('\n'): index = line.find(':') if index > 0: @@ -89,9 +98,11 @@ elif partition_table_type == 'GPT': provider_name = device + 'p2' else: - raise ResourceDiskError("Unsupported partition table type:{0}".format(output)) + raise ResourceDiskError( + "Unsupported partition table type:{0}".format(output)) - err, output = shellutil.run_get_output('gpart show -p {0}'.format(device)) + err, output = shellutil.run_get_output( + 'gpart show -p {0}'.format(device)) if err or output.find(provider_name) == -1: raise ResourceDiskError("Resource disk partition not found.") @@ -110,14 +121,63 @@ mount_cmd = 'mount -t {0} {1} {2}'.format(fs, partition, mount_point) err = shellutil.run(mount_cmd, chk_err=False) if err: - logger.info('Creating {0} filesystem on partition {1}'.format(fs, partition)) - 
err, output = shellutil.run_get_output('newfs -U {0}'.format(partition)) + logger.info( + 'Creating {0} filesystem on partition {1}'.format( + fs, partition)) + err, output = shellutil.run_get_output( + 'newfs -U {0}'.format(partition)) if err: - raise ResourceDiskError("Failed to create new filesystem on partition {0}, error:{1}" - .format(partition, output)) + raise ResourceDiskError( + "Failed to create new filesystem on partition {0}, error:{1}" .format( + partition, output)) err, output = shellutil.run_get_output(mount_cmd, chk_err=False) if err: - raise ResourceDiskError("Failed to mount partition {0}, error {1}".format(partition, output)) - - logger.info("Resource disk partition {0} is mounted at {1} with fstype {2}", partition, mount_point, fs) + raise ResourceDiskError( + "Failed to mount partition {0}, error {1}".format( + partition, output)) + + logger.info( + "Resource disk partition {0} is mounted at {1} with fstype {2}", + partition, + mount_point, + fs) return mount_point + + def create_swap_space(self, mount_point, size_mb): + size_kb = size_mb * 1024 + size = size_kb * 1024 + swapfile = os.path.join(mount_point, 'swapfile') + swaplist = shellutil.run_get_output("swapctl -l")[1] + + if self.check_existing_swap_file(swapfile, swaplist, size): + return + + if os.path.isfile(swapfile) and os.path.getsize(swapfile) != size: + logger.info("Remove old swap file") + shellutil.run("swapoff -a", chk_err=False) + os.remove(swapfile) + + if not os.path.isfile(swapfile): + logger.info("Create swap file") + self.mkfile(swapfile, size_kb * 1024) + + mddevice = shellutil.run_get_output( + "mdconfig -a -t vnode -f {0}".format(swapfile))[1].rstrip() + shellutil.run("chmod 0600 /dev/{0}".format(mddevice)) + + if conf.get_resourcedisk_enable_swap_encryption(): + shellutil.run("kldload aesni") + shellutil.run("kldload cryptodev") + shellutil.run("kldload geom_eli") + shellutil.run( + "geli onetime -e AES-XTS -l 256 -d /dev/{0}".format(mddevice)) + shellutil.run("chmod 
0600 /dev/{0}.eli".format(mddevice)) + if shellutil.run("swapon /dev/{0}.eli".format(mddevice)): + raise ResourceDiskError("/dev/{0}.eli".format(mddevice)) + logger.info( + "Enabled {0}KB of swap at /dev/{1}.eli ({2})".format(size_kb, mddevice, swapfile)) + else: + if shellutil.run("swapon /dev/{0}".format(mddevice)): + raise ResourceDiskError("/dev/{0}".format(mddevice)) + logger.info( + "Enabled {0}KB of swap at /dev/{1} ({2})".format(size_kb, mddevice, swapfile)) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/resourcedisk/__init__.py walinuxagent-2.2.45/azurelinuxagent/daemon/resourcedisk/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/resourcedisk/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/daemon/resourcedisk/__init__.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.daemon.resourcedisk.factory import get_resourcedisk_handler diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/resourcedisk/openbsd.py walinuxagent-2.2.45/azurelinuxagent/daemon/resourcedisk/openbsd.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/resourcedisk/openbsd.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/daemon/resourcedisk/openbsd.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # Copyright 2017 Reyk Floeter # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and OpenSSL 1.0+ +# Requires Python 2.6+ and OpenSSL 1.0+ # import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/resourcedisk/openwrt.py walinuxagent-2.2.45/azurelinuxagent/daemon/resourcedisk/openwrt.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/resourcedisk/openwrt.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/daemon/resourcedisk/openwrt.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,135 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2018 Microsoft Corporation +# Copyright 2018 Sonus Networks, Inc. (d.b.a. Ribbon Communications Operating Company) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.6+ and Openssl 1.0+ +# +import os +import errno as errno + +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.shellutil as shellutil +import azurelinuxagent.common.conf as conf +from azurelinuxagent.common.exception import ResourceDiskError +from azurelinuxagent.daemon.resourcedisk.default import ResourceDiskHandler + +class OpenWRTResourceDiskHandler(ResourceDiskHandler): + def __init__(self): + super(OpenWRTResourceDiskHandler, self).__init__() + # Fase File System (FFS) is UFS + if self.fs == 'ufs' or self.fs == 'ufs2': + self.fs = 'ffs' + + def reread_partition_table(self, device): + ret, output = shellutil.run_get_output("hdparm -z {0}".format(device), chk_err=False) + if ret != 0: + logger.warn("Failed refresh the partition table.") + + def mount_resource_disk(self, mount_point): + device = self.osutil.device_for_ide_port(1) + if device is None: + raise ResourceDiskError("unable to detect disk topology") + logger.info('Resource disk device {0} found.', device) + + # 2. Get partition + device = "/dev/{0}".format(device) + partition = device + "1" + logger.info('Resource disk partition {0} found.', partition) + + # 3. 
Mount partition + mount_list = shellutil.run_get_output("mount")[1] + existing = self.osutil.get_mount_point(mount_list, device) + if existing: + logger.info("Resource disk [{0}] is already mounted [{1}]", + partition, + existing) + return existing + + try: + fileutil.mkdir(mount_point, mode=0o755) + except OSError as ose: + msg = "Failed to create mount point " \ + "directory [{0}]: {1}".format(mount_point, ose) + logger.error(msg) + raise ResourceDiskError(msg=msg, inner=ose) + + force_option = 'F' + if self.fs == 'xfs': + force_option = 'f' + mkfs_string = "mkfs.{0} -{2} {1}".format(self.fs, partition, force_option) + + # Compare to the Default mount_resource_disk, we don't check for GPT that is not supported on OpenWRT + ret = self.change_partition_type(suppress_message=True, option_str="{0} 1 -n".format(device)) + ptype = ret[1].strip() + if ptype == "7" and self.fs != "ntfs": + logger.info("The partition is formatted with ntfs, updating " + "partition type to 83") + self.change_partition_type(suppress_message=False, option_str="{0} 1 83".format(device)) + self.reread_partition_table(device) + logger.info("Format partition [{0}]", mkfs_string) + shellutil.run(mkfs_string) + else: + logger.info("The partition type is {0}", ptype) + + mount_options = conf.get_resourcedisk_mountoptions() + mount_string = self.get_mount_string(mount_options, + partition, + mount_point) + attempts = 5 + while not os.path.exists(partition) and attempts > 0: + logger.info("Waiting for partition [{0}], {1} attempts remaining", + partition, + attempts) + sleep(5) + attempts -= 1 + + if not os.path.exists(partition): + raise ResourceDiskError("Partition was not created [{0}]".format(partition)) + + if os.path.ismount(mount_point): + logger.warn("Disk is already mounted on {0}", mount_point) + else: + # Some kernels seem to issue an async partition re-read after a + # command invocation. 
This causes mount to fail if the + # partition re-read is not complete by the time mount is + # attempted. Seen in CentOS 7.2. Force a sequential re-read of + # the partition and try mounting. + logger.info("Mounting after re-reading partition info.") + + self.reread_partition_table(device) + + logger.info("Mount resource disk [{0}]", mount_string) + ret, output = shellutil.run_get_output(mount_string) + if ret: + logger.warn("Failed to mount resource disk. " + "Attempting to format and retry mount. [{0}]", + output) + + shellutil.run(mkfs_string) + ret, output = shellutil.run_get_output(mount_string) + if ret: + raise ResourceDiskError("Could not mount {0} " + "after syncing partition table: " + "[{1}] {2}".format(partition, + ret, + output)) + + logger.info("Resource disk {0} is mounted at {1} with {2}", + device, + mount_point, + self.fs) + return mount_point diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/scvmm.py walinuxagent-2.2.45/azurelinuxagent/daemon/scvmm.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/scvmm.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/daemon/scvmm.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import re diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/distro/__init__.py walinuxagent-2.2.45/azurelinuxagent/distro/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/distro/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/distro/__init__.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,6 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/distro/suse/__init__.py walinuxagent-2.2.45/azurelinuxagent/distro/suse/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/distro/suse/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/distro/suse/__init__.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,6 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/ga/env.py walinuxagent-2.2.45/azurelinuxagent/ga/env.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/ga/env.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/ga/env.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import re @@ -22,9 +22,6 @@ import socket import time import threading - -import operator - import datetime import azurelinuxagent.common.conf as conf @@ -32,10 +29,10 @@ from azurelinuxagent.common.dhcp import get_dhcp_handler from azurelinuxagent.common.event import add_periodic, WALAEventOperation +from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.protocol import get_protocol_util -from azurelinuxagent.common.protocol.wire import INCARNATION_FILE_NAME -from azurelinuxagent.common.utils import fileutil +from azurelinuxagent.common.utils.archive import StateArchiver from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION CACHE_PATTERNS = [ @@ -46,7 +43,7 @@ MAXIMUM_CACHED_FILES = 50 -CACHE_PURGE_INTERVAL = datetime.timedelta(hours=24) +ARCHIVE_INTERVAL = datetime.timedelta(hours=24) def get_env_handler(): @@ -67,10 +64,11 @@ self.protocol_util = get_protocol_util() self.stopped = True self.hostname = None - self.dhcp_id = None + self.dhcp_id_list = [] self.server_thread = None self.dhcp_warning_enabled = True - self.last_purge = None 
+ self.last_archive = None + self.archiver = StateArchiver(conf.get_lib_dir()) def run(self): if not self.stopped: @@ -81,9 +79,16 @@ logger.info("Start env monitor service.") self.dhcp_handler.conf_routes() self.hostname = self.osutil.get_hostname_record() - self.dhcp_id = self.osutil.get_dhcp_pid() + self.dhcp_id_list = self.get_dhcp_client_pid() + self.start() + + def is_alive(self): + return self.server_thread.is_alive() + + def start(self): self.server_thread = threading.Thread(target=self.monitor) self.server_thread.setDaemon(True) + self.server_thread.setName("EnvHandler") self.server_thread.start() def monitor(self): @@ -94,20 +99,29 @@ Purge unnecessary files from disk cache. """ protocol = self.protocol_util.get_protocol() + reset_firewall_fules = False while not self.stopped: self.osutil.remove_rules_files() if conf.enable_firewall(): - success = self.osutil.enable_firewall( - dst_ip=protocol.endpoint, - uid=os.getuid()) + # If the rules ever change we must reset all rules and start over again. + # + # There was a rule change at 2.2.26, which started dropping non-root traffic + # to WireServer. The previous rules allowed traffic. Having both rules in + # place negated the fix in 2.2.26. 
+ if not reset_firewall_fules: + self.osutil.remove_firewall(dst_ip=protocol.endpoint, uid=os.getuid()) + reset_firewall_fules = True + + success = self.osutil.enable_firewall(dst_ip=protocol.endpoint, uid=os.getuid()) + add_periodic( logger.EVERY_HOUR, AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.Firewall, is_success=success, - log_event=True) + log_event=False) timeout = conf.get_root_device_scsi_timeout() if timeout is not None: @@ -118,7 +132,7 @@ self.handle_dhclient_restart() - self.purge_disk_cache() + self.archive_history() time.sleep(5) @@ -132,91 +146,50 @@ self.osutil.publish_hostname(curr_hostname) self.hostname = curr_hostname - def handle_dhclient_restart(self): - if self.dhcp_id is None: + def get_dhcp_client_pid(self): + pid = [] + + try: + # return a sorted list since handle_dhclient_restart needs to compare the previous value with + # the new value and the comparison should not be affected by the order of the items in the list + pid = sorted(self.osutil.get_dhcp_pid()) + + if len(pid) == 0 and self.dhcp_warning_enabled: + logger.warn("Dhcp client is not running.") + except Exception as exception: if self.dhcp_warning_enabled: - logger.warn("Dhcp client is not running. ") - self.dhcp_id = self.osutil.get_dhcp_pid() - # disable subsequent error logging - self.dhcp_warning_enabled = self.dhcp_id is not None + logger.error("Failed to get the PID of the DHCP client: {0}", ustr(exception)) + + self.dhcp_warning_enabled = len(pid) != 0 + + return pid + + def handle_dhclient_restart(self): + if len(self.dhcp_id_list) == 0: + self.dhcp_id_list = self.get_dhcp_client_pid() return - # the dhcp process has not changed since the last check - if self.osutil.check_pid_alive(self.dhcp_id.strip()): + if all(self.osutil.check_pid_alive(pid) for pid in self.dhcp_id_list): return - new_pid = self.osutil.get_dhcp_pid() - if new_pid is not None and new_pid != self.dhcp_id: - logger.info("EnvMonitor: Detected dhcp client restart. 
" - "Restoring routing table.") + new_pid = self.get_dhcp_client_pid() + if len(new_pid) != 0 and new_pid != self.dhcp_id_list: + logger.info("EnvMonitor: Detected dhcp client restart. Restoring routing table.") self.dhcp_handler.conf_routes() - self.dhcp_id = new_pid + self.dhcp_id_list = new_pid - def purge_disk_cache(self): + def archive_history(self): """ - Ensure the number of cached files does not exceed a maximum count. - Purge only once per interval, and never delete files related to the - current incarnation. + Purge history if we have exceed the maximum count. + Create a .zip of the history that has been preserved. """ - if self.last_purge is not None \ + if self.last_archive is not None \ and datetime.datetime.utcnow() < \ - self.last_purge + CACHE_PURGE_INTERVAL: + self.last_archive + ARCHIVE_INTERVAL: return - current_incarnation = -1 - self.last_purge = datetime.datetime.utcnow() - incarnation_file = os.path.join(conf.get_lib_dir(), - INCARNATION_FILE_NAME) - if os.path.exists(incarnation_file): - last_incarnation = fileutil.read_file(incarnation_file) - if last_incarnation is not None: - current_incarnation = int(last_incarnation) - - logger.info("Purging disk cache, current incarnation is {0}" - .format('not found' - if current_incarnation == -1 - else current_incarnation)) - - # Create tuples: (prefix, suffix, incarnation, name, file_modified) - files = [] - for f in os.listdir(conf.get_lib_dir()): - full_path = os.path.join(conf.get_lib_dir(), f) - for pattern in CACHE_PATTERNS: - m = pattern.match(f) - if m is not None: - prefix = m.group(1) - suffix = m.group(3) - incarnation = int(m.group(2)) - file_modified = os.path.getmtime(full_path) - t = (prefix, suffix, incarnation, f, file_modified) - files.append(t) - break - - if len(files) <= 0: - return - - # Sort by (prefix, suffix, file_modified) in reverse order - files = sorted(files, key=operator.itemgetter(0, 1, 4), reverse=True) - - # Remove any files in excess of the maximum allowed - # -- 
Restart then whenever the (prefix, suffix) change - count = 0 - last_match = [None, None] - for f in files: - if last_match != f[0:2]: - last_match = f[0:2] - count = 0 - - if current_incarnation == f[2]: - logger.verbose("Skipping {0}".format(f[3])) - continue - - count += 1 - - if count > MAXIMUM_CACHED_FILES: - full_name = os.path.join(conf.get_lib_dir(), f[3]) - logger.verbose("Deleting {0}".format(full_name)) - os.remove(full_name) + self.archiver.purge() + self.archiver.archive() def stop(self): """ diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/ga/exthandlers.py walinuxagent-2.2.45/azurelinuxagent/ga/exthandlers.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/ga/exthandlers.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/ga/exthandlers.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,65 +14,98 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import datetime import glob import json +import operator import os -import os.path +import random import re import shutil import stat -import subprocess +import sys +import tempfile import time +import traceback import zipfile import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil -import azurelinuxagent.common.utils.restutil as restutil -import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.version as version - -from azurelinuxagent.common.event import add_event, WALAEventOperation -from azurelinuxagent.common.exception import ExtensionError, ProtocolError, HttpError +from azurelinuxagent.common.cgroupconfigurator import CGroupConfigurator +from azurelinuxagent.common.datacontract import get_properties, set_properties +from azurelinuxagent.common.errorstate import ErrorState, ERROR_STATE_DELTA_INSTALL +from azurelinuxagent.common.event import add_event, WALAEventOperation, elapsed_milliseconds, report_event +from azurelinuxagent.common.exception import ExtensionError, ProtocolError, ProtocolNotFoundError, \ + ExtensionDownloadError, ExtensionErrorCodes, ExtensionUpdateError, ExtensionOperationError from azurelinuxagent.common.future import ustr -from azurelinuxagent.common.version import AGENT_VERSION +from azurelinuxagent.common.protocol import get_protocol_util from azurelinuxagent.common.protocol.restapi import ExtHandlerStatus, \ - ExtensionStatus, \ - ExtensionSubStatus, \ - Extension, \ - VMStatus, ExtHandler, \ - get_properties, \ - set_properties + ExtensionStatus, \ + ExtensionSubStatus, \ + VMStatus, ExtHandler from azurelinuxagent.common.utils.flexible_version import FlexibleVersion -from azurelinuxagent.common.utils.textutil import Version -from azurelinuxagent.common.protocol import get_protocol_util -from azurelinuxagent.common.version 
import AGENT_NAME, CURRENT_AGENT, CURRENT_VERSION +from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION, GOAL_STATE_AGENT_VERSION, \ + DISTRO_NAME, DISTRO_VERSION, PY_VERSION_MAJOR, PY_VERSION_MINOR, PY_VERSION_MICRO -#HandlerEnvironment.json schema version +# HandlerEnvironment.json schema version HANDLER_ENVIRONMENT_VERSION = 1.0 +EXTENSION_STATUS_ERROR = 'error' +EXTENSION_STATUS_SUCCESS = 'success' VALID_EXTENSION_STATUS = ['transitioning', 'error', 'success', 'warning'] +EXTENSION_TERMINAL_STATUSES = ['error', 'success'] VALID_HANDLER_STATUS = ['Ready', 'NotReady', "Installing", "Unresponsive"] HANDLER_PATTERN = "^([^-]+)-(\d+(?:\.\d+)*)" -HANDLER_NAME_PATTERN = re.compile(HANDLER_PATTERN+"$", re.IGNORECASE) +HANDLER_NAME_PATTERN = re.compile(HANDLER_PATTERN + "$", re.IGNORECASE) HANDLER_PKG_EXT = ".zip" -HANDLER_PKG_PATTERN = re.compile(HANDLER_PATTERN+"\\"+HANDLER_PKG_EXT+"$", - re.IGNORECASE) +HANDLER_PKG_PATTERN = re.compile(HANDLER_PATTERN + r"\.zip$", re.IGNORECASE) + +DEFAULT_EXT_TIMEOUT_MINUTES = 90 + +AGENT_STATUS_FILE = "waagent_status.json" + +NUMBER_OF_DOWNLOAD_RETRIES = 5 + +# This is the default value for the env variables, whenever we call a command which is not an update scenario, we +# set the env variable value to NOT_RUN to reduce ambiguity for the extension publishers +NOT_RUN = "NOT_RUN" + + +class ExtCommandEnvVariable(object): + Prefix = "AZURE_GUEST_AGENT" + DisableReturnCode = "%s_DISABLE_CMD_EXIT_CODE" % Prefix + UninstallReturnCode = "%s_UNINSTALL_CMD_EXIT_CODE" % Prefix + ExtensionPath = "%s_EXTENSION_PATH" % Prefix + ExtensionVersion = "%s_EXTENSION_VERSION" % Prefix + ExtensionSeqNumber = "ConfigSequenceNumber" # At par with Windows Guest Agent + UpdatingFromVersion = "%s_UPDATING_FROM_VERSION" % Prefix + + +def get_traceback(e): + if sys.version_info[0] == 3: + return e.__traceback__ + elif sys.version_info[0] == 2: + ex_type, ex, tb = sys.exc_info() + return tb + def validate_has_key(obj, key, fullname): if key 
not in obj: raise ExtensionError("Missing: {0}".format(fullname)) + def validate_in_range(val, valid_range, name): if val not in valid_range: raise ExtensionError("Invalid {0}: {1}".format(name, val)) + def parse_formatted_message(formatted_message): if formatted_message is None: return None @@ -80,8 +113,9 @@ validate_has_key(formatted_message, 'message', 'formattedMessage/message') return formatted_message.get('message') + def parse_ext_substatus(substatus): - #Check extension sub status format + # Check extension sub status format validate_has_key(substatus, 'status', 'substatus/status') validate_in_range(substatus['status'], VALID_EXTENSION_STATUS, 'substatus/status') @@ -93,45 +127,50 @@ status.message = parse_formatted_message(formatted_message) return status + def parse_ext_status(ext_status, data): if data is None or len(data) is None: return - #Currently, only the first status will be reported + # Currently, only the first status will be reported data = data[0] - #Check extension status format + # Check extension status format validate_has_key(data, 'status', 'status') status_data = data['status'] validate_has_key(status_data, 'status', 'status/status') - - validate_in_range(status_data['status'], VALID_EXTENSION_STATUS, - 'status/status') + + status = status_data['status'] + if status not in VALID_EXTENSION_STATUS: + status = EXTENSION_STATUS_ERROR applied_time = status_data.get('configurationAppliedTime') ext_status.configurationAppliedTime = applied_time ext_status.operation = status_data.get('operation') - ext_status.status = status_data.get('status') + ext_status.status = status ext_status.code = status_data.get('code', 0) formatted_message = status_data.get('formattedMessage') ext_status.message = parse_formatted_message(formatted_message) - substatus_list = status_data.get('substatus') + substatus_list = status_data.get('substatus', []) + # some extensions incorrectly report an empty substatus with a null value if substatus_list is None: - return + 
substatus_list = [] for substatus in substatus_list: if substatus is not None: ext_status.substatusList.append(parse_ext_substatus(substatus)) -# This code migrates, if it exists, handler state and status from an -# agent-owned directory into the handler-owned config directory -# -# Notes: -# - The v2.0.x branch wrote all handler-related state into the handler-owned -# config directory (e.g., /var/lib/waagent/Microsoft.Azure.Extensions.LinuxAsm-2.0.1/config). -# - The v2.1.x branch original moved that state into an agent-owned handler -# state directory (e.g., /var/lib/waagent/handler_state). -# - This move can cause v2.1.x agents to multiply invoke a handler's install -# command. It also makes clean-up more difficult since the agent must -# remove the state as well as the handler directory. + def migrate_handler_state(): + """ + Migrate handler state and status (if they exist) from an agent-owned directory into the + handler-owned config directory + + Notes: + - The v2.0.x branch wrote all handler-related state into the handler-owned config + directory (e.g., /var/lib/waagent/Microsoft.Azure.Extensions.LinuxAsm-2.0.1/config). + - The v2.1.x branch original moved that state into an agent-owned handler + state directory (e.g., /var/lib/waagent/handler_state). + - This move can cause v2.1.x agents to multiply invoke a handler's install command. It also makes + clean-up more difficult since the agent must remove the state as well as the handler directory. 
+ """ handler_state_path = os.path.join(conf.get_lib_dir(), "handler_state") if not os.path.isdir(handler_state_path): return @@ -164,6 +203,7 @@ NotInstalled = "NotInstalled" Installed = "Installed" Enabled = "Enabled" + Failed = "Failed" def get_exthandlers_handler(): @@ -176,24 +216,41 @@ self.protocol = None self.ext_handlers = None self.last_etag = None - self.last_guids = {} self.log_report = False self.log_etag = True + self.log_process = False + + self.report_status_error_state = ErrorState() + self.get_artifact_error_state = ErrorState(min_timedelta=ERROR_STATE_DELTA_INSTALL) def run(self): self.ext_handlers, etag = None, None try: self.protocol = self.protocol_util.get_protocol() self.ext_handlers, etag = self.protocol.get_ext_handlers() + self.get_artifact_error_state.reset() except Exception as e: - msg = u"Exception retrieving extension handlers: {0}".format( - ustr(e)) - logger.warn(msg) + msg = u"Exception retrieving extension handlers: {0}".format(ustr(e)) + detailed_msg = '{0} {1}'.format(msg, traceback.extract_tb(get_traceback(e))) + + self.get_artifact_error_state.incr() + + if self.get_artifact_error_state.is_triggered(): + add_event(AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.GetArtifactExtended, + is_success=False, + message="Failed to get extension artifact for over " + "{0}: {1}".format(self.get_artifact_error_state.min_timedelta, msg)) + self.get_artifact_error_state.reset() + else: + logger.warn(msg) + add_event(AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.ExtensionProcessing, is_success=False, - message=msg) + message=detailed_msg) return try: @@ -201,35 +258,24 @@ logger.verbose(msg) # Log status report success on new config self.log_report = True - self.handle_ext_handlers(etag) - self.last_etag = etag + + if self.extension_processing_allowed(): + self.handle_ext_handlers(etag) + self.last_etag = etag self.report_ext_handlers_status() self.cleanup_outdated_handlers() except Exception as e: - msg = 
u"Exception processing extension handlers: {0}".format( - ustr(e)) + msg = u"Exception processing extension handlers: {0}".format(ustr(e)) + detailed_msg = '{0} {1}'.format(msg, traceback.extract_tb(get_traceback(e))) logger.warn(msg) add_event(AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.ExtensionProcessing, is_success=False, - message=msg) + message=detailed_msg) return - def run_status(self): - self.report_ext_handlers_status() - return - - def get_guid(self, name): - return self.last_guids.get(name, None) - - def is_new_guid(self, ext_handler): - last_guid = self.get_guid(ext_handler.name) - if last_guid is None: - return True - return last_guid != ext_handler.properties.upgradeGuid - def cleanup_outdated_handlers(self): handlers = [] pkgs = [] @@ -253,10 +299,10 @@ separator = item.rfind('-') eh.name = item[0:separator] - eh.properties.version = str(FlexibleVersion(item[separator+1:])) + eh.properties.version = str(FlexibleVersion(item[separator + 1:])) handler = ExtHandlerInstance(eh, self.protocol) - except Exception as e: + except Exception: continue if handler.get_handler_state() != ExtHandlerState.NotInstalled: continue @@ -272,58 +318,117 @@ for pkg in pkgs: try: os.remove(pkg) - logger.verbose("Removed orphaned extension package " - "{0}".format(pkg)) - except Exception as e: - logger.warn("Failed to remove orphaned package: {0}".format( - pkg)) + logger.verbose("Removed orphaned extension package {0}".format(pkg)) + except OSError as e: + logger.warn("Failed to remove orphaned package {0}: {1}".format(pkg, e.strerror)) # Finally, remove the directories and packages of the # uninstalled handlers for handler in handlers: - handler.rm_ext_handler_dir() - pkg = os.path.join(conf.get_lib_dir(), - handler.get_full_name() + HANDLER_PKG_EXT) + handler.remove_ext_handler() + pkg = os.path.join(conf.get_lib_dir(), handler.get_full_name() + HANDLER_PKG_EXT) if os.path.isfile(pkg): try: os.remove(pkg) - logger.verbose("Removed extension package " - 
"{0}".format(pkg)) - except Exception as e: - logger.warn("Failed to remove extension package: " - "{0}".format(pkg)) - + logger.verbose("Removed extension package {0}".format(pkg)) + except OSError as e: + logger.warn("Failed to remove extension package {0}: {1}".format(pkg, e.strerror)) + + def extension_processing_allowed(self): + if not conf.get_extensions_enabled(): + logger.verbose("Extension handling is disabled") + return False + + if conf.get_enable_overprovisioning(): + if not self.protocol.supports_overprovisioning(): + logger.verbose("Overprovisioning is enabled but protocol does not support it.") + else: + artifacts_profile = self.protocol.get_artifacts_profile() + if artifacts_profile and artifacts_profile.is_on_hold(): + logger.info("Extension handling is on hold") + return False + + return True + def handle_ext_handlers(self, etag=None): if self.ext_handlers.extHandlers is None or \ len(self.ext_handlers.extHandlers) == 0: logger.verbose("No extension handler config found") return - if conf.get_enable_overprovisioning(): - artifacts_profile = self.protocol.get_artifacts_profile() - if artifacts_profile and artifacts_profile.is_on_hold(): - logger.info("Extension handling is on hold") - return + wait_until = datetime.datetime.utcnow() + datetime.timedelta(minutes=DEFAULT_EXT_TIMEOUT_MINUTES) + max_dep_level = max([handler.sort_key() for handler in self.ext_handlers.extHandlers]) + self.ext_handlers.extHandlers.sort(key=operator.methodcaller('sort_key')) for ext_handler in self.ext_handlers.extHandlers: - # TODO: handle install in sequence, enable in parallel self.handle_ext_handler(ext_handler, etag) - + + # Wait for the extension installation until it is handled. + # This is done for the install and enable. Not for the uninstallation. + # If handled successfully, proceed with the current handler. + # Otherwise, skip the rest of the extension installation. 
+ dep_level = ext_handler.sort_key() + if dep_level >= 0 and dep_level < max_dep_level: + if not self.wait_for_handler_successful_completion(ext_handler, wait_until): + logger.warn("An extension failed or timed out, will skip processing the rest of the extensions") + break + + def wait_for_handler_successful_completion(self, ext_handler, wait_until): + ''' + Check the status of the extension being handled. + Wait until it has a terminal state or times out. + Return True if it is handled successfully. False if not. + ''' + handler_i = ExtHandlerInstance(ext_handler, self.protocol) + for ext in ext_handler.properties.extensions: + ext_completed, status = handler_i.is_ext_handling_complete(ext) + + # Keep polling for the extension status until it becomes success or times out + while not ext_completed and datetime.datetime.utcnow() <= wait_until: + time.sleep(5) + ext_completed, status = handler_i.is_ext_handling_complete(ext) + + # In case of timeout or terminal error state, we log it and return false + # so that the extensions waiting on this one can be skipped processing + if datetime.datetime.utcnow() > wait_until: + msg = "Extension {0} did not reach a terminal state within the allowed timeout. Last status was {1}".format( + ext.name, status) + logger.warn(msg) + add_event(AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.ExtensionProcessing, + is_success=False, + message=msg) + return False + + if status != EXTENSION_STATUS_SUCCESS: + msg = "Extension {0} did not succeed. 
Status was {1}".format(ext.name, status) + logger.warn(msg) + add_event(AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.ExtensionProcessing, + is_success=False, + message=msg) + return False + + return True + def handle_ext_handler(self, ext_handler, etag): ext_handler_i = ExtHandlerInstance(ext_handler, self.protocol) try: state = ext_handler.properties.state - # The extension is to be enabled, there is an upgrade GUID - # and the GUID is NOT new - if state == u"enabled" and \ - ext_handler.properties.upgradeGuid is not None and \ - not self.is_new_guid(ext_handler): - logger.info("New GUID is the same as the old GUID. Exiting without upgrading.") + if ext_handler_i.decide_version(target_state=state) is None: + version = ext_handler_i.ext_handler.properties.version + name = ext_handler_i.ext_handler.name + err_msg = "Unable to find version {0} in manifest for extension {1}".format(version, name) + ext_handler_i.set_operation(WALAEventOperation.Download) + ext_handler_i.set_handler_status(message=ustr(err_msg), code=-1) + ext_handler_i.report_event(message=ustr(err_msg), is_success=False) return - ext_handler_i.decide_version(target_state=state) - if not ext_handler_i.is_upgrade and self.last_etag == etag: + self.get_artifact_error_state.reset() + if self.last_etag == etag: if self.log_etag: ext_handler_i.logger.verbose("Version {0} is current for etag {1}", ext_handler_i.pkg.version, @@ -336,51 +441,112 @@ ext_handler_i.logger.info("Target handler state: {0}", state) if state == u"enabled": self.handle_enable(ext_handler_i) - if ext_handler.properties.upgradeGuid is not None: - ext_handler_i.logger.info("New Upgrade GUID: {0}", ext_handler.properties.upgradeGuid) - self.last_guids[ext_handler.name] = ext_handler.properties.upgradeGuid elif state == u"disabled": self.handle_disable(ext_handler_i) - # Remove the GUID from the dictionary so that it is upgraded upon re-enable - self.last_guids.pop(ext_handler.name, None) elif state == u"uninstall": 
self.handle_uninstall(ext_handler_i) - # Remove the GUID from the dictionary so that it is upgraded upon re-install - self.last_guids.pop(ext_handler.name, None) else: message = u"Unknown ext handler state:{0}".format(state) raise ExtensionError(message) + except ExtensionUpdateError as e: + # Not reporting the error as it has already been reported from the old version + self.handle_ext_handler_error(ext_handler_i, e, e.code, report_telemetry_event=False) + except ExtensionDownloadError as e: + self.handle_ext_handler_download_error(ext_handler_i, e, e.code) + except ExtensionError as e: + self.handle_ext_handler_error(ext_handler_i, e, e.code) except Exception as e: - ext_handler_i.set_handler_status(message=ustr(e), code=-1) - ext_handler_i.report_event(message=ustr(e), is_success=False) - + self.handle_ext_handler_error(ext_handler_i, e) + + def handle_ext_handler_error(self, ext_handler_i, e, code=-1, report_telemetry_event=True): + msg = ustr(e) + ext_handler_i.set_handler_status(message=msg, code=code) + + if report_telemetry_event: + ext_handler_i.report_event(message=msg, is_success=False, log_event=True) + + def handle_ext_handler_download_error(self, ext_handler_i, e, code=-1): + msg = ustr(e) + ext_handler_i.set_handler_status(message=msg, code=code) + + self.get_artifact_error_state.incr() + if self.get_artifact_error_state.is_triggered(): + report_event(op=WALAEventOperation.Download, is_success=False, log_event=True, + message="Failed to get artifact for over " + "{0}: {1}".format(self.get_artifact_error_state.min_timedelta, msg)) + self.get_artifact_error_state.reset() + def handle_enable(self, ext_handler_i): + self.log_process = True + uninstall_exit_code = None old_ext_handler_i = ext_handler_i.get_installed_ext_handler() - if old_ext_handler_i is not None and \ - old_ext_handler_i.version_gt(ext_handler_i): - raise ExtensionError(u"Downgrade not allowed") + handler_state = ext_handler_i.get_handler_state() ext_handler_i.logger.info("[Enable] 
current handler state is: {0}", handler_state.lower()) if handler_state == ExtHandlerState.NotInstalled: ext_handler_i.set_handler_state(ExtHandlerState.NotInstalled) ext_handler_i.download() + ext_handler_i.initialize() ext_handler_i.update_settings() if old_ext_handler_i is None: ext_handler_i.install() - elif ext_handler_i.version_gt(old_ext_handler_i): - old_ext_handler_i.disable() - ext_handler_i.copy_status_files(old_ext_handler_i) - ext_handler_i.update() - old_ext_handler_i.uninstall() - old_ext_handler_i.rm_ext_handler_dir() - ext_handler_i.update_with_install() + elif ext_handler_i.version_ne(old_ext_handler_i): + uninstall_exit_code = ExtHandlersHandler._update_extension_handler_and_return_if_failed( + old_ext_handler_i, ext_handler_i) else: ext_handler_i.update_settings() - ext_handler_i.enable() + ext_handler_i.enable(uninstall_exit_code=uninstall_exit_code) + + @staticmethod + def _update_extension_handler_and_return_if_failed(old_ext_handler_i, ext_handler_i): + + def execute_old_handler_command_and_return_if_succeeds(func): + """ + Created a common wrapper to execute all commands that need to be executed from the old handler + so that it can have a common exception handling mechanism + :param func: The command to be executed on the old handler + :return: True if command execution succeeds and False if it fails + """ + continue_on_update_failure = False + exit_code = 0 + try: + continue_on_update_failure = ext_handler_i.load_manifest().is_continue_on_update_failure() + func() + except ExtensionError as e: + # Reporting the event with the old handler and raising a new ExtensionUpdateError to set the + # handler status on the new version + msg = "%s; ContinueOnUpdate: %s" % (ustr(e), continue_on_update_failure) + old_ext_handler_i.report_event(message=msg, is_success=False) + if not continue_on_update_failure: + raise ExtensionUpdateError(msg) + + exit_code = e.code + if isinstance(e, ExtensionOperationError): + exit_code = e.exit_code + + 
logger.info("Continue on Update failure flag is set, proceeding with update") + return exit_code + + disable_exit_code = execute_old_handler_command_and_return_if_succeeds( + func=lambda: old_ext_handler_i.disable()) + ext_handler_i.copy_status_files(old_ext_handler_i) + if ext_handler_i.version_gt(old_ext_handler_i): + ext_handler_i.update(disable_exit_code=disable_exit_code, + updating_from_version=old_ext_handler_i.ext_handler.properties.version) + else: + updating_from_version = ext_handler_i.ext_handler.properties.version + old_ext_handler_i.update(version=updating_from_version, + disable_exit_code=disable_exit_code, updating_from_version=updating_from_version) + uninstall_exit_code = execute_old_handler_command_and_return_if_succeeds( + func=lambda: old_ext_handler_i.uninstall()) + old_ext_handler_i.remove_ext_handler() + ext_handler_i.update_with_install(uninstall_exit_code=uninstall_exit_code) + return uninstall_exit_code def handle_disable(self, ext_handler_i): + self.log_process = True handler_state = ext_handler_i.get_handler_state() ext_handler_i.logger.info("[Disable] current handler state is: {0}", handler_state.lower()) @@ -388,17 +554,26 @@ ext_handler_i.disable() def handle_uninstall(self, ext_handler_i): + self.log_process = True handler_state = ext_handler_i.get_handler_state() ext_handler_i.logger.info("[Uninstall] current handler state is: {0}", handler_state.lower()) if handler_state != ExtHandlerState.NotInstalled: if handler_state == ExtHandlerState.Enabled: ext_handler_i.disable() - ext_handler_i.uninstall() - ext_handler_i.rm_ext_handler_dir() - + + # Try uninstalling the extension and swallow any exceptions in case of failures after logging them + try: + ext_handler_i.uninstall() + except ExtensionError as e: + ext_handler_i.report_event(message=ustr(e), is_success=False) + + ext_handler_i.remove_ext_handler() + def report_ext_handlers_status(self): - """Go through handler_state dir, collect and report status""" + """ + Go through 
handler_state dir, collect and report status + """ vm_status = VMStatus(status="Ready", message="Guest Agent is running") if self.ext_handlers is not None: for ext_handler in self.ext_handlers.extHandlers: @@ -417,23 +592,72 @@ self.protocol.report_vm_status(vm_status) if self.log_report: logger.verbose("Completed vm agent status report") + self.report_status_error_state.reset() + except ProtocolNotFoundError as e: + self.report_status_error_state.incr() + message = "Failed to report vm agent status: {0}".format(e) + logger.verbose(message) except ProtocolError as e: + self.report_status_error_state.incr() message = "Failed to report vm agent status: {0}".format(e) add_event(AGENT_NAME, - version=CURRENT_VERSION, - op=WALAEventOperation.ExtensionProcessing, - is_success=False, - message=message) + version=CURRENT_VERSION, + op=WALAEventOperation.ExtensionProcessing, + is_success=False, + message=message) + + if self.report_status_error_state.is_triggered(): + message = "Failed to report vm agent status for more than {0}" \ + .format(self.report_status_error_state.min_timedelta) + + add_event(AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.ReportStatusExtended, + is_success=False, + message=message) + + self.report_status_error_state.reset() + + self.write_ext_handlers_status_to_info_file(vm_status) + + @staticmethod + def write_ext_handlers_status_to_info_file(vm_status): + status_path = os.path.join(conf.get_lib_dir(), AGENT_STATUS_FILE) + + agent_details = { + "agent_name": AGENT_NAME, + "current_version": str(CURRENT_VERSION), + "goal_state_version": str(GOAL_STATE_AGENT_VERSION), + "distro_details": "{0}:{1}".format(DISTRO_NAME, DISTRO_VERSION), + "last_successful_status_upload_time": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), + "python_version": "Python: {0}.{1}.{2}".format(PY_VERSION_MAJOR, PY_VERSION_MINOR, PY_VERSION_MICRO) + } + + # Convert VMStatus class to Dict. 
+ data = get_properties(vm_status) + + # The above class contains vmAgent.extensionHandlers + # (more info: azurelinuxagent.common.protocol.restapi.VMAgentStatus) + handler_statuses = data['vmAgent']['extensionHandlers'] + for handler_status in handler_statuses: + try: + handler_status.pop('code', None) + handler_status.pop('message', None) + handler_status.pop('extensions', None) + except KeyError: + pass + + agent_details['extensions_status'] = handler_statuses + agent_details_json = json.dumps(agent_details) + + fileutil.write_file(status_path, agent_details_json) def report_ext_handler_status(self, vm_status, ext_handler): ext_handler_i = ExtHandlerInstance(ext_handler, self.protocol) - - handler_status = ext_handler_i.get_handler_status() + + handler_status = ext_handler_i.get_handler_status() if handler_status is None: return - guid = self.get_guid(ext_handler.name) - if guid is not None: - handler_status.upgradeGuid = guid handler_state = ext_handler_i.get_handler_state() if handler_state != ExtHandlerState.NotInstalled: @@ -441,14 +665,14 @@ active_exts = ext_handler_i.report_ext_status() handler_status.extensions.extend(active_exts) except ExtensionError as e: - ext_handler_i.set_handler_status(message=ustr(e), code=-1) + ext_handler_i.set_handler_status(message=ustr(e), code=e.code) try: heartbeat = ext_handler_i.collect_heartbeat() if heartbeat is not None: handler_status.status = heartbeat.get('status') except ExtensionError as e: - ext_handler_i.set_handler_status(message=ustr(e), code=-1) + ext_handler_i.set_handler_status(message=ustr(e), code=e.code) vm_status.vmAgent.extensionHandlers.append(handler_status) @@ -460,11 +684,9 @@ self.operation = None self.pkg = None self.pkg_file = None - self.is_upgrade = False + self.logger = None + self.set_logger() - prefix = "[{0}]".format(self.get_full_name()) - self.logger = logger.Logger(logger.DEFAULT_LOGGER, prefix) - try: fileutil.mkdir(self.get_log_dir(), mode=0o755) except IOError as e: @@ -480,10 
+702,12 @@ pkg_list = self.protocol.get_ext_handler_pkgs(self.ext_handler) except ProtocolError as e: raise ExtensionError("Failed to get ext handler pkgs", e) + except ExtensionDownloadError: + self.set_operation(WALAEventOperation.Download) + raise # Determine the desired and installed versions - requested_version = FlexibleVersion( - str(self.ext_handler.properties.version)) + requested_version = FlexibleVersion(str(self.ext_handler.properties.version)) installed_version_string = self.get_installed_version() installed_version = requested_version \ if installed_version_string is None \ @@ -493,111 +717,57 @@ # - Find the installed package (its version must exactly match) # - Find the internal candidate (its version must exactly match) # - Separate the public packages - internal_pkg = None + selected_pkg = None installed_pkg = None - public_pkgs = [] + pkg_list.versions.sort(key=lambda p: FlexibleVersion(p.version)) for pkg in pkg_list.versions: pkg_version = FlexibleVersion(pkg.version) if pkg_version == installed_version: installed_pkg = pkg - if pkg.isinternal and pkg_version == requested_version: - internal_pkg = pkg - if not pkg.isinternal: - public_pkgs.append(pkg) - - internal_version = FlexibleVersion(internal_pkg.version) \ - if internal_pkg is not None \ - else FlexibleVersion() - public_pkgs.sort(key=lambda pkg: FlexibleVersion(pkg.version), reverse=True) - - # Determine the preferred version and type of upgrade occurring - preferred_version = max(requested_version, installed_version) - is_major_upgrade = preferred_version.major > installed_version.major - allow_minor_upgrade = self.ext_handler.properties.upgradePolicy == 'auto' - - # Find the first public candidate which - # - Matches the preferred major version - # - Does not upgrade to a new, disallowed major version - # - And only increments the minor version if allowed - # Notes: - # - The patch / hotfix version is not considered - public_pkg = None - for pkg in public_pkgs: - pkg_version = 
FlexibleVersion(pkg.version) - if pkg_version.major == preferred_version.major \ - and (not pkg.disallow_major_upgrade or not is_major_upgrade) \ - and (allow_minor_upgrade or pkg_version.minor == preferred_version.minor): - public_pkg = pkg - break - - # If there are no candidates, locate the highest public version whose - # major matches that installed - if internal_pkg is None and public_pkg is None: - for pkg in public_pkgs: - pkg_version = FlexibleVersion(pkg.version) - if pkg_version.major == installed_version.major: - public_pkg = pkg - break - - public_version = FlexibleVersion(public_pkg.version) \ - if public_pkg is not None \ - else FlexibleVersion() - - # Select the candidate - # - Use the public candidate if there is no internal candidate or - # the public is more recent (e.g., a hotfix patch) - # - Otherwise use the internal candidate - if internal_pkg is None or (public_pkg is not None and public_version > internal_version): - selected_pkg = public_pkg - else: - selected_pkg = internal_pkg - - selected_version = FlexibleVersion(selected_pkg.version) \ - if selected_pkg is not None \ - else FlexibleVersion() + if requested_version.matches(pkg_version): + selected_pkg = pkg # Finally, update the version only if not downgrading # Note: # - A downgrade, which will be bound to the same major version, # is allowed if the installed version is no longer available - if target_state == u"uninstall": + if target_state == u"uninstall" or target_state == u"disabled": if installed_pkg is None: msg = "Failed to find installed version of {0} " \ - "to uninstall".format(self.ext_handler.name) + "to uninstall".format(self.ext_handler.name) self.logger.warn(msg) self.pkg = installed_pkg self.ext_handler.properties.version = str(installed_version) \ - if installed_version is not None else None - elif selected_pkg is None \ - or (installed_pkg is not None and selected_version < installed_version): - self.pkg = installed_pkg - self.ext_handler.properties.version = 
str(installed_version) \ - if installed_version is not None else None + if installed_version is not None else None else: self.pkg = selected_pkg - self.ext_handler.properties.version = str(selected_pkg.version) + if self.pkg is not None: + self.ext_handler.properties.version = str(selected_pkg.version) - # Note if the selected package is greater than that installed - if installed_pkg is None \ - or FlexibleVersion(self.pkg.version) > FlexibleVersion(installed_pkg.version): - self.is_upgrade = True + if self.pkg is not None: + self.logger.verbose("Use version: {0}", self.pkg.version) + self.set_logger() + return self.pkg - if self.pkg is None: - raise ExtensionError("Failed to find any valid extension package") - - self.logger.verbose("Use version: {0}", self.pkg.version) - return + def set_logger(self): + prefix = "[{0}]".format(self.get_full_name()) + self.logger = logger.Logger(logger.DEFAULT_LOGGER, prefix) def version_gt(self, other): self_version = self.ext_handler.properties.version other_version = other.ext_handler.properties.version return FlexibleVersion(self_version) > FlexibleVersion(other_version) + def version_ne(self, other): + self_version = self.ext_handler.properties.version + other_version = other.ext_handler.properties.version + return FlexibleVersion(self_version) != FlexibleVersion(other_version) + def get_installed_ext_handler(self): lastest_version = self.get_installed_version() if lastest_version is None: return None - + installed_handler = ExtHandler() set_properties("ExtHandler", installed_handler, get_properties(self.ext_handler)) installed_handler.properties.version = lastest_version @@ -611,21 +781,21 @@ continue separator = path.rfind('-') - version = FlexibleVersion(path[separator+1:]) + version_from_path = FlexibleVersion(path[separator + 1:]) state_path = os.path.join(path, 'config', 'HandlerState') if not os.path.exists(state_path) or \ - fileutil.read_file(state_path) == \ + fileutil.read_file(state_path) == \ 
ExtHandlerState.NotInstalled: logger.verbose("Ignoring version of uninstalled extension: " - "{0}".format(path)) + "{0}".format(path)) continue - if lastest_version is None or lastest_version < version: - lastest_version = version + if lastest_version is None or lastest_version < version_from_path: + lastest_version = version_from_path return str(lastest_version) if lastest_version is not None else None - + def copy_status_files(self, old_ext_handler_i): self.logger.info("Copy status files from old plugin to new") old_ext_dir = old_ext_handler_i.get_base_dir() @@ -647,99 +817,146 @@ def set_operation(self, op): self.operation = op - def report_event(self, message="", is_success=True): - version = self.ext_handler.properties.version - add_event(name=self.ext_handler.name, version=version, message=message, - op=self.operation, is_success=is_success) + def report_event(self, message="", is_success=True, duration=0, log_event=True): + ext_handler_version = self.ext_handler.properties.version + add_event(name=self.ext_handler.name, version=ext_handler_version, message=message, + op=self.operation, is_success=is_success, duration=duration, log_event=log_event) + + def _download_extension_package(self, source_uri, target_file): + self.logger.info("Downloading extension package: {0}", source_uri) + try: + if not self.protocol.download_ext_handler_pkg(source_uri, target_file): + raise Exception("Failed to download extension package - no error information is available") + except Exception as exception: + self.logger.info("Error downloading extension package: {0}", ustr(exception)) + if os.path.exists(target_file): + os.remove(target_file) + return False + return True + + def _unzip_extension_package(self, source_file, target_directory): + self.logger.info("Unzipping extension package: {0}", source_file) + try: + zipfile.ZipFile(source_file).extractall(target_directory) + except Exception as exception: + logger.info("Error while unzipping extension package: {0}", 
ustr(exception)) + os.remove(source_file) + if os.path.exists(target_directory): + shutil.rmtree(target_directory) + return False + return True def download(self): - self.logger.verbose("Download extension package") + begin_utc = datetime.datetime.utcnow() self.set_operation(WALAEventOperation.Download) - if self.pkg is None: - raise ExtensionError("No package uri found") - - package = None - for uri in self.pkg.uris: - try: - package = self.protocol.download_ext_handler_pkg(uri.uri) - if package is not None: + + if self.pkg is None or self.pkg.uris is None or len(self.pkg.uris) == 0: + raise ExtensionDownloadError("No package uri found") + + destination = os.path.join(conf.get_lib_dir(), self.get_extension_package_zipfile_name()) + + package_exists = False + if os.path.exists(destination): + self.logger.info("Using existing extension package: {0}", destination) + if self._unzip_extension_package(destination, self.get_base_dir()): + package_exists = True + else: + self.logger.info("The existing extension package is invalid, will ignore it.") + + if not package_exists: + downloaded = False + i = 0 + while i < NUMBER_OF_DOWNLOAD_RETRIES: + uris_shuffled = self.pkg.uris + random.shuffle(uris_shuffled) + + for uri in uris_shuffled: + if not self._download_extension_package(uri.uri, destination): + continue + + if self._unzip_extension_package(destination, self.get_base_dir()): + downloaded = True + break + + if downloaded: break - except Exception as e: - logger.warn("Error while downloading extension: {0}", e) - - if package is None: - raise ExtensionError("Failed to download extension") - - self.logger.verbose("Unpack extension package") - self.pkg_file = os.path.join(conf.get_lib_dir(), - os.path.basename(uri.uri) + ".zip") - try: - fileutil.write_file(self.pkg_file, bytearray(package), asbin=True) - zipfile.ZipFile(self.pkg_file).extractall(self.get_base_dir()) - except IOError as e: - fileutil.clean_ioerror(e, - paths=[self.get_base_dir(), self.pkg_file]) - raise 
ExtensionError(u"Failed to write and unzip plugin", e) - #Add user execute permission to all files under the base dir + self.logger.info("Failed to download the extension package from all uris, will retry after a minute") + time.sleep(60) + i += 1 + + if not downloaded: + raise ExtensionDownloadError("Failed to download extension", + code=ExtensionErrorCodes.PluginManifestDownloadError) + + duration = elapsed_milliseconds(begin_utc) + self.report_event(message="Download succeeded", duration=duration) + + self.pkg_file = destination + + def initialize(self): + self.logger.info("Initializing extension {0}".format(self.get_full_name())) + + # Add user execute permission to all files under the base dir for file in fileutil.get_all_files(self.get_base_dir()): fileutil.chmod(file, os.stat(file).st_mode | stat.S_IXUSR) - self.report_event(message="Download succeeded") - - self.logger.info("Initialize extension directory") - #Save HandlerManifest.json - man_file = fileutil.search_file(self.get_base_dir(), - 'HandlerManifest.json') + # Save HandlerManifest.json + man_file = fileutil.search_file(self.get_base_dir(), 'HandlerManifest.json') if man_file is None: - raise ExtensionError("HandlerManifest.json not found") - + raise ExtensionDownloadError("HandlerManifest.json not found") + try: man = fileutil.read_file(man_file, remove_bom=True) fileutil.write_file(self.get_manifest_file(), man) except IOError as e: - fileutil.clean_ioerror(e, - paths=[self.get_base_dir(), self.pkg_file]) - raise ExtensionError(u"Failed to save HandlerManifest.json", e) + fileutil.clean_ioerror(e, paths=[self.get_base_dir(), self.pkg_file]) + raise ExtensionDownloadError(u"Failed to save HandlerManifest.json", e) - #Create status and config dir + # Create status and config dir try: status_dir = self.get_status_dir() fileutil.mkdir(status_dir, mode=0o700) seq_no, status_path = self.get_status_file_path() - if seq_no > -1: + if status_path is not None: now = 
datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ") status = { "version": 1.0, - "timestampUTC" : now, - "status" : { - "name" : self.ext_handler.name, - "operation" : "Enabling Handler", - "status" : "transitioning", - "code" : 0 + "timestampUTC": now, + "status": { + "name": self.ext_handler.name, + "operation": "Enabling Handler", + "status": "transitioning", + "code": 0 } } - fileutil.write_file(json.dumps(status), status_path) + fileutil.write_file(status_path, json.dumps(status)) conf_dir = self.get_conf_dir() fileutil.mkdir(conf_dir, mode=0o700) except IOError as e: - fileutil.clean_ioerror(e, - paths=[self.get_base_dir(), self.pkg_file]) - raise ExtensionError(u"Failed to create status or config dir", e) + fileutil.clean_ioerror(e, paths=[self.get_base_dir(), self.pkg_file]) + raise ExtensionDownloadError(u"Failed to initialize extension '{0}'".format(self.get_full_name()), e) + + # Create cgroups for the extension + CGroupConfigurator.get_instance().create_extension_cgroups(self.get_full_name()) - #Save HandlerEnvironment.json + # Save HandlerEnvironment.json self.create_handler_env() - def enable(self): + def enable(self, uninstall_exit_code=None): + uninstall_exit_code = str(uninstall_exit_code) if uninstall_exit_code is not None else NOT_RUN + env = {ExtCommandEnvVariable.UninstallReturnCode: uninstall_exit_code} + self.set_operation(WALAEventOperation.Enable) man = self.load_manifest() enable_cmd = man.get_enable_command() self.logger.info("Enable extension [{0}]".format(enable_cmd)) - self.launch_command(enable_cmd, timeout=300) + self.launch_command(enable_cmd, timeout=300, + extension_error_code=ExtensionErrorCodes.PluginEnableProcessingFailed, env=env) self.set_handler_state(ExtHandlerState.Enabled) self.set_handler_status(status="Ready", message="Plugin enabled") @@ -748,51 +965,82 @@ man = self.load_manifest() disable_cmd = man.get_disable_command() self.logger.info("Disable extension [{0}]".format(disable_cmd)) - 
self.launch_command(disable_cmd, timeout=900) + self.launch_command(disable_cmd, timeout=900, + extension_error_code=ExtensionErrorCodes.PluginDisableProcessingFailed) self.set_handler_state(ExtHandlerState.Installed) self.set_handler_status(status="NotReady", message="Plugin disabled") - def install(self): + def install(self, uninstall_exit_code=None): + uninstall_exit_code = str(uninstall_exit_code) if uninstall_exit_code is not None else NOT_RUN + env = {ExtCommandEnvVariable.UninstallReturnCode: uninstall_exit_code} + man = self.load_manifest() install_cmd = man.get_install_command() self.logger.info("Install extension [{0}]".format(install_cmd)) self.set_operation(WALAEventOperation.Install) - self.launch_command(install_cmd, timeout=900) + self.launch_command(install_cmd, timeout=900, + extension_error_code=ExtensionErrorCodes.PluginInstallProcessingFailed, env=env) self.set_handler_state(ExtHandlerState.Installed) def uninstall(self): - try: - self.set_operation(WALAEventOperation.UnInstall) - man = self.load_manifest() - uninstall_cmd = man.get_uninstall_command() - self.logger.info("Uninstall extension [{0}]".format(uninstall_cmd)) - self.launch_command(uninstall_cmd) - except ExtensionError as e: - self.report_event(message=ustr(e), is_success=False) - - def rm_ext_handler_dir(self): - try: + self.set_operation(WALAEventOperation.UnInstall) + man = self.load_manifest() + uninstall_cmd = man.get_uninstall_command() + self.logger.info("Uninstall extension [{0}]".format(uninstall_cmd)) + self.launch_command(uninstall_cmd) + + def remove_ext_handler(self): + try: + zip_filename = os.path.join(conf.get_lib_dir(), self.get_extension_package_zipfile_name()) + if os.path.exists(zip_filename): + os.remove(zip_filename) + self.logger.verbose("Deleted the extension zip at path {0}", zip_filename) + base_dir = self.get_base_dir() if os.path.isdir(base_dir): - self.logger.info("Remove extension handler directory: {0}", - base_dir) - shutil.rmtree(base_dir) + 
self.logger.info("Remove extension handler directory: {0}", base_dir) + + # some extensions uninstall asynchronously so ignore error 2 while removing them + def on_rmtree_error(_, __, exc_info): + _, exception, _ = exc_info + if not isinstance(exception, OSError) or exception.errno != 2: # [Errno 2] No such file or directory + raise exception + + shutil.rmtree(base_dir, onerror=on_rmtree_error) except IOError as e: message = "Failed to remove extension handler directory: {0}".format(e) self.report_event(message=message, is_success=False) self.logger.warn(message) - def update(self): - self.set_operation(WALAEventOperation.Update) - man = self.load_manifest() - update_cmd = man.get_update_command() - self.logger.info("Update extension [{0}]".format(update_cmd)) - self.launch_command(update_cmd, timeout=900) - - def update_with_install(self): + # Also remove the cgroups for the extension + CGroupConfigurator.get_instance().remove_extension_cgroups(self.get_full_name()) + + def update(self, version=None, disable_exit_code=None, updating_from_version=None): + if version is None: + version = self.ext_handler.properties.version + + disable_exit_code = str(disable_exit_code) if disable_exit_code is not None else NOT_RUN + env = {'VERSION': version, ExtCommandEnvVariable.DisableReturnCode: disable_exit_code, + ExtCommandEnvVariable.UpdatingFromVersion: updating_from_version} + + try: + self.set_operation(WALAEventOperation.Update) + man = self.load_manifest() + update_cmd = man.get_update_command() + self.logger.info("Update extension [{0}]".format(update_cmd)) + self.launch_command(update_cmd, + timeout=900, + extension_error_code=ExtensionErrorCodes.PluginUpdateProcessingFailed, + env=env) + except ExtensionError: + # prevent the handler update from being retried + self.set_handler_state(ExtHandlerState.Failed) + raise + + def update_with_install(self, uninstall_exit_code=None): man = self.load_manifest() if man.is_update_with_install(): - self.install() + 
self.install(uninstall_exit_code=uninstall_exit_code) else: self.logger.info("UpdateWithInstall not set. " "Skip install during upgrade.") @@ -805,31 +1053,48 @@ item_path = os.path.join(conf_dir, item) if os.path.isfile(item_path): try: - seperator = item.rfind(".") - if seperator > 0 and item[seperator + 1:] == 'settings': + separator = item.rfind(".") + if separator > 0 and item[separator + 1:] == 'settings': curr_seq_no = int(item.split('.')[0]) if curr_seq_no > seq_no: seq_no = curr_seq_no - except Exception as e: + except (ValueError, IndexError, TypeError): self.logger.verbose("Failed to parse file name: {0}", item) continue return seq_no - def get_status_file_path(self): - seq_no = self.get_largest_seq_no() + def get_status_file_path(self, extension=None): path = None + seq_no = self.get_largest_seq_no() + + # Issue 1116: use the sequence number from goal state where possible + if extension is not None and extension.sequenceNumber is not None: + try: + gs_seq_no = int(extension.sequenceNumber) + + if gs_seq_no != seq_no: + add_event(AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.SequenceNumberMismatch, + is_success=False, + message="Goal state: {0}, disk: {1}".format(gs_seq_no, seq_no), + log_event=False) + + seq_no = gs_seq_no + except ValueError: + logger.error('Sequence number [{0}] does not appear to be valid'.format(extension.sequenceNumber)) if seq_no > -1: path = os.path.join( - self.get_status_dir(), - "{0}.status".format(seq_no)) + self.get_status_dir(), + "{0}.status".format(seq_no)) return seq_no, path def collect_ext_status(self, ext): self.logger.verbose("Collect extension status") - seq_no, ext_status_file = self.get_status_file_path() + seq_no, ext_status_file = self.get_status_file_path(ext) if seq_no == -1: return None @@ -842,27 +1107,61 @@ ext_status.message = u"Failed to get status file {0}".format(e) ext_status.code = -1 ext_status.status = "error" - except (ExtensionError, ValueError) as e: + except ExtensionError as e: 
+ ext_status.message = u"Malformed status file {0}".format(e) + ext_status.code = ExtensionErrorCodes.PluginSettingsStatusInvalid + ext_status.status = "error" + except ValueError as e: ext_status.message = u"Malformed status file {0}".format(e) ext_status.code = -1 ext_status.status = "error" return ext_status - + + def get_ext_handling_status(self, ext): + seq_no, ext_status_file = self.get_status_file_path(ext) + if seq_no < 0 or ext_status_file is None: + return None + + # Missing status file is considered a non-terminal state here + # so that extension sequencing can wait until it becomes existing + if not os.path.exists(ext_status_file): + status = "warning" + else: + ext_status = self.collect_ext_status(ext) + status = ext_status.status if ext_status is not None else None + + return status + + def is_ext_handling_complete(self, ext): + status = self.get_ext_handling_status(ext) + + # when seq < 0 (i.e. no new user settings), the handling is complete and return None status + if status is None: + return (True, None) + + # If not in terminal state, it is incomplete + if status not in EXTENSION_TERMINAL_STATUSES: + return (False, status) + + # Extension completed, return its status + return (True, status) + def report_ext_status(self): active_exts = [] + # TODO Refactor or remove this common code pattern (for each extension subordinate to an ext_handler, do X). 
for ext in self.ext_handler.properties.extensions: ext_status = self.collect_ext_status(ext) if ext_status is None: continue try: - self.protocol.report_ext_status(self.ext_handler.name, ext.name, + self.protocol.report_ext_status(self.ext_handler.name, ext.name, ext_status) active_exts.append(ext.name) except ProtocolError as e: self.logger.error(u"Failed to report extension status: {0}", e) return active_exts - + def collect_heartbeat(self): man = self.load_manifest() if not man.is_report_heartbeat(): @@ -874,9 +1173,9 @@ raise ExtensionError("Failed to get heart beat file") if not self.is_responsive(heartbeat_file): return { - "status": "Unresponsive", - "code": -1, - "message": "Extension heartbeat is not responsive" + "status": "Unresponsive", + "code": -1, + "message": "Extension heartbeat is not responsive" } try: heartbeat_json = fileutil.read_file(heartbeat_file) @@ -886,47 +1185,73 @@ except (ValueError, KeyError) as e: raise ExtensionError("Malformed heartbeat file: {0}".format(e)) return heartbeat - - def is_responsive(self, heartbeat_file): + + @staticmethod + def is_responsive(heartbeat_file): + """ + Was heartbeat_file updated within the last ten (10) minutes? 
+ + :param heartbeat_file: str + :return: bool + """ last_update = int(time.time() - os.stat(heartbeat_file).st_mtime) - return last_update <= 600 # updated within the last 10 min - - def launch_command(self, cmd, timeout=300): + return last_update <= 600 + + def launch_command(self, cmd, timeout=300, extension_error_code=ExtensionErrorCodes.PluginProcessingError, + env=None): + begin_utc = datetime.datetime.utcnow() self.logger.verbose("Launch command: [{0}]", cmd) + base_dir = self.get_base_dir() - try: - devnull = open(os.devnull, 'w') - child = subprocess.Popen(base_dir + "/" + cmd, - shell=True, - cwd=base_dir, - stdout=devnull, - env=os.environ) - except Exception as e: - #TODO do not catch all exception - raise ExtensionError("Failed to launch: {0}, {1}".format(cmd, e)) - retry = timeout - while retry > 0 and child.poll() is None: - time.sleep(1) - retry -= 1 - if retry == 0: - os.kill(child.pid, 9) - raise ExtensionError("Timeout({0}): {1}".format(timeout, cmd)) - - ret = child.wait() - if ret == None or ret != 0: - raise ExtensionError("Non-zero exit code: {0}, {1}".format(ret, cmd)) + with tempfile.TemporaryFile(dir=base_dir, mode="w+b") as stdout: + with tempfile.TemporaryFile(dir=base_dir, mode="w+b") as stderr: + if env is None: + env = {} + env.update(os.environ) + # Always add Extension Path and version to the current launch_command (Ask from publishers) + env.update({ExtCommandEnvVariable.ExtensionPath: base_dir, + ExtCommandEnvVariable.ExtensionVersion: str(self.ext_handler.properties.version), + ExtCommandEnvVariable.ExtensionSeqNumber: str(self.get_seq_no())}) + + try: + # Some extensions erroneously begin cmd with a slash; don't interpret those + # as root-relative. 
(Issue #1170) + full_path = os.path.join(base_dir, cmd.lstrip(os.path.sep)) + + process_output = CGroupConfigurator.get_instance().start_extension_command( + extension_name=self.get_full_name(), + command=full_path, + timeout=timeout, + shell=True, + cwd=base_dir, + env=env, + stdout=stdout, + stderr=stderr, + error_code=extension_error_code) + + except OSError as e: + raise ExtensionError("Failed to launch '{0}': {1}".format(full_path, e.strerror), + code=extension_error_code) + + duration = elapsed_milliseconds(begin_utc) + log_msg = "{0}\n{1}".format(cmd, "\n".join([line for line in process_output.split('\n') if line != ""])) - self.report_event(message="Launch command succeeded: {0}".format(cmd)) + self.logger.verbose(log_msg) + self.report_event(message=log_msg, duration=duration, log_event=False) + + return process_output def load_manifest(self): man_file = self.get_manifest_file() try: data = json.loads(fileutil.read_file(man_file)) - except IOError as e: - raise ExtensionError('Failed to load manifest file.') - except ValueError as e: - raise ExtensionError('Malformed manifest file.') + except (IOError, OSError) as e: + raise ExtensionError('Failed to load manifest file ({0}): {1}'.format(man_file, e.strerror), + code=ExtensionErrorCodes.PluginHandlerManifestNotFound) + except ValueError: + raise ExtensionError('Malformed manifest file ({0}).'.format(man_file), + code=ExtensionErrorCodes.PluginHandlerManifestDeserializationError) return HandlerManifest(data[0]) @@ -936,18 +1261,18 @@ fileutil.write_file(settings_file, settings) except IOError as e: fileutil.clean_ioerror(e, - paths=[settings_file]) + paths=[settings_file]) raise ExtensionError(u"Failed to update settings file", e) def update_settings(self): if self.ext_handler.properties.extensions is None or \ len(self.ext_handler.properties.extensions) == 0: - #This is the behavior of waagent 2.0.x - #The new agent has to be consistent with the old one. 
+ # This is the behavior of waagent 2.0.x + # The new agent has to be consistent with the old one. self.logger.info("Extension has no settings, write empty 0.settings") self.update_settings_file("0.settings", "") return - + for ext in self.ext_handler.properties.extensions: settings = { 'publicSettings': ext.publicSettings, @@ -955,7 +1280,7 @@ 'protectedSettingsCertThumbprint': ext.certificateThumbprint } ext_settings = { - "runtimeSettings":[{ + "runtimeSettings": [{ "handlerSettings": settings }] } @@ -966,34 +1291,32 @@ def create_handler_env(self): env = [{ "name": self.ext_handler.name, - "version" : HANDLER_ENVIRONMENT_VERSION, - "handlerEnvironment" : { - "logFolder" : self.get_log_dir(), - "configFolder" : self.get_conf_dir(), - "statusFolder" : self.get_status_dir(), - "heartbeatFile" : self.get_heartbeat_file() + "version": HANDLER_ENVIRONMENT_VERSION, + "handlerEnvironment": { + "logFolder": self.get_log_dir(), + "configFolder": self.get_conf_dir(), + "statusFolder": self.get_status_dir(), + "heartbeatFile": self.get_heartbeat_file() } }] try: fileutil.write_file(self.get_env_file(), json.dumps(env)) except IOError as e: fileutil.clean_ioerror(e, - paths=[self.get_base_dir(), self.pkg_file]) - raise ExtensionError(u"Failed to save handler environment", e) + paths=[self.get_base_dir(), self.pkg_file]) + raise ExtensionDownloadError(u"Failed to save handler environment", e) def set_handler_state(self, handler_state): state_dir = self.get_conf_dir() + state_file = os.path.join(state_dir, "HandlerState") try: if not os.path.exists(state_dir): fileutil.mkdir(state_dir, mode=0o700) - - state_file = os.path.join(state_dir, "HandlerState") fileutil.write_file(state_file, handler_state) except IOError as e: - fileutil.clean_ioerror(e, - paths=[state_file]) + fileutil.clean_ioerror(e, paths=[state_file]) self.logger.error("Failed to set state: {0}", e) - + def get_handler_state(self): state_dir = self.get_conf_dir() state_file = os.path.join(state_dir, 
"HandlerState") @@ -1005,7 +1328,7 @@ except IOError as e: self.logger.error("Failed to get state: {0}", e) return ExtHandlerState.NotInstalled - + def set_handler_status(self, status="NotReady", message="", code=0): state_dir = self.get_conf_dir() @@ -1018,30 +1341,40 @@ status_file = os.path.join(state_dir, "HandlerStatus") try: - fileutil.write_file(status_file, json.dumps(get_properties(handler_status))) + handler_status_json = json.dumps(get_properties(handler_status)) + if handler_status_json is not None: + fileutil.write_file(status_file, handler_status_json) + else: + self.logger.error("Failed to create JSON document of handler status for {0} version {1}".format( + self.ext_handler.name, + self.ext_handler.properties.version)) except (IOError, ValueError, ProtocolError) as e: - fileutil.clean_ioerror(e, - paths=[status_file]) - self.logger.error("Failed to save handler status: {0}", e) - + fileutil.clean_ioerror(e, paths=[status_file]) + self.logger.error("Failed to save handler status: {0}, {1}", ustr(e), traceback.format_exc()) + def get_handler_status(self): state_dir = self.get_conf_dir() status_file = os.path.join(state_dir, "HandlerStatus") if not os.path.isfile(status_file): return None - + try: data = json.loads(fileutil.read_file(status_file)) - handler_status = ExtHandlerStatus() + handler_status = ExtHandlerStatus() set_properties("ExtHandlerStatus", handler_status, data) return handler_status except (IOError, ValueError) as e: self.logger.error("Failed to get handler status: {0}", e) + def get_extension_package_zipfile_name(self): + return "{0}__{1}{2}".format(self.ext_handler.name, + self.ext_handler.properties.version, + HANDLER_PKG_EXT) + def get_full_name(self): - return "{0}-{1}".format(self.ext_handler.name, + return "{0}-{1}".format(self.ext_handler.name, self.ext_handler.properties.version) - + def get_base_dir(self): return os.path.join(conf.get_lib_dir(), self.get_full_name()) @@ -1061,8 +1394,19 @@ return 
os.path.join(self.get_base_dir(), 'HandlerEnvironment.json') def get_log_dir(self): - return os.path.join(conf.get_ext_log_dir(), self.ext_handler.name, - str(self.ext_handler.properties.version)) + return os.path.join(conf.get_ext_log_dir(), self.ext_handler.name) + + def get_seq_no(self): + runtime_settings = self.ext_handler.properties.extensions + # If no runtime_settings available for this ext_handler, then return 0 (this is the behavior we follow + # for update_settings) + if not runtime_settings or len(runtime_settings) == 0: + return "0" + # Currently for every runtime settings we use the same sequence number + # (Check : def parse_plugin_settings(self, ext_handler, plugin_settings) in wire.py) + # Will have to revisit once the feature to enable multiple runtime settings is rolled out by CRP + return self.ext_handler.properties.extensions[0].sequenceNumber + class HandlerEnvironment(object): def __init__(self, data): @@ -1083,6 +1427,7 @@ def get_heartbeat_file(self): return self.data["handlerEnvironment"]["heartbeatFile"] + class HandlerManifest(object): def __init__(self, data): if data is None or data['handlerManifest'] is None: @@ -1110,12 +1455,6 @@ def get_disable_command(self): return self.data['handlerManifest']["disableCommand"] - def is_reboot_after_install(self): - """ - Deprecated - """ - return False - def is_report_heartbeat(self): return self.data['handlerManifest'].get('reportHeartbeat', False) @@ -1124,3 +1463,6 @@ if update_mode is None: return True return update_mode.lower() == "updatewithinstall" + + def is_continue_on_update_failure(self): + return self.data['handlerManifest'].get('continueOnUpdateFailure', False) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/ga/__init__.py walinuxagent-2.2.45/azurelinuxagent/ga/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/ga/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/ga/__init__.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 
+1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,6 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/ga/monitor.py walinuxagent-2.2.45/azurelinuxagent/ga/monitor.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/ga/monitor.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/ga/monitor.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,35 +12,37 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import datetime import json import os import platform -import time import threading +import time import uuid import azurelinuxagent.common.conf as conf -import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.networkutil as networkutil -from azurelinuxagent.common.event import add_event, WALAEventOperation -from azurelinuxagent.common.exception import EventError, ProtocolError, OSUtilError +from azurelinuxagent.common.cgroupstelemetry import CGroupsTelemetry +from azurelinuxagent.common.errorstate import ErrorState +from azurelinuxagent.common.event import add_event, WALAEventOperation, CONTAINER_ID_ENV_VARIABLE, \ + get_container_id_from_env +from azurelinuxagent.common.exception import EventError, ProtocolError, OSUtilError, HttpError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.protocol import get_protocol_util -from azurelinuxagent.common.protocol.restapi import TelemetryEventParam, \ - TelemetryEventList, \ - TelemetryEvent, \ - set_properties +from azurelinuxagent.common.protocol.healthservice import HealthService +from azurelinuxagent.common.protocol.imds import get_imds_client +from azurelinuxagent.common.telemetryevent import TelemetryEvent, TelemetryEventParam, TelemetryEventList +from azurelinuxagent.common.datacontract import set_properties from azurelinuxagent.common.utils.restutil import IOErrorCounter -from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, getattrib +from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, getattrib, hash_strings from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ - DISTRO_CODE_NAME, AGENT_LONG_VERSION, \ - AGENT_NAME, CURRENT_AGENT, CURRENT_VERSION + DISTRO_CODE_NAME, AGENT_NAME, CURRENT_AGENT, 
CURRENT_VERSION, AGENT_EXECUTION_MODE def parse_event(data_str): @@ -85,22 +87,84 @@ return event +def generate_extension_metrics_telemetry_dictionary(schema_version=1.0, + performance_metrics=None): + if schema_version == 1.0: + telemetry_dict = {"SchemaVersion": 1.0} + if performance_metrics: + telemetry_dict["PerfMetrics"] = performance_metrics + return telemetry_dict + else: + return None + + def get_monitor_handler(): return MonitorHandler() class MonitorHandler(object): + EVENT_COLLECTION_PERIOD = datetime.timedelta(minutes=1) + TELEMETRY_HEARTBEAT_PERIOD = datetime.timedelta(minutes=30) + # extension metrics period + CGROUP_TELEMETRY_POLLING_PERIOD = datetime.timedelta(minutes=5) + CGROUP_TELEMETRY_REPORTING_PERIOD = datetime.timedelta(minutes=30) + # host plugin + HOST_PLUGIN_HEARTBEAT_PERIOD = datetime.timedelta(minutes=1) + HOST_PLUGIN_HEALTH_PERIOD = datetime.timedelta(minutes=5) + # imds + IMDS_HEARTBEAT_PERIOD = datetime.timedelta(minutes=1) + IMDS_HEALTH_PERIOD = datetime.timedelta(minutes=3) + + # Resetting loggers period + RESET_LOGGERS_PERIOD = datetime.timedelta(hours=12) + def __init__(self): self.osutil = get_osutil() self.protocol_util = get_protocol_util() + self.imds_client = get_imds_client() + + self.event_thread = None + self.last_reset_loggers_time = None + self.last_event_collection = None + self.last_telemetry_heartbeat = None + self.last_cgroup_polling_telemetry = None + self.last_cgroup_report_telemetry = None + self.last_host_plugin_heartbeat = None + self.last_imds_heartbeat = None + self.protocol = None + self.health_service = None + self.last_route_table_hash = b'' + self.last_nic_state = {} + + self.counter = 0 self.sysinfo = [] + self.should_run = True + self.heartbeat_id = str(uuid.uuid4()).upper() + self.host_plugin_errorstate = ErrorState(min_timedelta=MonitorHandler.HOST_PLUGIN_HEALTH_PERIOD) + self.imds_errorstate = ErrorState(min_timedelta=MonitorHandler.IMDS_HEALTH_PERIOD) def run(self): + self.init_protocols() 
self.init_sysinfo() + self.start() - event_thread = threading.Thread(target=self.daemon) - event_thread.setDaemon(True) - event_thread.start() + def stop(self): + self.should_run = False + if self.is_alive(): + self.event_thread.join() + + def init_protocols(self): + self.protocol = self.protocol_util.get_protocol() + self.health_service = HealthService(self.protocol.endpoint) + + def is_alive(self): + return self.event_thread is not None and self.event_thread.is_alive() + + def start(self): + self.event_thread = threading.Thread(target=self.daemon) + self.event_thread.setDaemon(True) + self.event_thread.setName("MonitorHandler") + self.event_thread.start() def init_sysinfo(self): osversion = "{0}:{1}-{2}-{3}:{4}".format(platform.system(), @@ -109,8 +173,7 @@ DISTRO_CODE_NAME, platform.release()) self.sysinfo.append(TelemetryEventParam("OSVersion", osversion)) - self.sysinfo.append( - TelemetryEventParam("GAVersion", CURRENT_AGENT)) + self.sysinfo.append(TelemetryEventParam("ExecutionMode", AGENT_EXECUTION_MODE)) try: ram = self.osutil.get_total_mem() @@ -118,11 +181,10 @@ self.sysinfo.append(TelemetryEventParam("RAM", ram)) self.sysinfo.append(TelemetryEventParam("Processors", processors)) except OSUtilError as e: - logger.warn("Failed to get system info: {0}", e) + logger.warn("Failed to get system info: {0}", ustr(e)) try: - protocol = self.protocol_util.get_protocol() - vminfo = protocol.get_vminfo() + vminfo = self.protocol.get_vminfo() self.sysinfo.append(TelemetryEventParam("VMName", vminfo.vmName)) self.sysinfo.append(TelemetryEventParam("TenantName", @@ -131,111 +193,322 @@ vminfo.roleName)) self.sysinfo.append(TelemetryEventParam("RoleInstanceName", vminfo.roleInstanceName)) - self.sysinfo.append(TelemetryEventParam("ContainerId", - vminfo.containerId)) except ProtocolError as e: - logger.warn("Failed to get system info: {0}", e) + logger.warn("Failed to get system info: {0}", ustr(e)) - def collect_event(self, evt_file_name): + try: + vminfo = 
self.imds_client.get_compute() + self.sysinfo.append(TelemetryEventParam('Location', + vminfo.location)) + self.sysinfo.append(TelemetryEventParam('SubscriptionId', + vminfo.subscriptionId)) + self.sysinfo.append(TelemetryEventParam('ResourceGroupName', + vminfo.resourceGroupName)) + self.sysinfo.append(TelemetryEventParam('VMId', + vminfo.vmId)) + self.sysinfo.append(TelemetryEventParam('ImageOrigin', + vminfo.image_origin)) + except (HttpError, ValueError) as e: + logger.warn("failed to get IMDS info: {0}", ustr(e)) + + @staticmethod + def collect_event(evt_file_name): try: logger.verbose("Found event file: {0}", evt_file_name) with open(evt_file_name, "rb") as evt_file: # if fail to open or delete the file, throw exception - data_str = evt_file.read().decode("utf-8", 'ignore') + data_str = evt_file.read().decode("utf-8") logger.verbose("Processed event file: {0}", evt_file_name) os.remove(evt_file_name) return data_str - except IOError as e: + except (IOError, UnicodeDecodeError) as e: + os.remove(evt_file_name) msg = "Failed to process {0}, {1}".format(evt_file_name, e) raise EventError(msg) def collect_and_send_events(self): - event_list = TelemetryEventList() - event_dir = os.path.join(conf.get_lib_dir(), "events") - event_files = os.listdir(event_dir) - for event_file in event_files: - if not event_file.endswith(".tld"): - continue - event_file_path = os.path.join(event_dir, event_file) + """ + Periodically read, parse, and send events located in the events folder. Currently, this is done every minute. + Any .tld file dropped in the events folder will be emitted. These event files can be created either by the + agent or the extensions. We don't have control over extension's events parameters, but we will override + any values they might have set for sys_info parameters. 
+ """ + if self.last_event_collection is None: + self.last_event_collection = datetime.datetime.utcnow() - MonitorHandler.EVENT_COLLECTION_PERIOD + + if datetime.datetime.utcnow() >= (self.last_event_collection + MonitorHandler.EVENT_COLLECTION_PERIOD): try: - data_str = self.collect_event(event_file_path) - except EventError as e: - logger.error("{0}", e) - continue + event_list = TelemetryEventList() + event_dir = os.path.join(conf.get_lib_dir(), "events") + event_files = os.listdir(event_dir) + for event_file in event_files: + if not event_file.endswith(".tld"): + continue + event_file_path = os.path.join(event_dir, event_file) + try: + data_str = self.collect_event(event_file_path) + except EventError as e: + logger.error("{0}", ustr(e)) + continue + + try: + event = parse_event(data_str) + self.add_sysinfo(event) + event_list.events.append(event) + except (ValueError, ProtocolError) as e: + logger.warn("Failed to decode event file: {0}", ustr(e)) + continue + + if len(event_list.events) == 0: + return + + try: + self.protocol.report_event(event_list) + except ProtocolError as e: + logger.error("{0}", ustr(e)) + except Exception as e: + logger.warn("Failed to send events: {0}", ustr(e)) + + self.last_event_collection = datetime.datetime.utcnow() + + def daemon(self): + min_delta = min(MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD, + MonitorHandler.CGROUP_TELEMETRY_POLLING_PERIOD, + MonitorHandler.CGROUP_TELEMETRY_REPORTING_PERIOD, + MonitorHandler.EVENT_COLLECTION_PERIOD, + MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD, + MonitorHandler.IMDS_HEARTBEAT_PERIOD).seconds + while self.should_run: + self.send_telemetry_heartbeat() + self.poll_telemetry_metrics() + self.send_telemetry_metrics() + self.collect_and_send_events() + self.send_host_plugin_heartbeat() + self.send_imds_heartbeat() + self.log_altered_network_configuration() + self.reset_loggers() + time.sleep(min_delta) + + def reset_loggers(self): + """ + The loggers maintain hash-tables in memory and they need to 
be cleaned up from time to time. + For reference, please check azurelinuxagent.common.logger.Logger and + azurelinuxagent.common.event.EventLogger classes + """ + time_now = datetime.datetime.utcnow() + if not self.last_reset_loggers_time: + self.last_reset_loggers_time = time_now + if time_now >= (self.last_reset_loggers_time + + MonitorHandler.RESET_LOGGERS_PERIOD): try: - event = parse_event(data_str) - self.add_sysinfo(event) - event_list.events.append(event) - except (ValueError, ProtocolError) as e: - logger.warn("Failed to decode event file: {0}", e) + logger.reset_periodic() + finally: + self.last_reset_loggers_time = time_now + + def add_sysinfo(self, event): + """ + This method is called after parsing the event file in the events folder and before emitting it. This means + all events, either coming from the agent or from the extensions, are passed through this method. The purpose + is to add a static list of sys_info parameters such as VMName, Region, RAM, etc. If the sys_info parameters + are already populated in the event, they will be overwritten by the sys_info values obtained from the agent. + Since the ContainerId parameter is only populated on the fly for the agent events because it is not a static + sys_info parameter, an event coming from an extension will not have it, so we explicitly add it. + :param event: Event to be enriched with sys_info parameters + :return: Event with all parameters added, ready to be reported + """ + sysinfo_names = [v.name for v in self.sysinfo] + final_parameters = [] + + # Refer: azurelinuxagent.common.event.EventLogger.add_default_parameters_to_event for agent specific values. + # + # Default fields are only populated by Agent and not the extension. Agent will fill up any event if they don't + # have the default params. Example: GAVersion and ContainerId are populated for agent events on the fly, + # but not for extension events. Add it if it's missing. 
+ default_values = [("ContainerId", get_container_id_from_env()), ("GAVersion", CURRENT_AGENT), + ("OpcodeName", ""), ("EventTid", 0), ("EventPid", 0), ("TaskName", ""), ("KeywordName", "")] + + for param in event.parameters: + # Discard any sys_info parameters already in the event, since they will be overwritten + if param.name in sysinfo_names: continue + final_parameters.append(param) - if len(event_list.events) == 0: - return + # Add sys_info params populated by the agent + final_parameters.extend(self.sysinfo) - try: - protocol = self.protocol_util.get_protocol() - protocol.report_event(event_list) - except ProtocolError as e: - logger.error("{0}", e) + for default_value in default_values: + if default_value[0] not in event: + final_parameters.append(TelemetryEventParam(default_value[0], default_value[1])) - def daemon(self): - period = datetime.timedelta(minutes=30) - protocol = self.protocol_util.get_protocol() - last_heartbeat = datetime.datetime.utcnow() - period - - # Create a new identifier on each restart and reset the counter - heartbeat_id = str(uuid.uuid4()).upper() - counter = 0 - while True: - if datetime.datetime.utcnow() >= (last_heartbeat + period): - last_heartbeat = datetime.datetime.utcnow() - incarnation = protocol.get_incarnation() - dropped_packets = self.osutil.get_firewall_dropped_packets( - protocol.endpoint) + event.parameters = final_parameters + + def send_imds_heartbeat(self): + """ + Send a health signal every IMDS_HEARTBEAT_PERIOD. The signal is 'Healthy' when we have + successfully called and validated a response in the last IMDS_HEALTH_PERIOD. 
+ """ + + if self.last_imds_heartbeat is None: + self.last_imds_heartbeat = datetime.datetime.utcnow() - MonitorHandler.IMDS_HEARTBEAT_PERIOD + + if datetime.datetime.utcnow() >= (self.last_imds_heartbeat + MonitorHandler.IMDS_HEARTBEAT_PERIOD): + try: + is_currently_healthy, response = self.imds_client.validate() + + if is_currently_healthy: + self.imds_errorstate.reset() + else: + self.imds_errorstate.incr() + + is_healthy = self.imds_errorstate.is_triggered() is False + logger.verbose("IMDS health: {0} [{1}]", is_healthy, response) - msg = "{0};{1};{2};{3}".format( - incarnation, counter, heartbeat_id, dropped_packets) + self.health_service.report_imds_status(is_healthy, response) + except Exception as e: + msg = "Exception sending imds heartbeat: {0}".format(ustr(e)) add_event( name=AGENT_NAME, version=CURRENT_VERSION, - op=WALAEventOperation.HeartBeat, - is_success=True, - message=msg) + op=WALAEventOperation.ImdsHeartbeat, + is_success=False, + message=msg, + log_event=False) + + self.last_imds_heartbeat = datetime.datetime.utcnow() + + def send_host_plugin_heartbeat(self): + """ + Send a health signal every HOST_PLUGIN_HEARTBEAT_PERIOD. The signal is 'Healthy' when we have been able to + communicate with HostGAPlugin at least once in the last HOST_PLUGIN_HEALTH_PERIOD. 
+ """ + if self.last_host_plugin_heartbeat is None: + self.last_host_plugin_heartbeat = datetime.datetime.utcnow() - MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD + + if datetime.datetime.utcnow() >= ( + self.last_host_plugin_heartbeat + MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD): + try: + host_plugin = self.protocol.client.get_host_plugin() + host_plugin.ensure_initialized() + is_currently_healthy = host_plugin.get_health() - counter += 1 + if is_currently_healthy: + self.host_plugin_errorstate.reset() + else: + self.host_plugin_errorstate.incr() - ioerrors = IOErrorCounter.get_and_reset() - hostplugin_errors = ioerrors.get("hostplugin") - protocol_errors = ioerrors.get("protocol") - other_errors = ioerrors.get("other") - - if hostplugin_errors > 0 or \ - protocol_errors > 0 or \ - other_errors > 0: - msg = "hostplugin:{0};protocol:{1};other:{2}".format( - hostplugin_errors, protocol_errors, other_errors) + is_healthy = self.host_plugin_errorstate.is_triggered() is False + logger.verbose("HostGAPlugin health: {0}", is_healthy) + + self.health_service.report_host_plugin_heartbeat(is_healthy) + + if not is_healthy: add_event( name=AGENT_NAME, version=CURRENT_VERSION, - op=WALAEventOperation.HttpErrors, + op=WALAEventOperation.HostPluginHeartbeatExtended, is_success=False, - msg=msg) + message='{0} since successful heartbeat'.format(self.host_plugin_errorstate.fail_time), + log_event=False) + except Exception as e: + msg = "Exception sending host plugin heartbeat: {0}".format(ustr(e)) + add_event( + name=AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.HostPluginHeartbeat, + is_success=False, + message=msg, + log_event=False) + + self.last_host_plugin_heartbeat = datetime.datetime.utcnow() + + def send_telemetry_heartbeat(self): + + if self.last_telemetry_heartbeat is None: + self.last_telemetry_heartbeat = datetime.datetime.utcnow() - MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD + + if datetime.datetime.utcnow() >= (self.last_telemetry_heartbeat + 
MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD): try: - self.collect_and_send_events() + incarnation = self.protocol.get_incarnation() + dropped_packets = self.osutil.get_firewall_dropped_packets(self.protocol.endpoint) + msg = "{0};{1};{2};{3}".format(incarnation, self.counter, self.heartbeat_id, dropped_packets) + + add_event( + name=AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.HeartBeat, + is_success=True, + message=msg, + log_event=False) + + self.counter += 1 + + io_errors = IOErrorCounter.get_and_reset() + hostplugin_errors = io_errors.get("hostplugin") + protocol_errors = io_errors.get("protocol") + other_errors = io_errors.get("other") + + if hostplugin_errors > 0 or protocol_errors > 0 or other_errors > 0: + msg = "hostplugin:{0};protocol:{1};other:{2}".format(hostplugin_errors, + protocol_errors, + other_errors) + add_event( + name=AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.HttpErrors, + is_success=True, + message=msg, + log_event=False) except Exception as e: - logger.warn("Failed to send events: {0}", e) - time.sleep(60) + logger.warn("Failed to send heartbeat: {0}", ustr(e)) - def add_sysinfo(self, event): - sysinfo_names = [v.name for v in self.sysinfo] - for param in event.parameters: - if param.name in sysinfo_names: - logger.verbose("Remove existing event parameter: [{0}:{1}]", - param.name, - param.value) - event.parameters.remove(param) - event.parameters.extend(self.sysinfo) + self.last_telemetry_heartbeat = datetime.datetime.utcnow() + + def poll_telemetry_metrics(self): + time_now = datetime.datetime.utcnow() + if not self.last_cgroup_polling_telemetry: + self.last_cgroup_polling_telemetry = time_now + + if time_now >= (self.last_cgroup_polling_telemetry + + MonitorHandler.CGROUP_TELEMETRY_POLLING_PERIOD): + CGroupsTelemetry.poll_all_tracked() + self.last_cgroup_polling_telemetry = time_now + + def send_telemetry_metrics(self): + time_now = datetime.datetime.utcnow() + + if not 
self.last_cgroup_report_telemetry: + self.last_cgroup_report_telemetry = time_now + + if time_now >= (self.last_cgroup_report_telemetry + MonitorHandler.CGROUP_TELEMETRY_REPORTING_PERIOD): + performance_metrics = CGroupsTelemetry.report_all_tracked() + self.last_cgroup_report_telemetry = time_now + + if performance_metrics: + message = generate_extension_metrics_telemetry_dictionary(schema_version=1.0, + performance_metrics=performance_metrics) + add_event(name=AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.ExtensionMetricsData, + is_success=True, + message=ustr(message), + log_event=False) + + def log_altered_network_configuration(self): + """ + Check various pieces of network configuration and, if altered since the last check, log the new state. + """ + raw_route_list = self.osutil.read_route_table() + digest = hash_strings(raw_route_list) + if digest != self.last_route_table_hash: + self.last_route_table_hash = digest + route_list = self.osutil.get_list_of_routes(raw_route_list) + logger.info("Route table: [{0}]".format(",".join(map(networkutil.RouteEntry.to_json, route_list)))) + + nic_state = self.osutil.get_nic_state() + if nic_state != self.last_nic_state: + description = "Initial" if self.last_nic_state == {} else "Updated" + logger.info("{0} NIC state: [{1}]".format(description, ", ".join(map(str, nic_state.values())))) + self.last_nic_state = nic_state diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/ga/remoteaccess.py walinuxagent-2.2.45/azurelinuxagent/ga/remoteaccess.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/ga/remoteaccess.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/ga/remoteaccess.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,158 @@ +# Microsoft Azure Linux Agent +# +# Copyright Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.6+ and Openssl 1.0+ +# + +import datetime +import os +import os.path +import traceback + +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.logger as logger + +from datetime import datetime, timedelta + +from azurelinuxagent.common.event import add_event, WALAEventOperation +from azurelinuxagent.common.exception import RemoteAccessError +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.utils.cryptutil import CryptUtil +from azurelinuxagent.common.protocol import get_protocol_util +from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION +from azurelinuxagent.common.osutil import get_osutil + +REMOTE_USR_EXPIRATION_FORMAT = "%a, %d %b %Y %H:%M:%S %Z" +DATE_FORMAT = "%Y-%m-%d" +TRANSPORT_PRIVATE_CERT = "TransportPrivate.pem" +REMOTE_ACCESS_ACCOUNT_COMMENT = "JIT_Account" +MAX_TRY_ATTEMPT = 5 +FAILED_ATTEMPT_THROTTLE = 1 + + +def get_remote_access_handler(): + return RemoteAccessHandler() + + +class RemoteAccessHandler(object): + def __init__(self): + self.os_util = get_osutil() + self.protocol_util = get_protocol_util() + self.protocol = None + self.cryptUtil = CryptUtil(conf.get_openssl_cmd()) + self.remote_access = None + self.incarnation = 0 + self.error_message = "" + + def run(self): + try: + if self.os_util.jit_enabled: + self.protocol = self.protocol_util.get_protocol() + current_incarnation = self.protocol.get_incarnation() + if self.incarnation != current_incarnation: + # something changed. Handle remote access if any. 
+ self.incarnation = current_incarnation + self.remote_access = self.protocol.client.get_remote_access() + self.handle_remote_access() + except Exception as e: + msg = u"Exception processing remote access handler: {0} {1}".format(ustr(e), traceback.format_exc()) + logger.error(msg) + add_event(AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.RemoteAccessHandling, + is_success=False, + message=msg) + + def handle_remote_access(self): + # Get JIT user accounts. + all_users = self.os_util.get_users() + existing_jit_users = set(u[0] for u in all_users if self.validate_jit_user(u[4])) + self.err_message = "" + if self.remote_access is not None: + goal_state_users = set(u.name for u in self.remote_access.user_list.users) + for acc in self.remote_access.user_list.users: + try: + raw_expiration = acc.expiration + account_expiration = datetime.strptime(raw_expiration, REMOTE_USR_EXPIRATION_FORMAT) + now = datetime.utcnow() + if acc.name not in existing_jit_users and now < account_expiration: + self.add_user(acc.name, acc.encrypted_password, account_expiration) + elif acc.name in existing_jit_users and now > account_expiration: + # user account expired, delete it. + logger.info("user {0} expired from remote_access".format(acc.name)) + self.remove_user(acc.name) + except RemoteAccessError as rae: + self.err_message = self.err_message + "Error processing user {0}. Exception: {1}"\ + .format(acc.name, ustr(rae)) + for user in existing_jit_users: + try: + if user not in goal_state_users: + # user explicitly removed + logger.info("User {0} removed from remote_access".format(user)) + self.remove_user(user) + except RemoteAccessError as rae: + self.err_message = self.err_message + "Error removing user {0}. Exception: {1}"\ + .format(user, ustr(rae)) + else: + # All users removed, remove any remaining JIT accounts. + for user in existing_jit_users: + try: + logger.info("User {0} removed from remote_access. 
remote_access empty".format(user)) + self.remove_user(user) + except RemoteAccessError as rae: + self.err_message = self.err_message + "Error removing user {0}. Exception: {1}"\ + .format(user, ustr(rae)) + + def validate_jit_user(self, comment): + return comment == REMOTE_ACCESS_ACCOUNT_COMMENT + + def add_user(self, username, encrypted_password, account_expiration): + try: + expiration_date = (account_expiration + timedelta(days=1)).strftime(DATE_FORMAT) + logger.verbose("Adding user {0} with expiration date {1}".format(username, expiration_date)) + self.os_util.useradd(username, expiration_date, REMOTE_ACCESS_ACCOUNT_COMMENT) + except Exception as e: + raise RemoteAccessError("Error adding user {0}. {1}".format(username, ustr(e))) + try: + prv_key = os.path.join(conf.get_lib_dir(), TRANSPORT_PRIVATE_CERT) + pwd = self.cryptUtil.decrypt_secret(encrypted_password, prv_key) + self.os_util.chpasswd(username, pwd, conf.get_password_cryptid(), conf.get_password_crypt_salt_len()) + self.os_util.conf_sudoer(username) + logger.info("User '{0}' added successfully with expiration in {1}".format(username, expiration_date)) + except Exception as e: + error = "Error adding user {0}. 
{1} ".format(username, str(e)) + try: + self.handle_failed_create(username) + error += "cleanup successful" + except RemoteAccessError as rae: + error += "and error cleaning up {0}".format(str(rae)) + raise RemoteAccessError("Error adding user {0} cleanup successful".format(username), ustr(e)) + + def handle_failed_create(self, username): + try: + self.delete_user(username) + except Exception as e: + raise RemoteAccessError("Failed to clean up after account creation for {0}.".format(username), e) + + def remove_user(self, username): + try: + self.delete_user(username) + except Exception as e: + raise RemoteAccessError("Failed to delete user {0}".format(username), e) + + def delete_user(self, username): + self.os_util.del_account(username) + logger.info("User deleted {0}".format(username)) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/ga/update.py walinuxagent-2.2.45/azurelinuxagent/ga/update.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/ga/update.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/ga/update.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Windows Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,13 +14,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import glob import json import os import platform +import random import re import shutil import signal @@ -38,6 +39,7 @@ import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.restutil as restutil import azurelinuxagent.common.utils.textutil as textutil +from azurelinuxagent.common.cgroupconfigurator import CGroupConfigurator from azurelinuxagent.common.event import add_event, add_periodic, \ elapsed_milliseconds, \ @@ -54,7 +56,7 @@ from azurelinuxagent.common.version import AGENT_NAME, AGENT_VERSION, AGENT_LONG_VERSION, \ AGENT_DIR_GLOB, AGENT_PKG_GLOB, \ AGENT_PATTERN, AGENT_NAME_PATTERN, AGENT_DIR_PATTERN, \ - CURRENT_AGENT, CURRENT_VERSION, \ + CURRENT_AGENT, CURRENT_VERSION, DISTRO_NAME, DISTRO_VERSION, \ is_current_agent_installed from azurelinuxagent.ga.exthandlers import HandlerManifest @@ -71,10 +73,11 @@ MAX_FAILURE = 3 # Max failure allowed for agent before blacklisted GOAL_STATE_INTERVAL = 3 +GOAL_STATE_INTERVAL_DISABLED = 5 * 60 ORPHAN_WAIT_INTERVAL = 15 * 60 -AGENT_SENTINAL_FILE = "current_version" +AGENT_SENTINEL_FILE = "current_version" READONLY_FILE_GLOBS = [ "*.crt", @@ -84,6 +87,7 @@ "ovf-env.xml" ] + def get_update_handler(): return UpdateHandler() @@ -162,20 +166,20 @@ logger.verbose(u"Agent {0} launched with command '{1}'", agent_name, agent_cmd) - # If the most current agent is the installed agent and update is enabled, - # assume updates are likely available and poll every second. - # This reduces the start-up impact of finding / launching agent updates on - # fresh VMs. 
- if latest_agent is None and conf.get_autoupdate_enabled(): - poll_interval = 1 - else: - poll_interval = CHILD_POLL_INTERVAL + # Setting the poll interval to poll every second to reduce the agent provisioning time; + # The daemon shouldn't wait for 60secs before starting the ext-handler in case the + # ext-handler kills itself during agent-update during the first 15 mins (CHILD_HEALTH_INTERVAL) + poll_interval = 1 ret = None start_time = time.time() while (time.time() - start_time) < CHILD_HEALTH_INTERVAL: time.sleep(poll_interval) - ret = self.child_process.poll() + try: + ret = self.child_process.poll() + except OSError: + # if child_process has terminated, calling poll could raise an exception + ret = -1 if ret is not None: break @@ -189,7 +193,8 @@ version=agent_version, op=WALAEventOperation.Enable, is_success=True, - message=msg) + message=msg, + log_event=False) if ret is None: ret = self.child_process.wait() @@ -224,49 +229,87 @@ agent_cmd, ustr(e)) logger.warn(msg) + detailed_message = '{0} {1}'.format(msg, traceback.format_exc()) add_event( AGENT_NAME, version=agent_version, op=WALAEventOperation.Enable, is_success=False, - message=msg) + message=detailed_message) if latest_agent is not None: latest_agent.mark_failure(is_fatal=True) self.child_process = None return - def run(self): + def run(self, debug=False): """ This is the main loop which watches for agent and extension updates. """ try: + # NOTE: Do not add any telemetry events until after the monitoring handler has been started with the + # call to 'monitor_thread.run()'. That method call initializes the protocol, which is needed in order to + # load the goal state and update the container id in memory. Any telemetry events sent before this happens + # will result in an uninitialized container id value. + logger.info(u"Agent {0} is running as the goal state agent", CURRENT_AGENT) + # Log OS-specific info locally. 
+ os_info_msg = u"Distro info: {0} {1}, osutil class being used: {2}, " \ + u"agent service name: {3}".format(DISTRO_NAME, DISTRO_VERSION, + type(self.osutil).__name__, self.osutil.service_name) + logger.info(os_info_msg) + # Launch monitoring threads from azurelinuxagent.ga.monitor import get_monitor_handler - get_monitor_handler().run() + monitor_thread = get_monitor_handler() + monitor_thread.run() + + # NOTE: Any telemetry events added from this point on will be properly populated with the container id. from azurelinuxagent.ga.env import get_env_handler - get_env_handler().run() + env_thread = get_env_handler() + env_thread.run() from azurelinuxagent.ga.exthandlers import get_exthandlers_handler, migrate_handler_state exthandlers_handler = get_exthandlers_handler() migrate_handler_state() + from azurelinuxagent.ga.remoteaccess import get_remote_access_handler + remote_access_handler = get_remote_access_handler() + self._ensure_no_orphans() self._emit_restart_event() self._ensure_partition_assigned() self._ensure_readonly_files() + self._ensure_cgroups_initialized() + + # Send OS-specific info as a telemetry event after the monitoring thread has been initialized, and with + # it the container id too. 
+ add_event(AGENT_NAME, + op=WALAEventOperation.OSInfo, + message=os_info_msg) + + goal_state_interval = GOAL_STATE_INTERVAL \ + if conf.get_extensions_enabled() \ + else GOAL_STATE_INTERVAL_DISABLED while self.running: - if self._is_orphaned: + if not debug and self._is_orphaned: logger.info("Agent {0} is an orphan -- exiting", CURRENT_AGENT) break + if not monitor_thread.is_alive(): + logger.warn(u"Monitor thread died, restarting") + monitor_thread.start() + + if not env_thread.is_alive(): + logger.warn(u"Environment thread died, restarting") + env_thread.start() + if self._upgrade_available(): available_agent = self.get_latest_agent() if available_agent is None: @@ -285,24 +328,25 @@ last_etag = exthandlers_handler.last_etag exthandlers_handler.run() + remote_access_handler.run() + if last_etag != exthandlers_handler.last_etag: self._ensure_readonly_files() + duration = elapsed_milliseconds(utc_start) + logger.info('ProcessGoalState completed [incarnation {0}; {1} ms]', + exthandlers_handler.last_etag, + duration) add_event( AGENT_NAME, - version=CURRENT_VERSION, op=WALAEventOperation.ProcessGoalState, - is_success=True, - duration=elapsed_milliseconds(utc_start), - message="Incarnation {0}".format( - exthandlers_handler.last_etag), - log_event=True) + duration=duration, + message="Incarnation {0}".format(exthandlers_handler.last_etag)) - time.sleep(GOAL_STATE_INTERVAL) + time.sleep(goal_state_interval) except Exception as e: - msg = u"Agent {0} failed with exception: {1}".format( - CURRENT_AGENT, ustr(e)) - self._set_sentinal(msg=msg) + msg = u"Agent {0} failed with exception: {1}".format(CURRENT_AGENT, ustr(e)) + self._set_sentinel(msg=msg) logger.warn(msg) logger.warn(traceback.format_exc()) sys.exit(1) @@ -313,12 +357,7 @@ sys.exit(0) def forward_signal(self, signum, frame): - # Note: - # - At present, the handler is registered only for SIGTERM. - # However, clean shutdown is both SIGTERM and SIGKILL. 
- # A SIGKILL handler is not being registered at this time to - # minimize perturbing the code. - if signum in (signal.SIGTERM, signal.SIGKILL): + if signum == signal.SIGTERM: self._shutdown() if self.child_process is None: @@ -329,13 +368,14 @@ CURRENT_AGENT, signum, self.child_agent.name if self.child_agent is not None else CURRENT_AGENT) + self.child_process.send_signal(signum) if self.signal_handler not in (None, signal.SIG_IGN, signal.SIG_DFL): self.signal_handler(signum, frame) elif self.signal_handler is signal.SIG_DFL: if signum == signal.SIGTERM: - # TODO: This should set self.running to False vs. just exiting + self._shutdown() sys.exit(0) return @@ -360,7 +400,7 @@ try: if not self._is_clean_start: msg = u"Agent did not terminate cleanly: {0}".format( - fileutil.read_file(self._sentinal_file_path())) + fileutil.read_file(self._sentinel_file_path())) logger.info(msg) add_event( AGENT_NAME, @@ -371,7 +411,6 @@ except Exception: pass - self._set_sentinal(msg="Starting") return def _ensure_no_orphans(self, orphan_wait_interval=ORPHAN_WAIT_INTERVAL): @@ -425,6 +464,12 @@ for path in glob.iglob(os.path.join(conf.get_lib_dir(), g)): os.chmod(path, stat.S_IRUSR) + def _ensure_cgroups_initialized(self): + configurator = CGroupConfigurator.get_instance() + configurator.create_agent_cgroups(track_cgroups=True) + configurator.cleanup_legacy_cgroups() + configurator.create_extension_cgroups_root() + def _evaluate_agent_health(self, latest_agent): """ Evaluate the health of the selected agent: If it is restarting @@ -469,10 +514,7 @@ def _get_host_plugin(self, protocol=None): return protocol.client.get_host_plugin() \ - if protocol and \ - type(protocol) is WireProtocol and \ - protocol.client \ - else None + if protocol and type(protocol) is WireProtocol and protocol.client else None def _get_pid_parts(self): pid_file = conf.get_agent_pid_file_path() @@ -489,7 +531,7 @@ @property def _is_clean_start(self): - return not os.path.isfile(self._sentinal_file_path()) + 
return not os.path.isfile(self._sentinel_file_path()) @property def _is_orphaned(self): @@ -534,7 +576,7 @@ known_versions = [agent.version for agent in self.agents] if CURRENT_VERSION not in known_versions: - logger.info( + logger.verbose( u"Running Agent {0} was not found in the agent manifest - adding to list", CURRENT_VERSION) known_versions.append(CURRENT_VERSION) @@ -559,33 +601,33 @@ self.agents.sort(key=lambda agent: agent.version, reverse=True) return - def _set_sentinal(self, agent=CURRENT_AGENT, msg="Unknown cause"): + def _set_sentinel(self, agent=CURRENT_AGENT, msg="Unknown cause"): try: fileutil.write_file( - self._sentinal_file_path(), + self._sentinel_file_path(), "[{0}] [{1}]".format(agent, msg)) except Exception as e: logger.warn( - u"Exception writing sentinal file {0}: {1}", - self._sentinal_file_path(), + u"Exception writing sentinel file {0}: {1}", + self._sentinel_file_path(), str(e)) return - def _sentinal_file_path(self): - return os.path.join(conf.get_lib_dir(), AGENT_SENTINAL_FILE) + def _sentinel_file_path(self): + return os.path.join(conf.get_lib_dir(), AGENT_SENTINEL_FILE) def _shutdown(self): self.running = False - if not os.path.isfile(self._sentinal_file_path()): + if not os.path.isfile(self._sentinel_file_path()): return try: - os.remove(self._sentinal_file_path()) + os.remove(self._sentinel_file_path()) except Exception as e: logger.warn( - u"Exception removing sentinal file {0}: {1}", - self._sentinal_file_path(), + u"Exception removing sentinel file {0}: {1}", + self._sentinel_file_path(), str(e)) return @@ -618,57 +660,47 @@ self.last_attempt_time = now protocol = self.protocol_util.get_protocol() - for update_goal_state in [False, True]: - try: - if update_goal_state: - protocol.update_goal_state(forced=True) + try: + manifest_list, etag = protocol.get_vmagent_manifests() - manifest_list, etag = protocol.get_vmagent_manifests() + manifests = [m for m in manifest_list.vmAgentManifests \ + if m.family == family and 
len(m.versionsManifestUris) > 0] + if len(manifests) == 0: + logger.verbose(u"Incarnation {0} has no {1} agent updates", + etag, family) + return False - manifests = [m for m in manifest_list.vmAgentManifests \ - if m.family == family and \ - len(m.versionsManifestUris) > 0] - if len(manifests) == 0: - logger.verbose(u"Incarnation {0} has no {1} agent updates", - etag, family) - return False - - pkg_list = protocol.get_vmagent_pkgs(manifests[0]) - - # Set the agents to those available for download at least as - # current as the existing agent and remove from disk any agent - # no longer reported to the VM. - # Note: - # The code leaves on disk available, but blacklisted, agents - # so as to preserve the state. Otherwise, those agents could be - # again downloaded and inappropriately retried. - host = self._get_host_plugin(protocol=protocol) - self._set_agents([GuestAgent(pkg=pkg, host=host) \ - for pkg in pkg_list.versions]) - - self._purge_agents() - self._filter_blacklisted_agents() - - # Return True if current agent is no longer available or an - # agent with a higher version number is available - return not self._is_version_eligible(base_version) \ - or (len(self.agents) > 0 \ - and self.agents[0].version > base_version) + pkg_list = protocol.get_vmagent_pkgs(manifests[0]) - except Exception as e: - if isinstance(e, ResourceGoneError): - continue + # Set the agents to those available for download at least as + # current as the existing agent and remove from disk any agent + # no longer reported to the VM. + # Note: + # The code leaves on disk available, but blacklisted, agents + # so as to preserve the state. Otherwise, those agents could be + # again downloaded and inappropriately retried. 
+ host = self._get_host_plugin(protocol=protocol) + self._set_agents([GuestAgent(pkg=pkg, host=host) for pkg in pkg_list.versions]) - msg = u"Exception retrieving agent manifests: {0}".format( - ustr(e)) - logger.warn(msg) - add_event( - AGENT_NAME, - op=WALAEventOperation.Download, - version=CURRENT_VERSION, - is_success=False, - message=msg) - return False + self._purge_agents() + self._filter_blacklisted_agents() + + # Return True if current agent is no longer available or an + # agent with a higher version number is available + return not self._is_version_eligible(base_version) \ + or (len(self.agents) > 0 and self.agents[0].version > base_version) + + except Exception as e: + msg = u"Exception retrieving agent manifests: {0}".format( + ustr(traceback.format_exc())) + logger.warn(msg) + add_event( + AGENT_NAME, + op=WALAEventOperation.Download, + version=CURRENT_VERSION, + is_success=False, + message=msg) + return False def _write_pid_file(self): pid_files = self._get_pid_files() @@ -727,6 +759,13 @@ if isinstance(e, ResourceGoneError): raise + # The agent was improperly blacklisting versions due to a timeout + # encountered while downloading a later version. Errors of type + # socket.error are IOError, so this should provide sufficient + # protection against a large class of I/O operation failures. 
+ if isinstance(e, IOError): + raise + # Note the failure, blacklist the agent if the package downloaded # - An exception with a downloaded package indicates the package # is corrupt (e.g., missing the HandlerManifest.json file) @@ -817,7 +856,9 @@ self._load_error() def _download(self): - for uri in self.pkg.uris: + uris_shuffled = self.pkg.uris + random.shuffle(uris_shuffled) + for uri in uris_shuffled: if not HostPluginProtocol.is_default_channel() and self._fetch(uri.uri): break @@ -859,6 +900,8 @@ def _fetch(self, uri, headers=None, use_proxy=True): package = None try: + is_healthy = True + error_response = '' resp = restutil.http_get(uri, use_proxy=use_proxy, headers=headers) if restutil.request_succeeded(resp): package = resp.read() @@ -867,8 +910,13 @@ asbin=True) logger.verbose(u"Agent {0} downloaded from {1}", self.name, uri) else: - logger.verbose("Fetch was unsuccessful [{0}]", - restutil.read_response_error(resp)) + error_response = restutil.read_response_error(resp) + logger.verbose("Fetch was unsuccessful [{0}]", error_response) + is_healthy = not restutil.request_failed_at_hostplugin(resp) + + if self.host is not None: + self.host.report_fetch_health(uri, is_healthy, source='GuestAgent', response=error_response) + except restutil.HttpError as http_error: if isinstance(http_error, ResourceGoneError): raise diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/__init__.py walinuxagent-2.2.45/azurelinuxagent/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/__init__.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/arch.py walinuxagent-2.2.45/azurelinuxagent/pa/deprovision/arch.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/arch.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/pa/deprovision/arch.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.utils.fileutil as fileutil diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/clearlinux.py walinuxagent-2.2.45/azurelinuxagent/pa/deprovision/clearlinux.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/clearlinux.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/pa/deprovision/clearlinux.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.utils.fileutil as fileutil diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/coreos.py walinuxagent-2.2.45/azurelinuxagent/pa/deprovision/coreos.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/coreos.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/pa/deprovision/coreos.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.utils.fileutil as fileutil diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/default.py walinuxagent-2.2.45/azurelinuxagent/pa/deprovision/default.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/default.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/pa/deprovision/default.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,21 +14,25 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import glob import os.path +import re import signal import sys import azurelinuxagent.common.conf as conf import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil +from azurelinuxagent.common import version from azurelinuxagent.common.exception import ProtocolError from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.protocol import get_protocol_util +from azurelinuxagent.ga.exthandlers import HANDLER_NAME_PATTERN + def read_input(message): if sys.version_info[0] >= 3: @@ -118,6 +122,21 @@ actions.append(DeprovisionAction(fileutil.rm_files, ["/var/lib/NetworkManager/dhclient-*.lease"])) + def del_ext_handler_files(self, warnings, actions): + ext_dirs = [d for d in os.listdir(conf.get_lib_dir()) + if os.path.isdir(os.path.join(conf.get_lib_dir(), d)) + and re.match(HANDLER_NAME_PATTERN, d) is not None + and not version.is_agent_path(d)] + + for ext_dir in ext_dirs: + ext_base = os.path.join(conf.get_lib_dir(), ext_dir) + files = glob.glob(os.path.join(ext_base, 'status', '*.status')) + files += glob.glob(os.path.join(ext_base, 'config', '*.settings')) + files += glob.glob(os.path.join(ext_base, 'config', 'HandlerStatus')) + files += glob.glob(os.path.join(ext_base, 'mrseq')) + + if len(files) > 0: + actions.append(DeprovisionAction(fileutil.rm_files, files)) def del_lib_dir_files(self, warnings, actions): known_files = [ @@ -144,44 +163,6 @@ if len(files) > 0: actions.append(DeprovisionAction(fileutil.rm_files, files)) - def cloud_init_dirs(self, include_once=True): - dirs = [ - "/var/lib/cloud/instance", - "/var/lib/cloud/instances/", - "/var/lib/cloud/data" - ] - if include_once: - dirs += [ - "/var/lib/cloud/scripts/per-once" - ] - return dirs - - def cloud_init_files(self, include_once=True, deluser=False): - files = [] - if deluser: - files += [ - "/etc/sudoers.d/90-cloud-init-users" - ] - 
if include_once: - files += [ - "/var/lib/cloud/sem/config_scripts_per_once.once" - ] - return files - - def del_cloud_init(self, warnings, actions, - include_once=True, deluser=False): - dirs = [d for d in self.cloud_init_dirs(include_once=include_once) \ - if os.path.isdir(d)] - if len(dirs) > 0: - actions.append(DeprovisionAction(fileutil.rm_dirs, dirs)) - - files = [f for f in self.cloud_init_files( - include_once=include_once, - deluser=deluser) \ - if os.path.isfile(f)] - if len(files) > 0: - actions.append(DeprovisionAction(fileutil.rm_files, files)) - def reset_hostname(self, warnings, actions): localhost = ["localhost.localdomain"] actions.append(DeprovisionAction(self.osutil.set_hostname, @@ -203,7 +184,6 @@ if conf.get_delete_root_password(): self.del_root_password(warnings, actions) - self.del_cloud_init(warnings, actions, deluser=deluser) self.del_dirs(warnings, actions) self.del_files(warnings, actions) self.del_resolv(warnings, actions) @@ -217,10 +197,9 @@ warnings = [] actions = [] - self.del_cloud_init(warnings, actions, - include_once=False, deluser=False) self.del_dhcp_lease(warnings, actions) self.del_lib_dir_files(warnings, actions) + self.del_ext_handler_files(warnings, actions) return warnings, actions diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/factory.py walinuxagent-2.2.45/azurelinuxagent/pa/deprovision/factory.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/factory.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/pa/deprovision/factory.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,11 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # -import azurelinuxagent.common.logger as logger -from azurelinuxagent.common.utils.textutil import Version from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ DISTRO_FULL_NAME @@ -24,7 +22,11 @@ from .arch import ArchDeprovisionHandler from .clearlinux import ClearLinuxDeprovisionHandler from .coreos import CoreOSDeprovisionHandler -from .ubuntu import UbuntuDeprovisionHandler +from .ubuntu import UbuntuDeprovisionHandler, Ubuntu1804DeprovisionHandler + + +from distutils.version import LooseVersion as Version + def get_deprovision_handler(distro_name=DISTRO_NAME, distro_version=DISTRO_VERSION, @@ -32,10 +34,13 @@ if distro_name == "arch": return ArchDeprovisionHandler() if distro_name == "ubuntu": - return UbuntuDeprovisionHandler() + if Version(distro_version) >= Version('18.04'): + return Ubuntu1804DeprovisionHandler() + else: + return UbuntuDeprovisionHandler() if distro_name == "coreos": return CoreOSDeprovisionHandler() - if distro_name == "clear linux": + if "Clear Linux" in distro_full_name: return ClearLinuxDeprovisionHandler() return DeprovisionHandler() diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/__init__.py walinuxagent-2.2.45/azurelinuxagent/pa/deprovision/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/pa/deprovision/__init__.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.pa.deprovision.factory import get_deprovision_handler diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/ubuntu.py walinuxagent-2.2.45/azurelinuxagent/pa/deprovision/ubuntu.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/ubuntu.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/pa/deprovision/ubuntu.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import os @@ -40,3 +40,13 @@ files_to_del = ["/etc/resolvconf/resolv.conf.d/tail", "/etc/resolvconf/resolv.conf.d/original"] actions.append(DeprovisionAction(fileutil.rm_files, files_to_del)) + + +class Ubuntu1804DeprovisionHandler(UbuntuDeprovisionHandler): + def __init__(self): + super(Ubuntu1804DeprovisionHandler, self).__init__() + + def del_resolv(self, warnings, actions): + # no changes will be made to /etc/resolv.conf + warnings.append("WARNING! 
/etc/resolv.conf will NOT be removed, this is a behavior change to earlier " + "versions of Ubuntu.") diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/__init__.py walinuxagent-2.2.45/azurelinuxagent/pa/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/pa/__init__.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,6 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/provision/cloudinit.py walinuxagent-2.2.45/azurelinuxagent/pa/provision/cloudinit.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/provision/cloudinit.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/pa/provision/cloudinit.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,11 +14,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import os import os.path +import subprocess import time from datetime import datetime @@ -28,7 +29,7 @@ import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil -from azurelinuxagent.common.event import elapsed_milliseconds +from azurelinuxagent.common.event import elapsed_milliseconds, WALAEventOperation from azurelinuxagent.common.exception import ProvisionError, ProtocolError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.protocol import OVF_FILE_NAME @@ -41,13 +42,6 @@ super(CloudInitProvisionHandler, self).__init__() def run(self): - # If provision is enabled, run default provision handler - if conf.get_provision_enabled(): - logger.warn("Provisioning flag is enabled, which overrides using " - "cloud-init; running the default provisioning code") - super(CloudInitProvisionHandler, self).run() - return - try: if super(CloudInitProvisionHandler, self).is_provisioned(): logger.info("Provisioning already completed, skipping.") @@ -64,17 +58,18 @@ logger.info("Finished provisioning") self.report_ready(thumbprint) - self.report_event("Provision succeed", + self.report_event("Provisioning with cloud-init succeeded ({0}s)".format(self._get_uptime_seconds()), is_success=True, duration=elapsed_milliseconds(utc_start)) except ProvisionError as e: - logger.error("Provisioning failed: {0}", ustr(e)) + msg = "Provisioning with cloud-init failed: {0} ({1}s)".format(ustr(e), self._get_uptime_seconds()) + logger.error(msg) self.report_not_ready("ProvisioningFailed", ustr(e)) - self.report_event(ustr(e)) + self.report_event(msg) return - def wait_for_ovfenv(self, max_retry=360, sleep_time=5): + def wait_for_ovfenv(self, max_retry=1800, sleep_time=1): """ Wait for cloud-init to copy ovf-env.xml file from provision ISO """ @@ -82,7 +77,8 @@ for retry in range(0, max_retry): if os.path.isfile(ovf_file_path): 
try: - OvfEnv(fileutil.read_file(ovf_file_path)) + ovf_env = OvfEnv(fileutil.read_file(ovf_file_path)) + self.handle_provision_guest_agent(ovf_env.provision_guest_agent) return except ProtocolError as pe: raise ProvisionError("OVF xml could not be parsed " @@ -130,3 +126,69 @@ raise ProvisionError("Giving up, ssh host key was not found at {0} " "after {1}s".format(path, max_retry * sleep_time)) + +def _cloud_init_is_enabled_systemd(): + """ + Determine whether or not cloud-init is enabled on a systemd machine. + + Args: + None + + Returns: + bool: True if cloud-init is enabled, False if otherwise. + """ + + try: + systemctl_output = subprocess.check_output([ + 'systemctl', + 'is-enabled', + 'cloud-init-local.service' + ], stderr=subprocess.STDOUT).decode('utf-8').replace('\n', '') + + unit_is_enabled = systemctl_output == 'enabled' + except Exception as exc: + logger.info('Error getting cloud-init enabled status from systemctl: {0}'.format(exc)) + unit_is_enabled = False + + return unit_is_enabled + +def _cloud_init_is_enabled_service(): + """ + Determine whether or not cloud-init is enabled on a non-systemd machine. + + Args: + None + + Returns: + bool: True if cloud-init is enabled, False if otherwise. + """ + + try: + subprocess.check_output([ + 'service', + 'cloud-init', + 'status' + ], stderr=subprocess.STDOUT) + + unit_is_enabled = True + except Exception as exc: + logger.info('Error getting cloud-init enabled status from service: {0}'.format(exc)) + unit_is_enabled = False + + return unit_is_enabled + +def cloud_init_is_enabled(): + """ + Determine whether or not cloud-init is enabled. + + Args: + None + + Returns: + bool: True if cloud-init is enabled, False if otherwise. 
+ """ + + unit_is_enabled = _cloud_init_is_enabled_systemd() or _cloud_init_is_enabled_service() + logger.info('cloud-init is enabled: {0}'.format(unit_is_enabled)) + + return unit_is_enabled diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/provision/default.py walinuxagent-2.2.45/azurelinuxagent/pa/provision/default.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/provision/default.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/pa/provision/default.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # """ @@ -77,7 +77,7 @@ logger.info("Copying ovf-env.xml") ovf_env = self.protocol_util.copy_ovf_env() - self.protocol_util.get_protocol_by_file() + self.protocol_util.get_protocol(by_file=True) self.report_not_ready("Provisioning", "Starting") logger.info("Starting provisioning") @@ -88,17 +88,20 @@ self.write_provisioned() - self.report_event("Provision succeed", + self.report_event("Provisioning succeeded ({0}s)".format(self._get_uptime_seconds()), is_success=True, duration=elapsed_milliseconds(utc_start)) + self.handle_provision_guest_agent(ovf_env.provision_guest_agent) + self.report_ready(thumbprint) logger.info("Provisioning complete") except (ProtocolError, ProvisionError) as e: + msg = "Provisioning failed: {0} ({1}s)".format(ustr(e), self._get_uptime_seconds()) + logger.error(msg) self.report_not_ready("ProvisioningFailed", ustr(e)) - self.report_event(ustr(e)) - logger.error("Provisioning failed: {0}", ustr(e)) + self.report_event(msg, is_success=False) return @staticmethod @@ -110,20 
+113,30 @@ pids = [] for pid in pids: try: - pname = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read() - if CLOUD_INIT_REGEX.match(pname): - is_running = True - msg = "cloud-init is running [PID {0}, {1}]".format(pid, - pname) - if is_expected: - logger.verbose(msg) - else: - logger.error(msg) - break + with open(os.path.join('/proc', pid, 'cmdline'), 'rb') as fh: + pname = fh.read() + if CLOUD_INIT_REGEX.match(pname): + is_running = True + msg = "cloud-init is running [PID {0}, {1}]".format(pid, + pname) + if is_expected: + logger.verbose(msg) + else: + logger.error(msg) + break except IOError: continue return is_running == is_expected + @staticmethod + def _get_uptime_seconds(): + try: + with open('/proc/uptime') as fh: + uptime, _ = fh.readline().split() + return uptime + except: + return 0 + def reg_ssh_host_key(self): keypair_type = conf.get_ssh_host_keypair_type() if conf.get_regenerate_ssh_host_key(): @@ -190,6 +203,19 @@ self.provisioned_file_path(), get_osutil().get_instance_id()) + @staticmethod + def write_agent_disabled(): + logger.warn("Disabling guest agent in accordance with ovf-env.xml") + fileutil.write_file(conf.get_disable_agent_file_path(), '') + + def handle_provision_guest_agent(self, provision_guest_agent): + self.report_event(message=provision_guest_agent, + is_success=True, + duration=0, + operation=WALAEventOperation.ProvisionGuestAgent) + if provision_guest_agent and provision_guest_agent.lower() == 'false': + self.write_agent_disabled() + def provision(self, ovfenv): logger.info("Handle ovf-env.xml.") try: @@ -264,12 +290,13 @@ logger.info("Deploy ssh key pairs.") self.osutil.deploy_ssh_keypair(ovfenv.username, keypair) - def report_event(self, message, is_success=False, duration=0): + def report_event(self, message, is_success=False, duration=0, + operation=WALAEventOperation.Provision): add_event(name=AGENT_NAME, message=message, duration=duration, is_success=is_success, - op=WALAEventOperation.Provision) + op=operation) def 
report_not_ready(self, sub_status, description): status = ProvisionStatus(status="NotReady", subStatus=sub_status, diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/provision/factory.py walinuxagent-2.2.45/azurelinuxagent/pa/provision/factory.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/provision/factory.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/pa/provision/factory.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,25 +12,28 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.conf as conf -import azurelinuxagent.common.logger as logger - -from azurelinuxagent.common.utils.textutil import Version +from azurelinuxagent.common import logger from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ DISTRO_FULL_NAME from .default import ProvisionHandler -from .cloudinit import CloudInitProvisionHandler +from .cloudinit import CloudInitProvisionHandler, cloud_init_is_enabled -def get_provision_handler(distro_name=DISTRO_NAME, +def get_provision_handler(distro_name=DISTRO_NAME, distro_version=DISTRO_VERSION, distro_full_name=DISTRO_FULL_NAME): - if conf.get_provision_cloudinit(): + provisioning_agent = conf.get_provisioning_agent() + + if provisioning_agent == 'cloud-init' or ( + provisioning_agent == 'auto' and + cloud_init_is_enabled()): + logger.info('Using cloud-init for provisioning') return CloudInitProvisionHandler() + logger.info('Using waagent for provisioning') return ProvisionHandler() - diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/provision/__init__.py 
walinuxagent-2.2.45/azurelinuxagent/pa/provision/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/provision/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/pa/provision/__init__.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.pa.provision.factory import get_provision_handler diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/rdma/centos.py walinuxagent-2.2.45/azurelinuxagent/pa/rdma/centos.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/rdma/centos.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/pa/rdma/centos.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import glob @@ -59,7 +59,7 @@ # Find out RDMA firmware version and see if the existing package needs # updating or if the package is missing altogether (and install it) - fw_version = RDMAHandler.get_rdma_version() + fw_version = self.get_rdma_version() if not fw_version: raise Exception('Cannot determine RDMA firmware version') logger.info("RDMA: found firmware version: {0}".format(fw_version)) @@ -241,4 +241,4 @@ raise Exception("RDMA: failed to install kvp daemon package '%s'" % kvp_pkg_to_install) logger.info("RDMA: package '%s' successfully installed" % kvp_pkg_to_install) logger.info("RDMA: Machine will now be rebooted.") - self.reboot_system() \ No newline at end of file + self.reboot_system() diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/rdma/factory.py walinuxagent-2.2.45/azurelinuxagent/pa/rdma/factory.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/rdma/factory.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/pa/rdma/factory.py 2019-11-07 00:36:56.000000000 +0000 @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.logger as logger @@ -23,6 +23,8 @@ from .centos import CentOSRDMAHandler from .ubuntu import UbuntuRDMAHandler +from distutils.version import LooseVersion as Version + def get_rdma_handler( distro_full_name=DISTRO_FULL_NAME, @@ -30,12 +32,13 @@ ): """Return the handler object for RDMA driver handling""" if ( - distro_full_name == 'SUSE Linux Enterprise Server' and - int(distro_version) > 11 + (distro_full_name == 'SUSE Linux Enterprise Server' or + distro_full_name == 'SLES') and + Version(distro_version) > Version('11') ): return SUSERDMAHandler() - if distro_full_name == 'CentOS Linux' or distro_full_name == 'CentOS': + if distro_full_name == 'CentOS Linux' or distro_full_name == 'CentOS' or distro_full_name == 'Red Hat Enterprise Linux Server': return CentOSRDMAHandler(distro_version) if distro_full_name == 'Ubuntu': diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/rdma/__init__.py walinuxagent-2.2.45/azurelinuxagent/pa/rdma/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/rdma/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/pa/rdma/__init__.py 2019-11-07 00:36:56.000000000 +0000 @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.pa.rdma.factory import get_rdma_handler diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/rdma/suse.py walinuxagent-2.2.45/azurelinuxagent/pa/rdma/suse.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/rdma/suse.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/pa/rdma/suse.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2017 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import glob @@ -28,7 +28,7 @@ def install_driver(self): """Install the appropriate driver package for the RDMA firmware""" - fw_version = RDMAHandler.get_rdma_version() + fw_version = self.get_rdma_version() if not fw_version: error_msg = 'RDMA: Could not determine firmware version. ' error_msg += 'Therefore, no driver will be installed.' 
@@ -40,7 +40,23 @@ zypper_remove = 'zypper -n rm %s' zypper_search = 'zypper -n se -s %s' zypper_unlock = 'zypper removelock %s' - package_name = 'msft-rdma-kmp-default' + package_name = 'dummy' + # Figure out the kernel that is running to find the proper kmp + cmd = 'uname -r' + status, kernel_release = shellutil.run_get_output(cmd) + if 'default' in kernel_release: + package_name = 'msft-rdma-kmp-default' + info_msg = 'RDMA: Detected kernel-default' + logger.info(info_msg) + elif 'azure' in kernel_release: + package_name = 'msft-rdma-kmp-azure' + info_msg = 'RDMA: Detected kernel-azure' + logger.info(info_msg) + else: + error_msg = 'RDMA: Could not detect kernel build, unable to ' + error_msg += 'load kernel module. Kernel release: "%s"' + logger.error(error_msg % kernel_release) + return cmd = zypper_search % package_name status, repo_package_info = shellutil.run_get_output(cmd) driver_package_versions = [] @@ -91,7 +107,7 @@ logger.info("Package '%s' is not a match." % entry) else: logger.info("Package '%s' is a match. Installing." % entry) - complete_name = '%s-%s' % (package_name, version) + complete_name = '%s-%s' % (package_name, entry) cmd = zypper_install % complete_name result = shellutil.run(cmd) if result: diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/rdma/ubuntu.py walinuxagent-2.2.45/azurelinuxagent/pa/rdma/ubuntu.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/rdma/ubuntu.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/azurelinuxagent/pa/rdma/ubuntu.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import glob @@ -32,7 +32,7 @@ def install_driver(self): #Install the appropriate driver package for the RDMA firmware - nd_version = RDMAHandler.get_rdma_version() + nd_version = self.get_rdma_version() if not nd_version: logger.error("RDMA: Could not determine firmware version. No driver will be installed") return diff -Nru walinuxagent-2.2.21+really2.2.20/bin/waagent walinuxagent-2.2.45/bin/waagent --- walinuxagent-2.2.21+really2.2.20/bin/waagent 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/bin/waagent 2019-11-07 00:36:56.000000000 +0000 @@ -16,7 +16,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.6+ and Openssl 1.0+ +# Requires Python 2.6 and Openssl 1.0+ # # Implements parts of RFC 2131, 1541, 1497 and # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx diff -Nru walinuxagent-2.2.21+really2.2.20/bin/waagent2.0 walinuxagent-2.2.45/bin/waagent2.0 --- walinuxagent-2.2.21+really2.2.20/bin/waagent2.0 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/bin/waagent2.0 2019-11-07 00:36:56.000000000 +0000 @@ -16,7 +16,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.6+ and Openssl 1.0+ +# Requires Python 2.6 and Openssl 1.0+ # # Implements parts of RFC 2131, 1541, 1497 and # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx diff -Nru walinuxagent-2.2.21+really2.2.20/Changelog walinuxagent-2.2.45/Changelog --- walinuxagent-2.2.21+really2.2.20/Changelog 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/Changelog 2019-11-07 00:36:56.000000000 +0000 @@ -1,6 +1,8 @@ WALinuxAgent Changelog ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| +Refer to releases WALinuxAgent release page: https://github.com/Azure/WALinuxAgent/releases for detailed changelog after v2.2.0 + 12 August 2016, v2.1.6 . Improved RDMA support . Extension state migration diff -Nru walinuxagent-2.2.21+really2.2.20/CODEOWNERS walinuxagent-2.2.45/CODEOWNERS --- walinuxagent-2.2.21+really2.2.20/CODEOWNERS 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/CODEOWNERS 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,14 @@ +# See https://help.github.com/articles/about-codeowners/ +# for more info about CODEOWNERS file + +# It uses the same pattern rule for gitignore file +# https://git-scm.com/docs/gitignore#_pattern_format + +# Provisioning Agent +# The Azure Linux Provisioning team is interested in getting notifications +# when there are requests for changes in the provisioning agent. For any +# questions, please feel free to reach out to thstring@microsoft.com. 
+/azurelinuxagent/pa/ @trstringer @anhvoms + +# Guest Agent team +* @narrieta @vrdmr @pgombar @larohra diff -Nru walinuxagent-2.2.21+really2.2.20/config/66-azure-storage.rules walinuxagent-2.2.45/config/66-azure-storage.rules --- walinuxagent-2.2.21+really2.2.20/config/66-azure-storage.rules 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/config/66-azure-storage.rules 2019-11-07 00:36:56.000000000 +0000 @@ -6,6 +6,7 @@ # The resource/resource has GUID of 0001 as the second value ATTRS{device_id}=="?00000000-0000-*", ENV{fabric_name}="root", GOTO="azure_names" ATTRS{device_id}=="?00000000-0001-*", ENV{fabric_name}="resource", GOTO="azure_names" +ATTRS{device_id}=="?00000001-0001-*", ENV{fabric_name}="BEK", GOTO="azure_names" # Wellknown SCSI controllers ATTRS{device_id}=="{f8b3781a-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi0", GOTO="azure_datadisk" ATTRS{device_id}=="{f8b3781b-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi1", GOTO="azure_datadisk" diff -Nru walinuxagent-2.2.21+really2.2.20/config/alpine/waagent.conf walinuxagent-2.2.45/config/alpine/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/alpine/waagent.conf 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/config/alpine/waagent.conf 2019-11-07 00:36:56.000000000 +0000 @@ -5,6 +5,10 @@ # Enable instance creation Provisioning.Enabled=y +# Enable extension handling. Do not disable this unless you do not need password reset, +# backup, monitoring, or any extension handling whatsoever. +Extensions.Enabled=y + # Rely on cloud-init to provision Provisioning.UseCloudInit=n @@ -14,7 +18,8 @@ # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y -# Supported values are "rsa", "dsa" and "ecdsa". +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. 
@@ -79,14 +84,18 @@ # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. -# Default is disabled -# EnableOverProvisioning=n +# Default is enabled +# EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services -# Note: -# - The default is false to protect the state of exising VMs OS.EnableFirewall=y + +# Enforce control groups limits on the agent and extensions +CGroups.EnforceLimits=n + +# CGroups which are excluded from limits, comma separated +CGroups.Excluded=customscript,runcommand diff -Nru walinuxagent-2.2.21+really2.2.20/config/arch/waagent.conf walinuxagent-2.2.45/config/arch/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/arch/waagent.conf 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/config/arch/waagent.conf 2019-11-07 00:36:56.000000000 +0000 @@ -14,7 +14,8 @@ # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y -# Supported values are "rsa", "dsa" and "ecdsa". +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. @@ -60,6 +61,9 @@ # Enable verbose logging (y|n) Logs.Verbose=n +# Enable Console logging, default is y +# Logs.Console=y + # Is FIPS enabled OS.EnableFIPS=n @@ -105,14 +109,12 @@ # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. -# Default is disabled -# EnableOverProvisioning=n +# Default is enabled +# EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services -# Note: -# - The default is false to protect the state of exising VMs OS.EnableFirewall=y diff -Nru walinuxagent-2.2.21+really2.2.20/config/bigip/waagent.conf walinuxagent-2.2.45/config/bigip/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/bigip/waagent.conf 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/config/bigip/waagent.conf 2019-11-07 00:36:56.000000000 +0000 @@ -16,6 +16,10 @@ # Enable instance creation Provisioning.Enabled=y +# Enable extension handling. Do not disable this unless you do not need password reset, +# backup, monitoring, or any extension handling whatsoever. +Extensions.Enabled=y + # Rely on cloud-init to provision Provisioning.UseCloudInit=n @@ -25,7 +29,8 @@ # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y -# Supported values are "rsa", "dsa" and "ecdsa". +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. @@ -54,6 +59,9 @@ # Enable verbose logging (y|n) Logs.Verbose=n +# Enable Console logging, default is y +# Logs.Console=y + # Is FIPS enabled OS.EnableFIPS=n @@ -83,14 +91,18 @@ # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. -# Default is disabled -# EnableOverProvisioning=n +# Default is enabled +# EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services -# Note: -# - The default is false to protect the state of exising VMs OS.EnableFirewall=y + +# Enforce control groups limits on the agent and extensions +CGroups.EnforceLimits=n + +# CGroups which are excluded from limits, comma separated +CGroups.Excluded=customscript,runcommand diff -Nru walinuxagent-2.2.21+really2.2.20/config/clearlinux/waagent.conf walinuxagent-2.2.45/config/clearlinux/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/clearlinux/waagent.conf 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/config/clearlinux/waagent.conf 2019-11-07 00:36:56.000000000 +0000 @@ -25,7 +25,8 @@ # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y -# Supported values are "rsa", "dsa" and "ecdsa". +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. @@ -59,6 +60,9 @@ # Enable verbose logging (y|n) Logs.Verbose=n +# Enable Console logging, default is y +# Logs.Console=y + # Is FIPS enabled OS.EnableFIPS=n @@ -77,8 +81,8 @@ # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. -# Default is disabled -# EnableOverProvisioning=n +# Default is enabled +# EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks @@ -86,5 +90,5 @@ # Add firewall rules to protect access to Azure host node services # Note: -# - The default is false to protect the state of exising VMs +# - The default is false to protect the state of existing VMs OS.EnableFirewall=y diff -Nru walinuxagent-2.2.21+really2.2.20/config/coreos/waagent.conf walinuxagent-2.2.45/config/coreos/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/coreos/waagent.conf 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/config/coreos/waagent.conf 2019-11-07 00:36:56.000000000 +0000 @@ -5,6 +5,10 @@ # Enable instance creation Provisioning.Enabled=y +# Enable extension handling. Do not disable this unless you do not need password reset, +# backup, monitoring, or any extension handling whatsoever. +Extensions.Enabled=y + # Rely on cloud-init to provision Provisioning.UseCloudInit=n @@ -14,7 +18,8 @@ # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=n -# Supported values are "rsa", "dsa" and "ecdsa". +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=ed25519 # Monitor host name changes and publish changes via DHCP requests. @@ -60,6 +65,9 @@ # Enable verbose logging (y|n) Logs.Verbose=n +# Enable Console logging, default is y +# Logs.Console=y + # Is FIPS enabled OS.EnableFIPS=n @@ -105,14 +113,18 @@ # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. -# Default is disabled -# EnableOverProvisioning=n +# Default is enabled +# EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks OS.AllowHTTP=y # Add firewall rules to protect access to Azure host node services -# Note: -# - The default is false to protect the state of exising VMs OS.EnableFirewall=y + +# Enforce control groups limits on the agent and extensions +CGroups.EnforceLimits=n + +# CGroups which are excluded from limits, comma separated +CGroups.Excluded=customscript,runcommand diff -Nru walinuxagent-2.2.21+really2.2.20/config/debian/waagent.conf walinuxagent-2.2.45/config/debian/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/debian/waagent.conf 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/config/debian/waagent.conf 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,132 @@ +# +# Microsoft Azure Linux Agent Configuration +# + +# Enable instance creation +Provisioning.Enabled=y + +# Enable extension handling. Do not disable this unless you do not need password reset, +# backup, monitoring, or any extension handling whatsoever. +Extensions.Enabled=y + +# Rely on cloud-init to provision +Provisioning.UseCloudInit=n + +# Password authentication for root account will be unavailable. +Provisioning.DeleteRootPassword=y + +# Generate fresh host key pair. +Provisioning.RegenerateSshHostKeyPair=y + +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. +Provisioning.SshHostKeyPairType=auto + +# Monitor host name changes and publish changes via DHCP requests. +Provisioning.MonitorHostName=y + +# Decode CustomData from Base64. +Provisioning.DecodeCustomData=n + +# Execute CustomData after provisioning. +Provisioning.ExecuteCustomData=n + +# Algorithm used by crypt when generating password hash. +#Provisioning.PasswordCryptId=6 + +# Length of random salt used when generating password hash. +#Provisioning.PasswordCryptSaltLength=10 + +# Allow reset password of sys user +Provisioning.AllowResetSysUser=n + +# Format if unformatted. 
If 'n', resource disk will not be mounted. +ResourceDisk.Format=y + +# File system on the resource disk +# Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. +ResourceDisk.Filesystem=ext4 + +# Mount point for the resource disk +ResourceDisk.MountPoint=/mnt/resource + +# Create and use swapfile on resource disk. +ResourceDisk.EnableSwap=n + +# Size of the swapfile. +ResourceDisk.SwapSizeMB=0 + +# Comma-separated list of mount options. See man(8) for valid options. +ResourceDisk.MountOptions=None + +# Enable verbose logging (y|n) +Logs.Verbose=n + +# Enable Console logging, default is y +# Logs.Console=y + +# Is FIPS enabled +OS.EnableFIPS=n + +# Root device timeout in seconds. +OS.RootDeviceScsiTimeout=300 + +# If "None", the system default version is used. +OS.OpensslPath=None + +# Set the SSH ClientAliveInterval +# OS.SshClientAliveInterval=180 + +# Set the path to SSH keys and configuration files +OS.SshDir=/etc/ssh + +# If set, agent will use proxy server to access internet +#HttpProxy.Host=None +#HttpProxy.Port=None + +# Detect Scvmm environment, default is n +# DetectScvmmEnv=n + +# +# Lib.Dir=/var/lib/waagent + +# +# DVD.MountPoint=/mnt/cdrom/secure + +# +# Pid.File=/var/run/waagent.pid + +# +# Extension.LogDir=/var/log/azure + +# +# Home.Dir=/home + +# Enable RDMA management and set up, should only be used in HPC images +# OS.EnableRDMA=y + +# Enable or disable goal state processing auto-update, default is enabled +# AutoUpdate.Enabled=y + +# Determine the update family, this should not be changed +# AutoUpdate.GAFamily=Prod + +# Determine if the overprovisioning feature is enabled. If yes, hold extension +# handling until inVMArtifactsProfile.OnHold is false. +# Default is enabled +# EnableOverProvisioning=y + +# Allow fallback to HTTP if HTTPS is unavailable +# Note: Allowing HTTP (vs. 
HTTPS) may cause security risks +# OS.AllowHTTP=n + +# Add firewall rules to protect access to Azure host node services +# Note: +# - The default is false to protect the state of existing VMs +OS.EnableFirewall=n + +# Enforce control groups limits on the agent and extensions +CGroups.EnforceLimits=n + +# CGroups which are excluded from limits, comma separated +CGroups.Excluded=customscript,runcommand diff -Nru walinuxagent-2.2.21+really2.2.20/config/freebsd/waagent.conf walinuxagent-2.2.45/config/freebsd/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/freebsd/waagent.conf 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/config/freebsd/waagent.conf 2019-11-07 00:36:56.000000000 +0000 @@ -5,6 +5,10 @@ # Enable instance creation Provisioning.Enabled=y +# Enable extension handling. Do not disable this unless you do not need password reset, +# backup, monitoring, or any extension handling whatsoever. +Extensions.Enabled=y + # Rely on cloud-init to provision Provisioning.UseCloudInit=n @@ -14,7 +18,8 @@ # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y -# Supported values are "rsa", "dsa" and "ecdsa". +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. @@ -54,6 +59,9 @@ # Enable verbose logging (y|n) Logs.Verbose=n +# Enable Console logging, default is y +# Logs.Console=y + # Is FIPS enabled OS.EnableFIPS=n @@ -103,14 +111,18 @@ # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. -# Default is disabled -# EnableOverProvisioning=n +# Default is enabled +# EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services -# Note: -# - The default is false to protect the state of exising VMs OS.EnableFirewall=y + +# Enforce control groups limits on the agent and extensions +CGroups.EnforceLimits=n + +# CGroups which are excluded from limits, comma separated +CGroups.Excluded=customscript,runcommand diff -Nru walinuxagent-2.2.21+really2.2.20/config/gaia/waagent.conf walinuxagent-2.2.45/config/gaia/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/gaia/waagent.conf 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/config/gaia/waagent.conf 2019-11-07 00:36:56.000000000 +0000 @@ -5,6 +5,10 @@ # Enable instance creation Provisioning.Enabled=y +# Enable extension handling. Do not disable this unless you do not need password reset, +# backup, monitoring, or any extension handling whatsoever. +Extensions.Enabled=y + # Rely on cloud-init to provision Provisioning.UseCloudInit=n @@ -14,7 +18,8 @@ # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=n -# Supported values are "rsa", "dsa" and "ecdsa". +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. @@ -57,6 +62,9 @@ # Enable verbose logging (y|n) Logs.Verbose=n +# Enable Console logging, default is y +# Logs.Console=y + # Is FIPS enabled OS.EnableFIPS=n @@ -102,14 +110,12 @@ # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. -# Default is disabled -# EnableOverProvisioning=n +# Default is enabled +# EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services -# Note: -# - The default is false to protect the state of exising VMs OS.EnableFirewall=y diff -Nru walinuxagent-2.2.21+really2.2.20/config/iosxe/waagent.conf walinuxagent-2.2.45/config/iosxe/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/iosxe/waagent.conf 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/config/iosxe/waagent.conf 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,122 @@ +# +# Microsoft Azure Linux Agent Configuration +# + +# Enable instance creation +Provisioning.Enabled=n + +# Rely on cloud-init to provision +Provisioning.UseCloudInit=n + +# Password authentication for root account will be unavailable. +Provisioning.DeleteRootPassword=y + +# Generate fresh host key pair. +Provisioning.RegenerateSshHostKeyPair=n + +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. +Provisioning.SshHostKeyPairType=rsa + +# Monitor host name changes and publish changes via DHCP requests. +Provisioning.MonitorHostName=n + +# Decode CustomData from Base64. +Provisioning.DecodeCustomData=n + +# Execute CustomData after provisioning. +Provisioning.ExecuteCustomData=n + +# Algorithm used by crypt when generating password hash. +#Provisioning.PasswordCryptId=6 + +# Length of random salt used when generating password hash. +#Provisioning.PasswordCryptSaltLength=10 + +# Allow reset password of sys user +Provisioning.AllowResetSysUser=n + +# Format if unformatted. If 'n', resource disk will not be mounted. +ResourceDisk.Format=n + +# File system on the resource disk +# Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. +ResourceDisk.Filesystem=ext4 + +# Mount point for the resource disk +ResourceDisk.MountPoint=/mnt/resource + +# Create and use swapfile on resource disk. +ResourceDisk.EnableSwap=n + +# Size of the swapfile. 
+ResourceDisk.SwapSizeMB=0 + +# Comma-separated list of mount options. See man(8) for valid options. +ResourceDisk.MountOptions=None + +# Enable verbose logging (y|n) +Logs.Verbose=n + +# Enable Console logging, default is y +# Logs.Console=y + +# Is FIPS enabled +OS.EnableFIPS=n + +# Root device timeout in seconds. +OS.RootDeviceScsiTimeout=300 + +# If "None", the system default version is used. +OS.OpensslPath=None + +# Set the SSH ClientAliveInterval +# OS.SshClientAliveInterval=180 + +# Set the path to SSH keys and configuration files +OS.SshDir=/etc/ssh + +# If set, agent will use proxy server to access internet +#HttpProxy.Host=None +#HttpProxy.Port=None + +# Detect Scvmm environment, default is n +# DetectScvmmEnv=n + +# +# Lib.Dir=/var/lib/waagent + +# +# DVD.MountPoint=/mnt/cdrom/secure + +# +# Pid.File=/var/run/waagent.pid + +# +# Extension.LogDir=/var/log/azure + +# +# Home.Dir=/home + +# Enable RDMA management and set up, should only be used in HPC images +# OS.EnableRDMA=y + +# Enable or disable goal state processing auto-update, default is enabled +AutoUpdate.Enabled=y + +# Determine the update family, this should not be changed +# AutoUpdate.GAFamily=Prod + +# Determine if the overprovisioning feature is enabled. If yes, hold extension +# handling until inVMArtifactsProfile.OnHold is false. +# Default is enabled +# EnableOverProvisioning=y + +# Allow fallback to HTTP if HTTPS is unavailable +# Note: Allowing HTTP (vs. 
HTTPS) may cause security risks +# OS.AllowHTTP=n + +# Add firewall rules to protect access to Azure host node services +# Note: +# - The default is false to protect the state of existing VMs +OS.EnableFirewall=y diff -Nru walinuxagent-2.2.21+really2.2.20/config/nsbsd/waagent.conf walinuxagent-2.2.45/config/nsbsd/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/nsbsd/waagent.conf 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/config/nsbsd/waagent.conf 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,120 @@ +# +# Microsoft Azure Linux Agent Configuration +# + +# Enable instance creation +Provisioning.Enabled=y + +# Rely on cloud-init to provision +Provisioning.UseCloudInit=n + +# Password authentication for root account will be unavailable. +Provisioning.DeleteRootPassword=n + +# Generate fresh host key pair. +Provisioning.RegenerateSshHostKeyPair=n + +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. +Provisioning.SshHostKeyPairType=rsa + +# Monitor host name changes and publish changes via DHCP requests. +Provisioning.MonitorHostName=y + +# Decode CustomData from Base64. +Provisioning.DecodeCustomData=n + +# Execute CustomData after provisioning. +Provisioning.ExecuteCustomData=n + +# Algorithm used by crypt when generating password hash. +#Provisioning.PasswordCryptId=6 + +# Length of random salt used when generating password hash. +#Provisioning.PasswordCryptSaltLength=10 + +# Format if unformatted. If 'n', resource disk will not be mounted. +ResourceDisk.Format=n + +# File system on the resource disk +# Typically ext3 or ext4. FreeBSD images should use 'ufs' here. +ResourceDisk.Filesystem=ufs + +# Mount point for the resource disk +ResourceDisk.MountPoint=/mnt/resource + +# Create and use swapfile on resource disk. +ResourceDisk.EnableSwap=n + +# Size of the swapfile. +ResourceDisk.SwapSizeMB=0 + +# Comma-separated list of mount options. 
See man(8) for valid options. +ResourceDisk.MountOptions=None + +# Enable verbose logging (y|n) TODO set n +Logs.Verbose=n + +# Enable Console logging, default is y +# Logs.Console=y + +# Is FIPS enabled +OS.EnableFIPS=n + +# Root device timeout in seconds. +OS.RootDeviceScsiTimeout=300 + +# If "None", the system default version is used. +OS.OpensslPath=None + +# Set the path to SSH keys and configuration files +OS.SshDir=/etc/ssh + +OS.PasswordPath=/etc/master.passwd + +OS.SudoersDir=/usr/local/etc/sudoers.d + +# If set, agent will use proxy server to access internet +#HttpProxy.Host=None +#HttpProxy.Port=None + +# Detect Scvmm environment, default is n +# DetectScvmmEnv=n + +# +Lib.Dir=/usr/Firewall/var/waagent + +# +# DVD.MountPoint=/mnt/cdrom/secure + +# +# Pid.File=/var/run/waagent.pid + +# +Extension.LogDir=/log/azure + +# +# Home.Dir=/home + +# Enable RDMA management and set up, should only be used in HPC images +# OS.EnableRDMA=y + +# Enable or disable goal state processing auto-update, default is enabled +AutoUpdate.Enabled=n + +# Determine the update family, this should not be changed +# AutoUpdate.GAFamily=Prod + +# Determine if the overprovisioning feature is enabled. If yes, hold extension +# handling until inVMArtifactsProfile.OnHold is false. +# Default is disabled +# EnableOverProvisioning=n + +# Allow fallback to HTTP if HTTPS is unavailable +# Note: Allowing HTTP (vs. 
HTTPS) may cause security risks +# OS.AllowHTTP=n + +# Add firewall rules to protect access to Azure host node services +# Note: +# - The default is false to protect the state of existing VMs +OS.EnableFirewall=n diff -Nru walinuxagent-2.2.21+really2.2.20/config/openbsd/waagent.conf walinuxagent-2.2.45/config/openbsd/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/openbsd/waagent.conf 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/config/openbsd/waagent.conf 2019-11-07 00:36:56.000000000 +0000 @@ -15,6 +15,7 @@ Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=auto # Monitor host name changes and publish changes via DHCP requests. @@ -54,6 +55,9 @@ # Enable verbose logging (y|n) Logs.Verbose=n +# Enable Console logging, default is y +# Logs.Console=y + # Is FIPS enabled OS.EnableFIPS=n @@ -101,8 +105,8 @@ # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. -# Default is disabled -# EnableOverProvisioning=n +# Default is enabled +# EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks @@ -110,5 +114,5 @@ # Add firewall rules to protect access to Azure host node services # Note: -# - The default is false to protect the state of exising VMs +# - The default is false to protect the state of existing VMs OS.EnableFirewall=y diff -Nru walinuxagent-2.2.21+really2.2.20/config/suse/waagent.conf walinuxagent-2.2.45/config/suse/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/suse/waagent.conf 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/config/suse/waagent.conf 2019-11-07 00:36:56.000000000 +0000 @@ -5,6 +5,10 @@ # Enable instance creation Provisioning.Enabled=y +# Enable extension handling. 
Do not disable this unless you do not need password reset, +# backup, monitoring, or any extension handling whatsoever. +Extensions.Enabled=y + # Rely on cloud-init to provision Provisioning.UseCloudInit=n @@ -14,7 +18,8 @@ # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y -# Supported values are "rsa", "dsa" and "ecdsa". +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. @@ -60,6 +65,9 @@ # Enable verbose logging (y|n) Logs.Verbose=n +# Enable Console logging, default is y +# Logs.Console=y + # Is FIPS enabled OS.EnableFIPS=n @@ -97,6 +105,9 @@ # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y +# Enable checking RDMA driver version and update +# OS.CheckRdmaDriver=y + # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y @@ -105,14 +116,12 @@ # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. -# Default is disabled -# EnableOverProvisioning=n +# Default is enabled +# EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services -# Note: -# - The default is false to protect the state of exising VMs OS.EnableFirewall=y diff -Nru walinuxagent-2.2.21+really2.2.20/config/ubuntu/waagent.conf walinuxagent-2.2.45/config/ubuntu/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/ubuntu/waagent.conf 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/config/ubuntu/waagent.conf 2019-11-07 00:36:56.000000000 +0000 @@ -5,6 +5,10 @@ # Enable instance creation Provisioning.Enabled=n +# Enable extension handling. 
Do not disable this unless you do not need password reset, +# backup, monitoring, or any extension handling whatsoever. +Extensions.Enabled=y + # Rely on cloud-init to provision Provisioning.UseCloudInit=y @@ -14,7 +18,8 @@ # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=n -# Supported values are "rsa", "dsa" and "ecdsa". +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. @@ -60,6 +65,9 @@ # Enable verbose logging (y|n) Logs.Verbose=n +# Enable Console logging, default is y +# Logs.Console=y + # Is FIPS enabled OS.EnableFIPS=n @@ -85,6 +93,9 @@ # Enable RDMA kernel update, this value is effective on Ubuntu # OS.UpdateRdmaDriver=y +# Enable checking RDMA driver version and update +# OS.CheckRdmaDriver=y + # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y @@ -93,14 +104,18 @@ # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. -# Default is disabled -# EnableOverProvisioning=n +# Default is enabled +# EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services -# Note: -# - The default is false to protect the state of exising VMs OS.EnableFirewall=y + +# Enforce control groups limits on the agent and extensions +CGroups.EnforceLimits=n + +# CGroups which are excluded from limits, comma separated +CGroups.Excluded=customscript,runcommand diff -Nru walinuxagent-2.2.21+really2.2.20/config/waagent.conf walinuxagent-2.2.45/config/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/waagent.conf 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/config/waagent.conf 2019-11-07 00:36:56.000000000 +0000 @@ -5,6 +5,10 @@ # Enable instance creation Provisioning.Enabled=y +# Enable extension handling. Do not disable this unless you do not need password reset, +# backup, monitoring, or any extension handling whatsoever. +Extensions.Enabled=y + # Rely on cloud-init to provision Provisioning.UseCloudInit=n @@ -15,6 +19,7 @@ Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. @@ -57,6 +62,9 @@ # Enable verbose logging (y|n) Logs.Verbose=n +# Enable Console logging, default is y +# Logs.Console=y + # Is FIPS enabled OS.EnableFIPS=n @@ -97,6 +105,9 @@ # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y +# Enable checking RDMA driver version and update +# OS.CheckRdmaDriver=y + # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y @@ -105,14 +116,18 @@ # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. 
-# Default is disabled -# EnableOverProvisioning=n +# Default is enabled +# EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services -# Note: -# - The default is false to protect the state of exising VMs OS.EnableFirewall=y + +# Enforce control groups limits on the agent and extensions +CGroups.EnforceLimits=n + +# CGroups which are excluded from limits, comma separated +CGroups.Excluded=customscript,runcommand diff -Nru walinuxagent-2.2.21+really2.2.20/debian/changelog walinuxagent-2.2.45/debian/changelog --- walinuxagent-2.2.21+really2.2.20/debian/changelog 2018-03-20 22:11:57.000000000 +0000 +++ walinuxagent-2.2.45/debian/changelog 2022-04-20 09:12:04.000000000 +0000 @@ -1,3 +1,48 @@ +walinuxagent (2.2.45-0ubuntu1~18.04.1ubuntu0~ppa1) bionic; urgency=medium + + * Build on arm64. (LP: #1954678) + + -- Brian Murray Wed, 20 Apr 2022 02:12:04 -0700 + +walinuxagent (2.2.45-0ubuntu1~18.04.1) bionic; urgency=medium + + * New upstream release (LP: #1851064) + + -- Brian Murray Tue, 14 Jan 2020 08:44:47 -0800 + +walinuxagent (2.2.44-0ubuntu1) focal; urgency=medium + + * New upstream release (LP: #1851064) + + -- Mathieu Trudel-Lapierre Thu, 21 Nov 2019 10:19:10 -0500 + +walinuxagent (2.2.40-0ubuntu1) eoan; urgency=medium + + * New upstream release (LP: #1827995) + * debian/patches/CVE-2019-0804.patch: dropped; included in upstream release. + + -- Mathieu Trudel-Lapierre Wed, 05 Jun 2019 15:26:47 -0400 + +walinuxagent (2.2.32-0ubuntu2) disco; urgency=medium + + * debian/patches/CVE-2019-0804.patch: + - Cherry-pick fixes from upstream for handling swap file. + + -- Łukasz 'sil2100' Zemczak Mon, 11 Mar 2019 09:30:11 +0100 + +walinuxagent (2.2.32-0ubuntu1) disco; urgency=medium + + * New upstream release (LP: #1799498). + * debian/patches/disable_import_test.patch: refreshed patch. 
+ * debian/control: + - Add the python3-mock build-dependency. + * debian/rules: + - Run unit tests but don't fail the build if they fail. Previously, due to + a bug in setup.py, those were never being run during build so the + failures are not regressions. + + -- Łukasz 'sil2100' Zemczak Mon, 29 Oct 2018 09:13:23 +0100 + walinuxagent (2.2.21+really2.2.20-0ubuntu3) bionic; urgency=medium * Drop Recommends of linux-image-extra-virtual, as that depends on a kernel diff -Nru walinuxagent-2.2.21+really2.2.20/debian/control walinuxagent-2.2.45/debian/control --- walinuxagent-2.2.21+really2.2.20/debian/control 2018-03-20 22:11:56.000000000 +0000 +++ walinuxagent-2.2.45/debian/control 2022-04-20 09:12:02.000000000 +0000 @@ -8,13 +8,14 @@ python3-all, python3-setuptools, python3-pyasn1, + python3-mock, openssl (>= 1.0), Standards-Version: 3.9.8 X-Python3-Version: >= 3.2 Homepage: http://go.microsoft.com/fwlink/?LinkId=250998 Package: walinuxagent -Architecture: amd64 +Architecture: amd64 arm64 Depends: cloud-init (>= 0.7.3~bzr826-0ubuntu2), isc-dhcp-client, openssh-server (>= 1:5.9p1), diff -Nru walinuxagent-2.2.21+really2.2.20/debian/patches/disable_import_test.patch walinuxagent-2.2.45/debian/patches/disable_import_test.patch --- walinuxagent-2.2.21+really2.2.20/debian/patches/disable_import_test.patch 2018-02-23 18:17:20.000000000 +0000 +++ walinuxagent-2.2.45/debian/patches/disable_import_test.patch 2019-05-23 21:15:53.000000000 +0000 @@ -1,15 +1,17 @@ -Index: walinuxagent-2.2.21+really2.2.20/config/waagent.conf +Index: walinuxagent-2.2.32/config/waagent.conf =================================================================== ---- walinuxagent-2.2.21+really2.2.20.orig/config/waagent.conf -+++ walinuxagent-2.2.21+really2.2.20/config/waagent.conf -@@ -3,16 +3,16 @@ +--- walinuxagent-2.2.32.orig/config/waagent.conf ++++ walinuxagent-2.2.32/config/waagent.conf +@@ -3,7 +3,7 @@ # # Enable instance creation -Provisioning.Enabled=y +Provisioning.Enabled=n - # Rely on 
cloud-init to provision + # Enable extension handling. Do not disable this unless you do not need password reset, + # backup, monitoring, or any extension handling whatsoever. +@@ -13,10 +13,10 @@ Extensions.Enabled=y Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. @@ -21,8 +23,8 @@ +Provisioning.RegenerateSshHostKeyPair=n # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". - Provisioning.SshHostKeyPairType=rsa -@@ -36,14 +36,14 @@ Provisioning.ExecuteCustomData=n + # The "auto" option is supported on OpenSSH 5.9 (2011) and later. +@@ -41,14 +41,14 @@ Provisioning.ExecuteCustomData=n Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. diff -Nru walinuxagent-2.2.21+really2.2.20/debian/rules walinuxagent-2.2.45/debian/rules --- walinuxagent-2.2.21+really2.2.20/debian/rules 2018-02-23 18:17:20.000000000 +0000 +++ walinuxagent-2.2.45/debian/rules 2019-05-23 21:15:53.000000000 +0000 @@ -25,3 +25,9 @@ override_dh_python3: dh_python3 -O--buildsystem=pybuild --shebang "/usr/bin/env python3" + +override_dh_auto_test: + # No test were run on any of the previous versions, temporarily + # ignoring the failures as they're not regressions (LP: #1800499) + # XXX: Those need to be fixed ASAP. + -dh_auto_test -O--buildsystem=pybuild diff -Nru walinuxagent-2.2.21+really2.2.20/.flake8 walinuxagent-2.2.45/.flake8 --- walinuxagent-2.2.21+really2.2.20/.flake8 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/.flake8 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,32 @@ +# +# The project did not use flake8 since inception so there are a number +# of time-consuming flake8-identified improvements that are just a lot +# of busy work. Each of these should be disabled and code cleaned up. 
+# +# W503: Line break occurred before a binary operator +# W504: Line break occurred after a binary operator +# E126: Continuation line over-indented for hanging indent +# E127: Continuation line over-indented for visual indent +# E128: Continuation line under-indented for visual indent +# E201: Whitespace after '(' +# E202: Whitespace before ')' +# E203: Whitespace before ':' +# E221: Multiple spaces before operator +# E225: Missing whitespace around operator +# E226: Missing whitespace around arithmetic operator +# E231: Missing whitespace after ',', ';', or ':' +# E261: At least two spaces before inline comment +# E265: Block comment should start with '# ' +# E302: Expected 2 blank lines, found 0 +# E501: Line too long (xx > yy characters) +# E502: The backslash is redundant between brackets +# F401: Module imported but unused +# F403: 'from module import *' used; unable to detect undefined names +# F405: Name may be undefined, or defined from star imports: module +# + +[flake8] +ignore = W503,W504,E126,E127,E128,E201,E202,E203,E221,E225,E226,E231,E261,E265,E302,E501,E502,F401,F403,F405 +exclude = .git,__pycache__,docs/source/conf.py,old,build,dist,tests +max-complexity = 30 +max-line-length = 120 \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/.github/CONTRIBUTING.md walinuxagent-2.2.45/.github/CONTRIBUTING.md --- walinuxagent-2.2.21+really2.2.20/.github/CONTRIBUTING.md 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/.github/CONTRIBUTING.md 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,77 @@ +# Contributing to Linux Guest Agent +First, thank you for contributing to WALinuxAgent repository! + +## Basics +If you would like to become an active contributor to this project, please follow the instructions provided in [Microsoft Azure Projects Contribution Guidelines](http://azure.github.io/guidelines/). 
+ +## Table of Contents +[Before starting](#before-starting) +- [Github basics](#github-basics) +- [Code of Conduct](#code-of-conduct) + +[Making Changes](#making-changes) +- [Pull Requests](#pull-requests) +- [Pull Request Guidelines](#pull-request-guidelines) + - [Cleaning up commits](#cleaning-up-commits) + - [General guidelines](#general-guidelines) + - [Testing guidelines](#testing-guidelines) + +## Before starting + +### Github basics + +#### GitHub workflow + +If you don't have experience with Git and Github, some of the terminology and process can be confusing. [Here's a guide to understanding Github](https://guides.github.com/introduction/flow/). + +#### Forking the Azure/Guest-Configuration-Extension repository + +Unless you are working with multiple contributors on the same file, we ask that you fork the repository and submit your Pull Request from there. [Here's a guide to forks in Github](https://guides.github.com/activities/forking/). + +### Code of Conduct + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + +## Making Changes + +### Pull Requests + +You can find all of the pull requests that have been opened in the [Pull Request](https://github.com/Azure/Guest-Configuration-Extension/pulls) section of the repository. + +To open your own pull request, click [here](https://github.com/Azure/WALinuxAgent/compare). 
When creating a pull request, keep the following in mind: +- Make sure you are pointing to the fork and branch that your changes were made in +- Choose the correct branch you want your pull request to be merged into +- The pull request template that is provided **should be filled out**; this is not something that should just be deleted or ignored when the pull request is created + - Deleting or ignoring this template will elongate the time it takes for your pull request to be reviewed + + +### Pull Request Guidelines + +A pull request template will automatically be included as a part of your PR. Please fill out the checklist as specified. Pull requests **will not be reviewed** unless they include a properly completed checklist. + +#### Cleaning up Commits + +If you are thinking about making a large change, **break up the change into small, logical, testable chunks, and organize your pull requests accordingly**. + +Often when a pull request is created with a large number of files changed and/or a large number of lines of code added and/or removed, GitHub will have a difficult time opening up the changes on their site. This forces the Azure Guest-Configuration-Extension team to use separate software to do a code review on the pull request. + +If you find yourself creating a pull request and are unable to see all the changes on GitHub, we recommend **splitting the pull request into multiple pull requests that are able to be reviewed on GitHub**. + +If splitting up the pull request is not an option, we recommend **creating individual commits for different parts of the pull request, which can be reviewed individually on GitHub**. + +For more information on cleaning up the commits in a pull request, such as how to rebase, squash, and cherry-pick, click [here](https://github.com/Azure/azure-powershell/blob/dev/documentation/cleaning-up-commits.md). + +#### General guidelines + +The following guidelines must be followed in **EVERY** pull request that is opened. 
+ +- Title of the pull request is clear and informative +- There are a small number of commits that each have an informative message +- A description of the changes the pull request makes is included, and a reference to the issue being resolved, if the change address any +- All files have the Microsoft copyright header + +#### Testing Guidelines + +The following guidelines must be followed in **EVERY** pull request that is opened. + +- Pull request includes test coverage for the included changes \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/.github/ISSUE_TEMPLATE/bug_report.md walinuxagent-2.2.45/.github/ISSUE_TEMPLATE/bug_report.md --- walinuxagent-2.2.21+really2.2.20/.github/ISSUE_TEMPLATE/bug_report.md 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/.github/ISSUE_TEMPLATE/bug_report.md 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,23 @@ +--- +name: Bug report +about: Create a report to help us improve +title: "[BUG]" +labels: triage +assignees: narrieta, pgombar, vrdmr, larohra + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + +Note: Please add some context which would help us understand the problem better +1. Section of the log where the error occurs. +2. Serial console output +3. Steps to reproduce the behavior. + +**Distro and WALinuxAgent details (please complete the following information):** + - Distro and Version: [e.g. Ubuntu 16.04] + - WALinuxAgent version [e.g. 2.2.34, you can copy the output of `waagent --version`, more info [here](https://github.com/Azure/WALinuxAgent/wiki/FAQ#what-does-goal-state-agent-mean-in-waagent---version-output) ] + +**Additional context** +Add any other context about the problem here. 
diff -Nru walinuxagent-2.2.21+really2.2.20/.github/PULL_REQUEST_TEMPLATE.md walinuxagent-2.2.45/.github/PULL_REQUEST_TEMPLATE.md --- walinuxagent-2.2.21+really2.2.20/.github/PULL_REQUEST_TEMPLATE.md 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/.github/PULL_REQUEST_TEMPLATE.md 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,22 @@ + + +## Description + +Issue # + + +--- + +### PR information +- [ ] The title of the PR is clear and informative. +- [ ] There are a small number of commits, each of which has an informative message. This means that previously merged commits do not appear in the history of the PR. For information on cleaning up the commits in your pull request, [see this page](https://github.com/Azure/azure-powershell/blob/master/documentation/development-docs/cleaning-up-commits.md). +- [ ] Except for special cases involving multiple contributors, the PR is started from a fork of the main repository, not a branch. +- [ ] If applicable, the PR references the bug/issue that it fixes in the description. +- [ ] New Unit tests were added for the changes made and Travis.CI is passing. + +### Quality of Code and Contribution Guidelines +- [ ] I have read the [contribution guidelines](https://github.com/Azure/WALinuxAgent/blob/master/.github/CONTRIBUTING.md). 
\ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/.gitignore walinuxagent-2.2.45/.gitignore --- walinuxagent-2.2.21+really2.2.20/.gitignore 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/.gitignore 2019-11-07 00:36:56.000000000 +0000 @@ -69,3 +69,6 @@ # rope project .ropeproject/ + +# mac osx specific files +.DS_Store diff -Nru walinuxagent-2.2.21+really2.2.20/init/openwrt/waagent walinuxagent-2.2.45/init/openwrt/waagent --- walinuxagent-2.2.21+really2.2.20/init/openwrt/waagent 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/init/openwrt/waagent 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,60 @@ +#!/bin/sh /etc/rc.common +# Init file for AzureLinuxAgent. +# +# Copyright 2018 Microsoft Corporation +# Copyright 2018 Sonus Networks, Inc. (d.b.a. Ribbon Communications Operating Company) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.6+ and Openssl 1.0+ +# +# description: AzureLinuxAgent +# + +START=60 +STOP=80 + + +RETVAL=0 +FriendlyName="AzureLinuxAgent" +WAZD_BIN=/usr/sbin/waagent +WAZD_CONF=/etc/waagent.conf +WAZD_PIDFILE=/var/run/waagent.pid + +test -x "$WAZD_BIN" || { echo "$WAZD_BIN not installed"; exit 5; } +test -e "$WAZD_CONF" || { echo "$WAZD_CONF not found"; exit 6; } + +start() +{ + echo -n "Starting $FriendlyName: " + $WAZD_BIN -start + RETVAL=$? 
+ echo + return $RETVAL +} + +stop() +{ + echo -n "Stopping $FriendlyName: " + if [ -f "$WAZD_PIDFILE" ] + then + kill -9 `cat ${WAZD_PIDFILE}` + rm ${WAZD_PIDFILE} + RETVAL=$? + echo + return $RETVAL + else + echo "$FriendlyName already stopped." + fi +} + diff -Nru walinuxagent-2.2.21+really2.2.20/__main__.py walinuxagent-2.2.45/__main__.py --- walinuxagent-2.2.21+really2.2.20/__main__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/__main__.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.agent as agent diff -Nru walinuxagent-2.2.21+really2.2.20/README.md walinuxagent-2.2.45/README.md --- walinuxagent-2.2.21+really2.2.20/README.md 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/README.md 2019-11-07 00:36:56.000000000 +0000 @@ -1,54 +1,77 @@ -## Microsoft Azure Linux Agent README +# Microsoft Azure Linux Agent -### INTRODUCTION +## Master branch status -The Microsoft Azure Linux Agent (waagent) manages Linux & BSD provisioning, -and VM interaction with the Azure Fabric Controller. 
It provides the following -functionality for Linux and BSD IaaS deployments: - - * Image Provisioning - - Creation of a user account - - Configuring SSH authentication types - - Deployment of SSH public keys and key pairs - - Setting the host name - - Publishing the host name to the platform DNS - - Reporting SSH host key fingerprint to the platform - - Resource Disk Management - - Formatting and mounting the resource disk - - Configuring swap space - - * Networking - - Manages routes to improve compatibility with platform DHCP servers - - Ensures the stability of the network interface name - - * Kernel - - Configure virtual NUMA (disable for kernel <2.6.37) - - Consume Hyper-V entropy for /dev/random - - Configure SCSI timeouts for the root device (which could be remote) - - * Diagnostics - - Console redirection to the serial port - - * SCVMM Deployments - - Detect and bootstrap the VMM agent for Linux when running in a System - Center Virtual Machine Manager 2012R2 environment - - * VM Extension - - Inject component authored by Microsoft and Partners into Linux VM (IaaS) - to enable software and configuration automation - - VM Extension reference implementation on https://github.com/Azure/azure-linux-extensions +[![Travis CI](https://travis-ci.org/Azure/WALinuxAgent.svg?branch=develop)](https://travis-ci.org/Azure/WALinuxAgent/branches) +[![CodeCov](https://codecov.io/gh/Azure/WALinuxAgent/branch/develop/graph/badge.svg)](https://codecov.io/gh/Azure/WALinuxAgent/branch/develop) +Each badge below represents our basic validation tests for an image, which are executed several times each day. These include provisioning, user account, disk, extension and networking scenarios. 
-### COMMUNICATION +Image | Status | +------|--------| +Canonical UbuntuServer 14.04.5-LTS|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Canonical_UbuntuServer_14.04.5-LTS__agent--bvt.svg) +Canonical UbuntuServer 14.04.5-DAILY-LTS|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Canonical_UbuntuServer_14.04.5-DAILY-LTS__agent--bvt.svg) +Canonical UbuntuServer 16.04-LTS|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Canonical_UbuntuServer_16.04-LTS__agent--bvt.svg) +Canonical UbuntuServer 16.04-DAILY-LTS|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Canonical_UbuntuServer_16.04-DAILY-LTS__agent--bvt.svg) +Canonical UbuntuServer 18.04-LTS|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Canonical_UbuntuServer_18.04-LTS__agent--bvt.svg) +Canonical UbuntuServer 18.04-DAILY-LTS|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Canonical_UbuntuServer_18.04-DAILY-LTS__agent--bvt.svg) +Credativ Debian 8|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Credativ_Debian_8__agent--bvt.svg) +Credativ Debian 8-DAILY|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Credativ_Debian_8-DAILY__agent--bvt.svg) +Credativ Debian 9|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Credativ_Debian_9__agent--bvt.svg) +Credativ Debian 9-DAILY|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Credativ_Debian_9-DAILY__agent--bvt.svg) +OpenLogic CentOS 6.9|![badge](https://dcrbadges.blob.core.windows.net/scenarios/OpenLogic_CentOS_6.9__agent--bvt.svg) +OpenLogic CentOS 7.4|![badge](https://dcrbadges.blob.core.windows.net/scenarios/OpenLogic_CentOS_7.4__agent--bvt.svg) +RedHat RHEL 6.9|![badge](https://dcrbadges.blob.core.windows.net/scenarios/RedHat_RHEL_6.9__agent--bvt.svg) +RedHat RHEL 7-RAW|![badge](https://dcrbadges.blob.core.windows.net/scenarios/RedHat_RHEL_7-RAW__agent--bvt.svg) +SUSE SLES 
12-SP3|![badge](https://dcrbadges.blob.core.windows.net/scenarios/SUSE_SLES_12-SP3__agent--bvt.svg) + +## Introduction + +The Microsoft Azure Linux Agent (waagent) manages Linux provisioning and VM interaction with the Azure Fabric Controller. It provides the following +functionality for Linux IaaS deployments: + +* Image Provisioning + * Creation of a user account + * Configuring SSH authentication types + * Deployment of SSH public keys and key pairs + * Setting the host name + * Publishing the host name to the platform DNS + * Reporting SSH host key fingerprint to the platform + * Resource Disk Management + * Formatting and mounting the resource disk + * Configuring swap space + +* Networking + * Manages routes to improve compatibility with platform DHCP servers + * Ensures the stability of the network interface name + +* Kernel + * Configure virtual NUMA (disable for kernel <2.6.37) + * Consume Hyper-V entropy for /dev/random + * Configure SCSI timeouts for the root device (which could be remote) + +* Diagnostics + * Console redirection to the serial port + +* SCVMM Deployments + * Detect and bootstrap the VMM agent for Linux when running in a System + Center Virtual Machine Manager 2012R2 environment + +* VM Extension + * Inject component authored by Microsoft and Partners into Linux VM (IaaS) + to enable software and configuration automation + * VM Extension reference implementation on [GitHub](https://github.com/Azure/azure-linux-extensions) + +## Communication The information flow from the platform to the agent occurs via two channels: - * A boot-time attached DVD for IaaS deployments. - This DVD includes an OVF-compliant configuration file that includes all - provisioning information other than the actual SSH keypairs. +* A boot-time attached DVD for IaaS deployments. + This DVD includes an OVF-compliant configuration file that includes all + provisioning information other than the actual SSH keypairs. 
- * A TCP endpoint exposing a REST API used to obtain deployment and topology - configuration. +* A TCP endpoint exposing a REST API used to obtain deployment and topology + configuration. The agent will use an HTTP proxy if provided via the `http_proxy` (for `http` requests) or `https_proxy` (for `https` requests) environment variables. The `HttpProxy.Host` and @@ -56,117 +79,111 @@ settings. Due to limitations of Python, the agent *does not* support HTTP proxies requiring authentication. - -### REQUIREMENTS +## Requirements The following systems have been tested and are known to work with the Azure Linux Agent. Please note that this list may differ from the official list -of supported systems on the Microsoft Azure Platform as described here: -http://support.microsoft.com/kb/2805216 +of supported systems on the Microsoft Azure Platform as described [here](http://support.microsoft.com/kb/2805216). Waagent depends on some system packages in order to function properly: - * Python 2.6+ - * OpenSSL 1.0+ - * OpenSSH 5.3+ - * Filesystem utilities: sfdisk, fdisk, mkfs, parted - * Password tools: chpasswd, sudo - * Text processing tools: sed, grep - * Network tools: ip-route +* Python 2.6+ +* OpenSSL 1.0+ +* OpenSSH 5.3+ +* Filesystem utilities: sfdisk, fdisk, mkfs, parted +* Password tools: chpasswd, sudo +* Text processing tools: sed, grep +* Network tools: ip-route - -### INSTALLATION +## Installation Installation via your distribution's package repository is preferred. You can also customize your own RPM or DEB packages using the configuration samples provided (see deb and rpm sections below). 
-For more advanced installation options, such as installing to custom locations -or prefixes, you can use ***setuptools*** to install from source by running: - - #sudo python setup.py install --register-service +For more advanced installation options, such as installing to custom locations or prefixes, you can use **setuptools** to install from source by running: + +```bash + sudo python setup.py install --register-service +``` You can view more installation options by running: - #sudo python setup.py install --help +```bash + sudo python setup.py install --help +``` -The agent's log file is kept at /var/log/waagent.log. +The agent's log file is kept at `/var/log/waagent.log`. -### UPGRADE +## Upgrade -Upgrading via your distribution's package repository is preferred. +Upgrading via your distribution's package repository is strongly preferred. If upgrading manually, same with installation above by running: - #sudo python setup.py install --force +```bash + sudo python setup.py install --force +``` Restart waagent service,for most of linux distributions: - #sudo service waagent restart +```bash + sudo service waagent restart +``` For Ubuntu, use: - #sudo service walinuxagent restart +```bash + sudo service walinuxagent restart +``` For CoreOS, use: - #sudo systemctl restart waagent - -The agent's log file is kept at /var/log/waagent.log. +```bash + sudo systemctl restart waagent +``` +## Command line options -### COMMAND LINE OPTIONS +### Flags -Flags: +`-verbose`: Increase verbosity of specified command - -verbose: Increase verbosity of specified command +`-force`: Skip interactive confirmation for some commands - -force: Skip interactive confirmation for some commands +### Commands -Commands: +`-help`: Lists the supported commands and flags. --help: Lists the supported commands and flags. 
+`-deprovision`: Attempt to clean the system and make it suitable for re-provisioning, by deleting the following: --deprovision: Attempt to clean the system and make it suitable for -re-provisioning, by deleting the following: - - * All SSH host keys (if Provisioning.RegenerateSshHostKeyPair - is 'y' in the configuration file) - * Nameserver configuration in /etc/resolv.conf - * Root password from /etc/shadow (if - Provisioning.DeleteRootPassword is 'y' in the configuration file) - * Cached DHCP client leases - * Resets host name to localhost.localdomain +* All SSH host keys (if Provisioning.RegenerateSshHostKeyPair is 'y' in the configuration file) +* Nameserver configuration in /etc/resolv.conf +* Root password from /etc/shadow (if Provisioning.DeleteRootPassword is 'y' in the configuration file) +* Cached DHCP client leases +* Resets host name to localhost.localdomain - WARNING! Deprovision does not guarantee that the image is cleared of - all sensitive information and suitable for redistribution. + **WARNING!** Deprovision does not guarantee that the image is cleared of all sensitive information and suitable for redistribution. --deprovision+user: Performs everything under deprovision (above) -and also deletes the last provisioned user account and associated data. +`-deprovision+user`: Performs everything under deprovision (above) and also deletes the last provisioned user account and associated data. --version: Displays the version of waagent +`-version`: Displays the version of waagent --serialconsole: Configures GRUB to mark ttyS0 (the first serial port) -as the boot console. This ensures that kernel bootup logs are sent to -the serial port and made available for debugging. +`-serialconsole`: Configures GRUB to mark ttyS0 (the first serial port) as the boot console. This ensures that kernel bootup logs are sent to the serial port and made available for debugging. --daemon: Run waagent as a daemon to manage interaction with the -platform. 
This argument is specified to waagent in the waagent init -script. +`-daemon`: Run waagent as a daemon to manage interaction with the platform. This argument is specified to waagent in the waagent init script. --start: Run waagent as a background process +`-start`: Run waagent as a background process -### CONFIGURATION +## Configuration -A configuration file (/etc/waagent.conf) controls the actions of -waagent. Blank lines and lines whose first character is a `#` are -ignored (end-of-line comments are *not* supported). +A configuration file (/etc/waagent.conf) controls the actions of waagent. Blank lines and lines whose first character is a `#` are ignored (end-of-line comments are *not* supported). A sample configuration file is shown below: -``` -Provisioning.Enabled=y -Provisioning.UseCloudInit=n +```yml +Extensions.Enabled=y +Provisioning.Agent=auto Provisioning.DeleteRootPassword=n Provisioning.RegenerateSshHostKeyPair=y Provisioning.SshHostKeyPairType=rsa @@ -180,6 +197,7 @@ ResourceDisk.MountPoint=/mnt/resource ResourceDisk.MountOptions=None ResourceDisk.EnableSwap=n +ResourceDisk.EnableSwapEncryption=n ResourceDisk.SwapSizeMB=0 Logs.Verbose=n OS.AllowHTTP=n @@ -190,6 +208,8 @@ OS.SshDir=/etc/ssh HttpProxy.Host=None HttpProxy.Port=None +CGroups.EnforceLimits=y +CGroups.Excluded=customscript,runcommand ``` The various configuration options are described in detail below. Configuration @@ -197,19 +217,51 @@ configuration options can be specified as "y" or "n". The special keyword "None" may be used for some string type configuration entries as detailed below. -#### Configuration File Options +### Configuration File Options + +#### __Extensions.Enabled__ -* __Provisioning.Enabled__ _Type: Boolean_ -_Default: y_ +_Default: y_ + +This allows the user to enable or disable the extension handling functionality in the +agent. Valid values are "y" or "n". 
If extension handling is disabled, the goal state +will still be processed and VM status is still reported, but only every 5 minutes. +Extension config within the goal state will be ignored. Note that functionality such +as password reset, ssh key updates and backups depend on extensions. Only disable this +if you do not need extensions at all. + +_Note_: disabling extensions in this manner is not the same as running completely +without the agent. In order to do that, the `provisionVMAgent` flag must be set at +provisioning time, via whichever API is being used. We will provide more details on +this on our wiki when it is generally available. + +#### __Provisioning.Agent__ + +_Type: String_ +_Default: auto_ + +Choose which provisioning agent to use (or allow waagent to figure it out by +specifying "auto"). Possible options are "auto" (default), "waagent", "cloud-init", +or "disabled". + +#### __Provisioning.Enabled__ (*removed in VERSION*) + +_Type: Boolean_ +_Default: y_ This allows the user to enable or disable the provisioning functionality in the agent. Valid values are "y" or "n". If provisioning is disabled, SSH host and user keys in the image are preserved and any configuration specified in the Azure provisioning API is ignored. -* __Provisioning.UseCloudInit__ -_Type: Boolean_ +_Note_: This configuration option has been removed and has no effect. waagent +now auto-detects cloud-init as a provisioning agent (with an option to override +with `Provisioning.Agent`). + +#### __Provisioning.UseCloudInit__ (*removed in VERSION*) + +_Type: Boolean_ _Default: n_ This options enables / disables support for provisioning by means of cloud-init. @@ -218,16 +270,22 @@ disabled ("n") for this option to have an effect. Setting _Provisioning.Enabled_ to true ("y") overrides this option and runs the built-in agent provisioning code. -* __Provisioning.DeleteRootPassword__ -_Type: Boolean_ +_Note_: This configuration option has been removed and has no effect. 
waagent +now auto-detects cloud-init as a provisioning agent (with an option to override +with `Provisioning.Agent`). + +#### __Provisioning.DeleteRootPassword__ + +_Type: Boolean_ _Default: n_ If set, the root password in the /etc/shadow file is erased during the provisioning process. -* __Provisioning.RegenerateSshHostKeyPair__ -_Type: Boolean_ -_Default: y_ +#### __Provisioning.RegenerateSshHostKeyPair__ + +_Type: Boolean_ +_Default: y_ If set, all SSH host key pairs (ecdsa, dsa and rsa) are deleted during the provisioning process from /etc/ssh/. And a single fresh key pair is generated. @@ -236,9 +294,10 @@ re-create SSH key pairs for any missing encryption types when the SSH daemon is restarted (for example, upon a reboot). -* __Provisioning.SshHostKeyPairType__ -_Type: String_ -_Default: rsa_ +#### __Provisioning.SshHostKeyPairType__ + +_Type: String_ +_Default: rsa_ This can be set to an encryption algorithm type that is supported by the SSH daemon on the VM. The typically supported values are "rsa", "dsa" and "ecdsa". @@ -246,9 +305,10 @@ use putty.exe on Windows to connect to a Linux deployment, please use "rsa" or "dsa". -* __Provisioning.MonitorHostName__ -_Type: Boolean_ -_Default: y_ +#### __Provisioning.MonitorHostName__ + +_Type: Boolean_ +_Default: y_ If set, waagent will monitor the Linux VM for hostname changes (as returned by the "hostname" command) and automatically update the networking configuration in @@ -256,185 +316,249 @@ servers, networking will be restarted in the VM. This will result in brief loss of Internet connectivity. -* __Provisioning.DecodeCustomData__ -_Type: Boolean_ -_Default: n_ +#### __Provisioning.DecodeCustomData__ + +_Type: Boolean_ +_Default: n_ If set, waagent will decode CustomData from Base64. -* __Provisioning.ExecuteCustomData__ -_Type: Boolean_ -_Default: n_ +#### __Provisioning.ExecuteCustomData__ + +_Type: Boolean_ +_Default: n_ If set, waagent will execute CustomData after provisioning. 
-* __Provisioning.PasswordCryptId__ -_Type:String_ -_Default:6_ - -Algorithm used by crypt when generating password hash. - 1 - MD5 - 2a - Blowfish - 5 - SHA-256 - 6 - SHA-512 - -* __Provisioning.PasswordCryptSaltLength__ -_Type:String_ -_Default:10_ +#### __Provisioning.PasswordCryptId__ + +_Type: String_ +_Default: 6_ + +Algorithm used by crypt when generating password hash. + +* 1 - MD5 +* 2a - Blowfish +* 5 - SHA-256 +* 6 - SHA-512 + +#### __Provisioning.PasswordCryptSaltLength__ + +_Type: String_ +_Default: 10_ Length of random salt used when generating password hash. -* __ResourceDisk.Format__ -_Type: Boolean_ -_Default: y_ - -If set, the resource disk provided by the platform will be formatted and mounted -by waagent if the filesystem type requested by the user in -"ResourceDisk.Filesystem" is anything other than "ntfs". A single partition of -type Linux (83) will be made available on the disk. Note that this partition -will not be formatted if it can be successfully mounted. - -* __ResourceDisk.Filesystem__ -_Type: String_ -_Default: ext4_ +#### __ResourceDisk.Format__ + +_Type: Boolean_ +_Default: y_ + +If set, the resource disk provided by the platform will be formatted and mounted by waagent if the filesystem type requested by the user in "ResourceDisk.Filesystem" is anything other than "ntfs". A single partition of +type Linux (83) will be made available on the disk. Note that this partition will not be formatted if it can be successfully mounted. + +#### __ResourceDisk.Filesystem__ + +_Type: String_ +_Default: ext4_ This specifies the filesystem type for the resource disk. Supported values vary by Linux distribution. If the string is X, then mkfs.X should be present on the Linux image. SLES 11 images should typically use 'ext3'. BSD images should use 'ufs2' here. 
-* __ResourceDisk.MountPoint__ -_Type: String_ -_Default: /mnt/resource_ +#### __ResourceDisk.MountPoint__ + +_Type: String_ +_Default: /mnt/resource_ This specifies the path at which the resource disk is mounted. -* __ResourceDisk.MountOptions__ -_Type: String_ -_Default: None_ +#### __ResourceDisk.MountOptions__ + +_Type: String_ +_Default: None_ Specifies disk mount options to be passed to the mount -o command. This is a comma separated list of values, ex. 'nodev,nosuid'. See mount(8) for details. -* __ResourceDisk.EnableSwap__ -_Type: Boolean_ -_Default: n_ +#### __ResourceDisk.EnableSwap__ + +_Type: Boolean_ +_Default: n_ If set, a swap file (/swapfile) is created on the resource disk and added to the system swap space. -* __ResourceDisk.SwapSizeMB__ -_Type: Integer_ -_Default: 0_ - -The size of the swap file in megabytes. - -* Logs.Verbose -_Type: Boolean_ -_Default: n_ +#### __ResourceDisk.EnableSwapEncryption__ + +_Type: Boolean_ +_Default: n_ + +If set, the swap file (/swapfile) is mounted as an encrypted filesystem. + +#### __ResourceDisk.SwapSizeMB__ + +_Type: Integer_ +_Default: 0_ + +The size of the swap file in megabytes. + +#### __Logs.Verbose__ + +_Type: Boolean_ +_Default: n_ If set, log verbosity is boosted. Waagent logs to /var/log/waagent.log and leverages the system logrotate functionality to rotate logs. -* __OS.AllowHTTP__ -_Type: Boolean_ -_Default: n_ - -If set to `y` and SSL support is not compiled into Python, the agent will fall-back to -use HTTP. Otherwise, if SSL support is not compiled into Python, the agent will fail -all HTTPS requests. - -Note: Allowing HTTP may unintentionally expose secure data. - -* __OS.EnableRDMA__ -_Type: Boolean_ -_Default: n_ +#### __OS.AllowHTTP__ + +_Type: Boolean_ +_Default: n_ + +If SSL support is not compiled into Python, the agent will fail all HTTPS requests. +You can set this option to 'y' to make the agent fall-back to HTTP, instead of failing the requests. 
+ +NOTE: Allowing HTTP may unintentionally expose secure data. + +#### __OS.EnableRDMA__ + +_Type: Boolean_ +_Default: n_ If set, the agent will attempt to install and then load an RDMA kernel driver that matches the version of the firmware on the underlying hardware. -* __OS.EnableFIPS__ -_Type: Boolean_ -_Default: n_ +#### __OS.EnableFIPS__ + +_Type: Boolean_ +_Default: n_ If set, the agent will emit into the environment "OPENSSL_FIPS=1" when executing OpenSSL commands. This signals OpenSSL to use any installed FIPS-compliant libraries. -Note that the agent itself has no FIPS-specific code. _If no FIPS-compliant are +Note that the agent itself has no FIPS-specific code. _If no FIPS-compliant certificates are installed, then enabling this option will cause all OpenSSL commands to fail._ -* __OS.RootDeviceScsiTimeout__ -_Type: Integer_ -_Default: 300_ +#### __OS.RootDeviceScsiTimeout__ + +_Type: Integer_ +_Default: 300_ This configures the SCSI timeout in seconds on the root device. If not set, the system defaults are used. -* __OS.OpensslPath__ -_Type: String_ -_Default: None_ +#### __OS.OpensslPath__ + +_Type: String_ +_Default: None_ This can be used to specify an alternate path for the openssl binary to use for cryptographic operations. -* __OS.SshClientAliveInterval__ -_Type: Integer_ +#### __OS.SshClientAliveInterval__ + +_Type: Integer_ _Default: 180_ This values sets the number of seconds the agent uses for the SSH ClientAliveInterval configuration option. -* __OS.SshDir__ -_Type: String_ -_Default: `/etc/ssh`_ +#### __OS.SshDir__ + +_Type: String_ +_Default: `/etc/ssh`_ This option can be used to override the normal location of the SSH configuration directory. -* __HttpProxy.Host, HttpProxy.Port__ -_Type: String_ -_Default: None_ +#### __HttpProxy.Host, HttpProxy.Port__ + +_Type: String_ +_Default: None_ If set, the agent will use this proxy server to access the internet. These values *will* override the `http_proxy` or `https_proxy` environment variables. 
Lastly, `HttpProxy.Host` is required (if to be used) and `HttpProxy.Port` is optional. -### APPENDIX +#### __CGroups.EnforceLimits__ + +_Type: Boolean_ +_Default: y_ + +If set, the agent will attempt to set cgroups limits for cpu and memory for the agent process itself +as well as extension processes. See the wiki for further details on this. + +#### __CGroups.Excluded__ + +_Type: String_ +_Default: customscript,runcommand_ + +The list of extensions which will be excluded from cgroups limits. This should be comma separated. + +### Telemetry + +WALinuxAgent collects usage data and sends it to Microsoft to help improve our products and services. The data collected is used to track service health and +assist with Azure support requests. Data collected does not include any personally identifiable information. Read our [privacy statement](http://go.microsoft.com/fwlink/?LinkId=521839) +to learn more. + +WALinuxAgent does not support disabling telemetry at this time. WALinuxAgent must be removed to disable telemetry collection. If you need this feature, +please open an issue in GitHub and explain your requirement. + +### Appendix -We do not maintain packaging information in this repo but some samples -are shown below as a reference. See the downstream distribution -repositories for officially maintained packaging. +We do not maintain packaging information in this repo but some samples are shown below as a reference. See the downstream distribution repositories for officially maintained packaging. #### deb packages -The official Ubuntu WALinuxAgent package can be found here: -https://launchpad.net/ubuntu/+source/walinuxagent +The official Ubuntu WALinuxAgent package can be found [here](https://launchpad.net/ubuntu/+source/walinuxagent). Run once: - 1. Install required packages: - `sudo apt-get -y install ubuntu-dev-tools pbuilder python-all debhelper` - - 2. Create the pbuilder environment: - `sudo pbuilder create --debootstrapopts --variant=buildd` - - 3. 
Obtain from a downstream package repo + +1. Install required packages + + ```bash + sudo apt-get -y install ubuntu-dev-tools pbuilder python-all debhelper + ``` + +2. Create the pbuilder environment + + ```bash + sudo pbuilder create --debootstrapopts --variant=buildd + ``` + +3. Obtain `waagent.dsc` from a downstream package repo To compile the package, from the top-most directory: - 1. Build the source package: - `dpkg-buildpackage -S` - 2. Build the package: - `sudo pbuilder build ` +1. Build the source package + + ```bash + dpkg-buildpackage -S + ``` + +2. Build the package + + ```bash + sudo pbuilder build waagent.dsc + ``` + +3. Fetch the built package, usually from `/var/cache/pbuilder/result` - 3. Fetch the built package, usually from `/var/cache/pbuilder/result` - #### rpm packages -The instructions below describe how to build an rpm package. +The instructions below describe how to build an rpm package. + +1. Install setuptools + + ```bash + curl https://bootstrap.pypa.io/ez_setup.py -o - | python + ``` + +2. The following command will build the binary and source RPMs: - 1. Install setuptools - `curl https://bootstrap.pypa.io/ez_setup.py -o - | python` - - 2. 
The following command will build the binary and source RPMs: - `python setup.py bdist_rpm` + ```bash + python setup.py bdist_rpm + ``` ----- diff -Nru walinuxagent-2.2.21+really2.2.20/requirements.txt walinuxagent-2.2.45/requirements.txt --- walinuxagent-2.2.21+really2.2.20/requirements.txt 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/requirements.txt 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,2 @@ +distro; python_version >= '3.8' +pyasn1 \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/setup.py walinuxagent-2.2.45/setup.py --- walinuxagent-2.2.21+really2.2.20/setup.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/setup.py 2019-11-07 00:36:56.000000000 +0000 @@ -25,7 +25,9 @@ from azurelinuxagent.common.osutil import get_osutil import setuptools from setuptools import find_packages -from setuptools.command.install import install as _install +from setuptools.command.install import install as _install +import subprocess +import sys root_dir = os.path.dirname(os.path.abspath(__file__)) os.chdir(root_dir) @@ -58,11 +60,13 @@ data_files.append((dest, src)) -def set_freebsd_rc_files(data_files, dest="/etc/rc.d/", src=["init/freebsd/waagent"]): +def set_freebsd_rc_files(data_files, dest="/etc/rc.d/", + src=["init/freebsd/waagent"]): data_files.append((dest, src)) -def set_openbsd_rc_files(data_files, dest="/etc/rc.d/", src=["init/openbsd/waagent"]): +def set_openbsd_rc_files(data_files, dest="/etc/rc.d/", + src=["init/openbsd/waagent"]): data_files.append((dest, src)) @@ -91,7 +95,6 @@ if version.startswith("7.1"): # TODO this is a mitigation to systemctl bug on 7.1 set_sysv_files(data_files) - elif name == 'arch': set_bin_files(data_files, dest="/usr/bin") set_conf_files(data_files, src=["config/arch/waagent.conf"]) @@ -106,8 +109,7 @@ set_udev_files(data_files) set_files(data_files, dest="/usr/share/oem", src=["init/coreos/cloud-config.yml"]) - elif name == 'clear linux os for intel architecture' \ - or name == 
'clear linux software for intel architecture': + elif "Clear Linux" in fullname: set_bin_files(data_files, dest="/usr/bin") set_conf_files(data_files, dest="/usr/share/defaults/waagent", src=["config/clearlinux/waagent.conf"]) @@ -131,14 +133,14 @@ # Ubuntu15.04+ uses systemd set_systemd_files(data_files, src=["init/ubuntu/walinuxagent.service"]) - elif name == 'suse': + elif name == 'suse' or name == 'opensuse': set_bin_files(data_files) set_conf_files(data_files, src=["config/suse/waagent.conf"]) set_logrotate_files(data_files) set_udev_files(data_files) if fullname == 'SUSE Linux Enterprise Server' and \ version.startswith('11') or \ - fullname == 'openSUSE' and version.startswith( + fullname == 'openSUSE' and version.startswith( '13.1'): set_sysv_files(data_files, dest='/etc/init.d', src=["init/suse/waagent"]) @@ -153,6 +155,27 @@ set_bin_files(data_files, dest="/usr/local/sbin") set_conf_files(data_files, src=["config/openbsd/waagent.conf"]) set_openbsd_rc_files(data_files) + elif name == 'debian': + set_bin_files(data_files) + set_conf_files(data_files, src=["config/debian/waagent.conf"]) + set_logrotate_files(data_files) + set_udev_files(data_files, dest="/lib/udev/rules.d") + if debian_has_systemd(): + set_systemd_files(data_files) + elif name == 'iosxe': + set_bin_files(data_files) + set_conf_files(data_files, src=["config/iosxe/waagent.conf"]) + set_logrotate_files(data_files) + set_udev_files(data_files) + set_systemd_files(data_files, dest="/usr/lib/systemd/system") + if version.startswith("7.1"): + # TODO this is a mitigation to systemctl bug on 7.1 + set_sysv_files(data_files) + elif name == 'openwrt': + set_bin_files(data_files) + set_conf_files(data_files) + set_logrotate_files(data_files) + set_sysv_files(data_files, dest='/etc/init.d', src=["init/openwrt/waagent"]) else: # Use default setting set_bin_files(data_files) @@ -163,6 +186,14 @@ return data_files +def debian_has_systemd(): + try: + return subprocess.check_output( + ['cat', 
'/proc/1/comm']).strip() == 'systemd' + except subprocess.CalledProcessError: + return False + + class install(_install): user_options = _install.user_options + [ ('lnx-distro=', None, 'target Linux distribution'), @@ -199,6 +230,20 @@ osutil.start_agent_service() +# Note to packagers and users from source. +# In version 3.5 of Python distribution information handling in the platform +# module was deprecated. Depending on the Linux distribution the +# implementation may be broken prior to Python 3.7 wher the functionality +# will be removed from Python 3 +requires = [] +if float(sys.version[:3]) >= 3.7: + requires = ['distro'] + +modules = [] + +if "bdist_egg" in sys.argv: + modules.append("__main__") + setuptools.setup( name=AGENT_NAME, version=AGENT_VERSION, @@ -208,8 +253,9 @@ platforms='Linux', url='https://github.com/Azure/WALinuxAgent', license='Apache License Version 2.0', - packages=find_packages(exclude=["tests"]), - py_modules=["__main__"], + packages=find_packages(exclude=["tests*"]), + py_modules=modules, + install_requires=requires, cmdclass={ 'install': install } diff -Nru walinuxagent-2.2.21+really2.2.20/test-requirements.txt walinuxagent-2.2.45/test-requirements.txt --- walinuxagent-2.2.21+really2.2.20/test-requirements.txt 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/test-requirements.txt 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,5 @@ +codecov +coverage +flake8; python_version >= '2.7' +mock +nose \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/dhcp/__init__.py walinuxagent-2.2.45/tests/common/dhcp/__init__.py --- walinuxagent-2.2.21+really2.2.20/tests/common/dhcp/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/dhcp/__init__.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file 
except in compliance with the License. @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/dhcp/test_dhcp.py walinuxagent-2.2.45/tests/common/dhcp/test_dhcp.py --- walinuxagent-2.2.21+really2.2.20/tests/common/dhcp/test_dhcp.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/dhcp/test_dhcp.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import mock @@ -22,6 +22,13 @@ class TestDHCP(AgentTestCase): + + def setUp(self): + AgentTestCase.setUp(self) + + def tearDown(self): + AgentTestCase.tearDown(self) + def test_wireserver_route_exists(self): # setup dhcp_handler = dhcp.get_dhcp_handler() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/__init__.py walinuxagent-2.2.45/tests/common/__init__.py --- walinuxagent-2.2.21+really2.2.20/tests/common/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/__init__.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/osutil/__init__.py walinuxagent-2.2.45/tests/common/osutil/__init__.py --- walinuxagent-2.2.21+really2.2.20/tests/common/osutil/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/osutil/__init__.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/osutil/mock_osutil.py walinuxagent-2.2.45/tests/common/osutil/mock_osutil.py --- walinuxagent-2.2.21+really2.2.20/tests/common/osutil/mock_osutil.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/osutil/mock_osutil.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,53 @@ +# Copyright Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ +# + +from azurelinuxagent.common.osutil.default import DefaultOSUtil + +class MockOSUtil(DefaultOSUtil): + def __init__(self): + self.all_users = {} + self.sudo_users = set() + self.jit_enabled = True + + def useradd(self, username, expiration=None, comment=None): + if username == "": + raise Exception("test exception for bad username") + if username in self.all_users: + raise Exception("test exception, user already exists") + self.all_users[username] = (username, None, None, None, comment, None, None, expiration) + + def conf_sudoer(self, username, nopasswd=False, remove=False): + if not remove: + self.sudo_users.add(username) + else: + self.sudo_users.remove(username) + + def chpasswd(self, username, password, crypt_id=6, salt_len=10): + if password == "": + raise Exception("test exception for bad password") + user = self.all_users[username] + self.all_users[username] = (user[0], password, user[2], user[3], user[4], user[5], user[6], user[7]) + + def del_account(self, username): + if username == "": + raise Exception("test exception, bad data") + if username not in self.all_users: + raise Exception("test exception, user does not exist to delete") + self.all_users.pop(username) + + def get_users(self): + return self.all_users.values() \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_alpine.py walinuxagent-2.2.45/tests/common/osutil/test_alpine.py --- walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_alpine.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/osutil/test_alpine.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,34 @@ +# Copyright 2019 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.6+ and Openssl 1.0+ +# +from azurelinuxagent.common.osutil.alpine import AlpineOSUtil +from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids +from tests.tools import * + + +class TestAlpineOSUtil(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) + + def tearDown(self): + AgentTestCase.tearDown(self) + + def test_get_dhcp_pid_should_return_a_list_of_pids(self): + osutil_get_dhcp_pid_should_return_a_list_of_pids(self, AlpineOSUtil()) + + +if __name__ == '__main__': + unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_arch.py walinuxagent-2.2.45/tests/common/osutil/test_arch.py --- walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_arch.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/osutil/test_arch.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,34 @@ +# Copyright 2019 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ +# +from azurelinuxagent.common.osutil.arch import ArchUtil +from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids +from tests.tools import * + + +class TestArchUtil(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) + + def tearDown(self): + AgentTestCase.tearDown(self) + + def test_get_dhcp_pid_should_return_a_list_of_pids(self): + osutil_get_dhcp_pid_should_return_a_list_of_pids(self, ArchUtil()) + + +if __name__ == '__main__': + unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_bigip.py walinuxagent-2.2.45/tests/common/osutil/test_bigip.py --- walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_bigip.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/osutil/test_bigip.py 2019-11-07 00:36:56.000000000 +0000 @@ -12,18 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # -import os import socket -import time import azurelinuxagent.common.osutil.bigip as osutil import azurelinuxagent.common.osutil.default as default import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.exception import OSUtilError +from azurelinuxagent.common.osutil.bigip import BigIpOSUtil +from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids from tests.tools import * @@ -69,19 +69,6 @@ self.assertEqual(args[0].call_count, 1) -class TestBigIpOSUtil_get_dhcp_pid(AgentTestCase): - - @patch.object(shellutil, "run_get_output", return_value=(0, 8623)) - def test_success(self, *args): - result = osutil.BigIpOSUtil.get_dhcp_pid(osutil.BigIpOSUtil()) - self.assertEqual(result, 8623) - - @patch.object(shellutil, "run_get_output", return_value=(1, 'foo')) - def test_failure(self, *args): - result = osutil.BigIpOSUtil.get_dhcp_pid(osutil.BigIpOSUtil()) - 
self.assertEqual(result, None) - - class TestBigIpOSUtil_useradd(AgentTestCase): @patch.object(osutil.BigIpOSUtil, 'get_userentry', return_value=None) @@ -319,5 +306,16 @@ self.assertEqual(args[2].call_count, 0) +class TestBigIpOSUtil(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) + + def tearDown(self): + AgentTestCase.tearDown(self) + + def test_get_dhcp_pid_should_return_a_list_of_pids(self): + osutil_get_dhcp_pid_should_return_a_list_of_pids(self, BigIpOSUtil()) + + if __name__ == '__main__': unittest.main() \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_clearlinux.py walinuxagent-2.2.45/tests/common/osutil/test_clearlinux.py --- walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_clearlinux.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/osutil/test_clearlinux.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,34 @@ +# Copyright 2019 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ +# +from azurelinuxagent.common.osutil.clearlinux import ClearLinuxUtil +from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids +from tests.tools import * + + +class TestClearLinuxUtil(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) + + def tearDown(self): + AgentTestCase.tearDown(self) + + def test_get_dhcp_pid_should_return_a_list_of_pids(self): + osutil_get_dhcp_pid_should_return_a_list_of_pids(self, ClearLinuxUtil()) + + +if __name__ == '__main__': + unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_coreos.py walinuxagent-2.2.45/tests/common/osutil/test_coreos.py --- walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_coreos.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/osutil/test_coreos.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,34 @@ +# Copyright 2019 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ +# +from azurelinuxagent.common.osutil.coreos import CoreOSUtil +from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids +from tests.tools import * + + +class TestAlpineOSUtil(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) + + def tearDown(self): + AgentTestCase.tearDown(self) + + def test_get_dhcp_pid_should_return_a_list_of_pids(self): + osutil_get_dhcp_pid_should_return_a_list_of_pids(self, CoreOSUtil()) + + +if __name__ == '__main__': + unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_default_osutil.py walinuxagent-2.2.45/tests/common/osutil/test_default_osutil.py --- walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_default_osutil.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/osutil/test_default_osutil.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,182 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from azurelinuxagent.common.osutil.default import DefaultOSUtil, shellutil +from tests.tools import * + + +class DefaultOsUtilTestCase(AgentTestCase): + + def setUp(self): + AgentTestCase.setUp(self) + self.cgroups_file_system_root = os.path.join(self.tmp_dir, "cgroups") + self.mock_base_cgroups = patch("azurelinuxagent.common.osutil.default.BASE_CGROUPS", self.cgroups_file_system_root) + self.mock_base_cgroups.start() + + def tearDown(self): + self.mock_base_cgroups.stop() + + @staticmethod + def _get_mount_commands(mock): + mount_commands = '' + for call_args in mock.call_args_list: + args, kwargs = call_args + mount_commands += ';' + args[0] + return mount_commands + + def test_mount_cgroups_should_mount_the_cpu_and_memory_controllers(self): + # the mount command requires root privileges; make it a no op and check only for file existence + original_run_get_output = shellutil.run_get_output + + def mock_run_get_output(cmd, *args, **kwargs): + if cmd.startswith('mount '): + return 0, None + return original_run_get_output(cmd, *args, **kwargs) + + with patch("azurelinuxagent.common.osutil.default.shellutil.run_get_output", side_effect=mock_run_get_output) as patch_run_get_output: + DefaultOSUtil().mount_cgroups() + + # the directories for the controllers should have been created + for controller in ['cpu', 'memory', 'cpuacct', 'cpu,cpuacct']: + directory = os.path.join(self.cgroups_file_system_root, controller) + self.assertTrue(os.path.exists(directory), "A directory for controller {0} was not created".format(controller)) + + # the cgroup filesystem and the cpu and memory controllers should have been mounted + mount_commands = DefaultOsUtilTestCase._get_mount_commands(patch_run_get_output) + + self.assertRegex(mount_commands, ';mount.* cgroup_root ', 'The cgroups file system was not mounted') + self.assertRegex(mount_commands, ';mount.* cpu,cpuacct ', 'The cpu controller was not mounted') + 
self.assertRegex(mount_commands, ';mount.* memory ', 'The memory controller was not mounted') + + def test_mount_cgroups_should_not_mount_the_cgroups_file_system_when_it_already_exists(self): + os.mkdir(self.cgroups_file_system_root) + + original_run_get_output = shellutil.run_get_output + + def mock_run_get_output(cmd, *args, **kwargs): + if cmd.startswith('mount '): + return 0, None + return original_run_get_output(cmd, *args, **kwargs) + + with patch("azurelinuxagent.common.osutil.default.shellutil.run_get_output", side_effect=mock_run_get_output) as patch_run_get_output: + DefaultOSUtil().mount_cgroups() + + mount_commands = DefaultOsUtilTestCase._get_mount_commands(patch_run_get_output) + + self.assertNotIn('cgroup_root', mount_commands, 'The cgroups file system should not have been mounted') + self.assertRegex(mount_commands, ';mount.* cpu,cpuacct ', 'The cpu controller was not mounted') + self.assertRegex(mount_commands, ';mount.* memory ', 'The memory controller was not mounted') + + def test_mount_cgroups_should_not_mount_cgroup_controllers_when_they_already_exist(self): + os.mkdir(self.cgroups_file_system_root) + os.mkdir(os.path.join(self.cgroups_file_system_root, 'cpu,cpuacct')) + os.mkdir(os.path.join(self.cgroups_file_system_root, 'memory')) + + original_run_get_output = shellutil.run_get_output + + def mock_run_get_output(cmd, *args, **kwargs): + if cmd.startswith('mount '): + return 0, None + return original_run_get_output(cmd, *args, **kwargs) + + with patch("azurelinuxagent.common.osutil.default.shellutil.run_get_output", side_effect=mock_run_get_output) as patch_run_get_output: + DefaultOSUtil().mount_cgroups() + + mount_commands = DefaultOsUtilTestCase._get_mount_commands(patch_run_get_output) + + self.assertNotIn('cgroup_root', mount_commands, 'The cgroups file system should not have been mounted') + self.assertNotIn('cpu,cpuacct', mount_commands, 'The cpu controller should not have been mounted') + self.assertNotIn('memory', mount_commands, 
'The memory controller should not have been mounted') + + def test_mount_cgroups_should_handle_errors_when_mounting_an_individual_controller(self): + original_run_get_output = shellutil.run_get_output + + def mock_run_get_output(cmd, *args, **kwargs): + if cmd.startswith('mount '): + if 'memory' in cmd: + raise Exception('A test exception mounting the memory controller') + return 0, None + return original_run_get_output(cmd, *args, **kwargs) + + with patch("azurelinuxagent.common.osutil.default.shellutil.run_get_output", side_effect=mock_run_get_output) as patch_run_get_output: + with patch("azurelinuxagent.common.cgroupconfigurator.logger.warn") as mock_logger_warn: + DefaultOSUtil().mount_cgroups() + + # the cgroup filesystem and the cpu controller should still have been mounted + mount_commands = DefaultOsUtilTestCase._get_mount_commands(patch_run_get_output) + + self.assertRegex(mount_commands, ';mount.* cgroup_root ', 'The cgroups file system was not mounted') + self.assertRegex(mount_commands, ';mount.* cpu,cpuacct ', 'The cpu controller was not mounted') + + # A warning should have been logged for the memory controller + args, kwargs = mock_logger_warn.call_args + self.assertIn('A test exception mounting the memory controller', args) + + def test_mount_cgroups_should_raise_when_the_cgroups_filesystem_fails_to_mount(self): + original_run_get_output = shellutil.run_get_output + + def mock_run_get_output(cmd, *args, **kwargs): + if cmd.startswith('mount '): + if 'cgroup_root' in cmd: + raise Exception('A test exception mounting the cgroups file system') + return 0, None + return original_run_get_output(cmd, *args, **kwargs) + + with patch("azurelinuxagent.common.osutil.default.shellutil.run_get_output", side_effect=mock_run_get_output) as patch_run_get_output: + with self.assertRaises(Exception) as context_manager: + DefaultOSUtil().mount_cgroups() + + self.assertRegex(str(context_manager.exception), 'A test exception mounting the cgroups file system') + + 
mount_commands = DefaultOsUtilTestCase._get_mount_commands(patch_run_get_output) + self.assertNotIn('memory', mount_commands, 'The memory controller should not have been mounted') + self.assertNotIn('cpu', mount_commands, 'The cpu controller should not have been mounted') + + def test_mount_cgroups_should_raise_when_all_controllers_fail_to_mount(self): + original_run_get_output = shellutil.run_get_output + + def mock_run_get_output(cmd, *args, **kwargs): + if cmd.startswith('mount '): + if 'memory' in cmd or 'cpu,cpuacct' in cmd: + raise Exception('A test exception mounting a cgroup controller') + return 0, None + return original_run_get_output(cmd, *args, **kwargs) + + with patch("azurelinuxagent.common.osutil.default.shellutil.run_get_output", side_effect=mock_run_get_output): + with self.assertRaises(Exception) as context_manager: + DefaultOSUtil().mount_cgroups() + + self.assertRegex(str(context_manager.exception), 'A test exception mounting a cgroup controller') + + def test_mount_cgroups_should_not_create_symbolic_links_when_the_cpu_controller_fails_to_mount(self): + original_run_get_output = shellutil.run_get_output + + def mock_run_get_output(cmd, *args, **kwargs): + if cmd.startswith('mount '): + if 'cpu,cpuacct' in cmd: + raise Exception('A test exception mounting the cpu controller') + return 0, None + return original_run_get_output(cmd, *args, **kwargs) + + with patch("azurelinuxagent.common.osutil.default.shellutil.run_get_output", side_effect=mock_run_get_output): + with patch("azurelinuxagent.common.osutil.default.os.symlink") as patch_symlink: + DefaultOSUtil().mount_cgroups() + + self.assertEquals(patch_symlink.call_count, 0, 'A symbolic link should not have been created') + + def test_default_service_name(self): + self.assertEquals(DefaultOSUtil().get_service_name(), "waagent") diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_default.py walinuxagent-2.2.45/tests/common/osutil/test_default.py --- 
walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_default.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/osutil/test_default.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,24 +12,39 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import socket import glob import mock +import traceback +import re import azurelinuxagent.common.osutil.default as osutil import azurelinuxagent.common.utils.shellutil as shellutil +import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.exception import OSUtilError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil -from azurelinuxagent.common.utils import fileutil -from azurelinuxagent.common.utils.flexible_version import FlexibleVersion from tests.tools import * +actual_get_proc_net_route = 'azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_net_route' + + +def fake_is_loopback(_, iface): + return iface.startswith('lo') + + class TestOSUtil(AgentTestCase): + + def setUp(self): + AgentTestCase.setUp(self) + + def tearDown(self): + AgentTestCase.tearDown(self) + def test_restart(self): # setup retries = 3 @@ -88,7 +103,83 @@ self.assertTrue(msg in ustr(ose)) self.assertTrue(patch_run.call_count == 6) - def test_get_first_if(self): + def test_empty_proc_net_route(self): + routing_table = "" + + mo = mock.mock_open(read_data=routing_table) + with patch(open_patch(), mo): + self.assertEqual(len(osutil.DefaultOSUtil().read_route_table()), 0) + + def test_no_routes(self): + routing_table = 'Iface\tDestination\tGateway 
\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU\tWindow\tIRTT \n' + + mo = mock.mock_open(read_data=routing_table) + with patch(open_patch(), mo): + raw_route_list = osutil.DefaultOSUtil().read_route_table() + + self.assertEqual(len(osutil.DefaultOSUtil().get_list_of_routes(raw_route_list)), 0) + + def test_bogus_proc_net_route(self): + routing_table = 'Iface\tDestination\tGateway \tFlags\t\tUse\tMetric\t\neth0\t00000000\t00000000\t0001\t\t0\t0\n' + + mo = mock.mock_open(read_data=routing_table) + with patch(open_patch(), mo): + raw_route_list = osutil.DefaultOSUtil().read_route_table() + + self.assertEqual(len(osutil.DefaultOSUtil().get_list_of_routes(raw_route_list)), 0) + + def test_valid_routes(self): + routing_table = \ + 'Iface\tDestination\tGateway \tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU\tWindow\tIRTT \n' \ + 'eth0\t00000000\tC1BB910A\t0003\t0\t0\t0\t00000000\t0\t0\t0 \n' \ + 'eth0\tC0BB910A\t00000000\t0001\t0\t0\t0\tC0FFFFFF\t0\t0\t0 \n' \ + 'eth0\t10813FA8\tC1BB910A\t000F\t0\t0\t0\tFFFFFFFF\t0\t0\t0 \n' \ + 'eth0\tFEA9FEA9\tC1BB910A\t0007\t0\t0\t0\tFFFFFFFF\t0\t0\t0 \n' \ + 'docker0\t002BA8C0\t00000000\t0001\t0\t0\t10\t00FFFFFF\t0\t0\t0 \n' + known_sha1_hash = b'\x1e\xd1k\xae[\xf8\x9b\x1a\x13\xd0\xbbT\xa4\xe3Y\xa3\xdd\x0b\xbd\xa9' + + mo = mock.mock_open(read_data=routing_table) + with patch(open_patch(), mo): + raw_route_list = osutil.DefaultOSUtil().read_route_table() + + self.assertEqual(len(raw_route_list), 6) + self.assertEqual(textutil.hash_strings(raw_route_list), known_sha1_hash) + + route_list = osutil.DefaultOSUtil().get_list_of_routes(raw_route_list) + + self.assertEqual(len(route_list), 5) + self.assertEqual(route_list[0].gateway_quad(), '10.145.187.193') + self.assertEqual(route_list[1].gateway_quad(), '0.0.0.0') + self.assertEqual(route_list[1].mask_quad(), '255.255.255.192') + self.assertEqual(route_list[2].destination_quad(), '168.63.129.16') + self.assertEqual(route_list[1].flags, 1) + self.assertEqual(route_list[2].flags, 15) + 
self.assertEqual(route_list[3].flags, 7) + self.assertEqual(route_list[3].metric, 0) + self.assertEqual(route_list[4].metric, 10) + self.assertEqual(route_list[0].interface, 'eth0') + self.assertEqual(route_list[4].interface, 'docker0') + + @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.get_primary_interface', return_value='eth0') + @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil._get_all_interfaces', return_value={'eth0':'10.0.0.1'}) + @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.is_loopback', fake_is_loopback) + def test_get_first_if(self, get_all_interfaces_mock, get_primary_interface_mock): + """ + Validate that the agent can find the first active non-loopback + interface. + + This test case used to run live, but not all developers have an eth* + interface. It is perfectly valid to have a br*, but this test does not + account for that. + """ + ifname, ipaddr = osutil.DefaultOSUtil().get_first_if() + self.assertEqual(ifname, 'eth0') + self.assertEqual(ipaddr, '10.0.0.1') + + @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.get_primary_interface', return_value='bogus0') + @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil._get_all_interfaces', return_value={'eth0':'10.0.0.1', 'lo': '127.0.0.1'}) + @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.is_loopback', fake_is_loopback) + def test_get_first_if_nosuchprimary(self, get_all_interfaces_mock, get_primary_interface_mock): ifname, ipaddr = osutil.DefaultOSUtil().get_first_if() self.assertTrue(ifname.startswith('eth')) self.assertTrue(ipaddr is not None) @@ -97,9 +188,31 @@ except socket.error: self.fail("not a valid ip address") + def test_get_first_if_all_loopback(self): + fake_ifaces = {'lo':'127.0.0.1'} + with patch.object(osutil.DefaultOSUtil, 'get_primary_interface', return_value='bogus0'): + with patch.object(osutil.DefaultOSUtil, '_get_all_interfaces', return_value=fake_ifaces): + self.assertEqual(('', ''), 
osutil.DefaultOSUtil().get_first_if()) + + def test_get_all_interfaces(self): + loopback_count = 0 + non_loopback_count = 0 + + for iface in osutil.DefaultOSUtil()._get_all_interfaces(): + if iface == 'lo': + loopback_count += 1 + else: + non_loopback_count += 1 + + self.assertEqual(loopback_count, 1, 'Exactly 1 loopback network interface should exist') + self.assertGreater(loopback_count, 0, 'At least 1 non-loopback network interface should exist') + def test_isloopback(self): - self.assertTrue(osutil.DefaultOSUtil().is_loopback(b'lo')) - self.assertFalse(osutil.DefaultOSUtil().is_loopback(b'eth0')) + for iface in osutil.DefaultOSUtil()._get_all_interfaces(): + if iface == 'lo': + self.assertTrue(osutil.DefaultOSUtil().is_loopback(iface)) + else: + self.assertFalse(osutil.DefaultOSUtil().is_loopback(iface)) def test_isprimary(self): routing_table = "\ @@ -178,6 +291,7 @@ try: osutil.DefaultOSUtil().get_first_if()[0] except Exception as e: + print(traceback.format_exc()) exception = True self.assertFalse(exception) @@ -199,12 +313,23 @@ self.assertTrue(endpoint is not None) self.assertEqual(endpoint, "168.63.129.16") + def test_dhcp_lease_custom_dns(self): + """ + Validate that the wireserver address is coming from option 245 + (on default configurations the address is also available in the domain-name-servers option, but users + may set up a custom dns server on their vnet) + """ + with patch.object(glob, "glob", return_value=['/var/lib/dhcp/dhclient.eth0.leases']): + with patch(open_patch(), mock.mock_open(read_data=load_data("dhcp.leases.custom.dns"))): + endpoint = get_osutil(distro_name='ubuntu', distro_version='14.04').get_dhcp_lease_endpoint() + self.assertEqual(endpoint, "168.63.129.16") + def test_dhcp_lease_multi(self): with patch.object(glob, "glob", return_value=['/var/lib/dhcp/dhclient.eth0.leases']): with patch(open_patch(), mock.mock_open(read_data=load_data("dhcp.leases.multi"))): endpoint = get_osutil(distro_name='ubuntu', 
distro_version='12.04').get_dhcp_lease_endpoint() self.assertTrue(endpoint is not None) - self.assertEqual(endpoint, "second") + self.assertEqual(endpoint, "168.63.129.2") def test_get_total_mem(self): """ @@ -386,6 +511,9 @@ self.assertEqual( "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8", util._correct_instance_id("544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8")) + self.assertEqual( + "d0df4c54-4ecb-4a4b-9954-5bdf3ed5c3b8", + util._correct_instance_id("544cdfd0-cb4e-4b4a-9954-5bdf3ed5c3b8")) @patch('os.path.isfile', return_value=True) @patch('azurelinuxagent.common.utils.fileutil.read_file', @@ -442,6 +570,10 @@ def test_is_current_instance_id_from_file(self, mock_read, mock_isfile): util = osutil.DefaultOSUtil() + mock_read.return_value = "11111111-2222-3333-4444-556677889900" + self.assertFalse(util.is_current_instance_id( + "B9F3C233-9913-9F42-8EB3-BA656DF32502")) + mock_read.return_value = "B9F3C233-9913-9F42-8EB3-BA656DF32502" self.assertTrue(util.is_current_instance_id( "B9F3C233-9913-9F42-8EB3-BA656DF32502")) @@ -450,6 +582,14 @@ self.assertTrue(util.is_current_instance_id( "B9F3C233-9913-9F42-8EB3-BA656DF32502")) + mock_read.return_value = "b9f3c233-9913-9f42-8eb3-ba656df32502" + self.assertTrue(util.is_current_instance_id( + "B9F3C233-9913-9F42-8EB3-BA656DF32502")) + + mock_read.return_value = "33c2f3b9-1399-429f-8eb3-ba656df32502" + self.assertTrue(util.is_current_instance_id( + "B9F3C233-9913-9F42-8EB3-BA656DF32502")) + @patch('os.path.isfile', return_value=False) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') def test_is_current_instance_id_from_dmidecode(self, mock_shell, mock_isfile): @@ -516,6 +656,16 @@ self.assertEqual(-1, util.get_firewall_dropped_packets("not used")) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') + def test_get_firewall_dropped_packets_transient_error_ignored(self, mock_output): + osutil._enable_firewall = True + util = osutil.DefaultOSUtil() + + mock_output.side_effect = [ + (0, "iptables 
v{0}".format(osutil.IPTABLES_LOCKING_VERSION)), + (3, "can't initialize iptables table `security': iptables who? (do you need to insmod?)")] + self.assertEqual(0, util.get_firewall_dropped_packets("not used")) + + @patch('azurelinuxagent.common.utils.shellutil.run_get_output') def test_get_firewall_dropped_packets(self, mock_output): osutil._enable_firewall = True util = osutil.DefaultOSUtil() @@ -638,6 +788,34 @@ ]) self.assertFalse(osutil._enable_firewall) + @patch('azurelinuxagent.common.utils.shellutil.run_get_output') + @patch('azurelinuxagent.common.utils.shellutil.run') + def test_enable_firewall_checks_for_invalid_iptables_options(self, mock_run, mock_output): + osutil._enable_firewall = True + util = osutil.DefaultOSUtil() + + dst = '1.2.3.4' + version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) + wait = "-w" + + # iptables uses the following exit codes + # 0 - correct function + # 1 - other errors + # 2 - errors which appear to be caused by invalid or abused command + # line parameters + mock_run.side_effect = [2] + mock_output.return_value = (0, version) + + self.assertFalse(util.enable_firewall(dst_ip='1.2.3.4', uid=42)) + self.assertFalse(osutil._enable_firewall) + + mock_run.assert_has_calls([ + call(osutil.FIREWALL_DROP.format(wait, "C", dst), chk_err=False), + ]) + mock_output.assert_has_calls([ + call(osutil.IPTABLES_VERSION) + ]) + @patch('os.getuid', return_value=42) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') @patch('azurelinuxagent.common.utils.shellutil.run') @@ -670,12 +848,20 @@ version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) wait = "-w" - mock_run.side_effect = [0, 0] + mock_run.side_effect = [0, 1, 0, 1, 0, 1] mock_output.side_effect = [(0, version), (0, "Output")] - self.assertTrue(util.remove_firewall()) + self.assertTrue(util.remove_firewall(dst, uid)) mock_run.assert_has_calls([ - call(osutil.FIREWALL_FLUSH.format(wait), chk_err=True) + # delete rules < 2.2.26 + 
call(osutil.FIREWALL_DELETE_CONNTRACK_ACCEPT.format(wait, dst), chk_err=False), + call(osutil.FIREWALL_DELETE_CONNTRACK_ACCEPT.format(wait, dst), chk_err=False), + call(osutil.FIREWALL_DELETE_OWNER_ACCEPT.format(wait, dst, uid), chk_err=False), + call(osutil.FIREWALL_DELETE_OWNER_ACCEPT.format(wait, dst, uid), chk_err=False), + + # delete rules >= 2.2.26 + call(osutil.FIREWALL_DELETE_CONNTRACK_DROP.format(wait, dst), chk_err=False), + call(osutil.FIREWALL_DELETE_CONNTRACK_DROP.format(wait, dst), chk_err=False), ]) mock_output.assert_has_calls([ call(osutil.IPTABLES_VERSION) @@ -689,15 +875,17 @@ osutil._enable_firewall = True util = osutil.DefaultOSUtil() + dst_ip='1.2.3.4' + uid=42 version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) wait = "-w" - mock_run.side_effect = [1, 0] + mock_run.side_effect = [2] mock_output.side_effect = [(0, version), (1, "Output")] - self.assertFalse(util.remove_firewall()) + self.assertFalse(util.remove_firewall(dst_ip, uid)) mock_run.assert_has_calls([ - call(osutil.FIREWALL_FLUSH.format(wait), chk_err=True) + call(osutil.FIREWALL_DELETE_CONNTRACK_ACCEPT.format(wait, dst_ip), chk_err=False), ]) mock_output.assert_has_calls([ call(osutil.IPTABLES_VERSION) @@ -713,6 +901,85 @@ self.assertTrue(mock_run.call_count == 1) self.assertTrue(mock_output.call_count == 1) + @skip_if_predicate_true(running_under_travis, "The ip command isn't available in Travis") + def test_get_nic_state(self): + state = osutil.DefaultOSUtil().get_nic_state() + self.assertNotEqual(state, {}) + self.assertGreater(len(state.keys()), 1) + + another_state = osutil.DefaultOSUtil().get_nic_state() + name = list(another_state.keys())[0] + another_state[name].add_ipv4("xyzzy") + self.assertNotEqual(state, another_state) + + def test_get_dhcp_pid_should_return_a_list_of_pids(self): + osutil_get_dhcp_pid_should_return_a_list_of_pids(self, osutil.DefaultOSUtil()) + + def test_get_dhcp_pid_should_return_an_empty_list_when_the_dhcp_client_is_not_running(self): + 
original_run_command = shellutil.run_command + + def mock_run_command(cmd): + return original_run_command(["pidof", "non-existing-process"]) + + with patch("azurelinuxagent.common.utils.shellutil.run_command", side_effect=mock_run_command): + pid_list = osutil.DefaultOSUtil().get_dhcp_pid() + + self.assertTrue(len(pid_list) == 0, "the return value is not an empty list: {0}".format(pid_list)) + + @patch('os.walk', return_value=[('host3/target3:0:1/3:0:1:0/block', ['sdb'], [])]) + @patch('azurelinuxagent.common.utils.fileutil.read_file', return_value='{00000000-0001-8899-0000-000000000000}') + @patch('os.listdir', return_value=['00000000-0001-8899-0000-000000000000']) + @patch('os.path.exists', return_value=True) + def test_device_for_ide_port_gen1_success( + self, + os_path_exists, + os_listdir, + fileutil_read_file, + os_walk): + dev = osutil.DefaultOSUtil().device_for_ide_port(1) + self.assertEqual(dev, 'sdb', 'The returned device should be the resource disk') + + @patch('os.walk', return_value=[('host0/target0:0:0/0:0:0:1/block', ['sdb'], [])]) + @patch('azurelinuxagent.common.utils.fileutil.read_file', return_value='{f8b3781a-1e82-4818-a1c3-63d806ec15bb}') + @patch('os.listdir', return_value=['f8b3781a-1e82-4818-a1c3-63d806ec15bb']) + @patch('os.path.exists', return_value=True) + def test_device_for_ide_port_gen2_success( + self, + os_path_exists, + os_listdir, + fileutil_read_file, + os_walk): + dev = osutil.DefaultOSUtil().device_for_ide_port(1) + self.assertEqual(dev, 'sdb', 'The returned device should be the resource disk') + + @patch('os.listdir', return_value=['00000000-0000-0000-0000-000000000000']) + @patch('os.path.exists', return_value=True) + def test_device_for_ide_port_none( + self, + os_path_exists, + os_listdir): + dev = osutil.DefaultOSUtil().device_for_ide_port(1) + self.assertIsNone(dev, 'None should be returned if no resource disk found') + +def osutil_get_dhcp_pid_should_return_a_list_of_pids(test_instance, osutil_instance): + """ + This is a 
very basic test for osutil.get_dhcp_pid. It is simply meant to exercise the implementation of that method + in case there are any basic errors, such as a typos, etc. The test does not verify that the implementation returns + the PID for the actual dhcp client; in fact, it uses a mock that invokes pidof to return the PID of an arbitrary + process (the pidof process itself). Most implementations of get_dhcp_pid use pidof with the appropriate name for + the dhcp client. + The test is defined as a global function to make it easily accessible from the test suites for each distro. + """ + original_run_command = shellutil.run_command + + def mock_run_command(cmd): + return original_run_command(["pidof", "pidof"]) + + with patch("azurelinuxagent.common.utils.shellutil.run_command", side_effect=mock_run_command): + pid = osutil_instance.get_dhcp_pid() + + test_instance.assertTrue(len(pid) != 0, "get_dhcp_pid did not return a PID") + if __name__ == '__main__': unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_factory.py walinuxagent-2.2.45/tests/common/osutil/test_factory.py --- walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_factory.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/osutil/test_factory.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,273 @@ +# Copyright 2019 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from azurelinuxagent.common.osutil.factory import _get_osutil +from azurelinuxagent.common.osutil.default import DefaultOSUtil +from azurelinuxagent.common.osutil.arch import ArchUtil +from azurelinuxagent.common.osutil.clearlinux import ClearLinuxUtil +from azurelinuxagent.common.osutil.coreos import CoreOSUtil +from azurelinuxagent.common.osutil.debian import DebianOSBaseUtil, DebianOSModernUtil +from azurelinuxagent.common.osutil.freebsd import FreeBSDOSUtil +from azurelinuxagent.common.osutil.openbsd import OpenBSDOSUtil +from azurelinuxagent.common.osutil.redhat import RedhatOSUtil, Redhat6xOSUtil +from azurelinuxagent.common.osutil.suse import SUSEOSUtil, SUSE11OSUtil +from azurelinuxagent.common.osutil.ubuntu import UbuntuOSUtil, Ubuntu12OSUtil, Ubuntu14OSUtil, \ + UbuntuSnappyOSUtil, Ubuntu16OSUtil, Ubuntu18OSUtil +from azurelinuxagent.common.osutil.alpine import AlpineOSUtil +from azurelinuxagent.common.osutil.bigip import BigIpOSUtil +from azurelinuxagent.common.osutil.gaia import GaiaOSUtil +from azurelinuxagent.common.osutil.iosxe import IosxeOSUtil +from azurelinuxagent.common.osutil.openwrt import OpenWRTOSUtil +from tests.tools import * + + +class TestOsUtilFactory(AgentTestCase): + + def setUp(self): + AgentTestCase.setUp(self) + + def tearDown(self): + AgentTestCase.tearDown(self) + + @patch("azurelinuxagent.common.logger.warn") + def test_get_osutil_it_should_return_default(self, patch_logger): + ret = _get_osutil(distro_name="", + distro_code_name="", + distro_version="", + distro_full_name="") + self.assertTrue(type(ret) == DefaultOSUtil) + self.assertEquals(patch_logger.call_count, 1) + self.assertEquals(ret.get_service_name(), "waagent") + + def test_get_osutil_it_should_return_ubuntu(self): + ret = _get_osutil(distro_name="ubuntu", + distro_code_name="", + distro_version="10.04", + distro_full_name="") + self.assertTrue(type(ret) == UbuntuOSUtil) + self.assertEquals(ret.get_service_name(), 
"walinuxagent") + + ret = _get_osutil(distro_name="ubuntu", + distro_code_name="", + distro_version="12.04", + distro_full_name="") + self.assertTrue(type(ret) == Ubuntu12OSUtil) + self.assertEquals(ret.get_service_name(), "walinuxagent") + + ret = _get_osutil(distro_name="ubuntu", + distro_code_name="trusty", + distro_version="14.04", + distro_full_name="") + self.assertTrue(type(ret) == Ubuntu14OSUtil) + self.assertEquals(ret.get_service_name(), "walinuxagent") + + ret = _get_osutil(distro_name="ubuntu", + distro_code_name="xenial", + distro_version="16.04", + distro_full_name="") + self.assertTrue(type(ret) == Ubuntu16OSUtil) + self.assertEquals(ret.get_service_name(), "walinuxagent") + + ret = _get_osutil(distro_name="ubuntu", + distro_code_name="", + distro_version="18.04", + distro_full_name="") + self.assertTrue(type(ret) == Ubuntu18OSUtil) + self.assertEquals(ret.get_service_name(), "walinuxagent") + + ret = _get_osutil(distro_name="ubuntu", + distro_code_name="", + distro_version="10.04", + distro_full_name="Snappy Ubuntu Core") + self.assertTrue(type(ret) == UbuntuSnappyOSUtil) + self.assertEquals(ret.get_service_name(), "walinuxagent") + + def test_get_osutil_it_should_return_arch(self): + ret = _get_osutil(distro_name="arch", + distro_code_name="", + distro_version="", + distro_full_name="") + self.assertTrue(type(ret) == ArchUtil) + self.assertEquals(ret.get_service_name(), "waagent") + + def test_get_osutil_it_should_return_clear_linux(self): + ret = _get_osutil(distro_name="clear linux", + distro_code_name="", + distro_version="", + distro_full_name="Clear Linux") + self.assertTrue(type(ret) == ClearLinuxUtil) + self.assertEquals(ret.get_service_name(), "waagent") + + def test_get_osutil_it_should_return_alpine(self): + ret = _get_osutil(distro_name="alpine", + distro_code_name="", + distro_version="", + distro_full_name="") + self.assertTrue(type(ret) == AlpineOSUtil) + self.assertEquals(ret.get_service_name(), "waagent") + + def 
test_get_osutil_it_should_return_kali(self): + ret = _get_osutil(distro_name="kali", + distro_code_name="", + distro_version="", + distro_full_name="") + self.assertTrue(type(ret) == DebianOSBaseUtil) + self.assertEquals(ret.get_service_name(), "waagent") + + def test_get_osutil_it_should_return_coreos(self): + ret = _get_osutil(distro_name="coreos", + distro_code_name="", + distro_version="", + distro_full_name="") + self.assertTrue(type(ret) == CoreOSUtil) + self.assertEquals(ret.get_service_name(), "waagent") + + def test_get_osutil_it_should_return_suse(self): + ret = _get_osutil(distro_name="suse", + distro_code_name="", + distro_version="10", + distro_full_name="") + self.assertTrue(type(ret) == SUSEOSUtil) + self.assertEquals(ret.get_service_name(), "waagent") + + ret = _get_osutil(distro_name="suse", + distro_code_name="", + distro_full_name="SUSE Linux Enterprise Server", + distro_version="11") + self.assertTrue(type(ret) == SUSE11OSUtil) + self.assertEquals(ret.get_service_name(), "waagent") + + ret = _get_osutil(distro_name="suse", + distro_code_name="", + distro_full_name="openSUSE", + distro_version="12") + self.assertTrue(type(ret) == SUSE11OSUtil) + self.assertEquals(ret.get_service_name(), "waagent") + + def test_get_osutil_it_should_return_debian(self): + ret = _get_osutil(distro_name="debian", + distro_code_name="", + distro_full_name="", + distro_version="7") + self.assertTrue(type(ret) == DebianOSBaseUtil) + self.assertEquals(ret.get_service_name(), "waagent") + + ret = _get_osutil(distro_name="debian", + distro_code_name="", + distro_full_name="", + distro_version="8") + self.assertTrue(type(ret) == DebianOSModernUtil) + self.assertEquals(ret.get_service_name(), "walinuxagent") + + def test_get_osutil_it_should_return_redhat(self): + ret = _get_osutil(distro_name="redhat", + distro_code_name="", + distro_full_name="", + distro_version="6") + self.assertTrue(type(ret) == Redhat6xOSUtil) + self.assertEquals(ret.get_service_name(), "waagent") + + 
ret = _get_osutil(distro_name="centos", + distro_code_name="", + distro_full_name="", + distro_version="6") + self.assertTrue(type(ret) == Redhat6xOSUtil) + self.assertEquals(ret.get_service_name(), "waagent") + + ret = _get_osutil(distro_name="oracle", + distro_code_name="", + distro_full_name="", + distro_version="6") + self.assertTrue(type(ret) == Redhat6xOSUtil) + self.assertEquals(ret.get_service_name(), "waagent") + + ret = _get_osutil(distro_name="redhat", + distro_code_name="", + distro_full_name="", + distro_version="7") + self.assertTrue(type(ret) == RedhatOSUtil) + self.assertEquals(ret.get_service_name(), "waagent") + + ret = _get_osutil(distro_name="centos", + distro_code_name="", + distro_full_name="", + distro_version="7") + self.assertTrue(type(ret) == RedhatOSUtil) + self.assertEquals(ret.get_service_name(), "waagent") + + ret = _get_osutil(distro_name="oracle", + distro_code_name="", + distro_full_name="", + distro_version="7") + self.assertTrue(type(ret) == RedhatOSUtil) + self.assertEquals(ret.get_service_name(), "waagent") + + def test_get_osutil_it_should_return_euleros(self): + ret = _get_osutil(distro_name="euleros", + distro_code_name="", + distro_version="", + distro_full_name="") + self.assertTrue(type(ret) == RedhatOSUtil) + self.assertEquals(ret.get_service_name(), "waagent") + + def test_get_osutil_it_should_return_freebsd(self): + ret = _get_osutil(distro_name="freebsd", + distro_code_name="", + distro_version="", + distro_full_name="") + self.assertTrue(type(ret) == FreeBSDOSUtil) + self.assertEquals(ret.get_service_name(), "waagent") + + def test_get_osutil_it_should_return_openbsd(self): + ret = _get_osutil(distro_name="openbsd", + distro_code_name="", + distro_version="", + distro_full_name="") + self.assertTrue(type(ret) == OpenBSDOSUtil) + self.assertEquals(ret.get_service_name(), "waagent") + + def test_get_osutil_it_should_return_bigip(self): + ret = _get_osutil(distro_name="bigip", + distro_code_name="", + distro_version="", 
+ distro_full_name="") + self.assertTrue(type(ret) == BigIpOSUtil) + self.assertEquals(ret.get_service_name(), "waagent") + + def test_get_osutil_it_should_return_gaia(self): + ret = _get_osutil(distro_name="gaia", + distro_code_name="", + distro_version="", + distro_full_name="") + self.assertTrue(type(ret) == GaiaOSUtil) + self.assertEquals(ret.get_service_name(), "waagent") + + def test_get_osutil_it_should_return_iosxe(self): + ret = _get_osutil(distro_name="iosxe", + distro_code_name="", + distro_version="", + distro_full_name="") + self.assertTrue(type(ret) == IosxeOSUtil) + self.assertEquals(ret.get_service_name(), "waagent") + + def test_get_osutil_it_should_return_openwrt(self): + ret = _get_osutil(distro_name="openwrt", + distro_code_name="", + distro_version="", + distro_full_name="") + self.assertTrue(type(ret) == OpenWRTOSUtil) + self.assertEquals(ret.get_service_name(), "waagent") diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_freebsd.py walinuxagent-2.2.45/tests/common/osutil/test_freebsd.py --- walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_freebsd.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/osutil/test_freebsd.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,124 @@ +# Copyright 2019 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ +# +import mock + +from azurelinuxagent.common.osutil.freebsd import FreeBSDOSUtil +import azurelinuxagent.common.utils.shellutil as shellutil +from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids +from tests.tools import * + +class TestFreeBSDOSUtil(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) + + def tearDown(self): + AgentTestCase.tearDown(self) + + def test_get_dhcp_pid_should_return_a_list_of_pids(self): + osutil_get_dhcp_pid_should_return_a_list_of_pids(self, FreeBSDOSUtil()) + + def test_empty_proc_net_route(self): + route_table = "" + + with patch.object(shellutil, 'run_command', return_value=route_table): + # Header line only + self.assertEqual(len(FreeBSDOSUtil().read_route_table()), 1) + + def test_no_routes(self): + route_table = """Routing tables + +Internet: +Destination Gateway Flags Netif Expire +""" + + with patch.object(shellutil, 'run_command', return_value=route_table): + raw_route_list = FreeBSDOSUtil().read_route_table() + + self.assertEqual(len(FreeBSDOSUtil().get_list_of_routes(raw_route_list)), 0) + + def test_bogus_proc_net_route(self): + route_table = """Routing tables + +Internet: +Destination Gateway Flags Netif Expire +1.1.1 0.0.0 +""" + + with patch.object(shellutil, 'run_command', return_value=route_table): + raw_route_list = FreeBSDOSUtil().read_route_table() + + self.assertEqual(len(FreeBSDOSUtil().get_list_of_routes(raw_route_list)), 0) + + def test_valid_routes(self): + route_table = """Routing tables + +Internet: +Destination Gateway Flags Netif Expire +0.0.0.0 10.145.187.193 UGS em0 +10.145.187.192/26 0.0.0.0 US em0 +168.63.129.16 10.145.187.193 UH em0 +169.254.169.254 10.145.187.193 UHS em0 +192.168.43.0 0.0.0.0 US vtbd0 +""" + + with patch.object(shellutil, 'run_command', return_value=route_table): + raw_route_list = FreeBSDOSUtil().read_route_table() + + self.assertEqual(len(raw_route_list), 6) + + route_list = 
FreeBSDOSUtil().get_list_of_routes(raw_route_list) + + self.assertEqual(len(route_list), 5) + self.assertEqual(route_list[0].gateway_quad(), '10.145.187.193') + self.assertEqual(route_list[1].gateway_quad(), '0.0.0.0') + self.assertEqual(route_list[1].mask_quad(), '255.255.255.192') + self.assertEqual(route_list[2].destination_quad(), '168.63.129.16') + self.assertEqual(route_list[1].flags, 1) + self.assertEqual(route_list[2].flags, 33) + self.assertEqual(route_list[3].flags, 5) + self.assertEqual((route_list[3].metric - route_list[4].metric), 1) + self.assertEqual(route_list[0].interface, 'em0') + self.assertEqual(route_list[4].interface, 'vtbd0') + + def test_get_first_if(self): + """ + Validate that the agent can find the first active non-loopback + interface. + This test case used to run live, but not all developers have an eth* + interface. It is perfectly valid to have a br*, but this test does not + account for that. + """ + freebsdosutil = FreeBSDOSUtil() + + with patch.object(freebsdosutil, '_get_net_info', return_value=('em0', '10.0.0.1', 'e5:f0:38:aa:da:52')): + ifname, ipaddr = freebsdosutil.get_first_if() + + self.assertEqual(ifname, 'em0') + self.assertEqual(ipaddr, '10.0.0.1') + + def test_no_primary_does_not_throw(self): + freebsdosutil = FreeBSDOSUtil() + + with patch.object(freebsdosutil, '_get_net_info', return_value=('em0', '10.0.0.1', 'e5:f0:38:aa:da:52')): + try: + freebsdosutil.get_first_if()[0] + except Exception as e: + print(traceback.format_exc()) + exception = True + +if __name__ == '__main__': + unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_nsbsd.py walinuxagent-2.2.45/tests/common/osutil/test_nsbsd.py --- walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_nsbsd.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/osutil/test_nsbsd.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,87 @@ +# Copyright 2019 Microsoft Corporation +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.6+ and Openssl 1.0+ +# +from azurelinuxagent.common.utils.fileutil import read_file +from azurelinuxagent.common.osutil.nsbsd import NSBSDOSUtil +from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids +from tests.tools import * +from os import path + + +class TestNSBSDOSUtil(AgentTestCase): + dhclient_pid_file = "/var/run/dhclient.pid" + + def setUp(self): + AgentTestCase.setUp(self) + + def tearDown(self): + AgentTestCase.tearDown(self) + + def test_get_dhcp_pid_should_return_a_list_of_pids(self): + with patch.object(NSBSDOSUtil, "resolver"): # instantiating NSBSDOSUtil requires a resolver + original_isfile = path.isfile + + def mock_isfile(path): + return True if path == self.dhclient_pid_file else original_isfile(path) + + original_read_file = read_file + + def mock_read_file(file, *args, **kwargs): + return "123" if file == self.dhclient_pid_file else original_read_file(file, *args, **kwargs) + + with patch("os.path.isfile", mock_isfile): + with patch("azurelinuxagent.common.osutil.nsbsd.fileutil.read_file", mock_read_file): + pid_list = NSBSDOSUtil().get_dhcp_pid() + + self.assertEquals(pid_list, [123]) + + def test_get_dhcp_pid_should_return_an_empty_list_when_the_dhcp_client_is_not_running(self): + with patch.object(NSBSDOSUtil, "resolver"): # instantiating NSBSDOSUtil requires a resolver + # + # PID file does not exist + # + original_isfile = path.isfile + + def mock_isfile(path): + return False if path == 
self.dhclient_pid_file else original_isfile(path) + + with patch("os.path.isfile", mock_isfile): + pid_list = NSBSDOSUtil().get_dhcp_pid() + + self.assertEquals(pid_list, []) + + # + # PID file is empty + # + original_isfile = path.isfile + + def mock_isfile(path): + return True if path == self.dhclient_pid_file else original_isfile(path) + + original_read_file = read_file + + def mock_read_file(file, *args, **kwargs): + return "" if file == self.dhclient_pid_file else original_read_file(file, *args, **kwargs) + + with patch("os.path.isfile", mock_isfile): + with patch("azurelinuxagent.common.osutil.nsbsd.fileutil.read_file", mock_read_file): + pid_list = NSBSDOSUtil().get_dhcp_pid() + + self.assertEquals(pid_list, []) + + +if __name__ == '__main__': + unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_openbsd.py walinuxagent-2.2.45/tests/common/osutil/test_openbsd.py --- walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_openbsd.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/osutil/test_openbsd.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,34 @@ +# Copyright 2019 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ +# +from azurelinuxagent.common.osutil.openbsd import OpenBSDOSUtil +from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids +from tests.tools import * + + +class TestAlpineOSUtil(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) + + def tearDown(self): + AgentTestCase.tearDown(self) + + def test_get_dhcp_pid_should_return_a_list_of_pids(self): + osutil_get_dhcp_pid_should_return_a_list_of_pids(self, OpenBSDOSUtil()) + + +if __name__ == '__main__': + unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_openwrt.py walinuxagent-2.2.45/tests/common/osutil/test_openwrt.py --- walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_openwrt.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/osutil/test_openwrt.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,34 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ +# +from azurelinuxagent.common.osutil.openwrt import OpenWRTOSUtil +from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids +from tests.tools import * + + +class TestOpenWRTOSUtil(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) + + def tearDown(self): + AgentTestCase.tearDown(self) + + def test_get_dhcp_pid_should_return_a_list_of_pids(self): + osutil_get_dhcp_pid_should_return_a_list_of_pids(self, OpenWRTOSUtil()) + + +if __name__ == '__main__': + unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_redhat.py walinuxagent-2.2.45/tests/common/osutil/test_redhat.py --- walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_redhat.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/osutil/test_redhat.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,34 @@ +# Copyright 2019 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ +# +from azurelinuxagent.common.osutil.redhat import Redhat6xOSUtil +from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids +from tests.tools import * + + +class TestRedhat6xOSUtil(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) + + def tearDown(self): + AgentTestCase.tearDown(self) + + def test_get_dhcp_pid_should_return_a_list_of_pids(self): + osutil_get_dhcp_pid_should_return_a_list_of_pids(self, Redhat6xOSUtil()) + + +if __name__ == '__main__': + unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_suse.py walinuxagent-2.2.45/tests/common/osutil/test_suse.py --- walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_suse.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/osutil/test_suse.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,34 @@ +# Copyright 2019 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ +# +from azurelinuxagent.common.osutil.suse import SUSE11OSUtil +from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids +from tests.tools import * + + +class TestSUSE11OSUtil(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) + + def tearDown(self): + AgentTestCase.tearDown(self) + + def test_get_dhcp_pid_should_return_a_list_of_pids(self): + osutil_get_dhcp_pid_should_return_a_list_of_pids(self, SUSE11OSUtil()) + + +if __name__ == '__main__': + unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_ubuntu.py walinuxagent-2.2.45/tests/common/osutil/test_ubuntu.py --- walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_ubuntu.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/osutil/test_ubuntu.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,45 @@ +# Copyright 2019 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ +# +from azurelinuxagent.common.osutil.ubuntu import Ubuntu12OSUtil, Ubuntu18OSUtil +from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids +from tests.tools import * + + +class TestUbuntu12OSUtil(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) + + def tearDown(self): + AgentTestCase.tearDown(self) + + def test_get_dhcp_pid_should_return_a_list_of_pids(self): + osutil_get_dhcp_pid_should_return_a_list_of_pids(self, Ubuntu12OSUtil()) + + +class TestUbuntu18OSUtil(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) + + def tearDown(self): + AgentTestCase.tearDown(self) + + def test_get_dhcp_pid_should_return_a_list_of_pids(self): + osutil_get_dhcp_pid_should_return_a_list_of_pids(self, Ubuntu18OSUtil()) + + +if __name__ == '__main__': + unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/test_cgroupapi.py walinuxagent-2.2.45/tests/common/test_cgroupapi.py --- walinuxagent-2.2.21+really2.2.20/tests/common/test_cgroupapi.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/test_cgroupapi.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,642 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from __future__ import print_function + +import subprocess +from azurelinuxagent.common.cgroupapi import CGroupsApi, FileSystemCgroupsApi, SystemdCgroupsApi, CGROUPS_FILE_SYSTEM_ROOT, VM_AGENT_CGROUP_NAME +from azurelinuxagent.common.exception import CGroupsException, ExtensionError, ExtensionErrorCodes +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.utils import shellutil +from nose.plugins.attrib import attr +from tests.utils.cgroups_tools import CGroupsTools +from tests.tools import * + + +class _MockedFileSystemTestCase(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) + + self.cgroups_file_system_root = os.path.join(self.tmp_dir, "cgroup") + os.mkdir(self.cgroups_file_system_root) + os.mkdir(os.path.join(self.cgroups_file_system_root, "cpu")) + os.mkdir(os.path.join(self.cgroups_file_system_root, "memory")) + + self.mock_cgroups_file_system_root = patch("azurelinuxagent.common.cgroupapi.CGROUPS_FILE_SYSTEM_ROOT", self.cgroups_file_system_root) + self.mock_cgroups_file_system_root.start() + + def tearDown(self): + self.mock_cgroups_file_system_root.stop() + AgentTestCase.tearDown(self) + + +class CGroupsApiTestCase(_MockedFileSystemTestCase): + def test_create_should_return_a_SystemdCgroupsApi_on_systemd_platforms(self): + with patch("azurelinuxagent.common.cgroupapi.CGroupsApi._is_systemd", return_value=True): + api = CGroupsApi.create() + + self.assertTrue(type(api) == SystemdCgroupsApi) + + def test_create_should_return_a_FileSystemCgroupsApi_on_non_systemd_platforms(self): + with patch("azurelinuxagent.common.cgroupapi.CGroupsApi._is_systemd", return_value=False): + api = CGroupsApi.create() + + self.assertTrue(type(api) == FileSystemCgroupsApi) + + def test_is_systemd_should_return_true_when_systemd_manages_current_process(self): + path_exists = os.path.exists + + def mock_path_exists(path): + if path == "/run/systemd/system/": + mock_path_exists.path_tested = True 
+ return True + return path_exists(path) + + mock_path_exists.path_tested = False + + with patch("azurelinuxagent.common.cgroupapi.os.path.exists", mock_path_exists): + is_systemd = CGroupsApi._is_systemd() + + self.assertTrue(is_systemd) + + self.assertTrue(mock_path_exists.path_tested, 'The expected path was not tested; the implementation of CGroupsApi._is_systemd() may have changed.') + + def test_is_systemd_should_return_false_when_systemd_does_not_manage_current_process(self): + path_exists = os.path.exists + + def mock_path_exists(path): + if path == "/run/systemd/system/": + mock_path_exists.path_tested = True + return False + return path_exists(path) + + mock_path_exists.path_tested = False + + with patch("azurelinuxagent.common.cgroupapi.os.path.exists", mock_path_exists): + is_systemd = CGroupsApi._is_systemd() + + self.assertFalse(is_systemd) + + self.assertTrue(mock_path_exists.path_tested, 'The expected path was not tested; the implementation of CGroupsApi._is_systemd() may have changed.') + + def test_foreach_controller_should_execute_operation_on_all_mounted_controllers(self): + executed_controllers = [] + + def controller_operation(controller): + executed_controllers.append(controller) + + CGroupsApi._foreach_controller(controller_operation, 'A dummy message') + + # The setUp method mocks azurelinuxagent.common.cgroupapi.CGROUPS_FILE_SYSTEM_ROOT to have the cpu and memory controllers mounted + self.assertIn('cpu', executed_controllers, 'The operation was not executed on the cpu controller') + self.assertIn('memory', executed_controllers, 'The operation was not executed on the memory controller') + self.assertEqual(len(executed_controllers), 2, 'The operation was not executed on unexpected controllers: {0}'.format(executed_controllers)) + + def test_foreach_controller_should_handle_errors_in_individual_controllers(self): + successful_controllers = [] + + def controller_operation(controller): + if controller == 'cpu': + raise Exception('A test 
exception') + + successful_controllers.append(controller) + + with patch("azurelinuxagent.common.cgroupapi.logger.warn") as mock_logger_warn: + CGroupsApi._foreach_controller(controller_operation, 'A dummy message') + + self.assertIn('memory', successful_controllers, 'The operation was not executed on the memory controller') + self.assertEqual(len(successful_controllers), 1, 'The operation was not executed on unexpected controllers: {0}'.format(successful_controllers)) + + args, kwargs = mock_logger_warn.call_args + (message_format, controller, error, message) = args + self.assertEquals(message_format, 'Error in cgroup controller "{0}": {1}. {2}') + self.assertEquals(controller, 'cpu') + self.assertEquals(error, 'A test exception') + self.assertEquals(message, 'A dummy message') + + +class FileSystemCgroupsApiTestCase(_MockedFileSystemTestCase): + def test_cleanup_legacy_cgroups_should_move_daemon_pid_to_new_cgroup_and_remove_legacy_cgroups(self): + # Set up a mock /var/run/waagent.pid file + daemon_pid = "42" + daemon_pid_file = os.path.join(self.tmp_dir, "waagent.pid") + fileutil.write_file(daemon_pid_file, daemon_pid + "\n") + + # Set up old controller cgroups and add the daemon PID to them + legacy_cpu_cgroup = CGroupsTools.create_legacy_agent_cgroup(self.cgroups_file_system_root, "cpu", daemon_pid) + legacy_memory_cgroup = CGroupsTools.create_legacy_agent_cgroup(self.cgroups_file_system_root, "memory", daemon_pid) + + # Set up new controller cgroups and add extension handler's PID to them + new_cpu_cgroup = CGroupsTools.create_agent_cgroup(self.cgroups_file_system_root, "cpu", "999") + new_memory_cgroup = CGroupsTools.create_agent_cgroup(self.cgroups_file_system_root, "memory", "999") + + with patch("azurelinuxagent.common.cgroupapi.add_event") as mock_add_event: + with patch("azurelinuxagent.common.cgroupapi.get_agent_pid_file_path", return_value=daemon_pid_file): + FileSystemCgroupsApi().cleanup_legacy_cgroups() + + # The method should have added the daemon 
PID to the new controllers and deleted the old ones + new_cpu_contents = fileutil.read_file(os.path.join(new_cpu_cgroup, "cgroup.procs")) + new_memory_contents = fileutil.read_file(os.path.join(new_memory_cgroup, "cgroup.procs")) + + self.assertTrue(daemon_pid in new_cpu_contents) + self.assertTrue(daemon_pid in new_memory_contents) + + self.assertFalse(os.path.exists(legacy_cpu_cgroup)) + self.assertFalse(os.path.exists(legacy_memory_cgroup)) + + # Assert the event parameters that were sent out + self.assertEquals(len(mock_add_event.call_args_list), 2) + self.assertTrue(all(kwargs['op'] == 'CGroupsCleanUp' for _, kwargs in mock_add_event.call_args_list)) + self.assertTrue(all(kwargs['is_success'] for _, kwargs in mock_add_event.call_args_list)) + self.assertTrue(any( + re.match(r"Moved daemon's PID from legacy cgroup to /.*/cgroup/cpu/walinuxagent.service", kwargs['message']) + for _, kwargs in mock_add_event.call_args_list)) + self.assertTrue(any( + re.match(r"Moved daemon's PID from legacy cgroup to /.*/cgroup/memory/walinuxagent.service", kwargs['message']) + for _, kwargs in mock_add_event.call_args_list)) + + def test_create_agent_cgroups_should_create_cgroups_on_all_controllers(self): + agent_cgroups = FileSystemCgroupsApi().create_agent_cgroups() + + def assert_cgroup_created(controller): + cgroup_path = os.path.join(self.cgroups_file_system_root, controller, VM_AGENT_CGROUP_NAME) + self.assertTrue(any(cgroups.path == cgroup_path for cgroups in agent_cgroups)) + self.assertTrue(any(cgroups.name == VM_AGENT_CGROUP_NAME for cgroups in agent_cgroups)) + self.assertTrue(os.path.exists(cgroup_path)) + cgroup_task = int(fileutil.read_file(os.path.join(cgroup_path, "cgroup.procs"))) + current_process = os.getpid() + self.assertEqual(cgroup_task, current_process) + + assert_cgroup_created("cpu") + assert_cgroup_created("memory") + + def test_create_extension_cgroups_root_should_create_root_directory_for_extensions(self): + 
FileSystemCgroupsApi().create_extension_cgroups_root() + + cpu_cgroup = os.path.join(self.cgroups_file_system_root, "cpu", "walinuxagent.extensions") + self.assertTrue(os.path.exists(cpu_cgroup)) + + memory_cgroup = os.path.join(self.cgroups_file_system_root, "memory", "walinuxagent.extensions") + self.assertTrue(os.path.exists(memory_cgroup)) + + def test_create_extension_cgroups_should_create_cgroups_on_all_controllers(self): + api = FileSystemCgroupsApi() + api.create_extension_cgroups_root() + extension_cgroups = api.create_extension_cgroups("Microsoft.Compute.TestExtension-1.2.3") + + def assert_cgroup_created(controller): + cgroup_path = os.path.join(self.cgroups_file_system_root, controller, "walinuxagent.extensions", + "Microsoft.Compute.TestExtension_1.2.3") + + self.assertTrue(any(cgroups.path == cgroup_path for cgroups in extension_cgroups)) + self.assertTrue(os.path.exists(cgroup_path)) + + assert_cgroup_created("cpu") + assert_cgroup_created("memory") + + def test_remove_extension_cgroups_should_remove_all_cgroups(self): + api = FileSystemCgroupsApi() + api.create_extension_cgroups_root() + extension_cgroups = api.create_extension_cgroups("Microsoft.Compute.TestExtension-1.2.3") + + api.remove_extension_cgroups("Microsoft.Compute.TestExtension-1.2.3") + + for cgroup in extension_cgroups: + self.assertFalse(os.path.exists(cgroup.path)) + + def test_remove_extension_cgroups_should_log_a_warning_when_the_cgroup_contains_active_tasks(self): + api = FileSystemCgroupsApi() + api.create_extension_cgroups_root() + api.create_extension_cgroups("Microsoft.Compute.TestExtension-1.2.3") + + with patch("azurelinuxagent.common.cgroupapi.logger.warn") as mock_logger_warn: + with patch("azurelinuxagent.common.cgroupapi.os.rmdir", side_effect=OSError(16, "Device or resource busy")): + api.remove_extension_cgroups("Microsoft.Compute.TestExtension-1.2.3") + + args, kwargs = mock_logger_warn.call_args + message = args[0] + self.assertIn("still has active tasks", message) 
+ + def test_get_extension_cgroups_should_return_all_cgroups(self): + api = FileSystemCgroupsApi() + api.create_extension_cgroups_root() + created = api.create_extension_cgroups("Microsoft.Compute.TestExtension-1.2.3") + + retrieved = api.get_extension_cgroups("Microsoft.Compute.TestExtension-1.2.3") + + self.assertEqual(len(retrieved), len(created)) + + for cgroup in created: + self.assertTrue(any(retrieved_cgroup.path == cgroup.path for retrieved_cgroup in retrieved)) + + @patch('time.sleep', side_effect=lambda _: mock_sleep()) + def test_start_extension_command_should_add_the_child_process_to_the_extension_cgroup(self, _): + api = FileSystemCgroupsApi() + api.create_extension_cgroups_root() + + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: + extension_cgroups, process_output = api.start_extension_command( + extension_name="Microsoft.Compute.TestExtension-1.2.3", + command="echo $$", + timeout=300, + shell=True, + cwd=self.tmp_dir, + env={}, + stdout=stdout, + stderr=stderr) + + # The expected format of the process output is [stdout]\n{PID}\n\n\n[stderr]\n" + pattern = re.compile(r"\[stdout\]\n(\d+)\n\n\n\[stderr\]\n") + m = pattern.match(process_output) + + try: + pid_from_output = int(m.group(1)) + except Exception as e: + self.fail("No PID could be extracted from the process output! 
Error: {0}".format(ustr(e))) + + for cgroup in extension_cgroups: + cgroups_procs_path = os.path.join(cgroup.path, "cgroup.procs") + with open(cgroups_procs_path, "r") as f: + contents = f.read() + pid_from_cgroup = int(contents) + + self.assertEquals(pid_from_output, pid_from_cgroup, + "The PID from the process output ({0}) does not match the PID found in the" + "process cgroup {1} ({2})".format(pid_from_output, cgroups_procs_path, pid_from_cgroup)) + + +@skip_if_predicate_false(is_systemd_present, "Systemd cgroups API doesn't manage cgroups on systems not using systemd.") +class SystemdCgroupsApiTestCase(AgentTestCase): + def test_get_extensions_slice_root_name_should_return_the_root_slice_for_extensions(self): + root_slice_name = SystemdCgroupsApi()._get_extensions_slice_root_name() + self.assertEqual(root_slice_name, "system-walinuxagent.extensions.slice") + + def test_get_extension_slice_name_should_return_the_slice_for_the_given_extension(self): + extension_name = "Microsoft.Azure.DummyExtension-1.0" + extension_slice_name = SystemdCgroupsApi()._get_extension_slice_name(extension_name) + self.assertEqual(extension_slice_name, "system-walinuxagent.extensions-Microsoft.Azure.DummyExtension_1.0.slice") + + @attr('requires_sudo') + def test_create_extension_cgroups_root_should_create_extensions_root_slice(self): + self.assertTrue(i_am_root(), "Test does not run when non-root") + + SystemdCgroupsApi().create_extension_cgroups_root() + + unit_name = SystemdCgroupsApi()._get_extensions_slice_root_name() + _, status = shellutil.run_get_output("systemctl status {0}".format(unit_name)) + self.assertIn("Loaded: loaded", status) + self.assertIn("Active: active", status) + + shellutil.run_get_output("systemctl stop {0}".format(unit_name)) + shellutil.run_get_output("systemctl disable {0}".format(unit_name)) + os.remove("/etc/systemd/system/{0}".format(unit_name)) + shellutil.run_get_output("systemctl daemon-reload") + + @attr('requires_sudo') + def 
test_create_extension_cgroups_should_create_extension_slice(self): + self.assertTrue(i_am_root(), "Test does not run when non-root") + + extension_name = "Microsoft.Azure.DummyExtension-1.0" + cgroups = SystemdCgroupsApi().create_extension_cgroups(extension_name) + cpu_cgroup, memory_cgroup = cgroups[0], cgroups[1] + self.assertEqual(cpu_cgroup.path, "/sys/fs/cgroup/cpu/system.slice/Microsoft.Azure.DummyExtension_1.0") + self.assertEqual(memory_cgroup.path, "/sys/fs/cgroup/memory/system.slice/Microsoft.Azure.DummyExtension_1.0") + + unit_name = SystemdCgroupsApi()._get_extension_slice_name(extension_name) + self.assertEqual("system-walinuxagent.extensions-Microsoft.Azure.DummyExtension_1.0.slice", unit_name) + + _, status = shellutil.run_get_output("systemctl status {0}".format(unit_name)) + self.assertIn("Loaded: loaded", status) + self.assertIn("Active: active", status) + + shellutil.run_get_output("systemctl stop {0}".format(unit_name)) + shellutil.run_get_output("systemctl disable {0}".format(unit_name)) + os.remove("/etc/systemd/system/{0}".format(unit_name)) + shellutil.run_get_output("systemctl daemon-reload") + + def assert_cgroups_created(self, extension_cgroups): + self.assertEqual(len(extension_cgroups), 2, + 'start_extension_command did not return the expected number of cgroups') + + cpu_found = memory_found = False + + for cgroup in extension_cgroups: + match = re.match( + r'^/sys/fs/cgroup/(cpu|memory)/system.slice/Microsoft.Compute.TestExtension_1\.2\.3\_([a-f0-9-]+)\.scope$', + cgroup.path) + + self.assertTrue(match is not None, "Unexpected path for cgroup: {0}".format(cgroup.path)) + + if match.group(1) == 'cpu': + cpu_found = True + if match.group(1) == 'memory': + memory_found = True + + self.assertTrue(cpu_found, 'start_extension_command did not return a cpu cgroup') + self.assertTrue(memory_found, 'start_extension_command did not return a memory cgroup') + + @patch('time.sleep', side_effect=lambda _: mock_sleep()) + def 
test_start_extension_command_should_create_extension_scopes(self, _): + original_popen = subprocess.Popen + + def mock_popen(*args, **kwargs): + return original_popen("date", **kwargs) + + # we mock subprocess.Popen to execute a dummy command (date), so no actual cgroups are created; their paths + # should be computed properly, though + with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", mock_popen): + extension_cgroups, process_output = SystemdCgroupsApi().start_extension_command( + extension_name="Microsoft.Compute.TestExtension-1.2.3", + command="date", + shell=False, + timeout=300, + cwd=self.tmp_dir, + env={}, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + self.assert_cgroups_created(extension_cgroups) + + @attr('requires_sudo') + @patch('time.sleep', side_effect=lambda _: mock_sleep(0.2)) + def test_start_extension_command_should_use_systemd_and_not_the_fallback_option_if_successful(self, _): + self.assertTrue(i_am_root(), "Test does not run when non-root") + + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: + with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", wraps=subprocess.Popen) \ + as patch_mock_popen: + extension_cgroups, process_output = SystemdCgroupsApi().start_extension_command( + extension_name="Microsoft.Compute.TestExtension-1.2.3", + command="date", + timeout=300, + shell=True, + cwd=self.tmp_dir, + env={}, + stdout=stdout, + stderr=stderr) + + # We should have invoked the extension command only once and succeeded + self.assertEquals(1, patch_mock_popen.call_count) + + args = patch_mock_popen.call_args[0][0] + self.assertIn("systemd-run --unit", args) + + self.assert_cgroups_created(extension_cgroups) + + @patch('time.sleep', side_effect=lambda _: mock_sleep(0.2)) + def test_start_extension_command_should_use_fallback_option_if_systemd_fails(self, _): + original_popen = subprocess.Popen + + def mock_popen(*args, 
**kwargs): + # Inject a syntax error to the call + systemd_command = args[0].replace('systemd-run', 'systemd-run syntax_error') + new_args = (systemd_command,) + return original_popen(new_args, **kwargs) + + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: + with patch("azurelinuxagent.common.cgroupapi.add_event") as mock_add_event: + with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", side_effect=mock_popen) \ + as patch_mock_popen: + # We expect this call to fail because of the syntax error + extension_cgroups, process_output = SystemdCgroupsApi().start_extension_command( + extension_name="Microsoft.Compute.TestExtension-1.2.3", + command="date", + timeout=300, + shell=True, + cwd=self.tmp_dir, + env={}, + stdout=stdout, + stderr=stderr) + + args, kwargs = mock_add_event.call_args + self.assertIn("Failed to run systemd-run for unit Microsoft.Compute.TestExtension_1.2.3", + kwargs['message']) + self.assertIn("Failed to find executable syntax_error: No such file or directory", + kwargs['message']) + self.assertEquals(False, kwargs['is_success']) + self.assertEquals('InvokeCommandUsingSystemd', kwargs['op']) + + # We expect two calls to Popen, first for the systemd-run call, second for the fallback option + self.assertEquals(2, patch_mock_popen.call_count) + + first_call_args = patch_mock_popen.mock_calls[0][1][0] + second_call_args = patch_mock_popen.mock_calls[1][1][0] + self.assertIn("systemd-run --unit", first_call_args) + self.assertNotIn("systemd-run --unit", second_call_args) + + # No cgroups should have been created + self.assertEquals(extension_cgroups, []) + + @patch('time.sleep', side_effect=lambda _: mock_sleep(0.001)) + def test_start_extension_command_should_use_fallback_option_if_systemd_times_out(self, _): + # Systemd has its own internal timeout which is shorter than what we define for extension operation timeout. 
+ # When systemd times out, it will write a message to stderr and exit with exit code 1. + # In that case, we will internally recognize the failure due to the non-zero exit code, not as a timeout. + original_popen = subprocess.Popen + systemd_timeout_command = "echo 'Failed to start transient scope unit: Connection timed out' >&2 && exit 1" + + def mock_popen(*args, **kwargs): + # If trying to invoke systemd, mock what would happen if systemd timed out internally: + # write failure to stderr and exit with exit code 1. + new_args = args + if "systemd-run" in args[0]: + new_args = (systemd_timeout_command,) + + return original_popen(new_args, **kwargs) + + expected_output = "[stdout]\n{0}\n\n\n[stderr]\n" + + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: + with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", side_effect=mock_popen) \ + as patch_mock_popen: + extension_cgroups, process_output = SystemdCgroupsApi().start_extension_command( + extension_name="Microsoft.Compute.TestExtension-1.2.3", + command="echo 'success'", + timeout=300, + shell=True, + cwd=self.tmp_dir, + env={}, + stdout=stdout, + stderr=stderr) + + # We expect two calls to Popen, first for the systemd-run call, second for the fallback option + self.assertEquals(2, patch_mock_popen.call_count) + + first_call_args = patch_mock_popen.mock_calls[0][1][0] + second_call_args = patch_mock_popen.mock_calls[1][1][0] + self.assertIn("systemd-run --unit", first_call_args) + self.assertNotIn("systemd-run --unit", second_call_args) + + self.assertEquals(extension_cgroups, []) + self.assertEquals(expected_output.format("success"), process_output) + + @attr('requires_sudo') + @patch("azurelinuxagent.common.cgroupapi.add_event") + @patch('time.sleep', side_effect=lambda _: mock_sleep()) + def test_start_extension_command_should_not_use_fallback_option_if_extension_fails(self, *args): + 
self.assertTrue(i_am_root(), "Test does not run when non-root") + + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: + with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", wraps=subprocess.Popen) \ + as patch_mock_popen: + with self.assertRaises(ExtensionError) as context_manager: + SystemdCgroupsApi().start_extension_command( + extension_name="Microsoft.Compute.TestExtension-1.2.3", + command="ls folder_does_not_exist", + timeout=300, + shell=True, + cwd=self.tmp_dir, + env={}, + stdout=stdout, + stderr=stderr) + + # We should have invoked the extension command only once, in the systemd-run case + self.assertEquals(1, patch_mock_popen.call_count) + args = patch_mock_popen.call_args[0][0] + self.assertIn("systemd-run --unit", args) + + self.assertEquals(context_manager.exception.code, ExtensionErrorCodes.PluginUnknownFailure) + self.assertIn("Non-zero exit code", ustr(context_manager.exception)) + + @attr('requires_sudo') + @patch("azurelinuxagent.common.cgroupapi.add_event") + def test_start_extension_command_should_not_use_fallback_option_if_extension_times_out(self, *args): + self.assertTrue(i_am_root(), "Test does not run when non-root") + + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: + with patch("azurelinuxagent.common.utils.extensionprocessutil.wait_for_process_completion_or_timeout", + return_value=[True, None]): + with patch("azurelinuxagent.common.cgroupapi.SystemdCgroupsApi._is_systemd_failure", + return_value=False): + with self.assertRaises(ExtensionError) as context_manager: + SystemdCgroupsApi().start_extension_command( + extension_name="Microsoft.Compute.TestExtension-1.2.3", + command="date", + timeout=300, + shell=True, + cwd=self.tmp_dir, + env={}, + stdout=stdout, + stderr=stderr) + + self.assertEquals(context_manager.exception.code, + 
ExtensionErrorCodes.PluginHandlerScriptTimedout) + self.assertIn("Timeout", ustr(context_manager.exception)) + + @patch('time.sleep', side_effect=lambda _: mock_sleep()) + def test_start_extension_command_should_capture_only_the_last_subprocess_output(self, _): + original_popen = subprocess.Popen + + def mock_popen(*args, **kwargs): + # Inject a syntax error to the call + systemd_command = args[0].replace('systemd-run', 'systemd-run syntax_error') + new_args = (systemd_command,) + return original_popen(new_args, **kwargs) + + expected_output = "[stdout]\n{0}\n\n\n[stderr]\n" + + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: + with patch("azurelinuxagent.common.cgroupapi.add_event"): + with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", side_effect=mock_popen): + # We expect this call to fail because of the syntax error + extension_cgroups, process_output = SystemdCgroupsApi().start_extension_command( + extension_name="Microsoft.Compute.TestExtension-1.2.3", + command="echo 'very specific test message'", + timeout=300, + shell=True, + cwd=self.tmp_dir, + env={}, + stdout=stdout, + stderr=stderr) + + self.assertEquals(expected_output.format("very specific test message"), process_output) + self.assertEquals(extension_cgroups, []) + + @patch("azurelinuxagent.common.utils.fileutil.read_file") + def test_create_agent_cgroups_should_create_cgroups_on_all_controllers(self, patch_read_file): + mock_proc_self_cgroup = '''12:blkio:/system.slice/walinuxagent.service +11:memory:/system.slice/walinuxagent.service +10:perf_event:/ +9:hugetlb:/ +8:freezer:/ +7:net_cls,net_prio:/ +6:devices:/system.slice/walinuxagent.service +5:cpuset:/ +4:cpu,cpuacct:/system.slice/walinuxagent.service +3:pids:/system.slice/walinuxagent.service +2:rdma:/ +1:name=systemd:/system.slice/walinuxagent.service +0::/system.slice/walinuxagent.service +''' + patch_read_file.return_value = 
mock_proc_self_cgroup + agent_cgroups = SystemdCgroupsApi().create_agent_cgroups() + + def assert_cgroup_created(controller): + expected_cgroup_path = os.path.join(CGROUPS_FILE_SYSTEM_ROOT, controller, "system.slice", VM_AGENT_CGROUP_NAME) + + self.assertTrue(any(cgroups.path == expected_cgroup_path for cgroups in agent_cgroups)) + self.assertTrue(any(cgroups.name == VM_AGENT_CGROUP_NAME for cgroups in agent_cgroups)) + + assert_cgroup_created("cpu") + assert_cgroup_created("memory") + + +class SystemdCgroupsApiMockedFileSystemTestCase(_MockedFileSystemTestCase): + def test_cleanup_legacy_cgroups_should_remove_legacy_cgroups(self): + # Set up a mock /var/run/waagent.pid file + daemon_pid_file = os.path.join(self.tmp_dir, "waagent.pid") + fileutil.write_file(daemon_pid_file, "42\n") + + # Set up old controller cgroups, but do not add the daemon's PID to them + legacy_cpu_cgroup = CGroupsTools.create_legacy_agent_cgroup(self.cgroups_file_system_root, "cpu", '') + legacy_memory_cgroup = CGroupsTools.create_legacy_agent_cgroup(self.cgroups_file_system_root, "memory", '') + + with patch("azurelinuxagent.common.cgroupapi.add_event") as mock_add_event: + with patch("azurelinuxagent.common.cgroupapi.get_agent_pid_file_path", return_value=daemon_pid_file): + SystemdCgroupsApi().cleanup_legacy_cgroups() + + self.assertFalse(os.path.exists(legacy_cpu_cgroup)) + self.assertFalse(os.path.exists(legacy_memory_cgroup)) + + def test_cleanup_legacy_cgroups_should_report_an_error_when_the_daemon_pid_was_added_to_the_legacy_cgroups(self): + # Set up a mock /var/run/waagent.pid file + daemon_pid = "42" + daemon_pid_file = os.path.join(self.tmp_dir, "waagent.pid") + fileutil.write_file(daemon_pid_file, daemon_pid + "\n") + + # Set up old controller cgroups and add the daemon's PID to them + legacy_cpu_cgroup = CGroupsTools.create_legacy_agent_cgroup(self.cgroups_file_system_root, "cpu", daemon_pid) + legacy_memory_cgroup = 
CGroupsTools.create_legacy_agent_cgroup(self.cgroups_file_system_root, "memory", daemon_pid) + + with patch("azurelinuxagent.common.cgroupapi.add_event") as mock_add_event: + with patch("azurelinuxagent.common.cgroupapi.get_agent_pid_file_path", return_value=daemon_pid_file): + with self.assertRaises(CGroupsException) as context_manager: + SystemdCgroupsApi().cleanup_legacy_cgroups() + + self.assertEquals(context_manager.exception.message, "The daemon's PID ({0}) was already added to the legacy cgroup; this invalidates resource usage data.".format(daemon_pid)) + + # The method should have deleted the legacy cgroups + self.assertFalse(os.path.exists(legacy_cpu_cgroup)) + self.assertFalse(os.path.exists(legacy_memory_cgroup)) + diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/test_cgroupconfigurator.py walinuxagent-2.2.45/tests/common/test_cgroupconfigurator.py --- walinuxagent-2.2.21+really2.2.20/tests/common/test_cgroupconfigurator.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/test_cgroupconfigurator.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,295 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from __future__ import print_function + +import subprocess + +import errno +from azurelinuxagent.common.cgroup import CGroup +from azurelinuxagent.common.cgroupapi import VM_AGENT_CGROUP_NAME +from azurelinuxagent.common.cgroupconfigurator import CGroupConfigurator +from azurelinuxagent.common.cgroupstelemetry import CGroupsTelemetry +from azurelinuxagent.common.exception import CGroupsException +from azurelinuxagent.common.osutil.default import DefaultOSUtil +from tests.utils.cgroups_tools import CGroupsTools +from tests.tools import * + + +class CGroupConfiguratorTestCase(AgentTestCase): + @classmethod + def setUpClass(cls): + AgentTestCase.setUpClass() + + # Use the file system implementation of CGroupsApi (FileSystemCgroupsApi) + cls.mock_is_systemd = patch("azurelinuxagent.common.cgroupapi.CGroupsApi._is_systemd", return_value=False) + cls.mock_is_systemd.start() + + # Use the default implementation of osutil + cls.mock_get_osutil = patch("azurelinuxagent.common.cgroupconfigurator.get_osutil", return_value=DefaultOSUtil()) + cls.mock_get_osutil.start() + + # Currently osutil.is_cgroups_supported() returns False on Travis runs. We need to revisit this design; in the + # meanwhile mock the method to return True + cls.mock_is_cgroups_supported = patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.is_cgroups_supported", return_value=True) + cls.mock_is_cgroups_supported.start() + + # Mounting the cgroup filesystem requires root privileges. Since these tests do not perform any actual operation on cgroups, make it a noop. 
+ cls.mock_mount_cgroups = patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.mount_cgroups") + cls.mock_mount_cgroups.start() + + @classmethod + def tearDownClass(cls): + cls.mock_mount_cgroups.stop() + cls.mock_is_cgroups_supported.stop() + cls.mock_get_osutil.stop() + cls.mock_is_systemd.stop() + + AgentTestCase.tearDownClass() + + def setUp(self): + AgentTestCase.setUp(self) + CGroupConfigurator._instance = None # force get_instance() to create a new instance for each test + + self.cgroups_file_system_root = os.path.join(self.tmp_dir, "cgroup") + os.mkdir(self.cgroups_file_system_root) + os.mkdir(os.path.join(self.cgroups_file_system_root, "cpu")) + os.mkdir(os.path.join(self.cgroups_file_system_root, "memory")) + + self.mock_cgroups_file_system_root = patch("azurelinuxagent.common.cgroupapi.CGROUPS_FILE_SYSTEM_ROOT", self.cgroups_file_system_root) + self.mock_cgroups_file_system_root.start() + + def tearDown(self): + self.mock_cgroups_file_system_root.stop() + + def test_init_should_mount_the_cgroups_file_system(self): + with patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.mount_cgroups") as mock_mount_cgroups: + CGroupConfigurator.get_instance() + + self.assertEqual(mock_mount_cgroups.call_count, 1) + + def test_init_should_disable_cgroups_when_they_are_not_supported(self): + with patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.is_cgroups_supported", return_value=False): + self.assertFalse(CGroupConfigurator.get_instance().enabled()) + + def test_enable_and_disable_should_change_the_enabled_state_of_cgroups(self): + configurator = CGroupConfigurator.get_instance() + + self.assertTrue(configurator.enabled()) + + configurator.disable() + self.assertFalse(configurator.enabled()) + + configurator.enable() + self.assertTrue(configurator.enabled()) + + def test_enable_should_raise_CGroupsException_when_cgroups_are_not_supported(self): + with patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.is_cgroups_supported", 
return_value=False): + with self.assertRaises(CGroupsException) as context_manager: + CGroupConfigurator.get_instance().enable() + self.assertIn("cgroups are not supported", str(context_manager.exception)) + + def test_cgroup_operations_should_not_invoke_the_cgroup_api_when_cgroups_are_not_enabled(self): + configurator = CGroupConfigurator.get_instance() + configurator.disable() + + # List of operations to test, and the functions to mock used in order to do verifications + operations = [ + [lambda: configurator.create_agent_cgroups(track_cgroups=False), "azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.create_agent_cgroups"], + [lambda: configurator.cleanup_legacy_cgroups(), "azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.cleanup_legacy_cgroups"], + [lambda: configurator.create_extension_cgroups_root(), "azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.create_extension_cgroups_root"], + [lambda: configurator.create_extension_cgroups("A.B.C-1.0.0"), "azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.create_extension_cgroups"], + [lambda: configurator.remove_extension_cgroups("A.B.C-1.0.0"), "azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.remove_extension_cgroups"] + ] + + for op in operations: + with patch(op[1]) as mock_cgroup_api_operation: + op[0]() + + self.assertEqual(mock_cgroup_api_operation.call_count, 0) + + def test_cgroup_operations_should_log_a_warning_when_the_cgroup_api_raises_an_exception(self): + configurator = CGroupConfigurator.get_instance() + + # cleanup_legacy_cgroups disables cgroups on error, so make disable() a no-op + with patch.object(configurator, "disable"): + # List of operations to test, and the functions to mock in order to raise exceptions + operations = [ + [lambda: configurator.create_agent_cgroups(track_cgroups=False), "azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.create_agent_cgroups"], + [lambda: configurator.cleanup_legacy_cgroups(), 
"azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.cleanup_legacy_cgroups"], + [lambda: configurator.create_extension_cgroups_root(), "azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.create_extension_cgroups_root"], + [lambda: configurator.create_extension_cgroups("A.B.C-1.0.0"), "azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.create_extension_cgroups"], + [lambda: configurator.remove_extension_cgroups("A.B.C-1.0.0"), "azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.remove_extension_cgroups"] + ] + + def raise_exception(*_): + raise Exception("A TEST EXCEPTION") + + for op in operations: + with patch("azurelinuxagent.common.cgroupconfigurator.logger.warn") as mock_logger_warn: + with patch(op[1], raise_exception): + op[0]() + + self.assertEquals(mock_logger_warn.call_count, 1) + + args, kwargs = mock_logger_warn.call_args + message = args[0] + self.assertIn("A TEST EXCEPTION", message) + + def test_start_extension_command_should_forward_to_subprocess_popen_when_groups_are_not_enabled(self): + configurator = CGroupConfigurator.get_instance() + configurator.disable() + + with patch("azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.start_extension_command") as mock_fs: + with patch("azurelinuxagent.common.cgroupapi.SystemdCgroupsApi.start_extension_command") as mock_systemd: + with patch("azurelinuxagent.common.cgroupconfigurator.handle_process_completion") as mock_popen: + configurator.start_extension_command( + extension_name="Microsoft.Compute.TestExtension-1.2.3", + command="date", + timeout=300, + shell=False, + cwd=self.tmp_dir, + env={}, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + self.assertEqual(mock_popen.call_count, 1) + self.assertEqual(mock_fs.call_count, 0) + self.assertEqual(mock_systemd.call_count, 0) + + def test_start_extension_command_should_forward_to_cgroups_api_when_groups_are_enabled(self): + configurator = CGroupConfigurator.get_instance() + + with 
patch("azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.start_extension_command", + return_value=[[], None]) as mock_start_extension_command: + configurator.start_extension_command( + extension_name="Microsoft.Compute.TestExtension-1.2.3", + command="date", + timeout=300, + shell=False, + cwd=self.tmp_dir, + env={}, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + self.assertEqual(mock_start_extension_command.call_count, 1) + + def test_start_extension_command_should_start_tracking_the_extension_cgroups(self): + CGroupConfigurator.get_instance().start_extension_command( + extension_name="Microsoft.Compute.TestExtension-1.2.3", + command="date", + timeout=300, + shell=False, + cwd=self.tmp_dir, + env={}, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + self.assertTrue(CGroupsTelemetry.is_tracked(os.path.join( + self.cgroups_file_system_root, "cpu", "walinuxagent.extensions/Microsoft.Compute.TestExtension_1.2.3"))) + self.assertTrue(CGroupsTelemetry.is_tracked(os.path.join( + self.cgroups_file_system_root, "memory", "walinuxagent.extensions/Microsoft.Compute.TestExtension_1.2.3"))) + + def test_start_extension_command_should_raise_an_exception_when_the_command_cannot_be_started(self): + configurator = CGroupConfigurator.get_instance() + + def raise_exception(*_, **__): + raise Exception("A TEST EXCEPTION") + + with patch("azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.start_extension_command", raise_exception): + with self.assertRaises(Exception) as context_manager: + configurator.start_extension_command( + extension_name="Microsoft.Compute.TestExtension-1.2.3", + command="date", + timeout=300, + shell=False, + cwd=self.tmp_dir, + env={}, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + self.assertIn("A TEST EXCEPTION", str(context_manager.exception)) + + def test_cleanup_legacy_cgroups_should_disable_cgroups_when_it_fails_to_process_legacy_cgroups(self): + # Set up a mock /var/run/waagent.pid file + daemon_pid = "42" + daemon_pid_file 
= os.path.join(self.tmp_dir, "waagent.pid") + fileutil.write_file(daemon_pid_file, daemon_pid + "\n") + + # Set up old controller cgroups and add the daemon PID to them + CGroupsTools.create_legacy_agent_cgroup(self.cgroups_file_system_root, "cpu", daemon_pid) + CGroupsTools.create_legacy_agent_cgroup(self.cgroups_file_system_root, "memory", daemon_pid) + + # Set up new controller cgroups and add extension handler's PID to them + CGroupsTools.create_agent_cgroup(self.cgroups_file_system_root, "cpu", "999") + CGroupsTools.create_agent_cgroup(self.cgroups_file_system_root, "memory", "999") + + def mock_append_file(filepath, contents, **kwargs): + if re.match(r'/.*/cpu/.*/cgroup.procs', filepath): + raise OSError(errno.ENOSPC, os.strerror(errno.ENOSPC)) + fileutil.append_file(filepath, controller, **kwargs) + + # Start tracking a couple of dummy cgroups + CGroupsTelemetry.track_cgroup(CGroup("dummy", "/sys/fs/cgroup/memory/system.slice/dummy.service", "cpu")) + CGroupsTelemetry.track_cgroup(CGroup("dummy", "/sys/fs/cgroup/memory/system.slice/dummy.service", "memory")) + + cgroup_configurator = CGroupConfigurator.get_instance() + + with patch("azurelinuxagent.common.cgroupconfigurator.add_event") as mock_add_event: + with patch("azurelinuxagent.common.cgroupapi.get_agent_pid_file_path", return_value=daemon_pid_file): + with patch("azurelinuxagent.common.cgroupapi.fileutil.append_file", side_effect=mock_append_file): + cgroup_configurator.cleanup_legacy_cgroups() + + self.assertEquals(len(mock_add_event.call_args_list), 1) + _, kwargs = mock_add_event.call_args_list[0] + self.assertEquals(kwargs['op'], 'CGroupsCleanUp') + self.assertFalse(kwargs['is_success']) + self.assertEquals(kwargs['message'], 'Failed to process legacy cgroups. Collection of resource usage data will be disabled. 
[Errno 28] No space left on device') + + self.assertFalse(cgroup_configurator.enabled()) + self.assertEquals(len(CGroupsTelemetry._tracked), 0) + + @patch("azurelinuxagent.common.cgroupapi.CGroupsApi._is_systemd", return_value=True) + def test_cleanup_legacy_cgroups_should_disable_cgroups_when_the_daemon_was_added_to_the_legacy_cgroup_on_systemd(self, _): + # Set up a mock /var/run/waagent.pid file + daemon_pid = "42" + daemon_pid_file = os.path.join(self.tmp_dir, "waagent.pid") + fileutil.write_file(daemon_pid_file, daemon_pid + "\n") + + # Set up old controller cgroups and add the daemon PID to them + CGroupsTools.create_legacy_agent_cgroup(self.cgroups_file_system_root, "cpu", daemon_pid) + CGroupsTools.create_legacy_agent_cgroup(self.cgroups_file_system_root, "memory", daemon_pid) + + # Start tracking a couple of dummy cgroups + CGroupsTelemetry.track_cgroup(CGroup("dummy", "/sys/fs/cgroup/memory/system.slice/dummy.service", "cpu")) + CGroupsTelemetry.track_cgroup(CGroup("dummy", "/sys/fs/cgroup/memory/system.slice/dummy.service", "memory")) + + cgroup_configurator = CGroupConfigurator.get_instance() + + with patch("azurelinuxagent.common.cgroupconfigurator.add_event") as mock_add_event: + with patch("azurelinuxagent.common.cgroupapi.get_agent_pid_file_path", return_value=daemon_pid_file): + cgroup_configurator.cleanup_legacy_cgroups() + + self.assertEquals(len(mock_add_event.call_args_list), 1) + _, kwargs = mock_add_event.call_args_list[0] + self.assertEquals(kwargs['op'], 'CGroupsCleanUp') + self.assertFalse(kwargs['is_success']) + self.assertEquals( + kwargs['message'], + "Failed to process legacy cgroups. Collection of resource usage data will be disabled. 
The daemon's PID ({0}) was already added to the legacy cgroup; this invalidates resource usage data.".format(daemon_pid)) + + self.assertFalse(cgroup_configurator.enabled()) + self.assertEquals(len(CGroupsTelemetry._tracked), 0) diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/test_cgroups.py walinuxagent-2.2.45/tests/common/test_cgroups.py --- walinuxagent-2.2.21+really2.2.20/tests/common/test_cgroups.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/test_cgroups.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,158 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from __future__ import print_function + +import random + +from azurelinuxagent.common.cgroup import CpuCgroup, MemoryCgroup, CGroup +from azurelinuxagent.common.exception import CGroupsException +from tests.tools import * + + +def consume_cpu_time(): + waste = 0 + for x in range(1, 200000): + waste += random.random() + return waste + + +class TestCGroup(AgentTestCase): + + def setUp(self): + AgentTestCase.setUp(self) + + def tearDown(self): + AgentTestCase.tearDown(self) + + with open(os.path.join(data_dir, "cgroups", "cpu_mount", "tasks"), mode="wb") as tasks: + tasks.truncate(0) + with open(os.path.join(data_dir, "cgroups", "memory_mount", "tasks"), mode="wb") as tasks: + tasks.truncate(0) + + def test_correct_creation(self): + test_cgroup = CGroup.create("dummy_path", "cpu", "test_extension") + self.assertIsInstance(test_cgroup, CpuCgroup) + self.assertEqual(test_cgroup.controller, "cpu") + self.assertEqual(test_cgroup.path, "dummy_path") + self.assertEqual(test_cgroup.name, "test_extension") + + test_cgroup = CGroup.create("dummy_path", "memory", "test_extension") + self.assertIsInstance(test_cgroup, MemoryCgroup) + self.assertEqual(test_cgroup.controller, "memory") + self.assertEqual(test_cgroup.path, "dummy_path") + self.assertEqual(test_cgroup.name, "test_extension") + + def test_is_active(self): + test_cgroup = CGroup.create(os.path.join(data_dir, "cgroups", "cpu_mount"), "cpu", "test_extension") + self.assertEqual(False, test_cgroup.is_active()) + + with open(os.path.join(data_dir, "cgroups", "cpu_mount", "tasks"), mode="wb") as tasks: + tasks.write(str(1000).encode()) + + self.assertEqual(True, test_cgroup.is_active()) + + test_cgroup = CGroup.create(os.path.join(data_dir, "cgroups", "memory_mount"), "memory", "test_extension") + self.assertEqual(False, test_cgroup.is_active()) + + with open(os.path.join(data_dir, "cgroups", "memory_mount", "tasks"), mode="wb") as tasks: + tasks.write(str(1000).encode()) + 
+ self.assertEqual(True, test_cgroup.is_active()) + + @patch("azurelinuxagent.common.logger.periodic_warn") + def test_is_active_file_not_present(self, patch_periodic_warn): + test_cgroup = CGroup.create(os.path.join(data_dir, "cgroups", "not_cpu_mount"), "cpu", "test_extension") + self.assertEqual(False, test_cgroup.is_active()) + + test_cgroup = CGroup.create(os.path.join(data_dir, "cgroups", "not_memory_mount"), "memory", "test_extension") + self.assertEqual(False, test_cgroup.is_active()) + + self.assertEqual(0, patch_periodic_warn.call_count) + + @patch("azurelinuxagent.common.logger.periodic_warn") + def test_is_active_incorrect_file(self, patch_periodic_warn): + test_cgroup = CGroup.create(os.path.join(data_dir, "cgroups", "cpu_mount", "tasks"), "cpu", "test_extension") + self.assertEqual(False, test_cgroup.is_active()) + self.assertEqual(1, patch_periodic_warn.call_count) + + test_cgroup = CGroup.create(os.path.join(data_dir, "cgroups", "memory_mount", "tasks"), "memory", "test_extension") + self.assertEqual(False, test_cgroup.is_active()) + self.assertEqual(2, patch_periodic_warn.call_count) + + +class TestCpuCgroup(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) + + @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_stat") + def test_cpu_cgroup_create(self, patch_get_proc_stat): + patch_get_proc_stat.return_value = fileutil.read_file(os.path.join(data_dir, "cgroups", "dummy_proc_stat")) + test_cpu_cg = CpuCgroup("test_extension", "dummy_path") + + self.assertEqual(398488, test_cpu_cg._current_system_cpu) + self.assertEqual(0, test_cpu_cg._current_cpu_total) + self.assertEqual(0, test_cpu_cg._previous_cpu_total) + self.assertEqual(0, test_cpu_cg._previous_system_cpu) + + self.assertEqual("cpu", test_cpu_cg.controller) + + @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.get_processor_cores", return_value=1) + @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_stat") + def 
test_get_cpu_usage(self, patch_get_proc_stat, *args): + patch_get_proc_stat.return_value = fileutil.read_file(os.path.join(data_dir, "cgroups", "dummy_proc_stat")) + test_cpu_cg = CpuCgroup("test_extension", os.path.join(data_dir, "cgroups", "cpu_mount")) + + # Mocking CPU consumption + patch_get_proc_stat.return_value = fileutil.read_file(os.path.join(data_dir, "cgroups", + "dummy_proc_stat_updated")) + + cpu_usage = test_cpu_cg.get_cpu_usage() + + self.assertEqual(5.114, cpu_usage) + + def test_get_current_cpu_total_exception_handling(self): + test_cpu_cg = CpuCgroup("test_extension", "dummy_path") + self.assertRaises(IOError, test_cpu_cg._get_current_cpu_total) + + # Trying to raise ERRNO 20. + test_cpu_cg = CpuCgroup("test_extension", os.path.join(data_dir, "cgroups", "cpu_mount", "cpuacct.stat")) + self.assertRaises(CGroupsException, test_cpu_cg._get_current_cpu_total) + + +class TestMemoryCgroup(AgentTestCase): + def test_memory_cgroup_create(self): + test_mem_cg = MemoryCgroup("test_extension", os.path.join(data_dir, "cgroups", "memory_mount")) + self.assertEqual("memory", test_mem_cg.controller) + + def test_get_metrics(self): + test_mem_cg = MemoryCgroup("test_extension", os.path.join(data_dir, "cgroups", "memory_mount")) + + memory_usage = test_mem_cg.get_memory_usage() + self.assertEqual(100000, memory_usage) + + max_memory_usage = test_mem_cg.get_max_memory_usage() + self.assertEqual(1000000, max_memory_usage) + + def test_get_metrics_when_files_not_present(self): + test_mem_cg = MemoryCgroup("test_extension", os.path.join(data_dir, "cgroups")) + + memory_usage = test_mem_cg.get_memory_usage() + self.assertEqual(0, memory_usage) + + max_memory_usage = test_mem_cg.get_max_memory_usage() + self.assertEqual(0, max_memory_usage) \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/test_cgroupstelemetry.py walinuxagent-2.2.45/tests/common/test_cgroupstelemetry.py --- 
walinuxagent-2.2.21+really2.2.20/tests/common/test_cgroupstelemetry.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/test_cgroupstelemetry.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,699 @@ +# Copyright 2019 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# +import errno +import os +import random +import time + +from mock import patch + +from azurelinuxagent.common.cgroup import CGroup +from azurelinuxagent.common.cgroupconfigurator import CGroupConfigurator +from azurelinuxagent.common.cgroupstelemetry import CGroupsTelemetry, Metric +from azurelinuxagent.common.osutil.default import BASE_CGROUPS +from azurelinuxagent.common.protocol.restapi import ExtHandlerProperties, ExtHandler +from azurelinuxagent.ga.exthandlers import ExtHandlerInstance +from nose.plugins.attrib import attr +from tests.tools import AgentTestCase, skip_if_predicate_false, skip_if_predicate_true, \ + are_cgroups_enabled, is_trusty_in_travis, i_am_root + + +def median(lst): + data = sorted(lst) + l_len = len(data) + if l_len < 1: + return None + if l_len % 2 == 0: + return (data[int((l_len - 1) / 2)] + data[int((l_len + 1) / 2)]) / 2.0 + else: + return data[int((l_len - 1) / 2)] + + +def generate_metric_list(lst): + return [float(sum(lst)) / float(len(lst)), + min(lst), + max(lst), + median(lst), + len(lst)] + + +def consume_cpu_time(): + waste = 0 + for x in range(1, 200000): + waste += 
random.random() + return waste + + +def consume_memory(): + waste = [] + for x in range(1, 3): + waste.append([random.random()] * 10000) + time.sleep(0.1) + waste *= 0 + return waste + + +def make_new_cgroup(name="test-cgroup"): + return CGroupConfigurator.get_instance().create_extension_cgroups(name) + + +class TestCGroupsTelemetry(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) + CGroupsTelemetry.reset() + + def tearDown(self): + AgentTestCase.tearDown(self) + CGroupsTelemetry.reset() + + def _assert_cgroup_metrics_equal(self, cpu_usage, memory_usage, max_memory_usage): + for _, cgroup_metric in CGroupsTelemetry._cgroup_metrics.items(): + self.assertListEqual(cgroup_metric.get_memory_usage()._data, memory_usage) + self.assertListEqual(cgroup_metric.get_max_memory_usage()._data, max_memory_usage) + self.assertListEqual(cgroup_metric.get_cpu_usage()._data, cpu_usage) + + @patch("azurelinuxagent.common.cgroup.CpuCgroup._get_current_cpu_total") + @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.get_total_cpu_ticks_since_boot") + def test_telemetry_polling_with_active_cgroups(self, *args): + num_extensions = 5 + for i in range(num_extensions): + dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup) + + dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", + "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_memory_cgroup) + + with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_max_memory_usage") as patch_get_memory_max_usage: + with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_memory_usage") as patch_get_memory_usage: + with patch("azurelinuxagent.common.cgroup.CpuCgroup._get_cpu_percent") as patch_get_cpu_percent: + with patch("azurelinuxagent.common.cgroup.CGroup.is_active") as patch_is_active: + patch_is_active.return_value = True + + current_cpu = 30 + current_memory = 
209715200 + current_max_memory = 471859200 + + patch_get_cpu_percent.return_value = current_cpu + patch_get_memory_usage.return_value = current_memory # example 200 MB + patch_get_memory_max_usage.return_value = current_max_memory # example 450 MB + + poll_count = 1 + + for data_count in range(poll_count, 10): + CGroupsTelemetry.poll_all_tracked() + self.assertEqual(len(CGroupsTelemetry._cgroup_metrics), num_extensions) + self._assert_cgroup_metrics_equal( + cpu_usage=[current_cpu] * data_count, + memory_usage=[current_memory] * data_count, + max_memory_usage=[current_max_memory] * data_count) + + CGroupsTelemetry.report_all_tracked() + + self.assertEqual(CGroupsTelemetry._cgroup_metrics.__len__(), num_extensions) + self._assert_cgroup_metrics_equal([], [], []) + + @patch("azurelinuxagent.common.cgroup.CpuCgroup._get_current_cpu_total") + @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.get_total_cpu_ticks_since_boot") + def test_telemetry_polling_with_inactive_cgroups(self, *args): + num_extensions = 5 + for i in range(num_extensions): + dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup) + + dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", + "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_memory_cgroup) + + with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_max_memory_usage") as patch_get_memory_max_usage: + with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_memory_usage") as patch_get_memory_usage: + with patch("azurelinuxagent.common.cgroup.CpuCgroup._get_cpu_percent") as patch_get_cpu_percent: + with patch("azurelinuxagent.common.cgroup.CGroup.is_active") as patch_is_active: + patch_is_active.return_value = False + + no_extensions_expected = 0 + data_count = 1 + current_cpu = 30 + current_memory = 209715200 + current_max_memory = 471859200 + + patch_get_cpu_percent.return_value 
= current_cpu + patch_get_memory_usage.return_value = current_memory # example 200 MB + patch_get_memory_max_usage.return_value = current_max_memory # example 450 MB + + for i in range(num_extensions): + self.assertTrue(CGroupsTelemetry.is_tracked("dummy_cpu_path_{0}".format(i))) + self.assertTrue(CGroupsTelemetry.is_tracked("dummy_memory_path_{0}".format(i))) + + CGroupsTelemetry.poll_all_tracked() + + for i in range(num_extensions): + self.assertFalse(CGroupsTelemetry.is_tracked("dummy_cpu_path_{0}".format(i))) + self.assertFalse(CGroupsTelemetry.is_tracked("dummy_memory_path_{0}".format(i))) + + self.assertEqual(CGroupsTelemetry._cgroup_metrics.__len__(), num_extensions) + self._assert_cgroup_metrics_equal( + cpu_usage=[current_cpu] * data_count, + memory_usage=[current_memory] * data_count, + max_memory_usage=[current_max_memory] * data_count) + + CGroupsTelemetry.report_all_tracked() + + self.assertEqual(CGroupsTelemetry._cgroup_metrics.__len__(), no_extensions_expected) + self._assert_cgroup_metrics_equal([], [], []) + + @patch("azurelinuxagent.common.cgroup.CpuCgroup._get_current_cpu_total") + @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.get_total_cpu_ticks_since_boot") + def test_telemetry_polling_with_changing_cgroups_state(self, *args): + num_extensions = 5 + for i in range(num_extensions): + dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup) + + dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", + "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_memory_cgroup) + + with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_max_memory_usage") as patch_get_memory_max_usage: + with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_memory_usage") as patch_get_memory_usage: + with patch("azurelinuxagent.common.cgroup.CpuCgroup._get_cpu_percent") as patch_get_cpu_percent: + with 
patch("azurelinuxagent.common.cgroup.CGroup.is_active") as patch_is_active: + patch_is_active.return_value = True + + no_extensions_expected = 0 + expected_data_count = 2 + + current_cpu = 30 + current_memory = 209715200 + current_max_memory = 471859200 + + patch_get_cpu_percent.return_value = current_cpu + patch_get_memory_usage.return_value = current_memory # example 200 MB + patch_get_memory_max_usage.return_value = current_max_memory # example 450 MB + + for i in range(num_extensions): + self.assertTrue(CGroupsTelemetry.is_tracked("dummy_cpu_path_{0}".format(i))) + self.assertTrue(CGroupsTelemetry.is_tracked("dummy_memory_path_{0}".format(i))) + + CGroupsTelemetry.poll_all_tracked() + + for i in range(num_extensions): + self.assertTrue(CGroupsTelemetry.is_tracked("dummy_cpu_path_{0}".format(i))) + self.assertTrue(CGroupsTelemetry.is_tracked("dummy_memory_path_{0}".format(i))) + + self.assertEqual(CGroupsTelemetry._cgroup_metrics.__len__(), num_extensions) + + patch_is_active.return_value = False + CGroupsTelemetry.poll_all_tracked() + + for i in range(num_extensions): + self.assertFalse(CGroupsTelemetry.is_tracked("dummy_cpu_path_{0}".format(i))) + self.assertFalse(CGroupsTelemetry.is_tracked("dummy_memory_path_{0}".format(i))) + + self.assertEqual(CGroupsTelemetry._cgroup_metrics.__len__(), num_extensions) + self._assert_cgroup_metrics_equal( + cpu_usage=[current_cpu] * expected_data_count, + memory_usage=[current_memory] * expected_data_count, + max_memory_usage=[current_max_memory] * expected_data_count) + + CGroupsTelemetry.report_all_tracked() + + self.assertEqual(CGroupsTelemetry._cgroup_metrics.__len__(), no_extensions_expected) + self._assert_cgroup_metrics_equal([], [], []) + + @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_stat") + @patch("azurelinuxagent.common.logger.periodic_warn") + @patch("azurelinuxagent.common.utils.fileutil.read_file") + def 
test_telemetry_polling_to_not_generate_transient_logs_ioerror_file_not_found(self, mock_read_file, + patch_periodic_warn, *args): + num_extensions = 1 + for i in range(num_extensions): + dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup) + + dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", + "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_memory_cgroup) + + self.assertEqual(0, patch_periodic_warn.call_count) + + # Not expecting logs present for io_error with errno=errno.ENOENT + io_error_2 = IOError() + io_error_2.errno = errno.ENOENT + mock_read_file.side_effect = io_error_2 + + poll_count = 1 + for data_count in range(poll_count, 10): + CGroupsTelemetry.poll_all_tracked() + self.assertEqual(0, patch_periodic_warn.call_count) + + @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_stat") + @patch("azurelinuxagent.common.logger.periodic_warn") + @patch("azurelinuxagent.common.utils.fileutil.read_file") + def test_telemetry_polling_to_generate_transient_logs_ioerror_permission_denied(self, mock_read_file, + patch_periodic_warn, *args): + num_extensions = 1 + num_controllers = 2 + is_active_check_per_controller = 2 + + for i in range(num_extensions): + dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup) + + dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", + "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_memory_cgroup) + + self.assertEqual(0, patch_periodic_warn.call_count) + + # Expecting logs to be present for different kind of errors + io_error_3 = IOError() + io_error_3.errno = errno.EPERM + mock_read_file.side_effect = io_error_3 + + poll_count = 1 + expected_count_per_call = num_controllers + is_active_check_per_controller + # each 
collect per controller would generate a log statement, and each cgroup would invoke a + # is active check raising an exception + + for data_count in range(poll_count, 10): + CGroupsTelemetry.poll_all_tracked() + self.assertEqual(poll_count * expected_count_per_call, patch_periodic_warn.call_count) + + @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_stat") + @patch("azurelinuxagent.common.utils.fileutil.read_file") + def test_telemetry_polling_to_generate_transient_logs_index_error(self, mock_read_file, *args): + num_extensions = 1 + for i in range(num_extensions): + dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup) + + dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", + "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_memory_cgroup) + + # Generating a different kind of error (non-IOError) to check the logging. + # Trying to invoke IndexError during the getParameter call + mock_read_file.return_value = '' + + with patch("azurelinuxagent.common.logger.periodic_warn") as patch_periodic_warn: + expected_call_count = 1 # called only once at start, and then gets removed from the tracked data. + for data_count in range(1, 10): + CGroupsTelemetry.poll_all_tracked() + self.assertEqual(expected_call_count, patch_periodic_warn.call_count) + + @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.get_total_cpu_ticks_since_boot") + @patch("azurelinuxagent.common.cgroup.CpuCgroup._get_current_cpu_total") + @patch("azurelinuxagent.common.cgroup.CpuCgroup._update_cpu_data") + def test_telemetry_calculations(self, *args): + num_polls = 10 + num_extensions = 1 + num_summarization_values = 7 + + cpu_percent_values = [random.randint(0, 100) for _ in range(num_polls)] + + # only verifying calculations and not validity of the values. 
+ memory_usage_values = [random.randint(0, 8 * 1024 ** 3) for _ in range(num_polls)] + max_memory_usage_values = [random.randint(0, 8 * 1024 ** 3) for _ in range(num_polls)] + + for i in range(num_extensions): + dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup) + + dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", + "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_memory_cgroup) + + self.assertEqual(2 * num_extensions, len(CGroupsTelemetry._tracked)) + + with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_max_memory_usage") as patch_get_memory_max_usage: + with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_memory_usage") as patch_get_memory_usage: + with patch("azurelinuxagent.common.cgroup.CpuCgroup._get_cpu_percent") as patch_get_cpu_percent: + with patch("azurelinuxagent.common.cgroup.CGroup.is_active") as patch_is_active: + for i in range(num_polls): + patch_is_active.return_value = True + patch_get_cpu_percent.return_value = cpu_percent_values[i] + patch_get_memory_usage.return_value = memory_usage_values[i] # example 200 MB + patch_get_memory_max_usage.return_value = max_memory_usage_values[i] # example 450 MB + CGroupsTelemetry.poll_all_tracked() + + collected_metrics = CGroupsTelemetry.report_all_tracked() + for i in range(num_extensions): + name = "dummy_extension_{0}".format(i) + + self.assertIn(name, collected_metrics) + self.assertIn("memory", collected_metrics[name]) + self.assertIn("cur_mem", collected_metrics[name]["memory"]) + self.assertIn("max_mem", collected_metrics[name]["memory"]) + self.assertEqual(num_summarization_values, len(collected_metrics[name]["memory"]["cur_mem"])) + self.assertEqual(num_summarization_values, len(collected_metrics[name]["memory"]["max_mem"])) + + self.assertListEqual(generate_metric_list(memory_usage_values), + 
collected_metrics[name]["memory"]["cur_mem"][0:5]) + self.assertListEqual(generate_metric_list(max_memory_usage_values), + collected_metrics[name]["memory"]["max_mem"][0:5]) + + self.assertIn("cpu", collected_metrics[name]) + self.assertIn("cur_cpu", collected_metrics[name]["cpu"]) + self.assertEqual(num_summarization_values, len(collected_metrics[name]["cpu"]["cur_cpu"])) + self.assertListEqual(generate_metric_list(cpu_percent_values), + collected_metrics[name]["cpu"]["cur_cpu"][0:5]) + + # mocking get_proc_stat to make it run on Mac and other systems + # this test does not need to read the values of the /proc/stat file + @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_stat") + def test_cgroup_tracking(self, *args): + num_extensions = 5 + num_controllers = 2 + for i in range(num_extensions): + dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup) + + dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", + "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_memory_cgroup) + + for i in range(num_extensions): + self.assertTrue(CGroupsTelemetry.is_tracked("dummy_cpu_path_{0}".format(i))) + self.assertTrue(CGroupsTelemetry.is_tracked("dummy_memory_path_{0}".format(i))) + + self.assertEqual(num_extensions * num_controllers, len(CGroupsTelemetry._tracked)) + + # mocking get_proc_stat to make it run on Mac and other systems + # this test does not need to read the values of the /proc/stat file + @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_stat") + def test_cgroup_pruning(self, *args): + num_extensions = 5 + num_controllers = 2 + for i in range(num_extensions): + dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup) + + dummy_memory_cgroup = 
CGroup.create("dummy_memory_path_{0}".format(i), "memory", + "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_memory_cgroup) + + for i in range(num_extensions): + self.assertTrue(CGroupsTelemetry.is_tracked("dummy_cpu_path_{0}".format(i))) + self.assertTrue(CGroupsTelemetry.is_tracked("dummy_memory_path_{0}".format(i))) + + self.assertEqual(num_extensions * num_controllers, len(CGroupsTelemetry._tracked)) + + CGroupsTelemetry.prune_all_tracked() + + for i in range(num_extensions): + self.assertFalse(CGroupsTelemetry.is_tracked("dummy_cpu_path_{0}".format(i))) + self.assertFalse(CGroupsTelemetry.is_tracked("dummy_memory_path_{0}".format(i))) + + self.assertEqual(0, len(CGroupsTelemetry._tracked)) + + # mocking get_proc_stat to make it run on Mac and other systems + # this test does not need to read the values of the /proc/stat file + @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_stat") + def test_cgroup_is_tracked(self, *args): + num_extensions = 5 + for i in range(num_extensions): + dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup) + + dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", "dummy_extension_{0}". 
+ format(i)) + CGroupsTelemetry.track_cgroup(dummy_memory_cgroup) + + for i in range(num_extensions): + self.assertTrue(CGroupsTelemetry.is_tracked("dummy_cpu_path_{0}".format(i))) + self.assertTrue(CGroupsTelemetry.is_tracked("dummy_memory_path_{0}".format(i))) + + self.assertFalse(CGroupsTelemetry.is_tracked("not_present_cpu_dummy_path")) + self.assertFalse(CGroupsTelemetry.is_tracked("not_present_memory_dummy_path")) + + # mocking get_proc_stat to make it run on Mac and other systems + # this test does not need to read the values of the /proc/stat file + @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_stat") + def test_process_cgroup_metric_with_incorrect_cgroups_mounted(self, *args): + num_extensions = 5 + for i in range(num_extensions): + dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup) + + dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", + "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_memory_cgroup) + + with patch("azurelinuxagent.common.cgroup.CpuCgroup.get_cpu_usage") as patch_get_cpu_usage: + with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_memory_usage") as patch_get_memory_usage: + patch_get_cpu_usage.side_effect = Exception("File not found") + patch_get_memory_usage.side_effect = Exception("File not found") + + for data_count in range(1, 10): + CGroupsTelemetry.poll_all_tracked() + + self.assertEqual(CGroupsTelemetry._cgroup_metrics.__len__(), num_extensions) + + collected_metrics = {} + for name, cgroup_metrics in CGroupsTelemetry._cgroup_metrics.items(): + collected_metrics[name] = CGroupsTelemetry._process_cgroup_metric(cgroup_metrics) + self.assertEqual(collected_metrics[name], {}) # empty + + @patch("azurelinuxagent.common.cgroup.CpuCgroup._get_current_cpu_total") + @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.get_total_cpu_ticks_since_boot") 
+ def test_process_cgroup_metric_with_no_memory_cgroup_mounted(self, *args): + num_extensions = 5 + + for i in range(num_extensions): + dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup) + + dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", + "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_memory_cgroup) + + with patch("azurelinuxagent.common.cgroup.CpuCgroup._get_cpu_percent") as patch_get_cpu_percent: + with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_memory_usage") as patch_get_memory_usage: + with patch("azurelinuxagent.common.cgroup.CGroup.is_active") as patch_is_active: + patch_is_active.return_value = True + patch_get_memory_usage.side_effect = Exception("File not found") + + current_cpu = 30 + patch_get_cpu_percent.return_value = current_cpu + + poll_count = 1 + + for data_count in range(poll_count, 10): + CGroupsTelemetry.poll_all_tracked() + + self.assertEqual(CGroupsTelemetry._cgroup_metrics.__len__(), num_extensions) + self._assert_cgroup_metrics_equal(cpu_usage=[current_cpu] * data_count, memory_usage=[], max_memory_usage=[]) + + CGroupsTelemetry.report_all_tracked() + + self.assertEqual(CGroupsTelemetry._cgroup_metrics.__len__(), num_extensions) + self._assert_cgroup_metrics_equal([], [], []) + + @patch("azurelinuxagent.common.cgroup.CpuCgroup._get_current_cpu_total") + @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.get_total_cpu_ticks_since_boot") + def test_process_cgroup_metric_with_no_cpu_cgroup_mounted(self, *args): + num_extensions = 5 + for i in range(num_extensions): + dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup) + + dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", + "dummy_extension_{0}".format(i)) + 
CGroupsTelemetry.track_cgroup(dummy_memory_cgroup) + + with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_max_memory_usage") as patch_get_memory_max_usage: + with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_memory_usage") as patch_get_memory_usage: + with patch("azurelinuxagent.common.cgroup.CpuCgroup.get_cpu_usage") as patch_get_cpu_usage: + with patch("azurelinuxagent.common.cgroup.CGroup.is_active") as patch_is_active: + patch_is_active.return_value = True + + patch_get_cpu_usage.side_effect = Exception("File not found") + + current_memory = 209715200 + current_max_memory = 471859200 + + patch_get_memory_usage.return_value = current_memory # example 200 MB + patch_get_memory_max_usage.return_value = current_max_memory # example 450 MB + + poll_count = 1 + + for data_count in range(poll_count, 10): + CGroupsTelemetry.poll_all_tracked() + self.assertEqual(len(CGroupsTelemetry._cgroup_metrics), num_extensions) + self._assert_cgroup_metrics_equal( + cpu_usage=[], + memory_usage=[current_memory] * data_count, + max_memory_usage=[current_max_memory] * data_count) + + CGroupsTelemetry.report_all_tracked() + + self.assertEqual(len(CGroupsTelemetry._cgroup_metrics), num_extensions) + self._assert_cgroup_metrics_equal([], [], []) + + @patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_memory_usage") + @patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_max_memory_usage") + @patch("azurelinuxagent.common.cgroup.CpuCgroup.get_cpu_usage") + @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.get_total_cpu_ticks_since_boot") + def test_extension_temetry_not_sent_for_empty_perf_metrics(self, *args): + num_extensions = 5 + for i in range(num_extensions): + dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup) + + dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", + "dummy_extension_{0}".format(i)) + 
CGroupsTelemetry.track_cgroup(dummy_memory_cgroup) + + with patch("azurelinuxagent.common.cgroupstelemetry.CGroupsTelemetry._process_cgroup_metric") as \ + patch_process_cgroup_metric: + with patch("azurelinuxagent.common.cgroup.CGroup.is_active") as patch_is_active: + + patch_is_active.return_value = False + patch_process_cgroup_metric.return_value = {} + poll_count = 1 + + for data_count in range(poll_count, 10): + CGroupsTelemetry.poll_all_tracked() + + collected_metrics = CGroupsTelemetry.report_all_tracked() + self.assertEqual(0, len(collected_metrics)) + + @skip_if_predicate_false(are_cgroups_enabled, "Does not run when Cgroups are not enabled") + @skip_if_predicate_true(is_trusty_in_travis, "Does not run on Trusty in Travis") + @attr('requires_sudo') + def test_telemetry_with_tracked_cgroup(self): + self.assertTrue(i_am_root(), "Test does not run when non-root") + + # This test has some timing issues when systemd is managing cgroups, so we force the file system API + # by creating a new instance of the CGroupConfigurator + with patch("azurelinuxagent.common.cgroupapi.CGroupsApi._is_systemd", return_value=False): + cgroup_configurator_instance = CGroupConfigurator._instance + CGroupConfigurator._instance = None + + try: + max_num_polls = 30 + time_to_wait = 3 + extn_name = "foobar-1.0.0" + num_summarization_values = 7 + + cgs = make_new_cgroup(extn_name) + self.assertEqual(len(cgs), 2) + + ext_handler_properties = ExtHandlerProperties() + ext_handler_properties.version = "1.0.0" + self.ext_handler = ExtHandler(name='foobar') + self.ext_handler.properties = ext_handler_properties + self.ext_handler_instance = ExtHandlerInstance(ext_handler=self.ext_handler, protocol=None) + + command = self.create_script("keep_cpu_busy_and_consume_memory_for_5_seconds", ''' +nohup python -c "import time + +for i in range(5): + x = [1, 2, 3, 4, 5] * (i * 1000) + time.sleep({0}) + x *= 0 + print('Test loop')" & +'''.format(time_to_wait)) + + self.log_dir = 
os.path.join(self.tmp_dir, "log") + + with patch("azurelinuxagent.ga.exthandlers.ExtHandlerInstance.get_base_dir", lambda *_: self.tmp_dir) as \ + patch_get_base_dir: + with patch("azurelinuxagent.ga.exthandlers.ExtHandlerInstance.get_log_dir", lambda *_: self.log_dir) as \ + patch_get_log_dir: + self.ext_handler_instance.launch_command(command) + + # + # If the test is made to run using the systemd API, then the paths of the cgroups need to be checked differently: + # + # self.assertEquals(len(CGroupsTelemetry._tracked), 2) + # cpu = os.path.join(BASE_CGROUPS, "cpu", "system.slice", r"foobar_1.0.0_.*\.scope") + # self.assertTrue(any(re.match(cpu, tracked.path) for tracked in CGroupsTelemetry._tracked)) + # memory = os.path.join(BASE_CGROUPS, "memory", "system.slice", r"foobar_1.0.0_.*\.scope") + # self.assertTrue(any(re.match(memory, tracked.path) for tracked in CGroupsTelemetry._tracked)) + # + self.assertTrue(CGroupsTelemetry.is_tracked(os.path.join( + BASE_CGROUPS, "cpu", "walinuxagent.extensions", "foobar_1.0.0"))) + self.assertTrue(CGroupsTelemetry.is_tracked(os.path.join( + BASE_CGROUPS, "memory", "walinuxagent.extensions", "foobar_1.0.0"))) + + for i in range(max_num_polls): + CGroupsTelemetry.poll_all_tracked() + time.sleep(0.5) + + collected_metrics = CGroupsTelemetry.report_all_tracked() + + self.assertIn("memory", collected_metrics[extn_name]) + self.assertIn("cur_mem", collected_metrics[extn_name]["memory"]) + self.assertIn("max_mem", collected_metrics[extn_name]["memory"]) + self.assertEqual(len(collected_metrics[extn_name]["memory"]["cur_mem"]), num_summarization_values) + self.assertEqual(len(collected_metrics[extn_name]["memory"]["max_mem"]), num_summarization_values) + + self.assertIsInstance(collected_metrics[extn_name]["memory"]["cur_mem"][5], str) + self.assertIsInstance(collected_metrics[extn_name]["memory"]["cur_mem"][6], str) + self.assertIsInstance(collected_metrics[extn_name]["memory"]["max_mem"][5], str) + 
self.assertIsInstance(collected_metrics[extn_name]["memory"]["max_mem"][6], str) + + self.assertIn("cpu", collected_metrics[extn_name]) + self.assertIn("cur_cpu", collected_metrics[extn_name]["cpu"]) + self.assertEqual(len(collected_metrics[extn_name]["cpu"]["cur_cpu"]), num_summarization_values) + + self.assertIsInstance(collected_metrics[extn_name]["cpu"]["cur_cpu"][5], str) + self.assertIsInstance(collected_metrics[extn_name]["cpu"]["cur_cpu"][6], str) + + for i in range(5): + self.assertGreater(collected_metrics[extn_name]["memory"]["cur_mem"][i], 0) + self.assertGreater(collected_metrics[extn_name]["memory"]["max_mem"][i], 0) + self.assertGreaterEqual(collected_metrics[extn_name]["cpu"]["cur_cpu"][i], 0) + # Equal because CPU could be zero for minimum value. + finally: + CGroupConfigurator._instance = cgroup_configurator_instance + + +class TestMetric(AgentTestCase): + def test_empty_metrics(self): + test_metric = Metric() + self.assertEqual("None", test_metric.first_poll_time()) + self.assertEqual("None", test_metric.last_poll_time()) + self.assertEqual(0, test_metric.count()) + self.assertEqual(None, test_metric.median()) + self.assertEqual(None, test_metric.max()) + self.assertEqual(None, test_metric.min()) + self.assertEqual(None, test_metric.average()) + + def test_metrics(self): + num_polls = 10 + + test_values = [random.randint(0, 100) for _ in range(num_polls)] + + test_metric = Metric() + for value in test_values: + test_metric.append(value) + + self.assertListEqual(generate_metric_list(test_values), [test_metric.average(), test_metric.min(), + test_metric.max(), test_metric.median(), + test_metric.count()]) + + test_metric.clear() + self.assertEqual("None", test_metric.first_poll_time()) + self.assertEqual("None", test_metric.last_poll_time()) + self.assertEqual(0, test_metric.count()) + self.assertEqual(None, test_metric.median()) + self.assertEqual(None, test_metric.max()) + self.assertEqual(None, test_metric.min()) + self.assertEqual(None, 
test_metric.average()) diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/test_conf.py walinuxagent-2.2.45/tests/common/test_conf.py --- walinuxagent-2.2.21+really2.2.20/tests/common/test_conf.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/test_conf.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import mock @@ -27,45 +27,48 @@ # Note: # -- These values *MUST* match those from data/test_waagent.conf EXPECTED_CONFIGURATION = { - "Provisioning.Enabled" : True, - "Provisioning.UseCloudInit" : True, - "Provisioning.DeleteRootPassword" : True, - "Provisioning.RegenerateSshHostKeyPair" : True, - "Provisioning.SshHostKeyPairType" : "rsa", - "Provisioning.MonitorHostName" : True, - "Provisioning.DecodeCustomData" : False, - "Provisioning.ExecuteCustomData" : False, - "Provisioning.PasswordCryptId" : '6', - "Provisioning.PasswordCryptSaltLength" : 10, - "Provisioning.AllowResetSysUser" : False, - "ResourceDisk.Format" : True, - "ResourceDisk.Filesystem" : "ext4", - "ResourceDisk.MountPoint" : "/mnt/resource", - "ResourceDisk.EnableSwap" : False, - "ResourceDisk.SwapSizeMB" : 0, - "ResourceDisk.MountOptions" : None, - "Logs.Verbose" : False, - "OS.EnableFIPS" : True, - "OS.RootDeviceScsiTimeout" : '300', - "OS.OpensslPath" : '/usr/bin/openssl', - "OS.SshClientAliveInterval" : 42, - "OS.SshDir" : "/notareal/path", - "HttpProxy.Host" : None, - "HttpProxy.Port" : None, - "DetectScvmmEnv" : False, - "Lib.Dir" : "/var/lib/waagent", - "DVD.MountPoint" : "/mnt/cdrom/secure", - "Pid.File" : "/var/run/waagent.pid", - 
"Extension.LogDir" : "/var/log/azure", - "OS.HomeDir" : "/home", - "OS.EnableRDMA" : False, - "OS.UpdateRdmaDriver" : False, - "OS.CheckRdmaDriver" : False, - "AutoUpdate.Enabled" : True, - "AutoUpdate.GAFamily" : "Prod", - "EnableOverProvisioning" : False, - "OS.AllowHTTP" : False, - "OS.EnableFirewall" : True + "Extensions.Enabled": True, + "Provisioning.Agent": "auto", + "Provisioning.DeleteRootPassword": True, + "Provisioning.RegenerateSshHostKeyPair": True, + "Provisioning.SshHostKeyPairType": "rsa", + "Provisioning.MonitorHostName": True, + "Provisioning.DecodeCustomData": False, + "Provisioning.ExecuteCustomData": False, + "Provisioning.PasswordCryptId": '6', + "Provisioning.PasswordCryptSaltLength": 10, + "Provisioning.AllowResetSysUser": False, + "ResourceDisk.Format": True, + "ResourceDisk.Filesystem": "ext4", + "ResourceDisk.MountPoint": "/mnt/resource", + "ResourceDisk.EnableSwap": False, + "ResourceDisk.EnableSwapEncryption": False, + "ResourceDisk.SwapSizeMB": 0, + "ResourceDisk.MountOptions": None, + "Logs.Verbose": False, + "OS.EnableFIPS": True, + "OS.RootDeviceScsiTimeout": '300', + "OS.OpensslPath": '/usr/bin/openssl', + "OS.SshClientAliveInterval": 42, + "OS.SshDir": "/notareal/path", + "HttpProxy.Host": None, + "HttpProxy.Port": None, + "DetectScvmmEnv": False, + "Lib.Dir": "/var/lib/waagent", + "DVD.MountPoint": "/mnt/cdrom/secure", + "Pid.File": "/var/run/waagent.pid", + "Extension.LogDir": "/var/log/azure", + "OS.HomeDir": "/home", + "OS.EnableRDMA": False, + "OS.UpdateRdmaDriver": False, + "OS.CheckRdmaDriver": False, + "AutoUpdate.Enabled": True, + "AutoUpdate.GAFamily": "Prod", + "EnableOverProvisioning": True, + "OS.AllowHTTP": False, + "OS.EnableFirewall": False, + "CGroups.EnforceLimits": False, + "CGroups.Excluded": "customscript,runcommand", } def setUp(self): @@ -78,6 +81,7 @@ def test_key_value_handling(self): self.assertEqual("Value1", self.conf.get("FauxKey1", "Bad")) self.assertEqual("Value2 Value2", self.conf.get("FauxKey2", 
"Bad")) + self.assertEqual("delalloc,rw,noatime,nobarrier,users,mode=777", self.conf.get("FauxKey3", "Bad")) def test_get_ssh_dir(self): self.assertTrue(get_ssh_dir(self.conf).startswith("/notareal/path")) @@ -101,8 +105,8 @@ def test_get_fips_enabled(self): self.assertTrue(get_fips_enabled(self.conf)) - def test_get_provision_cloudinit(self): - self.assertTrue(get_provision_cloudinit(self.conf)) + def test_get_provision_agent(self): + self.assertTrue(get_provisioning_agent(self.conf) == 'auto') def test_get_configuration(self): configuration = conf.get_configuration(self.conf) @@ -110,4 +114,58 @@ for k in TestConf.EXPECTED_CONFIGURATION.keys(): self.assertEqual( TestConf.EXPECTED_CONFIGURATION[k], - configuration[k]) + configuration[k], + k) + + def test_get_agent_disabled_file_path(self): + self.assertEqual(get_disable_agent_file_path(self.conf), + os.path.join(self.tmp_dir, DISABLE_AGENT_FILE)) + + def test_write_agent_disabled(self): + """ + Test writing disable_agent is empty + """ + from azurelinuxagent.pa.provision.default import ProvisionHandler + + disable_file_path = get_disable_agent_file_path(self.conf) + self.assertFalse(os.path.exists(disable_file_path)) + ProvisionHandler.write_agent_disabled() + self.assertTrue(os.path.exists(disable_file_path)) + self.assertEqual('', fileutil.read_file(disable_file_path)) + + def test_get_extensions_enabled(self): + self.assertTrue(get_extensions_enabled(self.conf)) + + @patch('azurelinuxagent.common.conf.ConfigurationProvider.get') + def assert_get_cgroups_excluded(self, patch_get, config, expected_value): + patch_get.return_value = config + self.assertEqual(expected_value, conf.get_cgroups_excluded(self.conf)) + + def test_get_cgroups_excluded(self): + self.assert_get_cgroups_excluded(config=None, + expected_value=[]) + + self.assert_get_cgroups_excluded(config='', + expected_value=[]) + + self.assert_get_cgroups_excluded(config=' ', + expected_value=[]) + + self.assert_get_cgroups_excluded(config=' , ,, ,', + 
expected_value=[]) + + standard_values = ['customscript', 'runcommand'] + self.assert_get_cgroups_excluded(config='CustomScript, RunCommand', + expected_value=standard_values) + + self.assert_get_cgroups_excluded(config='customScript, runCommand , , ,,', + expected_value=standard_values) + + self.assert_get_cgroups_excluded(config=' customscript,runcommand ', + expected_value=standard_values) + + self.assert_get_cgroups_excluded(config='customscript,, runcommand', + expected_value=standard_values) + + self.assert_get_cgroups_excluded(config=',,customscript ,runcommand', + expected_value=standard_values) diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/test_errorstate.py walinuxagent-2.2.45/tests/common/test_errorstate.py --- walinuxagent-2.2.21+really2.2.20/tests/common/test_errorstate.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/test_errorstate.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,104 @@ +from datetime import timedelta + +from azurelinuxagent.common.errorstate import * +from tests.tools import * + + +class TestErrorState(unittest.TestCase): + def test_errorstate00(self): + """ + If ErrorState is never incremented, it will never trigger. + """ + test_subject = ErrorState(timedelta(seconds=10000)) + self.assertFalse(test_subject.is_triggered()) + self.assertEqual(0, test_subject.count) + self.assertEqual('unknown', test_subject.fail_time) + + def test_errorstate01(self): + """ + If ErrorState is never incremented, and the timedelta is zero it will + not trigger. + """ + test_subject = ErrorState(timedelta(seconds=0)) + self.assertFalse(test_subject.is_triggered()) + self.assertEqual(0, test_subject.count) + self.assertEqual('unknown', test_subject.fail_time) + + def test_errorstate02(self): + """ + If ErrorState is triggered, and the current time is within timedelta + of now it will trigger. 
+ """ + test_subject = ErrorState(timedelta(seconds=0)) + test_subject.incr() + + self.assertTrue(test_subject.is_triggered()) + self.assertEqual(1, test_subject.count) + self.assertEqual('0.0 min', test_subject.fail_time) + + @patch('azurelinuxagent.common.errorstate.datetime') + def test_errorstate03(self, mock_time): + """ + ErrorState will not trigger until + 1. ErrorState has been incr() at least once. + 2. The timedelta from the first incr() has elapsed. + """ + test_subject = ErrorState(timedelta(minutes=15)) + + for x in range(1, 10): + mock_time.utcnow = Mock(return_value=datetime.utcnow() + timedelta(minutes=x)) + + test_subject.incr() + self.assertFalse(test_subject.is_triggered()) + + mock_time.utcnow = Mock(return_value=datetime.utcnow() + timedelta(minutes=30)) + test_subject.incr() + self.assertTrue(test_subject.is_triggered()) + self.assertEqual('29.0 min', test_subject.fail_time) + + def test_errorstate04(self): + """ + If ErrorState is reset the timestamp of the last incr() is reset to + None. 
+ """ + + test_subject = ErrorState(timedelta(minutes=15)) + self.assertTrue(test_subject.timestamp is None) + + test_subject.incr() + self.assertTrue(test_subject.timestamp is not None) + + test_subject.reset() + self.assertTrue(test_subject.timestamp is None) + + def test_errorstate05(self): + """ + Test the fail_time for various scenarios + """ + + test_subject = ErrorState(timedelta(minutes=15)) + self.assertEqual('unknown', test_subject.fail_time) + + test_subject.incr() + self.assertEqual('0.0 min', test_subject.fail_time) + + test_subject.timestamp = datetime.utcnow() - timedelta(seconds=60) + self.assertEqual('1.0 min', test_subject.fail_time) + + test_subject.timestamp = datetime.utcnow() - timedelta(seconds=73) + self.assertEqual('1.22 min', test_subject.fail_time) + + test_subject.timestamp = datetime.utcnow() - timedelta(seconds=120) + self.assertEqual('2.0 min', test_subject.fail_time) + + test_subject.timestamp = datetime.utcnow() - timedelta(seconds=60 * 59) + self.assertEqual('59.0 min', test_subject.fail_time) + + test_subject.timestamp = datetime.utcnow() - timedelta(seconds=60 * 60) + self.assertEqual('1.0 hr', test_subject.fail_time) + + test_subject.timestamp = datetime.utcnow() - timedelta(seconds=60 * 95) + self.assertEqual('1.58 hr', test_subject.fail_time) + + test_subject.timestamp = datetime.utcnow() - timedelta(seconds=60 * 60 * 3) + self.assertEqual('3.0 hr', test_subject.fail_time) diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/test_event.py walinuxagent-2.2.45/tests/common/test_event.py --- walinuxagent-2.2.21+really2.2.20/tests/common/test_event.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/test_event.py 2019-11-07 00:36:56.000000000 +0000 @@ -12,26 +12,94 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from __future__ import print_function -from datetime import datetime +import json +import os +import threading +from datetime import datetime, timedelta -import azurelinuxagent.common.event as event -import azurelinuxagent.common.logger as logger +from mock import patch, Mock +from azurelinuxagent.common import event, logger from azurelinuxagent.common.event import add_event, \ - mark_event_status, should_emit_event, \ - WALAEventOperation + WALAEventOperation, elapsed_milliseconds +from azurelinuxagent.common.exception import EventError from azurelinuxagent.common.future import ustr -from azurelinuxagent.common.version import CURRENT_VERSION - -from tests.tools import * +from azurelinuxagent.common.protocol.wire import GoalState +from azurelinuxagent.common.utils import fileutil +from azurelinuxagent.common.utils.extensionprocessutil import read_output +from azurelinuxagent.common.version import CURRENT_VERSION, CURRENT_AGENT +from azurelinuxagent.ga.monitor import MonitorHandler +from tests.tools import AgentTestCase, load_data, data_dir class TestEvent(AgentTestCase): + def test_add_event_should_read_container_id_from_process_environment(self): + tmp_file = os.path.join(self.tmp_dir, "tmp_file") + + def patch_save_event(json_data): + fileutil.write_file(tmp_file, json_data) + + with patch("azurelinuxagent.common.event.EventLogger.save_event", side_effect=patch_save_event): + # No container id is set + os.environ.pop(event.CONTAINER_ID_ENV_VARIABLE, None) + event.add_event(name='dummy_name') + data = fileutil.read_file(tmp_file) + self.assertTrue('{"name": "ContainerId", "value": "UNINITIALIZED"}' in data or + '{"value": "UNINITIALIZED", "name": "ContainerId"}' in data) + + # Container id is set as an environment variable explicitly + os.environ[event.CONTAINER_ID_ENV_VARIABLE] = '424242' + event.add_event(name='dummy_name') + data = fileutil.read_file(tmp_file) + 
self.assertTrue('{{"name": "ContainerId", "value": "{0}"}}'.format( + os.environ[event.CONTAINER_ID_ENV_VARIABLE]) in data or + '{{"value": "{0}", "name": "ContainerId"}}'.format( + os.environ[event.CONTAINER_ID_ENV_VARIABLE]) in data) + + # Container id is set as an environment variable when parsing the goal state + xml_text = load_data("wire/goal_state.xml") + goal_state = GoalState(xml_text) + + container_id = goal_state.container_id + event.add_event(name='dummy_name') + data = fileutil.read_file(tmp_file) + self.assertTrue('{{"name": "ContainerId", "value": "{0}"}}'.format(container_id) in data or + '{{"value": "{0}", "name": "ContainerId"}}'.format(container_id), data) + + # Container id is updated as the goal state changes, both in telemetry event and in environment variables + new_container_id = "z6d5526c-5ac2-4200-b6e2-56f2b70c5ab2" + xml_text = load_data("wire/goal_state.xml") + xml_text_updated = xml_text.replace("c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2", new_container_id) + goal_state = GoalState(xml_text_updated) + + event.add_event(name='dummy_name') + data = fileutil.read_file(tmp_file) + + # Assert both the environment variable and telemetry event got updated + self.assertEquals(os.environ[event.CONTAINER_ID_ENV_VARIABLE], new_container_id) + self.assertTrue('{{"name": "ContainerId", "value": "{0}"}}'.format(new_container_id) in data or + '{{"value": "{0}", "name": "ContainerId"}}'.format(new_container_id), data) + + os.environ.pop(event.CONTAINER_ID_ENV_VARIABLE) + + def test_add_event_should_handle_event_errors(self): + with patch("azurelinuxagent.common.utils.fileutil.mkdir", side_effect=OSError): + with patch('azurelinuxagent.common.logger.periodic_error') as mock_logger_periodic_error: + add_event('test', message='test event') + + # The event shouldn't have been created + self.assertTrue(len(os.listdir(self.tmp_dir)) == 0) + + # The exception should have been caught and logged + args = mock_logger_periodic_error.call_args + exception_message = 
args[0][1] + self.assertIn("[EventError] Failed to create events folder", exception_message) + def test_event_status_event_marked(self): es = event.__event_status__ @@ -49,8 +117,7 @@ self.assertTrue(es.event_succeeded("Foo", "1.2", "FauxOperation")) def test_event_status_records_status(self): - d = tempfile.mkdtemp() - es = event.EventStatus(tempfile.mkdtemp()) + es = event.EventStatus() es.mark_event_status("Foo", "1.2", "FauxOperation", True) self.assertTrue(es.event_succeeded("Foo", "1.2", "FauxOperation")) @@ -70,7 +137,7 @@ self.assertFalse(es.event_succeeded("Foo", "1.2", "FauxOperation")) def test_should_emit_event_ignores_unknown_operations(self): - event.__event_status__ = event.EventStatus(tempfile.mkdtemp()) + event.__event_status__ = event.EventStatus() self.assertTrue(event.should_emit_event("Foo", "1.2", "FauxOperation", True)) self.assertTrue(event.should_emit_event("Foo", "1.2", "FauxOperation", False)) @@ -81,9 +148,8 @@ self.assertTrue(event.should_emit_event("Foo", "1.2", "FauxOperation", True)) self.assertTrue(event.should_emit_event("Foo", "1.2", "FauxOperation", False)) - def test_should_emit_event_handles_known_operations(self): - event.__event_status__ = event.EventStatus(tempfile.mkdtemp()) + event.__event_status__ = event.EventStatus() # Known operations always initially "fire" for op in event.__event_status_operations__: @@ -108,12 +174,64 @@ self.assertTrue(event.should_emit_event("Foo", "1.2", op, True)) self.assertFalse(event.should_emit_event("Foo", "1.2", op, False)) + @patch('azurelinuxagent.common.event.EventLogger') + @patch('azurelinuxagent.common.logger.error') + @patch('azurelinuxagent.common.logger.warn') + @patch('azurelinuxagent.common.logger.info') + def test_should_log_errors_if_failed_operation_and_empty_event_dir(self, + mock_logger_info, + mock_logger_warn, + mock_logger_error, + mock_reporter): + mock_reporter.event_dir = None + add_event("dummy name", + version=CURRENT_VERSION, + op=WALAEventOperation.Download, + 
is_success=False, + message="dummy event message", + reporter=mock_reporter) + + self.assertEquals(1, mock_logger_error.call_count) + self.assertEquals(1, mock_logger_warn.call_count) + self.assertEquals(0, mock_logger_info.call_count) + + args = mock_logger_error.call_args[0] + self.assertEquals(('dummy name', 'Download', 'dummy event message', 0), args[1:]) + + @patch('azurelinuxagent.common.event.EventLogger') + @patch('azurelinuxagent.common.logger.error') + @patch('azurelinuxagent.common.logger.warn') + @patch('azurelinuxagent.common.logger.info') + def test_should_log_errors_if_failed_operation_and_not_empty_event_dir(self, + mock_logger_info, + mock_logger_warn, + mock_logger_error, + mock_reporter): + mock_reporter.event_dir = "dummy" + + with patch("azurelinuxagent.common.event.should_emit_event", return_value=True) as mock_should_emit_event: + with patch("azurelinuxagent.common.event.mark_event_status"): + with patch("azurelinuxagent.common.event.EventLogger._add_event"): + add_event("dummy name", + version=CURRENT_VERSION, + op=WALAEventOperation.Download, + is_success=False, + message="dummy event message") + + self.assertEquals(1, mock_should_emit_event.call_count) + self.assertEquals(1, mock_logger_error.call_count) + self.assertEquals(0, mock_logger_warn.call_count) + self.assertEquals(0, mock_logger_info.call_count) + + args = mock_logger_error.call_args[0] + self.assertEquals(('dummy name', 'Download', 'dummy event message', 0), args[1:]) + @patch('azurelinuxagent.common.event.EventLogger.add_event') def test_periodic_emits_if_not_previously_sent(self, mock_event): event.__event_logger__.reset_periodic() event.add_periodic(logger.EVERY_DAY, "FauxEvent") - mock_event.assert_called_once() + self.assertEqual(1, mock_event.call_count) @patch('azurelinuxagent.common.event.EventLogger.add_event') def test_periodic_does_not_emit_if_previously_sent(self, mock_event): @@ -154,26 +272,121 @@ @patch('azurelinuxagent.common.event.EventLogger.add_event') def 
test_periodic_forwards_args(self, mock_event): event.__event_logger__.reset_periodic() + event_time = datetime.utcnow().__str__() + event.add_periodic(logger.EVERY_DAY, "FauxEvent", op=WALAEventOperation.Log, is_success=True, duration=0, + version=str(CURRENT_VERSION), message="FauxEventMessage", evt_type="", is_internal=False, + log_event=True, force=False) + mock_event.assert_called_once_with("FauxEvent", op=WALAEventOperation.Log, is_success=True, duration=0, + version=str(CURRENT_VERSION), message="FauxEventMessage", evt_type="", + is_internal=False, log_event=True) - event.add_periodic(logger.EVERY_DAY, "FauxEvent") - mock_event.assert_called_once_with( - "FauxEvent", - duration=0, evt_type='', is_internal=False, is_success=True, - log_event=True, message='', op=WALAEventOperation.Unknown, - version=str(CURRENT_VERSION)) + @patch("azurelinuxagent.common.event.datetime") + @patch('azurelinuxagent.common.event.EventLogger.add_event') + def test_periodic_forwards_args_default_values(self, mock_event, mock_datetime): + event.__event_logger__.reset_periodic() + event.add_periodic(logger.EVERY_DAY, "FauxEvent", message="FauxEventMessage") + mock_event.assert_called_once_with("FauxEvent", op=WALAEventOperation.Unknown, is_success=True, duration=0, + version=str(CURRENT_VERSION), message="FauxEventMessage", evt_type="", + is_internal=False, log_event=True) + + @patch("azurelinuxagent.common.event.EventLogger.add_event") + def test_add_event_default_variables(self, mock_add_event): + add_event('test', message='test event') + mock_add_event.assert_called_once_with('test', duration=0, evt_type='', is_internal=False, is_success=True, + log_event=True, message='test event', op=WALAEventOperation.Unknown, + version=str(CURRENT_VERSION)) def test_save_event(self): add_event('test', message='test event') self.assertTrue(len(os.listdir(self.tmp_dir)) == 1) + # checking the extension of the file created. 
+ for filename in os.listdir(self.tmp_dir): + self.assertEqual(".tld", filename[-4:]) + + def test_save_event_message_with_non_ascii_characters(self): + test_data_dir = os.path.join(data_dir, "events", "collect_and_send_extension_stdout_stderror") + msg = "" + + with open(os.path.join(test_data_dir, "dummy_stdout_with_non_ascii_characters"), mode="r+b") as stdout: + with open(os.path.join(test_data_dir, "dummy_stderr_with_non_ascii_characters"), mode="r+b") as stderr: + msg = read_output(stdout, stderr) + + duration = elapsed_milliseconds(datetime.utcnow()) + log_msg = "{0}\n{1}".format("DummyCmd", "\n".join([line for line in msg.split('\n') if line != ""])) + + with patch("azurelinuxagent.common.event.datetime") as patch_datetime: + patch_datetime.utcnow = Mock(return_value=datetime.strptime("2019-01-01 01:30:00", + '%Y-%m-%d %H:%M:%S')) + with patch('os.getpid', return_value=42): + with patch("threading.Thread.getName", return_value="HelloWorldTask"): + add_event('test_extension', message=log_msg, duration=duration) + + for tld_file in os.listdir(self.tmp_dir): + event_str = MonitorHandler.collect_event(os.path.join(self.tmp_dir, tld_file)) + event_json = json.loads(event_str) + + self.assertEqual(len(event_json["parameters"]), 15) + + # Checking the contents passed above, and also validating the default values that were passed in. 
+ for i in event_json["parameters"]: + if i["name"] == "Name": + self.assertEqual(i["value"], "test_extension") + elif i["name"] == "Message": + self.assertEqual(i["value"], log_msg) + elif i["name"] == "Version": + self.assertEqual(i["value"], str(CURRENT_VERSION)) + elif i['name'] == 'IsInternal': + self.assertEqual(i['value'], False) + elif i['name'] == 'Operation': + self.assertEqual(i['value'], 'Unknown') + elif i['name'] == 'OperationSuccess': + self.assertEqual(i['value'], True) + elif i['name'] == 'Duration': + self.assertEqual(i['value'], 0) + elif i['name'] == 'ExtensionType': + self.assertEqual(i['value'], '') + elif i['name'] == 'ContainerId': + self.assertEqual(i['value'], 'UNINITIALIZED') + elif i['name'] == 'OpcodeName': + self.assertEqual(i['value'], '2019-01-01 01:30:00') + elif i['name'] == 'EventTid': + self.assertEqual(i['value'], threading.current_thread().ident) + elif i['name'] == 'EventPid': + self.assertEqual(i['value'], 42) + elif i['name'] == 'TaskName': + self.assertEqual(i['value'], 'HelloWorldTask') + elif i['name'] == 'KeywordName': + self.assertEqual(i['value'], '') + elif i['name'] == 'GAVersion': + self.assertEqual(i['value'], str(CURRENT_AGENT)) + else: + self.assertFalse(True, "Contains a field outside the defaults expected. Field Name: {0}". + format(i['name'])) + + def test_save_event_message_with_decode_errors(self): + tmp_file = os.path.join(self.tmp_dir, "tmp_file") + fileutil.write_file(tmp_file, "This is not JSON data", encoding="utf-16") + + for tld_file in os.listdir(self.tmp_dir): + try: + MonitorHandler.collect_event(os.path.join(self.tmp_dir, tld_file)) + except Exception as e: + self.assertIsInstance(e, EventError) + def test_save_event_rollover(self): - add_event('test', message='first event') - for i in range(0, 999): + # We keep 1000 events only, and the older ones are removed. + + num_of_events = 999 + add_event('test', message='first event') # this makes number of events to num_of_events + 1. 
+ for i in range(num_of_events): add_event('test', message='test event {0}'.format(i)) + num_of_events += 1 # adding the first add_event. + events = os.listdir(self.tmp_dir) events.sort() - self.assertTrue(len(events) == 1000) + self.assertTrue(len(events) == num_of_events, "{0} is not equal to {1}".format(len(events), num_of_events)) first_event = os.path.join(self.tmp_dir, events[0]) with open(first_event) as first_fh: @@ -181,14 +394,17 @@ self.assertTrue('first event' in first_event_text) add_event('test', message='last event') + # Adding the above event displaces the first_event + events = os.listdir(self.tmp_dir) events.sort() - self.assertTrue(len(events) == 1000, "{0} events found, 1000 expected".format(len(events))) + self.assertTrue(len(events) == num_of_events, + "{0} events found, {1} expected".format(len(events), num_of_events)) first_event = os.path.join(self.tmp_dir, events[0]) with open(first_event) as first_fh: first_event_text = first_fh.read() - self.assertFalse('first event' in first_event_text) + self.assertFalse('first event' in first_event_text, "'first event' not in {0}".format(first_event_text)) self.assertTrue('test event 0' in first_event_text) last_event = os.path.join(self.tmp_dir, events[-1]) @@ -218,3 +434,24 @@ with open(last_event) as last_fh: last_event_text = last_fh.read() self.assertTrue('last event' in last_event_text) + + def test_elapsed_milliseconds(self): + utc_start = datetime.utcnow() + timedelta(days=1) + self.assertEqual(0, elapsed_milliseconds(utc_start)) + + @patch('azurelinuxagent.common.event.EventLogger.save_event') + def test_report_metric(self, mock_event): + event.report_metric("cpu", "%idle", "_total", 10.0) + self.assertEqual(1, mock_event.call_count) + event_json = mock_event.call_args[0][0] + self.assertIn("69B669B9-4AF8-4C50-BDC4-6006FA76E975", event_json) + self.assertIn("%idle", event_json) + import json + event_dictionary = json.loads(event_json) + self.assertEqual(event_dictionary['providerId'], 
"69B669B9-4AF8-4C50-BDC4-6006FA76E975") + for parameter in event_dictionary["parameters"]: + if parameter['name'] == 'Counter': + self.assertEqual(parameter['value'], '%idle') + break + else: + self.fail("Counter '%idle' not found in event parameters: {0}".format(repr(event_dictionary))) diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/test_logger.py walinuxagent-2.2.45/tests/common/test_logger.py --- walinuxagent-2.2.21+really2.2.20/tests/common/test_logger.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/test_logger.py 2019-11-07 00:36:56.000000000 +0000 @@ -12,55 +12,185 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # +import json from datetime import datetime -import azurelinuxagent.common.logger as logger +from mock import patch, MagicMock -from tests.tools import * - -_MSG = "This is our test logging message {0} {1}" +from azurelinuxagent.common import logger +from azurelinuxagent.common.event import add_log_event +from tests.tools import AgentTestCase + +_MSG_INFO = "This is our test info logging message {0} {1}" +_MSG_WARN = "This is our test warn logging message {0} {1}" +_MSG_ERROR = "This is our test error logging message {0} {1}" +_MSG_VERBOSE = "This is our test verbose logging message {0} {1}" _DATA = ["arg1", "arg2"] -class TestLogger(AgentTestCase): - @patch('azurelinuxagent.common.logger.Logger.info') - def test_periodic_emits_if_not_previously_sent(self, mock_info): +class TestLogger(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) logger.reset_periodic() - logger.periodic(logger.EVERY_DAY, _MSG, *_DATA) - mock_info.assert_called_once() + def tearDown(self): + AgentTestCase.tearDown(self) + logger.reset_periodic() + @patch('azurelinuxagent.common.logger.Logger.verbose') + @patch('azurelinuxagent.common.logger.Logger.warn') + 
@patch('azurelinuxagent.common.logger.Logger.error') @patch('azurelinuxagent.common.logger.Logger.info') - def test_periodic_does_not_emit_if_previously_sent(self, mock_info): - logger.reset_periodic() + def test_periodic_emits_if_not_previously_sent(self, mock_info, mock_error, mock_warn, mock_verbose): + logger.periodic_info(logger.EVERY_DAY, _MSG_INFO, logger.LogLevel.INFO, *_DATA) + self.assertEqual(1, mock_info.call_count) + + logger.periodic_error(logger.EVERY_DAY, _MSG_ERROR, logger.LogLevel.ERROR, *_DATA) + self.assertEqual(1, mock_error.call_count) - logger.periodic(logger.EVERY_DAY, _MSG, *_DATA) + logger.periodic_warn(logger.EVERY_DAY, _MSG_WARN, logger.LogLevel.WARNING, *_DATA) + self.assertEqual(1, mock_warn.call_count) + + logger.periodic_verbose(logger.EVERY_DAY, _MSG_VERBOSE, logger.LogLevel.VERBOSE, *_DATA) + self.assertEqual(1, mock_verbose.call_count) + + @patch('azurelinuxagent.common.logger.Logger.verbose') + @patch('azurelinuxagent.common.logger.Logger.warn') + @patch('azurelinuxagent.common.logger.Logger.error') + @patch('azurelinuxagent.common.logger.Logger.info') + def test_periodic_does_not_emit_if_previously_sent(self, mock_info, mock_error, mock_warn, mock_verbose): + # The count does not increase from 1 - the first time it sends the data. 
+ logger.periodic_info(logger.EVERY_DAY, _MSG_INFO, *_DATA) + self.assertIn(hash(_MSG_INFO), logger.DEFAULT_LOGGER.periodic_messages) self.assertEqual(1, mock_info.call_count) - logger.periodic(logger.EVERY_DAY, _MSG, *_DATA) + logger.periodic_info(logger.EVERY_DAY, _MSG_INFO, *_DATA) + self.assertIn(hash(_MSG_INFO), logger.DEFAULT_LOGGER.periodic_messages) self.assertEqual(1, mock_info.call_count) + logger.periodic_warn(logger.EVERY_DAY, _MSG_WARN, *_DATA) + self.assertIn(hash(_MSG_WARN), logger.DEFAULT_LOGGER.periodic_messages) + self.assertEqual(1, mock_warn.call_count) + + logger.periodic_warn(logger.EVERY_DAY, _MSG_WARN, *_DATA) + self.assertIn(hash(_MSG_WARN), logger.DEFAULT_LOGGER.periodic_messages) + self.assertEqual(1, mock_warn.call_count) + + logger.periodic_error(logger.EVERY_DAY, _MSG_ERROR, *_DATA) + self.assertIn(hash(_MSG_ERROR), logger.DEFAULT_LOGGER.periodic_messages) + self.assertEqual(1, mock_error.call_count) + + logger.periodic_error(logger.EVERY_DAY, _MSG_ERROR, *_DATA) + self.assertIn(hash(_MSG_ERROR), logger.DEFAULT_LOGGER.periodic_messages) + self.assertEqual(1, mock_error.call_count) + + logger.periodic_verbose(logger.EVERY_DAY, _MSG_VERBOSE, *_DATA) + self.assertIn(hash(_MSG_VERBOSE), logger.DEFAULT_LOGGER.periodic_messages) + self.assertEqual(1, mock_verbose.call_count) + + logger.periodic_verbose(logger.EVERY_DAY, _MSG_VERBOSE, *_DATA) + self.assertIn(hash(_MSG_VERBOSE), logger.DEFAULT_LOGGER.periodic_messages) + self.assertEqual(1, mock_verbose.call_count) + + self.assertEqual(4, len(logger.DEFAULT_LOGGER.periodic_messages)) + + @patch('azurelinuxagent.common.logger.Logger.verbose') + @patch('azurelinuxagent.common.logger.Logger.warn') + @patch('azurelinuxagent.common.logger.Logger.error') @patch('azurelinuxagent.common.logger.Logger.info') - def test_periodic_emits_after_elapsed_delta(self, mock_info): - logger.reset_periodic() - - logger.periodic(logger.EVERY_DAY, _MSG, *_DATA) + def test_periodic_emits_after_elapsed_delta(self, 
mock_info, mock_error, mock_warn, mock_verbose): + logger.periodic_info(logger.EVERY_DAY, _MSG_INFO, *_DATA) self.assertEqual(1, mock_info.call_count) - logger.periodic(logger.EVERY_DAY, _MSG, *_DATA) + logger.periodic_info(logger.EVERY_DAY, _MSG_INFO, *_DATA) self.assertEqual(1, mock_info.call_count) - logger.DEFAULT_LOGGER.periodic_messages[hash(_MSG)] = \ - datetime.now() - logger.EVERY_DAY - logger.EVERY_HOUR - logger.periodic(logger.EVERY_DAY, _MSG, *_DATA) + logger.DEFAULT_LOGGER.periodic_messages[hash(_MSG_INFO)] = datetime.now() - \ + logger.EVERY_DAY - logger.EVERY_HOUR + logger.periodic_info(logger.EVERY_DAY, _MSG_INFO, *_DATA) + self.assertEqual(2, mock_info.call_count) + + logger.periodic_warn(logger.EVERY_DAY, _MSG_WARN, *_DATA) + self.assertEqual(1, mock_warn.call_count) + logger.periodic_warn(logger.EVERY_DAY, _MSG_WARN, *_DATA) + self.assertEqual(1, mock_warn.call_count) + + logger.DEFAULT_LOGGER.periodic_messages[hash(_MSG_WARN)] = datetime.now() - \ + logger.EVERY_DAY - logger.EVERY_HOUR + logger.periodic_warn(logger.EVERY_DAY, _MSG_WARN, *_DATA) + self.assertEqual(2, mock_info.call_count) + + logger.periodic_error(logger.EVERY_DAY, _MSG_ERROR, *_DATA) + self.assertEqual(1, mock_error.call_count) + logger.periodic_error(logger.EVERY_DAY, _MSG_ERROR, *_DATA) + self.assertEqual(1, mock_error.call_count) + + logger.DEFAULT_LOGGER.periodic_messages[hash(_MSG_ERROR)] = datetime.now() - \ + logger.EVERY_DAY - logger.EVERY_HOUR + logger.periodic_error(logger.EVERY_DAY, _MSG_ERROR, *_DATA) + self.assertEqual(2, mock_info.call_count) + + logger.periodic_verbose(logger.EVERY_DAY, _MSG_VERBOSE, *_DATA) + self.assertEqual(1, mock_verbose.call_count) + logger.periodic_verbose(logger.EVERY_DAY, _MSG_VERBOSE, *_DATA) + self.assertEqual(1, mock_verbose.call_count) + + logger.DEFAULT_LOGGER.periodic_messages[hash(_MSG_VERBOSE)] = datetime.now() - \ + logger.EVERY_DAY - logger.EVERY_HOUR + logger.periodic_verbose(logger.EVERY_DAY, _MSG_VERBOSE, *_DATA) 
self.assertEqual(2, mock_info.call_count) + @patch('azurelinuxagent.common.logger.Logger.verbose') + @patch('azurelinuxagent.common.logger.Logger.warn') + @patch('azurelinuxagent.common.logger.Logger.error') @patch('azurelinuxagent.common.logger.Logger.info') - def test_periodic_forwards_message_and_args(self, mock_info): - logger.reset_periodic() + def test_periodic_forwards_message_and_args(self, mock_info, mock_error, mock_warn, mock_verbose): + logger.periodic_info(logger.EVERY_DAY, _MSG_INFO, *_DATA) + mock_info.assert_called_once_with(_MSG_INFO, *_DATA) + + logger.periodic_error(logger.EVERY_DAY, _MSG_ERROR, *_DATA) + mock_error.assert_called_once_with(_MSG_ERROR, *_DATA) + + logger.periodic_warn(logger.EVERY_DAY, _MSG_WARN, *_DATA) + mock_warn.assert_called_once_with(_MSG_WARN, *_DATA) + + logger.periodic_verbose(logger.EVERY_DAY, _MSG_VERBOSE, *_DATA) + mock_verbose.assert_called_once_with(_MSG_VERBOSE, *_DATA) + + def test_telemetry_logger(self): + mock = MagicMock() + appender = logger.TelemetryAppender(logger.LogLevel.WARNING, mock) + appender.write(logger.LogLevel.WARNING, "--unit-test--") + + mock.assert_called_once_with(logger.LogLevel.WARNING, "--unit-test--") + + @patch('azurelinuxagent.common.event.EventLogger.save_event') + def test_telemetry_logger1(self, mock_save): + appender = logger.TelemetryAppender(logger.LogLevel.WARNING, add_log_event) + appender.write(logger.LogLevel.WARNING, "--unit-test--") + + self.assertEqual(1, mock_save.call_count) + telemetry_json = json.loads(mock_save.call_args[0][0]) + + self.assertEqual('FFF0196F-EE4C-4EAF-9AA5-776F622DEB4F', telemetry_json['providerId']) + self.assertEqual(7, telemetry_json['eventId']) + + self.assertEqual(12, len(telemetry_json['parameters'])) + for x in telemetry_json['parameters']: + if x['name'] == 'EventName': + self.assertEqual(x['value'], 'Log') + + elif x['name'] == 'CapabilityUsed': + self.assertEqual(x['value'], 'WARNING') + + elif x['name'] == 'Context1': + 
self.assertEqual(x['value'], '--unit-test--') + + elif x['name'] == 'Context2': + self.assertEqual(x['value'], '') - logger.periodic(logger.EVERY_DAY, _MSG, *_DATA) - mock_info.assert_called_once_with(_MSG, *_DATA) + elif x['name'] == 'Context3': + self.assertEqual(x['value'], '') diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/test_telemetryevent.py walinuxagent-2.2.45/tests/common/test_telemetryevent.py --- walinuxagent-2.2.21+really2.2.20/tests/common/test_telemetryevent.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/test_telemetryevent.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,49 @@ +# Copyright 2019 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ +# +from azurelinuxagent.common.telemetryevent import TelemetryEvent, TelemetryEventParam +from tests.tools import AgentTestCase + + +def get_test_event(name="DummyExtension", op="Unknown", is_success=True, duration=0, version="foo", evt_type="", is_internal=False, + message="DummyMessage", eventId=1): + event = TelemetryEvent(eventId, "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX") + event.parameters.append(TelemetryEventParam('Name', name)) + event.parameters.append(TelemetryEventParam('Version', str(version))) + event.parameters.append(TelemetryEventParam('IsInternal', is_internal)) + event.parameters.append(TelemetryEventParam('Operation', op)) + event.parameters.append(TelemetryEventParam('OperationSuccess', is_success)) + event.parameters.append(TelemetryEventParam('Message', message)) + event.parameters.append(TelemetryEventParam('Duration', duration)) + event.parameters.append(TelemetryEventParam('ExtensionType', evt_type)) + return event + + +class TestTelemetryEvent(AgentTestCase): + def test_contains_works_for_TelemetryEvent(self): + test_event = get_test_event(message="Dummy Event") + + self.assertTrue('Name' in test_event) + self.assertTrue('Version' in test_event) + self.assertTrue('IsInternal' in test_event) + self.assertTrue('Operation' in test_event) + self.assertTrue('OperationSuccess' in test_event) + self.assertTrue('Message' in test_event) + self.assertTrue('Duration' in test_event) + self.assertTrue('ExtensionType' in test_event) + + self.assertFalse('GAVersion' in test_event) + self.assertFalse('ContainerId' in test_event) \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/test_version.py walinuxagent-2.2.45/tests/common/test_version.py --- walinuxagent-2.2.21+really2.2.20/tests/common/test_version.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/common/test_version.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft 
Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from __future__ import print_function @@ -23,10 +23,76 @@ from azurelinuxagent.common.version import set_current_agent, \ AGENT_LONG_VERSION, AGENT_VERSION, AGENT_NAME, AGENT_NAME_PATTERN, \ - get_f5_platform + get_f5_platform, get_distro from tests.tools import * +def freebsd_system(): + return ["FreeBSD"] + + +def freebsd_system_release(x, y, z): + return "10.0" + + +def openbsd_system(): + return ["OpenBSD"] + + +def openbsd_system_release(x, y, z): + return "20.0" + + +def default_system(): + return [""] + + +def default_system_no_linux_distro(): + return '', '', '' + + +def default_system_exception(): + raise Exception + + +class TestAgentVersion(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) + return + + @mock.patch('platform.system', side_effect=freebsd_system) + @mock.patch('re.sub', side_effect=freebsd_system_release) + def test_distro_is_correct_format_when_freebsd(self, platform_system_name, mock_variable): + osinfo = get_distro() + freebsd_list = ['freebsd', "10.0", '', 'freebsd'] + self.assertListEqual(freebsd_list, osinfo) + return + + @mock.patch('platform.system', side_effect=openbsd_system) + @mock.patch('re.sub', side_effect=openbsd_system_release) + def test_distro_is_correct_format_when_openbsd(self, platform_system_name, mock_variable): + osinfo = get_distro() + openbsd_list = ['openbsd', "20.0", '', 'openbsd'] + self.assertListEqual(openbsd_list, osinfo) + return + + @mock.patch('platform.system', side_effect=default_system) + @mock.patch('platform.dist', side_effect=default_system_no_linux_distro) + def 
test_distro_is_correct_format_when_default_case(self, platform_system_name, default_system_no_linux): + osinfo = get_distro() + default_list = ['', '', '', ''] + self.assertListEqual(default_list, osinfo) + return + + @mock.patch('platform.system', side_effect=default_system) + @mock.patch('platform.dist', side_effect=default_system_exception) + def test_distro_is_correct_for_exception_case(self, platform_system_name, default_system_no_linux): + osinfo = get_distro() + default_list = ['unknown', 'FFFF', '', ''] + self.assertListEqual(default_list, osinfo) + return + + class TestCurrentAgentName(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) diff -Nru walinuxagent-2.2.21+really2.2.20/tests/daemon/__init__.py walinuxagent-2.2.45/tests/daemon/__init__.py --- walinuxagent-2.2.21+really2.2.20/tests/daemon/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/daemon/__init__.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/tests/daemon/test_daemon.py walinuxagent-2.2.45/tests/daemon/test_daemon.py --- walinuxagent-2.2.21+really2.2.20/tests/daemon/test_daemon.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/daemon/test_daemon.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -12,11 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # +from multiprocessing import Process from azurelinuxagent.daemon import * from azurelinuxagent.daemon.main import OPENSSL_FIPS_ENVIRONMENT +from azurelinuxagent.pa.provision.default import ProvisionHandler from tests.tools import * @@ -27,16 +29,17 @@ def __call__(self, *args, **kw): self.count = self.count - 1 - #Stop daemon after restarting for n times + # Stop daemon after restarting for n times if self.count <= 0: self.daemon_handler.running = False raise Exception("Mock unhandled exception") + class TestDaemon(AgentTestCase): @patch("time.sleep") def test_daemon_restart(self, mock_sleep): - #Mock daemon function + # Mock daemon function daemon_handler = get_daemon_handler() mock_daemon = Mock(side_effect=MockDaemonCall(daemon_handler, 2)) daemon_handler.daemon = mock_daemon @@ -51,7 +54,7 @@ @patch("time.sleep") @patch("azurelinuxagent.daemon.main.conf") @patch("azurelinuxagent.daemon.main.sys.exit") - def test_check_pid(self, mock_exit, mock_conf, mock_sleep): + def test_check_pid(self, mock_exit, mock_conf, _): daemon_handler = get_daemon_handler() mock_pid_file = os.path.join(self.tmp_dir, "pid") @@ -65,7 +68,7 @@ @patch("azurelinuxagent.daemon.main.DaemonHandler.check_pid") @patch("azurelinuxagent.common.conf.get_fips_enabled", return_value=True) - def test_set_openssl_fips(self, mock_conf, mock_daemon): + def test_set_openssl_fips(self, _, __): daemon_handler = get_daemon_handler() daemon_handler.running = False with patch.dict("os.environ"): @@ -75,13 +78,59 @@ @patch("azurelinuxagent.daemon.main.DaemonHandler.check_pid") @patch("azurelinuxagent.common.conf.get_fips_enabled", return_value=False) - def test_does_not_set_openssl_fips(self, mock_conf, mock_daemon): + def test_does_not_set_openssl_fips(self, _, __): daemon_handler = get_daemon_handler() 
daemon_handler.running = False with patch.dict("os.environ"): daemon_handler.run() self.assertFalse(OPENSSL_FIPS_ENVIRONMENT in os.environ) - + + @patch('azurelinuxagent.common.conf.get_provisioning_agent', return_value='waagent') + @patch('azurelinuxagent.ga.update.UpdateHandler.run_latest') + @patch('azurelinuxagent.pa.provision.default.ProvisionHandler.run') + def test_daemon_agent_enabled(self, patch_run_provision, patch_run_latest, gpa): + """ + Agent should run normally when no disable_agent is found + """ + with patch('azurelinuxagent.pa.provision.get_provision_handler', return_value=ProvisionHandler()): + self.assertFalse(os.path.exists(conf.get_disable_agent_file_path())) + daemon_handler = get_daemon_handler() + + def stop_daemon(child_args): + daemon_handler.running = False + + patch_run_latest.side_effect = stop_daemon + daemon_handler.run() + + self.assertEqual(1, patch_run_provision.call_count) + self.assertEqual(1, patch_run_latest.call_count) + + @patch('azurelinuxagent.common.conf.get_provisioning_agent', return_value='waagent') + @patch('azurelinuxagent.ga.update.UpdateHandler.run_latest', side_effect=AgentTestCase.fail) + @patch('azurelinuxagent.pa.provision.default.ProvisionHandler.run', side_effect=ProvisionHandler.write_agent_disabled) + def test_daemon_agent_disabled(self, _, patch_run_latest, gpa): + """ + Agent should provision, then sleep forever when disable_agent is found + """ + + with patch('azurelinuxagent.pa.provision.get_provision_handler', return_value=ProvisionHandler()): + # file is created by provisioning handler + self.assertFalse(os.path.exists(conf.get_disable_agent_file_path())) + daemon_handler = get_daemon_handler() + + # we need to assert this thread will sleep forever, so fork it + daemon = Process(target=daemon_handler.run) + daemon.start() + daemon.join(timeout=5) + + self.assertTrue(daemon.is_alive()) + daemon.terminate() + + # disable_agent was written, run_latest was not called + 
self.assertTrue(os.path.exists(conf.get_disable_agent_file_path())) + self.assertEqual(0, patch_run_latest.call_count) + + if __name__ == '__main__': unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/daemon/test_resourcedisk.py walinuxagent-2.2.45/tests/daemon/test_resourcedisk.py --- walinuxagent-2.2.21+really2.2.20/tests/daemon/test_resourcedisk.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/daemon/test_resourcedisk.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from tests.tools import * diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/cgroups/cpu_mount/cpuacct.stat walinuxagent-2.2.45/tests/data/cgroups/cpu_mount/cpuacct.stat --- walinuxagent-2.2.21+really2.2.20/tests/data/cgroups/cpu_mount/cpuacct.stat 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/cgroups/cpu_mount/cpuacct.stat 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,2 @@ +user 50000 +system 100000 diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/cgroups/dummy_proc_stat walinuxagent-2.2.45/tests/data/cgroups/dummy_proc_stat --- walinuxagent-2.2.21+really2.2.20/tests/data/cgroups/dummy_proc_stat 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/cgroups/dummy_proc_stat 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,12 @@ +cpu 40362 2657 5493 349635 341 0 938 0 0 0 +cpu0 10043 1084 1319 86971 129 0 369 0 0 0 +cpu1 10069 653 1244 87708 51 0 202 0 0 0 +cpu2 10416 528 1492 87075 86 0 239 0 0 0 +cpu3 9833 391 1436 87878 73 0 126 0 0 0 +intr 1202440 15 1020 0 0 0 0 0 0 1 579 0 0 85138 0 0 0 0 0 0 0 
0 0 0 35 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 572 65513 499 34 53368 177617 392 149 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ctxt 2294069 +btime 1562092648 +processes 3715 +procs_running 1 +procs_blocked 0 +softirq 1244004 191505 366613 7 1187 62878 0 1006 328256 2205 290347 \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/cgroups/dummy_proc_stat_updated walinuxagent-2.2.45/tests/data/cgroups/dummy_proc_stat_updated --- walinuxagent-2.2.21+really2.2.20/tests/data/cgroups/dummy_proc_stat_updated 1970-01-01 00:00:00.000000000 +0000 +++ 
walinuxagent-2.2.45/tests/data/cgroups/dummy_proc_stat_updated 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,12 @@ +cpu 286534 2705 70195 2971012 1358 0 11637 0 0 0 +cpu0 73053 1096 18020 739721 460 0 3510 0 0 0 +cpu1 70934 664 16722 745032 184 0 2552 0 0 0 +cpu2 74991 539 17715 739096 505 0 3128 0 0 0 +cpu3 67554 405 17736 747162 208 0 2446 0 0 0 +intr 16171532 15 6790 0 0 0 0 0 0 1 2254 0 0 550798 0 0 0 0 0 0 0 0 0 0 35 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 50645 129322 4209 34 458202 1721987 504 149 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ctxt 36112837 +btime 1562092648 +processes 27949 +procs_running 2 
+procs_blocked 1 +softirq 17121870 1838273 5563635 21 8079 119728 0 6931 5982692 2629 3599882 \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/cgroups/memory_mount/memory.max_usage_in_bytes walinuxagent-2.2.45/tests/data/cgroups/memory_mount/memory.max_usage_in_bytes --- walinuxagent-2.2.21+really2.2.20/tests/data/cgroups/memory_mount/memory.max_usage_in_bytes 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/cgroups/memory_mount/memory.max_usage_in_bytes 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1 @@ +1000000 \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/cgroups/memory_mount/memory.usage_in_bytes walinuxagent-2.2.45/tests/data/cgroups/memory_mount/memory.usage_in_bytes --- walinuxagent-2.2.21+really2.2.20/tests/data/cgroups/memory_mount/memory.usage_in_bytes 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/cgroups/memory_mount/memory.usage_in_bytes 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1 @@ +100000 \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/dhcp.leases.custom.dns walinuxagent-2.2.45/tests/data/dhcp.leases.custom.dns --- walinuxagent-2.2.21+really2.2.20/tests/data/dhcp.leases.custom.dns 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/dhcp.leases.custom.dns 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,56 @@ +lease { + interface "eth0"; + fixed-address 10.0.1.4; + server-name "RDE41D2D9BB18C"; + option subnet-mask 255.255.255.0; + option dhcp-lease-time 4294967295; + option routers 10.0.1.1; + option dhcp-message-type 5; + option dhcp-server-identifier 168.63.129.16; + option domain-name-servers invalid; + option dhcp-renewal-time 4294967295; + option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; + option unknown-245 a8:3f:81:01; + option dhcp-rebinding-time 4294967295; + option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; + renew 0 
2152/07/23 23:27:10; + rebind 0 2152/07/23 23:27:10; + expire 0 never; +} +lease { + interface "eth0"; + fixed-address 10.0.1.4; + server-name "RDE41D2D9BB18C"; + option subnet-mask 255.255.255.0; + option dhcp-lease-time 4294967295; + option routers 10.0.1.1; + option dhcp-message-type 5; + option dhcp-server-identifier 168.63.129.16; + option domain-name-servers expired; + option dhcp-renewal-time 4294967295; + option unknown-245 a8:3f:81:02; + option dhcp-rebinding-time 4294967295; + option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; + renew 4 2015/06/16 16:58:54; + rebind 4 2015/06/16 16:58:54; + expire 4 2015/06/16 16:58:54; +} +lease { + interface "eth0"; + fixed-address 10.0.1.4; + server-name "RDE41D2D9BB18C"; + option subnet-mask 255.255.255.0; + option dhcp-lease-time 4294967295; + option routers 10.0.1.1; + option dhcp-message-type 5; + option dhcp-server-identifier 168.63.129.16; + option domain-name-servers 8.8.8.8; + option dhcp-renewal-time 4294967295; + option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; + option unknown-245 a8:3f:81:10; + option dhcp-rebinding-time 4294967295; + option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; + renew 0 2152/07/23 23:27:10; + rebind 0 2152/07/23 23:27:10; + expire 0 2152/07/23 23:27:10; +} diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/dhcp.leases.multi walinuxagent-2.2.45/tests/data/dhcp.leases.multi --- walinuxagent-2.2.21+really2.2.20/tests/data/dhcp.leases.multi 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/dhcp.leases.multi 2019-11-07 00:36:56.000000000 +0000 @@ -10,7 +10,7 @@ option domain-name-servers first; option dhcp-renewal-time 4294967295; option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; - option unknown-245 a8:3f:81:10; + option unknown-245 a8:3f:81:01; option dhcp-rebinding-time 4294967295; option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; renew 0 
2152/07/23 23:27:10; @@ -29,7 +29,7 @@ option domain-name-servers second; option dhcp-renewal-time 4294967295; option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; - option unknown-245 a8:3f:81:10; + option unknown-245 a8:3f:81:02; option dhcp-rebinding-time 4294967295; option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; renew 0 2152/07/23 23:27:10; @@ -48,7 +48,7 @@ option domain-name-servers expired; option dhcp-renewal-time 4294967295; option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; - option unknown-245 a8:3f:81:10; + option unknown-245 a8:3f:81:03; option dhcp-rebinding-time 4294967295; option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; renew 0 2152/07/23 23:27:10; diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/events/collect_and_send_events_invalid_data/1560752429123264-1.tld walinuxagent-2.2.45/tests/data/events/collect_and_send_events_invalid_data/1560752429123264-1.tld --- walinuxagent-2.2.21+really2.2.20/tests/data/events/collect_and_send_events_invalid_data/1560752429123264-1.tld 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/events/collect_and_send_events_invalid_data/1560752429123264-1.tld 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,32 @@ + { + "eventId": 1, + "providerId": "69B669B9-4AF8-4C50-BDC4-6006FA76E975", + "parameters": [ + { + "name": "Name", + "value": "Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux" + }, + { + + "name": "Version", + "value": "1.11.5" + }, + + { + "name": "Operation", + "value": "DscPerformConsistency" + }, + { + "name": "OperationSuccess", + "value": false + }, + { + "name": "Message", + "value": "[0]....{u'PluginName': MI_STRING: u'Common', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'PatchManagement', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'Antimalware', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: 
u'SecurityBaseline', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'DockerBaseline', u'Ensure': MI_STRING: u'Absent'}....\n....{u'PluginName': MI_STRING: u'ChangeTracking', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'SoftwareChangeTracking', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'ServiceChangeTracking', u'Ensure': MI_STRING: u'Present'}....\n[-1]: error msg: "Error occurred processing (0, u'nxOMSPlugin', {u'WorkspaceID': MI_STRING: u'5a46ca0a-e748-4262-9f0a-f55a05513d9e', u'Name': MI_STRING: u'SimpleOMSPluginConfiguration', u'Plugins': MI_INSTANCEA: [{u'PluginName': 'Common', u'Ensure': 'Present'}, {u'PluginName': 'PatchManagement', u'Ensure': 'Present'}, {u'PluginName': 'Antimalware', u'Ensure': 'Present'}, {u'PluginName': 'SecurityBaseline', u'Ensure': 'Present'}, {u'PluginName': 'DockerBaseline', u'Ensure': 'Absent'}, {u'PluginName': 'ChangeTracking', u'Ensure': 'Present'}, {u'PluginName': 'SoftwareChangeTracking', u'Ensure': 'Present'}, {u'PluginName': 'ServiceChangeTracking', u'Ensure': 'Present'}]})"\n....{u'PluginName': MI_STRING: u'Common', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'PatchManagement', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'Antimalware', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'SecurityBaseline', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'DockerBaseline', u'Ensure': MI_STRING: u'Absent'}....\n....{u'PluginName': MI_STRING: u'ChangeTracking', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'SoftwareChangeTracking', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'ServiceChangeTracking', u'Ensure': MI_STRING: u'Present'}....\n[0][0][0]Exiting - closing socket\nExiting - closing socket\nExiting - closing socket\nExiting - closing socket\n\n---LOG---\n2019/06/17 06:14:49: DEBUG: 
Scripts/nxPackage.pyc(251):\nPackageGroup type is \n2019/06/17 06:15:31: WARNING: null(0): EventId=2 Priority=WARNING Job 174DEE2C-0E97-4EEA-9519-6F05A27715B5 : \nDisplaying messages from built-in DSC resources:\n\t WMI channel 1 \n\t ResourceID: \n\t Message : []: [] Starting consistency engine.\n2019/06/17 06:15:31: WARNING: null(0): EventId=2 Priority=WARNING Job 174DEE2C-0E97-4EEA-9519-6F05A27715B5 : \nDisplaying messages from built-in DSC resources:\n\t WMI channel 1 \n\t ResourceID: \n\t Message : []: [] Checking consistency for current configuration.\n2019/06/17 06:15:31: ERROR: Scripts/nxOMSPlugin.pyc(366):\ncopy_all_files failed for src: /opt/microsoft/omsconfig/modules/nxOMSPlugin/DSCResources/MSFT_nxOMSPluginResource/Plugins/Common/conf dest: /etc/opt/microsoft/omsagent/conf/omsagent.d/ with error \n2019/06/17 06:15:31: ERROR: Scripts/nxOMSPlugin.pyc(366):\ncopy_all_files failed for src: /opt/microsoft/omsconfig/modules/nxOMSPlugin/DSCResources/MSFT_nxOMSPluginResource/Plugins/PatchManagement/conf dest: /etc/opt/microsoft/omsagent/conf/omsagent.d/ with error \n2019/06/17 06:15:31: ERROR: Scripts/nxOMSPlugin.pyc(366):\ncopy_all_files failed for src: /opt/microsoft/omsconfig/modules/nxOMSPlugin/DSCResources/MSFT_nxOMSPluginResource/Plugins/Antimalware/conf dest: /etc/opt/microsoft/omsagent/conf/omsagent.d/ with error \n2019/06/17 06:15:31: ERROR: Scripts/nxOMSPlugin.pyc(366):\ncopy_all_files failed for src: /opt/microsoft/omsconfig/modules/nxOMSPlugin/DSCResources/MSFT_nxOMSPluginResource/Plugins/SecurityBaseline/conf dest: /etc/opt/microsoft/omsagent/conf/omsagent.d/ with error \n2019/06/17 06:15:31: ERROR: Scripts/nxOMSPlugin.pyc(366):\ncopy_all_files failed for src: /opt/microsoft/omsconfig/modules/nxOMSPlugin/DSCResources/MSFT_nxOMSPluginResource/Plugins/SoftwareChangeTracking/conf dest: /etc/opt/microsoft/omsagent/conf/omsagent.d/ with error \n2019/06/17 06:15:31: ERROR: Scripts/nxOMSPlugin.pyc(366):\ncopy_all_files failed for src: 
/opt/microsoft/omsconfig/modules/nxOMSPlugin/DSCResources/MSFT_nxOMSPluginResource/Plugins/ServiceChangeTracking/conf dest: /etc/opt/microsoft/omsagent/conf/omsagent.d/ with error \n2019/06/17 06:15:32: WARNING: null(0): EventId=2 Priority=WARNING Job 174DEE2C-0E97-4EEA-9519-6F05A27715B5 : \nDisplaying messages from built-in DSC resources:\n\t WMI channel 1 \n\t ResourceID: \n\t Message : []: [] Consistency check completed.\n2019/06/17 06:15:32: ERROR: null(0): EventId=1 Priority=ERROR Job 174DEE2C-0E97-4EEA-9519-6F05A27715B5 : \nDSC Engine Error : \n\t Error Message cURL failed to perform on this base url: https://scus-agentservice-prod-1.azure-automation.net/Accounts/5a46ca0a-e748-4262-9f0a-f55a05513d9e/Nodes(AgentId='6446114e-b8a9-410b-af0e-41d4d0ce83b6')/SendReport with this error message: Problem with the local SSL certificate. Make sure cURL and SSL libraries are up to date. \n\tError Code : 1 \n2019/06/17 06:15:32: ERROR: null(0): EventId=1 Priority=ERROR Job 174DEE2C-0E97-4EEA-9519-6F05A27715B5 : \nDSC Engine Error : \n\t Error Message Failed to report to all reporting servers. Last server failed with HTTP response code: 0. 
\n\tError Code : 1 \n" + }, + { + "name": "Duration", + "value": 300000 + } + ] + } \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/events/collect_and_send_events_invalid_data/1560752429123264.tld walinuxagent-2.2.45/tests/data/events/collect_and_send_events_invalid_data/1560752429123264.tld --- walinuxagent-2.2.21+really2.2.20/tests/data/events/collect_and_send_events_invalid_data/1560752429123264.tld 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/events/collect_and_send_events_invalid_data/1560752429123264.tld 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,32 @@ + { + "eventId": 1, + "providerId": "69B669B9-4AF8-4C50-BDC4-6006FA76E975", + "parameters": [ + { + "name": "Name", + "value": "Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux" + }, + { + + "name": "Version", + "value": "1.11.5" + }, + + { + "name": "Operation", + "value": "DscPerformConsistency" + }, + { + "name": "OperationSuccess", + "value": false + }, + { + "name": "Message", + "value": "[0]....{u'PluginName': MI_STRING: u'Common', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'PatchManagement', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'Antimalware', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'SecurityBaseline', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'DockerBaseline', u'Ensure': MI_STRING: u'Absent'}....\n....{u'PluginName': MI_STRING: u'ChangeTracking', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'SoftwareChangeTracking', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'ServiceChangeTracking', u'Ensure': MI_STRING: u'Present'}....\n[-1]: error msg: "Error occurred processing (0, u'nxOMSPlugin', {u'WorkspaceID': MI_STRING: u'5a46ca0a-e748-4262-9f0a-f55a05513d9e', u'Name': MI_STRING: u'SimpleOMSPluginConfiguration', u'Plugins': MI_INSTANCEA: [{u'PluginName': 'Common', 
u'Ensure': 'Present'}, {u'PluginName': 'PatchManagement', u'Ensure': 'Present'}, {u'PluginName': 'Antimalware', u'Ensure': 'Present'}, {u'PluginName': 'SecurityBaseline', u'Ensure': 'Present'}, {u'PluginName': 'DockerBaseline', u'Ensure': 'Absent'}, {u'PluginName': 'ChangeTracking', u'Ensure': 'Present'}, {u'PluginName': 'SoftwareChangeTracking', u'Ensure': 'Present'}, {u'PluginName': 'ServiceChangeTracking', u'Ensure': 'Present'}]})"\n....{u'PluginName': MI_STRING: u'Common', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'PatchManagement', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'Antimalware', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'SecurityBaseline', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'DockerBaseline', u'Ensure': MI_STRING: u'Absent'}....\n....{u'PluginName': MI_STRING: u'ChangeTracking', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'SoftwareChangeTracking', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'ServiceChangeTracking', u'Ensure': MI_STRING: u'Present'}....\n[0][0][0]Exiting - closing socket\nExiting - closing socket\nExiting - closing socket\nExiting - closing socket\n\n---LOG---\n2019/06/17 06:14:49: DEBUG: Scripts/nxPackage.pyc(251):\nPackageGroup type is \n2019/06/17 06:15:31: WARNING: null(0): EventId=2 Priority=WARNING Job 174DEE2C-0E97-4EEA-9519-6F05A27715B5 : \nDisplaying messages from built-in DSC resources:\n\t WMI channel 1 \n\t ResourceID: \n\t Message : []: [] Starting consistency engine.\n2019/06/17 06:15:31: WARNING: null(0): EventId=2 Priority=WARNING Job 174DEE2C-0E97-4EEA-9519-6F05A27715B5 : \nDisplaying messages from built-in DSC resources:\n\t WMI channel 1 \n\t ResourceID: \n\t Message : []: [] Checking consistency for current configuration.\n2019/06/17 06:15:31: ERROR: Scripts/nxOMSPlugin.pyc(366):\ncopy_all_files failed for src: 
/opt/microsoft/omsconfig/modules/nxOMSPlugin/DSCResources/MSFT_nxOMSPluginResource/Plugins/Common/conf dest: /etc/opt/microsoft/omsagent/conf/omsagent.d/ with error \n2019/06/17 06:15:31: ERROR: Scripts/nxOMSPlugin.pyc(366):\ncopy_all_files failed for src: /opt/microsoft/omsconfig/modules/nxOMSPlugin/DSCResources/MSFT_nxOMSPluginResource/Plugins/PatchManagement/conf dest: /etc/opt/microsoft/omsagent/conf/omsagent.d/ with error \n2019/06/17 06:15:31: ERROR: Scripts/nxOMSPlugin.pyc(366):\ncopy_all_files failed for src: /opt/microsoft/omsconfig/modules/nxOMSPlugin/DSCResources/MSFT_nxOMSPluginResource/Plugins/Antimalware/conf dest: /etc/opt/microsoft/omsagent/conf/omsagent.d/ with error \n2019/06/17 06:15:31: ERROR: Scripts/nxOMSPlugin.pyc(366):\ncopy_all_files failed for src: /opt/microsoft/omsconfig/modules/nxOMSPlugin/DSCResources/MSFT_nxOMSPluginResource/Plugins/SecurityBaseline/conf dest: /etc/opt/microsoft/omsagent/conf/omsagent.d/ with error \n2019/06/17 06:15:31: ERROR: Scripts/nxOMSPlugin.pyc(366):\ncopy_all_files failed for src: /opt/microsoft/omsconfig/modules/nxOMSPlugin/DSCResources/MSFT_nxOMSPluginResource/Plugins/SoftwareChangeTracking/conf dest: /etc/opt/microsoft/omsagent/conf/omsagent.d/ with error \n2019/06/17 06:15:31: ERROR: Scripts/nxOMSPlugin.pyc(366):\ncopy_all_files failed for src: /opt/microsoft/omsconfig/modules/nxOMSPlugin/DSCResources/MSFT_nxOMSPluginResource/Plugins/ServiceChangeTracking/conf dest: /etc/opt/microsoft/omsagent/conf/omsagent.d/ with error \n2019/06/17 06:15:32: WARNING: null(0): EventId=2 Priority=WARNING Job 174DEE2C-0E97-4EEA-9519-6F05A27715B5 : \nDisplaying messages from built-in DSC resources:\n\t WMI channel 1 \n\t ResourceID: \n\t Message : []: [] Consistency check completed.\n2019/06/17 06:15:32: ERROR: null(0): EventId=1 Priority=ERROR Job 174DEE2C-0E97-4EEA-9519-6F05A27715B5 : \nDSC Engine Error : \n\t Error Message cURL failed to perform on this base url: 
https://scus-agentservice-prod-1.azure-automation.net/Accounts/5a46ca0a-e748-4262-9f0a-f55a05513d9e/Nodes(AgentId='6446114e-b8a9-410b-af0e-41d4d0ce83b6')/SendReport with this error message: Problem with the local SSL certificate. Make sure cURL and SSL libraries are up to date. \n\tError Code : 1 \n2019/06/17 06:15:32: ERROR: null(0): EventId=1 Priority=ERROR Job 174DEE2C-0E97-4EEA-9519-6F05A27715B5 : \nDSC Engine Error : \n\t Error Message Failed to report to all reporting servers. Last server failed with HTTP response code: 0. \n\tError Code : 1 \n" + }, + { + "name": "Duration", + "value": 300000 + } + ] + } \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/events/collect_and_send_events_invalid_data/1560752429133818-1.tld walinuxagent-2.2.45/tests/data/events/collect_and_send_events_invalid_data/1560752429133818-1.tld --- walinuxagent-2.2.21+really2.2.20/tests/data/events/collect_and_send_events_invalid_data/1560752429133818-1.tld 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/events/collect_and_send_events_invalid_data/1560752429133818-1.tld 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,32 @@ + { + "eventId": 1, + "providerId": "69B669B9-4AF8-4C50-BDC4-6006FA76E975", + "parameters": [ + { + "name": "Name", + "value": "Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux" + }, + { + + "name": "Version", + "value": "1.11.5" + }, + + { + "name": "Operation", + "value": "DscPerformInventory" + }, + { + "name": "OperationSuccess", + "value": false + }, + { + "name": "Message", + "value": "(0, {'__Inventory': MI_INSTANCEA: [{'Name': MI_STRING: 'gcc-8-base', 'Version': MI_STRING: '8.3.0-6ubuntu1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libgcc1', 'Version': MI_STRING: '1:8.3.0-6ubuntu1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 
'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libstdc++6', 'Version': MI_STRING: '8.3.0-6ubuntu1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libnss-systemd', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libsystemd0', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpam-systemd', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'systemd', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'udev', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libudev1', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'dbus', 'Version': MI_STRING: '1.12.2-1ubuntu1.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libdbus-1-3', 'Version': MI_STRING: '1.12.2-1ubuntu1.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'systemd-sysv', 
'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libapt-pkg5.0', 'Version': MI_STRING: '1.6.11', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libapt-inst2.0', 'Version': MI_STRING: '1.6.11', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libdb5.3', 'Version': MI_STRING: '5.3.28-13.1ubuntu1.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'apt', 'Version': MI_STRING: '1.6.11', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'apt-utils', 'Version': MI_STRING: '1.6.11', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpython3.6', 'Version': MI_STRING: '3.6.8-1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libssl1.1', 'Version': MI_STRING: '1.1.1-1ubuntu2.1~18.04.2', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python3.6', 'Version': MI_STRING: '3.6.8-1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python3.6-minimal', 'Version': MI_STRING: '3.6.8-1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpython3.6-stdlib', 'Version': 
MI_STRING: '3.6.8-1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpython3.6-minimal', 'Version': MI_STRING: '3.6.8-1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpython2.7', 'Version': MI_STRING: '2.7.15-4ubuntu4~18.04', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python2.7', 'Version': MI_STRING: '2.7.15-4ubuntu4~18.04', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpython2.7-stdlib', 'Version': MI_STRING: '2.7.15-4ubuntu4~18.04', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python2.7-minimal', 'Version': MI_STRING: '2.7.15-4ubuntu4~18.04', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpython2.7-minimal', 'Version': MI_STRING: '2.7.15-4ubuntu4~18.04', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'update-notifier-common', 'Version': MI_STRING: '3.192.1.7', 'Architecture': MI_STRING: 'all', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libdw1', 'Version': MI_STRING: '0.170-0.4ubuntu0.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libelf1', 'Version': MI_STRING: '0.170-0.4ubuntu0.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, 
Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libglib2.0-0', 'Version': MI_STRING: '2.56.4-0ubuntu0.18.04.3', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libglib2.0-data', 'Version': MI_STRING: '2.56.4-0ubuntu0.18.04.3', 'Architecture': MI_STRING: 'all', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'openssl', 'Version': MI_STRING: '1.1.1-1ubuntu2.1~18.04.2', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'xxd', 'Version': MI_STRING: '2:8.0.1453-1ubuntu1.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'vim', 'Version': MI_STRING: '2:8.0.1453-1ubuntu1.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'vim-tiny', 'Version': MI_STRING: '2:8.0.1453-1ubuntu1.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'vim-runtime', 'Version': MI_STRING: '2:8.0.1453-1ubuntu1.1', 'Architecture': MI_STRING: 'all', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'vim-common', 'Version': MI_STRING: '2:8.0.1453-1ubuntu1.1', 'Architecture': MI_STRING: 'all', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python3-gdbm', 'Version': MI_STRING: '3.6.8-1~18.04', 'Architecture': MI_STRING: 'amd64', 
'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libcups2', 'Version': MI_STRING: '2.2.7-1ubuntu2.6', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-modules-4.18.0-1019-azure', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-image-4.18.0-1019-azure', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-azure', 'Version': MI_STRING: '4.18.0.1019.18', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-image-azure', 'Version': MI_STRING: '4.18.0.1019.18', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-azure-headers-4.18.0-1019', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'all', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-headers-4.18.0-1019-azure', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-headers-azure', 'Version': MI_STRING: '4.18.0.1019.18', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': 
MI_STRING: 'linux-azure-tools-4.18.0-1019', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-tools-4.18.0-1019-azure', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-tools-azure', 'Version': MI_STRING: '4.18.0.1019.18', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-azure-cloud-tools-4.18.0-1019', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-cloud-tools-4.18.0-1019-azure', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-cloud-tools-azure', 'Version': MI_STRING: '4.18.0.1019.18', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python3-cryptography', 'Version': MI_STRING: '2.1.4-1ubuntu1.3', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python3-jinja2', 'Version': MI_STRING: '2.10-1ubuntu0.18.04.1', 'Architecture': MI_STRING: 'all', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}]})Exiting - closing socket\n\n---LOG---\n2019/06/17 06:16:55: DEBUG: 
Scripts/nxAvailableUpdates.py(62):\nCompleted checking Available Updates\n2019/06/17 06:17:56: DEBUG: Scripts/nxAvailableUpdates.pyc(51):\nStarting to check Available Updates\n2019/06/17 06:17:58: DEBUG: Scripts/nxAvailableUpdates.pyc(80):\nRetrieving update package list using cmd:LANG=en_US.UTF8 apt-get -s dist-upgrade | grep "^Inst"\n2019/06/17 06:17:59: DEBUG: Scripts/nxAvailableUpdates.pyc(96):\nNumber of packages being written to the XML: 56\n2019/06/17 06:17:59: DEBUG: Scripts/nxAvailableUpdates.pyc(62):\nCompleted checking Available Updates\n" + }, + { + "name": "Duration", + "value": 300000 + } + ] + } \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/events/collect_and_send_events_invalid_data/1560752429133818.tld walinuxagent-2.2.45/tests/data/events/collect_and_send_events_invalid_data/1560752429133818.tld --- walinuxagent-2.2.21+really2.2.20/tests/data/events/collect_and_send_events_invalid_data/1560752429133818.tld 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/events/collect_and_send_events_invalid_data/1560752429133818.tld 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,32 @@ + { + "eventId": 1, + "providerId": "69B669B9-4AF8-4C50-BDC4-6006FA76E975", + "parameters": [ + { + "name": "Name", + "value": "Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux" + }, + { + + "name": "Version", + "value": "1.11.5" + }, + + { + "name": "Operation", + "value": "DscPerformInventory" + }, + { + "name": "OperationSuccess", + "value": false + }, + { + "name": "Message", + "value": "(0, {'__Inventory': MI_INSTANCEA: [{'Name': MI_STRING: 'gcc-8-base', 'Version': MI_STRING: '8.3.0-6ubuntu1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libgcc1', 'Version': MI_STRING: '1:8.3.0-6ubuntu1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 
'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libstdc++6', 'Version': MI_STRING: '8.3.0-6ubuntu1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libnss-systemd', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libsystemd0', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpam-systemd', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'systemd', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'udev', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libudev1', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'dbus', 'Version': MI_STRING: '1.12.2-1ubuntu1.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libdbus-1-3', 'Version': MI_STRING: '1.12.2-1ubuntu1.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'systemd-sysv', 
'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libapt-pkg5.0', 'Version': MI_STRING: '1.6.11', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libapt-inst2.0', 'Version': MI_STRING: '1.6.11', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libdb5.3', 'Version': MI_STRING: '5.3.28-13.1ubuntu1.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'apt', 'Version': MI_STRING: '1.6.11', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'apt-utils', 'Version': MI_STRING: '1.6.11', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpython3.6', 'Version': MI_STRING: '3.6.8-1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libssl1.1', 'Version': MI_STRING: '1.1.1-1ubuntu2.1~18.04.2', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python3.6', 'Version': MI_STRING: '3.6.8-1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python3.6-minimal', 'Version': MI_STRING: '3.6.8-1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpython3.6-stdlib', 'Version': 
MI_STRING: '3.6.8-1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpython3.6-minimal', 'Version': MI_STRING: '3.6.8-1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpython2.7', 'Version': MI_STRING: '2.7.15-4ubuntu4~18.04', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python2.7', 'Version': MI_STRING: '2.7.15-4ubuntu4~18.04', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpython2.7-stdlib', 'Version': MI_STRING: '2.7.15-4ubuntu4~18.04', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python2.7-minimal', 'Version': MI_STRING: '2.7.15-4ubuntu4~18.04', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpython2.7-minimal', 'Version': MI_STRING: '2.7.15-4ubuntu4~18.04', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'update-notifier-common', 'Version': MI_STRING: '3.192.1.7', 'Architecture': MI_STRING: 'all', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libdw1', 'Version': MI_STRING: '0.170-0.4ubuntu0.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libelf1', 'Version': MI_STRING: '0.170-0.4ubuntu0.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, 
Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libglib2.0-0', 'Version': MI_STRING: '2.56.4-0ubuntu0.18.04.3', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libglib2.0-data', 'Version': MI_STRING: '2.56.4-0ubuntu0.18.04.3', 'Architecture': MI_STRING: 'all', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'openssl', 'Version': MI_STRING: '1.1.1-1ubuntu2.1~18.04.2', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'xxd', 'Version': MI_STRING: '2:8.0.1453-1ubuntu1.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'vim', 'Version': MI_STRING: '2:8.0.1453-1ubuntu1.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'vim-tiny', 'Version': MI_STRING: '2:8.0.1453-1ubuntu1.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'vim-runtime', 'Version': MI_STRING: '2:8.0.1453-1ubuntu1.1', 'Architecture': MI_STRING: 'all', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'vim-common', 'Version': MI_STRING: '2:8.0.1453-1ubuntu1.1', 'Architecture': MI_STRING: 'all', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python3-gdbm', 'Version': MI_STRING: '3.6.8-1~18.04', 'Architecture': MI_STRING: 'amd64', 
'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libcups2', 'Version': MI_STRING: '2.2.7-1ubuntu2.6', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-modules-4.18.0-1019-azure', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-image-4.18.0-1019-azure', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-azure', 'Version': MI_STRING: '4.18.0.1019.18', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-image-azure', 'Version': MI_STRING: '4.18.0.1019.18', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-azure-headers-4.18.0-1019', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'all', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-headers-4.18.0-1019-azure', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-headers-azure', 'Version': MI_STRING: '4.18.0.1019.18', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': 
MI_STRING: 'linux-azure-tools-4.18.0-1019', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-tools-4.18.0-1019-azure', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-tools-azure', 'Version': MI_STRING: '4.18.0.1019.18', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-azure-cloud-tools-4.18.0-1019', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-cloud-tools-4.18.0-1019-azure', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-cloud-tools-azure', 'Version': MI_STRING: '4.18.0.1019.18', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python3-cryptography', 'Version': MI_STRING: '2.1.4-1ubuntu1.3', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python3-jinja2', 'Version': MI_STRING: '2.10-1ubuntu0.18.04.1', 'Architecture': MI_STRING: 'all', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}]})Exiting - closing socket\n\n---LOG---\n2019/06/17 06:16:55: DEBUG: 
Scripts/nxAvailableUpdates.py(62):\nCompleted checking Available Updates\n2019/06/17 06:17:56: DEBUG: Scripts/nxAvailableUpdates.pyc(51):\nStarting to check Available Updates\n2019/06/17 06:17:58: DEBUG: Scripts/nxAvailableUpdates.pyc(80):\nRetrieving update package list using cmd:LANG=en_US.UTF8 apt-get -s dist-upgrade | grep "^Inst"\n2019/06/17 06:17:59: DEBUG: Scripts/nxAvailableUpdates.pyc(96):\nNumber of packages being written to the XML: 56\n2019/06/17 06:17:59: DEBUG: Scripts/nxAvailableUpdates.pyc(62):\nCompleted checking Available Updates\n" + }, + { + "name": "Duration", + "value": 300000 + } + ] + } \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/events/collect_and_send_events_unreadable_data/IncorrectExtension.tmp walinuxagent-2.2.45/tests/data/events/collect_and_send_events_unreadable_data/IncorrectExtension.tmp --- walinuxagent-2.2.21+really2.2.20/tests/data/events/collect_and_send_events_unreadable_data/IncorrectExtension.tmp 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/events/collect_and_send_events_unreadable_data/IncorrectExtension.tmp 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,32 @@ + { + "eventId": 1, + "providerId": "69B669B9-4AF8-4C50-BDC4-6006FA76E975", + "parameters": [ + { + "name": "Name", + "value": "Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux" + }, + { + + "name": "Version", + "value": "1.11.5" + }, + + { + "name": "Operation", + "value": "Install" + }, + { + "name": "OperationSuccess", + "value": false + }, + { + "name": "Message", + "value": "HelloWorld" + }, + { + "name": "Duration", + "value": 300000 + } + ] + } \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/events/collect_and_send_events_unreadable_data/UnreadableFile.tld walinuxagent-2.2.45/tests/data/events/collect_and_send_events_unreadable_data/UnreadableFile.tld --- 
walinuxagent-2.2.21+really2.2.20/tests/data/events/collect_and_send_events_unreadable_data/UnreadableFile.tld 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/events/collect_and_send_events_unreadable_data/UnreadableFile.tld 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,32 @@ + { + "eventId": 1, + "providerId": "69B669B9-4AF8-4C50-BDC4-6006FA76E975", + "parameters": [ + { + "name": "Name", + "value": "Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux" + }, + { + + "name": "Version", + "value": "1.11.5" + }, + + { + "name": "Operation", + "value": "Install" + }, + { + "name": "OperationSuccess", + "value": false + }, + { + "name": "Message", + "value": "HelloWorld" + }, + { + "name": "Duration", + "value": 300000 + } + ] + } \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/events/collect_and_send_extension_stdout_stderror/dummy_stderr_with_non_ascii_characters walinuxagent-2.2.45/tests/data/events/collect_and_send_extension_stdout_stderror/dummy_stderr_with_non_ascii_characters --- walinuxagent-2.2.21+really2.2.20/tests/data/events/collect_and_send_extension_stdout_stderror/dummy_stderr_with_non_ascii_characters 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/events/collect_and_send_extension_stdout_stderror/dummy_stderr_with_non_ascii_characters 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1 @@ +STDERR Worldעיות אחרותआज"" \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/events/collect_and_send_extension_stdout_stderror/dummy_stdout_with_non_ascii_characters walinuxagent-2.2.45/tests/data/events/collect_and_send_extension_stdout_stderror/dummy_stdout_with_non_ascii_characters --- walinuxagent-2.2.21+really2.2.20/tests/data/events/collect_and_send_extension_stdout_stderror/dummy_stdout_with_non_ascii_characters 1970-01-01 00:00:00.000000000 +0000 +++ 
walinuxagent-2.2.45/tests/data/events/collect_and_send_extension_stdout_stderror/dummy_stdout_with_non_ascii_characters 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1 @@ +STDOUT Worldעיות אחרותआज"" \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/events/collect_and_send_extension_stdout_stderror/dummy_valid_stderr walinuxagent-2.2.45/tests/data/events/collect_and_send_extension_stdout_stderror/dummy_valid_stderr --- walinuxagent-2.2.21+really2.2.20/tests/data/events/collect_and_send_extension_stdout_stderror/dummy_valid_stderr 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/events/collect_and_send_extension_stdout_stderror/dummy_valid_stderr 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1 @@ +The five boxing wizards jump quickly. \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/events/collect_and_send_extension_stdout_stderror/dummy_valid_stdout walinuxagent-2.2.45/tests/data/events/collect_and_send_extension_stdout_stderror/dummy_valid_stdout --- walinuxagent-2.2.21+really2.2.20/tests/data/events/collect_and_send_extension_stdout_stderror/dummy_valid_stdout 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/events/collect_and_send_extension_stdout_stderror/dummy_valid_stdout 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1 @@ +The quick brown fox jumps over the lazy dog. 
\ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/ext/dsc_event.json walinuxagent-2.2.45/tests/data/ext/dsc_event.json --- walinuxagent-2.2.21+really2.2.20/tests/data/ext/dsc_event.json 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/ext/dsc_event.json 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,38 @@ +{ + "eventId":"1", + "parameters":[ + { + "name":"Name", + "value":"Microsoft.Azure.GuestConfiguration.DSCAgent" + }, + { + "name":"Version", + "value":"1.18.0" + }, + { + "name":"IsInternal", + "value":true + }, + { + "name":"Operation", + "value":"GuestConfigAgent.Scenario" + }, + { + "name":"OperationSuccess", + "value":true + }, + { + "name":"Message", + "value":"[2019-11-05 10:06:52.688] [PID 11487] [TID 11513] [Timer Manager] [INFO] [89f9cf47-c02d-4774-b21a-abdf2beb3cd9] Run pull refresh for timer 'dsc_refresh_timer'\n" + }, + { + "name":"Duration", + "value":0 + }, + { + "name":"ExtentionType", + "value":"" + } + ], + "providerId":"69B669B9-4AF8-4C50-BDC4-6006FA76E975" +} \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/ext/event_from_agent.json walinuxagent-2.2.45/tests/data/ext/event_from_agent.json --- walinuxagent-2.2.21+really2.2.20/tests/data/ext/event_from_agent.json 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/ext/event_from_agent.json 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1 @@ +{"eventId": 1, "providerId": "69B669B9-4AF8-4C50-BDC4-6006FA76E975", "parameters": [{"name": "Name", "value": "WALinuxAgent"}, {"name": "Version", "value": "2.2.44"}, {"name": "IsInternal", "value": false}, {"name": "Operation", "value": "ProcessGoalState"}, {"name": "OperationSuccess", "value": true}, {"name": "Message", "value": "Incarnation 12"}, {"name": "Duration", "value": 16610}, {"name": "ExtensionType", "value": ""}, {"name": "GAVersion", "value": "WALinuxAgent-2.2.44"}, {"name": "ContainerId", "value": "TEST-CONTAINER-ID-ALREADY-PRESENT-GUID"}, 
{"name": "OpcodeName", "value": "2019-11-02 01:42:49.188030"}, {"name": "EventTid", "value": 140240384030528}, {"name": "EventPid", "value": 108573}, {"name": "TaskName", "value": "ExtHandler"}, {"name": "KeywordName", "value": ""}]} \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/ext/event_from_extension.xml walinuxagent-2.2.45/tests/data/ext/event_from_extension.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/ext/event_from_extension.xml 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/ext/event_from_extension.xml 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/ext/event.json walinuxagent-2.2.45/tests/data/ext/event.json --- walinuxagent-2.2.21+really2.2.20/tests/data/ext/event.json 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/ext/event.json 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,38 @@ +{ + "eventId":1, + "providerId":"69B669B9-4AF8-4C50-BDC4-6006FA76E975", + "parameters":[ + { + "name":"Name", + "value":"CustomScript" + }, + { + "name":"Version", + "value":"1.4.1.0" + }, + { + "name":"IsInternal", + "value":false + }, + { + "name":"Operation", + "value":"RunScript" + }, + { + "name":"OperationSuccess", + "value":true + }, + { + "name":"Message", + "value":"(01302)Script is finished. 
---stdout--- hello ---errout--- " + }, + { + "name":"Duration", + "value":0 + }, + { + "name":"ExtensionType", + "value":"" + } + ] +} \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/ext/event.xml walinuxagent-2.2.45/tests/data/ext/event.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/ext/event.xml 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/ext/event.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ - \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/ext/sample_ext-1.3.0/exit.sh walinuxagent-2.2.45/tests/data/ext/sample_ext-1.3.0/exit.sh --- walinuxagent-2.2.21+really2.2.20/tests/data/ext/sample_ext-1.3.0/exit.sh 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/ext/sample_ext-1.3.0/exit.sh 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,3 @@ +#!/usr/bin/env sh + +exit $* \ No newline at end of file Binary files /tmp/tmpe6xs_1et/n7BhQf5Ahp/walinuxagent-2.2.21+really2.2.20/tests/data/ext/sample_ext-1.3.0.zip and /tmp/tmpe6xs_1et/iC60lf7qSG/walinuxagent-2.2.45/tests/data/ext/sample_ext-1.3.0.zip differ Binary files /tmp/tmpe6xs_1et/n7BhQf5Ahp/walinuxagent-2.2.21+really2.2.20/tests/data/ga/WALinuxAgent-2.2.19.zip and /tmp/tmpe6xs_1et/iC60lf7qSG/walinuxagent-2.2.45/tests/data/ga/WALinuxAgent-2.2.19.zip differ Binary files /tmp/tmpe6xs_1et/n7BhQf5Ahp/walinuxagent-2.2.21+really2.2.20/tests/data/ga/WALinuxAgent-2.2.45.zip and /tmp/tmpe6xs_1et/iC60lf7qSG/walinuxagent-2.2.45/tests/data/ga/WALinuxAgent-2.2.45.zip differ diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/imds/unicode.json walinuxagent-2.2.45/tests/data/imds/unicode.json --- walinuxagent-2.2.21+really2.2.20/tests/data/imds/unicode.json 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/imds/unicode.json 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,46 @@ +{ + "compute": { + "location": "wéstus", + "name": "héalth", + "offer": "UbuntuSérvér", + "osType": "Linux", + 
"placementGroupId": "", + "platformFaultDomain": "0", + "platformUpdateDomain": "0", + "publisher": "Canonical", + "resourceGroupName": "tésts", + "sku": "16.04-LTS", + "subscriptionId": "21b2dc34-bcé6-4é63-9449-d2a8d1c2339é", + "tags": "", + "version": "16.04.201805220", + "vmId": "é7fdbfc4-2déb-4a4é-8615-éa6aaf50162é", + "vmScaleSetName": "", + "vmSize": "Standard_D2_V2", + "zone": "" + }, + "network": { + "interface": [ + { + "ipv4": { + "ipAddress": [ + { + "privateIpAddress": "10.0.1.4", + "publicIpAddress": "40.112.128.120" + } + ], + "subnet": [ + { + "address": "10.0.1.0", + "prefix": "24" + } + ] + }, + "ipv6": { + "ipAddress": [] + }, + "macAddress": "000D3A3382E8" + } + ] + } +} + diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/imds/valid.json walinuxagent-2.2.45/tests/data/imds/valid.json --- walinuxagent-2.2.21+really2.2.20/tests/data/imds/valid.json 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/imds/valid.json 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,46 @@ +{ + "compute": { + "location": "westus", + "name": "health", + "offer": "UbuntuServer", + "osType": "Linux", + "placementGroupId": "", + "platformFaultDomain": "0", + "platformUpdateDomain": "0", + "publisher": "Canonical", + "resourceGroupName": "tests", + "sku": "16.04-LTS", + "subscriptionId": "21b2dc34-bce6-4e63-9449-d2a8d1c2339e", + "tags": "", + "version": "16.04.201805220", + "vmId": "e7fdbfc4-2deb-4a4e-8615-ea6aaf50162e", + "vmScaleSetName": "", + "vmSize": "Standard_D2_V2", + "zone": "" + }, + "network": { + "interface": [ + { + "ipv4": { + "ipAddress": [ + { + "privateIpAddress": "10.0.1.4", + "publicIpAddress": "40.112.128.120" + } + ], + "subnet": [ + { + "address": "10.0.1.0", + "prefix": "24" + } + ] + }, + "ipv6": { + "ipAddress": [] + }, + "macAddress": "000D3A3382E8" + } + ] + } +} + diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/ovf-env-2.xml walinuxagent-2.2.45/tests/data/ovf-env-2.xml --- 
walinuxagent-2.2.21+really2.2.20/tests/data/ovf-env-2.xml 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/ovf-env-2.xml 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,40 @@ + + + + 1.0 + + LinuxProvisioningConfiguration + HostName + UserName + UserPassword + false + + + + EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 + $HOME/UserName/.ssh/authorized_keys + ssh-rsa AAAANOTAREALKEY== foo@bar.local + + + + + EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 + $HOME/UserName/.ssh/id_rsa + + + + CustomData + + + + 1.0 + + kms.core.windows.net + true + + true + true + false + + + diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/ovf-env-3.xml walinuxagent-2.2.45/tests/data/ovf-env-3.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/ovf-env-3.xml 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/ovf-env-3.xml 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,40 @@ + + + + 1.0 + + LinuxProvisioningConfiguration + HostName + UserName + UserPassword + false + + + + EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 + $HOME/UserName/.ssh/authorized_keys + ssh-rsa AAAANOTAREALKEY== foo@bar.local + + + + + EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 + $HOME/UserName/.ssh/id_rsa + + + + CustomData + + + + 1.0 + + kms.core.windows.net + + + true + true + false + + + diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/ovf-env-4.xml walinuxagent-2.2.45/tests/data/ovf-env-4.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/ovf-env-4.xml 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/ovf-env-4.xml 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,40 @@ + + + + 1.0 + + LinuxProvisioningConfiguration + HostName + UserName + UserPassword + false + + + + EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 + $HOME/UserName/.ssh/authorized_keys + ssh-rsa AAAANOTAREALKEY== foo@bar.local + + + + + EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 + $HOME/UserName/.ssh/id_rsa + + + + CustomData + + + + 1.0 + + kms.core.windows.net + bad data + + true + true + false + 
+ + diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/ovf-env.xml walinuxagent-2.2.45/tests/data/ovf-env.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/ovf-env.xml 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/ovf-env.xml 2019-11-07 00:36:56.000000000 +0000 @@ -26,4 +26,15 @@ CustomData + + 1.0 + + kms.core.windows.net + false + + true + true + false + + diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/test_waagent.conf walinuxagent-2.2.45/tests/data/test_waagent.conf --- walinuxagent-2.2.21+really2.2.20/tests/data/test_waagent.conf 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/test_waagent.conf 2019-11-07 00:36:56.000000000 +0000 @@ -6,12 +6,13 @@ =Value0 FauxKey1= Value1 FauxKey2=Value2 Value2 +FauxKey3=delalloc,rw,noatime,nobarrier,users,mode=777 -# Enable instance creation -Provisioning.Enabled=y +# Enable extension handling +Extensions.Enabled=y -# Rely on cloud-init to provision -Provisioning.UseCloudInit=y +# Specify provisioning agent. +Provisioning.Agent=auto # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y @@ -19,7 +20,8 @@ # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y -# Supported values are "rsa", "dsa" and "ecdsa". +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # An EOL comment that should be ignored # Monitor host name changes and publish changes via DHCP requests. @@ -53,6 +55,9 @@ # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n +# Use encrypted swap +ResourceDisk.EnableSwapEncryption=n + # Size of the swapfile. ResourceDisk.SwapSizeMB=0 @@ -113,8 +118,8 @@ # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. 
-# Default is disabled -# EnableOverProvisioning=n +# Default is enabled +# EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks @@ -122,5 +127,11 @@ # Add firewall rules to protect access to Azure host node services # Note: -# - The default is false to protect the state of exising VMs -OS.EnableFirewall=y +# - The default is false to protect the state of existing VMs +OS.EnableFirewall=n + +# Enforce control groups limits on the agent and extensions +CGroups.EnforceLimits=n + +# CGroups which are excluded from limits, comma separated +CGroups.Excluded=customscript,runcommand diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/wire/certs_format_not_pfx.xml walinuxagent-2.2.45/tests/data/wire/certs_format_not_pfx.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/wire/certs_format_not_pfx.xml 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/wire/certs_format_not_pfx.xml 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,7 @@ + + + 2012-11-30 + 12 + CertificatesNonPfxPackage + NotPFXData + diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/wire/certs_no_format_specified.xml walinuxagent-2.2.45/tests/data/wire/certs_no_format_specified.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/wire/certs_no_format_specified.xml 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/wire/certs_no_format_specified.xml 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,81 @@ + + + 2012-11-30 + 12 + + MIINswYJKoZIhvcNAQcDoIINpDCCDaACAQIxggEwMIIBLAIBAoAUvyL+x6GkZXog +QNfsXRZAdD9lc7IwDQYJKoZIhvcNAQEBBQAEggEArhMPepD/RqwdPcHEVqvrdZid +72vXrOCuacRBhwlCGrNlg8oI+vbqmT6CSv6thDpet31ALUzsI4uQHq1EVfV1+pXy +NlYD1CKhBCoJxs2fSPU4rc8fv0qs5JAjnbtW7lhnrqFrXYcyBYjpURKfa9qMYBmj +NdijN+1T4E5qjxPr7zK5Dalp7Cgp9P2diH4Nax2nixotfek3MrEFBaiiegDd+7tE +ux685GWYPqB5Fn4OsDkkYOdb0OE2qzLRrnlCIiBCt8VubWH3kMEmSCxBwSJupmQ8 +sxCWk+sBPQ9gJSt2sIqfx/61F8Lpu6WzP+ZOnMLTUn2wLU/d1FN85HXmnQALzTCC 
+DGUGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQIbEcBfddWPv+AggxAAOAt/kCXiffe +GeJG0P2K9Q18XZS6Rz7Xcz+Kp2PVgqHKRpPjjmB2ufsRO0pM4z/qkHTOdpfacB4h +gz912D9U04hC8mt0fqGNTvRNAFVFLsmo7KXc/a8vfZNrGWEnYn7y1WfP52pqA/Ei +SNFf0NVtMyqg5Gx+hZ/NpWAE5vcmRRdoYyWeg13lhlW96QUxf/W7vY/D5KpAGACI +ok79/XI4eJkbq3Dps0oO/difNcvdkE74EU/GPuL68yR0CdzzafbLxzV+B43TBRgP +jH1hCdRqaspjAaZL5LGfp1QUM8HZIKHuTze/+4dWzS1XR3/ix9q/2QFI7YCuXpuE +un3AFYXE4QX/6kcPklZwh9FqjSie3I5HtC1vczqYVjqT4oHrs8ktkZ7oAzeXaXTF +k6+JQNNa/IyJw24I1MR77q7HlHSSfhXX5cFjVCd/+SiA4HJQjJgeIuXZ+dXmSPdL +9xLbDbtppifFyNaXdlSzcsvepKy0WLF49RmbL7Bnd46ce/gdQ6Midwi2MTnUtapu +tHmu/iJtaUpwXXC0B93PHfAk7Y3SgeY4tl/gKzn9/x5SPAcHiNRtOsNBU8ZThzos +Wh41xMLZavmX8Yfm/XWtl4eU6xfhcRAbJQx7E1ymGEt7xGqyPV7hjqhoB9i3oR5N +itxHgf1+jw/cr7hob+Trd1hFqZO6ePMyWpqUg97G2ThJvWx6cv+KRtTlVA6/r/UH +gRGBArJKBlLpXO6dAHFztT3Y6DFThrus4RItcfA8rltfQcRm8d0nPb4lCa5kRbCx +iudq3djWtTIe64sfk8jsc6ahWYSovM+NmhbpxEUbZVWLVEcHAYOeMbKgXSu5sxNO +JZNeFdzZqDRRY9fGjYNS7DdNOmrMmWKH+KXuMCItpNZsZS/3W7QxAo3ugYLdUylU +Zg8H/BjUGZCGn1rEBAuQX78m0SZ1xHlgHSwJIOmxOJUDHLPHtThfbELY9ec14yi5 +so1aQwhhfhPvF+xuXBrVeTAfhFNYkf2uxcEp7+tgFAc5W0QfT9SBn5vSvIxv+dT4 +7B2Pg1l/zjdsM74g58lmRJeDoz4psAq+Uk7n3ImBhIku9qX632Q1hanjC8D4xM4W +sI/W0ADCuAbY7LmwMpAMdrGg//SJUnBftlom7C9VA3EVf8Eo+OZH9hze+gIgUq+E +iEUL5M4vOHK2ttsYrSkAt8MZzjQiTlDr1yzcg8fDIrqEAi5arjTPz0n2s0NFptNW +lRD+Xz6pCXrnRgR8YSWpxvq3EWSJbZkSEk/eOmah22sFnnBZpDqn9+UArAznXrRi +nYK9w38aMGPKM39ymG8kcbY7jmDZlRgGs2ab0Fdj1jl3CRo5IUatkOJwCEMd/tkB +eXLQ8hspJhpFnVNReX0oithVZir+j36epk9Yn8d1l+YlKmuynjunKl9fhmoq5Q6i +DFzdYpqBV+x9nVhnmPfGyrOkXvGL0X6vmXAEif/4JoOW4IZpyXjgn+VoCJUoae5J +Djl45Bcc2Phrn4HW4Gg/+pIwTFqqZZ2jFrznNdgeIxTGjBrVsyJUeO3BHI0mVLaq +jtjhTshYCI7mXOis9W3ic0RwE8rgdDXOYKHhLVw9c4094P/43utSVXE7UzbEhhLE +Ngb4H5UGrQmPTNbq40tMUMUCej3zIKuVOvamzeE0IwLhkjNrvKhCG1EUhX4uoJKu +DQ++3KVIVeYSv3+78Jfw9F3usAXxX1ICU74/La5DUNjU7DVodLDvCAy5y1jxP3Ic +If6m7aBYVjFSQAcD8PZPeIEl9W4ZnbwyBfSDd11P2a8JcZ7N99GiiH3yS1QgJnAO +g9XAgjT4Gcn7k4lHPHLULgijfiDSvt94Ga4/hse0F0akeZslVN/bygyib7x7Lzmq 
+JkepRianrvKHbatuxvcajt/d+dxCnr32Q1qCEc5fcgDsjvviRL2tKR0qhuYjn1zR +Vk/fRtYOmlaGBVzUXcjLRAg3gC9+Gy8KvXIDrnHxD+9Ob+DUP9fgbKqMeOzKcCK8 +NSfSQ+tQjBYD5Ku4zAPUQJoRGgx43vXzcl2Z2i3E2otpoH82Kx8S9WlVEUlTtBjQ +QIGM5aR0QUNt8z34t2KWRA8SpP54VzBmEPdwLnzna+PkrGKsKiHVn4K+HfjDp1uW +xyO8VjrolAOYosTPXMpNp2u/FoFxaAPTa/TvmKc0kQ3ED9/sGLS2twDnEccvHP+9 +zzrnzzN3T2CWuXveDpuyuAty3EoAid1nuC86WakSaAZoa8H2QoRgsrkkBCq+K/yl +4FO9wuP+ksZoVq3mEDQ9qv6H4JJEWurfkws3OqrA5gENcLmSUkZie4oqAxeOD4Hh +Zx4ckG5egQYr0PnOd2r7ZbIizv3MKT4RBrfOzrE6cvm9bJEzNWXdDyIxZ/kuoLA6 +zX7gGLdGhg7dqzKqnGtopLAsyM1b/utRtWxOTGO9K9lRxyX82oCVT9Yw0DwwA+cH +Gutg1w7JHrIAYEtY0ezHgxhqMGuuTyJMX9Vr0D+9DdMeBK7hVOeSnxkaQ0f9HvF6 +0XI/2OTIoBSCBpUXjpgsYt7m7n2rFJGJmtqgLAosCAkacHnHLwX0EnzBw3sdDU6Q +jFXUWIDd5xUsNkFDCbspLMFs22hjNI6f/GREwd23Q4ujF8pUIcxcfbs2myjbK45s +tsn/jrkxmKRgwCIeN/H7CM+4GXSkEGLWbiGCxWzWt9wW1F4M7NW9nho3D1Pi2LBL +1ByTmjfo/9u9haWrp53enDLJJbcaslfe+zvo3J70Nnzu3m3oJ3dmUxgJIstG10g3 +lhpUm1ynvx04IFkYJ3kr/QHG/xGS+yh/pMZlwcUSpjEgYFmjFHU4A1Ng4LGI4lnw +5wisay4J884xmDgGfK0sdVQyW5rExIg63yYXp2GskRdDdwvWlFUzPzGgCNXQU96A +ljZfjs2u4IiVCC3uVsNbGqCeSdAl9HC5xKuPNbw5yTxPkeRL1ouSdkBy7rvdFaFf +dMPw6sBRNW8ZFInlgOncR3+xT/rZxru87LCq+3hRN3kw3hvFldrW2QzZSksO759b +pJEP+4fxuG96Wq25fRmzHzE0bdJ+2qF3fp/hy4oRi+eVPa0vHdtkymE4OUFWftb6 ++P++JVOzZ4ZxYA8zyUoJb0YCaxL+Jp/QqiUiH8WZVmYZmswqR48sUUKr7TIvpNbY +6jEH6F7KiZCoWfKH12tUC69iRYx3UT/4Bmsgi3S4yUxfieYRMIwihtpP4i0O+OjB +/DPbb13qj8ZSfXJ+jmF2SRFfFG+2T7NJqm09JvT9UcslVd+vpUySNe9UAlpcvNGZ +2+j180ZU7YAgpwdVwdvqiJxkeVtAsIeqAvIXMFm1PDe7FJB0BiSVZdihB6cjnKBI +dv7Lc1tI2sQe7QSfk+gtionLrEnto+aXF5uVM5LMKi3gLElz7oXEIhn54OeEciB1 +cEmyX3Kb4HMRDMHyJxqJXwxm88RgC6RekoPvstu+AfX/NgSpRj5beaj9XkweJT3H +rKWhkjq4Ghsn1LoodxluMMHd61m47JyoqIP9PBKoW+Na0VUKIVHw9e9YeW0nY1Zi +5qFA/pHPAt9AbEilRay6NEm8P7TTlNo216amc8byPXanoNrqBYZQHhZ93A4yl6jy +RdpYskMivT+Sh1nhZAioKqqTZ3HiFR8hFGspAt5gJc4WLYevmxSicGa6AMyhrkvG +rvOSdjY6JY/NkxtcgeycBX5MLF7uDbhUeqittvmlcrVN6+V+2HIbCCrvtow9pcX9 +EkaaNttj5M0RzjQxogCG+S5TkhCy04YvKIkaGJFi8xO3icdlxgOrKD8lhtbf4UpR 
+cDuytl70JD95mSUWL53UYjeRf9OsLRJMHQOpS02japkMwCb/ngMCQuUXA8hGkBZL +Xw7RwwPuM1Lx8edMXn5C0E8UK5e0QmI/dVIl2aglXk2oBMBJbnyrbfUPm462SG6u +ke4gQKFmVy2rKICqSkh2DMr0NzeYEUjZ6KbmQcV7sKiFxQ0/ROk8eqkYYxGWUWJv +ylPF1OTLH0AIbGlFPLQO4lMPh05yznZTac4tmowADSHY9RCxad1BjBeine2pj48D +u36OnnuQIsedxt5YC+h1bs+mIvwMVsnMLidse38M/RayCDitEBvL0KeG3vWYzaAL +h0FCZGOW0ilVk8tTF5+XWtsQEp1PpclvkcBMkU3DtBUnlmPSKNfJT0iRr2T0sVW1 +h+249Wj0Bw== + + diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/wire/encrypted.enc walinuxagent-2.2.45/tests/data/wire/encrypted.enc --- walinuxagent-2.2.21+really2.2.20/tests/data/wire/encrypted.enc 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/wire/encrypted.enc 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,9 @@ +MIIBlwYJKoZIhvcNAQcDoIIBiDCCAYQCAQIxggEwMIIBLAIBAoAUW4P+tNXlmDXW +H30raKBkpUhXYwUwDQYJKoZIhvcNAQEBBQAEggEAP0LpwacLdJyvNQVmSyXPGM0i +mNJSHPQsAXLFFcmWmCAGiEsQWiHKV9mON/eyd6DjtgbTuhVNHPY/IDSDXfjgLxdX +NK1XejuEaVTwdVtCJWl5l4luOeCMDueitoIgBqgkbFpteqV6s8RFwnv+a2HhM0lc +TUwim6skx1bFs0csDD5DkM7R10EWxWHjdKox8R8tq/C2xpaVWRvJ52/DCVgeHOfh +orV0GmBK0ue/mZVTxu8jz2BxQUBhHXNWjBuNuGNmUuZvD0VY1q2K6Fa3xzv32mfB +xPKgt6ru/wG1Kn6P8yMdKS3bQiNZxE1D1o3epDujiygQahUby5cI/WXk7ryZ1DBL +BgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECAxpp+ZE6rpAgChqxBVpU047fb4zinTV +5xaG7lN15YEME4q8CqcF/Ji3NbHPmdw1/gtf diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/wire/ext_conf_multiple_extensions.xml walinuxagent-2.2.45/tests/data/wire/ext_conf_multiple_extensions.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/wire/ext_conf_multiple_extensions.xml 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/wire/ext_conf_multiple_extensions.xml 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,76 @@ + + + + + Prod + + http://manifest_of_ga.xml + + + + Test + + http://manifest_of_ga.xml + + + + + + + + + + + + + + + 
{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + + + + + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + + + + + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + + + + + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + + + + + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + + + + + https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D + + + diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/wire/ext_conf_sequencing.xml walinuxagent-2.2.45/tests/data/wire/ext_conf_sequencing.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/wire/ext_conf_sequencing.xml 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/wire/ext_conf_sequencing.xml 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,34 @@ + + + + Prod + + http://manifest_of_ga.xml + + + + Test + + http://manifest_of_ga.xml + + + + + + + + + + + + + + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + + + + + 
{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + + +https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/wire/goal_state_remote_access.xml walinuxagent-2.2.45/tests/data/wire/goal_state_remote_access.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/wire/goal_state_remote_access.xml 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/wire/goal_state_remote_access.xml 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,30 @@ + + + 2010-12-15 + 1 + + Started + + 16001 + + + + c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2 + http://remoteaccessinfouri/ + + + + MachineRole_IN_0 + Started + + http://hostingenvuri/ + http://sharedconfiguri/ + http://certificatesuri/ + http://extensionsconfiguri/ + http://fullconfiguri/ + DummyRoleConfigName.xml + + + + + \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/wire/manifest_deletion.xml walinuxagent-2.2.45/tests/data/wire/manifest_deletion.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/wire/manifest_deletion.xml 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/wire/manifest_deletion.xml 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,11 @@ + + + + + 1.0.0 + + http://foo.bar/zar/OSTCExtensions.ExampleHandlerLinux__1.0.0 + + + + diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/wire/remote_access_10_accounts.xml walinuxagent-2.2.45/tests/data/wire/remote_access_10_accounts.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/wire/remote_access_10_accounts.xml 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/wire/remote_access_10_accounts.xml 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,97 @@ + + + 1 + 1 + + + testAccount1 + 
encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + testAccount2 + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + testAccount3 + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + testAccount4 + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + testAccount5 + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + testAccount6 + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + testAccount7 + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + testAccount8 + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + testAccount9 + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + testAccount10 + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/wire/remote_access_duplicate_accounts.xml walinuxagent-2.2.45/tests/data/wire/remote_access_duplicate_accounts.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/wire/remote_access_duplicate_accounts.xml 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/wire/remote_access_duplicate_accounts.xml 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,25 @@ + + + 1 + 1 + + + testAccount + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + testAccount + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/wire/remote_access_no_accounts.xml walinuxagent-2.2.45/tests/data/wire/remote_access_no_accounts.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/wire/remote_access_no_accounts.xml 1970-01-01 00:00:00.000000000 +0000 +++ 
walinuxagent-2.2.45/tests/data/wire/remote_access_no_accounts.xml 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,6 @@ + + + 1 + 1 + + \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/wire/remote_access_single_account.xml walinuxagent-2.2.45/tests/data/wire/remote_access_single_account.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/wire/remote_access_single_account.xml 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/wire/remote_access_single_account.xml 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,16 @@ + + + 1 + 1 + + + testAccount + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/wire/remote_access_two_accounts.xml walinuxagent-2.2.45/tests/data/wire/remote_access_two_accounts.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/wire/remote_access_two_accounts.xml 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/wire/remote_access_two_accounts.xml 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,25 @@ + + + 1 + 1 + + + testAccount1 + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + testAccount2 + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/wire/sample.pem walinuxagent-2.2.45/tests/data/wire/sample.pem --- walinuxagent-2.2.21+really2.2.20/tests/data/wire/sample.pem 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/wire/sample.pem 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC3zCdThkBDYu83 +M7ouc03caqyEwV6lioWbtYdnraoftbuCJrOhy+WipSCVAmhlu/tpaItuzwB9/VTw +eSWfB/hB2sabVTKgU8gTQrI6ISy2ocLjqTIZuOETJuGlAIw6OXorhdUr8acZ8ohb +ftZIbS9YKxbO7sQi+20sT2ugROJnO7IDGbb2vWhEhp2NAieJ8Nnq0SMv1+cZJZYk 
+6hiFVSl12g0egVFrRTJBvvTbPS7amLAQkauK/IxG28jZR61pMbHHX+xBg4Iayb2i +qp8YnwK3qtf0stc0h9snnLnHSODva1Bo6qVBEcrkuXmtrHL2nUMsV/MgWG3HMgJJ +6Jf/wSFpAgMBAAECggEBALepsS6cvADajzK5ZPXf0NFOY6CxXnPLrWGAj5NCDftr +7bjMFbq7dngFzD46zrnClCOsDZEoF1TO3p8CYF6/Zwvfo5E7HMDrl8XvYwwFdJn3 +oTlALMlZXsh1lQv+NSJFp1hwfylPbGzYV/weDeEIAkR3om4cWDCg0GJz5peb3iXK +5fimrZsnInhktloU2Ep20UepR8wbhS5WP7B2s32OULTlWiGdORUVrHJQbTN6O0NZ +WzmAcsgfmW1KEBOR9sDFbAdldt8/WcLJVIfWOdFVbCbOaxrnRnZ8j8tsafziVncD +QFRpNeyOHZR5S84oAPo2EIVeFCLLeo3Wit/O3IFmhhUCgYEA5jrs0VSowb/xU/Bw +wm1cKnSqsub3p3GLPL4TdODYMHH56Wv8APiwcW9O1+oRZoM9M/8KXkDlfFOz10tY +bMYvF8MzFKIzzi5TxaWqSWsNeXpoqtFqUed7KRh3ybncIqFAAauTwmAhAlEmGR/e +AY7Oy4b2lnRU1ssIOd0VnSnAqTcCgYEAzF6746DhsInlFIQGsUZBOmUtwyu0k1kc +gkWhJt5SyQHZtX1SMV2RI6CXFpUZcjv31jM30GmXdvkuj2dIHaDZB5V5BlctPJZq +FH0RFxmFHXk+npLJnKKSX1H3/2PxTUsSBcFHEaPCgvIz3720bX7fqRIFtVdrcbQA +cB9DARbjWl8CgYBKADyoWCbaB+EA0vLbe505RECtulF176gKgSnt0muKvsfOQFhC +06ya+WUFP4YSRjLA6MQjYYahvKG8nMoyRE1UvPhJNI2kQv3INKSUbqVpG3BTH3am +Ftpebi/qliPsuZnCL60RuCZEAWNWhgisxYMwphPSblfqpl3hg290EbyMZwKBgQCs +mypHQ166EozW+fcJDFQU9NVkrGoTtMR+Rj6oLEdxG037mb+sj+EAXSaeXQkj0QAt ++g4eyL+zLRuk5E8lLu9+F0EjGMfNDyDC8ypW/yfNT9SSa1k6IJhNR1aUbZ2kcU3k +bGwQuuWSYOttAbT8cZaHHgCSOyY03xkrmUunBOS6MwKBgBK4D0Uv7ZDf3Y38A07D +MblDQj3wZeFu6IWi9nVT12U3WuEJqQqqxWnWmETa+TS/7lhd0GjTB+79+qOIhmls +XSAmIS/rBUGlk5f9n+vBjQkpbqAvcXV7I/oQASpVga1xB9EuMvXc9y+x/QfmrYVM +zqxRWJIMASPLiQr79V0zXGXP +-----END PRIVATE KEY----- \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/wire/trans_pub walinuxagent-2.2.45/tests/data/wire/trans_pub --- walinuxagent-2.2.21+really2.2.20/tests/data/wire/trans_pub 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/data/wire/trans_pub 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,9 @@ +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA09wkCR3pXk16iBIqMh5N +c5YLnHMpPK4k+3hhkxVKixTSUjprTAen6DZ8/bbOtWzBb5AnPoBVaiMgSotC6ndb 
+IJdlO/xFRuUeciOS9f/4n8ZoubPQbknNkikQsvYLwh9AsfYiI+Ur0s5AfTRbvhYV +wrdCpwnorDwZxVp5JdPWvtdBwYyoSNxYmSkougwm/csy58T4kx1tcNQZj4+ztmJy +7wpe8E9opWxzofaOuoFLx62NdvMvKt7NNQPPjmubJEnMI7lKTamiG5iDvfBTKQBQ +9XF3svxadLKrPW/jOs5uqfAEDKivrslH+GNMF+MU693yoUaid+K/ZWfP1exgVNmx +cQIDAQAB +-----END PUBLIC KEY----- diff -Nru walinuxagent-2.2.21+really2.2.20/tests/distro/__init__.py walinuxagent-2.2.45/tests/distro/__init__.py --- walinuxagent-2.2.21+really2.2.20/tests/distro/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/distro/__init__.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/tests/distro/test_resourceDisk.py walinuxagent-2.2.45/tests/distro/test_resourceDisk.py --- walinuxagent-2.2.21+really2.2.20/tests/distro/test_resourceDisk.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/distro/test_resourceDisk.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # # Implements parts of RFC 2131, 1541, 1497 and # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx # http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx +import os +import stat import sys from azurelinuxagent.common.utils import shellutil from azurelinuxagent.daemon.resourcedisk import get_resourcedisk_handler @@ -38,6 +40,11 @@ # assert assert os.path.exists(test_file) + # only the owner should have access + mode = os.stat(test_file).st_mode & ( + stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) + assert mode == stat.S_IRUSR | stat.S_IWUSR + # cleanup os.remove(test_file) @@ -49,7 +56,7 @@ file_size = 1024 * 128 # execute - if sys.version_info >= (3,3): + if sys.version_info >= (3, 3): with patch("os.posix_fallocate", side_effect=Exception('failure')): get_resourcedisk_handler().mkfile(test_file, file_size) @@ -76,20 +83,20 @@ resource_disk_handler.mkfile(test_file, file_size) # assert - if sys.version_info >= (3,3): + if sys.version_info >= (3, 3): with patch("os.posix_fallocate") as posix_fallocate: - assert posix_fallocate.assert_not_called() + self.assertEqual(0, posix_fallocate.call_count) assert run_patch.call_count == 1 assert "dd if" in run_patch.call_args_list[0][0][0] - def test_change_partition_type(self): resource_handler = get_resourcedisk_handler() # test when sfdisk --part-type does not exist with patch.object(shellutil, "run_get_output", side_effect=[[1, ''], [0, '']]) as run_patch: - resource_handler.change_partition_type(suppress_message=True, option_str='') + resource_handler.change_partition_type( + suppress_message=True, option_str='') # assert assert run_patch.call_count == 2 @@ -99,12 +106,42 @@ # test when sfdisk --part-type exists with patch.object(shellutil, "run_get_output", side_effect=[[0, '']]) as run_patch: - resource_handler.change_partition_type(suppress_message=True, option_str='') + 
resource_handler.change_partition_type( + suppress_message=True, option_str='') # assert assert run_patch.call_count == 1 assert "sfdisk --part-type" in run_patch.call_args_list[0][0][0] + def test_check_existing_swap_file(self): + test_file = os.path.join(self.tmp_dir, 'test_swap_file') + file_size = 1024 * 128 + if os.path.exists(test_file): + os.remove(test_file) + + with open(test_file, "wb") as file: + file.write(bytearray(file_size)) + + os.chmod(test_file, stat.S_ISUID | stat.S_ISGID | stat.S_IRUSR | + stat.S_IWUSR | stat.S_IRWXG | stat.S_IRWXO) # 0o6677 + + def swap_on(_): # mimic the output of "swapon -s" + return [ + "Filename Type Size Used Priority", + "{0} partition 16498684 0 -2".format(test_file) + ] + + with patch.object(shellutil, "run_get_output", side_effect=swap_on): + get_resourcedisk_handler().check_existing_swap_file( + test_file, test_file, file_size) + + # it should remove access from group, others + mode = os.stat(test_file).st_mode & (stat.S_ISUID | stat.S_ISGID | + stat.S_IRWXU | stat.S_IWUSR | stat.S_IRWXG | stat.S_IRWXO) # 0o6777 + assert mode == stat.S_ISUID | stat.S_ISGID | stat.S_IRUSR | stat.S_IWUSR # 0o6600 + + os.remove(test_file) + if __name__ == '__main__': unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/distro/test_scvmm.py walinuxagent-2.2.45/tests/distro/test_scvmm.py --- walinuxagent-2.2.21+really2.2.20/tests/distro/test_scvmm.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/distro/test_scvmm.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # # Implements parts of RFC 2131, 1541, 1497 and # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx diff -Nru walinuxagent-2.2.21+really2.2.20/tests/ga/__init__.py walinuxagent-2.2.45/tests/ga/__init__.py --- walinuxagent-2.2.21+really2.2.20/tests/ga/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/ga/__init__.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/tests/ga/test_env.py walinuxagent-2.2.45/tests/ga/test_env.py --- walinuxagent-2.2.21+really2.2.20/tests/ga/test_env.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/ga/test_env.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,75 +12,150 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # -import glob -import tempfile - -import os -from mock import patch - -from azurelinuxagent.common.utils import fileutil -from azurelinuxagent.ga.env import MAXIMUM_CACHED_FILES, EnvHandler -from tests.tools import AgentTestCase - - -class TestEnv(AgentTestCase): - @patch("azurelinuxagent.common.conf.get_lib_dir") - def test_purge_disk_cache(self, mock_conf, *args): - names = [ - ("Prod", "agentsManifest"), - ("Test", "agentsManifest"), - ("FauxExtension1", "manifest.xml"), - ("FauxExtension2", "manifest.xml"), - ("GoalState", "xml"), - ("ExtensionsConfig", "xml") - ] - - env = EnvHandler() - - tmp_dir = tempfile.mkdtemp() - mock_conf.return_value = tmp_dir - - # write incarnations 1-100 - for t in names: - self._create_files(tmp_dir, - t[0], - t[1], - 2 * MAXIMUM_CACHED_FILES, - with_sleep=0.001) - - # update incarnation 1 with the latest timestamp - for t in names: - f = os.path.join(tmp_dir, '.'.join((t[0], '1', t[1]))) - fileutil.write_file(f, "faux content") - - # ensure the expected number of files are created - for t in names: - p = os.path.join(tmp_dir, '{0}.*.{1}'.format(*t)) - self.assertEqual(2 * MAXIMUM_CACHED_FILES, len(glob.glob(p))) - - env.purge_disk_cache() - - # ensure the expected number of files remain - for t in names: - p = os.path.join(tmp_dir, '{0}.*.{1}'.format(*t)) - incarnation1 = os.path.join(tmp_dir, '{0}.1.{1}'.format(t[0], t[1])) - incarnation2 = os.path.join(tmp_dir, '{0}.2.{1}'.format(t[0], t[1])) - self.assertEqual(MAXIMUM_CACHED_FILES, len(glob.glob(p))) - self.assertTrue(os.path.exists(incarnation1)) - self.assertFalse(os.path.exists(incarnation2)) - - # write incarnation 101 - for t in names: - f = os.path.join(tmp_dir, '.'.join((t[0], '101', t[1]))) - fileutil.write_file(f, "faux content") - - # call to purge should be ignored, since interval has not elapsed - env.purge_disk_cache() - - for t in names: - p = os.path.join(tmp_dir, 
'{0}.*.{1}'.format(*t)) - incarnation1 = os.path.join(tmp_dir, '{0}.1.{1}'.format(t[0], t[1])) - self.assertEqual(MAXIMUM_CACHED_FILES + 1, len(glob.glob(p))) - self.assertTrue(os.path.exists(incarnation1)) +from azurelinuxagent.common.osutil.default import DefaultOSUtil, shellutil +from azurelinuxagent.ga.env import EnvHandler +from tests.tools import * + + +class TestEnvHandler(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) + + # save the original run_command so that mocks can reference it + self.shellutil_run_command = shellutil.run_command + + # save an instance of the original DefaultOSUtil so that mocks can reference it + self.default_osutil = DefaultOSUtil() + + # AgentTestCase.setUp mocks osutil.factory._get_osutil; we override that mock for this class with a new mock + # that always returns the default implementation. + self.mock_get_osutil = patch("azurelinuxagent.common.osutil.factory._get_osutil", return_value=DefaultOSUtil()) + self.mock_get_osutil.start() + + + def tearDown(self): + self.mock_get_osutil.stop() + AgentTestCase.tearDown(self) + + def test_get_dhcp_client_pid_should_return_a_sorted_list_of_pids(self): + with patch("azurelinuxagent.common.utils.shellutil.run_command", return_value="11 9 5 22 4 6"): + pids = EnvHandler().get_dhcp_client_pid() + self.assertEquals(pids, [4, 5, 6, 9, 11, 22]) + + def test_get_dhcp_client_pid_should_return_an_empty_list_and_log_a_warning_when_dhcp_client_is_not_running(self): + with patch("azurelinuxagent.common.osutil.default.shellutil.run_command", side_effect=lambda _: self.shellutil_run_command(["pidof", "non-existing-process"])): + with patch('azurelinuxagent.common.logger.Logger.warn') as mock_warn: + pids = EnvHandler().get_dhcp_client_pid() + + self.assertEquals(pids, []) + + self.assertEquals(mock_warn.call_count, 1) + args, kwargs = mock_warn.call_args + message = args[0] + self.assertEquals("Dhcp client is not running.", message) + + def 
test_get_dhcp_client_pid_should_return_and_empty_list_and_log_an_error_when_an_invalid_command_is_used(self): + with patch("azurelinuxagent.common.osutil.default.shellutil.run_command", side_effect=lambda _: self.shellutil_run_command(["non-existing-command"])): + with patch('azurelinuxagent.common.logger.Logger.error') as mock_error: + pids = EnvHandler().get_dhcp_client_pid() + + self.assertEquals(pids, []) + + self.assertEquals(mock_error.call_count, 1) + args, kwargs = mock_error.call_args + self.assertIn("Failed to get the PID of the DHCP client", args[0]) + self.assertIn("No such file or directory", args[1]) + + def test_get_dhcp_client_pid_should_not_log_consecutive_errors(self): + env_handler = EnvHandler() + + with patch('azurelinuxagent.common.logger.Logger.warn') as mock_warn: + def assert_warnings(count): + self.assertEquals(mock_warn.call_count, count) + + for call_args in mock_warn.call_args_list: + args, kwargs = call_args + self.assertEquals("Dhcp client is not running.", args[0]) + + with patch("azurelinuxagent.common.osutil.default.shellutil.run_command", side_effect=lambda _: self.shellutil_run_command(["pidof", "non-existing-process"])): + # it should log the first error + pids = env_handler.get_dhcp_client_pid() + self.assertEquals(pids, []) + assert_warnings(1) + + # it should not log subsequent errors + for i in range(0, 3): + pids = env_handler.get_dhcp_client_pid() + self.assertEquals(pids, []) + self.assertEquals(mock_warn.call_count, 1) + + with patch("azurelinuxagent.common.osutil.default.shellutil.run_command", return_value="123"): + # now it should succeed + pids = env_handler.get_dhcp_client_pid() + self.assertEquals(pids, [123]) + assert_warnings(1) + + with patch("azurelinuxagent.common.osutil.default.shellutil.run_command", side_effect=lambda _: self.shellutil_run_command(["pidof", "non-existing-process"])): + # it should log the new error + pids = env_handler.get_dhcp_client_pid() + self.assertEquals(pids, []) + assert_warnings(2) 
+ + # it should not log subsequent errors + for i in range(0, 3): + pids = env_handler.get_dhcp_client_pid() + self.assertEquals(pids, []) + self.assertEquals(mock_warn.call_count, 2) + + def test_handle_dhclient_restart_should_reconfigure_network_routes_when_dhcp_client_restarts(self): + with patch("azurelinuxagent.common.dhcp.DhcpHandler.conf_routes") as mock_conf_routes: + env_handler = EnvHandler() + + # + # before the first call to handle_dhclient_restart, EnvHandler configures the network routes and initializes the DHCP PIDs + # + with patch.object(env_handler, "get_dhcp_client_pid", return_value=[123]): + env_handler.dhcp_handler.conf_routes() + env_handler.dhcp_id_list = env_handler.get_dhcp_client_pid() + self.assertEquals(mock_conf_routes.call_count, 1) + + # + # if the dhcp client has not been restarted then it should not reconfigure the network routes + # + def mock_check_pid_alive(pid): + if pid == 123: + return True + raise Exception("Unexpected PID: {0}".format(pid)) + + with patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.check_pid_alive", side_effect=mock_check_pid_alive): + with patch.object(env_handler, "get_dhcp_client_pid", side_effect=Exception("get_dhcp_client_pid should not have been invoked")): + env_handler.handle_dhclient_restart() + self.assertEquals(mock_conf_routes.call_count, 1) # count did not change + + # + # if the process was restarted then it should reconfigure the network routes + # + def mock_check_pid_alive(pid): + if pid == 123: + return False + raise Exception("Unexpected PID: {0}".format(pid)) + + with patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.check_pid_alive", side_effect=mock_check_pid_alive): + with patch.object(env_handler, "get_dhcp_client_pid", return_value=[456, 789]): + env_handler.handle_dhclient_restart() + self.assertEquals(mock_conf_routes.call_count, 2) # count increased + + # + # if the new dhcp client has not been restarted then it should not reconfigure the network routes + # + 
def mock_check_pid_alive(pid): + if pid in [456, 789]: + return True + raise Exception("Unexpected PID: {0}".format(pid)) + + with patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.check_pid_alive", side_effect=mock_check_pid_alive): + with patch.object(env_handler, "get_dhcp_client_pid", side_effect=Exception("get_dhcp_client_pid should not have been invoked")): + env_handler.handle_dhclient_restart() + self.assertEquals(mock_conf_routes.call_count, 2) # count did not change diff -Nru walinuxagent-2.2.21+really2.2.20/tests/ga/test_extension.py walinuxagent-2.2.45/tests/ga/test_extension.py --- walinuxagent-2.2.21+really2.2.20/tests/ga/test_extension.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/ga/test_extension.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,31 +12,43 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # -import glob -import os import os.path -import shutil -import tempfile -import zipfile -import azurelinuxagent.common.conf as conf -import azurelinuxagent.common.utils.fileutil as fileutil +from datetime import timedelta +from azurelinuxagent.ga.monitor import get_monitor_handler +from nose.plugins.attrib import attr from tests.protocol.mockwiredata import * -from azurelinuxagent.common.exception import * -from azurelinuxagent.common.protocol import get_protocol_util -from azurelinuxagent.common.protocol.restapi import ExtHandlerStatus, \ - ExtensionStatus, \ - ExtensionSubStatus, \ - Extension, \ - VMStatus, ExtHandler, \ - get_properties +from azurelinuxagent.common.protocol.restapi import Extension, ExtHandlerProperties from azurelinuxagent.ga.exthandlers import * -from azurelinuxagent.common.protocol.wire import WireProtocol +from azurelinuxagent.common.protocol.wire import WireProtocol, InVMArtifactsProfile + +# Mocking the original sleep to reduce test execution time +SLEEP = time.sleep + + +def mock_sleep(sec=0.01): + SLEEP(sec) + + +def do_not_run_test(): + return True + + +def raise_system_exception(): + raise Exception + + +def raise_ioerror(*args): + e = IOError() + from errno import EIO + e.errno = EIO + raise e + class TestExtensionCleanup(AgentTestCase): def setUp(self): @@ -45,19 +57,19 @@ self.lib_dir = tempfile.mkdtemp() def _install_handlers(self, start=0, count=1, - handler_state=ExtHandlerState.Installed): + handler_state=ExtHandlerState.Installed): src = os.path.join(data_dir, "ext", "sample_ext-1.3.0.zip") version = FlexibleVersion("1.3.0") version += start - version.patch - for i in range(start, start+count): + for i in range(start, start + count): eh = ExtHandler() eh.name = "sample_ext" eh.properties.version = str(version) handler = ExtHandlerInstance(eh, "unused") dst = os.path.join(self.lib_dir, - handler.get_full_name()+HANDLER_PKG_EXT) + 
handler.get_full_name() + HANDLER_PKG_EXT) shutil.copy(src, dst) if not handler_state is None: @@ -65,7 +77,7 @@ handler.set_handler_state(handler_state) version += 1 - + def _count_packages(self): return len(glob.glob(os.path.join(self.lib_dir, "*.zip"))) @@ -73,13 +85,13 @@ paths = os.listdir(self.lib_dir) paths = [os.path.join(self.lib_dir, p) for p in paths] return len([p for p in paths - if os.path.isdir(p) and self._is_installed(p)]) + if os.path.isdir(p) and self._is_installed(p)]) def _count_uninstalled(self): paths = os.listdir(self.lib_dir) paths = [os.path.join(self.lib_dir, p) for p in paths] return len([p for p in paths - if os.path.isdir(p) and not self._is_installed(p)]) + if os.path.isdir(p) and not self._is_installed(p)]) def _is_installed(self, path): path = os.path.join(path, 'config', 'HandlerState') @@ -135,6 +147,7 @@ self.assertEqual(self._count_installed(), 5) self.assertEqual(self._count_uninstalled(), 0) + class TestHandlerStateMigration(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) @@ -156,9 +169,9 @@ def _prepare_handler_state(self): handler_state_path = os.path.join( - self.tmp_dir, - "handler_state", - self.ext_handler_i.get_full_name()) + self.tmp_dir, + "handler_state", + self.ext_handler_i.get_full_name()) os.makedirs(handler_state_path) fileutil.write_file( os.path.join(handler_state_path, "state"), @@ -170,9 +183,9 @@ def _prepare_handler_config(self): handler_config_path = os.path.join( - self.tmp_dir, - self.ext_handler_i.get_full_name(), - "config") + self.tmp_dir, + self.ext_handler_i.get_full_name(), + "config") os.makedirs(handler_config_path) return @@ -233,6 +246,24 @@ self.assertEquals(handler_status.message, message) return + def test_set_handler_status_ignores_none_content(self): + """ + Validate that set_handler_status ignore cases where json.dumps + returns a value of None. 
+ """ + self._prepare_handler_state() + self._prepare_handler_config() + + status = "Ready" + code = 0 + message = "A message" + + try: + with patch('json.dumps', return_value=None): + self.ext_handler_i.set_handler_status(status=status, code=code, message=message) + except Exception as e: + self.fail("set_handler_status threw an exception") + @patch("shutil.move", side_effect=Exception) def test_migration_ignores_move_errors(self, shutil_mock): self._prepare_handler_state() @@ -255,24 +286,49 @@ self.assertTrue(False, "Unexpected exception: {0}".format(str(e))) return + +class ExtensionTestCase(AgentTestCase): + @classmethod + def setUpClass(cls): + cls.cgroups_enabled = CGroupConfigurator.get_instance().enabled() + CGroupConfigurator.get_instance().disable() + + @classmethod + def tearDownClass(cls): + if cls.cgroups_enabled: + CGroupConfigurator.get_instance().enable() + else: + CGroupConfigurator.get_instance().disable() + + +@patch('time.sleep', side_effect=lambda _: mock_sleep(0.001)) @patch("azurelinuxagent.common.protocol.wire.CryptUtil") @patch("azurelinuxagent.common.utils.restutil.http_get") -class TestExtension(AgentTestCase): +class TestExtension(ExtensionTestCase): - def _assert_handler_status(self, report_vm_status, expected_status, - expected_ext_count, version): + def _assert_handler_status(self, report_vm_status, expected_status, + expected_ext_count, version, + expected_handler_name="OSTCExtensions.ExampleHandlerLinux"): self.assertTrue(report_vm_status.called) args, kw = report_vm_status.call_args vm_status = args[0] self.assertNotEquals(0, len(vm_status.vmAgent.extensionHandlers)) handler_status = vm_status.vmAgent.extensionHandlers[0] self.assertEquals(expected_status, handler_status.status) - self.assertEquals("OSTCExtensions.ExampleHandlerLinux", + self.assertEquals(expected_handler_name, handler_status.name) self.assertEquals(version, handler_status.version) self.assertEquals(expected_ext_count, len(handler_status.extensions)) return - + + 
def _assert_ext_pkg_file_status(self, expected_to_be_present=True, extension_version="1.0.0", + extension_handler_name="OSTCExtensions.ExampleHandlerLinux"): + zip_file_format = "{0}__{1}.zip" + if expected_to_be_present: + self.assertIn(zip_file_format.format(extension_handler_name, extension_version), os.listdir(conf.get_lib_dir())) + else: + self.assertNotIn(zip_file_format.format(extension_handler_name, extension_version), os.listdir(conf.get_lib_dir())) + def _assert_no_handler_status(self, report_vm_status): self.assertTrue(report_vm_status.called) args, kw = report_vm_status.call_args @@ -280,11 +336,11 @@ self.assertEquals(0, len(vm_status.vmAgent.extensionHandlers)) return - def _create_mock(self, test_data, mock_http_get, MockCryptUtil): + def _create_mock(self, test_data, mock_http_get, MockCryptUtil, *args): """Test enable/disable/uninstall of an extension""" handler = get_exthandlers_handler() - #Mock protocol to return test data + # Mock protocol to return test data mock_http_get.side_effect = test_data.mock_http_get MockCryptUtil.side_effect = test_data.mock_crypt_util @@ -295,70 +351,206 @@ handler.protocol_util.get_protocol = Mock(return_value=protocol) return handler, protocol - + + def _set_up_update_test_and_update_gs(self, patch_command, *args): + """ + This helper function sets up the Update test by setting up the protocol and ext_handler and asserts the + ext_handler runs fine the first time before patching a failure command for testing. 
+ :param patch_command: The patch_command to setup for failure + :param args: Any additional args passed to the function, needed for creating a mock for handler and protocol + :return: test_data, exthandlers_handler, protocol + """ + test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + # Ensure initial install and enable is successful + exthandlers_handler.run() + + self.assertEqual(0, patch_command.call_count) + self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.0") + self._assert_ext_status(protocol.report_ext_status, "success", 0) + + # Next incarnation, update version + test_data.goal_state = test_data.goal_state.replace("1<", "2<") + test_data.ext_conf = test_data.ext_conf.replace('version="1.0.0"', 'version="1.0.1"') + test_data.manifest = test_data.manifest.replace('1.0.0', '1.0.1') + + # Ensure the patched command fails + patch_command.return_value = "exit 1" + + return test_data, exthandlers_handler, protocol + def test_ext_handler(self, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) - #Test enable scenario. + # Test enable scenario. 
exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) - #Test goal state not changed + # Test goal state not changed exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") - #Test goal state changed + # Test goal state changed test_data.goal_state = test_data.goal_state.replace("1<", "2<") - test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"0\"", + test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"0\"", "seqNo=\"1\"") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 1) - - #Test hotfix + + # Test hotfix test_data.goal_state = test_data.goal_state.replace("2<", "3<") - test_data.ext_conf = test_data.ext_conf.replace("1.0.0", "1.1.0") - test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"1\"", + test_data.ext_conf = test_data.ext_conf.replace("1.0.0", "1.1.1") + test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"1\"", "seqNo=\"2\"") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.1") self._assert_ext_status(protocol.report_ext_status, "success", 2) - #Test upgrade + # Test upgrade test_data.goal_state = test_data.goal_state.replace("3<", "4<") - test_data.ext_conf = test_data.ext_conf.replace("1.1.0", "1.2.0") + test_data.ext_conf = test_data.ext_conf.replace("1.1.1", "1.2.0") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"2\"", "seqNo=\"3\"") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.2.0") self._assert_ext_status(protocol.report_ext_status, "success", 3) - #Test disable + # Test disable test_data.goal_state = test_data.goal_state.replace("4<", "5<") test_data.ext_conf = test_data.ext_conf.replace("enabled", "disabled") exthandlers_handler.run() - 
self._assert_handler_status(protocol.report_vm_status, "NotReady", + self._assert_handler_status(protocol.report_vm_status, "NotReady", 1, "1.2.0") - #Test uninstall + # Test uninstall test_data.goal_state = test_data.goal_state.replace("5<", "6<") test_data.ext_conf = test_data.ext_conf.replace("disabled", "uninstall") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) - #Test uninstall again! + # Test uninstall again! test_data.goal_state = test_data.goal_state.replace("6<", "7<") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) + def test_ext_zip_file_packages_removed_in_update_case(self, *args): + test_data = WireProtocolData(DATA_FILE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + # Test enable scenario. + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") + self._assert_ext_status(protocol.report_ext_status, "success", 0) + self._assert_ext_pkg_file_status(expected_to_be_present=True, + extension_version="1.0.0") + + # Update the package + test_data.goal_state = test_data.goal_state.replace("1<", + "2<") + test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"0\"", + "seqNo=\"1\"") + test_data.ext_conf = test_data.ext_conf.replace("1.0.0", "1.1.0") + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.0") + self._assert_ext_status(protocol.report_ext_status, "success", 1) + self._assert_ext_pkg_file_status(expected_to_be_present=False, + extension_version="1.0.0") + self._assert_ext_pkg_file_status(expected_to_be_present=True, + extension_version="1.1.0") + + # Update the package second time + test_data.goal_state = test_data.goal_state.replace("2<", + "3<") + test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"1\"", + "seqNo=\"2\"") + test_data.ext_conf = test_data.ext_conf.replace("1.1.0", "1.2.0") + exthandlers_handler.run() + 
self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.2.0") + self._assert_ext_status(protocol.report_ext_status, "success", 2) + self._assert_ext_pkg_file_status(expected_to_be_present=False, + extension_version="1.1.0") + self._assert_ext_pkg_file_status(expected_to_be_present=True, + extension_version="1.2.0") + + def test_ext_zip_file_packages_removed_in_uninstall_case(self, *args): + test_data = WireProtocolData(DATA_FILE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + extension_version = "1.0.0" + + # Test enable scenario. + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, extension_version) + self._assert_ext_status(protocol.report_ext_status, "success", 0) + self._assert_ext_pkg_file_status(expected_to_be_present=True, + extension_version=extension_version) + + # Test uninstall + test_data.goal_state = test_data.goal_state.replace("1<", + "2<") + test_data.ext_conf = test_data.ext_conf.replace("enabled", "uninstall") + exthandlers_handler.run() + self._assert_no_handler_status(protocol.report_vm_status) + self._assert_ext_pkg_file_status(expected_to_be_present=False, + extension_version=extension_version) + + def test_ext_zip_file_packages_removed_in_update_and_uninstall_case(self, *args): + test_data = WireProtocolData(DATA_FILE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + # Test enable scenario. 
+ exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") + self._assert_ext_status(protocol.report_ext_status, "success", 0) + self._assert_ext_pkg_file_status(expected_to_be_present=True, + extension_version="1.0.0") + + # Update the package + test_data.goal_state = test_data.goal_state.replace("1<", + "2<") + test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"0\"", + "seqNo=\"1\"") + test_data.ext_conf = test_data.ext_conf.replace("1.0.0", "1.1.0") + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.0") + self._assert_ext_status(protocol.report_ext_status, "success", 1) + self._assert_ext_pkg_file_status(expected_to_be_present=False, + extension_version="1.0.0") + self._assert_ext_pkg_file_status(expected_to_be_present=True, + extension_version="1.1.0") + + # Update the package second time + test_data.goal_state = test_data.goal_state.replace("2<", + "3<") + test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"1\"", + "seqNo=\"2\"") + test_data.ext_conf = test_data.ext_conf.replace("1.1.0", "1.2.0") + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.2.0") + self._assert_ext_status(protocol.report_ext_status, "success", 2) + self._assert_ext_pkg_file_status(expected_to_be_present=False, + extension_version="1.1.0") + self._assert_ext_pkg_file_status(expected_to_be_present=True, + extension_version="1.2.0") + + # Test uninstall + test_data.goal_state = test_data.goal_state.replace("3<", + "4<") + test_data.ext_conf = test_data.ext_conf.replace("enabled", "uninstall") + exthandlers_handler.run() + self._assert_no_handler_status(protocol.report_vm_status) + self._assert_ext_pkg_file_status(expected_to_be_present=False, + extension_version="1.2.0") + def test_ext_handler_no_settings(self, *args): test_data = WireProtocolData(DATA_FILE_EXT_NO_SETTINGS) exthandlers_handler, protocol = self._create_mock(test_data, 
*args) @@ -377,117 +569,368 @@ test_data = WireProtocolData(DATA_FILE_NO_EXT) exthandlers_handler, protocol = self._create_mock(test_data, *args) - #Assert no extension handler status + # Assert no extension handler status exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) - - def test_ext_handler_rollingupgrade(self, *args): - test_data = WireProtocolData(DATA_FILE_EXT_ROLLINGUPGRADE) + + def test_ext_handler_sequencing(self, *args): + test_data = WireProtocolData(DATA_FILE_EXT_SEQUENCING) exthandlers_handler, protocol = self._create_mock(test_data, *args) - #Test enable scenario. + # Test enable scenario. exthandlers_handler.run() - self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0", + expected_handler_name="OSTCExtensions.OtherExampleHandlerLinux") self._assert_ext_status(protocol.report_ext_status, "success", 0) - #Test goal state not changed + # check handler list + self.assertTrue(exthandlers_handler.ext_handlers is not None) + self.assertTrue(exthandlers_handler.ext_handlers.extHandlers is not None) + self.assertEqual(len(exthandlers_handler.ext_handlers.extHandlers), 2) + self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[0].properties.extensions[0].dependencyLevel, 1) + self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[1].properties.extensions[0].dependencyLevel, 2) + + # Test goal state not changed + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0", + expected_handler_name="OSTCExtensions.OtherExampleHandlerLinux") + + # Test goal state changed + test_data.goal_state = test_data.goal_state.replace("1<", + "2<") + test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"0\"", + "seqNo=\"1\"") + # Swap the dependency ordering + test_data.ext_conf = test_data.ext_conf.replace("dependencyLevel=\"2\"", + "dependencyLevel=\"3\"") + test_data.ext_conf = 
test_data.ext_conf.replace("dependencyLevel=\"1\"", + "dependencyLevel=\"4\"") + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") + self._assert_ext_status(protocol.report_ext_status, "success", 1) + + self.assertEqual(len(exthandlers_handler.ext_handlers.extHandlers), 2) + self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[0].properties.extensions[0].dependencyLevel, 3) + self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[1].properties.extensions[0].dependencyLevel, 4) + + # Test disable + # In the case of disable, the last extension to be enabled should be + # the first extension disabled. The first extension enabled should be + # the last one disabled. + test_data.goal_state = test_data.goal_state.replace("2<", + "3<") + test_data.ext_conf = test_data.ext_conf.replace("enabled", "disabled") + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "NotReady", + 1, "1.0.0", + expected_handler_name="OSTCExtensions.OtherExampleHandlerLinux") + self.assertEqual(len(exthandlers_handler.ext_handlers.extHandlers), 2) + self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[0].properties.extensions[0].dependencyLevel, 4) + self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[1].properties.extensions[0].dependencyLevel, 3) + + # Test uninstall + # In the case of uninstall, the last extension to be installed should be + # the first extension uninstalled. The first extension installed + # should be the last one uninstalled. 
+ test_data.goal_state = test_data.goal_state.replace("3<", + "4<") + test_data.ext_conf = test_data.ext_conf.replace("disabled", "uninstall") + # Swap the dependency ordering AGAIN + test_data.ext_conf = test_data.ext_conf.replace("dependencyLevel=\"3\"", + "dependencyLevel=\"6\"") + test_data.ext_conf = test_data.ext_conf.replace("dependencyLevel=\"4\"", + "dependencyLevel=\"5\"") + exthandlers_handler.run() + self._assert_no_handler_status(protocol.report_vm_status) + self.assertEqual(len(exthandlers_handler.ext_handlers.extHandlers), 2) + self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[0].properties.extensions[0].dependencyLevel, 6) + self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[1].properties.extensions[0].dependencyLevel, 5) + + def test_ext_handler_sequencing_default_dependency_level(self, *args): + test_data = WireProtocolData(DATA_FILE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + exthandlers_handler.run() + self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[0].properties.extensions[0].dependencyLevel, 0) + self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[0].properties.extensions[0].dependencyLevel, 0) + + def test_ext_handler_sequencing_invalid_dependency_level(self, *args): + test_data = WireProtocolData(DATA_FILE_EXT_SEQUENCING) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + test_data.goal_state = test_data.goal_state.replace("1<", + "2<") + test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"0\"", + "seqNo=\"1\"") + test_data.ext_conf = test_data.ext_conf.replace("dependencyLevel=\"1\"", + "dependencyLevel=\"a6\"") + test_data.ext_conf = test_data.ext_conf.replace("dependencyLevel=\"2\"", + "dependencyLevel=\"5b\"") + exthandlers_handler.run() + + self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[0].properties.extensions[0].dependencyLevel, 0) + 
self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[0].properties.extensions[0].dependencyLevel, 0) + + @patch('time.gmtime', MagicMock(return_value=time.gmtime(0))) + def test_ext_handler_reporting_status_file(self, *args): + expected_status = ''' +{{ + "agent_name": "{agent_name}", + "current_version": "{current_version}", + "goal_state_version": "{goal_state_version}", + "distro_details": "{distro_details}", + "last_successful_status_upload_time": "{last_successful_status_upload_time}", + "python_version": "{python_version}", + "extensions_status": [ + {{ + "name": "OSTCExtensions.ExampleHandlerLinux", + "version": "1.0.0", + "status": "Ready" + }}, + {{ + "name": "Microsoft.Powershell.ExampleExtension", + "version": "1.0.0", + "status": "Ready" + }}, + {{ + "name": "Microsoft.EnterpriseCloud.Monitoring.ExampleHandlerLinux", + "version": "1.0.0", + "status": "Ready" + }}, + {{ + "name": "Microsoft.CPlat.Core.ExampleExtensionLinux", + "version": "1.0.0", + "status": "Ready" + }}, + {{ + "name": "Microsoft.OSTCExtensions.Edp.ExampleExtensionLinuxInTest", + "version": "1.0.0", + "status": "Ready" + }} + ] +}}'''.format(agent_name=AGENT_NAME, + current_version=str(CURRENT_VERSION), + goal_state_version=str(GOAL_STATE_AGENT_VERSION), + distro_details="{0}:{1}".format(DISTRO_NAME, DISTRO_VERSION), + last_successful_status_upload_time=time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), + python_version="Python: {0}.{1}.{2}".format(PY_VERSION_MAJOR, PY_VERSION_MINOR, PY_VERSION_MICRO)) + + expected_status_json = json.loads(expected_status) + + test_data = WireProtocolData(DATA_FILE_MULTIPLE_EXT) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + exthandlers_handler.run() + + status_path = os.path.join(conf.get_lib_dir(), AGENT_STATUS_FILE) + actual_status_json = json.loads(fileutil.read_file(status_path)) + + self.assertEquals(expected_status_json, actual_status_json) + + def test_ext_handler_rollingupgrade(self, *args): + test_data = 
WireProtocolData(DATA_FILE_EXT_ROLLINGUPGRADE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + # Test enable scenario. exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") + self._assert_ext_status(protocol.report_ext_status, "success", 0) - #Test goal state changed without new GUID + # Test goal state changed test_data.goal_state = test_data.goal_state.replace("1<", "2<") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) - #Test GUID change without new version available + # Test minor version bump test_data.goal_state = test_data.goal_state.replace("2<", "3<") - test_data.ext_conf = test_data.ext_conf.replace("FE0987654321", "FE0987654322") + test_data.ext_conf = test_data.ext_conf.replace("1.0.0", "1.1.0") exthandlers_handler.run() - self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) - #Test hotfix available without GUID change + # Test hotfix version bump test_data.goal_state = test_data.goal_state.replace("3<", "4<") - test_data.ext_conf = test_data.ext_conf.replace("1.0.0", "1.1.0") + test_data.ext_conf = test_data.ext_conf.replace("1.1.0", "1.1.1") exthandlers_handler.run() - self._assert_no_handler_status(protocol.report_vm_status) + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.1") + self._assert_ext_status(protocol.report_ext_status, "success", 0) - #Test GUID change with hotfix + # Test disable test_data.goal_state = test_data.goal_state.replace("4<", "5<") - test_data.ext_conf = test_data.ext_conf.replace("FE0987654322", "FE0987654323") + test_data.ext_conf = test_data.ext_conf.replace("enabled", "disabled") exthandlers_handler.run() - 
self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.1") - self._assert_ext_status(protocol.report_ext_status, "success", 0) + self._assert_handler_status(protocol.report_vm_status, "NotReady", + 1, "1.1.1") - #Test disable + # Test uninstall test_data.goal_state = test_data.goal_state.replace("5<", "6<") - test_data.ext_conf = test_data.ext_conf.replace("enabled", "disabled") + test_data.ext_conf = test_data.ext_conf.replace("disabled", "uninstall") exthandlers_handler.run() - self._assert_handler_status(protocol.report_vm_status, "NotReady", - 1, "1.1.1") + self._assert_no_handler_status(protocol.report_vm_status) - #Test uninstall + # Test uninstall again! test_data.goal_state = test_data.goal_state.replace("6<", "7<") - test_data.ext_conf = test_data.ext_conf.replace("disabled", "uninstall") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) - #Test uninstall again! + # Test re-install test_data.goal_state = test_data.goal_state.replace("7<", "8<") + test_data.ext_conf = test_data.ext_conf.replace("uninstall", "enabled") exthandlers_handler.run() - self._assert_no_handler_status(protocol.report_vm_status) + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.1") + self._assert_ext_status(protocol.report_ext_status, "success", 0) - #Test re-install + # Test version bump post-re-install test_data.goal_state = test_data.goal_state.replace("8<", "9<") - test_data.ext_conf = test_data.ext_conf.replace("uninstall", "enabled") + test_data.ext_conf = test_data.ext_conf.replace("1.1.1", "1.2.0") exthandlers_handler.run() - self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.1") + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.2.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) - #Test upgrade available without GUID change + # Test rollback test_data.goal_state = test_data.goal_state.replace("9<", "10<") - test_data.ext_conf = 
test_data.ext_conf.replace("1.1.0", "1.2.0") - exthandlers_handler.run() - self._assert_no_handler_status(protocol.report_vm_status) - - #Test GUID change with upgrade available - test_data.goal_state = test_data.goal_state.replace("10<", - "11<") - test_data.ext_conf = test_data.ext_conf.replace("FE0987654323", "FE0987654324") + test_data.ext_conf = test_data.ext_conf.replace("1.2.0", "1.1.0") exthandlers_handler.run() - self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.2.0") + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) @patch('azurelinuxagent.ga.exthandlers.add_event') - def test_ext_handler_download_failure(self, mock_add_event, *args): + def test_ext_handler_download_failure_transient(self, mock_add_event, *args): + original_sleep = time.sleep + test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) protocol.download_ext_handler_pkg = Mock(side_effect=ProtocolError) exthandlers_handler.run() + + self.assertEquals(0, mock_add_event.call_count) + + @patch('azurelinuxagent.common.errorstate.ErrorState.is_triggered') + @patch('azurelinuxagent.ga.exthandlers.add_event') + def test_ext_handler_report_status_permanent(self, mock_add_event, mock_error_state, *args): + test_data = WireProtocolData(DATA_FILE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + protocol.report_vm_status = Mock(side_effect=ProtocolError) + + mock_error_state.return_value = True + exthandlers_handler.run() + self.assertEquals(5, mock_add_event.call_count) + args, kw = mock_add_event.call_args + self.assertEquals(False, kw['is_success']) + self.assertTrue("Failed to report vm agent status" in kw['message']) + self.assertEquals("ReportStatusExtended", kw['op']) + + @patch('azurelinuxagent.ga.exthandlers.add_event') + def test_ext_handler_report_status_resource_gone(self, mock_add_event, *args): + 
test_data = WireProtocolData(DATA_FILE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + protocol.report_vm_status = Mock(side_effect=ResourceGoneError) + + exthandlers_handler.run() + self.assertEquals(4, mock_add_event.call_count) args, kw = mock_add_event.call_args self.assertEquals(False, kw['is_success']) - self.assertEquals("OSTCExtensions.ExampleHandlerLinux", kw['name']) + self.assertTrue("ResourceGoneError" in kw['message']) + self.assertEquals("ExtensionProcessing", kw['op']) + + @patch('azurelinuxagent.common.errorstate.ErrorState.is_triggered') + @patch('azurelinuxagent.ga.exthandlers.ExtHandlerInstance.report_event') + def test_ext_handler_download_failure_permanent_ProtocolError(self, mock_add_event, mock_error_state, *args): + test_data = WireProtocolData(DATA_FILE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + protocol.get_ext_handler_pkgs = Mock(side_effect=ProtocolError) + + mock_error_state.return_value = True + + exthandlers_handler.run() + + self.assertEquals(1, mock_add_event.call_count) + args, kw = mock_add_event.call_args_list[0] + self.assertEquals(False, kw['is_success']) + self.assertTrue("Failed to get ext handler pkgs" in kw['message']) + self.assertTrue("ProtocolError" in kw['message']) + + @patch('azurelinuxagent.common.errorstate.ErrorState.is_triggered') + @patch('azurelinuxagent.common.event.add_event') + def test_ext_handler_download_failure_permanent_with_ExtensionDownloadError_and_triggered(self, mock_add_event, + mock_error_state, *args): + test_data = WireProtocolData(DATA_FILE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + protocol.get_ext_handler_pkgs = Mock(side_effect=ExtensionDownloadError) + + mock_error_state.return_value = True + + exthandlers_handler.run() + + self.assertEquals(1, mock_add_event.call_count) + args, kw = mock_add_event.call_args_list[0] + self.assertEquals(False, kw['is_success']) + self.assertTrue("Failed to get artifact for 
over" in kw['message']) + self.assertTrue("ExtensionDownloadError" in kw['message']) self.assertEquals("Download", kw['op']) + @patch('azurelinuxagent.common.errorstate.ErrorState.is_triggered') + @patch('azurelinuxagent.common.event.add_event') + def test_ext_handler_download_failure_permanent_with_ExtensionDownloadError_and_not_triggered(self, mock_add_event, + mock_error_state, + *args): + test_data = WireProtocolData(DATA_FILE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + protocol.get_ext_handler_pkgs = Mock(side_effect=ExtensionDownloadError) + + mock_error_state.return_value = False + + exthandlers_handler.run() + + self.assertEquals(0, mock_add_event.call_count) + @patch('azurelinuxagent.ga.exthandlers.fileutil') def test_ext_handler_io_error(self, mock_fileutil, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) - + mock_fileutil.write_file.return_value = IOError("Mock IO Error") exthandlers_handler.run() + def test_extension_processing_allowed(self, *args): + exthandlers_handler = get_exthandlers_handler() + exthandlers_handler.protocol = Mock() + + # disable extension handling in configuration + with patch.object(conf, 'get_extensions_enabled', return_value=False): + self.assertFalse(exthandlers_handler.extension_processing_allowed()) + + # enable extension handling in configuration + with patch.object(conf, "get_extensions_enabled", return_value=True): + # disable overprovisioning in configuration + with patch.object(conf, 'get_enable_overprovisioning', return_value=False): + self.assertTrue(exthandlers_handler.extension_processing_allowed()) + + # enable overprovisioning in configuration + with patch.object(conf, "get_enable_overprovisioning", return_value=True): + # disable protocol support for over-provisioning + with patch.object(exthandlers_handler.protocol, 'supports_overprovisioning', return_value=False): + 
self.assertTrue(exthandlers_handler.extension_processing_allowed()) + + # enable protocol support for over-provisioning + with patch.object(exthandlers_handler.protocol, "supports_overprovisioning", return_value=True): + with patch.object(exthandlers_handler.protocol.get_artifacts_profile(), "is_on_hold", + side_effect=[True, False]): + # Enable on_hold property in artifact_blob + self.assertFalse(exthandlers_handler.extension_processing_allowed()) + + # Disable on_hold property in artifact_blob + self.assertTrue(exthandlers_handler.extension_processing_allowed()) + def test_handle_ext_handlers_on_hold_true(self, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) @@ -496,17 +939,16 @@ exthandlers_handler.protocol = protocol # Disable extension handling blocking - conf.get_enable_overprovisioning = Mock(return_value=False) - with patch.object(ExtHandlersHandler, 'handle_ext_handler') as patch_handle_ext_handler: - exthandlers_handler.handle_ext_handlers() - patch_handle_ext_handler.assert_called() + exthandlers_handler.extension_processing_allowed = Mock(return_value=False) + with patch.object(ExtHandlersHandler, 'handle_ext_handlers') as patch_handle_ext_handlers: + exthandlers_handler.run() + self.assertEqual(0, patch_handle_ext_handlers.call_count) # enable extension handling blocking - conf.get_enable_overprovisioning = Mock(return_value=True) - with patch.object(ExtHandlersHandler, 'handle_ext_handler') as patch_handle_ext_handler: - exthandlers_handler.handle_ext_handlers() - patch_handle_ext_handler.assert_not_called() - + exthandlers_handler.extension_processing_allowed = Mock(return_value=True) + with patch.object(ExtHandlersHandler, 'handle_ext_handlers') as patch_handle_ext_handlers: + exthandlers_handler.run() + self.assertEqual(1, patch_handle_ext_handlers.call_count) def test_handle_ext_handlers_on_hold_false(self, *args): test_data = WireProtocolData(DATA_FILE) @@ -517,20 +959,39 @@ # 
enable extension handling blocking conf.get_enable_overprovisioning = Mock(return_value=True) - #Test when is_on_hold returns False + # Test when is_on_hold returns False from azurelinuxagent.common.protocol.wire import InVMArtifactsProfile mock_in_vm_artifacts_profile = InVMArtifactsProfile(MagicMock()) mock_in_vm_artifacts_profile.is_on_hold = Mock(return_value=False) protocol.get_artifacts_profile = Mock(return_value=mock_in_vm_artifacts_profile) with patch.object(ExtHandlersHandler, 'handle_ext_handler') as patch_handle_ext_handler: exthandlers_handler.handle_ext_handlers() - patch_handle_ext_handler.assert_called_once() + self.assertEqual(1, patch_handle_ext_handler.call_count) - #Test when in_vm_artifacts_profile is not available + # Test when in_vm_artifacts_profile is not available protocol.get_artifacts_profile = Mock(return_value=None) with patch.object(ExtHandlersHandler, 'handle_ext_handler') as patch_handle_ext_handler: exthandlers_handler.handle_ext_handlers() - patch_handle_ext_handler.assert_called_once() + self.assertEqual(1, patch_handle_ext_handler.call_count) + + def test_last_etag_on_extension_processing(self, *args): + test_data = WireProtocolData(DATA_FILE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + exthandlers_handler.ext_handlers, etag = protocol.get_ext_handlers() + exthandlers_handler.protocol = protocol + + # Disable extension handling blocking in the first run and enable in the 2nd run + with patch.object(exthandlers_handler, 'extension_processing_allowed', side_effect=[False, True]): + exthandlers_handler.run() + self.assertIsNone(exthandlers_handler.last_etag, + "The last etag should be None initially as extension_processing is False") + self.assertNotEqual(etag, exthandlers_handler.last_etag, + "Last etag and etag should not be same if extension processing is disabled") + exthandlers_handler.run() + self.assertIsNotNone(exthandlers_handler.last_etag, + "Last etag should not be none if extension processing 
is allowed") + self.assertEqual(etag, exthandlers_handler.last_etag, + "Last etag and etag should be same if extension processing is enabled") def _assert_ext_status(self, report_ext_status, expected_status, expected_seq_no): @@ -546,8 +1007,8 @@ exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") - #Remove status file and re-run collecting extension status - status_file = os.path.join(self.tmp_dir, + # Remove status file and re-run collecting extension status + status_file = os.path.join(self.tmp_dir, "OSTCExtensions.ExampleHandlerLinux-1.0.0", "status", "0.status") self.assertTrue(os.path.isfile(status_file)) @@ -557,31 +1018,188 @@ self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "error", 0) - def test_ext_handler_version_decide_autoupgrade_internalversion(self, *args): - for internal in [False, True]: - for autoupgrade in [False, True]: - if internal: - config_version = '1.3.0' - decision_version = '1.3.0' - if autoupgrade: - datafile = DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION - else: - datafile = DATA_FILE_EXT_INTERNALVERSION - else: - config_version = '1.0.0' - if autoupgrade: - datafile = DATA_FILE_EXT_AUTOUPGRADE - decision_version = '1.2.0' - else: - datafile = DATA_FILE - decision_version = '1.0.0' + def test_wait_for_handler_successful_completion_empty_exts(self, *args): + ''' + Testing wait_for_handler_successful_completion() when there is no extension in a handler. + Expected to return True. 
+ ''' + test_data = WireProtocolData(DATA_FILE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) - _, protocol = self._create_mock(WireProtocolData(datafile), *args) - ext_handlers, _ = protocol.get_ext_handlers() - self.assertEqual(1, len(ext_handlers.extHandlers)) - ext_handler = ext_handlers.extHandlers[0] - self.assertEqual('OSTCExtensions.ExampleHandlerLinux', ext_handler.name) - self.assertEqual(config_version, ext_handler.properties.version, "config version.") + handler = ExtHandler(name="handler") + + ExtHandlerInstance.get_ext_handling_status = MagicMock(return_value=None) + self.assertTrue(exthandlers_handler.wait_for_handler_successful_completion(handler, datetime.datetime.utcnow())) + + def _helper_wait_for_handler_successful_completion(self, exthandlers_handler): + ''' + Call wait_for_handler_successful_completion() passing a handler with an extension. + Override the wait time to 0.1 seconds to minimize the timeout duration. + Return the value returned by wait_for_handler_successful_completion(). + ''' + handler_name = "Handler" + exthandler = ExtHandler(name=handler_name) + extension = Extension(name=handler_name) + exthandler.properties.extensions.append(extension) + + # Override the timeout value to minimize the test duration + wait_until = datetime.datetime.utcnow() + datetime.timedelta(seconds=0.1) + return exthandlers_handler.wait_for_handler_successful_completion(exthandler, wait_until) + + def test_wait_for_handler_successful_completion_no_status(self, *args): + ''' + Testing wait_for_handler_successful_completion() when there is no status file or seq_no is negative. + Expected to return False.
+ ''' + test_data = WireProtocolData(DATA_FILE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + ExtHandlerInstance.get_ext_handling_status = MagicMock(return_value=None) + self.assertFalse(self._helper_wait_for_handler_successful_completion(exthandlers_handler)) + + def test_wait_for_handler_successful_completion_success_status(self, *args): + ''' + Testing wait_for_handler_successful_completion() when there is successful status. + Expected to return True. + ''' + test_data = WireProtocolData(DATA_FILE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + status = "success" + + ExtHandlerInstance.get_ext_handling_status = MagicMock(return_value=status) + self.assertTrue(self._helper_wait_for_handler_successful_completion(exthandlers_handler)) + + def test_wait_for_handler_successful_completion_error_status(self, *args): + ''' + Testing wait_for_handler_successful_completion() when there is error status. + Expected to return False. + ''' + test_data = WireProtocolData(DATA_FILE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + status = "error" + + ExtHandlerInstance.get_ext_handling_status = MagicMock(return_value=status) + self.assertFalse(self._helper_wait_for_handler_successful_completion(exthandlers_handler)) + + def test_wait_for_handler_successful_completion_timeout(self, *args): + ''' + Testing wait_for_handler_successful_completion() when there is non terminal status. + Expected to return False. 
+ ''' + test_data = WireProtocolData(DATA_FILE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + # Choose a non-terminal status + status = "warning" + + ExtHandlerInstance.get_ext_handling_status = MagicMock(return_value=status) + self.assertFalse(self._helper_wait_for_handler_successful_completion(exthandlers_handler)) + + def test_get_ext_handling_status(self, *args): + ''' + Testing get_ext_handling_status() function with various cases and + verifying against the expected values + ''' + test_data = WireProtocolData(DATA_FILE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + handler_name = "Handler" + exthandler = ExtHandler(name=handler_name) + extension = Extension(name=handler_name) + exthandler.properties.extensions.append(extension) + + # In the following list of test cases, the first element corresponds to seq_no. + # the second element is the status file name, the third element indicates if the status file exists or not. + # The fourth element is the expected value from get_ext_handling_status() + test_cases = [ + [-5, None, False, None], + [-1, None, False, None], + [0, None, False, None], + [0, "filename", False, "warning"], + [0, "filename", True, ExtensionStatus(status="success")], + [5, "filename", False, "warning"], + [5, "filename", True, ExtensionStatus(status="success")] + ] + + orig_state = os.path.exists + for case in test_cases: + ext_handler_i = ExtHandlerInstance(exthandler, protocol) + ext_handler_i.get_status_file_path = MagicMock(return_value=(case[0], case[1])) + os.path.exists = MagicMock(return_value=case[2]) + if case[2]: + # when the status file exists, it is expected to return the value from collect_ext_status() + ext_handler_i.collect_ext_status = MagicMock(return_value=case[3]) + + status = ext_handler_i.get_ext_handling_status(extension) + if case[2]: + self.assertEqual(status, case[3].status) + else: + self.assertEqual(status, case[3]) + + os.path.exists = orig_state + + def 
test_is_ext_handling_complete(self, *args): + ''' + Testing is_ext_handling_complete() with various input and + verifying against the expected output values. + ''' + test_data = WireProtocolData(DATA_FILE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + handler_name = "Handler" + exthandler = ExtHandler(name=handler_name) + extension = Extension(name=handler_name) + exthandler.properties.extensions.append(extension) + + ext_handler_i = ExtHandlerInstance(exthandler, protocol) + + # Testing no status case + ext_handler_i.get_ext_handling_status = MagicMock(return_value=None) + completed, status = ext_handler_i.is_ext_handling_complete(extension) + self.assertTrue(completed) + self.assertEqual(status, None) + + # Here the key represents the possible input value to is_ext_handling_complete() + # the value represents the output tuple from is_ext_handling_complete() + expected_results = { + "error": (True, "error"), + "success": (True, "success"), + "warning": (False, "warning"), + "transitioning": (False, "transitioning") + } + + for key in expected_results.keys(): + ext_handler_i.get_ext_handling_status = MagicMock(return_value=key) + completed, status = ext_handler_i.is_ext_handling_complete(extension) + self.assertEqual(completed, expected_results[key][0]) + self.assertEqual(status, expected_results[key][1]) + + def test_ext_handler_version_decide_autoupgrade_internalversion(self, *args): + for internal in [False, True]: + for autoupgrade in [False, True]: + if internal: + config_version = '1.3.0' + decision_version = '1.3.0' + if autoupgrade: + datafile = DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION + else: + datafile = DATA_FILE_EXT_INTERNALVERSION + else: + config_version = '1.0.0' + decision_version = '1.0.0' + if autoupgrade: + datafile = DATA_FILE_EXT_AUTOUPGRADE + else: + datafile = DATA_FILE + + _, protocol = self._create_mock(WireProtocolData(datafile), *args) + ext_handlers, _ = protocol.get_ext_handlers() + self.assertEqual(1, 
len(ext_handlers.extHandlers)) + ext_handler = ext_handlers.extHandlers[0] + self.assertEqual('OSTCExtensions.ExampleHandlerLinux', ext_handler.name) + self.assertEqual(config_version, ext_handler.properties.version, "config version.") ExtHandlerInstance(ext_handler, protocol).decide_version() self.assertEqual(decision_version, ext_handler.properties.version, "decision version.") @@ -595,44 +1213,1414 @@ # (installed_version, config_version, exptected_version, autoupgrade_expected_version) cases = [ - (None, '2.0', '2.0.0', '2.2.0'), - (None, '2.0.0', '2.0.0', '2.2.0'), - ('1.0', '1.0.0', '1.0.0', '1.2.0'), - (None, '2.1.0', '2.1.1', '2.2.0'), - (None, '2.2.0', '2.2.0', '2.2.0'), - (None, '2.3.0', '2.3.0', '2.3.0'), - (None, '2.4.0', '2.4.0', '2.4.0'), - (None, '3.0', '3.0', '3.1'), - (None, '4.0', '4.0.0.1', '4.1.0.0'), + (None, '2.0', '2.0.0'), + (None, '2.0.0', '2.0.0'), + ('1.0', '1.0.0', '1.0.0'), + (None, '2.1.0', '2.1.0'), + (None, '2.1.1', '2.1.1'), + (None, '2.2.0', '2.2.0'), + (None, '2.3.0', '2.3.0'), + (None, '2.4.0', '2.4.0'), + (None, '3.0', '3.0'), + (None, '3.1', '3.1'), + (None, '4.0', '4.0.0.1'), + (None, '4.1', '4.1.0.0'), ] _, protocol = self._create_mock(WireProtocolData(DATA_FILE), *args) version_uri = Mock() version_uri.uri = 'http://some/Microsoft.OSTCExtensions_ExampleHandlerLinux_asiaeast_manifest.xml' - for (installed_version, config_version, expected_version, autoupgrade_expected_version) in cases: + for (installed_version, config_version, expected_version) in cases: ext_handler = Mock() ext_handler.properties = Mock() ext_handler.name = 'OSTCExtensions.ExampleHandlerLinux' ext_handler.versionUris = [version_uri] ext_handler.properties.version = config_version - + ext_handler_instance = ExtHandlerInstance(ext_handler, protocol) ext_handler_instance.get_installed_version = Mock(return_value=installed_version) ext_handler_instance.decide_version() self.assertEqual(expected_version, ext_handler.properties.version) - 
ext_handler.properties.version = config_version - ext_handler.properties.upgradePolicy = 'auto' + @patch('azurelinuxagent.common.conf.get_extensions_enabled', return_value=False) + def test_extensions_disabled(self, _, *args): + # test status is reported for no extensions + test_data = WireProtocolData(DATA_FILE_NO_EXT) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + exthandlers_handler.run() + self._assert_no_handler_status(protocol.report_vm_status) - ext_handler_instance = ExtHandlerInstance(ext_handler, protocol) - ext_handler_instance.get_installed_version = Mock(return_value=installed_version) + # test status is reported, but extensions are not processed + test_data = WireProtocolData(DATA_FILE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + exthandlers_handler.run() + self._assert_no_handler_status(protocol.report_vm_status) - ext_handler_instance.decide_version() - self.assertEqual(autoupgrade_expected_version, ext_handler.properties.version) + def test_extensions_deleted(self, *args): + test_data = WireProtocolData(DATA_FILE_EXT_DELETION) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + # Ensure initial enable is successful + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") + self._assert_ext_status(protocol.report_ext_status, "success", 0) + + # Update incarnation, simulate new extension version and old one deleted + test_data.goal_state = test_data.goal_state.replace("1<", + "2<") + test_data.ext_conf = test_data.ext_conf.replace('version="1.0.0"', + 'version="1.0.1"') + test_data.manifest = test_data.manifest.replace('1.0.0', '1.0.1') + + # Ensure new extension can be enabled + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.1") + self._assert_ext_status(protocol.report_ext_status, "success", 0) + + @patch('azurelinuxagent.ga.exthandlers.ExtHandlerInstance.install', 
side_effect=ExtHandlerInstance.install, + autospec=True) + @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_install_command') + def test_install_failure(self, patch_get_install_command, patch_install, *args): + """ + When extension install fails, the operation should not be retried. + """ + test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + # Ensure initial install is unsuccessful + patch_get_install_command.return_value = "exit.sh 1" + exthandlers_handler.run() + + self.assertEqual(1, patch_install.call_count) + self.assertEqual(1, protocol.report_vm_status.call_count) + self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=0, version="1.0.0") + + # Ensure subsequent no further retries are made + exthandlers_handler.run() + self.assertEqual(1, patch_install.call_count) + self.assertEqual(2, protocol.report_vm_status.call_count) + + @patch('azurelinuxagent.ga.exthandlers.ExtHandlersHandler.handle_ext_handler_error') + @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_install_command') + def test_install_failure_check_exception_handling(self, patch_get_install_command, patch_handle_ext_handler_error, + *args): + """ + When extension install fails, the operation should be reported to our telemetry service. + """ + test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + # Ensure install is unsuccessful + patch_get_install_command.return_value = "exit.sh 1" + exthandlers_handler.run() + + self.assertEqual(1, protocol.report_vm_status.call_count) + self.assertEqual(1, patch_handle_ext_handler_error.call_count) + + @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_enable_command') + def test_enable_failure(self, patch_get_enable_command, *args): + """ + When extension enable fails, the operation should not be retried. 
+ """ + test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + # Ensure initial install is successful, but enable fails + patch_get_enable_command.call_count = 0 + patch_get_enable_command.return_value = "exit.sh 1" + exthandlers_handler.run() + + self.assertEqual(1, patch_get_enable_command.call_count) + self.assertEqual(1, protocol.report_vm_status.call_count) + self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=1, version="1.0.0") + + exthandlers_handler.run() + self.assertEqual(1, patch_get_enable_command.call_count) + self.assertEqual(2, protocol.report_vm_status.call_count) + + @patch('azurelinuxagent.ga.exthandlers.ExtHandlersHandler.handle_ext_handler_error') + @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_enable_command') + def test_enable_failure_check_exception_handling(self, patch_get_enable_command, + patch_handle_ext_handler_error, *args): + """ + When extension enable fails, the operation should be reported. + """ + test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + # Ensure initial install is successful, but enable fails + patch_get_enable_command.call_count = 0 + patch_get_enable_command.return_value = "exit.sh 1" + exthandlers_handler.run() + + self.assertEqual(1, patch_get_enable_command.call_count) + self.assertEqual(1, protocol.report_vm_status.call_count) + self.assertEqual(1, patch_handle_ext_handler_error.call_count) + + @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_disable_command') + def test_disable_failure(self, patch_get_disable_command, *args): + """ + When extension disable fails, the operation should not be retried. 
+ """ + test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + # Ensure initial install and enable is successful, but disable fails + patch_get_disable_command.call_count = 0 + patch_get_disable_command.return_value = "exit.sh 1" + exthandlers_handler.run() + + self.assertEqual(0, patch_get_disable_command.call_count) + self.assertEqual(1, protocol.report_vm_status.call_count) + self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.0") + self._assert_ext_status(protocol.report_ext_status, "success", 0) + + # Next incarnation, disable extension + test_data.goal_state = test_data.goal_state.replace("1<", + "2<") + test_data.ext_conf = test_data.ext_conf.replace("enabled", "disabled") + + exthandlers_handler.run() + self.assertEqual(1, patch_get_disable_command.call_count) + self.assertEqual(2, protocol.report_vm_status.call_count) + self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=1, version="1.0.0") + + # Ensure there are no further retries + exthandlers_handler.run() + self.assertEqual(1, patch_get_disable_command.call_count) + self.assertEqual(3, protocol.report_vm_status.call_count) + self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=1, version="1.0.0") + + @patch('azurelinuxagent.ga.exthandlers.ExtHandlersHandler.handle_ext_handler_error') + @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_disable_command') + def test_disable_failure_with_exception_handling(self, patch_get_disable_command, + patch_handle_ext_handler_error, *args): + """ + When extension disable fails, the operation should be reported. 
+ """ + test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + # Ensure initial install and enable is successful, but disable fails + patch_get_disable_command.call_count = 0 + patch_get_disable_command.return_value = "exit 1" + exthandlers_handler.run() + + self.assertEqual(0, patch_get_disable_command.call_count) + self.assertEqual(1, protocol.report_vm_status.call_count) + self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.0") + self._assert_ext_status(protocol.report_ext_status, "success", 0) + + # Next incarnation, disable extension + test_data.goal_state = test_data.goal_state.replace("1<", "2<") + test_data.ext_conf = test_data.ext_conf.replace("enabled", "disabled") + + exthandlers_handler.run() + + self.assertEqual(1, patch_get_disable_command.call_count) + self.assertEqual(2, protocol.report_vm_status.call_count) + self.assertEqual(1, patch_handle_ext_handler_error.call_count) + + @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_uninstall_command') + def test_uninstall_failure(self, patch_get_uninstall_command, *args): + """ + When extension uninstall fails, the operation should not be retried. 
+ """ + test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + # Ensure initial install and enable is successful, but uninstall fails + patch_get_uninstall_command.call_count = 0 + patch_get_uninstall_command.return_value = "exit 1" + exthandlers_handler.run() + + self.assertEqual(0, patch_get_uninstall_command.call_count) + self.assertEqual(1, protocol.report_vm_status.call_count) + self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.0") + self._assert_ext_status(protocol.report_ext_status, "success", 0) + + # Next incarnation, disable extension + test_data.goal_state = test_data.goal_state.replace("1<", + "2<") + test_data.ext_conf = test_data.ext_conf.replace("enabled", "uninstall") + + exthandlers_handler.run() + self.assertEqual(1, patch_get_uninstall_command.call_count) + self.assertEqual(2, protocol.report_vm_status.call_count) + self.assertEquals("Ready", protocol.report_vm_status.call_args[0][0].vmAgent.status) + self._assert_no_handler_status(protocol.report_vm_status) + + # Ensure there are no further retries + exthandlers_handler.run() + self.assertEqual(1, patch_get_uninstall_command.call_count) + self.assertEqual(3, protocol.report_vm_status.call_count) + self.assertEquals("Ready", protocol.report_vm_status.call_args[0][0].vmAgent.status) + self._assert_no_handler_status(protocol.report_vm_status) + + @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_update_command') + def test_upgrade_failure(self, patch_get_update_command, *args): + """ + Extension upgrade failure should not be retried + """ + test_data, exthandlers_handler, protocol = self._set_up_update_test_and_update_gs(patch_get_update_command, + *args) + + exthandlers_handler.run() + self.assertEqual(1, patch_get_update_command.call_count) + + # On the next iteration, update should not be retried + exthandlers_handler.run() + self.assertEqual(1, 
patch_get_update_command.call_count) + + self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=1, version="1.0.1") + + @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_disable_command') + def test__extension_upgrade_failure_when_prev_version_disable_fails(self, patch_get_disable_command, *args): + test_data, exthandlers_handler, protocol = self._set_up_update_test_and_update_gs(patch_get_disable_command, + *args) + + with patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_enable_command') as patch_get_enable_command: + exthandlers_handler.run() + + # When the previous version's disable fails, we expect the upgrade scenario to fail, so the enable + # for the new version is not called and the new version handler's status is reported as not ready. + self.assertEqual(1, patch_get_disable_command.call_count) + self.assertEqual(0, patch_get_enable_command.call_count) + self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=0, version="1.0.1") + + # Ensure we are processing the same goal state only once + loop_run = 5 + for x in range(loop_run): + exthandlers_handler.run() + + self.assertEqual(1, patch_get_disable_command.call_count) + self.assertEqual(0, patch_get_enable_command.call_count) + + @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_disable_command') + def test__extension_upgrade_failure_when_prev_version_disable_fails_and_recovers_on_next_incarnation(self, patch_get_disable_command, + *args): + test_data, exthandlers_handler, protocol = self._set_up_update_test_and_update_gs(patch_get_disable_command, + *args) + + with patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_enable_command') as patch_get_enable_command: + exthandlers_handler.run() + + # When the previous version's disable fails, we expect the upgrade scenario to fail, so the enable + # for the new version is not called and the new version handler's status is reported as not ready. 
+ self.assertEqual(1, patch_get_disable_command.call_count) + self.assertEqual(0, patch_get_enable_command.call_count) + self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=0, version="1.0.1") + + # Ensure we are processing the same goal state only once + loop_run = 5 + for x in range(loop_run): + exthandlers_handler.run() + + self.assertEqual(1, patch_get_disable_command.call_count) + self.assertEqual(0, patch_get_enable_command.call_count) + + # Force a new goal state incarnation, only then will we attempt the upgrade again + test_data.goal_state = test_data.goal_state.replace("2<", "3<") + + # Ensure disable won't fail by making launch_command a no-op + with patch('azurelinuxagent.ga.exthandlers.ExtHandlerInstance.launch_command') as patch_launch_command: + exthandlers_handler.run() + self.assertEqual(2, patch_get_disable_command.call_count) + self.assertEqual(1, patch_get_enable_command.call_count) + self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.1") + + @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_disable_command') + def test__extension_upgrade_failure_when_prev_version_disable_fails_incorrect_zip(self, patch_get_disable_command, + *args): + test_data, exthandlers_handler, protocol = self._set_up_update_test_and_update_gs(patch_get_disable_command, + *args) + + # The download logic has retry logic that sleeps before each try - make sleep a no-op. 
+ with patch("time.sleep"): + with patch("zipfile.ZipFile.extractall") as patch_zipfile_extractall: + with patch( + 'azurelinuxagent.ga.exthandlers.HandlerManifest.get_enable_command') as patch_get_enable_command: + patch_zipfile_extractall.side_effect = raise_ioerror + # The zipfile was corrupt and the upgrade sequence failed + exthandlers_handler.run() + + # We never called the disable of the old version due to the failure when unzipping the new version, + # nor the enable of the new version + self.assertEqual(0, patch_get_disable_command.call_count) + self.assertEqual(0, patch_get_enable_command.call_count) + + # Ensure we are processing the same goal state only once + loop_run = 5 + for x in range(loop_run): + exthandlers_handler.run() + + self.assertEqual(0, patch_get_disable_command.call_count) + self.assertEqual(0, patch_get_enable_command.call_count) + + @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_disable_command') + def test__old_handler_reports_failure_on_disable_fail_on_update(self, patch_get_disable_command, *args): + old_version, new_version = "1.0.0", "1.0.1" + test_data, exthandlers_handler, protocol = self._set_up_update_test_and_update_gs(patch_get_disable_command, + *args) + + with patch.object(ExtHandlerInstance, "report_event", autospec=True) as patch_report_event: + exthandlers_handler.run() # Download the new update the first time, and then we patch the download method. 
+ self.assertEqual(1, patch_get_disable_command.call_count) + + old_version_args, old_version_kwargs = patch_report_event.call_args + new_version_args, new_version_kwargs = patch_report_event.call_args_list[0] + + self.assertEqual(new_version_args[0].ext_handler.properties.version, new_version, + "The first call to report event should be from the new version of the ext-handler " + "to report download succeeded") + + self.assertEqual(new_version_kwargs['message'], "Download succeeded", + "The message should be Download Succedded") + + self.assertEqual(old_version_args[0].ext_handler.properties.version, old_version, + "The last report event call should be from the old version ext-handler " + "to report the event from the previous version") + + self.assertFalse(old_version_kwargs['is_success'], "The last call to report event should be for a failure") + + self.assertTrue('Error' in old_version_kwargs['message'], "No error reported") + + # This is ensuring that the error status is being written to the new version + self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=0, version=new_version) + + @patch('azurelinuxagent.ga.exthandlers.ExtHandlersHandler.handle_ext_handler_error') + @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_update_command') + def test_upgrade_failure_with_exception_handling(self, patch_get_update_command, + patch_handle_ext_handler_error, *args): + """ + Extension upgrade failure should not be retried + """ + test_data, exthandlers_handler, protocol = self._set_up_update_test_and_update_gs(patch_get_update_command, + *args) + + exthandlers_handler.run() + self.assertEqual(1, patch_get_update_command.call_count) + self.assertEqual(1, patch_handle_ext_handler_error.call_count) + + @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_disable_command') + def test_extension_upgrade_should_pass_when_continue_on_update_failure_is_true_and_prev_version_disable_fails( + self, patch_get_disable_command, *args): 
+ test_data, exthandlers_handler, protocol = self._set_up_update_test_and_update_gs(patch_get_disable_command, + *args) + + with patch('azurelinuxagent.ga.exthandlers.HandlerManifest.is_continue_on_update_failure', return_value=True) \ + as mock_continue_on_update_failure: + # These are just testing the mocks have been called and asserting the test conditions have been met + exthandlers_handler.run() + self.assertEqual(1, patch_get_disable_command.call_count) + self.assertEqual(2, mock_continue_on_update_failure.call_count, + "This should be called twice, for both disable and uninstall") + + # Ensure the handler status and ext_status is successful + self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.1") + self._assert_ext_status(protocol.report_ext_status, "success", 0) + + @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_uninstall_command') + def test_extension_upgrade_should_pass_when_continue_on_update_failue_is_true_and_prev_version_uninstall_fails( + self, patch_get_uninstall_command, *args): + test_data, exthandlers_handler, protocol = self._set_up_update_test_and_update_gs(patch_get_uninstall_command, + *args) + + with patch('azurelinuxagent.ga.exthandlers.HandlerManifest.is_continue_on_update_failure', return_value=True) \ + as mock_continue_on_update_failure: + # These are just testing the mocks have been called and asserting the test conditions have been met + exthandlers_handler.run() + self.assertEqual(1, patch_get_uninstall_command.call_count) + self.assertEqual(2, mock_continue_on_update_failure.call_count, + "This should be called twice, for both disable and uninstall") + + # Ensure the handler status and ext_status is successful + self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.1") + self._assert_ext_status(protocol.report_ext_status, "success", 0) + + @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_disable_command') + def 
test_extension_upgrade_should_fail_when_continue_on_update_failure_is_false_and_prev_version_disable_fails( + self, patch_get_disable_command, *args): + test_data, exthandlers_handler, protocol = self._set_up_update_test_and_update_gs(patch_get_disable_command, + *args) + + with patch('azurelinuxagent.ga.exthandlers.HandlerManifest.is_continue_on_update_failure', return_value=False) \ + as mock_continue_on_update_failure: + # These are just testing the mocks have been called and asserting the test conditions have been met + exthandlers_handler.run() + self.assertEqual(1, patch_get_disable_command.call_count) + self.assertEqual(1, mock_continue_on_update_failure.call_count, + "The first call would raise an exception") + + # Assert test scenario + self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=0, version="1.0.1") + + @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_uninstall_command') + def test_extension_upgrade_should_fail_when_continue_on_update_failure_is_false_and_prev_version_uninstall_fails( + self, patch_get_uninstall_command, *args): + test_data, exthandlers_handler, protocol = self._set_up_update_test_and_update_gs(patch_get_uninstall_command, + *args) + + with patch('azurelinuxagent.ga.exthandlers.HandlerManifest.is_continue_on_update_failure', return_value=False) \ + as mock_continue_on_update_failure: + # These are just testing the mocks have been called and asserting the test conditions have been met + exthandlers_handler.run() + self.assertEqual(1, patch_get_uninstall_command.call_count) + self.assertEqual(2, mock_continue_on_update_failure.call_count, + "The second call would raise an exception") + + # Assert test scenario + self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=0, version="1.0.1") + + @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_disable_command') + def 
test_extension_upgrade_should_fail_when_continue_on_update_failure_is_true_and_old_disable_and_new_enable_fails( + self, patch_get_disable_command, *args): + test_data, exthandlers_handler, protocol = self._set_up_update_test_and_update_gs(patch_get_disable_command, + *args) + + with patch('azurelinuxagent.ga.exthandlers.HandlerManifest.is_continue_on_update_failure', return_value=True) \ + as mock_continue_on_update_failure: + with patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_enable_command', return_value="exit 1")\ + as patch_get_enable: + + # These are just testing the mocks have been called and asserting the test conditions have been met + exthandlers_handler.run() + self.assertEqual(1, patch_get_disable_command.call_count) + self.assertEqual(2, mock_continue_on_update_failure.call_count) + self.assertEqual(1, patch_get_enable.call_count) + + # Assert test scenario + self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=1, version="1.0.1") + + @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.is_continue_on_update_failure', return_value=True) + def test_uninstall_rc_env_var_should_report_not_run_for_non_update_calls_to_exthandler_run( + self, patch_continue_on_update, *args): + test_data, exthandlers_handler, protocol = self._set_up_update_test_and_update_gs(Mock(), *args) + + with patch.object(CGroupConfigurator.get_instance(), "start_extension_command", + side_effect=[ExtensionError("Disable Failed"), "ok", ExtensionError("uninstall failed"), + "ok", "ok", "New enable run ok"]) as patch_start_cmd: + exthandlers_handler.run() + _, update_kwargs = patch_start_cmd.call_args_list[1] + _, install_kwargs = patch_start_cmd.call_args_list[3] + _, enable_kwargs = patch_start_cmd.call_args_list[4] + + # Ensure that the env variables were present in the first run when failures were thrown for update + self.assertEqual(2, patch_continue_on_update.call_count) + self.assertTrue( + '-update' in update_kwargs['command'] and 
ExtCommandEnvVariable.DisableReturnCode in update_kwargs['env'], + "The update command call should have Disable Failed in env variable") + self.assertTrue( + '-install' in install_kwargs['command'] and ExtCommandEnvVariable.DisableReturnCode not in install_kwargs[ + 'env'], + "The Disable Failed env variable should be removed from install command") + self.assertTrue( + '-install' in install_kwargs['command'] and ExtCommandEnvVariable.UninstallReturnCode in install_kwargs[ + 'env'], + "The install command call should have Uninstall Failed in env variable") + self.assertTrue( + '-enable' in enable_kwargs['command'] and ExtCommandEnvVariable.UninstallReturnCode in enable_kwargs['env'], + "The enable command call should have Uninstall Failed in env variable") + + # Initiating another run which shouldn't have any failed env variables in it if no failures + # Updating Incarnation + test_data.goal_state = test_data.goal_state.replace("2<", "3<") + exthandlers_handler.run() + _, new_enable_kwargs = patch_start_cmd.call_args + + # Ensure the new run didn't have Disable Return Code env variable + self.assertNotIn(ExtCommandEnvVariable.DisableReturnCode, new_enable_kwargs['env']) + + # Ensure the new run had Uninstall Return Code env variable == NOT_RUN + self.assertIn(ExtCommandEnvVariable.UninstallReturnCode, new_enable_kwargs['env']) + self.assertTrue( + new_enable_kwargs['env'][ExtCommandEnvVariable.UninstallReturnCode] == NOT_RUN) + + # Ensure the handler status and ext_status is successful + self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.1") + self._assert_ext_status(protocol.report_ext_status, "success", 0) + + def test_ext_path_and_version_env_variables_set_for_ever_operation(self, *args): + test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + with patch.object(CGroupConfigurator.get_instance(), "start_extension_command") as patch_start_cmd: + 
exthandlers_handler.run() + + # Extension Path and Version should be set for all launch_command calls + for args, kwargs in patch_start_cmd.call_args_list: + self.assertIn(ExtCommandEnvVariable.ExtensionPath, kwargs['env']) + self.assertIn('OSTCExtensions.ExampleHandlerLinux-1.0.0', + kwargs['env'][ExtCommandEnvVariable.ExtensionPath]) + self.assertIn(ExtCommandEnvVariable.ExtensionVersion, kwargs['env']) + self.assertEqual("1.0.0", kwargs['env'][ExtCommandEnvVariable.ExtensionVersion]) + + self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.0") + + @patch("azurelinuxagent.common.cgroupconfigurator.handle_process_completion", side_effect="Process Successful") + def test_ext_sequence_no_should_be_set_for_every_command_call(self, _, *args): + test_data = WireProtocolData(DATA_FILE_MULTIPLE_EXT) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + with patch("subprocess.Popen") as patch_popen: + exthandlers_handler.run() + + for _, kwargs in patch_popen.call_args_list: + self.assertIn(ExtCommandEnvVariable.ExtensionSeqNumber, kwargs['env']) + self.assertEqual(kwargs['env'][ExtCommandEnvVariable.ExtensionSeqNumber], "0") + + self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.0") + + # Next incarnation and seq for extensions, update version + test_data.goal_state = test_data.goal_state.replace("1<", "2<") + test_data.ext_conf = test_data.ext_conf.replace('version="1.0.0"', 'version="1.0.1"') + test_data.ext_conf = test_data.ext_conf.replace('seqNo="0"', 'seqNo="1"') + test_data.manifest = test_data.manifest.replace('1.0.0', '1.0.1') + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + with patch("subprocess.Popen") as patch_popen: + exthandlers_handler.run() + + for _, kwargs in patch_popen.call_args_list: + self.assertIn(ExtCommandEnvVariable.ExtensionSeqNumber, kwargs['env']) + 
self.assertEqual(kwargs['env'][ExtCommandEnvVariable.ExtensionSeqNumber], "1") + + self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.1") + + def test_ext_sequence_no_should_be_set_from_within_extension(self, *args): + + test_file_name = "testfile.sh" + handler_json = { + "installCommand": test_file_name, + "uninstallCommand": test_file_name, + "updateCommand": test_file_name, + "enableCommand": test_file_name, + "disableCommand": test_file_name, + "rebootAfterInstall": False, + "reportHeartbeat": False, + "continueOnUpdateFailure": False + } + manifest = HandlerManifest({'handlerManifest': handler_json}) + + # Script prints env variables passed to this process and prints all starting with ConfigSequenceNumber + test_file = """ + printenv | grep ConfigSequenceNumber + """ + + base_dir = os.path.join(conf.get_lib_dir(), 'OSTCExtensions.ExampleHandlerLinux-1.0.0', test_file_name) + self.create_script(test_file_name, test_file, base_dir) + + test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + expected_seq_no = 0 + + with patch.object(ExtHandlerInstance, "load_manifest", return_value=manifest): + with patch.object(ExtHandlerInstance, 'report_event') as mock_report_event: + exthandlers_handler.run() + + for _, kwargs in mock_report_event.call_args_list: + # The output is of the format - 'testfile.sh\n[stdout]ConfigSequenceNumber=N\n[stderr]' + if test_file_name not in kwargs['message']: + continue + self.assertIn("{0}={1}".format(ExtCommandEnvVariable.ExtensionSeqNumber, expected_seq_no), + kwargs['message']) + + # Update goal state, extension version and seq no + test_data.goal_state = test_data.goal_state.replace("1<", "2<") + test_data.ext_conf = test_data.ext_conf.replace('version="1.0.0"', 'version="1.0.1"') + test_data.ext_conf = test_data.ext_conf.replace('seqNo="0"', 'seqNo="1"') + test_data.manifest = test_data.manifest.replace('1.0.0', 
'1.0.1') + expected_seq_no = 1 + base_dir = os.path.join(conf.get_lib_dir(), 'OSTCExtensions.ExampleHandlerLinux-1.0.1', test_file_name) + self.create_script(test_file_name, test_file, base_dir) + + with patch.object(ExtHandlerInstance, 'report_event') as mock_report_event: + exthandlers_handler.run() + + for _, kwargs in mock_report_event.call_args_list: + # The output is of the format - 'testfile.sh\n[stdout]ConfigSequenceNumber=N\n[stderr]' + if test_file_name not in kwargs['message']: + continue + self.assertIn("{0}={1}".format(ExtCommandEnvVariable.ExtensionSeqNumber, expected_seq_no), + kwargs['message']) + + def test_correct_exit_code_should_be_set_on_uninstall_cmd_failure(self, *args): + test_file_name = "testfile.sh" + test_error_file_name = "error.sh" + handler_json = { + "installCommand": test_file_name + " -install", + "uninstallCommand": test_error_file_name, + "updateCommand": test_file_name + " -update", + "enableCommand": test_file_name + " -enable", + "disableCommand": test_error_file_name, + "rebootAfterInstall": False, + "reportHeartbeat": False, + "continueOnUpdateFailure": True + } + manifest = HandlerManifest({'handlerManifest': handler_json}) + + # Script prints env variables passed to this process and prints all starting with AZURE_ + test_file = """ + printenv | grep AZURE_ + """ + + exit_code = 151 + test_error_content = """ + exit %s + """ % exit_code + + error_dir = os.path.join(conf.get_lib_dir(), 'OSTCExtensions.ExampleHandlerLinux-1.0.0', test_error_file_name) + self.create_script(test_error_file_name, test_error_content, error_dir) + + test_data, exthandlers_handler, protocol = self._set_up_update_test_and_update_gs(Mock(), *args) + + base_dir = os.path.join(conf.get_lib_dir(), 'OSTCExtensions.ExampleHandlerLinux-1.0.1', test_file_name) + self.create_script(test_file_name, test_file, base_dir) + + with patch("azurelinuxagent.ga.exthandlers.ExtHandlerInstance.load_manifest", return_value=manifest): + with 
patch.object(ExtHandlerInstance, 'report_event') as mock_report_event: + exthandlers_handler.run() + + _, disable_kwargs = mock_report_event.call_args_list[1] + _, update_kwargs = mock_report_event.call_args_list[2] + _, uninstall_kwargs = mock_report_event.call_args_list[3] + _, install_kwargs = mock_report_event.call_args_list[4] + _, enable_kwargs = mock_report_event.call_args_list[5] + + self.assertIn("%s=%s" % (ExtCommandEnvVariable.DisableReturnCode, exit_code), update_kwargs['message']) + self.assertIn("%s=%s" % (ExtCommandEnvVariable.UninstallReturnCode, exit_code), install_kwargs['message']) + self.assertIn("%s=%s" % (ExtCommandEnvVariable.UninstallReturnCode, exit_code), enable_kwargs['message']) + + +@patch("azurelinuxagent.common.protocol.wire.CryptUtil") +@patch("azurelinuxagent.common.utils.restutil.http_get") +class TestExtensionSequencing(AgentTestCase): + + def _create_mock(self, mock_http_get, MockCryptUtil): + test_data = WireProtocolData(DATA_FILE) + + # Mock protocol to return test data + mock_http_get.side_effect = test_data.mock_http_get + MockCryptUtil.side_effect = test_data.mock_crypt_util + + protocol = WireProtocol("foo.bar") + protocol.detect() + protocol.report_ext_status = MagicMock() + protocol.report_vm_status = MagicMock() + protocol.get_artifacts_profile = MagicMock() + + handler = get_exthandlers_handler() + handler.protocol_util.get_protocol = Mock(return_value=protocol) + handler.ext_handlers, handler.last_etag = protocol.get_ext_handlers() + conf.get_enable_overprovisioning = Mock(return_value=False) + + def wait_for_handler_successful_completion(prev_handler, wait_until): + return orig_wait_for_handler_successful_completion(prev_handler, + datetime.datetime.utcnow() + datetime.timedelta( + seconds=5)) + + orig_wait_for_handler_successful_completion = handler.wait_for_handler_successful_completion + handler.wait_for_handler_successful_completion = wait_for_handler_successful_completion + return handler + + def 
_set_dependency_levels(self, dependency_levels, exthandlers_handler): + ''' + Creates extensions with the given dependencyLevel + ''' + handler_map = dict() + all_handlers = [] + for h, level in dependency_levels: + if handler_map.get(h) is None: + handler = ExtHandler(name=h) + extension = Extension(name=h) + handler.properties.state = "enabled" + handler.properties.extensions.append(extension) + handler_map[h] = handler + all_handlers.append(handler) + + handler = handler_map[h] + for ext in handler.properties.extensions: + ext.dependencyLevel = level + + exthandlers_handler.ext_handlers.extHandlers = [] + for handler in all_handlers: + exthandlers_handler.ext_handlers.extHandlers.append(handler) + + def _validate_extension_sequence(self, expected_sequence, exthandlers_handler): + installed_extensions = [a[0].name for a, k in exthandlers_handler.handle_ext_handler.call_args_list] + self.assertListEqual(expected_sequence, installed_extensions, + "Expected and actual list of extensions are not equal") + + def _run_test(self, extensions_to_be_failed, expected_sequence, exthandlers_handler): + ''' + Mocks get_ext_handling_status() to mimic error status for a given extension. + Calls ExtHandlersHandler.run() + Verifies if the ExtHandlersHandler.handle_ext_handler() was called with appropriate extensions + in the expected order. + ''' + + def get_ext_handling_status(ext): + status = "error" if ext.name in extensions_to_be_failed else "success" + return status + + ExtHandlerInstance.get_ext_handling_status = MagicMock(side_effect=get_ext_handling_status) + exthandlers_handler.handle_ext_handler = MagicMock() + exthandlers_handler.run() + self._validate_extension_sequence(expected_sequence, exthandlers_handler) + + def test_handle_ext_handlers(self, *args): + ''' + Tests extension sequencing among multiple extensions with dependencies. + This test introduces failure in all possible levels and extensions. 
+ Verifies that the sequencing is in the expected order and a failure in one extension + skips the rest of the extensions in the sequence. + ''' + exthandlers_handler = self._create_mock(*args) + + self._set_dependency_levels([("A", 3), ("B", 2), ("C", 2), ("D", 1), ("E", 1), ("F", 1), ("G", 1)], + exthandlers_handler) + + extensions_to_be_failed = [] + expected_sequence = ["D", "E", "F", "G", "B", "C", "A"] + self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) + + extensions_to_be_failed = ["D"] + expected_sequence = ["D"] + self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) + + extensions_to_be_failed = ["E"] + expected_sequence = ["D", "E"] + self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) + + extensions_to_be_failed = ["F"] + expected_sequence = ["D", "E", "F"] + self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) + + extensions_to_be_failed = ["G"] + expected_sequence = ["D", "E", "F", "G"] + self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) + + extensions_to_be_failed = ["B"] + expected_sequence = ["D", "E", "F", "G", "B"] + self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) + + extensions_to_be_failed = ["C"] + expected_sequence = ["D", "E", "F", "G", "B", "C"] + self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) + + extensions_to_be_failed = ["A"] + expected_sequence = ["D", "E", "F", "G", "B", "C", "A"] + self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) + + def test_handle_ext_handlers_with_uninstallation(self, *args): + ''' + Tests extension sequencing among multiple extensions with dependencies when + some extension are to be uninstalled. + Verifies that the sequencing is in the expected order and the uninstallation takes place + prior to all the installation/enable. 
+ ''' + exthandlers_handler = self._create_mock(*args) + + # "A", "D" and "F" are marked as to be uninstalled + self._set_dependency_levels([("A", 0), ("B", 2), ("C", 2), ("D", 0), ("E", 1), ("F", 0), ("G", 1)], + exthandlers_handler) + + extensions_to_be_failed = [] + expected_sequence = ["A", "D", "F", "E", "G", "B", "C"] + self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) + + def test_handle_ext_handlers_fallback(self, *args): + ''' + This test makes sure that the extension sequencing is applied only when the user specifies + dependency information in the extension. + When there is no dependency specified, the agent is expected to assign dependencyLevel=0 to all extension. + Also, it is expected to install all the extension no matter if there is any failure in any of the extensions. + ''' + exthandlers_handler = self._create_mock(*args) + + self._set_dependency_levels([("A", 1), ("B", 1), ("C", 1), ("D", 1), ("E", 1), ("F", 1), ("G", 1)], + exthandlers_handler) + + # Expected sequence must contain all the extensions in the given order. 
+ # The following test cases verify against this same expected sequence no matter if any extension failed + expected_sequence = ["A", "B", "C", "D", "E", "F", "G"] + + # Make sure that failure in any extension does not prevent other extensions to be installed + extensions_to_be_failed = [] + self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) + + extensions_to_be_failed = ["A"] + self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) + + extensions_to_be_failed = ["B"] + self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) + + extensions_to_be_failed = ["C"] + self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) + + extensions_to_be_failed = ["D"] + self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) + + extensions_to_be_failed = ["E"] + self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) + + extensions_to_be_failed = ["F"] + self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) + + extensions_to_be_failed = ["G"] + self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) + + +class TestInVMArtifactsProfile(AgentTestCase): + def test_it_should_parse_boolean_values(self): + profile_json = '{ "onHold": true }' + profile = InVMArtifactsProfile(profile_json) + self.assertTrue(profile.is_on_hold(), "Failed to parse '{0}'".format(profile_json)) + + profile_json = '{ "onHold": false }' + profile = InVMArtifactsProfile(profile_json) + self.assertFalse(profile.is_on_hold(), "Failed to parse '{0}'".format(profile_json)) + + def test_it_should_parse_boolean_values_encoded_as_strings(self): + profile_json = '{ "onHold": "true" }' + profile = InVMArtifactsProfile(profile_json) + self.assertTrue(profile.is_on_hold(), "Failed to parse '{0}'".format(profile_json)) + + profile_json = '{ "onHold": "false" }' + profile = InVMArtifactsProfile(profile_json) + 
self.assertFalse(profile.is_on_hold(), "Failed to parse '{0}'".format(profile_json)) + + profile_json = '{ "onHold": "TRUE" }' + profile = InVMArtifactsProfile(profile_json) + self.assertTrue(profile.is_on_hold(), "Failed to parse '{0}'".format(profile_json)) + + +@skip_if_predicate_false(are_cgroups_enabled, "Does not run when Cgroups are not enabled") +@patch('time.sleep', side_effect=lambda _: mock_sleep(0.001)) +@patch("azurelinuxagent.common.cgroupapi.CGroupsApi._is_systemd", return_value=True) +@patch("azurelinuxagent.common.conf.get_cgroups_enforce_limits", return_value=False) +@patch("azurelinuxagent.common.protocol.wire.CryptUtil") +@patch("azurelinuxagent.common.utils.restutil.http_get") +class TestExtensionWithCGroupsEnabled(AgentTestCase): + def _assert_handler_status(self, report_vm_status, expected_status, + expected_ext_count, version, + expected_handler_name="OSTCExtensions.ExampleHandlerLinux"): + self.assertTrue(report_vm_status.called) + args, kw = report_vm_status.call_args + vm_status = args[0] + self.assertNotEquals(0, len(vm_status.vmAgent.extensionHandlers)) + handler_status = vm_status.vmAgent.extensionHandlers[0] + self.assertEquals(expected_status, handler_status.status) + self.assertEquals(expected_handler_name, + handler_status.name) + self.assertEquals(version, handler_status.version) + self.assertEquals(expected_ext_count, len(handler_status.extensions)) + return + + def _assert_no_handler_status(self, report_vm_status): + self.assertTrue(report_vm_status.called) + args, kw = report_vm_status.call_args + vm_status = args[0] + self.assertEquals(0, len(vm_status.vmAgent.extensionHandlers)) + return + + def _assert_ext_status(self, report_ext_status, expected_status, + expected_seq_no): + self.assertTrue(report_ext_status.called) + args, kw = report_ext_status.call_args + ext_status = args[-1] + self.assertEquals(expected_status, ext_status.status) + self.assertEquals(expected_seq_no, ext_status.sequenceNumber) + + def _create_mock(self, 
test_data, mock_http_get, mock_crypt_util, *args): + """Test enable/disable/uninstall of an extension""" + ext_handler = get_exthandlers_handler() + monitor_handler = get_monitor_handler() + + # Mock protocol to return test data + mock_http_get.side_effect = test_data.mock_http_get + mock_crypt_util.side_effect = test_data.mock_crypt_util + + protocol = WireProtocol("foo.bar") + protocol.detect() + protocol.report_ext_status = MagicMock() + protocol.report_vm_status = MagicMock() + + ext_handler.protocol_util.get_protocol = Mock(return_value=protocol) + monitor_handler.protocol_util.get_protocol = Mock(return_value=protocol) + return ext_handler, monitor_handler, protocol + + @attr('requires_sudo') + def test_ext_handler_with_cgroup_enabled(self, *args): + self.assertTrue(i_am_root(), "Test does not run when non-root") + + test_data = WireProtocolData(DATA_FILE) + exthandlers_handler, _, protocol = self._create_mock(test_data, *args) + + # Test enable scenario. + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") + self._assert_ext_status(protocol.report_ext_status, "success", 0) + + # Test goal state not changed + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") + + # Test goal state changed + test_data.goal_state = test_data.goal_state.replace("1<", + "2<") + test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"0\"", + "seqNo=\"1\"") + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") + self._assert_ext_status(protocol.report_ext_status, "success", 1) + + # Test hotfix + test_data.goal_state = test_data.goal_state.replace("2<", + "3<") + test_data.ext_conf = test_data.ext_conf.replace("1.0.0", "1.1.1") + test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"1\"", + "seqNo=\"2\"") + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.1") + 
self._assert_ext_status(protocol.report_ext_status, "success", 2) + + # Test upgrade + test_data.goal_state = test_data.goal_state.replace("3<", + "4<") + test_data.ext_conf = test_data.ext_conf.replace("1.1.1", "1.2.0") + test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"2\"", + "seqNo=\"3\"") + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.2.0") + self._assert_ext_status(protocol.report_ext_status, "success", 3) + + # Test disable + test_data.goal_state = test_data.goal_state.replace("4<", + "5<") + test_data.ext_conf = test_data.ext_conf.replace("enabled", "disabled") + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "NotReady", + 1, "1.2.0") + + # Test uninstall + test_data.goal_state = test_data.goal_state.replace("5<", + "6<") + test_data.ext_conf = test_data.ext_conf.replace("disabled", "uninstall") + exthandlers_handler.run() + self._assert_no_handler_status(protocol.report_vm_status) + + # Test uninstall again! + test_data.goal_state = test_data.goal_state.replace("6<", + "7<") + exthandlers_handler.run() + self._assert_no_handler_status(protocol.report_vm_status) + + @patch('azurelinuxagent.common.event.EventLogger.add_event') + @attr('requires_sudo') + def test_ext_handler_and_monitor_handler_with_cgroup_enabled(self, patch_add_event, *args): + self.assertTrue(i_am_root(), "Test does not run when non-root") + + test_data = WireProtocolData(DATA_FILE) + exthandlers_handler, monitor_handler, protocol = self._create_mock(test_data, *args) + + monitor_handler.last_cgroup_polling_telemetry = datetime.datetime.utcnow() - timedelta(hours=1) + monitor_handler.last_cgroup_report_telemetry = datetime.datetime.utcnow() - timedelta(hours=1) + + # Test enable scenario. 
+ exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") + self._assert_ext_status(protocol.report_ext_status, "success", 0) + + monitor_handler.poll_telemetry_metrics() + monitor_handler.send_telemetry_metrics() + + self.assertEqual(patch_add_event.call_count, 4) + + name = patch_add_event.call_args[0][0] + fields = patch_add_event.call_args[1] + + self.assertEqual(name, "WALinuxAgent") + self.assertEqual(fields["op"], "ExtensionMetricsData") + self.assertEqual(fields["is_success"], True) + self.assertEqual(fields["log_event"], False) + self.assertEqual(fields["is_internal"], False) + self.assertIsInstance(fields["message"], ustr) + + monitor_handler.stop() + + @attr('requires_sudo') + def test_ext_handler_with_systemd_cgroup_enabled(self, *args): + self.assertTrue(i_am_root(), "Test does not run when non-root") + + from azurelinuxagent.common.cgroupapi import CGroupsApi + print(CGroupsApi._is_systemd()) + + test_data = WireProtocolData(DATA_FILE) + exthandlers_handler, _, protocol = self._create_mock(test_data, *args) + + # Test enable scenario. 
+ exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") + self._assert_ext_status(protocol.report_ext_status, "success", 0) + + # Test goal state not changed + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") + + # Test goal state changed + test_data.goal_state = test_data.goal_state.replace("1<", + "2<") + test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"0\"", + "seqNo=\"1\"") + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") + self._assert_ext_status(protocol.report_ext_status, "success", 1) + + # Test hotfix + test_data.goal_state = test_data.goal_state.replace("2<", + "3<") + test_data.ext_conf = test_data.ext_conf.replace("1.0.0", "1.1.1") + test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"1\"", + "seqNo=\"2\"") + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.1") + self._assert_ext_status(protocol.report_ext_status, "success", 2) + + # Test upgrade + test_data.goal_state = test_data.goal_state.replace("3<", + "4<") + test_data.ext_conf = test_data.ext_conf.replace("1.1.1", "1.2.0") + test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"2\"", + "seqNo=\"3\"") + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.2.0") + self._assert_ext_status(protocol.report_ext_status, "success", 3) + + # Test disable + test_data.goal_state = test_data.goal_state.replace("4<", + "5<") + test_data.ext_conf = test_data.ext_conf.replace("enabled", "disabled") + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "NotReady", + 1, "1.2.0") + + # Test uninstall + test_data.goal_state = test_data.goal_state.replace("5<", + "6<") + test_data.ext_conf = test_data.ext_conf.replace("disabled", "uninstall") + exthandlers_handler.run() + 
self._assert_no_handler_status(protocol.report_vm_status) + + # Test uninstall again! + test_data.goal_state = test_data.goal_state.replace("6<", + "7<") + exthandlers_handler.run() + self._assert_no_handler_status(protocol.report_vm_status) + + +class TestExtensionUpdateOnFailure(ExtensionTestCase): + + @staticmethod + def _get_ext_handler_instance(name, version, handler=None, continue_on_update_failure=False): + + handler_json = { + "installCommand": "sample.py -install", + "uninstallCommand": "sample.py -uninstall", + "updateCommand": "sample.py -update", + "enableCommand": "sample.py -enable", + "disableCommand": "sample.py -disable", + "rebootAfterInstall": False, + "reportHeartbeat": False, + "continueOnUpdateFailure": continue_on_update_failure + } + + if handler: + handler_json.update(handler) + + ext_handler_properties = ExtHandlerProperties() + ext_handler_properties.version = version + ext_handler = ExtHandler(name=name) + ext_handler.properties = ext_handler_properties + ext_handler_i = ExtHandlerInstance(ext_handler=ext_handler, protocol=None) + ext_handler_i.load_manifest = MagicMock(return_value=HandlerManifest({'handlerManifest': handler_json})) + fileutil.mkdir(ext_handler_i.get_base_dir()) + return ext_handler_i + + def test_disable_failed_env_variable_should_be_set_for_update_cmd_when_continue_on_update_failure_is_true( + self, *args): + old_handler_i = self._get_ext_handler_instance('foo', '1.0.0') + new_handler_i = self._get_ext_handler_instance('foo', '1.0.1', continue_on_update_failure=True) + + with patch.object(CGroupConfigurator.get_instance(), "start_extension_command", + side_effect=ExtensionError('disable Failed')) as patch_start_cmd: + with self.assertRaises(ExtensionError): + ExtHandlersHandler._update_extension_handler_and_return_if_failed(old_handler_i, new_handler_i) + + args, kwargs = patch_start_cmd.call_args + + self.assertTrue('-update' in kwargs['command'] and ExtCommandEnvVariable.DisableReturnCode in kwargs['env'], + "The 
update command should have Disable Failed in env variable") + + def test_uninstall_failed_env_variable_should_set_for_install_when_continue_on_update_failure_is_true( + self, *args): + old_handler_i = self._get_ext_handler_instance('foo', '1.0.0') + new_handler_i = self._get_ext_handler_instance('foo', '1.0.1', continue_on_update_failure=True) + + with patch.object(CGroupConfigurator.get_instance(), "start_extension_command", + side_effect=['ok', 'ok', ExtensionError('uninstall Failed'), 'ok']) as patch_start_cmd: + + ExtHandlersHandler._update_extension_handler_and_return_if_failed(old_handler_i, new_handler_i) + + args, kwargs = patch_start_cmd.call_args + + self.assertTrue('-install' in kwargs['command'] and ExtCommandEnvVariable.UninstallReturnCode in kwargs['env'], + "The install command should have Uninstall Failed in env variable") + + def test_extension_error_should_be_raised_when_continue_on_update_failure_is_false_on_disable_failure(self, *args): + old_handler_i = self._get_ext_handler_instance('foo', '1.0.0') + new_handler_i = self._get_ext_handler_instance('foo', '1.0.1', continue_on_update_failure=False) + + with patch.object(ExtHandlerInstance, "disable", side_effect=ExtensionError("Disable Failed")): + with self.assertRaises(ExtensionUpdateError) as error: + # Ensure the error is of type ExtensionUpdateError + ExtHandlersHandler._update_extension_handler_and_return_if_failed(old_handler_i, new_handler_i) + + msg = str(error.exception) + self.assertIn("Disable Failed", msg, "Update should fail with Disable Failed error") + self.assertIn("ExtensionError", msg, "The Exception should initially be propagated as ExtensionError") + + @patch("azurelinuxagent.common.cgroupconfigurator.handle_process_completion", side_effect="Process Successful") + def test_extension_error_should_be_raised_when_continue_on_update_failure_is_false_on_uninstall_failure(self, *args): + old_handler_i = self._get_ext_handler_instance('foo', '1.0.0') + new_handler_i = 
self._get_ext_handler_instance('foo', '1.0.1', continue_on_update_failure=False) + + with patch.object(ExtHandlerInstance, "uninstall", side_effect=ExtensionError("Uninstall Failed")): + with self.assertRaises(ExtensionUpdateError) as error: + # Ensure the error is of type ExtensionUpdateError + ExtHandlersHandler._update_extension_handler_and_return_if_failed(old_handler_i, new_handler_i) + + msg = str(error.exception) + self.assertIn("Uninstall Failed", msg, "Update should fail with Uninstall Failed error") + self.assertIn("ExtensionError", msg, "The Exception should initially be propagated as ExtensionError") + + @patch("azurelinuxagent.common.cgroupconfigurator.handle_process_completion", side_effect="Process Successful") + def test_extension_error_should_be_raised_when_continue_on_update_failure_is_true_on_command_failure(self, *args): + old_handler_i = self._get_ext_handler_instance('foo', '1.0.0') + new_handler_i = self._get_ext_handler_instance('foo', '1.0.1', continue_on_update_failure=True) + + # Disable Failed and update failed + with patch.object(ExtHandlerInstance, "disable", side_effect=ExtensionError("Disable Failed")): + with patch.object(ExtHandlerInstance, "update", side_effect=ExtensionError("Update Failed")): + with self.assertRaises(ExtensionError) as error: + ExtHandlersHandler._update_extension_handler_and_return_if_failed(old_handler_i, new_handler_i) + msg = str(error.exception) + self.assertIn("Update Failed", msg, "Update should fail with Update Failed error") + self.assertNotIn("ExtensionUpdateError", msg, "The exception should not be ExtensionUpdateError") + + # Uninstall Failed and install failed + with patch.object(ExtHandlerInstance, "uninstall", side_effect=ExtensionError("Uninstall Failed")): + with patch.object(ExtHandlerInstance, "install", side_effect=ExtensionError("Install Failed")): + with self.assertRaises(ExtensionError) as error: + ExtHandlersHandler._update_extension_handler_and_return_if_failed(old_handler_i, 
new_handler_i) + msg = str(error.exception) + self.assertIn("Install Failed", msg, "Update should fail with Install Failed error") + self.assertNotIn("ExtensionUpdateError", msg, "The exception should not be ExtensionUpdateError") + + @patch("azurelinuxagent.common.cgroupconfigurator.handle_process_completion", side_effect="Process Successful") + def test_env_variable_should_not_set_when_continue_on_update_failure_is_false(self, *args): + old_handler_i = self._get_ext_handler_instance('foo', '1.0.0') + new_handler_i = self._get_ext_handler_instance('foo', '1.0.1', continue_on_update_failure=False) + + # When Disable Fails + with patch.object(ExtHandlerInstance, "launch_command") as patch_launch_command: + with patch.object(ExtHandlerInstance, "disable", side_effect=ExtensionError("Disable Failed")): + with self.assertRaises(ExtensionUpdateError): + ExtHandlersHandler._update_extension_handler_and_return_if_failed(old_handler_i, new_handler_i) + + self.assertEqual(0, patch_launch_command.call_count, "Launch command shouldn't be called even once for" + " disable failures") + + # When Uninstall Fails + with patch.object(ExtHandlerInstance, "launch_command") as patch_launch_command: + with patch.object(ExtHandlerInstance, "uninstall", side_effect=ExtensionError("Uninstall Failed")): + with self.assertRaises(ExtensionUpdateError): + ExtHandlersHandler._update_extension_handler_and_return_if_failed(old_handler_i, new_handler_i) + + self.assertEqual(2, patch_launch_command.call_count, "Launch command should be called 2 times for " + "Disable->Update") + + @patch('time.sleep', side_effect=lambda _: mock_sleep(0.001)) + def test_failed_env_variables_should_be_set_from_within_extension_commands(self, *args): + """ + This test will test from the perspective of the extensions command weather the env variables are + being set for those processes + """ + + test_file_name = "testfile.sh" + update_file_name = test_file_name + " -update" + install_file_name = test_file_name + " 
-install" + old_handler_i = TestExtensionUpdateOnFailure._get_ext_handler_instance('foo', '1.0.0') + new_handler_i = TestExtensionUpdateOnFailure._get_ext_handler_instance( + 'foo', '1.0.1', + handler={"updateCommand": update_file_name, "installCommand": install_file_name}, + continue_on_update_failure=True + ) + + # Script prints env variables passed to this process and prints all starting with AZURE_ + test_file = """ + printenv | grep AZURE_ + """ + + self.create_script(file_name=test_file_name, contents=test_file, + file_path=os.path.join(new_handler_i.get_base_dir(), test_file_name)) + + with patch.object(new_handler_i, 'report_event', autospec=True) as mock_report: + # Since we're not mocking the azurelinuxagent.common.cgroupconfigurator..handle_process_completion, + # both disable.cmd and uninstall.cmd would raise ExtensionError exceptions and set the + # ExtCommandEnvVariable.DisableReturnCode and ExtCommandEnvVariable.UninstallReturnCode env variables. + # For update and install we're running the script above to print all the env variables starting with AZURE_ + # and verify accordingly if the corresponding env variables are set properly or not + ExtHandlersHandler._update_extension_handler_and_return_if_failed(old_handler_i, new_handler_i) + + _, update_kwargs = mock_report.call_args_list[0] + _, install_kwargs = mock_report.call_args_list[1] + + # Ensure we're checking variables for update scenario + self.assertIn(update_file_name, update_kwargs['message']) + self.assertIn(ExtCommandEnvVariable.DisableReturnCode, update_kwargs['message']) + self.assertTrue(ExtCommandEnvVariable.ExtensionPath in update_kwargs['message'] and + ExtCommandEnvVariable.ExtensionVersion in update_kwargs['message']) + self.assertNotIn(ExtCommandEnvVariable.UninstallReturnCode, update_kwargs['message']) + + # Ensure we're checking variables for install scenario + self.assertIn(install_file_name, install_kwargs['message']) + self.assertIn(ExtCommandEnvVariable.UninstallReturnCode, 
install_kwargs['message']) + self.assertTrue(ExtCommandEnvVariable.ExtensionPath in install_kwargs['message'] and + ExtCommandEnvVariable.ExtensionVersion in install_kwargs['message']) + self.assertNotIn(ExtCommandEnvVariable.DisableReturnCode, install_kwargs['message']) + + @patch('time.sleep', side_effect=lambda _: mock_sleep(0.001)) + def test_correct_exit_code_should_set_on_disable_cmd_failure(self, _): + test_env_file_name = "test_env.sh" + test_failure_file_name = "test_fail.sh" + # update_file_name = test_env_file_name + " -update" + old_handler_i = TestExtensionUpdateOnFailure._get_ext_handler_instance('foo', '1.0.0', handler={ + "disableCommand": test_failure_file_name, + "uninstallCommand": test_failure_file_name}) + new_handler_i = TestExtensionUpdateOnFailure._get_ext_handler_instance( + 'foo', '1.0.1', + handler={"updateCommand": test_env_file_name, + "updateMode": "UpdateWithoutInstall"}, + continue_on_update_failure=True + ) + + exit_code = 150 + error_test_file = """ + exit %s + """ % exit_code + + test_env_file = """ + printenv | grep AZURE_ + """ + + self.create_script(file_name=test_env_file_name, contents=test_env_file, + file_path=os.path.join(new_handler_i.get_base_dir(), test_env_file_name)) + self.create_script(file_name=test_failure_file_name, contents=error_test_file, + file_path=os.path.join(old_handler_i.get_base_dir(), test_failure_file_name)) + + with patch.object(new_handler_i, 'report_event', autospec=True) as mock_report: + + uninstall_rc = ExtHandlersHandler._update_extension_handler_and_return_if_failed(old_handler_i, new_handler_i) + _, kwargs = mock_report.call_args + + self.assertEqual(exit_code, uninstall_rc) + self.assertIn("%s=%s" % (ExtCommandEnvVariable.DisableReturnCode, exit_code), kwargs['message']) + + @patch('time.sleep', side_effect=lambda _: mock_sleep(0.0001)) + def test_timeout_code_should_set_on_cmd_timeout(self, _): + test_env_file_name = "test_env.sh" + test_failure_file_name = "test_fail.sh" + old_handler_i = 
TestExtensionUpdateOnFailure._get_ext_handler_instance('foo', '1.0.0', handler={ + "disableCommand": test_failure_file_name, + "uninstallCommand": test_failure_file_name}) + new_handler_i = TestExtensionUpdateOnFailure._get_ext_handler_instance( + 'foo', '1.0.1', + handler={"updateCommand": test_env_file_name + " -u", "installCommand": test_env_file_name + " -i"}, + continue_on_update_failure=True + ) + + exit_code = 156 + error_test_file = """ + sleep 1m + exit %s + """ % exit_code + + test_env_file = """ + printenv | grep AZURE_ + """ + + self.create_script(file_name=test_env_file_name, contents=test_env_file, + file_path=os.path.join(new_handler_i.get_base_dir(), test_env_file_name)) + self.create_script(file_name=test_failure_file_name, contents=error_test_file, + file_path=os.path.join(old_handler_i.get_base_dir(), test_failure_file_name)) + + with patch.object(new_handler_i, 'report_event', autospec=True) as mock_report: + uninstall_rc = ExtHandlersHandler._update_extension_handler_and_return_if_failed(old_handler_i, + new_handler_i) + _, update_kwargs = mock_report.call_args_list[0] + _, install_kwargs = mock_report.call_args_list[1] + + self.assertNotEqual(exit_code, uninstall_rc) + self.assertEqual(ExtensionErrorCodes.PluginHandlerScriptTimedout, uninstall_rc) + self.assertTrue(test_env_file_name + " -i" in install_kwargs['message'] and "%s=%s" % ( + ExtCommandEnvVariable.UninstallReturnCode, ExtensionErrorCodes.PluginHandlerScriptTimedout) in + install_kwargs['message']) + self.assertTrue(test_env_file_name + " -u" in update_kwargs['message'] and "%s=%s" % ( + ExtCommandEnvVariable.DisableReturnCode, ExtensionErrorCodes.PluginHandlerScriptTimedout) in + update_kwargs['message']) + + @patch('time.sleep', side_effect=lambda _: mock_sleep(0.0001)) + def test_success_code_should_set_in_env_variables_on_cmd_success(self, _): + test_env_file_name = "test_env.sh" + test_success_file_name = "test_success.sh" + old_handler_i = 
TestExtensionUpdateOnFailure._get_ext_handler_instance('foo', '1.0.0', handler={ + "disableCommand": test_success_file_name, + "uninstallCommand": test_success_file_name}) + new_handler_i = TestExtensionUpdateOnFailure._get_ext_handler_instance( + 'foo', '1.0.1', + handler={"updateCommand": test_env_file_name + " -u", "installCommand": test_env_file_name + " -i"}, + continue_on_update_failure=False + ) + + exit_code = 0 + success_test_file = """ + exit %s + """ % exit_code + + test_env_file = """ + printenv | grep AZURE_ + """ + + self.create_script(file_name=test_env_file_name, contents=test_env_file, + file_path=os.path.join(new_handler_i.get_base_dir(), test_env_file_name)) + self.create_script(file_name=test_success_file_name, contents=success_test_file, + file_path=os.path.join(old_handler_i.get_base_dir(), test_success_file_name)) + + with patch.object(new_handler_i, 'report_event', autospec=True) as mock_report: + uninstall_rc = ExtHandlersHandler._update_extension_handler_and_return_if_failed(old_handler_i, + new_handler_i) + _, update_kwargs = mock_report.call_args_list[0] + _, install_kwargs = mock_report.call_args_list[1] + + self.assertEqual(exit_code, uninstall_rc) + self.assertTrue(test_env_file_name + " -i" in install_kwargs['message'] and "%s=%s" % ( + ExtCommandEnvVariable.UninstallReturnCode, exit_code) in install_kwargs['message']) + self.assertTrue(test_env_file_name + " -u" in update_kwargs['message'] and "%s=%s" % ( + ExtCommandEnvVariable.DisableReturnCode, exit_code) in update_kwargs['message']) if __name__ == '__main__': unittest.main() - diff -Nru walinuxagent-2.2.21+really2.2.20/tests/ga/test_exthandlers_download_extension.py walinuxagent-2.2.45/tests/ga/test_exthandlers_download_extension.py --- walinuxagent-2.2.21+really2.2.20/tests/ga/test_exthandlers_download_extension.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/ga/test_exthandlers_download_extension.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,211 @@ +# 
Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the Apache License. + +import zipfile, time + +from azurelinuxagent.common.protocol.restapi import ExtHandler, ExtHandlerProperties, ExtHandlerPackage, ExtHandlerVersionUri +from azurelinuxagent.common.protocol.wire import WireProtocol +from azurelinuxagent.ga.exthandlers import ExtHandlerInstance, NUMBER_OF_DOWNLOAD_RETRIES +from azurelinuxagent.common.exception import ExtensionDownloadError, ExtensionErrorCodes +from tests.tools import * + + +class DownloadExtensionTestCase(AgentTestCase): + """ + Test cases for launch_command + """ + @classmethod + def setUpClass(cls): + AgentTestCase.setUpClass() + cls.mock_cgroups = patch("azurelinuxagent.ga.exthandlers.CGroupConfigurator") + cls.mock_cgroups.start() + + @classmethod + def tearDownClass(cls): + cls.mock_cgroups.stop() + + AgentTestCase.tearDownClass() + + def setUp(self): + AgentTestCase.setUp(self) + + ext_handler_properties = ExtHandlerProperties() + ext_handler_properties.version = "1.0.0" + ext_handler = ExtHandler(name='Microsoft.CPlat.Core.RunCommandLinux') + ext_handler.properties = ext_handler_properties + + protocol = WireProtocol("http://Microsoft.CPlat.Core.RunCommandLinux/foo-bar") + + self.pkg = ExtHandlerPackage() + self.pkg.uris = [ ExtHandlerVersionUri(), ExtHandlerVersionUri(), ExtHandlerVersionUri(), ExtHandlerVersionUri(), ExtHandlerVersionUri() ] + self.pkg.uris[0].uri = 'https://zrdfepirv2cy4prdstr00a.blob.core.windows.net/f72653efd9e349ed9842c8b99e4c1712-foobar/Microsoft.CPlat.Core__RunCommandLinux__1.0.0' + self.pkg.uris[1].uri = 'https://zrdfepirv2cy4prdstr01a.blob.core.windows.net/f72653efd9e349ed9842c8b99e4c1712-foobar/Microsoft.CPlat.Core__RunCommandLinux__1.0.0' + self.pkg.uris[2].uri = 'https://zrdfepirv2cy4prdstr02a.blob.core.windows.net/f72653efd9e349ed9842c8b99e4c1712-foobar/Microsoft.CPlat.Core__RunCommandLinux__1.0.0' + self.pkg.uris[3].uri = 
'https://zrdfepirv2cy4prdstr03a.blob.core.windows.net/f72653efd9e349ed9842c8b99e4c1712-foobar/Microsoft.CPlat.Core__RunCommandLinux__1.0.0' + self.pkg.uris[4].uri = 'https://zrdfepirv2cy4prdstr04a.blob.core.windows.net/f72653efd9e349ed9842c8b99e4c1712-foobar/Microsoft.CPlat.Core__RunCommandLinux__1.0.0' + + self.ext_handler_instance = ExtHandlerInstance(ext_handler=ext_handler, protocol=protocol) + self.ext_handler_instance.pkg = self.pkg + + self.extension_dir = os.path.join(self.tmp_dir, "Microsoft.CPlat.Core.RunCommandLinux-1.0.0") + self.mock_get_base_dir = patch("azurelinuxagent.ga.exthandlers.ExtHandlerInstance.get_base_dir", return_value=self.extension_dir) + self.mock_get_base_dir.start() + + self.mock_get_log_dir = patch("azurelinuxagent.ga.exthandlers.ExtHandlerInstance.get_log_dir", return_value=self.tmp_dir) + self.mock_get_log_dir.start() + + self.agent_dir = self.tmp_dir + self.mock_get_lib_dir = patch("azurelinuxagent.ga.exthandlers.conf.get_lib_dir", return_value=self.agent_dir) + self.mock_get_lib_dir.start() + + def tearDown(self): + self.mock_get_lib_dir.stop() + self.mock_get_log_dir.stop() + self.mock_get_base_dir.stop() + + AgentTestCase.tearDown(self) + + _extension_command = "RunCommandLinux.sh" + + @staticmethod + def _create_zip_file(filename): + file = None + try: + file = zipfile.ZipFile(filename, "w") + info = zipfile.ZipInfo(DownloadExtensionTestCase._extension_command) + info.date_time = time.localtime(time.time())[:6] + info.compress_type = zipfile.ZIP_DEFLATED + file.writestr(info, "#!/bin/sh\necho 'RunCommandLinux executed successfully'\n") + finally: + if file is not None: + file.close() + + @staticmethod + def _create_invalid_zip_file(filename): + with open(filename, "w") as file: + file.write("An invalid ZIP file\n") + + def _get_extension_package_file(self): + return os.path.join(self.agent_dir, self.ext_handler_instance.get_extension_package_zipfile_name()) + + def _get_extension_command_file(self): + return 
os.path.join(self.extension_dir, DownloadExtensionTestCase._extension_command) + + def _assert_download_and_expand_succeeded(self): + self.assertTrue(os.path.exists(self._get_extension_package_file()), "The extension package was not downloaded to the expected location") + self.assertTrue(os.path.exists(self._get_extension_command_file()), "The extension package was not expanded to the expected location") + + def test_it_should_download_and_expand_extension_package(self): + def download_ext_handler_pkg(_uri, destination): + DownloadExtensionTestCase._create_zip_file(destination) + return True + + with patch("azurelinuxagent.common.protocol.wire.WireProtocol.download_ext_handler_pkg", side_effect=download_ext_handler_pkg) as mock_download_ext_handler_pkg: + with patch("azurelinuxagent.ga.exthandlers.ExtHandlerInstance.report_event") as mock_report_event: + self.ext_handler_instance.download() + + # first download attempt should succeed + mock_download_ext_handler_pkg.assert_called_once() + mock_report_event.assert_called_once() + + self._assert_download_and_expand_succeeded() + + def test_it_should_use_existing_extension_package_when_already_downloaded(self): + DownloadExtensionTestCase._create_zip_file(self._get_extension_package_file()) + + with patch("azurelinuxagent.common.protocol.wire.WireProtocol.download_ext_handler_pkg") as mock_download_ext_handler_pkg: + with patch("azurelinuxagent.ga.exthandlers.ExtHandlerInstance.report_event") as mock_report_event: + self.ext_handler_instance.download() + + mock_download_ext_handler_pkg.assert_not_called() + mock_report_event.assert_not_called() + + self.assertTrue(os.path.exists(self._get_extension_command_file()), "The extension package was not expanded to the expected location") + + def test_it_should_ignore_existing_extension_package_when_it_is_invalid(self): + def download_ext_handler_pkg(_uri, destination): + DownloadExtensionTestCase._create_zip_file(destination) + return True + + 
DownloadExtensionTestCase._create_invalid_zip_file(self._get_extension_package_file()) + + with patch("azurelinuxagent.common.protocol.wire.WireProtocol.download_ext_handler_pkg", side_effect=download_ext_handler_pkg) as mock_download_ext_handler_pkg: + self.ext_handler_instance.download() + + mock_download_ext_handler_pkg.assert_called_once() + + self._assert_download_and_expand_succeeded() + + def test_it_should_use_alternate_uris_when_download_fails(self): + self.download_failures = 0 + + def download_ext_handler_pkg(_uri, destination): + # fail a few times, then succeed + if self.download_failures < 3: + self.download_failures += 1 + return False + DownloadExtensionTestCase._create_zip_file(destination) + return True + + with patch("azurelinuxagent.common.protocol.wire.WireProtocol.download_ext_handler_pkg", side_effect=download_ext_handler_pkg) as mock_download_ext_handler_pkg: + self.ext_handler_instance.download() + + self.assertEquals(mock_download_ext_handler_pkg.call_count, self.download_failures + 1) + + self._assert_download_and_expand_succeeded() + + def test_it_should_use_alternate_uris_when_download_raises_an_exception(self): + self.download_failures = 0 + + def download_ext_handler_pkg(_uri, destination): + # fail a few times, then succeed + if self.download_failures < 3: + self.download_failures += 1 + raise Exception("Download failed") + DownloadExtensionTestCase._create_zip_file(destination) + return True + + with patch("azurelinuxagent.common.protocol.wire.WireProtocol.download_ext_handler_pkg", side_effect=download_ext_handler_pkg) as mock_download_ext_handler_pkg: + self.ext_handler_instance.download() + + self.assertEquals(mock_download_ext_handler_pkg.call_count, self.download_failures + 1) + + self._assert_download_and_expand_succeeded() + + def test_it_should_use_alternate_uris_when_it_downloads_an_invalid_package(self): + self.download_failures = 0 + + def download_ext_handler_pkg(_uri, destination): + # fail a few times, then succeed + 
if self.download_failures < 3: + self.download_failures += 1 + DownloadExtensionTestCase._create_invalid_zip_file(destination) + else: + DownloadExtensionTestCase._create_zip_file(destination) + return True + + with patch("azurelinuxagent.common.protocol.wire.WireProtocol.download_ext_handler_pkg", side_effect=download_ext_handler_pkg) as mock_download_ext_handler_pkg: + self.ext_handler_instance.download() + + self.assertEquals(mock_download_ext_handler_pkg.call_count, self.download_failures + 1) + + self._assert_download_and_expand_succeeded() + + def test_it_should_raise_an_exception_when_all_downloads_fail(self): + def download_ext_handler_pkg(_uri, _destination): + DownloadExtensionTestCase._create_invalid_zip_file(self._get_extension_package_file()) + return True + + with patch("time.sleep", lambda *_: None): + with patch("azurelinuxagent.common.protocol.wire.WireProtocol.download_ext_handler_pkg", side_effect=download_ext_handler_pkg) as mock_download_ext_handler_pkg: + with self.assertRaises(ExtensionDownloadError) as context_manager: + self.ext_handler_instance.download() + + self.assertEquals(mock_download_ext_handler_pkg.call_count, NUMBER_OF_DOWNLOAD_RETRIES * len(self.pkg.uris)) + + self.assertRegex(str(context_manager.exception), "Failed to download extension") + self.assertEquals(context_manager.exception.code, ExtensionErrorCodes.PluginManifestDownloadError) + + self.assertFalse(os.path.exists(self.extension_dir), "The extension directory was not removed") + self.assertFalse(os.path.exists(self._get_extension_package_file()), "The extension package was not removed") + diff -Nru walinuxagent-2.2.21+really2.2.20/tests/ga/test_exthandlers_exthandlerinstance.py walinuxagent-2.2.45/tests/ga/test_exthandlers_exthandlerinstance.py --- walinuxagent-2.2.21+really2.2.20/tests/ga/test_exthandlers_exthandlerinstance.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/ga/test_exthandlers_exthandlerinstance.py 2019-11-07 00:36:56.000000000 +0000 
@@ -0,0 +1,128 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the Apache License. + +from azurelinuxagent.ga.exthandlers import ExtHandlerInstance +from azurelinuxagent.common.protocol.restapi import ExtHandler, ExtHandlerProperties, ExtHandlerPackage, \ + ExtHandlerVersionUri +from tests.tools import * + + +class ExtHandlerInstanceTestCase(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) + + ext_handler_properties = ExtHandlerProperties() + ext_handler_properties.version = "1.2.3" + ext_handler = ExtHandler(name='foo') + ext_handler.properties = ext_handler_properties + self.ext_handler_instance = ExtHandlerInstance(ext_handler=ext_handler, protocol=None) + + pkg_uri = ExtHandlerVersionUri() + pkg_uri.uri = "http://bar/foo__1.2.3" + self.ext_handler_instance.pkg = ExtHandlerPackage(ext_handler_properties.version) + self.ext_handler_instance.pkg.uris.append(pkg_uri) + + self.base_dir = self.tmp_dir + self.extension_directory = os.path.join(self.tmp_dir, "extension_directory") + self.mock_get_base_dir = patch.object(self.ext_handler_instance, "get_base_dir", return_value=self.extension_directory) + self.mock_get_base_dir.start() + + def tearDown(self): + self.mock_get_base_dir.stop() + + def test_rm_ext_handler_dir_should_remove_the_extension_packages(self): + os.mkdir(self.extension_directory) + open(os.path.join(self.extension_directory, "extension_file1"), 'w').close() + open(os.path.join(self.extension_directory, "extension_file2"), 'w').close() + open(os.path.join(self.extension_directory, "extension_file3"), 'w').close() + open(os.path.join(self.base_dir, "foo__1.2.3.zip"), 'w').close() + + self.ext_handler_instance.remove_ext_handler() + + self.assertFalse(os.path.exists(self.extension_directory)) + self.assertFalse(os.path.exists(os.path.join(self.base_dir, "foo__1.2.3.zip"))) + + def test_rm_ext_handler_dir_should_remove_the_extension_directory(self): + os.mkdir(self.extension_directory) + 
os.mknod(os.path.join(self.extension_directory, "extension_file1")) + os.mknod(os.path.join(self.extension_directory, "extension_file2")) + os.mknod(os.path.join(self.extension_directory, "extension_file3")) + + self.ext_handler_instance.remove_ext_handler() + + self.assertFalse(os.path.exists(self.extension_directory)) + + def test_rm_ext_handler_dir_should_not_report_an_event_if_the_extension_directory_does_not_exist(self): + if os.path.exists(self.extension_directory): + os.rmdir(self.extension_directory) + + with patch.object(self.ext_handler_instance, "report_event") as mock_report_event: + self.ext_handler_instance.remove_ext_handler() + + mock_report_event.assert_not_called() + + def test_rm_ext_handler_dir_should_not_report_an_event_if_a_child_is_removed_asynchronously_while_deleting_the_extension_directory(self): + os.mkdir(self.extension_directory) + os.mknod(os.path.join(self.extension_directory, "extension_file1")) + os.mknod(os.path.join(self.extension_directory, "extension_file2")) + os.mknod(os.path.join(self.extension_directory, "extension_file3")) + + # + # Some extensions uninstall asynchronously and the files we are trying to remove may be removed + # while shutil.rmtree is traversing the extension's directory. Mock this by deleting a file + # twice (the second call will produce "[Errno 2] No such file or directory", which should not be + # reported as a telemetry event. 
+ # In order to mock this, we need to know that remove_ext_handler invokes Python's shutil.rmtree,
+ def mock_remove(path, dir_fd=None): + if path.endswith("extension_file2"): + raise IOError("A mocked error") + original_remove_api(path) + + with patch.object(shutil.os, remove_api_name, mock_remove): + with patch.object(self.ext_handler_instance, "report_event") as mock_report_event: + self.ext_handler_instance.remove_ext_handler() + + args, kwargs = mock_report_event.call_args + self.assertTrue("A mocked error" in kwargs["message"]) + diff -Nru walinuxagent-2.2.21+really2.2.20/tests/ga/test_exthandlers.py walinuxagent-2.2.45/tests/ga/test_exthandlers.py --- walinuxagent-2.2.21+really2.2.20/tests/ga/test_exthandlers.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/ga/test_exthandlers.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,608 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the Apache License. +import json +import subprocess + +from azurelinuxagent.common.protocol.restapi import ExtensionStatus, Extension, ExtHandler, ExtHandlerProperties +from azurelinuxagent.ga.exthandlers import parse_ext_status, ExtHandlerInstance, get_exthandlers_handler, \ + ExtCommandEnvVariable +from azurelinuxagent.common.exception import ProtocolError, ExtensionError, ExtensionErrorCodes +from azurelinuxagent.common.event import WALAEventOperation +from azurelinuxagent.common.utils.extensionprocessutil import TELEMETRY_MESSAGE_MAX_LEN, format_stdout_stderr, read_output +from tests.tools import * + + +class TestExtHandlers(AgentTestCase): + def test_parse_extension_status00(self): + """ + Parse a status report for a successful execution of an extension. + """ + + s = '''[{ + "status": { + "status": "success", + "formattedMessage": { + "lang": "en-US", + "message": "Command is finished." 
+ }, + "operation": "Daemon", + "code": "0", + "name": "Microsoft.OSTCExtensions.CustomScriptForLinux" + }, + "version": "1.0", + "timestampUTC": "2018-04-20T21:20:24Z" + } +]''' + ext_status = ExtensionStatus(seq_no=0) + parse_ext_status(ext_status, json.loads(s)) + + self.assertEqual('0', ext_status.code) + self.assertEqual(None, ext_status.configurationAppliedTime) + self.assertEqual('Command is finished.', ext_status.message) + self.assertEqual('Daemon', ext_status.operation) + self.assertEqual('success', ext_status.status) + self.assertEqual(0, ext_status.sequenceNumber) + self.assertEqual(0, len(ext_status.substatusList)) + + def test_parse_extension_status01(self): + """ + Parse a status report for a failed execution of an extension. + + The extension returned a bad status/status of failed. + The agent should handle this gracefully, and convert all unknown + status/status values into an error. + """ + + s = '''[{ + "status": { + "status": "failed", + "formattedMessage": { + "lang": "en-US", + "message": "Enable failed: Failed with error: commandToExecute is empty or invalid ..." + }, + "operation": "Enable", + "code": "0", + "name": "Microsoft.OSTCExtensions.CustomScriptForLinux" + }, + "version": "1.0", + "timestampUTC": "2018-04-20T20:50:22Z" +}]''' + ext_status = ExtensionStatus(seq_no=0) + parse_ext_status(ext_status, json.loads(s)) + + self.assertEqual('0', ext_status.code) + self.assertEqual(None, ext_status.configurationAppliedTime) + self.assertEqual('Enable failed: Failed with error: commandToExecute is empty or invalid ...', ext_status.message) + self.assertEqual('Enable', ext_status.operation) + self.assertEqual('error', ext_status.status) + self.assertEqual(0, ext_status.sequenceNumber) + self.assertEqual(0, len(ext_status.substatusList)) + + def test_parse_ext_status_should_parse_missing_substatus_as_empty(self): + status = '''[{ + "status": { + "status": "success", + "formattedMessage": { + "lang": "en-US", + "message": "Command is finished." 
+ }, + "operation": "Enable", + "code": "0", + "name": "Microsoft.OSTCExtensions.CustomScriptForLinux" + }, + + "version": "1.0", + "timestampUTC": "2018-04-20T21:20:24Z" + } + ]''' + + extension_status = ExtensionStatus(seq_no=0) + + parse_ext_status(extension_status, json.loads(status)) + + self.assertTrue(isinstance(extension_status.substatusList, list), 'substatus was not parsed correctly') + self.assertEqual(0, len(extension_status.substatusList)) + + def test_parse_ext_status_should_parse_null_substatus_as_empty(self): + status = '''[{ + "status": { + "status": "success", + "formattedMessage": { + "lang": "en-US", + "message": "Command is finished." + }, + "operation": "Enable", + "code": "0", + "name": "Microsoft.OSTCExtensions.CustomScriptForLinux", + "substatus": null + }, + + "version": "1.0", + "timestampUTC": "2018-04-20T21:20:24Z" + } + ]''' + + extension_status = ExtensionStatus(seq_no=0) + + parse_ext_status(extension_status, json.loads(status)) + + self.assertTrue(isinstance(extension_status.substatusList, list), 'substatus was not parsed correctly') + self.assertEqual(0, len(extension_status.substatusList)) + + @patch('azurelinuxagent.common.event.EventLogger.add_event') + @patch('azurelinuxagent.ga.exthandlers.ExtHandlerInstance.get_largest_seq_no') + def assert_extension_sequence_number(self, + patch_get_largest_seq, + patch_add_event, + goal_state_sequence_number, + disk_sequence_number, + expected_sequence_number): + ext = Extension() + ext.sequenceNumber = goal_state_sequence_number + patch_get_largest_seq.return_value = disk_sequence_number + + ext_handler_props = ExtHandlerProperties() + ext_handler_props.version = "1.2.3" + ext_handler = ExtHandler(name='foo') + ext_handler.properties = ext_handler_props + + instance = ExtHandlerInstance(ext_handler=ext_handler, protocol=None) + seq, path = instance.get_status_file_path(ext) + + try: + gs_seq_int = int(goal_state_sequence_number) + gs_int = True + except ValueError: + gs_int = False + + if 
gs_int and gs_seq_int != disk_sequence_number: + self.assertEqual(1, patch_add_event.call_count) + args, kw_args = patch_add_event.call_args + self.assertEqual('SequenceNumberMismatch', kw_args['op']) + self.assertEqual(False, kw_args['is_success']) + self.assertEqual('Goal state: {0}, disk: {1}' + .format(gs_seq_int, disk_sequence_number), + kw_args['message']) + else: + self.assertEqual(0, patch_add_event.call_count) + + self.assertEqual(expected_sequence_number, seq) + if seq > -1: + self.assertTrue(path.endswith('/foo-1.2.3/status/{0}.status'.format(expected_sequence_number))) + else: + self.assertIsNone(path) + + def test_extension_sequence_number(self): + self.assert_extension_sequence_number(goal_state_sequence_number="12", + disk_sequence_number=366, + expected_sequence_number=12) + + self.assert_extension_sequence_number(goal_state_sequence_number=" 12 ", + disk_sequence_number=366, + expected_sequence_number=12) + + self.assert_extension_sequence_number(goal_state_sequence_number=" foo", + disk_sequence_number=3, + expected_sequence_number=3) + + self.assert_extension_sequence_number(goal_state_sequence_number="-1", + disk_sequence_number=3, + expected_sequence_number=-1) + + @patch("azurelinuxagent.ga.exthandlers.add_event") + @patch("azurelinuxagent.common.errorstate.ErrorState.is_triggered") + @patch("azurelinuxagent.common.protocol.util.ProtocolUtil.get_protocol") + def test_it_should_report_an_error_if_the_wireserver_cannot_be_reached(self, patch_get_protocol, patch_is_triggered, patch_add_event): + test_message = "TEST MESSAGE" + + patch_get_protocol.side_effect = ProtocolError(test_message) # get_protocol will throw if the wire server cannot be reached + patch_is_triggered.return_value = True # protocol errors are reported only after a delay; force the error to be reported now + + get_exthandlers_handler().run() + + self.assertEquals(patch_add_event.call_count, 2) + + _, first_call_args = patch_add_event.call_args_list[0] + 
self.assertEquals(first_call_args['op'], WALAEventOperation.GetArtifactExtended) + self.assertEquals(first_call_args['is_success'], False) + + _, second_call_args = patch_add_event.call_args_list[1] + self.assertEquals(second_call_args['op'], WALAEventOperation.ExtensionProcessing) + self.assertEquals(second_call_args['is_success'], False) + self.assertIn(test_message, second_call_args['message']) + + +class LaunchCommandTestCase(AgentTestCase): + """ + Test cases for launch_command + """ + + def setUp(self): + AgentTestCase.setUp(self) + + ext_handler_properties = ExtHandlerProperties() + ext_handler_properties.version = "1.2.3" + self.ext_handler = ExtHandler(name='foo') + self.ext_handler.properties = ext_handler_properties + self.ext_handler_instance = ExtHandlerInstance(ext_handler=self.ext_handler, protocol=None) + + self.mock_get_base_dir = patch("azurelinuxagent.ga.exthandlers.ExtHandlerInstance.get_base_dir", lambda *_: self.tmp_dir) + self.mock_get_base_dir.start() + + self.log_dir = os.path.join(self.tmp_dir, "log") + self.mock_get_log_dir = patch("azurelinuxagent.ga.exthandlers.ExtHandlerInstance.get_log_dir", lambda *_: self.log_dir) + self.mock_get_log_dir.start() + + self.mock_sleep = patch("time.sleep", lambda *_: mock_sleep(0.01)) + self.mock_sleep.start() + + self.cgroups_enabled = CGroupConfigurator.get_instance().enabled() + CGroupConfigurator.get_instance().disable() + + def tearDown(self): + if self.cgroups_enabled: + CGroupConfigurator.get_instance().enable() + else: + CGroupConfigurator.get_instance().disable() + + self.mock_get_log_dir.stop() + self.mock_get_base_dir.stop() + self.mock_sleep.stop() + + AgentTestCase.tearDown(self) + + @staticmethod + def _output_regex(stdout, stderr): + return r"\[stdout\]\s+{0}\s+\[stderr\]\s+{1}".format(stdout, stderr) + + @staticmethod + def _find_process(command): + for pid in [pid for pid in os.listdir('/proc') if pid.isdigit()]: + try: + with open(os.path.join('/proc', pid, 'cmdline'), 'r') as 
cmdline: + for line in cmdline.readlines(): + if command in line: + return True + except IOError: # proc has already terminated + continue + return False + + def test_it_should_capture_the_output_of_the_command(self): + stdout = "stdout" * 5 + stderr = "stderr" * 5 + + command = self.create_script("produce_output.py", ''' +import sys + +sys.stdout.write("{0}") +sys.stderr.write("{1}") + +'''.format(stdout, stderr)) + + def list_directory(): + base_dir = self.ext_handler_instance.get_base_dir() + return [i for i in os.listdir(base_dir) if not i.endswith(".tld")] # ignore telemetry files + + files_before = list_directory() + + output = self.ext_handler_instance.launch_command(command) + + files_after = list_directory() + + self.assertRegex(output, LaunchCommandTestCase._output_regex(stdout, stderr)) + + self.assertListEqual(files_before, files_after, "Not all temporary files were deleted. File list: {0}".format(files_after)) + + def test_it_should_raise_an_exception_when_the_command_times_out(self): + extension_error_code = ExtensionErrorCodes.PluginHandlerScriptTimedout + stdout = "stdout" * 7 + stderr = "stderr" * 7 + + # the signal file is used by the test command to indicate it has produced output + signal_file = os.path.join(self.tmp_dir, "signal_file.txt") + + # the test command produces some output then goes into an infinite loop + command = self.create_script("produce_output_then_hang.py", ''' +import sys +import time + +sys.stdout.write("{0}") +sys.stdout.flush() + +sys.stderr.write("{1}") +sys.stderr.flush() + +with open("{2}", "w") as file: + while True: + file.write(".") + time.sleep(1) + +'''.format(stdout, stderr, signal_file)) + + # mock time.sleep to wait for the signal file (launch_command implements the time out using polling and sleep) + original_sleep = time.sleep + + def sleep(seconds): + if not os.path.exists(signal_file): + original_sleep(seconds) + + timeout = 60 + + start_time = time.time() + + with patch("time.sleep", side_effect=sleep, 
autospec=True) as mock_sleep: + + with self.assertRaises(ExtensionError) as context_manager: + self.ext_handler_instance.launch_command(command, timeout=timeout, extension_error_code=extension_error_code) + + # the command name and its output should be part of the message + message = str(context_manager.exception) + command_full_path = os.path.join(self.tmp_dir, command.lstrip(os.path.sep)) + self.assertRegex(message, r"Timeout\(\d+\):\s+{0}\s+{1}".format(command_full_path, LaunchCommandTestCase._output_regex(stdout, stderr))) + + # the exception code should be as specified in the call to launch_command + self.assertEquals(context_manager.exception.code, extension_error_code) + + # the timeout period should have elapsed + self.assertGreaterEqual(mock_sleep.call_count, timeout) + + # the command should have been terminated + self.assertFalse(LaunchCommandTestCase._find_process(command), "The command was not terminated") + + # as a check for the test itself, verify it completed in just a few seconds + self.assertLessEqual(time.time() - start_time, 5) + + def test_it_should_raise_an_exception_when_the_command_fails(self): + extension_error_code = 2345 + stdout = "stdout" * 3 + stderr = "stderr" * 3 + exit_code = 99 + + command = self.create_script("fail.py", ''' +import sys + +sys.stdout.write("{0}") +sys.stderr.write("{1}") +exit({2}) + +'''.format(stdout, stderr, exit_code)) + + # the output is captured as part of the exception message + with self.assertRaises(ExtensionError) as context_manager: + self.ext_handler_instance.launch_command(command, extension_error_code=extension_error_code) + + message = str(context_manager.exception) + self.assertRegex(message, r"Non-zero exit code: {0}.+{1}\s+{2}".format(exit_code, command, LaunchCommandTestCase._output_regex(stdout, stderr))) + + self.assertEquals(context_manager.exception.code, extension_error_code) + + def test_it_should_not_wait_for_child_process(self): + stdout = "stdout" + stderr = "stderr" + + command = 
self.create_script("start_child_process.py", ''' +import os +import sys +import time + +pid = os.fork() + +if pid == 0: + time.sleep(60) +else: + sys.stdout.write("{0}") + sys.stderr.write("{1}") + +'''.format(stdout, stderr)) + + start_time = time.time() + + output = self.ext_handler_instance.launch_command(command) + + self.assertLessEqual(time.time() - start_time, 5) + + # Also check that we capture the parent's output + self.assertRegex(output, LaunchCommandTestCase._output_regex(stdout, stderr)) + + def test_it_should_capture_the_output_of_child_process(self): + parent_stdout = "PARENT STDOUT" + parent_stderr = "PARENT STDERR" + child_stdout = "CHILD STDOUT" + child_stderr = "CHILD STDERR" + more_parent_stdout = "MORE PARENT STDOUT" + more_parent_stderr = "MORE PARENT STDERR" + + # the child process uses the signal file to indicate it has produced output + signal_file = os.path.join(self.tmp_dir, "signal_file.txt") + + command = self.create_script("start_child_with_output.py", ''' +import os +import sys +import time + +sys.stdout.write("{0}") +sys.stderr.write("{1}") + +pid = os.fork() + +if pid == 0: + sys.stdout.write("{2}") + sys.stderr.write("{3}") + + open("{6}", "w").close() +else: + sys.stdout.write("{4}") + sys.stderr.write("{5}") + + while not os.path.exists("{6}"): + time.sleep(0.5) + +'''.format(parent_stdout, parent_stderr, child_stdout, child_stderr, more_parent_stdout, more_parent_stderr, signal_file)) + + output = self.ext_handler_instance.launch_command(command) + + self.assertIn(parent_stdout, output) + self.assertIn(parent_stderr, output) + + self.assertIn(child_stdout, output) + self.assertIn(child_stderr, output) + + self.assertIn(more_parent_stdout, output) + self.assertIn(more_parent_stderr, output) + + def test_it_should_capture_the_output_of_child_process_that_fails_to_start(self): + parent_stdout = "PARENT STDOUT" + parent_stderr = "PARENT STDERR" + child_stdout = "CHILD STDOUT" + child_stderr = "CHILD STDERR" + + command = 
self.create_script("start_child_that_fails.py", ''' +import os +import sys +import time + +pid = os.fork() + +if pid == 0: + sys.stdout.write("{0}") + sys.stderr.write("{1}") + exit(1) +else: + sys.stdout.write("{2}") + sys.stderr.write("{3}") + +'''.format(child_stdout, child_stderr, parent_stdout, parent_stderr)) + + output = self.ext_handler_instance.launch_command(command) + + self.assertIn(parent_stdout, output) + self.assertIn(parent_stderr, output) + + self.assertIn(child_stdout, output) + self.assertIn(child_stderr, output) + + def test_it_should_execute_commands_with_no_output(self): + # file used to verify the command completed successfully + signal_file = os.path.join(self.tmp_dir, "signal_file.txt") + + command = self.create_script("create_file.py", ''' +open("{0}", "w").close() + +'''.format(signal_file)) + + output = self.ext_handler_instance.launch_command(command) + + self.assertTrue(os.path.exists(signal_file)) + self.assertRegex(output, LaunchCommandTestCase._output_regex('', '')) + + def test_it_should_not_capture_the_output_of_commands_that_do_their_own_redirection(self): + # the test script redirects its output to this file + command_output_file = os.path.join(self.tmp_dir, "command_output.txt") + stdout = "STDOUT" + stderr = "STDERR" + + # the test script mimics the redirection done by the Custom Script extension + command = self.create_script("produce_output", ''' +exec &> {0} +echo {1} +>&2 echo {2} + +'''.format(command_output_file, stdout, stderr)) + + output = self.ext_handler_instance.launch_command(command) + + self.assertRegex(output, LaunchCommandTestCase._output_regex('', '')) + + with open(command_output_file, "r") as command_output: + output = command_output.read() + self.assertEquals(output, "{0}\n{1}\n".format(stdout, stderr)) + + def test_it_should_truncate_the_command_output(self): + stdout = "STDOUT" + stderr = "STDERR" + + command = self.create_script("produce_long_output.py", ''' +import sys + +sys.stdout.write( "{0}" * {1}) 
+sys.stderr.write( "{2}" * {3}) +'''.format(stdout, int(TELEMETRY_MESSAGE_MAX_LEN / len(stdout)), stderr, int(TELEMETRY_MESSAGE_MAX_LEN / len(stderr)))) + + output = self.ext_handler_instance.launch_command(command) + + self.assertLessEqual(len(output), TELEMETRY_MESSAGE_MAX_LEN) + self.assertIn(stdout, output) + self.assertIn(stderr, output) + + def test_it_should_read_only_the_head_of_large_outputs(self): + command = self.create_script("produce_long_output.py", ''' +import sys + +sys.stdout.write("O" * 5 * 1024 * 1024) +sys.stderr.write("E" * 5 * 1024 * 1024) +''') + + # Mocking the call to file.read() is difficult, so instead we mock the call to format_stdout_stderr, which takes the + # return value of the calls to file.read(). The intention of the test is to verify we never read (and load in memory) + # more than a few KB of data from the files used to capture stdout/stderr + with patch('azurelinuxagent.common.utils.extensionprocessutil.format_stdout_stderr', side_effect=format_stdout_stderr) as mock_format: + output = self.ext_handler_instance.launch_command(command) + + self.assertGreaterEqual(len(output), 1024) + self.assertLessEqual(len(output), TELEMETRY_MESSAGE_MAX_LEN) + + mock_format.assert_called_once() + + args, kwargs = mock_format.call_args + stdout, stderr = args + + self.assertGreaterEqual(len(stdout), 1024) + self.assertLessEqual(len(stdout), TELEMETRY_MESSAGE_MAX_LEN) + + self.assertGreaterEqual(len(stderr), 1024) + self.assertLessEqual(len(stderr), TELEMETRY_MESSAGE_MAX_LEN) + + def test_it_should_handle_errors_while_reading_the_command_output(self): + command = self.create_script("produce_output.py", ''' +import sys + +sys.stdout.write("STDOUT") +sys.stderr.write("STDERR") +''') + # Mocking the call to file.read() is difficult, so instead we mock the call to_capture_process_output, + # which will call file.read() and we force stdout/stderr to be None; this will produce an exception when + # trying to use these files. 
+ original_capture_process_output = read_output + + def capture_process_output(stdout_file, stderr_file): + return original_capture_process_output(None, None) + + with patch('azurelinuxagent.common.utils.extensionprocessutil.read_output', side_effect=capture_process_output): + output = self.ext_handler_instance.launch_command(command) + + self.assertIn("[stderr]\nCannot read stdout/stderr:", output) + + def test_it_should_contain_all_helper_environment_variables(self): + + helper_env_vars = {ExtCommandEnvVariable.ExtensionSeqNumber: self.ext_handler_instance.get_seq_no(), + ExtCommandEnvVariable.ExtensionPath: self.tmp_dir, + ExtCommandEnvVariable.ExtensionVersion: self.ext_handler_instance.ext_handler.properties.version} + + command = """ + printenv | grep -E '(%s)' + """ % '|'.join(helper_env_vars.keys()) + + test_file = self.create_script('printHelperEnvironments.sh', command) + + with patch("subprocess.Popen", wraps=subprocess.Popen) as patch_popen: + output = self.ext_handler_instance.launch_command(test_file) + + args, kwagrs = patch_popen.call_args + without_os_env = dict((k, v) for (k, v) in kwagrs['env'].items() if k not in os.environ) + + # This check will fail if any helper environment variables are added/removed later on + self.assertEqual(helper_env_vars, without_os_env) + + # This check is checking if the expected values are set for the extension commands + for helper_var in helper_env_vars: + self.assertIn("%s=%s" % (helper_var, helper_env_vars[helper_var]), output) diff -Nru walinuxagent-2.2.21+really2.2.20/tests/ga/test_monitor.py walinuxagent-2.2.45/tests/ga/test_monitor.py --- walinuxagent-2.2.21+really2.2.20/tests/ga/test_monitor.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/ga/test_monitor.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file 
except in compliance with the License. @@ -12,66 +12,1012 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # +import datetime +import json +import os +import platform +import random +import shutil +import string +import sys +import tempfile +import time +from datetime import timedelta -from tests.tools import * -from azurelinuxagent.ga.monitor import * +from mock import Mock, MagicMock, patch +from nose.plugins.attrib import attr +from azurelinuxagent.common import logger +from azurelinuxagent.common.cgroup import CGroup +from azurelinuxagent.common.cgroupstelemetry import CGroupsTelemetry +from azurelinuxagent.common.datacontract import get_properties +from azurelinuxagent.common.event import EventLogger, WALAEventOperation, CONTAINER_ID_ENV_VARIABLE +from azurelinuxagent.common.exception import HttpError +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.protocol.imds import ComputeInfo +from azurelinuxagent.common.protocol.restapi import VMInfo +from azurelinuxagent.common.protocol.wire import WireProtocol +from azurelinuxagent.common.telemetryevent import TelemetryEventParam, TelemetryEvent +from azurelinuxagent.common.utils import restutil, fileutil +from azurelinuxagent.common.version import AGENT_VERSION, CURRENT_VERSION, AGENT_NAME, CURRENT_AGENT +from azurelinuxagent.ga.monitor import parse_xml_event, get_monitor_handler, MonitorHandler, \ + generate_extension_metrics_telemetry_dictionary, parse_json_event +from tests.common.test_cgroupstelemetry import make_new_cgroup, consume_cpu_time, consume_memory +from tests.protocol.mockwiredata import WireProtocolData, DATA_FILE, conf +from tests.tools import load_data, AgentTestCase, data_dir, are_cgroups_enabled, i_am_root, skip_if_predicate_false + + +class ResponseMock(Mock): + def __init__(self, status=restutil.httpclient.OK, response=None, 
reason=None): + Mock.__init__(self) + self.status = status + self.reason = reason + self.response = response + + def read(self): + return self.response + + +def random_generator(size=6, chars=string.ascii_uppercase + string.digits + string.ascii_lowercase): + return ''.join(random.choice(chars) for x in range(size)) + + +def create_dummy_event(size=0, + name="DummyExtension", + op=WALAEventOperation.Unknown, + is_success=True, + duration=0, + version=CURRENT_VERSION, + is_internal=False, + evt_type="", + message="DummyMessage", + invalid_chars=False): + return get_event_message(name=size if size != 0 else name, + op=op, + is_success=is_success, + duration=duration, + version=version, + message=random_generator(size) if size != 0 else message, + evt_type=evt_type, + is_internal=is_internal) + + +def get_event_message(duration, evt_type, is_internal, is_success, message, name, op, version, eventId=1): + event = TelemetryEvent(eventId, "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX") + event.parameters.append(TelemetryEventParam('Name', name)) + event.parameters.append(TelemetryEventParam('Version', str(version))) + event.parameters.append(TelemetryEventParam('IsInternal', is_internal)) + event.parameters.append(TelemetryEventParam('Operation', op)) + event.parameters.append(TelemetryEventParam('OperationSuccess', is_success)) + event.parameters.append(TelemetryEventParam('Message', message)) + event.parameters.append(TelemetryEventParam('Duration', duration)) + event.parameters.append(TelemetryEventParam('ExtensionType', evt_type)) + event.parameters.append(TelemetryEventParam('OpcodeName', '2019-11-06 02:00:44.307835')) + + data = get_properties(event) + return json.dumps(data) + + +@patch('azurelinuxagent.common.event.EventLogger.add_event') +@patch('azurelinuxagent.common.osutil.get_osutil') +@patch('azurelinuxagent.common.protocol.get_protocol_util') +@patch('azurelinuxagent.common.protocol.util.ProtocolUtil.get_protocol') 
+@patch("azurelinuxagent.common.protocol.healthservice.HealthService._report") +@patch("azurelinuxagent.common.utils.restutil.http_get") class TestMonitor(AgentTestCase): - def test_parse_xml_event(self): - data_str = load_data('ext/event.xml') + + def test_parse_xml_event(self, *args): + data_str = load_data('ext/event_from_extension.xml') event = parse_xml_event(data_str) - self.assertNotEquals(None, event) - self.assertNotEquals(0, event.parameters) - self.assertNotEquals(None, event.parameters[0]) - - @patch('azurelinuxagent.common.osutil.get_osutil') - @patch('azurelinuxagent.common.protocol.get_protocol_util') - def test_add_sysinfo(self, _, __): - data_str = load_data('ext/event.xml') + self.assertNotEqual(None, event) + self.assertNotEqual(0, event.parameters) + self.assertTrue(all(param is not None for param in event.parameters)) + + def test_parse_json_event(self, *args): + data_str = load_data('ext/event.json') + event = parse_json_event(data_str) + self.assertNotEqual(None, event) + self.assertNotEqual(0, event.parameters) + self.assertTrue(all(param is not None for param in event.parameters)) + + def test_add_sysinfo_should_honor_sysinfo_values_from_agent_for_agent_events(self, *args): + data_str = load_data('ext/event_from_agent.json') + event = parse_json_event(data_str) + + monitor_handler = get_monitor_handler() + + sysinfo_vm_name_value = "sysinfo_dummy_vm" + sysinfo_tenant_name_value = "sysinfo_dummy_tenant" + sysinfo_role_name_value = "sysinfo_dummy_role" + sysinfo_role_instance_name_value = "sysinfo_dummy_role_instance" + sysinfo_execution_mode_value = "sysinfo_IAAS" + container_id_value = "TEST-CONTAINER-ID-ALREADY-PRESENT-GUID" + GAVersion_value = "WALinuxAgent-2.2.44" + OpcodeName_value = "2019-11-02 01:42:49.188030" + EventTid_value = 140240384030528 + EventPid_value = 108573 + TaskName_value = "ExtHandler" + KeywordName_value = "" + + vm_name_param = "VMName" + tenant_name_param = "TenantName" + role_name_param = "RoleName" + 
role_instance_name_param = "RoleInstanceName" + execution_mode_param = "ExecutionMode" + container_id_param = "ContainerId" + GAVersion_param = "GAVersion" + OpcodeName_param = "OpcodeName" + EventTid_param = "EventTid" + EventPid_param = "EventPid" + TaskName_param = "TaskName" + KeywordName_param = "KeywordName" + + sysinfo = [ + TelemetryEventParam(role_instance_name_param, sysinfo_role_instance_name_value), + TelemetryEventParam(vm_name_param, sysinfo_vm_name_value), + TelemetryEventParam(execution_mode_param, sysinfo_execution_mode_value), + TelemetryEventParam(tenant_name_param, sysinfo_tenant_name_value), + TelemetryEventParam(role_name_param, sysinfo_role_name_value) + ] + monitor_handler.sysinfo = sysinfo + monitor_handler.add_sysinfo(event) + + self.assertNotEqual(None, event) + self.assertNotEqual(0, event.parameters) + self.assertTrue(all(param is not None for param in event.parameters)) + + counter = 0 + for p in event.parameters: + if p.name == vm_name_param: + self.assertEqual(sysinfo_vm_name_value, p.value) + counter += 1 + elif p.name == tenant_name_param: + self.assertEqual(sysinfo_tenant_name_value, p.value) + counter += 1 + elif p.name == role_name_param: + self.assertEqual(sysinfo_role_name_value, p.value) + counter += 1 + elif p.name == role_instance_name_param: + self.assertEqual(sysinfo_role_instance_name_value, p.value) + counter += 1 + elif p.name == execution_mode_param: + self.assertEqual(sysinfo_execution_mode_value, p.value) + counter += 1 + elif p.name == container_id_param: + self.assertEqual(container_id_value, p.value) + counter += 1 + elif p.name == GAVersion_param: + self.assertEqual(GAVersion_value, p.value) + counter += 1 + elif p.name == OpcodeName_param: + self.assertEqual(OpcodeName_value, p.value) + counter += 1 + elif p.name == EventTid_param: + self.assertEqual(EventTid_value, p.value) + counter += 1 + elif p.name == EventPid_param: + self.assertEqual(EventPid_value, p.value) + counter += 1 + elif p.name == 
TaskName_param: + self.assertEqual(TaskName_value, p.value) + counter += 1 + elif p.name == KeywordName_param: + self.assertEqual(KeywordName_value, p.value) + counter += 1 + + self.assertEqual(12, counter) + + def test_add_sysinfo_should_honor_sysinfo_values_from_agent_for_extension_events(self, *args): + # The difference between agent and extension events is that extension events don't have the container id + # populated on the fly like the agent events do. Ensure the container id is populated in add_sysinfo. + data_str = load_data('ext/event_from_extension.xml') event = parse_xml_event(data_str) monitor_handler = get_monitor_handler() - vm_name = 'dummy_vm' - tenant_name = 'dummy_tenant' - role_name = 'dummy_role' - role_instance_name = 'dummy_role_instance' - container_id = 'dummy_container_id' + # Prepare the os environment variable to read the container id value from + container_id_value = "TEST-CONTAINER-ID-ADDED-IN-SYSINFO-GUID" + os.environ[CONTAINER_ID_ENV_VARIABLE] = container_id_value + + sysinfo_vm_name_value = "sysinfo_dummy_vm" + sysinfo_tenant_name_value = "sysinfo_dummy_tenant" + sysinfo_role_name_value = "sysinfo_dummy_role" + sysinfo_role_instance_name_value = "sysinfo_dummy_role_instance" + sysinfo_execution_mode_value = "sysinfo_IAAS" + GAVersion_value = "WALinuxAgent-2.2.44" + OpcodeName_value = "" + EventTid_value = 0 + EventPid_value = 0 + TaskName_value = "" + KeywordName_value = "" vm_name_param = "VMName" tenant_name_param = "TenantName" role_name_param = "RoleName" role_instance_name_param = "RoleInstanceName" + execution_mode_param = "ExecutionMode" container_id_param = "ContainerId" + GAVersion_param = "GAVersion" + OpcodeName_param = "OpcodeName" + EventTid_param = "EventTid" + EventPid_param = "EventPid" + TaskName_param = "TaskName" + KeywordName_param = "KeywordName" - sysinfo = [TelemetryEventParam(vm_name_param, vm_name), - TelemetryEventParam(tenant_name_param, tenant_name), - TelemetryEventParam(role_name_param, role_name), - 
TelemetryEventParam(role_instance_name_param, role_instance_name), - TelemetryEventParam(container_id_param, container_id)] + sysinfo = [ + TelemetryEventParam(role_instance_name_param, sysinfo_role_instance_name_value), + TelemetryEventParam(vm_name_param, sysinfo_vm_name_value), + TelemetryEventParam(execution_mode_param, sysinfo_execution_mode_value), + TelemetryEventParam(tenant_name_param, sysinfo_tenant_name_value), + TelemetryEventParam(role_name_param, sysinfo_role_name_value) + ] monitor_handler.sysinfo = sysinfo monitor_handler.add_sysinfo(event) - self.assertNotEquals(None, event) - self.assertNotEquals(0, event.parameters) - self.assertNotEquals(None, event.parameters[0]) + self.assertNotEqual(None, event) + self.assertNotEqual(0, event.parameters) + self.assertTrue(all(param is not None for param in event.parameters)) + counter = 0 for p in event.parameters: if p.name == vm_name_param: - self.assertEquals(vm_name, p.value) + self.assertEqual(sysinfo_vm_name_value, p.value) counter += 1 elif p.name == tenant_name_param: - self.assertEquals(tenant_name, p.value) + self.assertEqual(sysinfo_tenant_name_value, p.value) counter += 1 elif p.name == role_name_param: - self.assertEquals(role_name, p.value) + self.assertEqual(sysinfo_role_name_value, p.value) counter += 1 elif p.name == role_instance_name_param: - self.assertEquals(role_instance_name, p.value) + self.assertEqual(sysinfo_role_instance_name_value, p.value) + counter += 1 + elif p.name == execution_mode_param: + self.assertEqual(sysinfo_execution_mode_value, p.value) counter += 1 elif p.name == container_id_param: - self.assertEquals(container_id, p.value) + self.assertEqual(container_id_value, p.value) + counter += 1 + elif p.name == GAVersion_param: + self.assertEqual(GAVersion_value, p.value) + counter += 1 + elif p.name == OpcodeName_param: + self.assertEqual(OpcodeName_value, p.value) counter += 1 + elif p.name == EventTid_param: + self.assertEqual(EventTid_value, p.value) + counter += 1 + 
elif p.name == EventPid_param: + self.assertEqual(EventPid_value, p.value) + counter += 1 + elif p.name == TaskName_param: + self.assertEqual(TaskName_value, p.value) + counter += 1 + elif p.name == KeywordName_param: + self.assertEqual(KeywordName_value, p.value) + counter += 1 + + self.assertEqual(12, counter) + os.environ.pop(CONTAINER_ID_ENV_VARIABLE) + + @patch("azurelinuxagent.ga.monitor.MonitorHandler.send_telemetry_heartbeat") + @patch("azurelinuxagent.ga.monitor.MonitorHandler.collect_and_send_events") + @patch("azurelinuxagent.ga.monitor.MonitorHandler.send_host_plugin_heartbeat") + @patch("azurelinuxagent.ga.monitor.MonitorHandler.poll_telemetry_metrics") + @patch("azurelinuxagent.ga.monitor.MonitorHandler.send_telemetry_metrics") + @patch("azurelinuxagent.ga.monitor.MonitorHandler.send_imds_heartbeat") + def test_heartbeats(self, + patch_imds_heartbeat, + patch_send_telemetry_metrics, + patch_poll_telemetry_metrics, + patch_hostplugin_heartbeat, + patch_send_events, + patch_telemetry_heartbeat, + *args): + monitor_handler = get_monitor_handler() + + MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD = timedelta(milliseconds=100) + MonitorHandler.EVENT_COLLECTION_PERIOD = timedelta(milliseconds=100) + MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD = timedelta(milliseconds=100) + MonitorHandler.IMDS_HEARTBEAT_PERIOD = timedelta(milliseconds=100) + + self.assertEqual(0, patch_hostplugin_heartbeat.call_count) + self.assertEqual(0, patch_send_events.call_count) + self.assertEqual(0, patch_telemetry_heartbeat.call_count) + self.assertEqual(0, patch_imds_heartbeat.call_count) + self.assertEqual(0, patch_send_telemetry_metrics.call_count) + self.assertEqual(0, patch_poll_telemetry_metrics.call_count) + + monitor_handler.start() + time.sleep(1) + self.assertTrue(monitor_handler.is_alive()) + + self.assertNotEqual(0, patch_hostplugin_heartbeat.call_count) + self.assertNotEqual(0, patch_send_events.call_count) + self.assertNotEqual(0, patch_telemetry_heartbeat.call_count) + 
self.assertNotEqual(0, patch_imds_heartbeat.call_count) + self.assertNotEqual(0, patch_send_telemetry_metrics.call_count) + self.assertNotEqual(0, patch_poll_telemetry_metrics.call_count) + + monitor_handler.stop() + + @patch("azurelinuxagent.ga.monitor.MonitorHandler.send_telemetry_metrics") + @patch("azurelinuxagent.ga.monitor.MonitorHandler.poll_telemetry_metrics") + def test_heartbeat_timings_updates_after_window(self, *args): + monitor_handler = get_monitor_handler() + + MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD = timedelta(milliseconds=100) + MonitorHandler.EVENT_COLLECTION_PERIOD = timedelta(milliseconds=100) + MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD = timedelta(milliseconds=100) + MonitorHandler.IMDS_HEARTBEAT_PERIOD = timedelta(milliseconds=100) + + self.assertEqual(None, monitor_handler.last_host_plugin_heartbeat) + self.assertEqual(None, monitor_handler.last_event_collection) + self.assertEqual(None, monitor_handler.last_telemetry_heartbeat) + self.assertEqual(None, monitor_handler.last_imds_heartbeat) + + monitor_handler.start() + time.sleep(0.2) + self.assertTrue(monitor_handler.is_alive()) + + self.assertNotEqual(None, monitor_handler.last_host_plugin_heartbeat) + self.assertNotEqual(None, monitor_handler.last_event_collection) + self.assertNotEqual(None, monitor_handler.last_telemetry_heartbeat) + self.assertNotEqual(None, monitor_handler.last_imds_heartbeat) + + heartbeat_hostplugin = monitor_handler.last_host_plugin_heartbeat + heartbeat_imds = monitor_handler.last_imds_heartbeat + heartbeat_telemetry = monitor_handler.last_telemetry_heartbeat + events_collection = monitor_handler.last_event_collection + + time.sleep(0.5) + + self.assertNotEqual(heartbeat_imds, monitor_handler.last_imds_heartbeat) + self.assertNotEqual(heartbeat_hostplugin, monitor_handler.last_host_plugin_heartbeat) + self.assertNotEqual(events_collection, monitor_handler.last_event_collection) + self.assertNotEqual(heartbeat_telemetry, 
monitor_handler.last_telemetry_heartbeat) + + monitor_handler.stop() + + @patch("azurelinuxagent.ga.monitor.MonitorHandler.send_telemetry_metrics") + @patch("azurelinuxagent.ga.monitor.MonitorHandler.poll_telemetry_metrics") + def test_heartbeat_timings_no_updates_within_window(self, *args): + monitor_handler = get_monitor_handler() + + MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD = timedelta(seconds=1) + MonitorHandler.EVENT_COLLECTION_PERIOD = timedelta(seconds=1) + MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD = timedelta(seconds=1) + MonitorHandler.IMDS_HEARTBEAT_PERIOD = timedelta(seconds=1) + + self.assertEqual(None, monitor_handler.last_host_plugin_heartbeat) + self.assertEqual(None, monitor_handler.last_event_collection) + self.assertEqual(None, monitor_handler.last_telemetry_heartbeat) + self.assertEqual(None, monitor_handler.last_imds_heartbeat) + + monitor_handler.start() + time.sleep(0.2) + self.assertTrue(monitor_handler.is_alive()) + + self.assertNotEqual(None, monitor_handler.last_host_plugin_heartbeat) + self.assertNotEqual(None, monitor_handler.last_event_collection) + self.assertNotEqual(None, monitor_handler.last_telemetry_heartbeat) + self.assertNotEqual(None, monitor_handler.last_imds_heartbeat) + + heartbeat_hostplugin = monitor_handler.last_host_plugin_heartbeat + heartbeat_imds = monitor_handler.last_imds_heartbeat + heartbeat_telemetry = monitor_handler.last_telemetry_heartbeat + events_collection = monitor_handler.last_event_collection + + time.sleep(0.5) + + self.assertEqual(heartbeat_hostplugin, monitor_handler.last_host_plugin_heartbeat) + self.assertEqual(heartbeat_imds, monitor_handler.last_imds_heartbeat) + self.assertEqual(events_collection, monitor_handler.last_event_collection) + self.assertEqual(heartbeat_telemetry, monitor_handler.last_telemetry_heartbeat) + + monitor_handler.stop() + + @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_heartbeat") + def test_heartbeat_creates_signal(self, 
patch_report_heartbeat, *args): + monitor_handler = get_monitor_handler() + monitor_handler.init_protocols() + monitor_handler.last_host_plugin_heartbeat = datetime.datetime.utcnow() - timedelta(hours=1) + monitor_handler.send_host_plugin_heartbeat() + self.assertEqual(1, patch_report_heartbeat.call_count) + self.assertEqual(0, args[5].call_count) + monitor_handler.stop() + + @patch('azurelinuxagent.common.errorstate.ErrorState.is_triggered', return_value=True) + @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_heartbeat") + def test_failed_heartbeat_creates_telemetry(self, patch_report_heartbeat, _, *args): + monitor_handler = get_monitor_handler() + monitor_handler.init_protocols() + monitor_handler.last_host_plugin_heartbeat = datetime.datetime.utcnow() - timedelta(hours=1) + monitor_handler.send_host_plugin_heartbeat() + self.assertEqual(1, patch_report_heartbeat.call_count) + self.assertEqual(1, args[5].call_count) + self.assertEqual('HostPluginHeartbeatExtended', args[5].call_args[1]['op']) + self.assertEqual(False, args[5].call_args[1]['is_success']) + monitor_handler.stop() + + @patch('azurelinuxagent.common.logger.Logger.info') + def test_reset_loggers(self, mock_info, *args): + # Adding 100 different messages + for i in range(100): + event_message = "Test {0}".format(i) + logger.periodic_info(logger.EVERY_DAY, event_message) + + self.assertIn(hash(event_message), logger.DEFAULT_LOGGER.periodic_messages) + self.assertEqual(i + 1, mock_info.call_count) # range starts from 0. + + self.assertEqual(100, len(logger.DEFAULT_LOGGER.periodic_messages)) + + # Adding 1 message 100 times, but the same message. Mock Info should be called only once. + for i in range(100): + logger.periodic_info(logger.EVERY_DAY, "Test-Message") + + self.assertIn(hash("Test-Message"), logger.DEFAULT_LOGGER.periodic_messages) + self.assertEqual(101, mock_info.call_count) # 100 calls from the previous section. Adding only 1. 
+ self.assertEqual(101, len(logger.DEFAULT_LOGGER.periodic_messages)) # One new message in the hash map. + + # Resetting the logger time states. + monitor_handler = get_monitor_handler() + monitor_handler.last_reset_loggers_time = datetime.datetime.utcnow() - timedelta(hours=1) + MonitorHandler.RESET_LOGGERS_PERIOD = timedelta(milliseconds=100) + + monitor_handler.reset_loggers() + + # The hash map got cleaned up by the reset_loggers method + self.assertEqual(0, len(logger.DEFAULT_LOGGER.periodic_messages)) + + monitor_handler.stop() + + @patch("azurelinuxagent.common.logger.reset_periodic", side_effect=Exception()) + def test_reset_loggers_ensuring_timestamp_gets_updated(self, *args): + # Resetting the logger time states. + monitor_handler = get_monitor_handler() + initial_time = datetime.datetime.utcnow() - timedelta(hours=1) + monitor_handler.last_reset_loggers_time = initial_time + MonitorHandler.RESET_LOGGERS_PERIOD = timedelta(milliseconds=100) + + # noinspection PyBroadException + try: + monitor_handler.reset_loggers() + except: + pass + + # The hash map got cleaned up by the reset_loggers method + self.assertGreater(monitor_handler.last_reset_loggers_time, initial_time) + monitor_handler.stop() + + +@patch('azurelinuxagent.common.osutil.get_osutil') +@patch("azurelinuxagent.common.protocol.healthservice.HealthService._report") +@patch("azurelinuxagent.common.protocol.wire.CryptUtil") +@patch("azurelinuxagent.common.utils.restutil.http_get") +class TestEventMonitoring(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) + self.lib_dir = tempfile.mkdtemp() + + self.event_logger = EventLogger() + self.event_logger.event_dir = os.path.join(self.lib_dir, "events") + + def tearDown(self): + fileutil.rm_dirs(self.lib_dir) + + def _create_mock(self, test_data, mock_http_get, MockCryptUtil, *args): + """Test enable/disable/uninstall of an extension""" + monitor_handler = get_monitor_handler() + + # Mock protocol to return test data + 
mock_http_get.side_effect = test_data.mock_http_get + MockCryptUtil.side_effect = test_data.mock_crypt_util + + protocol = WireProtocol("foo.bar") + protocol.detect() + protocol.report_ext_status = MagicMock() + protocol.report_vm_status = MagicMock() + + monitor_handler.protocol_util.get_protocol = Mock(return_value=protocol) + return monitor_handler, protocol + + @patch("azurelinuxagent.common.protocol.imds.ImdsClient.get_compute", + return_value=ComputeInfo(subscriptionId="DummySubId", + location="DummyVMLocation", + vmId="DummyVmId", + resourceGroupName="DummyRG", + publisher="")) + @patch("azurelinuxagent.common.protocol.wire.WireProtocol.get_vminfo", + return_value=VMInfo(subscriptionId="DummySubId", + vmName="DummyVMName", + containerId="DummyContainerId", + roleName="DummyRoleName", + roleInstanceName="DummyRoleInstanceName", tenantName="DummyTenant")) + @patch("platform.release", return_value="platform-release") + @patch("platform.system", return_value="Linux") + @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.get_processor_cores", return_value=4) + @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.get_total_mem", return_value=10000) + def mock_init_sysinfo(self, monitor_handler, *args): + # Mock all values that are dependent on the environment to ensure consistency across testing environments. + monitor_handler.init_sysinfo() + + # Replacing OSVersion to make it platform agnostic. We can't mock global constants (eg. DISTRO_NAME, + # DISTRO_VERSION, DISTRO_CODENAME), so to make them constant during the test-time, we need to replace the + # OSVersion field in the event object. 
+ for i in monitor_handler.sysinfo: + if i.name == "OSVersion": + i.value = "{0}:{1}-{2}-{3}:{4}".format(platform.system(), + "DISTRO_NAME", + "DISTRO_VERSION", + "DISTRO_CODE_NAME", + platform.release()) + + @patch("azurelinuxagent.common.conf.get_lib_dir") + def test_collect_and_send_events_should_prepare_all_fields_for_all_event_files(self, mock_lib_dir, *args): + # Test collecting and sending both agent and extension events from the moment they're created to the moment + # they are to be reported. Ensure all necessary fields from sysinfo are present, as well as the container id. + mock_lib_dir.return_value = self.lib_dir + + test_data = WireProtocolData(DATA_FILE) + monitor_handler, protocol = self._create_mock(test_data, *args) + monitor_handler.init_protocols() + self.mock_init_sysinfo(monitor_handler) + + # Add agent event file + self.event_logger.add_event(name=AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.HeartBeat, + is_success=True, + message="Heartbeat", + log_event=False) + + # Add extension event file the way extension do it, by dropping a .tld file in the events folder + source_file = os.path.join(data_dir, "ext/dsc_event.json") + dest_file = os.path.join(conf.get_lib_dir(), "events", "dsc_event.tld") + shutil.copyfile(source_file, dest_file) + + # Collect these events and assert they are being sent with the correct sysinfo parameters from the agent + with patch.object(protocol, "report_event") as patch_report_event: + monitor_handler.collect_and_send_events() + + telemetry_events_list = patch_report_event.call_args_list[0][0][0] + self.assertEqual(len(telemetry_events_list.events), 2) + + for event in telemetry_events_list.events: + # All sysinfo parameters coming from the agent have to be present in the telemetry event to be emitted + for param in monitor_handler.sysinfo: + self.assertTrue(param in event.parameters) + + # The container id is a special parameter that is not a part of the static sysinfo parameter list. 
+ # The container id value is obtained from the goal state and must be present in all telemetry events. + container_id_param = TelemetryEventParam("ContainerId", protocol.client.goal_state.container_id) + self.assertTrue(container_id_param in event.parameters) + + @patch("azurelinuxagent.common.protocol.wire.WireClient.send_event") + @patch("azurelinuxagent.common.conf.get_lib_dir") + def test_collect_and_send_events(self, mock_lib_dir, patch_send_event, *args): + mock_lib_dir.return_value = self.lib_dir + + test_data = WireProtocolData(DATA_FILE) + monitor_handler, protocol = self._create_mock(test_data, *args) + monitor_handler.init_protocols() + self.mock_init_sysinfo(monitor_handler) + + self.event_logger.save_event(create_dummy_event(message="Message-Test")) + + monitor_handler.last_event_collection = None + monitor_handler.collect_and_send_events() + + # Validating the crafted message by the collect_and_send_events call. + self.assertEqual(1, patch_send_event.call_count) + send_event_call_args = protocol.client.send_event.call_args[0] + sample_message = '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + ']]>' \ + ''.format(AGENT_VERSION, CURRENT_AGENT) + + self.maxDiff = None + self.assertEqual(sample_message, send_event_call_args[1]) + + @patch("azurelinuxagent.common.protocol.wire.WireClient.send_event") + @patch("azurelinuxagent.common.conf.get_lib_dir") + def test_collect_and_send_events_with_small_events(self, mock_lib_dir, patch_send_event, *args): + mock_lib_dir.return_value = self.lib_dir + + test_data = WireProtocolData(DATA_FILE) + monitor_handler, protocol = self._create_mock(test_data, *args) + monitor_handler.init_protocols() + + sizes = [15, 15, 15, 15] # get the powers of 2 - 2**16 is the limit + + for power in sizes: + size = 2 ** power + self.event_logger.save_event(create_dummy_event(size)) + 
monitor_handler.collect_and_send_events() + + # The send_event call would be called each time, as we are filling up the buffer up to the brim for each call. + + self.assertEqual(4, patch_send_event.call_count) + + @patch("azurelinuxagent.common.protocol.wire.WireClient.send_event") + @patch("azurelinuxagent.common.conf.get_lib_dir") + def test_collect_and_send_events_with_large_events(self, mock_lib_dir, patch_send_event, *args): + mock_lib_dir.return_value = self.lib_dir + + test_data = WireProtocolData(DATA_FILE) + monitor_handler, protocol = self._create_mock(test_data, *args) + monitor_handler.init_protocols() + + sizes = [17, 17, 17] # get the powers of 2 + + for power in sizes: + size = 2 ** power + self.event_logger.save_event(create_dummy_event(size)) + + with patch("azurelinuxagent.common.logger.periodic_warn") as patch_periodic_warn: + monitor_handler.collect_and_send_events() + self.assertEqual(3, patch_periodic_warn.call_count) + + # The send_event call should never be called as the events are larger than 2**16. 
+ self.assertEqual(0, patch_send_event.call_count) + + @patch("azurelinuxagent.common.protocol.wire.WireClient.send_event") + @patch("azurelinuxagent.common.conf.get_lib_dir") + def test_collect_and_send_events_with_invalid_events(self, mock_lib_dir, patch_send_event, *args): + mock_lib_dir.return_value = self.lib_dir + dummy_events_dir = os.path.join(data_dir, "events", "collect_and_send_events_invalid_data") + fileutil.mkdir(self.event_logger.event_dir) + + test_data = WireProtocolData(DATA_FILE) + monitor_handler, protocol = self._create_mock(test_data, *args) + monitor_handler.init_protocols() + + for filename in os.listdir(dummy_events_dir): + shutil.copy(os.path.join(dummy_events_dir, filename), self.event_logger.event_dir) + + monitor_handler.collect_and_send_events() + + # Invalid events + self.assertEqual(0, patch_send_event.call_count) + + @patch("azurelinuxagent.common.protocol.wire.WireClient.send_event") + @patch("azurelinuxagent.common.conf.get_lib_dir") + def test_collect_and_send_events_cannot_read_events(self, mock_lib_dir, patch_send_event, *args): + mock_lib_dir.return_value = self.lib_dir + dummy_events_dir = os.path.join(data_dir, "events", "collect_and_send_events_unreadable_data") + fileutil.mkdir(self.event_logger.event_dir) + + test_data = WireProtocolData(DATA_FILE) + monitor_handler, protocol = self._create_mock(test_data, *args) + monitor_handler.init_protocols() + + for filename in os.listdir(dummy_events_dir): + shutil.copy(os.path.join(dummy_events_dir, filename), self.event_logger.event_dir) + + def builtins_version(): + if sys.version_info[0] == 2: + return "__builtin__" + else: + return "builtins" + + with patch("{0}.open".format(builtins_version())) as mock_open: + mock_open.side_effect = OSError(13, "Permission denied") + monitor_handler.collect_and_send_events() + + # Invalid events + self.assertEqual(0, patch_send_event.call_count) + + @patch("azurelinuxagent.common.conf.get_lib_dir") + def 
test_collect_and_send_with_http_post_returning_503(self, mock_lib_dir, *args): + mock_lib_dir.return_value = self.lib_dir + fileutil.mkdir(self.event_logger.event_dir) + + test_data = WireProtocolData(DATA_FILE) + monitor_handler, protocol = self._create_mock(test_data, *args) + monitor_handler.init_protocols() + + sizes = [1, 2, 3] # get the powers of 2, and multiple by 1024. + + for power in sizes: + size = 2 ** power * 1024 + self.event_logger.save_event(create_dummy_event(size)) + + with patch("azurelinuxagent.common.logger.error") as mock_error: + with patch("azurelinuxagent.common.utils.restutil.http_post") as mock_http_post: + mock_http_post.return_value = ResponseMock( + status=restutil.httpclient.SERVICE_UNAVAILABLE, + response="") + monitor_handler.collect_and_send_events() + self.assertEqual(1, mock_error.call_count) + self.assertEqual("[ProtocolError] [Wireserver Exception] [ProtocolError] [Wireserver Failed] " + "URI http://foo.bar/machine?comp=telemetrydata [HTTP Failed] Status Code 503", + mock_error.call_args[0][1]) + self.assertEqual(0, len(os.listdir(self.event_logger.event_dir))) + + @patch("azurelinuxagent.common.conf.get_lib_dir") + def test_collect_and_send_with_send_event_generating_exception(self, mock_lib_dir, *args): + mock_lib_dir.return_value = self.lib_dir + fileutil.mkdir(self.event_logger.event_dir) + + test_data = WireProtocolData(DATA_FILE) + monitor_handler, protocol = self._create_mock(test_data, *args) + monitor_handler.init_protocols() + + sizes = [1, 2, 3] # get the powers of 2, and multiple by 1024. + + for power in sizes: + size = 2 ** power * 1024 + self.event_logger.save_event(create_dummy_event(size)) + + monitor_handler.last_event_collection = datetime.datetime.utcnow() - timedelta(hours=1) + # This test validates that if we hit an issue while sending an event, we never send it again. 
+ with patch("azurelinuxagent.common.logger.warn") as mock_warn: + with patch("azurelinuxagent.common.protocol.wire.WireClient.send_event") as patch_send_event: + patch_send_event.side_effect = Exception() + monitor_handler.collect_and_send_events() + + self.assertEqual(1, mock_warn.call_count) + self.assertEqual(0, len(os.listdir(self.event_logger.event_dir))) + + @patch("azurelinuxagent.common.conf.get_lib_dir") + def test_collect_and_send_with_call_wireserver_returns_http_error(self, mock_lib_dir, *args): + mock_lib_dir.return_value = self.lib_dir + fileutil.mkdir(self.event_logger.event_dir) + + test_data = WireProtocolData(DATA_FILE) + monitor_handler, protocol = self._create_mock(test_data, *args) + monitor_handler.init_protocols() + + sizes = [1, 2, 3] # get the powers of 2, and multiple by 1024. + + for power in sizes: + size = 2 ** power * 1024 + self.event_logger.save_event(create_dummy_event(size)) + + monitor_handler.last_event_collection = datetime.datetime.utcnow() - timedelta(hours=1) + with patch("azurelinuxagent.common.logger.error") as mock_error: + with patch("azurelinuxagent.common.protocol.wire.WireClient.call_wireserver") as patch_call_wireserver: + patch_call_wireserver.side_effect = HttpError + monitor_handler.collect_and_send_events() + + self.assertEqual(1, mock_error.call_count) + self.assertEqual(0, len(os.listdir(self.event_logger.event_dir))) + + +@patch('azurelinuxagent.common.osutil.get_osutil') +@patch('azurelinuxagent.common.protocol.get_protocol_util') +@patch('azurelinuxagent.common.protocol.util.ProtocolUtil.get_protocol') +@patch("azurelinuxagent.common.protocol.healthservice.HealthService._report") +@patch("azurelinuxagent.common.utils.restutil.http_get") +class TestExtensionMetricsDataTelemetry(AgentTestCase): + + def setUp(self): + AgentTestCase.setUp(self) + CGroupsTelemetry.reset() + + @patch('azurelinuxagent.common.event.EventLogger.add_event') + 
@patch("azurelinuxagent.common.cgroupstelemetry.CGroupsTelemetry.poll_all_tracked") + @patch("azurelinuxagent.common.cgroupstelemetry.CGroupsTelemetry.report_all_tracked") + def test_send_extension_metrics_telemetry(self, patch_report_all_tracked, patch_poll_all_tracked, patch_add_event, + *args): + patch_report_all_tracked.return_value = { + "memory": { + "cur_mem": [1, 1, 1, 1, 1, str(datetime.datetime.utcnow()), str(datetime.datetime.utcnow())], + "max_mem": [1, 1, 1, 1, 1, str(datetime.datetime.utcnow()), str(datetime.datetime.utcnow())] + }, + "cpu": { + "cur_cpu": [1, 1, 1, 1, 1, str(datetime.datetime.utcnow()), str(datetime.datetime.utcnow())] + } + } + + monitor_handler = get_monitor_handler() + monitor_handler.init_protocols() + monitor_handler.last_cgroup_polling_telemetry = datetime.datetime.utcnow() - timedelta(hours=1) + monitor_handler.last_cgroup_report_telemetry = datetime.datetime.utcnow() - timedelta(hours=1) + monitor_handler.poll_telemetry_metrics() + monitor_handler.send_telemetry_metrics() + self.assertEqual(1, patch_poll_all_tracked.call_count) + self.assertEqual(1, patch_report_all_tracked.call_count) + self.assertEqual(1, patch_add_event.call_count) + monitor_handler.stop() + + @patch('azurelinuxagent.common.event.EventLogger.add_event') + @patch("azurelinuxagent.common.cgroupstelemetry.CGroupsTelemetry.poll_all_tracked") + @patch("azurelinuxagent.common.cgroupstelemetry.CGroupsTelemetry.report_all_tracked", return_value={}) + def test_send_extension_metrics_telemetry_for_empty_cgroup(self, patch_report_all_tracked, patch_poll_all_tracked, + patch_add_event, *args): + patch_report_all_tracked.return_value = {} + + monitor_handler = get_monitor_handler() + monitor_handler.init_protocols() + monitor_handler.last_cgroup_polling_telemetry = datetime.datetime.utcnow() - timedelta(hours=1) + monitor_handler.last_cgroup_report_telemetry = datetime.datetime.utcnow() - timedelta(hours=1) + monitor_handler.poll_telemetry_metrics() + 
monitor_handler.send_telemetry_metrics() + self.assertEqual(1, patch_poll_all_tracked.call_count) + self.assertEqual(1, patch_report_all_tracked.call_count) + self.assertEqual(0, patch_add_event.call_count) + monitor_handler.stop() + + @skip_if_predicate_false(are_cgroups_enabled, "Does not run when Cgroups are not enabled") + @patch('azurelinuxagent.common.event.EventLogger.add_event') + @attr('requires_sudo') + def test_send_extension_metrics_telemetry_with_actual_cgroup(self, patch_add_event, *args): + self.assertTrue(i_am_root(), "Test does not run when non-root") + + num_polls = 5 + name = "test-cgroup" + + cgs = make_new_cgroup(name) + + self.assertEqual(len(cgs), 2) + + for cgroup in cgs: + CGroupsTelemetry.track_cgroup(cgroup) + + for i in range(num_polls): + CGroupsTelemetry.poll_all_tracked() + consume_cpu_time() # Eat some CPU + consume_memory() + + monitor_handler = get_monitor_handler() + monitor_handler.init_protocols() + monitor_handler.last_cgroup_polling_telemetry = datetime.datetime.utcnow() - timedelta(hours=1) + monitor_handler.last_cgroup_report_telemetry = datetime.datetime.utcnow() - timedelta(hours=1) + monitor_handler.poll_telemetry_metrics() + monitor_handler.send_telemetry_metrics() + self.assertEqual(1, patch_add_event.call_count) + + name = patch_add_event.call_args[0][0] + fields = patch_add_event.call_args[1] + + self.assertEqual(name, "WALinuxAgent") + self.assertEqual(fields["op"], "ExtensionMetricsData") + self.assertEqual(fields["is_success"], True) + self.assertEqual(fields["log_event"], False) + self.assertEqual(fields["is_internal"], False) + self.assertIsInstance(fields["message"], ustr) + + monitor_handler.stop() + + @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_stat") + def test_generate_extension_metrics_telemetry_dictionary(self, *args): + num_polls = 10 + num_extensions = 1 + num_summarization_values = 7 + + cpu_percent_values = [random.randint(0, 100) for _ in range(num_polls)] + + # only 
verifying calculations and not validity of the values. + memory_usage_values = [random.randint(0, 8 * 1024 ** 3) for _ in range(num_polls)] + max_memory_usage_values = [random.randint(0, 8 * 1024 ** 3) for _ in range(num_polls)] + + for i in range(num_extensions): + dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup) + + dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", + "dummy_extension_{0}".format(i)) + CGroupsTelemetry.track_cgroup(dummy_memory_cgroup) + + self.assertEqual(2 * num_extensions, len(CGroupsTelemetry._tracked)) + + with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_max_memory_usage") as patch_get_memory_max_usage: + with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_memory_usage") as patch_get_memory_usage: + with patch("azurelinuxagent.common.cgroup.CpuCgroup._get_cpu_percent") as patch_get_cpu_percent: + with patch("azurelinuxagent.common.cgroup.CpuCgroup._update_cpu_data") as patch_update_cpu_data: + with patch("azurelinuxagent.common.cgroup.CGroup.is_active") as patch_is_active: + for i in range(num_polls): + patch_is_active.return_value = True + patch_get_cpu_percent.return_value = cpu_percent_values[i] + patch_get_memory_usage.return_value = memory_usage_values[i] # example 200 MB + patch_get_memory_max_usage.return_value = max_memory_usage_values[i] # example 450 MB + CGroupsTelemetry.poll_all_tracked() + + performance_metrics = CGroupsTelemetry.report_all_tracked() + + message_json = generate_extension_metrics_telemetry_dictionary(schema_version=1.0, + performance_metrics=performance_metrics) + + for i in range(num_extensions): + self.assertTrue(CGroupsTelemetry.is_tracked("dummy_cpu_path_{0}".format(i))) + self.assertTrue(CGroupsTelemetry.is_tracked("dummy_memory_path_{0}".format(i))) + + self.assertIn("SchemaVersion", message_json) + self.assertIn("PerfMetrics", message_json) + + 
collected_metrics = message_json["PerfMetrics"] + + for i in range(num_extensions): + extn_name = "dummy_extension_{0}".format(i) + + self.assertIn("memory", collected_metrics[extn_name]) + self.assertIn("cur_mem", collected_metrics[extn_name]["memory"]) + self.assertIn("max_mem", collected_metrics[extn_name]["memory"]) + self.assertEqual(len(collected_metrics[extn_name]["memory"]["cur_mem"]), num_summarization_values) + self.assertEqual(len(collected_metrics[extn_name]["memory"]["max_mem"]), num_summarization_values) + + self.assertIsInstance(collected_metrics[extn_name]["memory"]["cur_mem"][5], str) + self.assertIsInstance(collected_metrics[extn_name]["memory"]["cur_mem"][6], str) + self.assertIsInstance(collected_metrics[extn_name]["memory"]["max_mem"][5], str) + self.assertIsInstance(collected_metrics[extn_name]["memory"]["max_mem"][6], str) + + self.assertIn("cpu", collected_metrics[extn_name]) + self.assertIn("cur_cpu", collected_metrics[extn_name]["cpu"]) + self.assertEqual(len(collected_metrics[extn_name]["cpu"]["cur_cpu"]), num_summarization_values) + + self.assertIsInstance(collected_metrics[extn_name]["cpu"]["cur_cpu"][5], str) + self.assertIsInstance(collected_metrics[extn_name]["cpu"]["cur_cpu"][6], str) + + message_json = generate_extension_metrics_telemetry_dictionary(schema_version=1.0, + performance_metrics=None) + self.assertIn("SchemaVersion", message_json) + self.assertNotIn("PerfMetrics", message_json) + + message_json = generate_extension_metrics_telemetry_dictionary(schema_version=2.0, + performance_metrics=None) + self.assertEqual(message_json, None) + + message_json = generate_extension_metrics_telemetry_dictionary(schema_version="z", + performance_metrics=None) + self.assertEqual(message_json, None) + + +@patch('azurelinuxagent.common.event.EventLogger.add_event') +@patch("azurelinuxagent.common.utils.restutil.http_post") +@patch("azurelinuxagent.common.utils.restutil.http_get") 
+@patch('azurelinuxagent.common.protocol.wire.WireClient.get_goal_state') +@patch('azurelinuxagent.common.protocol.util.ProtocolUtil.get_protocol', return_value=WireProtocol('endpoint')) +class TestMonitorFailure(AgentTestCase): + + @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_heartbeat") + def test_error_heartbeat_creates_no_signal(self, patch_report_heartbeat, *args): + patch_http_get = args[2] + patch_add_event = args[4] + + monitor_handler = get_monitor_handler() + monitor_handler.init_protocols() + monitor_handler.last_host_plugin_heartbeat = datetime.datetime.utcnow() - timedelta(hours=1) + + patch_http_get.side_effect = IOError('client error') + monitor_handler.send_host_plugin_heartbeat() + + # health report should not be made + self.assertEqual(0, patch_report_heartbeat.call_count) + + # telemetry with failure details is sent + self.assertEqual(1, patch_add_event.call_count) + self.assertEqual('HostPluginHeartbeat', patch_add_event.call_args[1]['op']) + self.assertTrue('client error' in patch_add_event.call_args[1]['message']) - self.assertEquals(5, counter) + self.assertEqual(False, patch_add_event.call_args[1]['is_success']) + monitor_handler.stop() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/ga/test_remoteaccess_handler.py walinuxagent-2.2.45/tests/ga/test_remoteaccess_handler.py --- walinuxagent-2.2.21+really2.2.20/tests/ga/test_remoteaccess_handler.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/ga/test_remoteaccess_handler.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,527 @@ +# Copyright Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.6+ and Openssl 1.0+ +# + +from datetime import timedelta + +from azurelinuxagent.common.exception import RemoteAccessError +from azurelinuxagent.common.protocol.wire import * +from azurelinuxagent.ga.remoteaccess import RemoteAccessHandler +from tests.common.osutil.mock_osutil import MockOSUtil +from tests.tools import * + + +info_messages = [] +error_messages = [] + + +def get_user_dictionary(users): + user_dictionary = {} + for user in users: + user_dictionary[user[0]] = user + return user_dictionary + + +def log_info(msg_format, *args): + info_messages.append(msg_format.format(args)) + + +def log_error(msg_format, *args): + error_messages.append(msg_format.format(args)) + + +def mock_add_event(name, op, is_success, version, message): + TestRemoteAccessHandler.eventing_data = (name, op, is_success, version, message) + + +class TestRemoteAccessHandler(AgentTestCase): + eventing_data = [()] + + def setUp(self): + super(TestRemoteAccessHandler, self).setUp() + del info_messages[:] + del error_messages[:] + for data in TestRemoteAccessHandler.eventing_data: + del data + + # add_user tests + @patch('azurelinuxagent.common.logger.Logger.info', side_effect=log_info) + @patch('azurelinuxagent.common.logger.Logger.error', side_effect=log_error) + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") + def test_add_user(self, _1, _2, _3): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + tstpassword = "]aPPEv}uNg1FPnl?" 
+ tstuser = "foobar" + expiration_date = datetime.utcnow() + timedelta(days=1) + pwd = tstpassword + rah.add_user(tstuser, pwd, expiration_date) + users = get_user_dictionary(rah.os_util.get_users()) + self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) + actual_user = users[tstuser] + expected_expiration = (expiration_date + timedelta(days=1)).strftime("%Y-%m-%d") + self.assertEqual(actual_user[7], expected_expiration) + self.assertEqual(actual_user[4], "JIT_Account") + self.assertEqual(0, len(error_messages)) + self.assertEqual(1, len(info_messages)) + self.assertEqual(info_messages[0], "User '{0}' added successfully with expiration in {1}" + .format(tstuser, expected_expiration)) + + @patch('azurelinuxagent.common.logger.Logger.info', side_effect=log_info) + @patch('azurelinuxagent.common.logger.Logger.error', side_effect=log_error) + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") + def test_add_user_bad_creation_data(self, _1, _2, _3): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + tstpassword = "]aPPEv}uNg1FPnl?" + tstuser = "" + expiration = datetime.utcnow() + timedelta(days=1) + pwd = tstpassword + error = "Error adding user {0}. 
test exception for bad username".format(tstuser) + self.assertRaisesRegex(RemoteAccessError, error, rah.add_user, tstuser, pwd, expiration) + self.assertEqual(0, len(rah.os_util.get_users())) + self.assertEqual(0, len(error_messages)) + self.assertEqual(0, len(info_messages)) + + @patch('azurelinuxagent.common.logger.Logger.info', side_effect=log_info) + @patch('azurelinuxagent.common.logger.Logger.error', side_effect=log_error) + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="") + def test_add_user_bad_password_data(self, _1, _2, _3): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + tstpassword = "" + tstuser = "foobar" + expiration = datetime.utcnow() + timedelta(days=1) + pwd = tstpassword + error = "Error adding user {0} cleanup successful\nInner error: test exception for bad password".format(tstuser) + self.assertRaisesRegex(RemoteAccessError, error, rah.add_user, tstuser, pwd, expiration) + self.assertEqual(0, len(rah.os_util.get_users())) + self.assertEqual(0, len(error_messages)) + self.assertEqual(1, len(info_messages)) + self.assertEqual("User deleted {0}".format(tstuser), info_messages[0]) + + + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', + return_value="]aPPEv}uNg1FPnl?") + def test_add_user_already_existing(self, _): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + tstpassword = "]aPPEv}uNg1FPnl?" + tstuser = "foobar" + expiration_date = datetime.utcnow() + timedelta(days=1) + pwd = tstpassword + rah.add_user(tstuser, pwd, expiration_date) + users = get_user_dictionary(rah.os_util.get_users()) + self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) + self.assertEqual(1, len(users.keys())) + actual_user = users[tstuser] + self.assertEqual(actual_user[7], (expiration_date + timedelta(days=1)).strftime("%Y-%m-%d")) + # add the new duplicate user, ensure it's not created and does not overwrite the existing user. 
+ # this does not test the user add function as that's mocked, it tests processing skips the remaining + # calls after the initial failure + new_user_expiration = datetime.utcnow() + timedelta(days=5) + self.assertRaises(RemoteAccessError, rah.add_user, tstuser, pwd, new_user_expiration) + # refresh users + users = get_user_dictionary(rah.os_util.get_users()) + self.assertTrue(tstuser in users, "{0} missing from users after dup user attempted".format(tstuser)) + self.assertEqual(1, len(users.keys())) + actual_user = users[tstuser] + self.assertEqual(actual_user[7], (expiration_date + timedelta(days=1)).strftime("%Y-%m-%d")) + + # delete_user tests + @patch('azurelinuxagent.common.logger.Logger.info', side_effect=log_info) + @patch('azurelinuxagent.common.logger.Logger.error', side_effect=log_error) + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") + def test_delete_user(self, _1, _2, _3): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + tstpassword = "]aPPEv}uNg1FPnl?" 
+ tstuser = "foobar" + expiration_date = datetime.utcnow() + timedelta(days=1) + expected_expiration = (expiration_date + timedelta(days=1)).strftime("%Y-%m-%d") + pwd = tstpassword + rah.add_user(tstuser, pwd, expiration_date) + users = get_user_dictionary(rah.os_util.get_users()) + self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) + rah.delete_user(tstuser) + # refresh users + users = get_user_dictionary(rah.os_util.get_users()) + self.assertFalse(tstuser in users) + self.assertEqual(0, len(error_messages)) + self.assertEqual(2, len(info_messages)) + self.assertEqual("User '{0}' added successfully with expiration in {1}".format(tstuser, expected_expiration), + info_messages[0]) + self.assertEqual("User deleted {0}".format(tstuser), info_messages[1]) + + def test_handle_failed_create_with_bad_data(self): + mock_os_util = MockOSUtil() + testusr = "foobar" + mock_os_util.all_users[testusr] = (testusr, None, None, None, None, None, None, None) + rah = RemoteAccessHandler() + rah.os_util = mock_os_util + self.assertRaises(RemoteAccessError, rah.handle_failed_create, "") + users = get_user_dictionary(rah.os_util.get_users()) + self.assertEqual(1, len(users.keys())) + self.assertTrue(testusr in users, "Expected user {0} missing".format(testusr)) + + @patch('azurelinuxagent.common.logger.Logger.info', side_effect=log_info) + @patch('azurelinuxagent.common.logger.Logger.error', side_effect=log_error) + def test_delete_user_does_not_exist(self, _1, _2): + mock_os_util = MockOSUtil() + testusr = "foobar" + mock_os_util.all_users[testusr] = (testusr, None, None, None, None, None, None, None) + rah = RemoteAccessHandler() + rah.os_util = mock_os_util + testuser = "Carl" + error = "Failed to clean up after account creation for {0}.\n" \ + "Inner error: test exception, user does not exist to delete".format(testuser) + self.assertRaisesRegex(RemoteAccessError, error, rah.handle_failed_create, testuser) + users = 
get_user_dictionary(rah.os_util.get_users()) + self.assertEqual(1, len(users.keys())) + self.assertTrue(testusr in users, "Expected user {0} missing".format(testusr)) + self.assertEqual(0, len(error_messages)) + self.assertEqual(0, len(info_messages)) + + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', + return_value="]aPPEv}uNg1FPnl?") + def test_handle_new_user(self, _): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + data_str = load_data('wire/remote_access_single_account.xml') + remote_access = RemoteAccess(data_str) + tstuser = remote_access.user_list.users[0].name + expiration_date = datetime.utcnow() + timedelta(days=1) + expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" + remote_access.user_list.users[0].expiration = expiration + rah.remote_access = remote_access + rah.handle_remote_access() + users = get_user_dictionary(rah.os_util.get_users()) + self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) + actual_user = users[tstuser] + expected_expiration = (expiration_date + timedelta(days=1)).strftime("%Y-%m-%d") + self.assertEqual(actual_user[7], expected_expiration) + self.assertEqual(actual_user[4], "JIT_Account") + + def test_do_not_add_expired_user(self): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + data_str = load_data('wire/remote_access_single_account.xml') + remote_access = RemoteAccess(data_str) + expiration = (datetime.utcnow() - timedelta(days=2)).strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" + remote_access.user_list.users[0].expiration = expiration + rah.remote_access = remote_access + rah.handle_remote_access() + users = get_user_dictionary(rah.os_util.get_users()) + self.assertFalse("testAccount" in users) + + @patch('azurelinuxagent.common.logger.Logger.info', side_effect=log_info) + @patch('azurelinuxagent.common.logger.Logger.error', side_effect=log_error) + def test_error_add_user(self, _1, _2): + rah = RemoteAccessHandler() + rah.os_util = 
MockOSUtil() + tstuser = "foobar" + expiration = datetime.utcnow() + timedelta(days=1) + pwd = "bad password" + error = "Error adding user foobar cleanup successful\n" \ + "Inner error: \[CryptError\] Error decoding secret\n" \ + "Inner error: Incorrect padding".format(tstuser) + self.assertRaisesRegex(RemoteAccessError, error, rah.add_user, tstuser, pwd, expiration) + users = get_user_dictionary(rah.os_util.get_users()) + self.assertEqual(0, len(users)) + self.assertEqual(0, len(error_messages)) + self.assertEqual(1, len(info_messages)) + self.assertEqual("User deleted {0}".format(tstuser), info_messages[0]) + + def test_handle_remote_access_no_users(self): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + data_str = load_data('wire/remote_access_no_accounts.xml') + remote_access = RemoteAccess(data_str) + rah.remote_access = remote_access + rah.handle_remote_access() + users = get_user_dictionary(rah.os_util.get_users()) + self.assertEqual(0, len(users.keys())) + + def test_handle_remote_access_validate_jit_user_valid(self): + rah = RemoteAccessHandler() + comment = "JIT_Account" + result = rah.validate_jit_user(comment) + self.assertTrue(result, "Did not identify '{0}' as a JIT_Account".format(comment)) + + def test_handle_remote_access_validate_jit_user_invalid(self): + rah = RemoteAccessHandler() + test_users = ["John Doe", None, "", " "] + failed_results = "" + for user in test_users: + if rah.validate_jit_user(user): + failed_results += "incorrectly identified '{0} as a JIT_Account'. 
".format(user) + if len(failed_results) > 0: + self.fail(failed_results) + + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', + return_value="]aPPEv}uNg1FPnl?") + def test_handle_remote_access_multiple_users(self, _): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + data_str = load_data('wire/remote_access_two_accounts.xml') + remote_access = RemoteAccess(data_str) + testusers = [] + count = 0 + while count < 2: + user = remote_access.user_list.users[count].name + expiration_date = datetime.utcnow() + timedelta(days=count + 1) + expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" + remote_access.user_list.users[count].expiration = expiration + testusers.append(user) + count += 1 + rah.remote_access = remote_access + rah.handle_remote_access() + users = get_user_dictionary(rah.os_util.get_users()) + self.assertTrue(testusers[0] in users, "{0} missing from users".format(testusers[0])) + self.assertTrue(testusers[1] in users, "{0} missing from users".format(testusers[1])) + + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', + return_value="]aPPEv}uNg1FPnl?") + # max fabric supports in the Goal State + def test_handle_remote_access_ten_users(self, _): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + data_str = load_data('wire/remote_access_10_accounts.xml') + remote_access = RemoteAccess(data_str) + count = 0 + for user in remote_access.user_list.users: + count += 1 + user.name = "tstuser{0}".format(count) + expiration_date = datetime.utcnow() + timedelta(days=count) + user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" + rah.remote_access = remote_access + rah.handle_remote_access() + users = get_user_dictionary(rah.os_util.get_users()) + self.assertEqual(10, len(users.keys())) + + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', + return_value="]aPPEv}uNg1FPnl?") + def test_handle_remote_access_user_removed(self, _): + rah = 
RemoteAccessHandler() + rah.os_util = MockOSUtil() + data_str = load_data('wire/remote_access_10_accounts.xml') + remote_access = RemoteAccess(data_str) + count = 0 + for user in remote_access.user_list.users: + count += 1 + user.name = "tstuser{0}".format(count) + expiration_date = datetime.utcnow() + timedelta(days=count) + user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" + rah.remote_access = remote_access + rah.handle_remote_access() + users = get_user_dictionary(rah.os_util.get_users()) + self.assertEqual(10, len(users.keys())) + del rah.remote_access.user_list.users[:] + self.assertEqual(10, len(users.keys())) + + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', + return_value="]aPPEv}uNg1FPnl?") + def test_handle_remote_access_bad_data_and_good_data(self, _): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + data_str = load_data('wire/remote_access_10_accounts.xml') + remote_access = RemoteAccess(data_str) + count = 0 + for user in remote_access.user_list.users: + count += 1 + user.name = "tstuser{0}".format(count) + if count is 2: + user.name = "" + expiration_date = datetime.utcnow() + timedelta(days=count) + user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" + rah.remote_access = remote_access + rah.handle_remote_access() + users = get_user_dictionary(rah.os_util.get_users()) + self.assertEqual(9, len(users.keys())) + + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', + return_value="]aPPEv}uNg1FPnl?") + def test_handle_remote_access_deleted_user_readded(self, _): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + data_str = load_data('wire/remote_access_single_account.xml') + remote_access = RemoteAccess(data_str) + tstuser = remote_access.user_list.users[0].name + expiration_date = datetime.utcnow() + timedelta(days=1) + expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" + 
remote_access.user_list.users[0].expiration = expiration + rah.remote_access = remote_access + rah.handle_remote_access() + users = get_user_dictionary(rah.os_util.get_users()) + self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) + os_util = rah.os_util + os_util.__class__ = MockOSUtil + os_util.all_users.clear() + # refresh users + users = get_user_dictionary(rah.os_util.get_users()) + self.assertTrue(tstuser not in users) + rah.handle_remote_access() + # refresh users + users = get_user_dictionary(rah.os_util.get_users()) + self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) + + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', + return_value="]aPPEv}uNg1FPnl?") + @patch('azurelinuxagent.common.osutil.get_osutil', + return_value=MockOSUtil()) + @patch('azurelinuxagent.common.protocol.util.ProtocolUtil.get_protocol', + return_value=WireProtocol("12.34.56.78")) + @patch('azurelinuxagent.common.protocol.wire.WireProtocol.get_incarnation', + return_value="1") + @patch('azurelinuxagent.common.protocol.wire.WireClient.get_remote_access', + return_value="asdf") + def test_remote_access_handler_run_bad_data(self, _1, _2, _3, _4, _5): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + tstpassword = "]aPPEv}uNg1FPnl?" 
+ tstuser = "foobar" + expiration_date = datetime.utcnow() + timedelta(days=1) + pwd = tstpassword + rah.add_user(tstuser, pwd, expiration_date) + users = get_user_dictionary(rah.os_util.get_users()) + self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) + rah.run() + self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) + + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', + return_value="]aPPEv}uNg1FPnl?") + def test_handle_remote_access_multiple_users_one_removed(self, _): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + data_str = load_data('wire/remote_access_10_accounts.xml') + remote_access = RemoteAccess(data_str) + count = 0 + for user in remote_access.user_list.users: + count += 1 + user.name = "tstuser{0}".format(count) + expiration_date = datetime.utcnow() + timedelta(days=count) + user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" + rah.remote_access = remote_access + rah.handle_remote_access() + users = rah.os_util.get_users() + self.assertEqual(10, len(users)) + # now remove the user from RemoteAccess + deleted_user = rah.remote_access.user_list.users[3] + del rah.remote_access.user_list.users[3] + rah.handle_remote_access() + users = rah.os_util.get_users() + self.assertTrue(deleted_user not in users, "{0} still in users".format(deleted_user)) + self.assertEqual(9, len(users)) + + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', + return_value="]aPPEv}uNg1FPnl?") + def test_handle_remote_access_multiple_users_null_remote_access(self, _): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + data_str = load_data('wire/remote_access_10_accounts.xml') + remote_access = RemoteAccess(data_str) + count = 0 + for user in remote_access.user_list.users: + count += 1 + user.name = "tstuser{0}".format(count) + expiration_date = datetime.utcnow() + timedelta(days=count) + user.expiration = expiration_date.strftime("%a, %d %b 
%Y %H:%M:%S ") + "UTC" + rah.remote_access = remote_access + rah.handle_remote_access() + users = rah.os_util.get_users() + self.assertEqual(10, len(users)) + # now remove the user from RemoteAccess + rah.remote_access = None + rah.handle_remote_access() + users = rah.os_util.get_users() + self.assertEqual(0, len(users)) + + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', + return_value="]aPPEv}uNg1FPnl?") + def test_handle_remote_access_multiple_users_error_with_null_remote_access(self, _): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + data_str = load_data('wire/remote_access_10_accounts.xml') + remote_access = RemoteAccess(data_str) + count = 0 + for user in remote_access.user_list.users: + count += 1 + user.name = "tstuser{0}".format(count) + expiration_date = datetime.utcnow() + timedelta(days=count) + user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" + rah.remote_access = remote_access + rah.handle_remote_access() + users = rah.os_util.get_users() + self.assertEqual(10, len(users)) + # now remove the user from RemoteAccess + rah.remote_access = None + rah.handle_remote_access() + users = rah.os_util.get_users() + self.assertEqual(0, len(users)) + + def test_remove_user_error(self): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + error = "Failed to delete user {0}\nInner error: test exception, bad data".format("") + self.assertRaisesRegex(RemoteAccessError, error, rah.remove_user, "") + + def test_remove_user_not_exists(self): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + user = "bob" + error = "Failed to delete user {0}\n" \ + "Inner error: test exception, user does not exist to delete".format(user) + self.assertRaisesRegex(RemoteAccessError, error, rah.remove_user, user) + + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', + return_value="]aPPEv}uNg1FPnl?") + def test_handle_remote_access_remove_and_add(self, _): + rah = 
RemoteAccessHandler() + rah.os_util = MockOSUtil() + data_str = load_data('wire/remote_access_10_accounts.xml') + remote_access = RemoteAccess(data_str) + count = 0 + for user in remote_access.user_list.users: + count += 1 + user.name = "tstuser{0}".format(count) + expiration_date = datetime.utcnow() + timedelta(days=count) + user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" + rah.remote_access = remote_access + rah.handle_remote_access() + users = rah.os_util.get_users() + self.assertEqual(10, len(users)) + # now remove the user from RemoteAccess + new_user = "tstuser11" + deleted_user = rah.remote_access.user_list.users[3] + rah.remote_access.user_list.users[3].name = new_user + rah.handle_remote_access() + users = rah.os_util.get_users() + self.assertTrue(deleted_user not in users, "{0} still in users".format(deleted_user)) + self.assertTrue(new_user in [u[0] for u in users], "user {0} not in users".format(new_user)) + self.assertEqual(10, len(users)) + + @patch('azurelinuxagent.ga.remoteaccess.add_event', side_effect=mock_add_event) + @patch('azurelinuxagent.common.protocol.util.ProtocolUtil.get_protocol', side_effect=RemoteAccessError("foobar!")) + def test_remote_access_handler_run_error(self, _1, _2): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + rah.run() + print(TestRemoteAccessHandler.eventing_data) + check_message = "foobar!" 
+ self.assertTrue(check_message in TestRemoteAccessHandler.eventing_data[4], + "expected message {0} not found in {1}" + .format(check_message, TestRemoteAccessHandler.eventing_data[4])) + self.assertEqual(False, TestRemoteAccessHandler.eventing_data[2], "is_success is true") diff -Nru walinuxagent-2.2.21+really2.2.20/tests/ga/test_remoteaccess.py walinuxagent-2.2.45/tests/ga/test_remoteaccess.py --- walinuxagent-2.2.21+really2.2.20/tests/ga/test_remoteaccess.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/ga/test_remoteaccess.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,94 @@ +# Copyright Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ +# +import xml + +from tests.tools import * +from azurelinuxagent.common.protocol.wire import * +from azurelinuxagent.common.osutil import get_osutil + +class TestRemoteAccess(AgentTestCase): + def test_parse_remote_access(self): + data_str = load_data('wire/remote_access_single_account.xml') + remote_access = RemoteAccess(data_str) + self.assertNotEquals(None, remote_access) + self.assertEquals("1", remote_access.incarnation) + self.assertEquals(1, len(remote_access.user_list.users), "User count does not match.") + self.assertEquals("testAccount", remote_access.user_list.users[0].name, "Account name does not match") + self.assertEquals("encryptedPasswordString", remote_access.user_list.users[0].encrypted_password, "Encrypted password does not match.") + self.assertEquals("2019-01-01", remote_access.user_list.users[0].expiration, "Expiration does not match.") + + @patch('azurelinuxagent.common.protocol.wire.WireClient.get_goal_state', + return_value=GoalState(load_data('wire/goal_state.xml'))) + def test_update_remote_access_conf_no_remote_access(self, _): + protocol = WireProtocol('12.34.56.78') + goal_state = protocol.client.get_goal_state() + protocol.client.update_remote_access_conf(goal_state) + + def test_parse_two_remote_access_accounts(self): + data_str = load_data('wire/remote_access_two_accounts.xml') + remote_access = RemoteAccess(data_str) + self.assertNotEquals(None, remote_access) + self.assertEquals("1", remote_access.incarnation) + self.assertEquals(2, len(remote_access.user_list.users), "User count does not match.") + self.assertEquals("testAccount1", remote_access.user_list.users[0].name, "Account name does not match") + self.assertEquals("encryptedPasswordString", remote_access.user_list.users[0].encrypted_password, "Encrypted password does not match.") + self.assertEquals("2019-01-01", remote_access.user_list.users[0].expiration, "Expiration does not match.") + self.assertEquals("testAccount2", 
remote_access.user_list.users[1].name, "Account name does not match") + self.assertEquals("encryptedPasswordString", remote_access.user_list.users[1].encrypted_password, "Encrypted password does not match.") + self.assertEquals("2019-01-01", remote_access.user_list.users[1].expiration, "Expiration does not match.") + + def test_parse_ten_remote_access_accounts(self): + data_str = load_data('wire/remote_access_10_accounts.xml') + remote_access = RemoteAccess(data_str) + self.assertNotEquals(None, remote_access) + self.assertEquals(10, len(remote_access.user_list.users), "User count does not match.") + + def test_parse_duplicate_remote_access_accounts(self): + data_str = load_data('wire/remote_access_duplicate_accounts.xml') + remote_access = RemoteAccess(data_str) + self.assertNotEquals(None, remote_access) + self.assertEquals(2, len(remote_access.user_list.users), "User count does not match.") + self.assertEquals("testAccount", remote_access.user_list.users[0].name, "Account name does not match") + self.assertEquals("encryptedPasswordString", remote_access.user_list.users[0].encrypted_password, "Encrypted password does not match.") + self.assertEquals("2019-01-01", remote_access.user_list.users[0].expiration, "Expiration does not match.") + self.assertEquals("testAccount", remote_access.user_list.users[1].name, "Account name does not match") + self.assertEquals("encryptedPasswordString", remote_access.user_list.users[1].encrypted_password, "Encrypted password does not match.") + self.assertEquals("2019-01-01", remote_access.user_list.users[1].expiration, "Expiration does not match.") + + def test_parse_zero_remote_access_accounts(self): + data_str = load_data('wire/remote_access_no_accounts.xml') + remote_access = RemoteAccess(data_str) + self.assertNotEquals(None, remote_access) + self.assertEquals(0, len(remote_access.user_list.users), "User count does not match.") + + @patch('azurelinuxagent.common.protocol.wire.WireClient.get_goal_state', + 
return_value=GoalState(load_data('wire/goal_state_remote_access.xml'))) + @patch('azurelinuxagent.common.protocol.wire.WireClient.fetch_config', + return_value=load_data('wire/remote_access_single_account.xml')) + @patch('azurelinuxagent.common.protocol.wire.WireClient.get_header_for_cert') + def test_update_remote_access_conf_remote_access(self, _1, _2, _3): + protocol = WireProtocol('12.34.56.78') + goal_state = protocol.client.get_goal_state() + protocol.client.update_remote_access_conf(goal_state) + self.assertNotEquals(None, protocol.client.remote_access) + self.assertEquals(1, len(protocol.client.remote_access.user_list.users)) + self.assertEquals('testAccount', protocol.client.remote_access.user_list.users[0].name) + self.assertEquals('encryptedPasswordString', protocol.client.remote_access.user_list.users[0].encrypted_password) + + def test_parse_bad_remote_access_data(self): + data = "foobar" + self.assertRaises(xml.parsers.expat.ExpatError, RemoteAccess, data) \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/ga/test_update.py walinuxagent-2.2.45/tests/ga/test_update.py --- walinuxagent-2.2.21+really2.2.20/tests/ga/test_update.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/ga/test_update.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,28 +1,8 @@ -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Requires Python 2.4+ and Openssl 1.0+ -# +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the Apache License. from __future__ import print_function -from datetime import datetime - -import json -import shutil -import stat - from azurelinuxagent.common.event import * from azurelinuxagent.common.protocol.hostplugin import * from azurelinuxagent.common.protocol.metadata import * @@ -152,7 +132,7 @@ if len(agents) <= 0: agents = get_agent_pkgs() for agent in agents: - fileutil.copy_file(agent, to_dir=self.tmp_dir) + shutil.copy(agent, self.tmp_dir) return def expand_agents(self): @@ -181,7 +161,7 @@ return def prepare_agents(self, - count=5, + count=20, is_available=True): # Ensure the test data is copied over @@ -389,6 +369,30 @@ @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") + def test_resource_gone_error_not_blacklisted(self, mock_loaded, mock_downloaded): + try: + mock_downloaded.side_effect = ResourceGoneError() + agent = GuestAgent(path=self.agent_path) + self.assertFalse(agent.is_blacklisted) + except ResourceGoneError: + pass + except: + self.fail("Exception was not expected!") + + @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") + @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") + def test_ioerror_not_blacklisted(self, mock_loaded, mock_downloaded): + try: + mock_downloaded.side_effect = IOError() + agent = GuestAgent(path=self.agent_path) + self.assertFalse(agent.is_blacklisted) + except IOError: + pass + except: + self.fail("Exception was not expected!") + + @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") + @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_is_downloaded(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(agent.is_downloaded) @@ -510,7 +514,8 @@ @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") 
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") @patch("azurelinuxagent.ga.update.restutil.http_get") - def test_download_fallback(self, mock_http_get, mock_loaded, mock_downloaded): + @patch("azurelinuxagent.ga.update.restutil.http_post") + def test_download_fallback(self, mock_http_post, mock_http_get, mock_loaded, mock_downloaded): self.remove_agents() self.assertFalse(os.path.isdir(self.agent_path)) @@ -665,22 +670,17 @@ self.assertEqual(None, self.update_handler.signal_handler) - def test_emit_restart_event_writes_sentinal_file(self): - self.assertFalse(os.path.isfile(self.update_handler._sentinal_file_path())) - self.update_handler._emit_restart_event() - self.assertTrue(os.path.isfile(self.update_handler._sentinal_file_path())) - def test_emit_restart_event_emits_event_if_not_clean_start(self): try: mock_event = self.event_patch.start() - self.update_handler._set_sentinal() + self.update_handler._set_sentinel() self.update_handler._emit_restart_event() self.assertEqual(1, mock_event.call_count) except Exception as e: pass self.event_patch.stop() - def _create_protocol(self, count=5, versions=None): + def _create_protocol(self, count=20, versions=None): latest_version = self.prepare_agents(count=count) if versions is None or len(versions) <= 0: versions = [latest_version] @@ -842,8 +842,8 @@ self.update_handler._set_agents([GuestAgent(path=path) for path in self.agent_dirs()]) self.assertEqual(len(self.agent_dirs()), len(self.update_handler.agents)) - kept_agents = self.update_handler.agents[1::2] - blacklisted_agents = self.update_handler.agents[::2] + kept_agents = self.update_handler.agents[::2] + blacklisted_agents = self.update_handler.agents[1::2] for agent in blacklisted_agents: agent.mark_failure(is_fatal=True) self.update_handler._filter_blacklisted_agents() @@ -879,7 +879,8 @@ protocol = WireProtocol('12.34.56.78') mock_get_host.return_value = "faux host" host = self.update_handler._get_host_plugin(protocol=protocol) - 
mock_get_host.assert_called_once() + print("mock_get_host call cound={0}".format(mock_get_host.call_count)) + self.assertEqual(1, mock_get_host.call_count) self.assertEqual("faux host", host) @patch('azurelinuxagent.common.protocol.wire.WireClient.get_host_plugin') @@ -936,16 +937,16 @@ for p in pid_files: self.assertTrue(pid_re.match(os.path.basename(p))) - def test_is_clean_start_returns_true_when_no_sentinal(self): - self.assertFalse(os.path.isfile(self.update_handler._sentinal_file_path())) + def test_is_clean_start_returns_true_when_no_sentinel(self): + self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path())) self.assertTrue(self.update_handler._is_clean_start) - def test_is_clean_start_returns_false_when_sentinal_exists(self): - self.update_handler._set_sentinal(agent=CURRENT_AGENT) + def test_is_clean_start_returns_false_when_sentinel_exists(self): + self.update_handler._set_sentinel(agent=CURRENT_AGENT) self.assertFalse(self.update_handler._is_clean_start) def test_is_clean_start_returns_false_for_exceptions(self): - self.update_handler._set_sentinal() + self.update_handler._set_sentinel() with patch("azurelinuxagent.common.utils.fileutil.read_file", side_effect=Exception): self.assertFalse(self.update_handler._is_clean_start) @@ -998,12 +999,23 @@ # Ensure at least three agents initially exist self.assertTrue(2 < len(self.update_handler.agents)) - # Purge every other agent - kept_agents = self.update_handler.agents[1::2] - purged_agents = self.update_handler.agents[::2] + # Purge every other agent. 
Don't add the current version to agents_to_keep explicitly; + # the current version is never purged + agents_to_keep = [] + kept_agents = [] + purged_agents = [] + for i in range(0, len(self.update_handler.agents)): + if self.update_handler.agents[i].version == CURRENT_VERSION: + kept_agents.append(self.update_handler.agents[i]) + else: + if i % 2 == 0: + agents_to_keep.append(self.update_handler.agents[i]) + kept_agents.append(self.update_handler.agents[i]) + else: + purged_agents.append(self.update_handler.agents[i]) # Reload and assert only the kept agents remain on disk - self.update_handler.agents = kept_agents + self.update_handler.agents = agents_to_keep self.update_handler._purge_agents() self.update_handler._find_agents() self.assertEqual( @@ -1092,13 +1104,12 @@ self._test_run_latest(mock_time=mock_time) self.assertEqual(1, mock_time.sleep_interval) - def test_run_latest_polls_moderately_if_installed_not_latest(self): + def test_run_latest_polls_every_second_if_installed_not_latest(self): self.prepare_agents() - mock_child = ChildMock(return_value=0) mock_time = TimeMock(time_increment=CHILD_HEALTH_INTERVAL/2) self._test_run_latest(mock_time=mock_time) - self.assertNotEqual(1, mock_time.sleep_interval) + self.assertEqual(1, mock_time.sleep_interval) def test_run_latest_defaults_to_current(self): self.assertEqual(None, self.update_handler.get_latest_agent()) @@ -1193,7 +1204,7 @@ self._test_run_latest() self.assertEqual(0, mock_signal.call_count) - def _test_run(self, invocations=1, calls=[call.run()], enable_updates=False): + def _test_run(self, invocations=1, calls=[call.run()], enable_updates=False, sleep_interval=(3,)): conf.get_autoupdate_enabled = Mock(return_value=enable_updates) # Note: @@ -1212,23 +1223,27 @@ fileutil.write_file(conf.get_agent_pid_file_path(), ustr(42)) with patch('azurelinuxagent.ga.exthandlers.get_exthandlers_handler') as mock_handler: - with patch('azurelinuxagent.ga.monitor.get_monitor_handler') as mock_monitor: - with 
patch('azurelinuxagent.ga.env.get_env_handler') as mock_env: - with patch('time.sleep', side_effect=iterator) as mock_sleep: - with patch('sys.exit') as mock_exit: - if isinstance(os.getppid, MagicMock): - self.update_handler.run() - else: - with patch('os.getppid', return_value=42): + with patch('azurelinuxagent.ga.remoteaccess.get_remote_access_handler') as mock_ra_handler: + with patch('azurelinuxagent.ga.monitor.get_monitor_handler') as mock_monitor: + with patch('azurelinuxagent.ga.env.get_env_handler') as mock_env: + with patch('time.sleep', side_effect=iterator) as mock_sleep: + with patch('sys.exit') as mock_exit: + if isinstance(os.getppid, MagicMock): self.update_handler.run() - - self.assertEqual(1, mock_handler.call_count) - self.assertEqual(mock_handler.return_value.method_calls, calls) - self.assertEqual(invocations, mock_sleep.call_count) - self.assertEqual(1, mock_monitor.call_count) - self.assertEqual(1, mock_env.call_count) - self.assertEqual(1, mock_exit.call_count) - + else: + with patch('os.getppid', return_value=42): + self.update_handler.run() + + self.assertEqual(1, mock_handler.call_count) + self.assertEqual(mock_handler.return_value.method_calls, calls) + self.assertEqual(1, mock_ra_handler.call_count) + self.assertEqual(mock_ra_handler.return_value.method_calls, calls) + self.assertEqual(invocations, mock_sleep.call_count) + if invocations > 0: + self.assertEqual(sleep_interval, mock_sleep.call_args[0]) + self.assertEqual(1, mock_monitor.call_count) + self.assertEqual(1, mock_env.call_count) + self.assertEqual(1, mock_exit.call_count) def test_run(self): self._test_run() @@ -1244,14 +1259,14 @@ with patch('os.getppid', return_value=1): self._test_run(invocations=0, calls=[], enable_updates=True) - def test_run_clears_sentinal_on_successful_exit(self): + def test_run_clears_sentinel_on_successful_exit(self): self._test_run() - self.assertFalse(os.path.isfile(self.update_handler._sentinal_file_path())) + 
self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path())) - def test_run_leaves_sentinal_on_unsuccessful_exit(self): + def test_run_leaves_sentinel_on_unsuccessful_exit(self): self.update_handler._upgrade_available = Mock(side_effect=Exception) self._test_run(invocations=0, calls=[], enable_updates=True) - self.assertTrue(os.path.isfile(self.update_handler._sentinal_file_path())) + self.assertTrue(os.path.isfile(self.update_handler._sentinel_file_path())) def test_run_emits_restart_event(self): self.update_handler._emit_restart_event = Mock() @@ -1275,31 +1290,31 @@ self.assertTrue(v > a.version) v = a.version - def test_set_sentinal(self): - self.assertFalse(os.path.isfile(self.update_handler._sentinal_file_path())) - self.update_handler._set_sentinal() - self.assertTrue(os.path.isfile(self.update_handler._sentinal_file_path())) + def test_set_sentinel(self): + self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path())) + self.update_handler._set_sentinel() + self.assertTrue(os.path.isfile(self.update_handler._sentinel_file_path())) - def test_set_sentinal_writes_current_agent(self): - self.update_handler._set_sentinal() + def test_set_sentinel_writes_current_agent(self): + self.update_handler._set_sentinel() self.assertTrue( - fileutil.read_file(self.update_handler._sentinal_file_path()), + fileutil.read_file(self.update_handler._sentinel_file_path()), CURRENT_AGENT) def test_shutdown(self): - self.update_handler._set_sentinal() + self.update_handler._set_sentinel() self.update_handler._shutdown() self.assertFalse(self.update_handler.running) - self.assertFalse(os.path.isfile(self.update_handler._sentinal_file_path())) + self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path())) - def test_shutdown_ignores_missing_sentinal_file(self): - self.assertFalse(os.path.isfile(self.update_handler._sentinal_file_path())) + def test_shutdown_ignores_missing_sentinel_file(self): + 
self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path())) self.update_handler._shutdown() self.assertFalse(self.update_handler.running) - self.assertFalse(os.path.isfile(self.update_handler._sentinal_file_path())) + self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path())) def test_shutdown_ignores_exceptions(self): - self.update_handler._set_sentinal() + self.update_handler._set_sentinel() try: with patch("os.remove", side_effect=Exception): @@ -1312,7 +1327,7 @@ base_version=FlexibleVersion(AGENT_VERSION), protocol=None, versions=None, - count=5): + count=20): if protocol is None: protocol = self._create_protocol(count=count, versions=versions) @@ -1325,15 +1340,6 @@ def test_upgrade_available_returns_true_on_first_use(self): self.assertTrue(self._test_upgrade_available()) - def test_upgrade_available_will_refresh_goal_state(self): - protocol = self._create_protocol() - protocol.emulate_stale_goal_state() - self.assertTrue(self._test_upgrade_available(protocol=protocol)) - self.assertEqual(2, protocol.call_counts["get_vmagent_manifests"]) - self.assertEqual(1, protocol.call_counts["get_vmagent_pkgs"]) - self.assertEqual(1, protocol.call_counts["update_goal_state"]) - self.assertTrue(protocol.goal_state_forced) - def test_upgrade_available_handles_missing_family(self): extensions_config = ExtensionsConfig(load_data("wire/ext_conf_missing_family.xml")) protocol = ProtocolMock() @@ -1360,7 +1366,7 @@ def test_upgrade_available_purges_old_agents(self): self.prepare_agents() agent_count = self.agent_count() - self.assertEqual(5, agent_count) + self.assertEqual(20, agent_count) agent_versions = self.agent_versions()[:3] self.assertTrue(self._test_upgrade_available(versions=agent_versions)) @@ -1477,6 +1483,170 @@ self.assertTrue(ga_manifest_2.allowed_versions[0] == '2.2.13') self.assertTrue(ga_manifest_2.allowed_versions[1] == '2.2.14') + @patch('azurelinuxagent.common.conf.get_extensions_enabled', return_value=False) + def 
test_update_happens_when_extensions_disabled(self, _): + """ + Although the extension enabled config will not get checked + before an update is found, this test attempts to ensure that + behavior never changes. + """ + self.update_handler._upgrade_available = Mock(return_value=True) + self._test_run(invocations=0, calls=[], enable_updates=True, sleep_interval=(300,)) + + @patch('azurelinuxagent.common.conf.get_extensions_enabled', return_value=False) + def test_interval_changes_when_extensions_disabled(self, _): + """ + When extension processing is disabled, the goal state interval should be larger. + """ + self.update_handler._upgrade_available = Mock(return_value=False) + self._test_run(invocations=15, calls=[call.run()] * 15, sleep_interval=(300,)) + + +class MonitorThreadTest(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) + self.event_patch = patch('azurelinuxagent.common.event.add_event') + self.update_handler = get_update_handler() + self.update_handler.protocol_util = Mock() + + def _test_run(self, invocations=1): + iterations = [0] + def iterator(*args, **kwargs): + iterations[0] += 1 + if iterations[0] >= invocations: + self.update_handler.running = False + return + + with patch('os.getpid', return_value=42): + with patch.object(UpdateHandler, '_is_orphaned') as mock_is_orphaned: + mock_is_orphaned.__get__ = Mock(return_value=False) + with patch('azurelinuxagent.ga.exthandlers.get_exthandlers_handler') as mock_handler: + with patch('azurelinuxagent.ga.remoteaccess.get_remote_access_handler') as mock_ra_handler: + with patch('time.sleep', side_effect=iterator) as mock_sleep: + with patch('sys.exit') as mock_exit: + self.update_handler.run() + + @patch('azurelinuxagent.ga.monitor.get_monitor_handler') + @patch('azurelinuxagent.ga.env.get_env_handler') + def test_start_threads(self, mock_env, mock_monitor): + self.assertTrue(self.update_handler.running) + + mock_monitor_thread = MagicMock() + mock_monitor_thread.run = MagicMock() + 
mock_monitor.return_value = mock_monitor_thread + + mock_env_thread = MagicMock() + mock_env_thread.run = MagicMock() + mock_env.return_value = mock_env_thread + + self._test_run(invocations=0) + self.assertEqual(1, mock_monitor.call_count) + self.assertEqual(1, mock_monitor_thread.run.call_count) + self.assertEqual(1, mock_env.call_count) + self.assertEqual(1, mock_env_thread.run.call_count) + + @patch('azurelinuxagent.ga.monitor.get_monitor_handler') + @patch('azurelinuxagent.ga.env.get_env_handler') + def test_check_if_monitor_thread_is_alive(self, mock_env, mock_monitor): + self.assertTrue(self.update_handler.running) + + mock_monitor_thread = MagicMock() + mock_monitor_thread.run = MagicMock() + mock_monitor_thread.is_alive = MagicMock(return_value=True) + mock_monitor_thread.start = MagicMock() + mock_monitor.return_value = mock_monitor_thread + + self._test_run(invocations=0) + self.assertEqual(1, mock_monitor.call_count) + self.assertEqual(1, mock_monitor_thread.run.call_count) + self.assertEqual(1, mock_monitor_thread.is_alive.call_count) + self.assertEqual(0, mock_monitor_thread.start.call_count) + + @patch('azurelinuxagent.ga.monitor.get_monitor_handler') + @patch('azurelinuxagent.ga.env.get_env_handler') + def test_check_if_env_thread_is_alive(self, mock_env, mock_monitor): + self.assertTrue(self.update_handler.running) + + mock_env_thread = MagicMock() + mock_env_thread.run = MagicMock() + mock_env_thread.is_alive = MagicMock(return_value=True) + mock_env_thread.start = MagicMock() + mock_env.return_value = mock_env_thread + + self._test_run(invocations=1) + self.assertEqual(1, mock_env.call_count) + self.assertEqual(1, mock_env_thread.run.call_count) + self.assertEqual(1, mock_env_thread.is_alive.call_count) + self.assertEqual(0, mock_env_thread.start.call_count) + + @patch('azurelinuxagent.ga.monitor.get_monitor_handler') + @patch('azurelinuxagent.ga.env.get_env_handler') + def test_restart_monitor_thread_if_not_alive(self, mock_env, mock_monitor): + 
self.assertTrue(self.update_handler.running) + + mock_monitor_thread = MagicMock() + mock_monitor_thread.run = MagicMock() + mock_monitor_thread.is_alive = MagicMock(return_value=False) + mock_monitor_thread.start = MagicMock() + mock_monitor.return_value = mock_monitor_thread + + self._test_run(invocations=1) + self.assertEqual(1, mock_monitor.call_count) + self.assertEqual(1, mock_monitor_thread.run.call_count) + self.assertEqual(1, mock_monitor_thread.is_alive.call_count) + self.assertEqual(1, mock_monitor_thread.start.call_count) + + @patch('azurelinuxagent.ga.monitor.get_monitor_handler') + @patch('azurelinuxagent.ga.env.get_env_handler') + def test_restart_env_thread_if_not_alive(self, mock_env, mock_monitor): + self.assertTrue(self.update_handler.running) + + mock_env_thread = MagicMock() + mock_env_thread.run = MagicMock() + mock_env_thread.is_alive = MagicMock(return_value=False) + mock_env_thread.start = MagicMock() + mock_env.return_value = mock_env_thread + + self._test_run(invocations=1) + self.assertEqual(1, mock_env.call_count) + self.assertEqual(1, mock_env_thread.run.call_count) + self.assertEqual(1, mock_env_thread.is_alive.call_count) + self.assertEqual(1, mock_env_thread.start.call_count) + + @patch('azurelinuxagent.ga.monitor.get_monitor_handler') + @patch('azurelinuxagent.ga.env.get_env_handler') + def test_restart_monitor_thread(self, mock_env, mock_monitor): + self.assertTrue(self.update_handler.running) + + mock_monitor_thread = MagicMock() + mock_monitor_thread.run = MagicMock() + mock_monitor_thread.is_alive = MagicMock(return_value=False) + mock_monitor_thread.start = MagicMock() + mock_monitor.return_value = mock_monitor_thread + + self._test_run(invocations=0) + self.assertEqual(True, mock_monitor.called) + self.assertEqual(True, mock_monitor_thread.run.called) + self.assertEqual(True, mock_monitor_thread.is_alive.called) + self.assertEqual(True, mock_monitor_thread.start.called) + + 
@patch('azurelinuxagent.ga.monitor.get_monitor_handler') + @patch('azurelinuxagent.ga.env.get_env_handler') + def test_restart_env_thread(self, mock_env, mock_monitor): + self.assertTrue(self.update_handler.running) + + mock_env_thread = MagicMock() + mock_env_thread.run = MagicMock() + mock_env_thread.is_alive = MagicMock(return_value=False) + mock_env_thread.start = MagicMock() + mock_env.return_value = mock_env_thread + + self._test_run(invocations=0) + self.assertEqual(True, mock_env.called) + self.assertEqual(True, mock_env_thread.run.called) + self.assertEqual(True, mock_env_thread.is_alive.called) + self.assertEqual(True, mock_env_thread.start.called) + class ChildMock(Mock): def __init__(self, return_value=0, side_effect=None): @@ -1550,6 +1720,7 @@ self.call_counts["update_goal_state"] += 1 self.goal_state_forced = self.goal_state_forced or forced + class ResponseMock(Mock): def __init__(self, status=restutil.httpclient.OK, response=None, reason=None): Mock.__init__(self) @@ -1579,5 +1750,6 @@ self.next_time += self.time_increment return current_time + if __name__ == '__main__': unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/__init__.py walinuxagent-2.2.45/tests/__init__.py --- walinuxagent-2.2.21+really2.2.20/tests/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/__init__.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/tests/pa/__init__.py walinuxagent-2.2.45/tests/pa/__init__.py --- walinuxagent-2.2.21+really2.2.20/tests/pa/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/pa/__init__.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/tests/pa/test_deprovision.py walinuxagent-2.2.45/tests/pa/test_deprovision.py --- walinuxagent-2.2.21+really2.2.20/tests/pa/test_deprovision.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/pa/test_deprovision.py 2019-11-07 00:36:56.000000000 +0000 @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import signal @@ -53,68 +53,6 @@ dh.run(force=True) self.assertEqual(2, dh.do_actions.call_count) - @patch("azurelinuxagent.pa.deprovision.default.DeprovisionHandler.cloud_init_dirs") - @patch("azurelinuxagent.pa.deprovision.default.DeprovisionHandler.cloud_init_files") - def test_del_cloud_init_without_once(self, - mock_files, - mock_dirs): - deprovision_handler = get_deprovision_handler("","","") - deprovision_handler.del_cloud_init([], [], - include_once=False, deluser=False) - - mock_dirs.assert_called_with(include_once=False) - mock_files.assert_called_with(include_once=False, deluser=False) - - @patch("signal.signal") - @patch("azurelinuxagent.common.protocol.get_protocol_util") - @patch("azurelinuxagent.common.osutil.get_osutil") - @patch("azurelinuxagent.pa.deprovision.default.DeprovisionHandler.cloud_init_dirs") - @patch("azurelinuxagent.pa.deprovision.default.DeprovisionHandler.cloud_init_files") - def test_del_cloud_init(self, - mock_files, - mock_dirs, - mock_osutil, - mock_util, - mock_signal): - try: - with tempfile.NamedTemporaryFile() as f: - warnings = [] - actions = [] - - dirs = [tempfile.mkdtemp()] - mock_dirs.return_value = dirs - - files = [f.name] - mock_files.return_value = files - - deprovision_handler = get_deprovision_handler("","","") - deprovision_handler.del_cloud_init(warnings, actions, - deluser=True) - - mock_dirs.assert_called_with(include_once=True) - mock_files.assert_called_with(include_once=True, deluser=True) - - self.assertEqual(len(warnings), 0) - self.assertEqual(len(actions), 2) - for da in actions: - if da.func == fileutil.rm_dirs: - self.assertEqual(da.args, dirs) - elif da.func == fileutil.rm_files: - self.assertEqual(da.args, files) - else: - self.assertTrue(False) - - try: - for da in actions: - da.invoke() - self.assertEqual(len([d for d in dirs if os.path.isdir(d)]), 0) - self.assertEqual(len([f for f in files if 
os.path.isfile(f)]), 0) - except Exception as e: - self.assertTrue(False, "Exception {0}".format(e)) - except OSError: - # Ignore the error caused by removing the file within the "with" - pass - @distros("ubuntu") @patch('azurelinuxagent.common.conf.get_lib_dir') def test_del_lib_dir_files(self, @@ -122,6 +60,11 @@ distro_version, distro_full_name, mock_conf): + dirs = [ + 'WALinuxAgent-2.2.26/config', + 'Microsoft.Azure.Extensions.CustomScript-2.0.6/config', + 'Microsoft.Azure.Extensions.CustomScript-2.0.6/status' + ] files = [ 'HostingEnvironmentConfig.xml', 'Incarnation', @@ -133,11 +76,19 @@ 'GoalState.1.xml', 'Extensions.2.xml', 'ExtensionsConfig.2.xml', - 'GoalState.2.xml' + 'GoalState.2.xml', + 'Microsoft.Azure.Extensions.CustomScript-2.0.6/config/42.settings', + 'Microsoft.Azure.Extensions.CustomScript-2.0.6/config/HandlerStatus', + 'Microsoft.Azure.Extensions.CustomScript-2.0.6/config/HandlerState', + 'Microsoft.Azure.Extensions.CustomScript-2.0.6/status/12.notstatus', + 'Microsoft.Azure.Extensions.CustomScript-2.0.6/mrseq', + 'WALinuxAgent-2.2.26/config/0.settings' ] tmp = tempfile.mkdtemp() mock_conf.return_value = tmp + for d in dirs: + fileutil.mkdir(os.path.join(tmp, d)) for f in files: fileutil.write_file(os.path.join(tmp, f), "Value") @@ -147,14 +98,18 @@ warnings = [] actions = [] deprovision_handler.del_lib_dir_files(warnings, actions) + deprovision_handler.del_ext_handler_files(warnings, actions) self.assertTrue(len(warnings) == 0) - self.assertTrue(len(actions) == 1) + self.assertTrue(len(actions) == 2) self.assertEqual(fileutil.rm_files, actions[0].func) - self.assertTrue(len(actions[0].args) > 0) + self.assertEqual(fileutil.rm_files, actions[1].func) + self.assertEqual(11, len(actions[0].args)) + self.assertEqual(3, len(actions[1].args)) for f in actions[0].args: self.assertTrue(os.path.basename(f) in files) - + for f in actions[1].args: + self.assertTrue(f[len(tmp)+1:] in files) @distros("redhat") def test_deprovision(self, @@ -180,5 +135,6 
@@ warnings, actions = deprovision_handler.setup(deluser=False) assert any("/etc/resolvconf/resolv.conf.d/tail" in w for w in warnings) + if __name__ == '__main__': unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/pa/test_provision.py walinuxagent-2.2.45/tests/pa/test_provision.py --- walinuxagent-2.2.21+really2.2.20/tests/pa/test_provision.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/pa/test_provision.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,15 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # -import azurelinuxagent.common.utils.fileutil as fileutil - -from azurelinuxagent.common.exception import ProtocolError +from azurelinuxagent.common.exception import ProvisionError from azurelinuxagent.common.osutil.default import DefaultOSUtil from azurelinuxagent.common.protocol import OVF_FILE_NAME from azurelinuxagent.pa.provision import get_provision_handler +from azurelinuxagent.pa.provision.cloudinit import CloudInitProvisionHandler from azurelinuxagent.pa.provision.default import ProvisionHandler from tests.tools import * @@ -38,7 +37,6 @@ provision_handler.osutil = mock_osutil provision_handler.protocol_util.osutil = mock_osutil - provision_handler.protocol_util.get_protocol_by_file = MagicMock() provision_handler.protocol_util.get_protocol = MagicMock() conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) @@ -67,9 +65,9 @@ ph.run() - ph.is_provisioned.assert_not_called() - ph.report_ready.assert_called_once() - ph.write_provisioned.assert_called_once() + self.assertEqual(0, ph.is_provisioned.call_count) + self.assertEqual(1, 
ph.report_ready.call_count) + self.assertEqual(1, ph.write_provisioned.call_count) @patch('os.path.isfile', return_value=False) def test_is_provisioned_not_provisioned(self, mock_isfile): @@ -92,8 +90,8 @@ mock_deprovision.return_value = deprovision_handler self.assertTrue(ph.is_provisioned()) - ph.osutil.is_current_instance_id.assert_called_once() - deprovision_handler.run_changed_unique_id.assert_not_called() + self.assertEqual(1, ph.osutil.is_current_instance_id.call_count) + self.assertEqual(0, deprovision_handler.run_changed_unique_id.call_count) @patch('os.path.isfile', return_value=True) @patch('azurelinuxagent.common.utils.fileutil.read_file', @@ -112,8 +110,267 @@ mock_deprovision.return_value = deprovision_handler self.assertTrue(ph.is_provisioned()) - ph.osutil.is_current_instance_id.assert_called_once() - deprovision_handler.run_changed_unique_id.assert_called_once() + self.assertEqual(1, ph.osutil.is_current_instance_id.call_count) + self.assertEqual(1, deprovision_handler.run_changed_unique_id.call_count) + + @distros() + @patch('azurelinuxagent.common.conf.get_provisioning_agent', return_value='waagent') + def test_provision_telemetry_pga_false(self, + distro_name, + distro_version, + distro_full_name, _): + """ + ProvisionGuestAgent flag is 'false' + """ + self._provision_test(distro_name, + distro_version, + distro_full_name, + OVF_FILE_NAME, + 'false', + True) + + @distros() + @patch('azurelinuxagent.common.conf.get_provisioning_agent', return_value='waagent') + def test_provision_telemetry_pga_true(self, + distro_name, + distro_version, + distro_full_name, _): + """ + ProvisionGuestAgent flag is 'true' + """ + self._provision_test(distro_name, + distro_version, + distro_full_name, + 'ovf-env-2.xml', + 'true', + True) + + @distros() + @patch('azurelinuxagent.common.conf.get_provisioning_agent', return_value='waagent') + def test_provision_telemetry_pga_empty(self, + distro_name, + distro_version, + distro_full_name, _): + """ + ProvisionGuestAgent 
flag is '' + """ + self._provision_test(distro_name, + distro_version, + distro_full_name, + 'ovf-env-3.xml', + 'true', + False) + + @distros() + @patch('azurelinuxagent.common.conf.get_provisioning_agent', return_value='waagent') + def test_provision_telemetry_pga_bad(self, + distro_name, + distro_version, + distro_full_name, _): + """ + ProvisionGuestAgent flag is 'bad data' + """ + self._provision_test(distro_name, + distro_version, + distro_full_name, + 'ovf-env-4.xml', + 'bad data', + True) + + @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.get_instance_id', + return_value='B9F3C233-9913-9F42-8EB3-BA656DF32502') + @patch('azurelinuxagent.pa.provision.default.ProvisionHandler.write_agent_disabled') + def _provision_test(self, + distro_name, + distro_version, + distro_full_name, + ovf_file, + provisionMessage, + expect_success, + patch_write_agent_disabled, + patch_get_instance_id): + """ + Assert that the agent issues two telemetry messages as part of a + successful provisioning. + + 1. Provision + 2. 
GuestState + """ + ph = get_provision_handler(distro_name, + distro_version, + distro_full_name) + ph.report_event = MagicMock() + ph.reg_ssh_host_key = MagicMock(return_value='--thumprint--') + + mock_osutil = MagicMock() + mock_osutil.decode_customdata = Mock(return_value="") + + ph.osutil = mock_osutil + ph.protocol_util.osutil = mock_osutil + ph.protocol_util.get_protocol = MagicMock() + + conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) + ovfenv_file = os.path.join(self.tmp_dir, OVF_FILE_NAME) + ovfenv_data = load_data(ovf_file) + fileutil.write_file(ovfenv_file, ovfenv_data) + + ph.run() + + if expect_success: + self.assertEqual(2, ph.report_event.call_count) + positional_args, kw_args = ph.report_event.call_args_list[0] + # [call('Provisioning succeeded (146473.68s)', duration=65, is_success=True)] + self.assertTrue(re.match(r'Provisioning succeeded \(\d+\.\d+s\)', positional_args[0]) is not None) + self.assertTrue(isinstance(kw_args['duration'], int)) + self.assertTrue(kw_args['is_success']) + + positional_args, kw_args = ph.report_event.call_args_list[1] + self.assertTrue(kw_args['operation'] == 'ProvisionGuestAgent') + self.assertTrue(kw_args['message'] == provisionMessage) + self.assertTrue(kw_args['is_success']) + + expected_disabled = True if provisionMessage == 'false' else False + self.assertTrue(patch_write_agent_disabled.call_count == expected_disabled) + + else: + self.assertEqual(1, ph.report_event.call_count) + positional_args, kw_args = ph.report_event.call_args_list[0] + # [call(u'[ProtocolError] Failed to validate OVF: ProvisionGuestAgent not found')] + self.assertTrue('Failed to validate OVF: ProvisionGuestAgent not found' in positional_args[0]) + self.assertFalse(kw_args['is_success']) + + @distros() + @patch( + 'azurelinuxagent.common.osutil.default.DefaultOSUtil.get_instance_id', + return_value='B9F3C233-9913-9F42-8EB3-BA656DF32502') + @patch('azurelinuxagent.common.conf.get_provisioning_agent', return_value='waagent') + def 
test_provision_telemetry_fail(self, + mock_util, + distro_name, + distro_version, + distro_full_name, _): + """ + Assert that the agent issues one telemetry message as part of a + failed provisioning. + + 1. Provision + """ + ph = get_provision_handler(distro_name, distro_version, + distro_full_name) + ph.report_event = MagicMock() + ph.reg_ssh_host_key = MagicMock(side_effect=ProvisionError( + "--unit-test--")) + + mock_osutil = MagicMock() + mock_osutil.decode_customdata = Mock(return_value="") + + ph.osutil = mock_osutil + ph.protocol_util.osutil = mock_osutil + ph.protocol_util.get_protocol = MagicMock() + + conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) + ovfenv_file = os.path.join(self.tmp_dir, OVF_FILE_NAME) + ovfenv_data = load_data("ovf-env.xml") + fileutil.write_file(ovfenv_file, ovfenv_data) + + ph.run() + positional_args, kw_args = ph.report_event.call_args_list[0] + self.assertTrue(re.match(r'Provisioning failed: \[ProvisionError\] --unit-test-- \(\d+\.\d+s\)', positional_args[0]) is not None) + + @patch('azurelinuxagent.pa.provision.default.ProvisionHandler.write_agent_disabled') + @distros() + def test_handle_provision_guest_agent(self, + patch_write_agent_disabled, + distro_name, + distro_version, + distro_full_name): + ph = get_provision_handler(distro_name, + distro_version, + distro_full_name) + + patch_write_agent_disabled.call_count = 0 + + ph.handle_provision_guest_agent(provision_guest_agent='false') + self.assertEqual(1, patch_write_agent_disabled.call_count) + + ph.handle_provision_guest_agent(provision_guest_agent='False') + self.assertEqual(2, patch_write_agent_disabled.call_count) + + ph.handle_provision_guest_agent(provision_guest_agent='FALSE') + self.assertEqual(3, patch_write_agent_disabled.call_count) + + ph.handle_provision_guest_agent(provision_guest_agent='') + self.assertEqual(3, patch_write_agent_disabled.call_count) + + ph.handle_provision_guest_agent(provision_guest_agent=' ') + self.assertEqual(3, 
patch_write_agent_disabled.call_count) + + ph.handle_provision_guest_agent(provision_guest_agent=None) + self.assertEqual(3, patch_write_agent_disabled.call_count) + + ph.handle_provision_guest_agent(provision_guest_agent='true') + self.assertEqual(3, patch_write_agent_disabled.call_count) + + ph.handle_provision_guest_agent(provision_guest_agent='True') + self.assertEqual(3, patch_write_agent_disabled.call_count) + + ph.handle_provision_guest_agent(provision_guest_agent='TRUE') + self.assertEqual(3, patch_write_agent_disabled.call_count) + + @patch( + 'azurelinuxagent.common.conf.get_provisioning_agent', + return_value='auto' + ) + @patch( + 'azurelinuxagent.pa.provision.factory.cloud_init_is_enabled', + return_value=False + ) + def test_get_provision_handler_config_auto_no_cloudinit( + self, + patch_cloud_init_is_enabled, + patch_get_provisioning_agent): + provisioning_handler = get_provision_handler() + self.assertIsInstance(provisioning_handler, ProvisionHandler, 'Auto provisioning handler should be waagent if cloud-init is not enabled') + + @patch( + 'azurelinuxagent.common.conf.get_provisioning_agent', + return_value='waagent' + ) + @patch( + 'azurelinuxagent.pa.provision.factory.cloud_init_is_enabled', + return_value=True + ) + def test_get_provision_handler_config_waagent( + self, + patch_cloud_init_is_enabled, + patch_get_provisioning_agent): + provisioning_handler = get_provision_handler() + self.assertIsInstance(provisioning_handler, ProvisionHandler, 'Provisioning handler should be waagent if agent is set to waagent') + + @patch( + 'azurelinuxagent.common.conf.get_provisioning_agent', + return_value='auto' + ) + @patch( + 'azurelinuxagent.pa.provision.factory.cloud_init_is_enabled', + return_value=True + ) + def test_get_provision_handler_config_auto_cloudinit( + self, + patch_cloud_init_is_enabled, + patch_get_provisioning_agent): + provisioning_handler = get_provision_handler() + self.assertIsInstance(provisioning_handler, CloudInitProvisionHandler, 
'Auto provisioning handler should be cloud-init if cloud-init is enabled') + + @patch( + 'azurelinuxagent.common.conf.get_provisioning_agent', + return_value='cloud-init' + ) + def test_get_provision_handler_config_cloudinit( + self, + patch_get_provisioning_agent): + provisioning_handler = get_provision_handler() + self.assertIsInstance(provisioning_handler, CloudInitProvisionHandler, 'Provisioning handler should be cloud-init if agent is set to cloud-init') if __name__ == '__main__': unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/protocol/__init__.py walinuxagent-2.2.45/tests/protocol/__init__.py --- walinuxagent-2.2.21+really2.2.20/tests/protocol/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/protocol/__init__.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/tests/protocol/mockmetadata.py walinuxagent-2.2.45/tests/protocol/mockmetadata.py --- walinuxagent-2.2.21+really2.2.20/tests/protocol/mockmetadata.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/protocol/mockmetadata.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from tests.tools import * diff -Nru walinuxagent-2.2.21+really2.2.20/tests/protocol/mockwiredata.py walinuxagent-2.2.45/tests/protocol/mockwiredata.py --- walinuxagent-2.2.21+really2.2.20/tests/protocol/mockwiredata.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/protocol/mockwiredata.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from tests.tools import * @@ -55,6 +55,24 @@ DATA_FILE_EXT_ROLLINGUPGRADE = DATA_FILE.copy() DATA_FILE_EXT_ROLLINGUPGRADE["ext_conf"] = "wire/ext_conf_upgradeguid.xml" +DATA_FILE_EXT_SEQUENCING = DATA_FILE.copy() +DATA_FILE_EXT_SEQUENCING["ext_conf"] = "wire/ext_conf_sequencing.xml" + +DATA_FILE_EXT_DELETION = DATA_FILE.copy() +DATA_FILE_EXT_DELETION["manifest"] = "wire/manifest_deletion.xml" + +DATA_FILE_EXT_SINGLE = DATA_FILE.copy() +DATA_FILE_EXT_SINGLE["manifest"] = "wire/manifest_deletion.xml" + +DATA_FILE_MULTIPLE_EXT = DATA_FILE.copy() +DATA_FILE_MULTIPLE_EXT["ext_conf"] = "wire/ext_conf_multiple_extensions.xml" + +DATA_FILE_NO_CERT_FORMAT = DATA_FILE.copy() +DATA_FILE_NO_CERT_FORMAT["certs"] = "wire/certs_no_format_specified.xml" + +DATA_FILE_CERT_FORMAT_NOT_PFX = DATA_FILE.copy() +DATA_FILE_CERT_FORMAT_NOT_PFX["certs"] = "wire/certs_format_not_pfx.xml" + class WireProtocolData(object): def __init__(self, data_files=DATA_FILE): self.emulate_stale_goal_state = False @@ -164,4 +182,3 @@ with open(trans_cert_file, 'w+') as cert_file: cert_file.write(self.trans_cert) - diff -Nru 
walinuxagent-2.2.21+really2.2.20/tests/protocol/test_datacontract.py walinuxagent-2.2.45/tests/protocol/test_datacontract.py --- walinuxagent-2.2.21+really2.2.20/tests/protocol/test_datacontract.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/protocol/test_datacontract.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,49 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.6+ and Openssl 1.0+ +# + +import unittest +from azurelinuxagent.common.datacontract import get_properties, set_properties +from azurelinuxagent.common.protocol.restapi import * + + +class SampleDataContract(DataContract): + def __init__(self): + self.foo = None + self.bar = DataContractList(int) + + +class TestDataContract(unittest.TestCase): + def test_get_properties(self): + obj = SampleDataContract() + obj.foo = "foo" + obj.bar.append(1) + data = get_properties(obj) + self.assertEquals("foo", data["foo"]) + self.assertEquals(list, type(data["bar"])) + + def test_set_properties(self): + obj = SampleDataContract() + data = { + 'foo' : 1, + 'baz': 'a' + } + set_properties('sample', obj, data) + self.assertFalse(hasattr(obj, 'baz')) + + +if __name__ == '__main__': + unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/protocol/test_healthservice.py walinuxagent-2.2.45/tests/protocol/test_healthservice.py --- walinuxagent-2.2.21+really2.2.20/tests/protocol/test_healthservice.py 1970-01-01 00:00:00.000000000 
+0000 +++ walinuxagent-2.2.45/tests/protocol/test_healthservice.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,235 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.6+ and Openssl 1.0+ +import json + +from azurelinuxagent.common.exception import HttpError +from azurelinuxagent.common.protocol.healthservice import Observation, HealthService +from azurelinuxagent.common.utils import restutil +from tests.protocol.test_hostplugin import MockResponse +from tests.tools import * + + +class TestHealthService(AgentTestCase): + + def assert_status_code(self, status_code, expected_healthy): + response = MockResponse('response', status_code) + is_healthy = not restutil.request_failed_at_hostplugin(response) + self.assertEqual(expected_healthy, is_healthy) + + def assert_observation(self, call_args, name, is_healthy, value, description): + endpoint = call_args[0][0] + content = call_args[0][1] + + jo = json.loads(content) + api = jo['Api'] + source = jo['Source'] + version = jo['Version'] + obs = jo['Observations'] + fo = obs[0] + obs_name = fo['ObservationName'] + obs_healthy = fo['IsHealthy'] + obs_value = fo['Value'] + obs_description = fo['Description'] + + self.assertEqual('application/json', call_args[1]['headers']['Content-Type']) + self.assertEqual('http://endpoint:80/HealthService', endpoint) + self.assertEqual('reporttargethealth', api) + self.assertEqual('WALinuxAgent', source) + self.assertEqual('1.0', 
version) + + self.assertEqual(name, obs_name) + self.assertEqual(value, obs_value) + self.assertEqual(is_healthy, obs_healthy) + self.assertEqual(description, obs_description) + + def assert_telemetry(self, call_args, response=''): + args, kw_args = call_args + self.assertFalse(kw_args['is_success']) + self.assertEqual('HealthObservation', kw_args['op']) + obs = json.loads(kw_args['message']) + self.assertEqual(obs['Value'], response) + + def test_observation_validity(self): + try: + Observation(name=None, is_healthy=True) + self.fail('Empty observation name should raise ValueError') + except ValueError: + pass + + try: + Observation(name='Name', is_healthy=None) + self.fail('Empty measurement should raise ValueError') + except ValueError: + pass + + o = Observation(name='Name', is_healthy=True, value=None, description=None) + self.assertEqual('', o.value) + self.assertEqual('', o.description) + + long_str = 's' * 200 + o = Observation(name=long_str, is_healthy=True, value=long_str, description=long_str) + self.assertEqual(200, len(o.name)) + self.assertEqual(200, len(o.value)) + self.assertEqual(200, len(o.description)) + + self.assertEqual(64, len(o.as_obj['ObservationName'])) + self.assertEqual(128, len(o.as_obj['Value'])) + self.assertEqual(128, len(o.as_obj['Description'])) + + def test_observation_json(self): + health_service = HealthService('endpoint') + health_service.observations.append(Observation(name='name', + is_healthy=True, + value='value', + description='description')) + expected_json = '{"Source": "WALinuxAgent", ' \ + '"Api": "reporttargethealth", ' \ + '"Version": "1.0", ' \ + '"Observations": [{' \ + '"Value": "value", ' \ + '"ObservationName": "name", ' \ + '"Description": "description", ' \ + '"IsHealthy": true' \ + '}]}' + expected = sorted(json.loads(expected_json).items()) + actual = sorted(json.loads(health_service.as_json).items()) + self.assertEqual(expected, actual) + + @patch('azurelinuxagent.common.event.add_event') + 
@patch("azurelinuxagent.common.utils.restutil.http_post") + def test_reporting(self, patch_post, patch_add_event): + health_service = HealthService('endpoint') + health_service.report_host_plugin_status(is_healthy=True, response='response') + self.assertEqual(1, patch_post.call_count) + self.assertEqual(0, patch_add_event.call_count) + self.assert_observation(call_args=patch_post.call_args, + name=HealthService.HOST_PLUGIN_STATUS_OBSERVATION_NAME, + is_healthy=True, + value='response', + description='') + self.assertEqual(0, len(health_service.observations)) + + health_service.report_host_plugin_status(is_healthy=False, response='error') + self.assertEqual(2, patch_post.call_count) + self.assertEqual(1, patch_add_event.call_count) + self.assert_telemetry(call_args=patch_add_event.call_args, response='error') + self.assert_observation(call_args=patch_post.call_args, + name=HealthService.HOST_PLUGIN_STATUS_OBSERVATION_NAME, + is_healthy=False, + value='error', + description='') + self.assertEqual(0, len(health_service.observations)) + + health_service.report_host_plugin_extension_artifact(is_healthy=True, source='source', response='response') + self.assertEqual(3, patch_post.call_count) + self.assertEqual(1, patch_add_event.call_count) + self.assert_observation(call_args=patch_post.call_args, + name=HealthService.HOST_PLUGIN_ARTIFACT_OBSERVATION_NAME, + is_healthy=True, + value='response', + description='source') + self.assertEqual(0, len(health_service.observations)) + + health_service.report_host_plugin_extension_artifact(is_healthy=False, source='source', response='response') + self.assertEqual(4, patch_post.call_count) + self.assertEqual(2, patch_add_event.call_count) + self.assert_telemetry(call_args=patch_add_event.call_args, response='response') + self.assert_observation(call_args=patch_post.call_args, + name=HealthService.HOST_PLUGIN_ARTIFACT_OBSERVATION_NAME, + is_healthy=False, + value='response', + description='source') + self.assertEqual(0, 
len(health_service.observations)) + + health_service.report_host_plugin_heartbeat(is_healthy=True) + self.assertEqual(5, patch_post.call_count) + self.assertEqual(2, patch_add_event.call_count) + self.assert_observation(call_args=patch_post.call_args, + name=HealthService.HOST_PLUGIN_HEARTBEAT_OBSERVATION_NAME, + is_healthy=True, + value='', + description='') + self.assertEqual(0, len(health_service.observations)) + + health_service.report_host_plugin_heartbeat(is_healthy=False) + self.assertEqual(3, patch_add_event.call_count) + self.assert_telemetry(call_args=patch_add_event.call_args) + self.assertEqual(6, patch_post.call_count) + self.assert_observation(call_args=patch_post.call_args, + name=HealthService.HOST_PLUGIN_HEARTBEAT_OBSERVATION_NAME, + is_healthy=False, + value='', + description='') + self.assertEqual(0, len(health_service.observations)) + + health_service.report_host_plugin_versions(is_healthy=True, response='response') + self.assertEqual(7, patch_post.call_count) + self.assertEqual(3, patch_add_event.call_count) + self.assert_observation(call_args=patch_post.call_args, + name=HealthService.HOST_PLUGIN_VERSIONS_OBSERVATION_NAME, + is_healthy=True, + value='response', + description='') + self.assertEqual(0, len(health_service.observations)) + + health_service.report_host_plugin_versions(is_healthy=False, response='response') + self.assertEqual(8, patch_post.call_count) + self.assertEqual(4, patch_add_event.call_count) + self.assert_telemetry(call_args=patch_add_event.call_args, response='response') + self.assert_observation(call_args=patch_post.call_args, + name=HealthService.HOST_PLUGIN_VERSIONS_OBSERVATION_NAME, + is_healthy=False, + value='response', + description='') + self.assertEqual(0, len(health_service.observations)) + + patch_post.side_effect = HttpError() + health_service.report_host_plugin_versions(is_healthy=True, response='') + + self.assertEqual(9, patch_post.call_count) + self.assertEqual(4, patch_add_event.call_count) + 
self.assertEqual(0, len(health_service.observations)) + + def test_observation_length(self): + health_service = HealthService('endpoint') + + # make 100 observations + for i in range(0, 100): + health_service._observe(is_healthy=True, name='{0}'.format(i)) + + # ensure we keep only 10 + self.assertEqual(10, len(health_service.observations)) + + # ensure we keep the most recent 10 + self.assertEqual('90', health_service.observations[0].name) + self.assertEqual('99', health_service.observations[9].name) + + def test_status_codes(self): + # healthy + self.assert_status_code(status_code=200, expected_healthy=True) + self.assert_status_code(status_code=201, expected_healthy=True) + self.assert_status_code(status_code=302, expected_healthy=True) + self.assert_status_code(status_code=400, expected_healthy=True) + self.assert_status_code(status_code=416, expected_healthy=True) + self.assert_status_code(status_code=419, expected_healthy=True) + self.assert_status_code(status_code=429, expected_healthy=True) + self.assert_status_code(status_code=502, expected_healthy=True) + + # unhealthy + self.assert_status_code(status_code=500, expected_healthy=False) + self.assert_status_code(status_code=501, expected_healthy=False) + self.assert_status_code(status_code=503, expected_healthy=False) + self.assert_status_code(status_code=504, expected_healthy=False) diff -Nru walinuxagent-2.2.21+really2.2.20/tests/protocol/test_hostplugin.py walinuxagent-2.2.45/tests/protocol/test_hostplugin.py --- walinuxagent-2.2.21+really2.2.20/tests/protocol/test_hostplugin.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/protocol/test_hostplugin.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -12,14 +12,26 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import base64 import json import sys +import datetime +import azurelinuxagent.common.protocol.restapi as restapi +import azurelinuxagent.common.protocol.wire as wire +import azurelinuxagent.common.protocol.hostplugin as hostplugin +from azurelinuxagent.common.errorstate import ErrorState + +from azurelinuxagent.common.exception import HttpError, ResourceGoneError from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.protocol.hostplugin import API_VERSION +from azurelinuxagent.common.utils import restutil +from tests.protocol.mockwiredata import WireProtocolData, DATA_FILE +from tests.protocol.test_wire import MockResponse +from tests.tools import * if sys.version_info[0] == 3: import http.client as httpclient @@ -28,19 +40,10 @@ import httplib as httpclient bytebuffer = buffer -import azurelinuxagent.common.protocol.restapi as restapi -import azurelinuxagent.common.protocol.wire as wire -import azurelinuxagent.common.protocol.hostplugin as hostplugin - -from azurelinuxagent.common import event -from azurelinuxagent.common.exception import ProtocolError, HttpError -from azurelinuxagent.common.protocol.hostplugin import API_VERSION -from azurelinuxagent.common.utils import restutil - -from tests.protocol.mockwiredata import WireProtocolData, DATA_FILE -from tests.tools import * hostplugin_status_url = "http://168.63.129.16:32526/status" +hostplugin_versions_url = "http://168.63.129.16:32526/versions" +health_service_url = 'http://168.63.129.16:80/HealthService' sas_url = "http://sas_url" wireserver_url = "168.63.129.16" @@ -55,15 +58,48 @@ if PY_VERSION_MAJOR > 2: faux_status_b64 = faux_status_b64.decode('utf-8') + class TestHostPlugin(AgentTestCase): + def _init_host(self): + test_goal_state = 
wire.GoalState(WireProtocolData(DATA_FILE).goal_state) + host_plugin = wire.HostPluginProtocol(wireserver_url, + test_goal_state.container_id, + test_goal_state.role_config_name) + self.assertTrue(host_plugin.health_service is not None) + return host_plugin + + def _init_status_blob(self): + wire_protocol_client = wire.WireProtocol(wireserver_url).client + status_blob = wire_protocol_client.status_blob + status_blob.data = faux_status + status_blob.vm_status = restapi.VMStatus(message="Ready", status="Ready") + return status_blob + + def _relax_timestamp(self, headers): + new_headers = [] + + for header in headers: + header_value = header['headerValue'] + if header['headerName'] == 'x-ms-date': + timestamp = header['headerValue'] + header_value = timestamp[:timestamp.rfind(":")] + + new_header = {header['headerName']: header_value} + new_headers.append(new_header) + + return new_headers + def _compare_data(self, actual, expected): + # Remove seconds from the timestamps for testing purposes, that level or granularity introduces test flakiness + actual['headers'] = self._relax_timestamp(actual['headers']) + expected['headers'] = self._relax_timestamp(expected['headers']) + for k in iter(expected.keys()): if k == 'content' or k == 'requestUri': if actual[k] != expected[k]: - print("Mismatch: Actual '{0}'='{1}', " \ - "Expected '{0}'='{3}'".format( - k, actual[k], expected[k])) + print("Mismatch: Actual '{0}'='{1}', " + "Expected '{0}'='{2}'".format(k, actual[k], expected[k])) return False elif k == 'headers': for h in expected['headers']: @@ -93,7 +129,7 @@ s = s.decode('utf-8') data['content'] = s return data - + def _hostplugin_headers(self, goal_state): return { 'x-ms-version': '2015-09-01', @@ -101,7 +137,7 @@ 'x-ms-containerid': goal_state.container_id, 'x-ms-host-config-name': goal_state.role_config_name } - + def _validate_hostplugin_args(self, args, goal_state, exp_method, exp_url, exp_data): args, kwargs = args self.assertEqual(exp_method, args[0]) @@ -112,62 
+148,239 @@ self.assertEqual(headers['x-ms-containerid'], goal_state.container_id) self.assertEqual(headers['x-ms-host-config-name'], goal_state.role_config_name) - def test_fallback(self): + @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_versions") + @patch("azurelinuxagent.ga.update.restutil.http_get") + @patch("azurelinuxagent.common.event.report_event") + def assert_ensure_initialized(self, patch_event, patch_http_get, patch_report_health, + response_body, + response_status_code, + should_initialize, + should_report_healthy): + + host = hostplugin.HostPluginProtocol(endpoint='ws', container_id='cid', role_config_name='rcf') + + host.is_initialized = False + patch_http_get.return_value = MockResponse(body=response_body, + reason='reason', + status_code=response_status_code) + return_value = host.ensure_initialized() + + self.assertEqual(return_value, host.is_available) + self.assertEqual(should_initialize, host.is_initialized) + + self.assertEqual(1, patch_event.call_count) + self.assertEqual('InitializeHostPlugin', patch_event.call_args[0][0]) + + self.assertEqual(should_initialize, patch_event.call_args[1]['is_success']) + self.assertEqual(1, patch_report_health.call_count) + + self.assertEqual(should_report_healthy, patch_report_health.call_args[1]['is_healthy']) + + actual_response = patch_report_health.call_args[1]['response'] + if should_initialize: + self.assertEqual('', actual_response) + else: + self.assertTrue('HTTP Failed' in actual_response) + self.assertTrue(response_body in actual_response) + self.assertTrue(ustr(response_status_code) in actual_response) + + def test_ensure_initialized(self): + """ + Test calls to ensure_initialized + """ + self.assert_ensure_initialized(response_body=api_versions, + response_status_code=200, + should_initialize=True, + should_report_healthy=True) + + self.assert_ensure_initialized(response_body='invalid ip', + response_status_code=400, + should_initialize=False, + 
should_report_healthy=True) + + self.assert_ensure_initialized(response_body='generic bad request', + response_status_code=400, + should_initialize=False, + should_report_healthy=True) + + self.assert_ensure_initialized(response_body='resource gone', + response_status_code=410, + should_initialize=False, + should_report_healthy=True) + + self.assert_ensure_initialized(response_body='generic error', + response_status_code=500, + should_initialize=False, + should_report_healthy=False) + + self.assert_ensure_initialized(response_body='upstream error', + response_status_code=502, + should_initialize=False, + should_report_healthy=True) + + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.ensure_initialized", return_value=True) + @patch("azurelinuxagent.common.protocol.wire.StatusBlob.upload", return_value=False) + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol._put_page_blob_status") + @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") + def test_default_channel(self, patch_update, patch_put, patch_upload, _): + """ + Status now defaults to HostPlugin. Validate that any errors on the public + channel are ignored. Validate that the default channel is never changed + as part of status upload. 
+ """ + test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) + status = restapi.VMStatus(status="Ready", message="Guest Agent is running") + + wire_protocol_client = wire.WireProtocol(wireserver_url).client + wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state) + wire_protocol_client.ext_conf = wire.ExtensionsConfig(None) + wire_protocol_client.ext_conf.status_upload_blob = sas_url + wire_protocol_client.ext_conf.status_upload_blob_type = page_blob_type + wire_protocol_client.status_blob.set_vm_status(status) + + # act + wire_protocol_client.upload_status_blob() + + # assert direct route is not called + self.assertEqual(0, patch_upload.call_count, "Direct channel was used") + + # assert host plugin route is called + self.assertEqual(1, patch_put.call_count, "Host plugin was not used") + + # assert update goal state is only called once, non-forced + self.assertEqual(1, patch_update.call_count, "Unexpected call count") + self.assertEqual(0, len(patch_update.call_args[1]), "Unexpected parameters") + + # ensure the correct url is used + self.assertEqual(sas_url, patch_put.call_args[0][0]) + + # ensure host plugin is not set as default + self.assertFalse(wire.HostPluginProtocol.is_default_channel()) + + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.ensure_initialized", + return_value=True) + @patch("azurelinuxagent.common.protocol.wire.StatusBlob.upload", + return_value=True) + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol._put_page_blob_status", + side_effect=HttpError("503")) + @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") + def test_fallback_channel_503(self, patch_update, patch_put, patch_upload, _): """ - Validate fallback to upload status using HostGAPlugin is happening when - status reporting via default method is unsuccessful + When host plugin returns a 503, we should fall back to the direct channel """ test_goal_state = 
wire.GoalState(WireProtocolData(DATA_FILE).goal_state) status = restapi.VMStatus(status="Ready", message="Guest Agent is running") - with patch.object(wire.HostPluginProtocol, - "ensure_initialized", - return_value=True): - with patch.object(wire.StatusBlob, "upload", return_value=False) as patch_upload: - with patch.object(wire.HostPluginProtocol, - "_put_page_blob_status") as patch_put: - wire_protocol_client = wire.WireProtocol(wireserver_url).client - wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state) - wire_protocol_client.ext_conf = wire.ExtensionsConfig(None) - wire_protocol_client.ext_conf.status_upload_blob = sas_url - wire_protocol_client.ext_conf.status_upload_blob_type = page_blob_type - wire_protocol_client.status_blob.set_vm_status(status) - wire_protocol_client.upload_status_blob() - self.assertEqual(patch_upload.call_count, 1) - self.assertTrue(patch_put.call_count == 1, - "Fallback was not engaged") - self.assertTrue(patch_put.call_args[0][0] == sas_url) - self.assertTrue(wire.HostPluginProtocol.is_default_channel()) - wire.HostPluginProtocol.set_default_channel(False) - def test_fallback_failure(self): + wire_protocol_client = wire.WireProtocol(wireserver_url).client + wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state) + wire_protocol_client.ext_conf = wire.ExtensionsConfig(None) + wire_protocol_client.ext_conf.status_upload_blob = sas_url + wire_protocol_client.ext_conf.status_upload_blob_type = page_blob_type + wire_protocol_client.status_blob.set_vm_status(status) + + # act + wire_protocol_client.upload_status_blob() + + # assert direct route is called + self.assertEqual(1, patch_upload.call_count, "Direct channel was not used") + + # assert host plugin route is called + self.assertEqual(1, patch_put.call_count, "Host plugin was not used") + + # assert update goal state is only called once, non-forced + self.assertEqual(1, patch_update.call_count, "Update goal state unexpected call count") + 
self.assertEqual(0, len(patch_update.call_args[1]), "Update goal state unexpected call count") + + # ensure the correct url is used + self.assertEqual(sas_url, patch_put.call_args[0][0]) + + # ensure host plugin is not set as default + self.assertFalse(wire.HostPluginProtocol.is_default_channel()) + + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.ensure_initialized", + return_value=True) + @patch("azurelinuxagent.common.protocol.wire.StatusBlob.upload", + return_value=True) + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol._put_page_blob_status", + side_effect=ResourceGoneError("410")) + @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") + def test_fallback_channel_410(self, patch_update, patch_put, patch_upload, _): """ - Validate that when host plugin fails, the default channel is reset + When host plugin returns a 410, we should force the goal state update and return """ test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) - status = restapi.VMStatus(status="Ready", - message="Guest Agent is running") - wire.HostPluginProtocol.set_default_channel(False) - with patch.object(wire.HostPluginProtocol, - "ensure_initialized", - return_value=True): - with patch.object(wire.StatusBlob, - "upload", - return_value=False): - with patch.object(wire.HostPluginProtocol, - "_put_page_blob_status", - side_effect=wire.HttpError("put failure")) as patch_put: - client = wire.WireProtocol(wireserver_url).client - client.get_goal_state = Mock(return_value=test_goal_state) - client.ext_conf = wire.ExtensionsConfig(None) - client.ext_conf.status_upload_blob = sas_url - client.ext_conf.status_upload_blob_type = page_blob_type - client.status_blob.set_vm_status(status) - client.upload_status_blob() - self.assertTrue(patch_put.call_count == 1, - "Fallback was not engaged") - self.assertFalse(wire.HostPluginProtocol.is_default_channel()) + status = restapi.VMStatus(status="Ready", message="Guest 
Agent is running") + + wire_protocol_client = wire.WireProtocol(wireserver_url).client + wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state) + wire_protocol_client.ext_conf = wire.ExtensionsConfig(None) + wire_protocol_client.ext_conf.status_upload_blob = sas_url + wire_protocol_client.ext_conf.status_upload_blob_type = page_blob_type + wire_protocol_client.status_blob.set_vm_status(status) + + # act + wire_protocol_client.upload_status_blob() + + # assert direct route is not called + self.assertEqual(0, patch_upload.call_count, "Direct channel was used") + + # assert host plugin route is called + self.assertEqual(1, patch_put.call_count, "Host plugin was not used") + + # assert update goal state is called twice, forced=True on the second + self.assertEqual(2, patch_update.call_count, "Update goal state unexpected call count") + self.assertEqual(1, len(patch_update.call_args[1]), "Update goal state unexpected call count") + self.assertTrue(patch_update.call_args[1]['forced'], "Update goal state unexpected call count") + + # ensure the correct url is used + self.assertEqual(sas_url, patch_put.call_args[0][0]) + + # ensure host plugin is not set as default + self.assertFalse(wire.HostPluginProtocol.is_default_channel()) + + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.ensure_initialized", + return_value=True) + @patch("azurelinuxagent.common.protocol.wire.StatusBlob.upload", + return_value=False) + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol._put_page_blob_status", + side_effect=HttpError("500")) + @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") + def test_fallback_channel_failure(self, patch_update, patch_put, patch_upload, _): + """ + When host plugin returns a 500, and direct fails, we should raise a ProtocolError + """ + test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) + status = restapi.VMStatus(status="Ready", message="Guest Agent is 
running") - def test_put_status_error_reporting(self): + wire_protocol_client = wire.WireProtocol(wireserver_url).client + wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state) + wire_protocol_client.ext_conf = wire.ExtensionsConfig(None) + wire_protocol_client.ext_conf.status_upload_blob = sas_url + wire_protocol_client.ext_conf.status_upload_blob_type = page_blob_type + wire_protocol_client.status_blob.set_vm_status(status) + + # act + self.assertRaises(wire.ProtocolError, wire_protocol_client.upload_status_blob) + + # assert direct route is not called + self.assertEqual(1, patch_upload.call_count, "Direct channel was not used") + + # assert host plugin route is called + self.assertEqual(1, patch_put.call_count, "Host plugin was not used") + + # assert update goal state is called twice, forced=True on the second + self.assertEqual(1, patch_update.call_count, "Update goal state unexpected call count") + self.assertEqual(0, len(patch_update.call_args[1]), "Update goal state unexpected call count") + + # ensure the correct url is used + self.assertEqual(sas_url, patch_put.call_args[0][0]) + + # ensure host plugin is not set as default + self.assertFalse(wire.HostPluginProtocol.is_default_channel()) + + @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") + @patch("azurelinuxagent.common.event.add_event") + def test_put_status_error_reporting(self, patch_add_event, _): """ Validate the telemetry when uploading status fails """ @@ -184,17 +397,25 @@ wire_protocol_client.ext_conf.status_upload_blob = sas_url wire_protocol_client.status_blob.set_vm_status(status) put_error = wire.HttpError("put status http error") - with patch.object(event, - "add_event") as patch_add_event: - with patch.object(restutil, - "http_put", - side_effect=put_error) as patch_http_put: - with patch.object(wire.HostPluginProtocol, - "ensure_initialized", return_value=True): - wire_protocol_client.upload_status_blob() - 
self.assertFalse(wire.HostPluginProtocol.is_default_channel()) - self.assertTrue(patch_add_event.call_count == 1) + with patch.object(restutil, + "http_put", + side_effect=put_error) as patch_http_put: + with patch.object(wire.HostPluginProtocol, + "ensure_initialized", return_value=True): + self.assertRaises(wire.ProtocolError, wire_protocol_client.upload_status_blob) + # The agent tries to upload via HostPlugin and that fails due to + # http_put having a side effect of "put_error" + # + # The agent tries to upload using a direct connection, and that succeeds. + self.assertEqual(1, wire_protocol_client.status_blob.upload.call_count) + # The agent never touches the default protocol is this code path, so no change. + self.assertFalse(wire.HostPluginProtocol.is_default_channel()) + # The agent never logs telemetry event for direct fallback + self.assertEqual(1, patch_add_event.call_count) + self.assertEqual('ReportStatus', patch_add_event.call_args[1]['op']) + self.assertTrue('Falling back to direct' in patch_add_event.call_args[1]['message']) + self.assertEqual(True, patch_add_event.call_args[1]['is_success']) def test_validate_http_request(self): """Validate correct set of data is sent to HostGAPlugin when reporting VM status""" @@ -209,8 +430,8 @@ exp_method = 'PUT' exp_url = hostplugin_status_url exp_data = self._hostplugin_data( - status_blob.get_block_blob_headers(len(faux_status)), - bytearray(faux_status, encoding='utf-8')) + status_blob.get_block_blob_headers(len(faux_status)), + bytearray(faux_status, encoding='utf-8')) with patch.object(restutil, "http_request") as patch_http: patch_http.return_value = Mock(status=httpclient.OK) @@ -222,13 +443,20 @@ patch_api.return_value = API_VERSION plugin.put_vm_status(status_blob, sas_url, block_blob_type) - self.assertTrue(patch_http.call_count == 1) + self.assertTrue(patch_http.call_count == 2) + + # first call is to host plugin self._validate_hostplugin_args( patch_http.call_args_list[0], test_goal_state, 
exp_method, exp_url, exp_data) - def test_no_fallback(self): + # second call is to health service + self.assertEqual('POST', patch_http.call_args_list[1][0][0]) + self.assertEqual(health_service_url, patch_http.call_args_list[1][0][1]) + + @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") + def test_no_fallback(self, _): """ Validate fallback to upload status using HostGAPlugin is not happening when status reporting via default method is successful @@ -254,6 +482,7 @@ test_goal_state.role_config_name) self.assertFalse(host_client.is_initialized) self.assertTrue(host_client.api_versions is None) + self.assertTrue(host_client.health_service is not None) status_blob = wire_protocol_client.status_blob status_blob.data = faux_status @@ -263,23 +492,29 @@ exp_method = 'PUT' exp_url = hostplugin_status_url exp_data = self._hostplugin_data( - status_blob.get_block_blob_headers(len(faux_status)), - bytearray(faux_status, encoding='utf-8')) + status_blob.get_block_blob_headers(len(faux_status)), + bytearray(faux_status, encoding='utf-8')) with patch.object(restutil, "http_request") as patch_http: patch_http.return_value = Mock(status=httpclient.OK) with patch.object(wire.HostPluginProtocol, - "get_api_versions") as patch_get: + "get_api_versions") as patch_get: patch_get.return_value = api_versions host_client.put_vm_status(status_blob, sas_url) - self.assertTrue(patch_http.call_count == 1) + self.assertTrue(patch_http.call_count == 2) + + # first call is to host plugin self._validate_hostplugin_args( patch_http.call_args_list[0], test_goal_state, exp_method, exp_url, exp_data) - + + # second call is to health service + self.assertEqual('POST', patch_http.call_args_list[1][0][0]) + self.assertEqual(health_service_url, patch_http.call_args_list[1][0][1]) + def test_validate_page_blobs(self): """Validate correct set of data is sent for page blobs""" wire_protocol_client = wire.WireProtocol(wireserver_url).client @@ -308,29 +543,35 @@ mock_response = 
MockResponse('', httpclient.OK) with patch.object(restutil, "http_request", - return_value=mock_response) as patch_http: + return_value=mock_response) as patch_http: with patch.object(wire.HostPluginProtocol, - "get_api_versions") as patch_get: + "get_api_versions") as patch_get: patch_get.return_value = api_versions host_client.put_vm_status(status_blob, sas_url) - self.assertTrue(patch_http.call_count == 2) + self.assertTrue(patch_http.call_count == 3) + # first call is to host plugin exp_data = self._hostplugin_data( - status_blob.get_page_blob_create_headers( - page_size)) + status_blob.get_page_blob_create_headers( + page_size)) self._validate_hostplugin_args( patch_http.call_args_list[0], test_goal_state, exp_method, exp_url, exp_data) + # second call is to health service + self.assertEqual('POST', patch_http.call_args_list[1][0][0]) + self.assertEqual(health_service_url, patch_http.call_args_list[1][0][1]) + + # last call is to host plugin exp_data = self._hostplugin_data( - status_blob.get_page_blob_page_headers( - 0, page_size), - page) - exp_data['requestUri'] += "?comp=page" + status_blob.get_page_blob_page_headers( + 0, page_size), + page) + exp_data['requestUri'] += "?comp=page" self._validate_hostplugin_args( - patch_http.call_args_list[1], + patch_http.call_args_list[2], test_goal_state, exp_method, exp_url, exp_data) @@ -347,6 +588,7 @@ test_goal_state.role_config_name) self.assertFalse(host_client.is_initialized) self.assertTrue(host_client.api_versions is None) + self.assertTrue(host_client.health_service is not None) with patch.object(wire.HostPluginProtocol, "get_api_versions", return_value=api_versions) as patch_get: actual_url, actual_headers = host_client.get_artifact_request(sas_url) @@ -356,15 +598,243 @@ for k in expected_headers: self.assertTrue(k in actual_headers) self.assertEqual(expected_headers[k], actual_headers[k]) - + + @patch("azurelinuxagent.common.utils.restutil.http_get") + def test_health(self, patch_http_get): + host_plugin 
= self._init_host() + + patch_http_get.return_value = MockResponse('', 200) + result = host_plugin.get_health() + self.assertEqual(1, patch_http_get.call_count) + self.assertTrue(result) + + patch_http_get.return_value = MockResponse('', 500) + result = host_plugin.get_health() + self.assertFalse(result) + + patch_http_get.side_effect = IOError('client IO error') + try: + host_plugin.get_health() + self.fail('IO error expected to be raised') + except IOError: + # expected + pass + + @patch("azurelinuxagent.common.utils.restutil.http_get", + return_value=MockResponse(status_code=200, body=b'')) + @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_versions") + def test_ensure_health_service_called(self, patch_http_get, patch_report_versions): + host_plugin = self._init_host() + + host_plugin.get_api_versions() + self.assertEqual(1, patch_http_get.call_count) + self.assertEqual(1, patch_report_versions.call_count) + + @patch("azurelinuxagent.common.utils.restutil.http_get") + @patch("azurelinuxagent.common.utils.restutil.http_post") + @patch("azurelinuxagent.common.utils.restutil.http_put") + def test_put_status_healthy_signal(self, patch_http_put, patch_http_post, patch_http_get): + host_plugin = self._init_host() + status_blob = self._init_status_blob() + # get_api_versions + patch_http_get.return_value = MockResponse(api_versions, 200) + # put status blob + patch_http_put.return_value = MockResponse(None, 201) + + host_plugin.put_vm_status(status_blob=status_blob, sas_url=sas_url) + self.assertEqual(1, patch_http_get.call_count) + self.assertEqual(hostplugin_versions_url, patch_http_get.call_args[0][0]) + + self.assertEqual(2, patch_http_put.call_count) + self.assertEqual(hostplugin_status_url, patch_http_put.call_args_list[0][0][0]) + self.assertEqual(hostplugin_status_url, patch_http_put.call_args_list[1][0][0]) + + self.assertEqual(2, patch_http_post.call_count) + + # signal for /versions + self.assertEqual(health_service_url, 
patch_http_post.call_args_list[0][0][0]) + jstr = patch_http_post.call_args_list[0][0][1] + obj = json.loads(jstr) + self.assertEqual(1, len(obj['Observations'])) + self.assertTrue(obj['Observations'][0]['IsHealthy']) + self.assertEqual('GuestAgentPluginVersions', obj['Observations'][0]['ObservationName']) + + # signal for /status + self.assertEqual(health_service_url, patch_http_post.call_args_list[1][0][0]) + jstr = patch_http_post.call_args_list[1][0][1] + obj = json.loads(jstr) + self.assertEqual(1, len(obj['Observations'])) + self.assertTrue(obj['Observations'][0]['IsHealthy']) + self.assertEqual('GuestAgentPluginStatus', obj['Observations'][0]['ObservationName']) + + @patch("azurelinuxagent.common.utils.restutil.http_get") + @patch("azurelinuxagent.common.utils.restutil.http_post") + @patch("azurelinuxagent.common.utils.restutil.http_put") + def test_put_status_unhealthy_signal_transient(self, patch_http_put, patch_http_post, patch_http_get): + host_plugin = self._init_host() + status_blob = self._init_status_blob() + # get_api_versions + patch_http_get.return_value = MockResponse(api_versions, 200) + # put status blob + patch_http_put.return_value = MockResponse(None, 500) + + with self.assertRaises(HttpError): + host_plugin.put_vm_status(status_blob=status_blob, sas_url=sas_url) + + self.assertEqual(1, patch_http_get.call_count) + self.assertEqual(hostplugin_versions_url, patch_http_get.call_args[0][0]) + + self.assertEqual(1, patch_http_put.call_count) + self.assertEqual(hostplugin_status_url, patch_http_put.call_args[0][0]) + + self.assertEqual(2, patch_http_post.call_count) + + # signal for /versions + self.assertEqual(health_service_url, patch_http_post.call_args_list[0][0][0]) + jstr = patch_http_post.call_args_list[0][0][1] + obj = json.loads(jstr) + self.assertEqual(1, len(obj['Observations'])) + self.assertTrue(obj['Observations'][0]['IsHealthy']) + self.assertEqual('GuestAgentPluginVersions', obj['Observations'][0]['ObservationName']) + + # signal 
for /status + self.assertEqual(health_service_url, patch_http_post.call_args_list[1][0][0]) + jstr = patch_http_post.call_args_list[1][0][1] + obj = json.loads(jstr) + self.assertEqual(1, len(obj['Observations'])) + self.assertTrue(obj['Observations'][0]['IsHealthy']) + self.assertEqual('GuestAgentPluginStatus', obj['Observations'][0]['ObservationName']) + + @patch("azurelinuxagent.common.utils.restutil.http_get") + @patch("azurelinuxagent.common.utils.restutil.http_post") + @patch("azurelinuxagent.common.utils.restutil.http_put") + def test_put_status_unhealthy_signal_permanent(self, patch_http_put, patch_http_post, patch_http_get): + host_plugin = self._init_host() + status_blob = self._init_status_blob() + # get_api_versions + patch_http_get.return_value = MockResponse(api_versions, 200) + # put status blob + patch_http_put.return_value = MockResponse(None, 500) + + host_plugin.status_error_state.is_triggered = Mock(return_value=True) + + with self.assertRaises(HttpError): + host_plugin.put_vm_status(status_blob=status_blob, sas_url=sas_url) + + self.assertEqual(1, patch_http_get.call_count) + self.assertEqual(hostplugin_versions_url, patch_http_get.call_args[0][0]) + + self.assertEqual(1, patch_http_put.call_count) + self.assertEqual(hostplugin_status_url, patch_http_put.call_args[0][0]) + + self.assertEqual(2, patch_http_post.call_count) + + # signal for /versions + self.assertEqual(health_service_url, patch_http_post.call_args_list[0][0][0]) + jstr = patch_http_post.call_args_list[0][0][1] + obj = json.loads(jstr) + self.assertEqual(1, len(obj['Observations'])) + self.assertTrue(obj['Observations'][0]['IsHealthy']) + self.assertEqual('GuestAgentPluginVersions', obj['Observations'][0]['ObservationName']) + + # signal for /status + self.assertEqual(health_service_url, patch_http_post.call_args_list[1][0][0]) + jstr = patch_http_post.call_args_list[1][0][1] + obj = json.loads(jstr) + self.assertEqual(1, len(obj['Observations'])) + 
self.assertFalse(obj['Observations'][0]['IsHealthy']) + self.assertEqual('GuestAgentPluginStatus', obj['Observations'][0]['ObservationName']) + + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.should_report", return_value=True) + @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_extension_artifact") + def test_report_fetch_health(self, patch_report_artifact, patch_should_report): + host_plugin = self._init_host() + host_plugin.report_fetch_health(uri='', is_healthy=True) + self.assertEqual(0, patch_should_report.call_count) + + host_plugin.report_fetch_health(uri='http://169.254.169.254/extensionArtifact', is_healthy=True) + self.assertEqual(0, patch_should_report.call_count) + + host_plugin.report_fetch_health(uri='http://168.63.129.16:32526/status', is_healthy=True) + self.assertEqual(0, patch_should_report.call_count) + + self.assertEqual(None, host_plugin.fetch_last_timestamp) + host_plugin.report_fetch_health(uri='http://168.63.129.16:32526/extensionArtifact', is_healthy=True) + self.assertNotEqual(None, host_plugin.fetch_last_timestamp) + self.assertEqual(1, patch_should_report.call_count) + self.assertEqual(1, patch_report_artifact.call_count) + + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.should_report", return_value=True) + @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_status") + def test_report_status_health(self, patch_report_status, patch_should_report): + host_plugin = self._init_host() + self.assertEqual(None, host_plugin.status_last_timestamp) + host_plugin.report_status_health(is_healthy=True) + self.assertNotEqual(None, host_plugin.status_last_timestamp) + self.assertEqual(1, patch_should_report.call_count) + self.assertEqual(1, patch_report_status.call_count) + + def test_should_report(self): + host_plugin = self._init_host() + error_state = ErrorState(min_timedelta=datetime.timedelta(minutes=5)) + period = 
datetime.timedelta(minutes=1) + last_timestamp = None + + # first measurement at 0s, should report + is_healthy = True + actual = host_plugin.should_report(is_healthy, + error_state, + last_timestamp, + period) + self.assertEqual(True, actual) + + # second measurement at 30s, should not report + last_timestamp = datetime.datetime.utcnow() - datetime.timedelta(seconds=30) + actual = host_plugin.should_report(is_healthy, + error_state, + last_timestamp, + period) + self.assertEqual(False, actual) + + # third measurement at 60s, should report + last_timestamp = datetime.datetime.utcnow() - datetime.timedelta(seconds=60) + actual = host_plugin.should_report(is_healthy, + error_state, + last_timestamp, + period) + self.assertEqual(True, actual) + + # fourth measurement unhealthy, should report and increment counter + is_healthy = False + self.assertEqual(0, error_state.count) + actual = host_plugin.should_report(is_healthy, + error_state, + last_timestamp, + period) + self.assertEqual(1, error_state.count) + self.assertEqual(True, actual) + + # fifth measurement, should not report and reset counter + is_healthy = True + last_timestamp = datetime.datetime.utcnow() - datetime.timedelta(seconds=30) + self.assertEqual(1, error_state.count) + actual = host_plugin.should_report(is_healthy, + error_state, + last_timestamp, + period) + self.assertEqual(0, error_state.count) + self.assertEqual(False, actual) + class MockResponse: - def __init__(self, body, status_code): + def __init__(self, body, status_code, reason=''): self.body = body self.status = status_code + self.reason = reason def read(self): - return self.body + return self.body if sys.version_info[0] == 2 else bytes(self.body, encoding='utf-8') + if __name__ == '__main__': unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/protocol/test_image_info_matcher.py walinuxagent-2.2.45/tests/protocol/test_image_info_matcher.py --- walinuxagent-2.2.21+really2.2.20/tests/protocol/test_image_info_matcher.py 
1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/protocol/test_image_info_matcher.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,139 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.6+ and Openssl 1.0+ + +from azurelinuxagent.common.datacontract import set_properties +from azurelinuxagent.common.protocol.imds import ImageInfoMatcher +from tests.tools import * + + +class TestImageInfoMatcher(unittest.TestCase): + def test_image_does_not_exist(self): + doc = '{}' + + test_subject = ImageInfoMatcher(doc) + self.assertFalse(test_subject.is_match("Red Hat", "RHEL", "6.3", "")) + + def test_image_exists_by_sku(self): + doc = '''{ + "CANONICAL": { + "UBUNTUSERVER": { + "16.04-LTS": { "Match": ".*" } + } + } + }''' + + test_subject = ImageInfoMatcher(doc) + self.assertTrue(test_subject.is_match("Canonical", "UbuntuServer", "16.04-LTS", "")) + self.assertTrue(test_subject.is_match("Canonical", "UbuntuServer", "16.04-LTS", "16.04.201805090")) + + self.assertFalse(test_subject.is_match("Canonical", "UbuntuServer", "14.04.0-LTS", "16.04.201805090")) + + def test_image_exists_by_version(self): + doc = '''{ + "REDHAT": { + "RHEL": { + "Minimum": "6.3" + } + } + }''' + + test_subject = ImageInfoMatcher(doc) + self.assertFalse(test_subject.is_match("RedHat", "RHEL", "6.1", "")) + self.assertFalse(test_subject.is_match("RedHat", "RHEL", "6.2", "")) + + 
self.assertTrue(test_subject.is_match("RedHat", "RHEL", "6.3", "")) + self.assertTrue(test_subject.is_match("RedHat", "RHEL", "6.4", "")) + self.assertTrue(test_subject.is_match("RedHat", "RHEL", "6.5", "")) + self.assertTrue(test_subject.is_match("RedHat", "RHEL", "7.0", "")) + self.assertTrue(test_subject.is_match("RedHat", "RHEL", "7.1", "")) + + def test_image_exists_by_version01(self): + """ + Test case to ensure the matcher exhaustively searches all cases. + + REDHAT/RHEL have a SKU >= 6.3 is less precise than + REDHAT/RHEL/7-LVM have a any version. + + Both should return a successful match. + """ + doc = '''{ + "REDHAT": { + "RHEL": { + "Minimum": "6.3", + "7-LVM": { "Match": ".*" } + } + } + }''' + + test_subject = ImageInfoMatcher(doc) + + self.assertTrue(test_subject.is_match("RedHat", "RHEL", "6.3", "")) + self.assertTrue(test_subject.is_match("RedHat", "RHEL", "7-LVM", "")) + + def test_ignores_case(self): + doc = '''{ + "CANONICAL": { + "UBUNTUSERVER": { + "16.04-LTS": { "Match": ".*" } + } + } + }''' + + test_subject = ImageInfoMatcher(doc) + self.assertTrue(test_subject.is_match("canonical", "ubuntuserver", "16.04-lts", "")) + self.assertFalse(test_subject.is_match("canonical", "ubuntuserver", "14.04.0-lts", "16.04.201805090")) + + def test_list_operator(self): + doc = '''{ + "CANONICAL": { + "UBUNTUSERVER": { + "List": [ "14.04.0-LTS", "14.04.1-LTS" ] + } + } + }''' + + test_subject = ImageInfoMatcher(doc) + self.assertTrue(test_subject.is_match("Canonical", "UbuntuServer", "14.04.0-LTS", "")) + self.assertTrue(test_subject.is_match("Canonical", "UbuntuServer", "14.04.1-LTS", "")) + + self.assertFalse(test_subject.is_match("Canonical", "UbuntuServer", "22.04-LTS", "")) + + def test_invalid_version(self): + doc = '''{ + "REDHAT": { + "RHEL": { + "Minimum": "6.3" + } + } + }''' + + test_subject = ImageInfoMatcher(doc) + self.assertFalse(test_subject.is_match("RedHat", "RHEL", "16.04-LTS", "")) + + # This is *expected* behavior as opposed to desirable. 
The specification is + # controlled by the agent, so there is no reason to use these values, but if + # one does this is expected behavior. + # + # FlexibleVersion chops off all leading zeros. + self.assertTrue(test_subject.is_match("RedHat", "RHEL", "6.04", "")) + # FlexibleVersion coerces everything to a string + self.assertTrue(test_subject.is_match("RedHat", "RHEL", 6.04, "")) + + +if __name__ == '__main__': + unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/protocol/test_imds.py walinuxagent-2.2.45/tests/protocol/test_imds.py --- walinuxagent-2.2.21+really2.2.20/tests/protocol/test_imds.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/protocol/test_imds.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,490 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ + +import json + +import azurelinuxagent.common.protocol.imds as imds + +from azurelinuxagent.common.datacontract import set_properties +from azurelinuxagent.common.exception import HttpError, ResourceGoneError +from azurelinuxagent.common.future import ustr, httpclient +from azurelinuxagent.common.utils import restutil +from tests.ga.test_update import ResponseMock +from tests.tools import * + + +class TestImds(AgentTestCase): + @patch("azurelinuxagent.ga.update.restutil.http_get") + def test_get(self, mock_http_get): + mock_http_get.return_value = ResponseMock(response='''{ + "location": "westcentralus", + "name": "unit_test", + "offer": "UnitOffer", + "osType": "Linux", + "placementGroupId": "", + "platformFaultDomain": "0", + "platformUpdateDomain": "0", + "publisher": "UnitPublisher", + "resourceGroupName": "UnitResourceGroupName", + "sku": "UnitSku", + "subscriptionId": "e4402c6c-2804-4a0a-9dee-d61918fc4d28", + "tags": "Key1:Value1;Key2:Value2", + "vmId": "f62f23fb-69e2-4df0-a20b-cb5c201a3e7a", + "version": "UnitVersion", + "vmSize": "Standard_D1_v2" + }'''.encode('utf-8')) + + test_subject = imds.ImdsClient() + test_subject.get_compute() + + self.assertEqual(1, mock_http_get.call_count) + positional_args, kw_args = mock_http_get.call_args + + self.assertEqual('http://169.254.169.254/metadata/instance/compute?api-version=2018-02-01', positional_args[0]) + self.assertTrue('User-Agent' in kw_args['headers']) + self.assertTrue('Metadata' in kw_args['headers']) + self.assertEqual(True, kw_args['headers']['Metadata']) + + @patch("azurelinuxagent.ga.update.restutil.http_get") + def test_get_bad_request(self, mock_http_get): + mock_http_get.return_value = ResponseMock(status=restutil.httpclient.BAD_REQUEST) + + test_subject = imds.ImdsClient() + self.assertRaises(HttpError, test_subject.get_compute) + + @patch("azurelinuxagent.ga.update.restutil.http_get") + def test_get_internal_service_error(self, mock_http_get): + 
mock_http_get.return_value = ResponseMock(status=restutil.httpclient.INTERNAL_SERVER_ERROR) + + test_subject = imds.ImdsClient() + self.assertRaises(HttpError, test_subject.get_compute) + + @patch("azurelinuxagent.ga.update.restutil.http_get") + def test_get_empty_response(self, mock_http_get): + mock_http_get.return_value = ResponseMock(response=''.encode('utf-8')) + + test_subject = imds.ImdsClient() + self.assertRaises(ValueError, test_subject.get_compute) + + def test_deserialize_ComputeInfo(self): + s = '''{ + "location": "westcentralus", + "name": "unit_test", + "offer": "UnitOffer", + "osType": "Linux", + "placementGroupId": "", + "platformFaultDomain": "0", + "platformUpdateDomain": "0", + "publisher": "UnitPublisher", + "resourceGroupName": "UnitResourceGroupName", + "sku": "UnitSku", + "subscriptionId": "e4402c6c-2804-4a0a-9dee-d61918fc4d28", + "tags": "Key1:Value1;Key2:Value2", + "vmId": "f62f23fb-69e2-4df0-a20b-cb5c201a3e7a", + "version": "UnitVersion", + "vmSize": "Standard_D1_v2", + "vmScaleSetName": "MyScaleSet", + "zone": "In" + }''' + + data = json.loads(s, encoding='utf-8') + + compute_info = imds.ComputeInfo() + set_properties("compute", compute_info, data) + + self.assertEqual('westcentralus', compute_info.location) + self.assertEqual('unit_test', compute_info.name) + self.assertEqual('UnitOffer', compute_info.offer) + self.assertEqual('Linux', compute_info.osType) + self.assertEqual('', compute_info.placementGroupId) + self.assertEqual('0', compute_info.platformFaultDomain) + self.assertEqual('0', compute_info.platformUpdateDomain) + self.assertEqual('UnitPublisher', compute_info.publisher) + self.assertEqual('UnitResourceGroupName', compute_info.resourceGroupName) + self.assertEqual('UnitSku', compute_info.sku) + self.assertEqual('e4402c6c-2804-4a0a-9dee-d61918fc4d28', compute_info.subscriptionId) + self.assertEqual('Key1:Value1;Key2:Value2', compute_info.tags) + self.assertEqual('f62f23fb-69e2-4df0-a20b-cb5c201a3e7a', compute_info.vmId) + 
self.assertEqual('UnitVersion', compute_info.version) + self.assertEqual('Standard_D1_v2', compute_info.vmSize) + self.assertEqual('MyScaleSet', compute_info.vmScaleSetName) + self.assertEqual('In', compute_info.zone) + + self.assertEqual('UnitPublisher:UnitOffer:UnitSku:UnitVersion', compute_info.image_info) + + def test_is_custom_image(self): + image_origin = self._setup_image_origin_assert("", "", "", "") + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_CUSTOM, image_origin) + + def test_is_endorsed_CentOS(self): + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.3", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.4", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.5", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.6", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.7", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.8", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.9", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "7.0", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "7.1", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "7.2", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "7.3", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "7.4", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, 
self._setup_image_origin_assert("OpenLogic", "CentOS", "7-LVM", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "7-RAW", "")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS-HPC", "6.5", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS-HPC", "6.8", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS-HPC", "7.1", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS-HPC", "7.3", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS-HPC", "7.4", "")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.2", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.1", "")) + + def test_is_endorsed_CoreOS(self): + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("CoreOS", "CoreOS", "stable", "494.4.0")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("CoreOS", "CoreOS", "stable", "899.17.0")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("CoreOS", "CoreOS", "stable", "1688.5.3")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("CoreOS", "CoreOS", "stable", "494.3.0")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("CoreOS", "CoreOS", "alpha", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("CoreOS", "CoreOS", "beta", "")) + + def test_is_endorsed_Debian(self): + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("credativ", "Debian", "7", "")) + 
self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("credativ", "Debian", "8", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("credativ", "Debian", "9", "")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("credativ", "Debian", "9-DAILY", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("credativ", "Debian", "10-DAILY", "")) + + def test_is_endorsed_Rhel(self): + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "6.7", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "6.8", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "6.9", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "7.0", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "7.1", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "7.2", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "7.3", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "7.4", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "7-LVM", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "7-RAW", "")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP-HANA", "7.2", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP-HANA", "7.3", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", 
"RHEL-SAP-HANA", "7.4", "")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP", "7.2", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP", "7.3", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP", "7.4", "")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP-APPS", "7.2", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP-APPS", "7.3", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP-APPS", "7.4", "")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("RedHat", "RHEL", "6.6", "")) + + def test_is_endorsed_SuSE(self): + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES", "11-SP4", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-BYOS", "11-SP4", "")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES", "12-SP1", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES", "12-SP2", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES", "12-SP3", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES", "12-SP4", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES", "12-SP5", "")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-BYOS", "12-SP1", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-BYOS", "12-SP2", "")) + 
self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-BYOS", "12-SP3", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-BYOS", "12-SP4", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-BYOS", "12-SP5", "")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-SAP", "12-SP1", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-SAP", "12-SP2", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-SAP", "12-SP3", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-SAP", "12-SP4", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-SAP", "12-SP5", "")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("SuSE", "SLES", "11-SP3", "")) + + def test_is_endorsed_UbuntuServer(self): + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.0-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.1-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.2-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.3-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.4-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.5-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, 
self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.6-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.7-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.8-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "16.04-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "18.04-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "20.04-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "22.04-LTS", "")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("Canonical", "UbuntuServer", "12.04-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("Canonical", "UbuntuServer", "17.10", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("Canonical", "UbuntuServer", "18.04-DAILY-LTS", "")) + + @staticmethod + def _setup_image_origin_assert(publisher, offer, sku, version): + s = '''{{ + "publisher": "{0}", + "offer": "{1}", + "sku": "{2}", + "version": "{3}" + }}'''.format(publisher, offer, sku, version) + + data = json.loads(s, encoding='utf-8') + compute_info = imds.ComputeInfo() + set_properties("compute", compute_info, data) + + return compute_info.image_origin + + def test_response_validation(self): + # invalid json or empty response + self._assert_validation(http_status_code=200, + http_response='', + expected_valid=False, + expected_response='JSON parsing failed') + + self._assert_validation(http_status_code=200, + http_response=None, + expected_valid=False, + expected_response='JSON parsing failed') 
+ + self._assert_validation(http_status_code=200, + http_response='{ bad json ', + expected_valid=False, + expected_response='JSON parsing failed') + + # 500 response + self._assert_validation(http_status_code=500, + http_response='error response', + expected_valid=False, + expected_response='IMDS error in /metadata/instance: [HTTP Failed] [500: reason] error response') + + # 429 response - throttling does not mean service is unhealthy + self._assert_validation(http_status_code=429, + http_response='server busy', + expected_valid=True, + expected_response='[HTTP Failed] [429: reason] server busy') + + # 404 response - error responses do not mean service is unhealthy + self._assert_validation(http_status_code=404, + http_response='not found', + expected_valid=True, + expected_response='[HTTP Failed] [404: reason] not found') + + # valid json + self._assert_validation(http_status_code=200, + http_response=self._imds_response('valid'), + expected_valid=True, + expected_response='') + # unicode + self._assert_validation(http_status_code=200, + http_response=self._imds_response('unicode'), + expected_valid=True, + expected_response='') + + def test_field_validation(self): + # TODO: compute fields (#1249) + + self._assert_field('network', 'interface', 'ipv4', 'ipAddress', 'privateIpAddress') + self._assert_field('network', 'interface', 'ipv4', 'ipAddress') + self._assert_field('network', 'interface', 'ipv4') + self._assert_field('network', 'interface', 'macAddress') + self._assert_field('network') + + def _assert_field(self, *fields): + response = self._imds_response('valid') + response_obj = json.loads(ustr(response, encoding="utf-8")) + + # assert empty value + self._update_field(response_obj, fields, '') + altered_response = json.dumps(response_obj).encode() + self._assert_validation(http_status_code=200, + http_response=altered_response, + expected_valid=False, + expected_response='Empty field: [{0}]'.format(fields[-1])) + + # assert missing value + 
self._update_field(response_obj, fields, None) + altered_response = json.dumps(response_obj).encode() + self._assert_validation(http_status_code=200, + http_response=altered_response, + expected_valid=False, + expected_response='Missing field: [{0}]'.format(fields[-1])) + + def _update_field(self, obj, fields, val): + if isinstance(obj, list): + self._update_field(obj[0], fields, val) + else: + f = fields[0] + if len(fields) == 1: + if val is None: + del obj[f] + else: + obj[f] = val + else: + self._update_field(obj[f], fields[1:], val) + + @staticmethod + def _imds_response(f): + path = os.path.join(data_dir, "imds", "{0}.json".format(f)) + with open(path, "rb") as fh: + return fh.read() + + def _assert_validation(self, http_status_code, http_response, expected_valid, expected_response): + test_subject = imds.ImdsClient() + with patch("azurelinuxagent.common.utils.restutil.http_get") as mock_http_get: + mock_http_get.return_value = ResponseMock(status=http_status_code, + reason='reason', + response=http_response) + validate_response = test_subject.validate() + + self.assertEqual(1, mock_http_get.call_count) + positional_args, kw_args = mock_http_get.call_args + + self.assertTrue('User-Agent' in kw_args['headers']) + self.assertEqual(restutil.HTTP_USER_AGENT_HEALTH, kw_args['headers']['User-Agent']) + self.assertTrue('Metadata' in kw_args['headers']) + self.assertEqual(True, kw_args['headers']['Metadata']) + self.assertEqual('http://169.254.169.254/metadata/instance?api-version=2018-02-01', + positional_args[0]) + self.assertEqual(expected_valid, validate_response[0]) + self.assertTrue(expected_response in validate_response[1], + "Expected: '{0}', Actual: '{1}'" + .format(expected_response, validate_response[1])) + + @patch("azurelinuxagent.common.protocol.util.ProtocolUtil") + def test_endpoint_fallback(self, ProtocolUtil): + # http error status codes are tested in test_response_validation, none of which + # should trigger a fallback. 
This is confirmed as _assert_validation will count + # http GET calls and enforces a single GET call (fallback would cause 2) and + # checks the url called. + + test_subject = imds.ImdsClient() + ProtocolUtil().get_wireserver_endpoint.return_value = "foo.bar" + + # ensure user-agent gets set correctly + for is_health, expected_useragent in [(False, restutil.HTTP_USER_AGENT), (True, restutil.HTTP_USER_AGENT_HEALTH)]: + # set a different resource path for health query to make debugging unit test easier + resource_path = 'something/health' if is_health else 'something' + + for has_primary_ioerror in (False, True): + # secondary endpoint unreachable + test_subject._http_get = Mock(side_effect=self._mock_http_get) + self._mock_imds_setup(primary_ioerror=has_primary_ioerror, secondary_ioerror=True) + result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health) + self.assertFalse(result.success) if has_primary_ioerror else self.assertTrue(result.success) + self.assertFalse(result.service_error) + if has_primary_ioerror: + self.assertEqual('IMDS error in /metadata/{0}: Unable to connect to endpoint'.format(resource_path), result.response) + else: + self.assertEqual('Mock success response', result.response) + for _, kwargs in test_subject._http_get.call_args_list: + self.assertTrue('User-Agent' in kwargs['headers']) + self.assertEqual(expected_useragent, kwargs['headers']['User-Agent']) + self.assertEqual(2 if has_primary_ioerror else 1, test_subject._http_get.call_count) + + # IMDS success + test_subject._http_get = Mock(side_effect=self._mock_http_get) + self._mock_imds_setup(primary_ioerror=has_primary_ioerror) + result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health) + self.assertTrue(result.success) + self.assertFalse(result.service_error) + self.assertEqual('Mock success response', result.response) + for _, kwargs in test_subject._http_get.call_args_list: + self.assertTrue('User-Agent' in kwargs['headers']) + 
self.assertEqual(expected_useragent, kwargs['headers']['User-Agent']) + self.assertEqual(2 if has_primary_ioerror else 1, test_subject._http_get.call_count) + + # IMDS throttled + test_subject._http_get = Mock(side_effect=self._mock_http_get) + self._mock_imds_setup(primary_ioerror=has_primary_ioerror, throttled=True) + result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health) + self.assertFalse(result.success) + self.assertFalse(result.service_error) + self.assertEqual('IMDS error in /metadata/{0}: Throttled'.format(resource_path), result.response) + for _, kwargs in test_subject._http_get.call_args_list: + self.assertTrue('User-Agent' in kwargs['headers']) + self.assertEqual(expected_useragent, kwargs['headers']['User-Agent']) + self.assertEqual(2 if has_primary_ioerror else 1, test_subject._http_get.call_count) + + # IMDS gone error + test_subject._http_get = Mock(side_effect=self._mock_http_get) + self._mock_imds_setup(primary_ioerror=has_primary_ioerror, gone_error=True) + result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health) + self.assertFalse(result.success) + self.assertTrue(result.service_error) + self.assertEqual('IMDS error in /metadata/{0}: HTTP Failed with Status Code 410: Gone'.format(resource_path), result.response) + for _, kwargs in test_subject._http_get.call_args_list: + self.assertTrue('User-Agent' in kwargs['headers']) + self.assertEqual(expected_useragent, kwargs['headers']['User-Agent']) + self.assertEqual(2 if has_primary_ioerror else 1, test_subject._http_get.call_count) + + # IMDS bad request + test_subject._http_get = Mock(side_effect=self._mock_http_get) + self._mock_imds_setup(primary_ioerror=has_primary_ioerror, bad_request=True) + result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health) + self.assertFalse(result.success) + self.assertFalse(result.service_error) + self.assertEqual('IMDS error in /metadata/{0}: [HTTP Failed] [404: reason] Mock not 
found'.format(resource_path), result.response) + for _, kwargs in test_subject._http_get.call_args_list: + self.assertTrue('User-Agent' in kwargs['headers']) + self.assertEqual(expected_useragent, kwargs['headers']['User-Agent']) + self.assertEqual(2 if has_primary_ioerror else 1, test_subject._http_get.call_count) + + def _mock_imds_setup(self, primary_ioerror=False, secondary_ioerror=False, gone_error=False, throttled=False, bad_request=False): + self._mock_imds_expect_fallback = primary_ioerror + self._mock_imds_primary_ioerror = primary_ioerror + self._mock_imds_secondary_ioerror = secondary_ioerror + self._mock_imds_gone_error = gone_error + self._mock_imds_throttled = throttled + self._mock_imds_bad_request = bad_request + + def _mock_http_get(self, *_, **kwargs): + if "foo.bar" == kwargs['endpoint'] and not self._mock_imds_expect_fallback: + raise Exception("Unexpected endpoint called") + if self._mock_imds_primary_ioerror and "169.254.169.254" == kwargs['endpoint']: + raise HttpError("[HTTP Failed] GET http://{0}/metadata/{1} -- IOError timed out -- 6 attempts made" + .format(kwargs['endpoint'], kwargs['resource_path'])) + if self._mock_imds_secondary_ioerror and "foo.bar" == kwargs['endpoint']: + raise HttpError("[HTTP Failed] GET http://{0}/metadata/{1} -- IOError timed out -- 6 attempts made" + .format(kwargs['endpoint'], kwargs['resource_path'])) + if self._mock_imds_gone_error: + raise ResourceGoneError("Resource is gone") + if self._mock_imds_throttled: + raise HttpError("[HTTP Retry] GET http://{0}/metadata/{1} -- Status Code 429 -- 25 attempts made" + .format(kwargs['endpoint'], kwargs['resource_path'])) + + resp = MagicMock() + resp.reason = 'reason' + if self._mock_imds_bad_request: + resp.status = httpclient.NOT_FOUND + resp.read.return_value = 'Mock not found' + else: + resp.status = httpclient.OK + resp.read.return_value = 'Mock success response' + return resp + + +if __name__ == '__main__': + unittest.main() diff -Nru 
walinuxagent-2.2.21+really2.2.20/tests/protocol/test_metadata.py walinuxagent-2.2.45/tests/protocol/test_metadata.py --- walinuxagent-2.2.21+really2.2.20/tests/protocol/test_metadata.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/protocol/test_metadata.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,20 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # -import json - -from azurelinuxagent.common.future import ustr - -from azurelinuxagent.common.utils.restutil import httpclient +from azurelinuxagent.common.datacontract import get_properties, set_properties +from azurelinuxagent.common.exception import ProtocolError from azurelinuxagent.common.protocol.metadata import * from azurelinuxagent.common.protocol.restapi import * +from azurelinuxagent.common.telemetryevent import TelemetryEventList, TelemetryEvent +from azurelinuxagent.common.utils import restutil from tests.protocol.mockmetadata import * from tests.tools import * + class TestMetadataProtocolGetters(AgentTestCase): def load_json(self, path): return json.loads(ustr(load_data(path)), encoding="utf-8") @@ -49,7 +49,6 @@ test_data = MetadataProtocolData(DATA_FILE_NO_EXT) self._test_getters(test_data, *args) - @patch("azurelinuxagent.common.protocol.metadata.MetadataProtocol.update_goal_state") @patch("azurelinuxagent.common.protocol.metadata.MetadataProtocol._get_data") def test_get_vmagents_manifests(self, mock_get, mock_update): diff -Nru walinuxagent-2.2.21+really2.2.20/tests/protocol/test_protocol_util.py walinuxagent-2.2.45/tests/protocol/test_protocol_util.py --- 
walinuxagent-2.2.21+really2.2.20/tests/protocol/test_protocol_util.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/protocol/test_protocol_util.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from tests.tools import * @@ -60,7 +60,7 @@ tag_file = os.path.join(self.tmp_dir, TAG_FILE_NAME) #Test tag file doesn't exist - protocol_util.get_protocol_by_file() + protocol_util.get_protocol(by_file=True) protocol_util._detect_wire_protocol.assert_any_call() protocol_util._detect_metadata_protocol.assert_not_called() @@ -71,7 +71,7 @@ with open(tag_file, "w+") as tag_fd: tag_fd.write("") - protocol_util.get_protocol_by_file() + protocol_util.get_protocol(by_file=True) protocol_util._detect_metadata_protocol.assert_any_call() protocol_util._detect_wire_protocol.assert_not_called() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/protocol/test_restapi.py walinuxagent-2.2.45/tests/protocol/test_restapi.py --- walinuxagent-2.2.21+really2.2.20/tests/protocol/test_restapi.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/protocol/test_restapi.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -from tests.tools import * -import uuid -import unittest -import os -import shutil -import time -from azurelinuxagent.common.protocol.restapi import * - -class SampleDataContract(DataContract): - def __init__(self): - self.foo = None - self.bar = DataContractList(int) - -class TestDataContract(unittest.TestCase): - def test_get_properties(self): - obj = SampleDataContract() - obj.foo = "foo" - obj.bar.append(1) - data = get_properties(obj) - self.assertEquals("foo", data["foo"]) - self.assertEquals(list, type(data["bar"])) - - def test_set_properties(self): - obj = SampleDataContract() - data = { - 'foo' : 1, - 'baz': 'a' - } - set_properties('sample', obj, data) - self.assertFalse(hasattr(obj, 'baz')) - -if __name__ == '__main__': - unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/protocol/test_wire.py walinuxagent-2.2.45/tests/protocol/test_wire.py --- walinuxagent-2.2.21+really2.2.20/tests/protocol/test_wire.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/protocol/test_wire.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,5 @@ -# Copyright 2014 Microsoft Corporation +# -*- encoding: utf-8 -*- +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,13 +13,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # -import glob +import zipfile -from azurelinuxagent.common import event +from azurelinuxagent.common.telemetryevent import TelemetryEvent, TelemetryEventParam from azurelinuxagent.common.protocol.wire import * +from azurelinuxagent.common.utils.shellutil import run_get_output +from tests.ga.test_monitor import random_generator from tests.protocol.mockwiredata import * data_with_bom = b'\xef\xbb\xbfhehe' @@ -26,15 +29,31 @@ testtype = 'BlockBlob' wireserver_url = '168.63.129.16' + +def get_event(message, duration=30000, evt_type="", is_internal=False, is_success=True, + name="", op="Unknown", version=CURRENT_VERSION, eventId=1): + event = TelemetryEvent(eventId, "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX") + event.parameters.append(TelemetryEventParam('Name', name)) + event.parameters.append(TelemetryEventParam('Version', str(version))) + event.parameters.append(TelemetryEventParam('IsInternal', is_internal)) + event.parameters.append(TelemetryEventParam('Operation', op)) + event.parameters.append(TelemetryEventParam('OperationSuccess', is_success)) + event.parameters.append(TelemetryEventParam('Message', message)) + event.parameters.append(TelemetryEventParam('Duration', duration)) + event.parameters.append(TelemetryEventParam('ExtensionType', evt_type)) + return event + + @patch("time.sleep") @patch("azurelinuxagent.common.protocol.wire.CryptUtil") +@patch("azurelinuxagent.common.protocol.healthservice.HealthService._report") class TestWireProtocol(AgentTestCase): def setUp(self): super(TestWireProtocol, self).setUp() HostPluginProtocol.set_default_channel(False) - - def _test_getters(self, test_data, MockCryptUtil, _): + + def _test_getters(self, test_data, certsMustBePresent, __, MockCryptUtil, _): MockCryptUtil.side_effect = test_data.mock_crypt_util with patch.object(restutil, 'http_get', test_data.mock_http_get): @@ -52,38 +71,52 @@ '4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3.crt') 
prv2 = os.path.join(self.tmp_dir, '4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3.prv') - - self.assertTrue(os.path.isfile(crt1)) - self.assertTrue(os.path.isfile(crt2)) - self.assertTrue(os.path.isfile(prv2)) - + if certsMustBePresent: + self.assertTrue(os.path.isfile(crt1)) + self.assertTrue(os.path.isfile(crt2)) + self.assertTrue(os.path.isfile(prv2)) + else: + self.assertFalse(os.path.isfile(crt1)) + self.assertFalse(os.path.isfile(crt2)) + self.assertFalse(os.path.isfile(prv2)) self.assertEqual("1", protocol.get_incarnation()) def test_getters(self, *args): """Normal case""" test_data = WireProtocolData(DATA_FILE) - self._test_getters(test_data, *args) + self._test_getters(test_data, True, *args) def test_getters_no_ext(self, *args): """Provision with agent is not checked""" test_data = WireProtocolData(DATA_FILE_NO_EXT) - self._test_getters(test_data, *args) + self._test_getters(test_data, True, *args) def test_getters_ext_no_settings(self, *args): """Extensions without any settings""" test_data = WireProtocolData(DATA_FILE_EXT_NO_SETTINGS) - self._test_getters(test_data, *args) + self._test_getters(test_data, True, *args) def test_getters_ext_no_public(self, *args): """Extensions without any public settings""" test_data = WireProtocolData(DATA_FILE_EXT_NO_PUBLIC) - self._test_getters(test_data, *args) + self._test_getters(test_data, True, *args) + + def test_getters_ext_no_cert_format(self, *args): + """Certificate format not specified""" + test_data = WireProtocolData(DATA_FILE_NO_CERT_FORMAT) + self._test_getters(test_data, True, *args) + + def test_getters_ext_cert_format_not_pfx(self, *args): + """Certificate format is not Pkcs7BlobWithPfxContents specified""" + test_data = WireProtocolData(DATA_FILE_CERT_FORMAT_NOT_PFX) + self._test_getters(test_data, False, *args) - def test_getters_with_stale_goal_state(self, *args): + @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_extension_artifact") + def 
test_getters_with_stale_goal_state(self, patch_report, *args): test_data = WireProtocolData(DATA_FILE) test_data.emulate_stale_goal_state = True - self._test_getters(test_data, *args) + self._test_getters(test_data, True, *args) # Ensure HostPlugin was invoked self.assertEqual(1, test_data.call_counts["/versions"]) self.assertEqual(2, test_data.call_counts["extensionArtifact"]) @@ -92,10 +125,9 @@ # fetched often; however, the dependent documents, such as the # HostingEnvironmentConfig, will be retrieved the expected number self.assertEqual(2, test_data.call_counts["hostingenvuri"]) + self.assertEqual(1, patch_report.call_count) - def test_call_storage_kwargs(self, - mock_cryptutil, - mock_sleep): + def test_call_storage_kwargs(self, *args): from azurelinuxagent.common.utils import restutil with patch.object(restutil, 'http_get') as http_patch: http_req = restutil.http_get @@ -129,7 +161,7 @@ use_proxy=True) # assert self.assertTrue(http_patch.call_count == 5) - for i in range(0,5): + for i in range(0, 5): c = http_patch.call_args_list[i][-1]['use_proxy'] self.assertTrue(c == (True if i != 3 else False)) @@ -149,37 +181,62 @@ wire_protocol_client = WireProtocol(wireserver_url).client goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state) - with patch.object(WireClient, "get_goal_state", return_value = goal_state) as patch_get_goal_state: + with patch.object(WireClient, "get_goal_state", return_value=goal_state) as patch_get_goal_state: host_plugin = wire_protocol_client.get_host_plugin() self.assertEqual(goal_state.container_id, host_plugin.container_id) self.assertEqual(goal_state.role_config_name, host_plugin.role_config_name) - patch_get_goal_state.assert_called_once() + self.assertEqual(1, patch_get_goal_state.call_count) - def test_download_ext_handler_pkg_fallback(self, *args): + @patch("azurelinuxagent.common.utils.restutil.http_request", side_effect=IOError) + @patch("azurelinuxagent.common.protocol.wire.WireClient.get_host_plugin") + 
@patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") + def test_download_ext_handler_pkg_fallback(self, patch_request, patch_get_host, patch_http, *args): ext_uri = 'extension_uri' host_uri = 'host_uri' - mock_host = HostPluginProtocol(host_uri, 'container_id', 'role_config') - with patch.object(restutil, - "http_request", - side_effect=IOError) as patch_http: - with patch.object(WireClient, - "get_host_plugin", - return_value=mock_host): - with patch.object(HostPluginProtocol, - "get_artifact_request", - return_value=[host_uri, {}]) as patch_request: - - WireProtocol(wireserver_url).download_ext_handler_pkg(ext_uri) - - self.assertEqual(patch_http.call_count, 2) - self.assertEqual(patch_request.call_count, 1) - - self.assertEqual(patch_http.call_args_list[0][0][1], - ext_uri) - self.assertEqual(patch_http.call_args_list[1][0][1], - host_uri) + destination = 'destination' + patch_get_host.return_value = HostPluginProtocol(host_uri, 'container_id', 'role_config') + patch_request.return_value = [host_uri, {}] + + WireProtocol(wireserver_url).download_ext_handler_pkg(ext_uri, destination) + + self.assertEqual(patch_http.call_count, 2) + self.assertEqual(patch_request.call_count, 1) + self.assertEqual(patch_http.call_args_list[0][0][1], ext_uri) + self.assertEqual(patch_http.call_args_list[1][0][1], host_uri) + + @skip_if_predicate_true(running_under_travis, "Travis unit tests should not have external dependencies") + def test_download_ext_handler_pkg_stream(self, *args): + ext_uri = 'https://dcrdata.blob.core.windows.net/files/packer.zip' + tmp = tempfile.mkdtemp() + destination = os.path.join(tmp, 'test_download_ext_handler_pkg_stream.zip') + + success = WireProtocol(wireserver_url).download_ext_handler_pkg(ext_uri, destination) + self.assertTrue(success) + self.assertTrue(os.path.exists(destination)) + + # verify size + self.assertEqual(33193077, os.stat(destination).st_size) + + # verify unzip + 
zipfile.ZipFile(destination).extractall(tmp) + packer = os.path.join(tmp, 'packer') + self.assertTrue(os.path.exists(packer)) + fileutil.chmod(packer, os.stat(packer).st_mode | stat.S_IXUSR) + + # verify unpacked size + self.assertEqual(105552030, os.stat(packer).st_size) + + # execute, verify result + packer_version = '{0} --version'.format(packer) + rc, stdout = run_get_output(packer_version) + self.assertEqual(0, rc) + self.assertEqual('1.3.5\n', stdout) + @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") def test_upload_status_blob_default(self, *args): + """ + Default status blob method is HostPlugin. + """ vmstatus = VMStatus(message="Ready", status="Ready") wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) @@ -193,10 +250,14 @@ HostPluginProtocol.set_default_channel(False) wire_protocol_client.upload_status_blob() - patch_default_upload.assert_called_once_with(testurl) - patch_get_goal_state.assert_not_called() - patch_host_ga_plugin_upload.assert_not_called() + # do not call the direct method unless host plugin fails + patch_default_upload.assert_not_called() + # host plugin always fetches a goal state + patch_get_goal_state.assert_called_once_with() + # host plugin uploads the status blob + patch_host_ga_plugin_upload.assert_called_once_with(ANY, testurl, 'BlockBlob') + @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") def test_upload_status_blob_host_ga_plugin(self, *args): vmstatus = VMStatus(message="Ready", status="Ready") wire_protocol_client = WireProtocol(wireserver_url).client @@ -217,13 +278,14 @@ HostPluginProtocol.set_default_channel(False) wire_protocol_client.get_goal_state = Mock(return_value=goal_state) wire_protocol_client.upload_status_blob() - patch_default_upload.assert_called_once_with(testurl) - wire_protocol_client.get_goal_state.assert_called_once() + patch_default_upload.assert_not_called() + self.assertEqual(1, 
wire_protocol_client.get_goal_state.call_count) patch_http.assert_called_once_with(testurl, wire_protocol_client.status_blob) - self.assertTrue(HostPluginProtocol.is_default_channel()) - HostPluginProtocol.set_default_channel(False) + self.assertFalse(HostPluginProtocol.is_default_channel()) - def test_upload_status_blob_unknown_type_assumes_block(self, *args): + @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.ensure_initialized") + def test_upload_status_blob_unknown_type_assumes_block(self, _, __, *args): vmstatus = VMStatus(message="Ready", status="Ready") wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) @@ -239,8 +301,9 @@ patch_prepare.assert_called_once_with("BlockBlob") patch_default_upload.assert_called_once_with(testurl) - patch_get_goal_state.assert_not_called() + patch_get_goal_state.assert_called_once_with() + @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") def test_upload_status_blob_reports_prepare_error(self, *args): vmstatus = VMStatus(message="Ready", status="Ready") wire_protocol_client = WireProtocol(wireserver_url).client @@ -250,13 +313,9 @@ wire_protocol_client.status_blob.vm_status = vmstatus goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state) - with patch.object(StatusBlob, "prepare", - side_effect=Exception) as mock_prepare: - with patch.object(WireClient, "report_status_event") as mock_event: - wire_protocol_client.upload_status_blob() - - mock_prepare.assert_called_once() - mock_event.assert_called_once() + with patch.object(StatusBlob, "prepare", side_effect=Exception) as mock_prepare: + self.assertRaises(ProtocolError, wire_protocol_client.upload_status_blob) + self.assertEqual(1, mock_prepare.call_count) def test_get_in_vm_artifacts_profile_blob_not_available(self, *args): wire_protocol_client = WireProtocol(wireserver_url).client @@ 
-265,7 +324,7 @@ # Test when artifacts_profile_blob is null/None self.assertEqual(None, wire_protocol_client.get_artifacts_profile()) - #Test when artifacts_profile_blob is whitespace + # Test when artifacts_profile_blob is whitespace wire_protocol_client.ext_conf.artifacts_profile_blob = " " self.assertEqual(None, wire_protocol_client.get_artifacts_profile()) @@ -277,18 +336,18 @@ wire_protocol_client.get_goal_state = Mock(return_value=goal_state) with patch.object(HostPluginProtocol, "get_artifact_request", - return_value = ['dummy_url', {}]) as host_plugin_get_artifact_url_and_headers: - #Test when response body is None + return_value=['dummy_url', {}]) as host_plugin_get_artifact_url_and_headers: + # Test when response body is None wire_protocol_client.call_storage_service = Mock(return_value=MockResponse(None, 200)) in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile() self.assertTrue(in_vm_artifacts_profile is None) - #Test when response body is None + # Test when response body is None wire_protocol_client.call_storage_service = Mock(return_value=MockResponse(' '.encode('utf-8'), 200)) in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile() self.assertTrue(in_vm_artifacts_profile is None) - #Test when response body is None + # Test when response body is None wire_protocol_client.call_storage_service = Mock(return_value=MockResponse('{ }'.encode('utf-8'), 200)) in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile() self.assertEqual(dict(), in_vm_artifacts_profile.__dict__, @@ -297,6 +356,26 @@ host_plugin_get_artifact_url_and_headers.assert_called_with(testurl) + @patch("azurelinuxagent.common.event.add_event") + def test_artifacts_profile_json_parsing(self, patch_event, *args): + wire_protocol_client = WireProtocol(wireserver_url).client + wire_protocol_client.ext_conf = ExtensionsConfig(None) + wire_protocol_client.ext_conf.artifacts_profile_blob = testurl + goal_state = 
GoalState(WireProtocolData(DATA_FILE).goal_state) + wire_protocol_client.get_goal_state = Mock(return_value=goal_state) + + # response is invalid json + wire_protocol_client.call_storage_service = Mock(return_value=MockResponse("invalid json".encode('utf-8'), 200)) + in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile() + + # ensure response is empty + self.assertEqual(None, in_vm_artifacts_profile) + + # ensure event is logged + self.assertEqual(1, patch_event.call_count) + self.assertFalse(patch_event.call_args[1]['is_success']) + self.assertTrue('invalid json' in patch_event.call_args[1]['message']) + self.assertEqual('ArtifactsProfileBlob', patch_event.call_args[1]['op']) def test_get_in_vm_artifacts_profile_default(self, *args): wire_protocol_client = WireProtocol(wireserver_url).client @@ -305,13 +384,13 @@ goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state) wire_protocol_client.get_goal_state = Mock(return_value=goal_state) - wire_protocol_client.call_storage_service = Mock(return_value=MockResponse('{"onHold": "true"}'.encode('utf-8'), 200)) + wire_protocol_client.call_storage_service = Mock( + return_value=MockResponse('{"onHold": "true"}'.encode('utf-8'), 200)) in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile() self.assertEqual(dict(onHold='true'), in_vm_artifacts_profile.__dict__) self.assertTrue(in_vm_artifacts_profile.is_on_hold()) - @patch("time.sleep") - def test_fetch_manifest_fallback(self, patch_sleep, *args): + def test_fetch_manifest_fallback(self, *args): uri1 = ExtHandlerVersionUri() uri1.uri = 'ext_uri' uris = DataContractList(ExtHandlerVersionUri) @@ -321,28 +400,55 @@ 'container_id', 'role_config') client = WireProtocol(wireserver_url).client - with patch.object(WireClient, - "fetch", - return_value=None) as patch_fetch: - with patch.object(WireClient, - "get_host_plugin", - return_value=mock_host): - with patch.object(HostPluginProtocol, - "get_artifact_request", - return_value=[host_uri, 
{}]): + + with patch.object(WireClient, "fetch", return_value=None) as patch_fetch: + with patch.object(WireClient, "get_host_plugin", return_value=mock_host): + with patch.object(HostPluginProtocol, "get_artifact_request", return_value=[host_uri, {}]): HostPluginProtocol.set_default_channel(False) - self.assertRaises(ProtocolError, client.fetch_manifest, uris) + self.assertRaises(ExtensionDownloadError, client.fetch_manifest, uris) self.assertEqual(patch_fetch.call_count, 2) self.assertEqual(patch_fetch.call_args_list[0][0][0], uri1.uri) self.assertEqual(patch_fetch.call_args_list[1][0][0], host_uri) + # This test checks if the manifest_uri variable is set in the host object of WireClient + # This variable is used when we make /extensionArtifact API calls to the HostGA + def test_fetch_manifest_ensure_manifest_uri_is_set(self, *args): + uri1 = ExtHandlerVersionUri() + uri1.uri = 'ext_uri' + uris = DataContractList(ExtHandlerVersionUri) + uris.append(uri1) + host_uri = 'host_uri' + mock_host = HostPluginProtocol(host_uri, 'container_id', 'role_config') + client = WireProtocol(wireserver_url).client + manifest_return = "manifest.xml" + + with patch.object(WireClient, "get_host_plugin", return_value=mock_host): + mock_host.get_artifact_request = MagicMock(return_value=[host_uri, {}]) + + # First test tried to download directly from blob and asserts manifest_uri is set + with patch.object(WireClient, "fetch", return_value=manifest_return) as patch_fetch: + fetch_manifest_mock = client.fetch_manifest(uris) + self.assertEqual(fetch_manifest_mock, manifest_return) + self.assertEqual(patch_fetch.call_count, 1) + self.assertEqual(mock_host.manifest_uri, uri1.uri) + + # Second test tries to download from the HostGA (by failing the direct download) + # and asserts manifest_uri is set + with patch.object(WireClient, "fetch") as patch_fetch: + patch_fetch.side_effect = [None, manifest_return] + fetch_manifest_mock = client.fetch_manifest(uris) + 
self.assertEqual(fetch_manifest_mock, manifest_return) + self.assertEqual(patch_fetch.call_count, 2) + self.assertEqual(mock_host.manifest_uri, uri1.uri) + self.assertTrue(HostPluginProtocol.is_default_channel()) + def test_get_in_vm_artifacts_profile_host_ga_plugin(self, *args): wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) wire_protocol_client.ext_conf.artifacts_profile_blob = testurl goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state) wire_protocol_client.get_goal_state = Mock(return_value=goal_state) - wire_protocol_client.fetch = Mock(side_effect=[None, '{"onHold": "true"}'.encode('utf-8')]) + wire_protocol_client.fetch = Mock(side_effect=[None, '{"onHold": "true"}']) with patch.object(HostPluginProtocol, "get_artifact_request", return_value=['dummy_url', {}]) as artifact_request: @@ -386,17 +492,596 @@ 'version': '1.1', 'timestampUTC': timestamp, 'aggregateStatus': v1_agg_status, - 'guestOSInfo' : v1_ga_guest_info + 'guestOSInfo': v1_ga_guest_info } self.assertEqual(json.dumps(v1_vm_status), actual.to_json()) + @patch("azurelinuxagent.common.utils.restutil.http_request") + def test_send_event(self, mock_http_request, *args): + mock_http_request.return_value = MockResponse("", 200) + + event_str = u'a test string' + client = WireProtocol(wireserver_url).client + client.send_event("foo", event_str) + + first_call = mock_http_request.call_args_list[0] + args, kwargs = first_call + method, url, body_received = args + headers = kwargs['headers'] + + # the headers should include utf-8 encoding... 
+ self.assertTrue("utf-8" in headers['Content-Type']) + # the body is not encoded, just check for equality + self.assertIn(event_str, body_received) + + @patch("azurelinuxagent.common.protocol.wire.WireClient.send_event") + def test_report_event_small_event(self, patch_send_event, *args): + event_list = TelemetryEventList() + client = WireProtocol(wireserver_url).client + + event_str = random_generator(10) + event_list.events.append(get_event(message=event_str)) + + event_str = random_generator(100) + event_list.events.append(get_event(message=event_str)) + + event_str = random_generator(1000) + event_list.events.append(get_event(message=event_str)) + + event_str = random_generator(10000) + event_list.events.append(get_event(message=event_str)) + + client.report_event(event_list) + + # It merges the messages into one message + self.assertEqual(patch_send_event.call_count, 1) + + @patch("azurelinuxagent.common.protocol.wire.WireClient.send_event") + def test_report_event_multiple_events_to_fill_buffer(self, patch_send_event, *args): + event_list = TelemetryEventList() + client = WireProtocol(wireserver_url).client + + event_str = random_generator(2 ** 15) + event_list.events.append(get_event(message=event_str)) + event_list.events.append(get_event(message=event_str)) + + client.report_event(event_list) + + # It merges the messages into one message + self.assertEqual(patch_send_event.call_count, 2) + + @patch("azurelinuxagent.common.protocol.wire.WireClient.send_event") + def test_report_event_large_event(self, patch_send_event, *args): + event_list = TelemetryEventList() + event_str = random_generator(2 ** 18) + event_list.events.append(get_event(message=event_str)) + client = WireProtocol(wireserver_url).client + client.report_event(event_list) + + self.assertEqual(patch_send_event.call_count, 0) + + +class TestWireClient(AgentTestCase): + + def test_save_or_update_goal_state_should_save_new_goal_state_file(self): + # Assert the file didn't exist before + 
incarnation = 42 + goal_state_file = os.path.join(conf.get_lib_dir(), "GoalState.{0}.xml".format(incarnation)) + self.assertFalse(os.path.exists(goal_state_file)) + + xml_text = WireProtocolData(DATA_FILE).goal_state + client = WireClient(wireserver_url) + client.save_or_update_goal_state_file(incarnation, xml_text) + + # Assert the file exists and its contents + self.assertTrue(os.path.exists(goal_state_file)) + with open(goal_state_file, "r") as f: + contents = f.readlines() + self.assertEquals("".join(contents), xml_text) + + def test_save_or_update_goal_state_should_update_existing_goal_state_file(self): + incarnation = 42 + goal_state_file = os.path.join(conf.get_lib_dir(), "GoalState.{0}.xml".format(incarnation)) + xml_text = WireProtocolData(DATA_FILE).goal_state + + with open(goal_state_file, "w") as f: + f.write(xml_text) + + # Assert the file exists and its contents + self.assertTrue(os.path.exists(goal_state_file)) + with open(goal_state_file, "r") as f: + contents = f.readlines() + self.assertEquals("".join(contents), xml_text) + + # Update the container id + new_goal_state = WireProtocolData(DATA_FILE).goal_state.replace("c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2", + "z6d5526c-5ac2-4200-b6e2-56f2b70c5ab2") + client = WireClient(wireserver_url) + client.save_or_update_goal_state_file(incarnation, new_goal_state) + + # Assert the file exists and its contents + self.assertTrue(os.path.exists(goal_state_file)) + with open(goal_state_file, "r") as f: + contents = f.readlines() + self.assertEquals("".join(contents), new_goal_state) + + def test_save_or_update_goal_state_should_update_goal_state_and_container_id_when_not_forced(self): + incarnation = "1" # Match the incarnation number from dummy goal state file + incarnation_file = os.path.join(conf.get_lib_dir(), INCARNATION_FILE_NAME) + with open(incarnation_file, "w") as f: + f.write(incarnation) + + xml_text = WireProtocolData(DATA_FILE).goal_state + goal_state_file = os.path.join(conf.get_lib_dir(), 
"GoalState.{0}.xml".format(incarnation)) + + with open(goal_state_file, "w") as f: + f.write(xml_text) + + client = WireClient(wireserver_url) + host = client.get_host_plugin() + old_container_id = host.container_id + + # Update the container id + new_goal_state = WireProtocolData(DATA_FILE).goal_state.replace("c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2", + "z6d5526c-5ac2-4200-b6e2-56f2b70c5ab2") + with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch_config", return_value=new_goal_state): + client.update_goal_state(forced=False) + + self.assertNotEqual(old_container_id, host.container_id) + self.assertEquals(host.container_id, "z6d5526c-5ac2-4200-b6e2-56f2b70c5ab2") + + @patch("azurelinuxagent.common.protocol.wire.WireClient.get_goal_state") + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") + def test_download_ext_handler_pkg_should_not_invoke_host_channel_when_direct_channel_succeeds(self, mock_get_artifact_request, *args): + mock_get_artifact_request.return_value = "dummy_url", "dummy_header" + protocol = WireProtocol("foo.bar") + HostPluginProtocol.set_default_channel(False) + + mock_successful_response = MockResponse(body=b"OK", status_code=200) + destination = os.path.join(self.tmp_dir, "tmp_file") + + # Direct channel succeeds + with patch("azurelinuxagent.common.utils.restutil._http_request", return_value=mock_successful_response): + with patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") as mock_update_goal_state: + with patch("azurelinuxagent.common.protocol.wire.WireClient.stream", wraps=protocol.client.stream) \ + as patch_direct: + with patch("azurelinuxagent.common.protocol.wire.WireProtocol.download_ext_handler_pkg_through_host", + wraps=protocol.download_ext_handler_pkg_through_host) as patch_host: + ret = protocol.download_ext_handler_pkg("uri", destination) + self.assertEquals(ret, True) + + self.assertEquals(patch_host.call_count, 0) + 
self.assertEquals(patch_direct.call_count, 1) + self.assertEquals(mock_update_goal_state.call_count, 0) + + self.assertEquals(HostPluginProtocol.is_default_channel(), False) + + @patch("azurelinuxagent.common.protocol.wire.WireClient.get_goal_state") + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") + def test_download_ext_handler_pkg_should_use_host_channel_when_direct_channel_fails(self, mock_get_artifact_request, *args): + mock_get_artifact_request.return_value = "dummy_url", "dummy_header" + protocol = WireProtocol("foo.bar") + HostPluginProtocol.set_default_channel(False) + + mock_failed_response = MockResponse(body=b"", status_code=httpclient.GONE) + mock_successful_response = MockResponse(body=b"OK", status_code=200) + destination = os.path.join(self.tmp_dir, "tmp_file") + + # Direct channel fails, host channel succeeds. Goal state should not have been updated and host channel + # should have been set as default. + with patch("azurelinuxagent.common.utils.restutil._http_request", + side_effect=[mock_failed_response, mock_successful_response]): + with patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") as mock_update_goal_state: + with patch("azurelinuxagent.common.protocol.wire.WireClient.stream", wraps=protocol.client.stream) \ + as patch_direct: + with patch("azurelinuxagent.common.protocol.wire.WireProtocol.download_ext_handler_pkg_through_host", + wraps=protocol.download_ext_handler_pkg_through_host) as patch_host: + ret = protocol.download_ext_handler_pkg("uri", destination) + self.assertEquals(ret, True) + + self.assertEquals(patch_host.call_count, 1) + # The host channel calls the direct function under the covers + self.assertEquals(patch_direct.call_count, 1 + patch_host.call_count) + self.assertEquals(mock_update_goal_state.call_count, 0) + + self.assertEquals(HostPluginProtocol.is_default_channel(), True) + + @patch("azurelinuxagent.common.protocol.wire.WireClient.get_goal_state") 
+ @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") + def test_download_ext_handler_pkg_should_retry_the_host_channel_after_reloading_goal_state(self, mock_get_artifact_request, *args): + mock_get_artifact_request.return_value = "dummy_url", "dummy_header" + protocol = WireProtocol("foo.bar") + HostPluginProtocol.set_default_channel(False) + + mock_failed_response = MockResponse(body=b"", status_code=httpclient.GONE) + mock_successful_response = MockResponse(body=b"OK", status_code=200) + destination = os.path.join(self.tmp_dir, "tmp_file") + + # Direct channel fails, host channel fails due to stale goal state, host channel succeeds after refresh. + # As a consequence, goal state should have been updated and host channel should have been set as default. + with patch("azurelinuxagent.common.utils.restutil._http_request", + side_effect=[mock_failed_response, mock_failed_response, mock_successful_response]): + with patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") as mock_update_goal_state: + with patch("azurelinuxagent.common.protocol.wire.WireClient.stream", wraps=protocol.client.stream) \ + as patch_direct: + with patch("azurelinuxagent.common.protocol.wire.WireProtocol.download_ext_handler_pkg_through_host", + wraps=protocol.download_ext_handler_pkg_through_host) as patch_host: + ret = protocol.download_ext_handler_pkg("uri", destination) + self.assertEquals(ret, True) + + self.assertEquals(patch_host.call_count, 2) + # The host channel calls the direct function under the covers + self.assertEquals(patch_direct.call_count, 1 + patch_host.call_count) + self.assertEquals(mock_update_goal_state.call_count, 1) + + self.assertEquals(HostPluginProtocol.is_default_channel(), True) + + @patch("azurelinuxagent.common.protocol.wire.WireClient.get_goal_state") + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") + def 
test_download_ext_handler_pkg_should_update_goal_state_and_not_change_default_channel_if_host_fails(self, mock_get_artifact_request, *args): + mock_get_artifact_request.return_value = "dummy_url", "dummy_header" + protocol = WireProtocol("foo.bar") + HostPluginProtocol.set_default_channel(False) + + mock_failed_response = MockResponse(body=b"", status_code=httpclient.GONE) + destination = os.path.join(self.tmp_dir, "tmp_file") + + # Everything fails. Goal state should have been updated and host channel should not have been set as default. + with patch("azurelinuxagent.common.utils.restutil._http_request", return_value=mock_failed_response): + with patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") as mock_update_goal_state: + with patch("azurelinuxagent.common.protocol.wire.WireClient.stream", wraps=protocol.client.stream) \ + as patch_direct: + with patch("azurelinuxagent.common.protocol.wire.WireProtocol.download_ext_handler_pkg_through_host", + wraps=protocol.download_ext_handler_pkg_through_host) as patch_host: + ret = protocol.download_ext_handler_pkg("uri", destination) + self.assertEquals(ret, False) + + self.assertEquals(patch_host.call_count, 2) + # The host channel calls the direct function under the covers + self.assertEquals(patch_direct.call_count, 1 + patch_host.call_count) + self.assertEquals(mock_update_goal_state.call_count, 1) + + self.assertEquals(HostPluginProtocol.is_default_channel(), False) + + @patch("azurelinuxagent.common.protocol.wire.WireClient.get_goal_state") + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") + def test_fetch_manifest_should_not_invoke_host_channel_when_direct_channel_succeeds(self, mock_get_artifact_request, *args): + mock_get_artifact_request.return_value = "dummy_url", "dummy_header" + client = WireClient("foo.bar") + + HostPluginProtocol.set_default_channel(False) + mock_successful_response = MockResponse(body=b"OK", status_code=200) + + # Direct 
channel succeeds + with patch("azurelinuxagent.common.utils.restutil._http_request", return_value=mock_successful_response): + with patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") as mock_update_goal_state: + with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch", wraps=client.fetch) as patch_direct: + with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch_manifest_through_host", + wraps=client.fetch_manifest_through_host) as patch_host: + ret = client.fetch_manifest([VMAgentManifestUri(uri="uri1")]) + self.assertEquals(ret, "OK") + + self.assertEquals(patch_host.call_count, 0) + # The host channel calls the direct function under the covers + self.assertEquals(patch_direct.call_count, 1) + self.assertEquals(mock_update_goal_state.call_count, 0) + + self.assertEquals(HostPluginProtocol.is_default_channel(), False) + + @patch("azurelinuxagent.common.protocol.wire.WireClient.get_goal_state") + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") + def test_fetch_manifest_should_use_host_channel_when_direct_channel_fails(self, mock_get_artifact_request, *args): + mock_get_artifact_request.return_value = "dummy_url", "dummy_header" + client = WireClient("foo.bar") + + HostPluginProtocol.set_default_channel(False) + + mock_failed_response = MockResponse(body=b"", status_code=httpclient.GONE) + mock_successful_response = MockResponse(body=b"OK", status_code=200) + + # Direct channel fails, host channel succeeds. 
Goal state should not have been updated and host channel + # should have been set as default + with patch("azurelinuxagent.common.utils.restutil._http_request", + side_effect=[mock_failed_response, mock_successful_response]): + with patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") as mock_update_goal_state: + with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch", wraps=client.fetch) as patch_direct: + with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch_manifest_through_host", + wraps=client.fetch_manifest_through_host) as patch_host: + ret = client.fetch_manifest([VMAgentManifestUri(uri="uri1")]) + self.assertEquals(ret, "OK") + + self.assertEquals(patch_host.call_count, 1) + # The host channel calls the direct function under the covers + self.assertEquals(patch_direct.call_count, 1 + patch_host.call_count) + self.assertEquals(mock_update_goal_state.call_count, 0) + + self.assertEquals(HostPluginProtocol.is_default_channel(), True) + + # Reset default channel + HostPluginProtocol.set_default_channel(False) + + @patch("azurelinuxagent.common.protocol.wire.WireClient.get_goal_state") + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") + def test_fetch_manifest_should_retry_the_host_channel_after_reloading_goal_state(self, mock_get_artifact_request, *args): + mock_get_artifact_request.return_value = "dummy_url", "dummy_header" + client = WireClient("foo.bar") + + HostPluginProtocol.set_default_channel(False) + + mock_failed_response = MockResponse(body=b"", status_code=httpclient.GONE) + mock_successful_response = MockResponse(body=b"OK", status_code=200) + + # Direct channel fails, host channel fails due to stale goal state, host channel succeeds after refresh. + # As a consequence, goal state should have been updated and host channel should have been set as default. 
+ with patch("azurelinuxagent.common.utils.restutil._http_request", + side_effect=[mock_failed_response, mock_failed_response, mock_successful_response]): + with patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") as mock_update_goal_state: + with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch", wraps=client.fetch) as patch_direct: + with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch_manifest_through_host", + wraps=client.fetch_manifest_through_host) as patch_host: + ret = client.fetch_manifest([VMAgentManifestUri(uri="uri1")]) + self.assertEquals(ret, "OK") + + self.assertEquals(patch_host.call_count, 2) + # The host channel calls the direct function under the covers + self.assertEquals(patch_direct.call_count, 1 + patch_host.call_count) + self.assertEquals(mock_update_goal_state.call_count, 1) + + self.assertEquals(HostPluginProtocol.is_default_channel(), True) + + @patch("azurelinuxagent.common.protocol.wire.WireClient.get_goal_state") + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") + def test_fetch_manifest_should_update_goal_state_and_not_change_default_channel_if_host_fails(self, mock_get_artifact_request, *args): + mock_get_artifact_request.return_value = "dummy_url", "dummy_header" + client = WireClient("foo.bar") + + HostPluginProtocol.set_default_channel(False) + mock_failed_response = MockResponse(body=b"", status_code=httpclient.GONE) + + # Everything fails. Goal state should have been updated and host channel should not have been set as default. 
+ with patch("azurelinuxagent.common.utils.restutil._http_request", return_value=mock_failed_response): + with patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") as mock_update_goal_state: + with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch", wraps=client.fetch) as patch_direct: + with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch_manifest_through_host", + wraps=client.fetch_manifest_through_host) as patch_host: + with self.assertRaises(ExtensionDownloadError): + client.fetch_manifest([VMAgentManifestUri(uri="uri1")]) + + self.assertEquals(patch_host.call_count, 2) + # The host channel calls the direct function under the covers + self.assertEquals(patch_direct.call_count, 1 + patch_host.call_count) + self.assertEquals(mock_update_goal_state.call_count, 1) + + self.assertEquals(HostPluginProtocol.is_default_channel(), False) + + @patch("azurelinuxagent.common.protocol.wire.WireClient.get_goal_state") + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") + def test_get_artifacts_profile_should_not_invoke_host_channel_when_direct_channel_succeeds(self, mock_get_artifact_request, *args): + mock_get_artifact_request.return_value = "dummy_url", "dummy_header" + client = WireClient("foo.bar") + client.ext_conf = ExtensionsConfig(None) + client.ext_conf.artifacts_profile_blob = "testurl" + json_profile = b'{ "onHold": true }' + + HostPluginProtocol.set_default_channel(False) + mock_successful_response = MockResponse(body=json_profile, status_code=200) + + # Direct channel succeeds + with patch("azurelinuxagent.common.utils.restutil._http_request", return_value=mock_successful_response): + with patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") as mock_update_goal_state: + with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch", wraps=client.fetch) as patch_direct: + with 
patch("azurelinuxagent.common.protocol.wire.WireClient.get_artifacts_profile_through_host", + wraps=client.get_artifacts_profile_through_host) as patch_host: + ret = client.get_artifacts_profile() + self.assertIsInstance(ret, InVMArtifactsProfile) + + self.assertEquals(patch_host.call_count, 0) + self.assertEquals(patch_direct.call_count, 1) + self.assertEquals(mock_update_goal_state.call_count, 0) + + self.assertEquals(HostPluginProtocol.is_default_channel(), False) + + @patch("azurelinuxagent.common.protocol.wire.WireClient.get_goal_state") + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") + def test_get_artifacts_profile_should_use_host_channel_when_direct_channel_fails(self, mock_get_artifact_request, *args): + mock_get_artifact_request.return_value = "dummy_url", "dummy_header" + client = WireClient("foo.bar") + client.ext_conf = ExtensionsConfig(None) + client.ext_conf.artifacts_profile_blob = "testurl" + json_profile = b'{ "onHold": true }' + + HostPluginProtocol.set_default_channel(False) + + mock_failed_response = MockResponse(body=b"", status_code=httpclient.GONE) + mock_successful_response = MockResponse(body=json_profile, status_code=200) + + # Direct channel fails, host channel succeeds. 
Goal state should not have been updated and host channel + # should have been set as default + with patch("azurelinuxagent.common.utils.restutil._http_request", + side_effect=[mock_failed_response, mock_successful_response]): + with patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") as mock_update_goal_state: + with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch", wraps=client.fetch) as patch_direct: + with patch("azurelinuxagent.common.protocol.wire.WireClient.get_artifacts_profile_through_host", + wraps=client.get_artifacts_profile_through_host) as patch_host: + ret = client.get_artifacts_profile() + self.assertIsInstance(ret, InVMArtifactsProfile) + + self.assertEquals(patch_host.call_count, 1) + # The host channel calls the direct function under the covers + self.assertEquals(patch_direct.call_count, 1 + patch_host.call_count) + self.assertEquals(mock_update_goal_state.call_count, 0) + + self.assertEquals(HostPluginProtocol.is_default_channel(), True) + + @patch("azurelinuxagent.common.protocol.wire.WireClient.get_goal_state") + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") + def test_get_artifacts_profile_should_retry_the_host_channel_after_reloading_goal_state(self, mock_get_artifact_request, *args): + mock_get_artifact_request.return_value = "dummy_url", "dummy_header" + client = WireClient("foo.bar") + client.ext_conf = ExtensionsConfig(None) + client.ext_conf.artifacts_profile_blob = "testurl" + json_profile = b'{ "onHold": true }' + + HostPluginProtocol.set_default_channel(False) + + mock_failed_response = MockResponse(body=b"", status_code=httpclient.GONE) + mock_successful_response = MockResponse(body=json_profile, status_code=200) + + # Direct channel fails, host channel fails due to stale goal state, host channel succeeds after refresh. + # As a consequence, goal state should have been updated and host channel should have been set as default. 
+ with patch("azurelinuxagent.common.utils.restutil._http_request", + side_effect=[mock_failed_response, mock_failed_response, mock_successful_response]): + with patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") as mock_update_goal_state: + with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch", wraps=client.fetch) as patch_direct: + with patch("azurelinuxagent.common.protocol.wire.WireClient.get_artifacts_profile_through_host", + wraps=client.get_artifacts_profile_through_host) as patch_host: + ret = client.get_artifacts_profile() + self.assertIsInstance(ret, InVMArtifactsProfile) + + self.assertEquals(patch_host.call_count, 2) + # The host channel calls the direct function under the covers + self.assertEquals(patch_direct.call_count, 1 + patch_host.call_count) + self.assertEquals(mock_update_goal_state.call_count, 1) + + self.assertEquals(HostPluginProtocol.is_default_channel(), True) + + @patch("azurelinuxagent.common.protocol.wire.WireClient.get_goal_state") + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") + def test_get_artifacts_profile_should_update_goal_state_and_not_change_default_channel_if_host_fails(self, mock_get_artifact_request, *args): + mock_get_artifact_request.return_value = "dummy_url", "dummy_header" + client = WireClient("foo.bar") + client.ext_conf = ExtensionsConfig(None) + client.ext_conf.artifacts_profile_blob = "testurl" + json_profile = b'{ "onHold": true }' + + HostPluginProtocol.set_default_channel(False) + + mock_failed_response = MockResponse(body=b"", status_code=httpclient.GONE) + + # Everything fails. Goal state should have been updated and host channel should not have been set as default. 
+ with patch("azurelinuxagent.common.utils.restutil._http_request", return_value=mock_failed_response): + with patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") as mock_update_goal_state: + with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch", wraps=client.fetch) as patch_direct: + with patch("azurelinuxagent.common.protocol.wire.WireClient.get_artifacts_profile_through_host", + wraps=client.get_artifacts_profile_through_host) as patch_host: + ret = client.get_artifacts_profile() + self.assertEquals(ret, None) + + self.assertEquals(patch_host.call_count, 2) + # The host channel calls the direct function under the covers + self.assertEquals(patch_direct.call_count, 1 + patch_host.call_count) + self.assertEquals(mock_update_goal_state.call_count, 1) + + self.assertEquals(HostPluginProtocol.is_default_channel(), False) + + def test_send_request_using_appropriate_channel_should_not_invoke_host_channel_when_direct_channel_succeeds(self, *args): + xml_text = WireProtocolData(DATA_FILE).goal_state + client = WireClient(wireserver_url) + client.goal_state = GoalState(xml_text) + client.get_host_plugin().set_default_channel(False) + + def direct_func(*args): + direct_func.counter += 1 + return 42 + + def host_func(*args): + host_func.counter += 1 + return None + + direct_func.counter = 0 + host_func.counter = 0 + + # Assert we've only called the direct channel functions and that it succeeded. 
+ ret = client.send_request_using_appropriate_channel(direct_func, host_func) + self.assertEquals(42, ret) + self.assertEquals(1, direct_func.counter) + self.assertEquals(0, host_func.counter) + + def test_send_request_using_appropriate_channel_should_not_use_direct_channel_when_host_channel_is_default(self, *args): + xml_text = WireProtocolData(DATA_FILE).goal_state + client = WireClient(wireserver_url) + client.goal_state = GoalState(xml_text) + client.get_host_plugin().set_default_channel(True) + + def direct_func(*args): + direct_func.counter += 1 + return 42 + + def host_func(*args): + host_func.counter += 1 + return 43 + + direct_func.counter = 0 + host_func.counter = 0 + + # Assert we've only called the host channel function since it's the default channel + ret = client.send_request_using_appropriate_channel(direct_func, host_func) + self.assertEquals(43, ret) + self.assertEquals(0, direct_func.counter) + self.assertEquals(1, host_func.counter) + + def test_send_request_using_appropriate_channel_should_use_host_channel_when_direct_channel_fails(self, *args): + xml_text = WireProtocolData(DATA_FILE).goal_state + client = WireClient(wireserver_url) + client.goal_state = GoalState(xml_text) + host = client.get_host_plugin() + host.set_default_channel(False) + + def direct_func(*args): + direct_func.counter += 1 + raise InvalidContainerError() + + def host_func(*args): + host_func.counter += 1 + return 42 + + direct_func.counter = 0 + host_func.counter = 0 + + # Assert we've called both the direct channel function and the host channel function, which succeeded. + # After the host channel succeeds, the host plugin should have been set as the default channel. 
+ ret = client.send_request_using_appropriate_channel(direct_func, host_func) + self.assertEquals(42, ret) + self.assertEquals(1, direct_func.counter) + self.assertEquals(1, host_func.counter) + self.assertEquals(True, host.is_default_channel()) + + def test_send_request_using_appropriate_channel_should_retry_the_host_channel_after_reloading_goal_state(self, *args): + xml_text = WireProtocolData(DATA_FILE).goal_state + client = WireClient(wireserver_url) + client.goal_state = GoalState(xml_text) + client.get_host_plugin().set_default_channel(False) + + def direct_func(*args): + direct_func.counter += 1 + raise InvalidContainerError() + + def host_func(*args): + host_func.counter += 1 + if host_func.counter == 1: + raise ResourceGoneError("Resource is gone") + return 42 + + direct_func.counter = 0 + host_func.counter = 0 + + # Assert we've called both the direct channel function (once) and the host channel function (twice). + # After the host channel succeeds, the host plugin should have been set as the default channel. 
+ with patch('azurelinuxagent.common.protocol.wire.WireClient.update_goal_state') as mock_update_goal_state: + ret = client.send_request_using_appropriate_channel(direct_func, host_func) + self.assertEquals(42, ret) + self.assertEquals(1, direct_func.counter) + self.assertEquals(2, host_func.counter) + self.assertEquals(1, mock_update_goal_state.call_count) + self.assertEquals(True, client.get_host_plugin().is_default_channel()) + class MockResponse: def __init__(self, body, status_code): self.body = body self.status = status_code - def read(self): + def read(self, *_): return self.body diff -Nru walinuxagent-2.2.21+really2.2.20/tests/test_agent.py walinuxagent-2.2.45/tests/test_agent.py --- walinuxagent-2.2.21+really2.2.20/tests/test_agent.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/test_agent.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,12 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # -import mock import os.path -import sys from azurelinuxagent.agent import * from azurelinuxagent.common.conf import * @@ -28,18 +26,22 @@ """AutoUpdate.Enabled = True AutoUpdate.GAFamily = Prod Autoupdate.Frequency = 3600 +CGroups.EnforceLimits = False +CGroups.Excluded = customscript,runcommand DVD.MountPoint = /mnt/cdrom/secure DetectScvmmEnv = False -EnableOverProvisioning = False +EnableOverProvisioning = True Extension.LogDir = /var/log/azure +Extensions.Enabled = True HttpProxy.Host = None HttpProxy.Port = None Lib.Dir = /var/lib/waagent +Logs.Console = True Logs.Verbose = False OS.AllowHTTP = False OS.CheckRdmaDriver = False OS.EnableFIPS = True -OS.EnableFirewall = True +OS.EnableFirewall = False OS.EnableRDMA = False OS.HomeDir = /home OS.OpensslPath = /usr/bin/openssl @@ -50,18 +52,18 @@ OS.SudoersDir = /etc/sudoers.d OS.UpdateRdmaDriver = False Pid.File = /var/run/waagent.pid +Provisioning.Agent = auto Provisioning.AllowResetSysUser = False Provisioning.DecodeCustomData = False Provisioning.DeleteRootPassword = True -Provisioning.Enabled = True Provisioning.ExecuteCustomData = False Provisioning.MonitorHostName = True Provisioning.PasswordCryptId = 6 Provisioning.PasswordCryptSaltLength = 10 Provisioning.RegenerateSshHostKeyPair = True Provisioning.SshHostKeyPairType = rsa -Provisioning.UseCloudInit = True ResourceDisk.EnableSwap = False +ResourceDisk.EnableSwapEncryption = False ResourceDisk.Filesystem = ext4 ResourceDisk.Format = True ResourceDisk.MountOptions = None @@ -72,13 +74,13 @@ def test_accepts_configuration_path(self): conf_path = os.path.join(data_dir, "test_waagent.conf") - c, f, v, cfp = parse_args(["-configuration-path:" + conf_path]) + c, f, v, d, cfp = parse_args(["-configuration-path:" + conf_path]) self.assertEqual(cfp, conf_path) @patch("os.path.exists", return_value=True) def test_checks_configuration_path(self, mock_exists): conf_path = 
"/foo/bar-baz/something.conf" - c, f, v, cfp = parse_args(["-configuration-path:"+conf_path]) + c, f, v, d, cfp = parse_args(["-configuration-path:"+conf_path]) self.assertEqual(cfp, conf_path) self.assertEqual(mock_exists.call_count, 1) @@ -87,13 +89,13 @@ @patch("sys.exit", side_effect=Exception) def test_rejects_missing_configuration_path(self, mock_exit, mock_exists, mock_stderr): try: - c, f, v, cfp = parse_args(["-configuration-path:/foo/bar.conf"]) + c, f, v, d, cfp = parse_args(["-configuration-path:/foo/bar.conf"]) self.assertTrue(False) except Exception: self.assertEqual(mock_exit.call_count, 1) def test_configuration_path_defaults_to_none(self): - c, f, v, cfp = parse_args([]) + c, f, v, d, cfp = parse_args([]) self.assertEqual(cfp, None) def test_agent_accepts_configuration_path(self): @@ -119,7 +121,7 @@ agent.daemon() mock_daemon.run.assert_called_once_with(child_args=None) - mock_load.assert_called_once() + self.assertEqual(1, mock_load.call_count) @patch("azurelinuxagent.daemon.get_daemon_handler") @patch("azurelinuxagent.common.conf.load_conf_from_file") @@ -133,7 +135,7 @@ agent.daemon() mock_daemon.run.assert_called_once_with(child_args="-configuration-path:/foo/bar.conf") - mock_load.assert_called_once() + self.assertEqual(1, mock_load.call_count) @patch("azurelinuxagent.common.conf.get_ext_log_dir") def test_agent_ensures_extension_log_directory(self, mock_dir): @@ -158,7 +160,7 @@ conf_file_path=os.path.join(data_dir, "test_waagent.conf")) self.assertTrue(os.path.isfile(ext_log_dir)) self.assertFalse(os.path.isdir(ext_log_dir)) - mock_log.assert_called_once() + self.assertEqual(1, mock_log.call_count) def test_agent_get_configuration(self): Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf")) @@ -168,3 +170,22 @@ for k in sorted(configuration.keys()): actual_configuration.append("{0} = {1}".format(k, configuration[k])) self.assertEqual(EXPECTED_CONFIGURATION, actual_configuration) + + def test_agent_usage_message(self): + 
message = usage() + + # Python 2.6 does not have assertIn() + self.assertTrue("-verbose" in message) + self.assertTrue("-force" in message) + self.assertTrue("-help" in message) + self.assertTrue("-configuration-path" in message) + self.assertTrue("-deprovision" in message) + self.assertTrue("-register-service" in message) + self.assertTrue("-version" in message) + self.assertTrue("-daemon" in message) + self.assertTrue("-start" in message) + self.assertTrue("-run-exthandlers" in message) + self.assertTrue("-show-configuration" in message) + + # sanity check + self.assertFalse("-not-a-valid-option" in message) diff -Nru walinuxagent-2.2.21+really2.2.20/tests/test_import.py walinuxagent-2.2.45/tests/test_import.py --- walinuxagent-2.2.21+really2.2.20/tests/test_import.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/test_import.py 2019-11-07 00:36:56.000000000 +0000 @@ -9,8 +9,10 @@ import azurelinuxagent.daemon.scvmm as scvmm import azurelinuxagent.ga.exthandlers as exthandlers import azurelinuxagent.ga.monitor as monitor +import azurelinuxagent.ga.remoteaccess as remoteaccess import azurelinuxagent.ga.update as update + class TestImportHandler(AgentTestCase): def test_get_handler(self): osutil.get_osutil() @@ -24,3 +26,4 @@ monitor.get_monitor_handler() update.get_update_handler() exthandlers.get_exthandlers_handler() + remoteaccess.get_remote_access_handler() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/tools.py walinuxagent-2.2.45/tests/tools.py --- walinuxagent-2.2.21+really2.2.20/tests/tools.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/tools.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -12,34 +12,41 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # """ Define util functions for unit test """ - +import difflib import os +import pprint import re import shutil +import stat +import sys import tempfile import unittest from functools import wraps import time +from azurelinuxagent.common.cgroupconfigurator import CGroupConfigurator import azurelinuxagent.common.event as event import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.osutil.factory import _get_osutil +from azurelinuxagent.common.osutil.ubuntu import Ubuntu14OSUtil, Ubuntu16OSUtil from azurelinuxagent.common.utils import fileutil - from azurelinuxagent.common.version import PY_VERSION_MAJOR -# Import mock module for Python2 and Python3 try: - from unittest.mock import Mock, patch, MagicMock, DEFAULT, call + from unittest.mock import Mock, patch, MagicMock, ANY, DEFAULT, call + + # Import mock module for Python2 and Python3 + from bin.waagent2 import Agent except ImportError: - from mock import Mock, patch, MagicMock, DEFAULT, call + from mock import Mock, patch, MagicMock, ANY, DEFAULT, call test_dir = os.path.dirname(os.path.abspath(__file__)) data_dir = os.path.join(test_dir, "data") @@ -53,8 +60,157 @@ logger.add_logger_appender(logger.AppenderType.STDOUT, logger.LogLevel.VERBOSE) +_MAX_LENGTH = 120 + +_MAX_LENGTH_SAFE_REPR = 80 + +# Mock sleep to reduce test execution time +_SLEEP = time.sleep + + +def mock_sleep(sec=0.01): + """ + Mocks the time.sleep method to reduce unit test time + :param sec: Time to replace the sleep call with, default = 0.01sec + """ + _SLEEP(sec) + + +def safe_repr(obj, short=False): + try: + result = repr(obj) + except Exception: + result = object.__repr__(obj) + if not short or len(result) < _MAX_LENGTH: + return result + return 
result[:_MAX_LENGTH_SAFE_REPR] + ' [truncated]...' + + +def skip_if_predicate_false(predicate, message): + if not predicate(): + if hasattr(unittest, "skip"): + return unittest.skip(message) + return lambda func: None + return lambda func: func + + +def skip_if_predicate_true(predicate, message): + if predicate(): + if hasattr(unittest, "skip"): + return unittest.skip(message) + return lambda func: None + return lambda func: func + + +def _safe_repr(obj, short=False): + """ + Copied from Python 3.x + """ + try: + result = repr(obj) + except Exception: + result = object.__repr__(obj) + if not short or len(result) < _MAX_LENGTH: + return result + return result[:_MAX_LENGTH] + ' [truncated]...' + + +def running_under_travis(): + return 'TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true' + + +def get_osutil_for_travis(): + distro_name = os.environ['_system_name'].lower() + distro_version = os.environ['_system_version'] + + if distro_name == "ubuntu" and distro_version == "14.04": + return Ubuntu14OSUtil() + + if distro_name == "ubuntu" and distro_version == "16.04": + return Ubuntu16OSUtil() + + +def mock_get_osutil(*args): + # It's a known issue that calling platform.linux_distribution() in Travis will result in the wrong info. + # See https://github.com/travis-ci/travis-ci/issues/2755 + # When running in Travis, use manual distro resolution that relies on environment variables. + if running_under_travis(): + return get_osutil_for_travis() + else: + return _get_osutil(*args) + + +def are_cgroups_enabled(): + # We use a function decorator to check if cgroups are enabled in multiple tests, which at some point calls + # get_osutil. The global mock for that function doesn't get executed before the function decorators are imported, + # so we need to specifically mock it beforehand. 
+ mock__get_osutil = patch("azurelinuxagent.common.osutil.factory._get_osutil", mock_get_osutil) + mock__get_osutil.start() + ret = CGroupConfigurator.get_instance().enabled + mock__get_osutil.stop() + return ret + + +def is_trusty_in_travis(): + # In Travis, Trusty (Ubuntu 14.04) is missing the cpuacct.stat file, + # possibly because the accounting is not enabled by default. + if not running_under_travis(): + return False + + return type(get_osutil_for_travis()) == Ubuntu14OSUtil + + +def is_systemd_present(): + return os.path.exists("/run/systemd/system") + + +def i_am_root(): + return os.geteuid() == 0 + class AgentTestCase(unittest.TestCase): + @classmethod + def setUpClass(cls): + # Setup newer unittest assertions missing in prior versions of Python + + if not hasattr(cls, "assertRegex"): + cls.assertRegex = cls.assertRegexpMatches if hasattr(cls, "assertRegexpMatches") else cls.emulate_assertRegexpMatches + if not hasattr(cls, "assertNotRegex"): + cls.assertNotRegex = cls.assertNotRegexpMatches if hasattr(cls, "assertNotRegexpMatches") else cls.emulate_assertNotRegexpMatches + if not hasattr(cls, "assertIn"): + cls.assertIn = cls.emulate_assertIn + if not hasattr(cls, "assertNotIn"): + cls.assertNotIn = cls.emulate_assertNotIn + if not hasattr(cls, "assertGreater"): + cls.assertGreater = cls.emulate_assertGreater + if not hasattr(cls, "assertGreaterEqual"): + cls.assertGreaterEqual = cls.emulate_assertGreaterEqual + if not hasattr(cls, "assertLess"): + cls.assertLess = cls.emulate_assertLess + if not hasattr(cls, "assertLessEqual"): + cls.assertLessEqual = cls.emulate_assertLessEqual + if not hasattr(cls, "assertIsNone"): + cls.assertIsNone = cls.emulate_assertIsNone + if not hasattr(cls, "assertIsNotNone"): + cls.assertIsNotNone = cls.emulate_assertIsNotNone + if hasattr(cls, "assertRaisesRegexp"): + cls.assertRaisesRegex = cls.assertRaisesRegexp + if not hasattr(cls, "assertRaisesRegex"): + cls.assertRaisesRegex = cls.emulate_raises_regex + if not 
hasattr(cls, "assertListEqual"): + cls.assertListEqual = cls.emulate_assertListEqual + if not hasattr(cls, "assertIsInstance"): + cls.assertIsInstance = cls.emulate_assertIsInstance + if sys.version_info < (2, 7): + # assertRaises does not implement a context manager in 2.6; override it with emulate_assertRaises but + # keep a pointer to the original implementation to use when a context manager is not requested. + cls.original_assertRaises = unittest.TestCase.assertRaises + cls.assertRaises = cls.emulate_assertRaises + + @classmethod + def tearDownClass(cls): + pass + def setUp(self): prefix = "{0}_".format(self.__class__.__name__) @@ -72,16 +228,254 @@ event.init_event_status(self.tmp_dir) event.init_event_logger(self.tmp_dir) + self.mock__get_osutil = patch("azurelinuxagent.common.osutil.factory._get_osutil", mock_get_osutil) + self.mock__get_osutil.start() + def tearDown(self): if not debug and self.tmp_dir is not None: shutil.rmtree(self.tmp_dir) - def _create_files(self, tmp_dir, prefix, suffix, count, with_sleep=0): + self.mock__get_osutil.stop() + + def emulate_assertIn(self, a, b, msg=None): + if a not in b: + msg = msg if msg is not None else "{0} not found in {1}".format(_safe_repr(a), _safe_repr(b)) + self.fail(msg) + + def emulate_assertNotIn(self, a, b, msg=None): + if a in b: + msg = msg if msg is not None else "{0} unexpectedly found in {1}".format(_safe_repr(a), _safe_repr(b)) + self.fail(msg) + + def emulate_assertGreater(self, a, b, msg=None): + if not a > b: + msg = msg if msg is not None else '{0} not greater than {1}'.format(_safe_repr(a), _safe_repr(b)) + self.fail(msg) + + def emulate_assertGreaterEqual(self, a, b, msg=None): + if not a >= b: + msg = msg if msg is not None else '{0} not greater or equal to {1}'.format(_safe_repr(a), _safe_repr(b)) + self.fail(msg) + + def emulate_assertLess(self, a, b, msg=None): + if not a < b: + msg = msg if msg is not None else '{0} not less than {1}'.format(_safe_repr(a), _safe_repr(b)) + self.fail(msg) + 
+ def emulate_assertLessEqual(self, a, b, msg=None): + if not a <= b: + msg = msg if msg is not None else '{0} not less or equal to {1}'.format(_safe_repr(a), _safe_repr(b)) + self.fail(msg) + + def emulate_assertIsNone(self, x, msg=None): + if x is not None: + msg = msg if msg is not None else '{0} is not None'.format(_safe_repr(x)) + self.fail(msg) + + def emulate_assertIsNotNone(self, x, msg=None): + if x is None: + msg = msg if msg is not None else '{0} is None'.format(_safe_repr(x)) + self.fail(msg) + + def emulate_assertRegexpMatches(self, text, regexp, msg=None): + if re.search(regexp, text) is not None: + return + msg = msg if msg is not None else "'{0}' does not match '{1}'.".format(text, regexp) + self.fail(msg) + + def emulate_assertNotRegexpMatches(self, text, regexp, msg=None): + if re.search(regexp, text, flags=1) is None: + return + msg = msg if msg is not None else "'{0}' should not match '{1}'.".format(text, regexp) + self.fail(msg) + + class _AssertRaisesContextManager(object): + def __init__(self, expected_exception_type, test_case): + self._expected_exception_type = expected_exception_type + self._test_case = test_case + + def __enter__(self): + return self + + @staticmethod + def _get_type_name(type): + return type.__name__ if hasattr(type, "__name__") else str(type) + + def __exit__(self, exception_type, exception, *_): + if exception_type is None: + expected = AgentTestCase._AssertRaisesContextManager._get_type_name(self._expected_exception_type) + self._test_case.fail("Did not raise an exception; expected '{0}'".format(expected)) + if not issubclass(exception_type, self._expected_exception_type): + raised = AgentTestCase._AssertRaisesContextManager._get_type_name(exception_type) + expected = AgentTestCase._AssertRaisesContextManager._get_type_name(self._expected_exception_type) + self._test_case.fail("Raised '{0}', but expected '{1}'".format(raised, expected)) + + self.exception = exception + return True + + def emulate_assertRaises(self, 
exception_type, function=None, *args, **kwargs): + # return a context manager only when function is not provided; otherwise use the original assertRaises + if function is None: + return AgentTestCase._AssertRaisesContextManager(exception_type, self) + + self.original_assertRaises(exception_type, function, *args, **kwargs) + + return None + + def emulate_raises_regex(self, exception_type, regex, function, *args, **kwargs): + try: + function(*args, **kwargs) + except Exception as e: + if re.search(regex, str(e), flags=1) is not None: + return + else: + self.fail("Expected exception {0} matching {1}. Actual: {2}".format( + exception_type, regex, str(e))) + self.fail("No exception was thrown. Expected exception {0} matching {1}".format(exception_type, regex)) + + def emulate_assertListEqual(self, seq1, seq2, msg=None, seq_type=None): + """An equality assertion for ordered sequences (like lists and tuples). + + For the purposes of this function, a valid ordered sequence type is one + which can be indexed, has a length, and has an equality operator. + + Args: + seq1: The first sequence to compare. + seq2: The second sequence to compare. + seq_type: The expected datatype of the sequences, or None if no + datatype should be enforced. + msg: Optional message to use on failure instead of a list of + differences. + """ + if seq_type is not None: + seq_type_name = seq_type.__name__ + if not isinstance(seq1, seq_type): + raise self.failureException('First sequence is not a %s: %s' + % (seq_type_name, safe_repr(seq1))) + if not isinstance(seq2, seq_type): + raise self.failureException('Second sequence is not a %s: %s' + % (seq_type_name, safe_repr(seq2))) + else: + seq_type_name = "sequence" + + differing = None + try: + len1 = len(seq1) + except (TypeError, NotImplementedError): + differing = 'First %s has no length. Non-sequence?' 
% ( + seq_type_name) + + if differing is None: + try: + len2 = len(seq2) + except (TypeError, NotImplementedError): + differing = 'Second %s has no length. Non-sequence?' % ( + seq_type_name) + + if differing is None: + if seq1 == seq2: + return + + seq1_repr = safe_repr(seq1) + seq2_repr = safe_repr(seq2) + if len(seq1_repr) > 30: + seq1_repr = seq1_repr[:30] + '...' + if len(seq2_repr) > 30: + seq2_repr = seq2_repr[:30] + '...' + elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr) + differing = '%ss differ: %s != %s\n' % elements + + for i in xrange(min(len1, len2)): + try: + item1 = seq1[i] + except (TypeError, IndexError, NotImplementedError): + differing += ('\nUnable to index element %d of first %s\n' % + (i, seq_type_name)) + break + + try: + item2 = seq2[i] + except (TypeError, IndexError, NotImplementedError): + differing += ('\nUnable to index element %d of second %s\n' % + (i, seq_type_name)) + break + + if item1 != item2: + differing += ('\nFirst differing element %d:\n%s\n%s\n' % + (i, safe_repr(item1), safe_repr(item2))) + break + else: + if (len1 == len2 and seq_type is None and + type(seq1) != type(seq2)): + # The sequences are the same, but have differing types. 
+ return + + if len1 > len2: + differing += ('\nFirst %s contains %d additional ' + 'elements.\n' % (seq_type_name, len1 - len2)) + try: + differing += ('First extra element %d:\n%s\n' % + (len2, safe_repr(seq1[len2]))) + except (TypeError, IndexError, NotImplementedError): + differing += ('Unable to index element %d ' + 'of first %s\n' % (len2, seq_type_name)) + elif len1 < len2: + differing += ('\nSecond %s contains %d additional ' + 'elements.\n' % (seq_type_name, len2 - len1)) + try: + differing += ('First extra element %d:\n%s\n' % + (len1, safe_repr(seq2[len1]))) + except (TypeError, IndexError, NotImplementedError): + differing += ('Unable to index element %d ' + 'of second %s\n' % (len1, seq_type_name)) + standardMsg = differing + diffMsg = '\n' + '\n'.join( + difflib.ndiff(pprint.pformat(seq1).splitlines(), + pprint.pformat(seq2).splitlines())) + standardMsg = self._truncateMessage(standardMsg, diffMsg) + msg = self._formatMessage(msg, standardMsg) + self.fail(msg) + + def emulate_assertIsInstance(self, obj, object_type, msg=None): + if not isinstance(obj, object_type): + msg = msg if msg is not None else '{0} is not an instance of {1}'.format(_safe_repr(obj), + _safe_repr(object_type)) + self.fail(msg) + + @staticmethod + def _create_files(tmp_dir, prefix, suffix, count, with_sleep=0): for i in range(count): f = os.path.join(tmp_dir, '.'.join((prefix, str(i), suffix))) fileutil.write_file(f, "faux content") time.sleep(with_sleep) + def create_script(self, file_name, contents, file_path=None): + """ + Creates an executable script with the given contents. 
+ If file_name ends with ".py", it creates a Python3 script, otherwise it creates a bash script + :param file_name: The name of the file to create the script with + :param contents: Contents of the script file + :param file_path: The path of the file where to create it in (we use /tmp/ by default) + :return: + """ + if not file_path: + file_path = os.path.join(self.tmp_dir, file_name) + + directory = os.path.dirname(file_path) + if not os.path.exists(directory): + os.mkdir(directory) + + with open(file_path, "w") as script: + if file_name.endswith(".py"): + script.write("#!/usr/bin/env python3\n") + else: + script.write("#!/usr/bin/env bash\n") + script.write(contents) + + os.chmod(file_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) + + return file_name + def load_data(name): """Load test data""" diff -Nru walinuxagent-2.2.21+really2.2.20/tests/utils/cgroups_tools.py walinuxagent-2.2.45/tests/utils/cgroups_tools.py --- walinuxagent-2.2.21+really2.2.20/tests/utils/cgroups_tools.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/utils/cgroups_tools.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,50 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ +# + +import os +from azurelinuxagent.common.cgroupapi import VM_AGENT_CGROUP_NAME +from azurelinuxagent.common.utils import fileutil + +class CGroupsTools(object): + @staticmethod + def create_legacy_agent_cgroup(cgroups_file_system_root, controller, daemon_pid): + """ + Previous versions of the daemon (2.2.31-2.2.40) wrote their PID to /sys/fs/cgroup/{cpu,memory}/WALinuxAgent/WALinuxAgent; + starting from version 2.2.41 we track the agent service in walinuxagent.service instead of WALinuxAgent/WALinuxAgent. + + This method creates a mock cgroup using the legacy path and adds the given PID to it. + """ + legacy_cgroup = os.path.join(cgroups_file_system_root, controller, "WALinuxAgent", "WALinuxAgent") + if not os.path.exists(legacy_cgroup): + os.makedirs(legacy_cgroup) + fileutil.append_file(os.path.join(legacy_cgroup, "cgroup.procs"), daemon_pid + "\n") + return legacy_cgroup + + @staticmethod + def create_agent_cgroup(cgroups_file_system_root, controller, extension_handler_pid): + """ + Previous versions of the daemon (2.2.31-2.2.40) wrote their PID to /sys/fs/cgroup/{cpu,memory}/WALinuxAgent/WALinuxAgent; + starting from version 2.2.41 we track the agent service in walinuxagent.service instead of WALinuxAgent/WALinuxAgent. + + This method creates a mock cgroup using the newer path and adds the given PID to it. 
+ """ + new_cgroup = os.path.join(cgroups_file_system_root, controller, VM_AGENT_CGROUP_NAME) + if not os.path.exists(new_cgroup): + os.makedirs(new_cgroup) + fileutil.append_file(os.path.join(new_cgroup, "cgroup.procs"), extension_handler_pid + "\n") + return new_cgroup + diff -Nru walinuxagent-2.2.21+really2.2.20/tests/utils/__init__.py walinuxagent-2.2.45/tests/utils/__init__.py --- walinuxagent-2.2.21+really2.2.20/tests/utils/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/utils/__init__.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/tests/utils/test_archive.py walinuxagent-2.2.45/tests/utils/test_archive.py --- walinuxagent-2.2.21+really2.2.20/tests/utils/test_archive.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/utils/test_archive.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,248 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the Apache License. 
+from datetime import datetime, timedelta + +import zipfile + +from azurelinuxagent.common.utils.archive import StateFlusher, StateArchiver, MAX_ARCHIVED_STATES +from tests.tools import * + +debug = False +if os.environ.get('DEBUG') == '1': + debug = True + +# Enable verbose logger to stdout +if debug: + logger.add_logger_appender(logger.AppenderType.STDOUT, + logger.LogLevel.VERBOSE) + + +class TestArchive(AgentTestCase): + def setUp(self): + prefix = "{0}_".format(self.__class__.__name__) + + self.tmp_dir = tempfile.mkdtemp(prefix=prefix) + + def tearDown(self): + if not debug and self.tmp_dir is not None: + shutil.rmtree(self.tmp_dir) + + def _write_file(self, fn, contents=None): + full_name = os.path.join(self.tmp_dir, fn) + fileutil.mkdir(os.path.dirname(full_name)) + + with open(full_name, 'w') as fh: + data = contents if contents is not None else fn + fh.write(data) + return full_name + + @property + def history_dir(self): + return os.path.join(self.tmp_dir, 'history') + + def test_archive00(self): + """ + StateFlusher should move all 'goal state' files to a new directory + under the history folder that is timestamped. 
+ """ + temp_files = [ + 'Prod.0.manifest.xml', + 'Prod.0.agentsManifest', + 'Microsoft.Azure.Extensions.CustomScript.0.xml' + ] + + for f in temp_files: + self._write_file(f) + + test_subject = StateFlusher(self.tmp_dir) + test_subject.flush(datetime.utcnow()) + + self.assertTrue(os.path.exists(self.history_dir)) + self.assertTrue(os.path.isdir(self.history_dir)) + + timestamp_dirs = os.listdir(self.history_dir) + self.assertEqual(1, len(timestamp_dirs)) + + self.assertIsIso8601(timestamp_dirs[0]) + ts = self.parse_isoformat(timestamp_dirs[0]) + self.assertDateTimeCloseTo(ts, datetime.utcnow(), timedelta(seconds=30)) + + for f in temp_files: + history_path = os.path.join(self.history_dir, timestamp_dirs[0], f) + msg = "expected the temp file {0} to exist".format(history_path) + self.assertTrue(os.path.exists(history_path), msg) + + def test_archive01(self): + """ + StateArchiver should archive all history directories by + + 1. Creating a .zip of a timestamped directory's files + 2. Saving the .zip to /var/lib/waagent/history/ + 2. Deleting the timestamped directory + """ + temp_files = [ + 'Prod.0.manifest.xml', + 'Prod.0.agentsManifest', + 'Microsoft.Azure.Extensions.CustomScript.0.xml' + ] + + for f in temp_files: + self._write_file(f) + + flusher = StateFlusher(self.tmp_dir) + flusher.flush(datetime.utcnow()) + + test_subject = StateArchiver(self.tmp_dir) + test_subject.archive() + + timestamp_zips = os.listdir(self.history_dir) + self.assertEqual(1, len(timestamp_zips)) + + zip_fn = timestamp_zips[0] # 2000-01-01T00:00:00.000000.zip + ts_s = os.path.splitext(zip_fn)[0] # 2000-01-01T00:00:00.000000 + + self.assertIsIso8601(ts_s) + ts = self.parse_isoformat(ts_s) + self.assertDateTimeCloseTo(ts, datetime.utcnow(), timedelta(seconds=30)) + + zip_full = os.path.join(self.history_dir, zip_fn) + self.assertZipContains(zip_full, temp_files) + + def test_archive02(self): + """ + StateArchiver should purge the MAX_ARCHIVED_STATES oldest files + or directories. 
The oldest timestamps are purged first. + + This test case creates a mixture of archive files and directories. + It creates 5 more values than MAX_ARCHIVED_STATES to ensure that + 5 archives are cleaned up. It asserts that the files and + directories are properly deleted from the disk. + """ + count = 6 + total = MAX_ARCHIVED_STATES + count + + start = datetime.now() + timestamps = [] + + for i in range(0, total): + ts = start + timedelta(seconds=i) + timestamps.append(ts) + + if i % 2 == 0: + fn = os.path.join('history', ts.isoformat(), 'Prod.0.manifest.xml') + else: + fn = os.path.join('history', "{0}.zip".format(ts.isoformat())) + + self._write_file(fn) + + self.assertEqual(total, len(os.listdir(self.history_dir))) + + test_subject = StateArchiver(self.tmp_dir) + test_subject.purge() + + archived_entries = os.listdir(self.history_dir) + self.assertEqual(MAX_ARCHIVED_STATES, len(archived_entries)) + + archived_entries.sort() + + for i in range(0, MAX_ARCHIVED_STATES): + ts = timestamps[i + count].isoformat() + if i % 2 == 0: + fn = ts + else: + fn = "{0}.zip".format(ts) + self.assertTrue(fn in archived_entries, "'{0}' is not in the list of unpurged entires".format(fn)) + + def test_archive03(self): + """ + If the StateFlusher has to flush the same file, it should + overwrite the existing one. 
+ """ + temp_files = [ + 'Prod.0.manifest.xml', + 'Prod.0.agentsManifest', + 'Microsoft.Azure.Extensions.CustomScript.0.xml' + ] + + def _write_goal_state_files(temp_files, content=None): + for f in temp_files: + self._write_file(f, content) + + def _check_history_files(timestamp_dir, files, content=None): + for f in files: + history_path = os.path.join(self.history_dir, timestamp_dir, f) + msg = "expected the temp file {0} to exist".format(history_path) + self.assertTrue(os.path.exists(history_path), msg) + expected_content = f if content is None else content + actual_content = fileutil.read_file(history_path) + self.assertEqual(expected_content, actual_content) + + timestamp = datetime.utcnow() + + _write_goal_state_files(temp_files) + test_subject = StateFlusher(self.tmp_dir) + test_subject.flush(timestamp) + + # Ensure history directory exists, has proper timestamped-based name, + self.assertTrue(os.path.exists(self.history_dir)) + self.assertTrue(os.path.isdir(self.history_dir)) + + timestamp_dirs = os.listdir(self.history_dir) + self.assertEqual(1, len(timestamp_dirs)) + + self.assertIsIso8601(timestamp_dirs[0]) + ts = self.parse_isoformat(timestamp_dirs[0]) + self.assertDateTimeCloseTo(ts, datetime.utcnow(), timedelta(seconds=30)) + + # Ensure saved files contain the right content + _check_history_files(timestamp_dirs[0], temp_files) + + # re-write all of the same files with different content, and flush again. + # .flush() should overwrite the existing ones + _write_goal_state_files(temp_files, "--this-has-been-changed--") + test_subject.flush(timestamp) + + # The contents of the saved files were overwritten as a result of the flush. + _check_history_files(timestamp_dirs[0], temp_files, "--this-has-been-changed--") + + def test_archive04(self): + """ + The archive directory is created if it does not exist. + + This failure was caught when .purge() was called before .archive(). 
+ """ + test_subject = StateArchiver(os.path.join(self.tmp_dir, 'does-not-exist')) + test_subject.purge() + + def parse_isoformat(self, s): + return datetime.strptime(s, '%Y-%m-%dT%H:%M:%S.%f') + + def assertIsIso8601(self, s): + try: + self.parse_isoformat(s) + except: + raise AssertionError("the value '{0}' is not an ISO8601 formatted timestamp".format(s)) + + def _total_seconds(self, td): + """ + Compute the total_seconds for a timedelta because 2.6 does not have total_seconds. + """ + return (0.0 + td.microseconds + (td.seconds + td.days * 24 * 60 * 60) * 10 ** 6) / 10 ** 6 + + def assertDateTimeCloseTo(self, t1, t2, within): + if t1 <= t2: + diff = t2 -t1 + else: + diff = t1 - t2 + + secs = self._total_seconds(within - diff) + if secs < 0: + self.fail("the timestamps are outside of the tolerance of by {0} seconds".format(secs)) + + def assertZipContains(self, zip_fn, files): + ziph = zipfile.ZipFile(zip_fn, 'r') + zip_files = [x.filename for x in ziph.filelist] + for f in files: + self.assertTrue(f in zip_files, "'{0}' was not found in {1}".format(f, zip_fn)) + + ziph.close() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/utils/test_crypt_util.py walinuxagent-2.2.45/tests/utils/test_crypt_util.py --- walinuxagent-2.2.21+really2.2.20/tests/utils/test_crypt_util.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/utils/test_crypt_util.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,78 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.6+ and Openssl 1.0+ +# + +from azurelinuxagent.common.exception import CryptError +from azurelinuxagent.common.utils.cryptutil import CryptUtil +from tests.tools import * + + +def is_python_version_26(): + return sys.version_info[0] == 2 and sys.version_info[1] == 6 + + +class TestCryptoUtilOperations(AgentTestCase): + + def test_decrypt_encrypted_text(self): + encrypted_string = load_data("wire/encrypted.enc") + prv_key = os.path.join(self.tmp_dir, "TransportPrivate.pem") + with open(prv_key, 'w+') as c: + c.write(load_data("wire/sample.pem")) + secret = ']aPPEv}uNg1FPnl?' + crypto = CryptUtil(conf.get_openssl_cmd()) + decrypted_string = crypto.decrypt_secret(encrypted_string, prv_key) + self.assertEquals(secret, decrypted_string, "decrypted string does not match expected") + + def test_decrypt_encrypted_text_missing_private_key(self): + encrypted_string = load_data("wire/encrypted.enc") + prv_key = os.path.join(self.tmp_dir, "TransportPrivate.pem") + crypto = CryptUtil(conf.get_openssl_cmd()) + self.assertRaises(CryptError, crypto.decrypt_secret, encrypted_string, "abc" + prv_key) + + @skip_if_predicate_true(is_python_version_26, "Disabled on Python 2.6") + def test_decrypt_encrypted_text_wrong_private_key(self): + encrypted_string = load_data("wire/encrypted.enc") + prv_key = os.path.join(self.tmp_dir, "wrong.pem") + with open(prv_key, 'w+') as c: + c.write(load_data("wire/trans_prv")) + crypto = CryptUtil(conf.get_openssl_cmd()) + self.assertRaises(CryptError, crypto.decrypt_secret, encrypted_string, prv_key) + + def test_decrypt_encrypted_text_text_not_encrypted(self): + encrypted_string = "abc@123" + prv_key = os.path.join(self.tmp_dir, "TransportPrivate.pem") + with open(prv_key, 'w+') as c: + c.write(load_data("wire/sample.pem")) + crypto = CryptUtil(conf.get_openssl_cmd()) + self.assertRaises(CryptError, crypto.decrypt_secret, 
encrypted_string, prv_key) + + def test_get_pubkey_from_crt(self): + crypto = CryptUtil(conf.get_openssl_cmd()) + prv_key = os.path.join(data_dir, "wire", "trans_prv") + expected_pub_key = os.path.join(data_dir, "wire", "trans_pub") + + with open(expected_pub_key) as fh: + self.assertEqual(fh.read(), crypto.get_pubkey_from_prv(prv_key)) + + def test_get_pubkey_from_crt_invalid_file(self): + crypto = CryptUtil(conf.get_openssl_cmd()) + prv_key = os.path.join(data_dir, "wire", "trans_prv_does_not_exist") + + self.assertRaises(IOError, crypto.get_pubkey_from_prv, prv_key) + + +if __name__ == '__main__': + unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/utils/test_extension_process_util.py walinuxagent-2.2.45/tests/utils/test_extension_process_util.py --- walinuxagent-2.2.21+really2.2.20/tests/utils/test_extension_process_util.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/utils/test_extension_process_util.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,260 @@ +# Copyright Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ +# +from azurelinuxagent.common.exception import ExtensionError, ExtensionErrorCodes +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.utils.extensionprocessutil import format_stdout_stderr, read_output, \ + wait_for_process_completion_or_timeout, handle_process_completion +from tests.tools import * +import subprocess + + +class TestProcessUtils(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) + self.tmp_dir = tempfile.mkdtemp() + self.stdout = tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") + self.stderr = tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") + + self.stdout.write("The quick brown fox jumps over the lazy dog.".encode("utf-8")) + self.stderr.write("The five boxing wizards jump quickly.".encode("utf-8")) + + def tearDown(self): + if self.tmp_dir is not None: + shutil.rmtree(self.tmp_dir) + + def test_wait_for_process_completion_or_timeout_should_terminate_cleanly(self): + process = subprocess.Popen( + "date", + shell=True, + cwd=self.tmp_dir, + env={}, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + timed_out, ret = wait_for_process_completion_or_timeout(process=process, timeout=5) + self.assertEquals(timed_out, False) + self.assertEquals(ret, 0) + + def test_wait_for_process_completion_or_timeout_should_kill_process_on_timeout(self): + timeout = 5 + process = subprocess.Popen( + "sleep 1m", + shell=True, + cwd=self.tmp_dir, + env={}, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + preexec_fn=os.setsid) + + # We don't actually mock the kill, just wrap it so we can assert its call count + with patch('azurelinuxagent.common.utils.extensionprocessutil.os.killpg', wraps=os.killpg) as patch_kill: + with patch('time.sleep') as mock_sleep: + timed_out, ret = wait_for_process_completion_or_timeout(process=process, timeout=timeout) + + # We're mocking sleep to avoid prolonging the test execution time, but we still want to make sure + # we're "waiting" the 
correct amount of time before killing the process + self.assertEquals(mock_sleep.call_count, timeout) + + self.assertEquals(patch_kill.call_count, 1) + self.assertEquals(timed_out, True) + self.assertEquals(ret, None) + + def test_handle_process_completion_should_return_nonzero_when_process_fails(self): + process = subprocess.Popen( + "ls folder_does_not_exist", + shell=True, + cwd=self.tmp_dir, + env={}, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + timed_out, ret = wait_for_process_completion_or_timeout(process=process, timeout=5) + self.assertEquals(timed_out, False) + self.assertEquals(ret, 2) + + def test_handle_process_completion_should_return_process_output(self): + command = "echo 'dummy stdout' && 1>&2 echo 'dummy stderr'" + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: + process = subprocess.Popen(command, + shell=True, + cwd=self.tmp_dir, + env={}, + stdout=stdout, + stderr=stderr, + preexec_fn=os.setsid) + + process_output = handle_process_completion(process=process, + command=command, + timeout=5, + stdout=stdout, + stderr=stderr, + error_code=42) + + expected_output = "[stdout]\ndummy stdout\n\n\n[stderr]\ndummy stderr\n" + self.assertEquals(process_output, expected_output) + + def test_handle_process_completion_should_raise_on_timeout(self): + command = "sleep 1m" + timeout = 5 + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: + with patch('time.sleep') as mock_sleep: + with self.assertRaises(ExtensionError) as context_manager: + process = subprocess.Popen(command, + shell=True, + cwd=self.tmp_dir, + env={}, + stdout=stdout, + stderr=stderr, + preexec_fn=os.setsid) + + handle_process_completion(process=process, + command=command, + timeout=timeout, + stdout=stdout, + stderr=stderr, + error_code=42) + + # We're mocking sleep to avoid prolonging the test 
execution time, but we still want to make sure + # we're "waiting" the correct amount of time before killing the process and raising an exception + self.assertEquals(mock_sleep.call_count, timeout) + + self.assertEquals(context_manager.exception.code, ExtensionErrorCodes.PluginHandlerScriptTimedout) + self.assertIn("Timeout({0})".format(timeout), ustr(context_manager.exception)) + + def test_handle_process_completion_should_raise_on_nonzero_exit_code(self): + command = "ls folder_does_not_exist" + error_code = 42 + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: + with self.assertRaises(ExtensionError) as context_manager: + process = subprocess.Popen(command, + shell=True, + cwd=self.tmp_dir, + env={}, + stdout=stdout, + stderr=stderr, + preexec_fn=os.setsid) + + handle_process_completion(process=process, + command=command, + timeout=4, + stdout=stdout, + stderr=stderr, + error_code=error_code) + + self.assertEquals(context_manager.exception.code, error_code) + self.assertIn("Non-zero exit code:", ustr(context_manager.exception)) + + def test_read_output_it_should_return_no_content(self): + with patch('azurelinuxagent.common.utils.extensionprocessutil.TELEMETRY_MESSAGE_MAX_LEN', 0): + expected = "[stdout]\n\n\n[stderr]\n" + actual = read_output(self.stdout, self.stderr) + self.assertEqual(expected, actual) + + def test_read_output_it_should_truncate_the_content(self): + with patch('azurelinuxagent.common.utils.extensionprocessutil.TELEMETRY_MESSAGE_MAX_LEN', 10): + expected = "[stdout]\nThe quick \n\n[stderr]\nThe five b" + actual = read_output(self.stdout, self.stderr) + self.assertEqual(expected, actual) + + def test_read_output_it_should_return_all_content(self): + with patch('azurelinuxagent.common.utils.extensionprocessutil.TELEMETRY_MESSAGE_MAX_LEN', 50): + expected = "[stdout]\nThe quick brown fox jumps over the lazy dog.\n\n" \ + "[stderr]\nThe five boxing wizards 
jump quickly." + actual = read_output(self.stdout, self.stderr) + self.assertEqual(expected, actual) + + def test_read_output_it_should_handle_exceptions(self): + with patch('azurelinuxagent.common.utils.extensionprocessutil.TELEMETRY_MESSAGE_MAX_LEN', "type error"): + actual = read_output(self.stdout, self.stderr) + self.assertIn("Cannot read stdout/stderr", actual) + + def test_format_stdout_stderr00(self): + """ + If stdout and stderr are both smaller than the max length, + the full representation should be displayed. + """ + stdout = "The quick brown fox jumps over the lazy dog." + stderr = "The five boxing wizards jump quickly." + + expected = "[stdout]\n{0}\n\n[stderr]\n{1}".format(stdout, stderr) + actual = format_stdout_stderr(stdout, stderr, 1000) + self.assertEqual(expected, actual) + + def test_format_stdout_stderr01(self): + """ + If stdout and stderr both exceed the max length, + then both stdout and stderr are trimmed equally. + """ + stdout = "The quick brown fox jumps over the lazy dog." + stderr = "The five boxing wizards jump quickly." + + # noinspection SpellCheckingInspection + expected = '[stdout]\ns over the lazy dog.\n\n[stderr]\nizards jump quickly.' + actual = format_stdout_stderr(stdout, stderr, 60) + self.assertEqual(expected, actual) + self.assertEqual(60, len(actual)) + + def test_format_stdout_stderr02(self): + """ + If stderr is much larger than stdout, stderr is allowed + to borrow space from stdout's quota. + """ + stdout = "empty" + stderr = "The five boxing wizards jump quickly." + + expected = '[stdout]\nempty\n\n[stderr]\ns jump quickly.' + actual = format_stdout_stderr(stdout, stderr, 40) + self.assertEqual(expected, actual) + self.assertEqual(40, len(actual)) + + def test_format_stdout_stderr03(self): + """ + If stdout is much larger than stderr, stdout is allowed + to borrow space from stderr's quota. + """ + stdout = "The quick brown fox jumps over the lazy dog." 
+ stderr = "empty" + + expected = '[stdout]\nr the lazy dog.\n\n[stderr]\nempty' + actual = format_stdout_stderr(stdout, stderr, 40) + self.assertEqual(expected, actual) + self.assertEqual(40, len(actual)) + + def test_format_stdout_stderr04(self): + """ + If the max length is not sufficient to even hold the stdout + and stderr markers an empty string is returned. + """ + stdout = "The quick brown fox jumps over the lazy dog." + stderr = "The five boxing wizards jump quickly." + + expected = '' + actual = format_stdout_stderr(stdout, stderr, 4) + self.assertEqual(expected, actual) + self.assertEqual(0, len(actual)) + + def test_format_stdout_stderr05(self): + """ + If stdout and stderr are empty, an empty template is returned. + """ + + expected = '[stdout]\n\n\n[stderr]\n' + actual = format_stdout_stderr('', '', 1000) + self.assertEqual(expected, actual) diff -Nru walinuxagent-2.2.21+really2.2.20/tests/utils/test_file_util.py walinuxagent-2.2.45/tests/utils/test_file_util.py --- walinuxagent-2.2.21+really2.2.20/tests/utils/test_file_util.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/utils/test_file_util.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import errno as errno @@ -38,7 +38,19 @@ content_read = fileutil.read_file(test_file) self.assertEquals(content, content_read) os.remove(test_file) - + + def test_write_file_content_is_None(self): + """ + write_file throws when content is None. No file is created. 
+ """ + try: + test_file=os.path.join(self.tmp_dir, self.test_file) + fileutil.write_file(test_file, None) + + self.fail("expected write_file to throw an exception") + except: + self.assertEquals(False, os.path.exists(test_file)) + def test_rw_utf8_file(self): test_file=os.path.join(self.tmp_dir, self.test_file) content = u"\u6211" diff -Nru walinuxagent-2.2.21+really2.2.20/tests/utils/test_network_util.py walinuxagent-2.2.45/tests/utils/test_network_util.py --- walinuxagent-2.2.21+really2.2.20/tests/utils/test_network_util.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.45/tests/utils/test_network_util.py 2019-11-07 00:36:56.000000000 +0000 @@ -0,0 +1,62 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ +# + +import azurelinuxagent.common.utils.networkutil as networkutil + +from tests.tools import * + + +class TestNetworkOperations(AgentTestCase): + def test_route_entry(self): + interface = "eth0" + mask = "C0FFFFFF" # 255.255.255.192 + destination = "C0BB910A" # + gateway = "C1BB910A" + flags = "1" + metric = "0" + + expected = 'Iface: eth0\tDestination: 10.145.187.192\tGateway: 10.145.187.193\tMask: 255.255.255.192\tFlags: 0x0001\tMetric: 0' + expected_json = '{"Iface": "eth0", "Destination": "10.145.187.192", "Gateway": "10.145.187.193", "Mask": "255.255.255.192", "Flags": "0x0001", "Metric": "0"}' + + entry = networkutil.RouteEntry(interface, destination, gateway, mask, flags, metric) + + self.assertEqual(str(entry), expected) + self.assertEqual(entry.to_json(), expected_json) + + def test_nic_link_only(self): + nic = networkutil.NetworkInterfaceCard("test0", "link info") + self.assertEqual(str(nic), '{ "name": "test0", "link": "link info" }') + + def test_nic_ipv4(self): + nic = networkutil.NetworkInterfaceCard("test0", "link info") + nic.add_ipv4("ipv4-1") + self.assertEqual(str(nic), '{ "name": "test0", "link": "link info", "ipv4": ["ipv4-1"] }') + nic.add_ipv4("ipv4-2") + self.assertEqual(str(nic), '{ "name": "test0", "link": "link info", "ipv4": ["ipv4-1","ipv4-2"] }') + + def test_nic_ipv6(self): + nic = networkutil.NetworkInterfaceCard("test0", "link info") + nic.add_ipv6("ipv6-1") + self.assertEqual(str(nic), '{ "name": "test0", "link": "link info", "ipv6": ["ipv6-1"] }') + nic.add_ipv6("ipv6-2") + self.assertEqual(str(nic), '{ "name": "test0", "link": "link info", "ipv6": ["ipv6-1","ipv6-2"] }') + + def test_nic_ordinary(self): + nic = networkutil.NetworkInterfaceCard("test0", "link INFO") + nic.add_ipv6("ipv6-1") + nic.add_ipv4("ipv4-1") + self.assertEqual(str(nic), '{ "name": "test0", "link": "link INFO", "ipv4": ["ipv4-1"], "ipv6": ["ipv6-1"] }') diff -Nru 
walinuxagent-2.2.21+really2.2.20/tests/utils/test_rest_util.py walinuxagent-2.2.45/tests/utils/test_rest_util.py --- walinuxagent-2.2.21+really2.2.20/tests/utils/test_rest_util.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/utils/test_rest_util.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,19 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # -import os -import unittest - -from azurelinuxagent.common.exception import HttpError, \ - ProtocolError, \ - ResourceGoneError +from azurelinuxagent.common.exception import HttpError, ResourceGoneError, InvalidContainerError import azurelinuxagent.common.utils.restutil as restutil - +from azurelinuxagent.common.utils.restutil import HTTP_USER_AGENT from azurelinuxagent.common.future import httpclient, ustr - from tests.tools import * @@ -88,6 +82,7 @@ {"hostplugin":0, "protocol":0, "other":0}, restutil.IOErrorCounter._counts) + class TestHttpOperations(AgentTestCase): def test_parse_url(self): test_uri = "http://abc.def/ghi#hash?jkl=mn" @@ -117,6 +112,68 @@ self.assertEquals(None, host) self.assertEquals(rel_uri, "None") + def test_cleanup_sas_tokens_from_urls_for_normal_cases(self): + test_url = "http://abc.def/ghi#hash?jkl=mn" + filtered_url = restutil.redact_sas_tokens_in_urls(test_url) + self.assertEquals(test_url, filtered_url) + + test_url = "http://abc.def:80/" + filtered_url = restutil.redact_sas_tokens_in_urls(test_url) + self.assertEquals(test_url, filtered_url) + + test_url = "http://abc.def/" + filtered_url = restutil.redact_sas_tokens_in_urls(test_url) + self.assertEquals(test_url, filtered_url) + + test_url = 
"https://abc.def/ghi?jkl=mn" + filtered_url = restutil.redact_sas_tokens_in_urls(test_url) + self.assertEquals(test_url, filtered_url) + + def test_cleanup_sas_tokens_from_urls_containing_sas_tokens(self): + # Contains pair of URLs (RawURL, RedactedURL) + urls_tuples = [("https://abc.def.xyz.123.net/functiontest/yokawasa.png?sig" + "=sXBjML1Fpk9UnTBtajo05ZTFSk0LWFGvARZ6WlVcAog%3D&srt=o&ss=b&" + "spr=https&sp=rl&sv=2016-05-31&se=2017-07-01T00%3A21%3A38Z&" + "st=2017-07-01T23%3A16%3A38Z", + "https://abc.def.xyz.123.net/functiontest/yokawasa.png?sig" + "=" + restutil.REDACTED_TEXT + + "&srt=o&ss=b&spr=https&sp=rl&sv=2016-05-31&se=2017-07-01T00" + "%3A21%3A38Z&st=2017-07-01T23%3A16%3A38Z"), + ("https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se=2018-07" + "-26T02:20:44Z&st=2018-07-25T18:20:44Z&spr=https," + "http&sig=DavQgRtl99DsEPv9Xeb63GnLXCuaLYw5ay%2BE1cFckQY%3D", + "https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se" + "=2018-07-26T02:20:44Z&st=2018-07-25T18:20:44Z&spr=https," + "http&sig=" + restutil.REDACTED_TEXT), + ("https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se=2018-07" + "-26T02:20:44Z&st=2018-07-25T18:20:44Z&spr=https," + "http&sig=ttSCKmyjiDEeIzT9q7HtYYgbCRIXuesFSOhNEab52NM%3D", + "https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se" + "=2018-07-26T02:20:44Z&st=2018-07-25T18:20:44Z&spr=https," + "http&sig=" + restutil.REDACTED_TEXT), + ("https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se=2018-07" + "-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https," + "http&sig=X0imGmcj5KcBPFcqlfYjIZakzGrzONGbRv5JMOnGrwc%3D", + "https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se" + "=2018-07-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https," + "http&sig=" + restutil.REDACTED_TEXT), + ("https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se=2018-07" + "-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https," + "http&sig=9hfxYvaZzrMahtGO1OgMUiFGnDOtZXulZ3skkv1eVBg%3D", + 
"https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se" + "=2018-07-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https," + "http&sig=" + restutil.REDACTED_TEXT), + ("https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se=2018-07" + "-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https" + "&sig=cmluQEHnOGsVK9NDm83ruuPdPWNQcerfjOAbkspNZXU%3D", + "https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se" + "=2018-07-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https&sig" + "=" + restutil.REDACTED_TEXT) + ] + + for x in urls_tuples: + self.assertEquals(restutil.redact_sas_tokens_in_urls(x[0]), x[1]) + @patch('azurelinuxagent.common.conf.get_httpproxy_port') @patch('azurelinuxagent.common.conf.get_httpproxy_host') def test_get_http_proxy_none_is_default(self, mock_host, mock_port): @@ -134,8 +191,8 @@ h, p = restutil._get_http_proxy() self.assertEqual("host", h) self.assertEqual(None, p) - mock_host.assert_called_once() - mock_port.assert_called_once() + self.assertEqual(1, mock_host.call_count) + self.assertEqual(1, mock_port.call_count) @patch('azurelinuxagent.common.conf.get_httpproxy_port') @patch('azurelinuxagent.common.conf.get_httpproxy_host') @@ -145,8 +202,8 @@ h, p = restutil._get_http_proxy() self.assertEqual(None, h) self.assertEqual(None, p) - mock_host.assert_called_once() - mock_port.assert_not_called() + self.assertEqual(1, mock_host.call_count) + self.assertEqual(0, mock_port.call_count) @patch('azurelinuxagent.common.conf.get_httpproxy_host') def test_get_http_proxy_http_uses_httpproxy(self, mock_host): @@ -180,6 +237,89 @@ self.assertEqual("foo.com", h) self.assertEqual(80, p) + def test_get_no_proxy_with_values_set(self): + no_proxy_list = ["foo.com", "www.google.com"] + with patch.dict(os.environ, { + 'no_proxy': ",".join(no_proxy_list) + }): + no_proxy_from_environment = restutil.get_no_proxy() + + self.assertEquals(len(no_proxy_list), len(no_proxy_from_environment)) + + for i, j in zip(no_proxy_from_environment, no_proxy_list): + 
self.assertEqual(i, j) + + def test_get_no_proxy_with_incorrect_variable_set(self): + no_proxy_list = ["foo.com", "www.google.com", "", ""] + no_proxy_list_cleaned = [entry for entry in no_proxy_list if entry] + + with patch.dict(os.environ, { + 'no_proxy': ",".join(no_proxy_list) + }): + no_proxy_from_environment = restutil.get_no_proxy() + + self.assertEquals(len(no_proxy_list_cleaned), len(no_proxy_from_environment)) + + for i, j in zip(no_proxy_from_environment, no_proxy_list_cleaned): + print(i, j) + self.assertEqual(i, j) + + def test_get_no_proxy_with_ip_addresses_set(self): + no_proxy_var = "10.0.0.1,10.0.0.2,10.0.0.3,10.0.0.4,10.0.0.5,10.0.0.6,10.0.0.7,10.0.0.8,10.0.0.9,10.0.0.10," + no_proxy_list = ['10.0.0.1', '10.0.0.2', '10.0.0.3', '10.0.0.4', '10.0.0.5', + '10.0.0.6', '10.0.0.7', '10.0.0.8', '10.0.0.9', '10.0.0.10'] + + with patch.dict(os.environ, { + 'no_proxy': no_proxy_var + }): + no_proxy_from_environment = restutil.get_no_proxy() + + self.assertEquals(len(no_proxy_list), len(no_proxy_from_environment)) + + for i, j in zip(no_proxy_from_environment, no_proxy_list): + self.assertEqual(i, j) + + def test_get_no_proxy_default(self): + no_proxy_generator = restutil.get_no_proxy() + self.assertIsNone(no_proxy_generator) + + def test_is_ipv4_address(self): + self.assertTrue(restutil.is_ipv4_address('8.8.8.8')) + self.assertFalse(restutil.is_ipv4_address('localhost.localdomain')) + self.assertFalse(restutil.is_ipv4_address('2001:4860:4860::8888')) # ipv6 tests + + def test_is_valid_cidr(self): + self.assertTrue(restutil.is_valid_cidr('192.168.1.0/24')) + self.assertFalse(restutil.is_valid_cidr('8.8.8.8')) + self.assertFalse(restutil.is_valid_cidr('192.168.1.0/a')) + self.assertFalse(restutil.is_valid_cidr('192.168.1.0/128')) + self.assertFalse(restutil.is_valid_cidr('192.168.1.0/-1')) + self.assertFalse(restutil.is_valid_cidr('192.168.1.999/24')) + + def test_address_in_network(self): + self.assertTrue(restutil.address_in_network('192.168.1.1', 
'192.168.1.0/24')) + self.assertFalse(restutil.address_in_network('172.16.0.1', '192.168.1.0/24')) + + def test_dotted_netmask(self): + self.assertEquals(restutil.dotted_netmask(0), '0.0.0.0') + self.assertEquals(restutil.dotted_netmask(8), '255.0.0.0') + self.assertEquals(restutil.dotted_netmask(16), '255.255.0.0') + self.assertEquals(restutil.dotted_netmask(24), '255.255.255.0') + self.assertEquals(restutil.dotted_netmask(32), '255.255.255.255') + self.assertRaises(ValueError, restutil.dotted_netmask, 33) + + def test_bypass_proxy(self): + no_proxy_list = ["foo.com", "www.google.com", "168.63.129.16", "Microsoft.com"] + with patch.dict(os.environ, { + 'no_proxy': ",".join(no_proxy_list) + }): + self.assertFalse(restutil.bypass_proxy("http://bar.com")) + self.assertTrue(restutil.bypass_proxy("http://foo.com")) + self.assertTrue(restutil.bypass_proxy("http://168.63.129.16")) + self.assertFalse(restutil.bypass_proxy("http://baz.com")) + self.assertFalse(restutil.bypass_proxy("http://10.1.1.1")) + self.assertTrue(restutil.bypass_proxy("http://www.microsoft.com")) + @patch("azurelinuxagent.common.future.httpclient.HTTPSConnection") @patch("azurelinuxagent.common.future.httpclient.HTTPConnection") def test_http_request_direct(self, HTTPConnection, HTTPSConnection): @@ -197,9 +337,9 @@ ]) HTTPSConnection.assert_not_called() mock_conn.request.assert_has_calls([ - call(method="GET", url="/bar", body=None, headers={}) + call(method="GET", url="/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT, 'Connection': 'close'}) ]) - mock_conn.getresponse.assert_called_once() + self.assertEqual(1, mock_conn.getresponse.call_count) self.assertNotEquals(None, resp) self.assertEquals("TheResults", resp.read()) @@ -220,9 +360,9 @@ call("foo", 443, timeout=10) ]) mock_conn.request.assert_has_calls([ - call(method="GET", url="/bar", body=None, headers={}) + call(method="GET", url="/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT, 'Connection': 'close'}) ]) - 
mock_conn.getresponse.assert_called_once() + self.assertEqual(1, mock_conn.getresponse.call_count) self.assertNotEquals(None, resp) self.assertEquals("TheResults", resp.read()) @@ -244,12 +384,120 @@ ]) HTTPSConnection.assert_not_called() mock_conn.request.assert_has_calls([ - call(method="GET", url="http://foo:80/bar", body=None, headers={}) + call(method="GET", url="http://foo:80/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT, 'Connection': 'close'}) ]) - mock_conn.getresponse.assert_called_once() + self.assertEqual(1, mock_conn.getresponse.call_count) self.assertNotEquals(None, resp) self.assertEquals("TheResults", resp.read()) + @patch("azurelinuxagent.common.utils.restutil._get_http_proxy") + @patch("time.sleep") + @patch("azurelinuxagent.common.utils.restutil._http_request") + def test_http_request_proxy_with_no_proxy_check(self, _http_request, sleep, mock_get_http_proxy): + mock_http_resp = MagicMock() + mock_http_resp.read = Mock(return_value="hehe") + _http_request.return_value = mock_http_resp + mock_get_http_proxy.return_value = "host", 1234 # Return a host/port combination + + no_proxy_list = ["foo.com", "www.google.com", "168.63.129.16"] + with patch.dict(os.environ, { + 'no_proxy': ",".join(no_proxy_list) + }): + # Test http get + resp = restutil.http_get("http://foo.com", use_proxy=True) + self.assertEquals("hehe", resp.read()) + self.assertEquals(0, mock_get_http_proxy.call_count) + + # Test http get + resp = restutil.http_get("http://bar.com", use_proxy=True) + self.assertEquals("hehe", resp.read()) + self.assertEquals(1, mock_get_http_proxy.call_count) + + def test_proxy_conditions_with_no_proxy(self): + should_use_proxy = True + should_not_use_proxy = False + use_proxy = True + + no_proxy_list = ["foo.com", "www.google.com", "168.63.129.16"] + with patch.dict(os.environ, { + 'no_proxy': ",".join(no_proxy_list) + }): + host = "10.0.0.1" + self.assertEquals(should_use_proxy, use_proxy and not restutil.bypass_proxy(host)) + + host = 
"foo.com" + self.assertEquals(should_not_use_proxy, use_proxy and not restutil.bypass_proxy(host)) + + host = "www.google.com" + self.assertEquals(should_not_use_proxy, use_proxy and not restutil.bypass_proxy(host)) + + host = "168.63.129.16" + self.assertEquals(should_not_use_proxy, use_proxy and not restutil.bypass_proxy(host)) + + host = "www.bar.com" + self.assertEquals(should_use_proxy, use_proxy and not restutil.bypass_proxy(host)) + + no_proxy_list = ["10.0.0.1/24"] + with patch.dict(os.environ, { + 'no_proxy': ",".join(no_proxy_list) + }): + host = "www.bar.com" + self.assertEquals(should_use_proxy, use_proxy and not restutil.bypass_proxy(host)) + + host = "10.0.0.1" + self.assertEquals(should_not_use_proxy, use_proxy and not restutil.bypass_proxy(host)) + + host = "10.0.1.1" + self.assertEquals(should_use_proxy, use_proxy and not restutil.bypass_proxy(host)) + + # When No_proxy is empty + with patch.dict(os.environ, { + 'no_proxy': "" + }): + host = "10.0.0.1" + self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) + + host = "foo.com" + self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) + + host = "www.google.com" + self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) + + host = "168.63.129.16" + self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) + + host = "www.bar.com" + self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) + + host = "10.0.0.1" + self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) + + host = "10.0.1.1" + self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) + + # When os.environ is empty - No global variables defined. 
+ with patch.dict(os.environ, {}): + host = "10.0.0.1" + self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) + + host = "foo.com" + self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) + + host = "www.google.com" + self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) + + host = "168.63.129.16" + self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) + + host = "www.bar.com" + self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) + + host = "10.0.0.1" + self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) + + host = "10.0.1.1" + self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) + @patch("azurelinuxagent.common.future.httpclient.HTTPSConnection") @patch("azurelinuxagent.common.future.httpclient.HTTPConnection") def test_http_request_proxy_secure(self, HTTPConnection, HTTPSConnection): @@ -269,9 +517,9 @@ call("foo.bar", 23333, timeout=10) ]) mock_conn.request.assert_has_calls([ - call(method="GET", url="https://foo:443/bar", body=None, headers={}) + call(method="GET", url="https://foo:443/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT, 'Connection': 'close'}) ]) - mock_conn.getresponse.assert_called_once() + self.assertEqual(1, mock_conn.getresponse.call_count) self.assertNotEquals(None, resp) self.assertEquals("TheResults", resp.read()) @@ -391,9 +639,9 @@ @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") - def test_http_request_raises_for_bad_request(self, _http_request, _sleep): + def test_http_request_raises_for_resource_gone(self, _http_request, _sleep): _http_request.side_effect = [ - Mock(status=httpclient.BAD_REQUEST) + Mock(status=httpclient.GONE) ] self.assertRaises(ResourceGoneError, restutil.http_get, "https://foo.bar") @@ -401,9 +649,25 @@ @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") - def test_http_request_raises_for_resource_gone(self, _http_request, _sleep): + def 
test_http_request_raises_for_invalid_container_configuration(self, _http_request, _sleep): + def read(): + return b'{ "errorCode": "InvalidContainerConfiguration", "message": "Invalid request." }' + _http_request.side_effect = [ - Mock(status=httpclient.GONE) + Mock(status=httpclient.BAD_REQUEST, reason='Bad Request', read=read) + ] + + self.assertRaises(InvalidContainerError, restutil.http_get, "https://foo.bar") + self.assertEqual(1, _http_request.call_count) + + @patch("time.sleep") + @patch("azurelinuxagent.common.utils.restutil._http_request") + def test_http_request_raises_for_invalid_role_configuration(self, _http_request, _sleep): + def read(): + return b'{ "errorCode": "RequestRoleConfigFileNotFound", "message": "Invalid request." }' + + _http_request.side_effect = [ + Mock(status=httpclient.GONE, reason='Resource Gone', read=read) ] self.assertRaises(ResourceGoneError, restutil.http_get, "https://foo.bar") diff -Nru walinuxagent-2.2.21+really2.2.20/tests/utils/test_shell_util.py walinuxagent-2.2.45/tests/utils/test_shell_util.py --- walinuxagent-2.2.21+really2.2.20/tests/utils/test_shell_util.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/utils/test_shell_util.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,17 +13,40 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from tests.tools import * -import uuid +from azurelinuxagent.common.logger import LogLevel import unittest -import os import azurelinuxagent.common.utils.shellutil as shellutil -import test -class TestrunCmd(AgentTestCase): + +class ShellQuoteTestCase(AgentTestCase): + def test_shellquote(self): + self.assertEqual("\'foo\'", shellutil.quote("foo")) + self.assertEqual("\'foo bar\'", shellutil.quote("foo bar")) + self.assertEqual("'foo'\\''bar'", shellutil.quote("foo\'bar")) + + +class RunTestCase(AgentTestCase): + def test_it_should_return_the_exit_code_of_the_command(self): + exit_code = shellutil.run("exit 123") + self.assertEquals(123, exit_code) + + def test_it_should_be_a_pass_thru_to_run_get_output(self): + with patch.object(shellutil, "run_get_output", return_value=(0, "")) as mock_run_get_output: + shellutil.run("echo hello word!", chk_err=False, expected_errors=[1, 2, 3]) + + self.assertEquals(mock_run_get_output.call_count, 1) + + args, kwargs = mock_run_get_output.call_args + self.assertEquals(args[0], "echo hello word!") + self.assertEquals(kwargs["chk_err"], False) + self.assertEquals(kwargs["expected_errors"], [1, 2, 3]) + + +class RunGetOutputTestCase(AgentTestCase): def test_run_get_output(self): output = shellutil.run_get_output(u"ls /") self.assertNotEquals(None, output) @@ -35,10 +58,150 @@ err = shellutil.run_get_output(u"ls 我") self.assertNotEquals(0, err[0]) - def test_shellquote(self): - self.assertEqual("\'foo\'", shellutil.quote("foo")) - self.assertEqual("\'foo bar\'", shellutil.quote("foo bar")) - self.assertEqual("'foo'\\''bar'", shellutil.quote("foo\'bar")) + def test_it_should_log_the_command(self): + command = "echo hello world!" 
+ + with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger: + shellutil.run_get_output(command) + + self.assertEquals(mock_logger.verbose.call_count, 1) + + args, kwargs = mock_logger.verbose.call_args + command_in_message = args[1] + self.assertEqual(command_in_message, command) + + def test_it_should_log_command_failures_as_errors(self): + return_code = 99 + command = "exit {0}".format(return_code) + + with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger: + shellutil.run_get_output(command, log_cmd=False) + + self.assertEquals(mock_logger.error.call_count, 1) + + args, kwargs = mock_logger.error.call_args + + message = args[0] # message is similar to "Command: [exit 99], return code: [99], result: []" + self.assertIn("[{0}]".format(command), message) + self.assertIn("[{0}]".format(return_code), message) + + self.assertEquals(mock_logger.verbose.call_count, 0) + self.assertEquals(mock_logger.info.call_count, 0) + self.assertEquals(mock_logger.warn.call_count, 0) + + def test_it_should_log_expected_errors_as_info(self): + return_code = 99 + command = "exit {0}".format(return_code) + + with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger: + shellutil.run_get_output(command, log_cmd=False, expected_errors=[return_code]) + + self.assertEquals(mock_logger.info.call_count, 1) + + args, kwargs = mock_logger.info.call_args + + message = args[0] # message is similar to "Command: [exit 99], return code: [99], result: []" + self.assertIn("[{0}]".format(command), message) + self.assertIn("[{0}]".format(return_code), message) + + self.assertEquals(mock_logger.verbose.call_count, 0) + self.assertEquals(mock_logger.warn.call_count, 0) + self.assertEquals(mock_logger.error.call_count, 0) + + def test_it_should_log_unexpected_errors_as_errors(self): + return_code = 99 + command = "exit {0}".format(return_code) + + with patch("azurelinuxagent.common.utils.shellutil.logger", 
autospec=True) as mock_logger: + shellutil.run_get_output(command, log_cmd=False, expected_errors=[return_code + 1]) + + self.assertEquals(mock_logger.error.call_count, 1) + + args, kwargs = mock_logger.error.call_args + + message = args[0] # message is similar to "Command: [exit 99], return code: [99], result: []" + self.assertIn("[{0}]".format(command), message) + self.assertIn("[{0}]".format(return_code), message) + + self.assertEquals(mock_logger.info.call_count, 0) + self.assertEquals(mock_logger.verbose.call_count, 0) + self.assertEquals(mock_logger.warn.call_count, 0) + + +class RunCommandTestCase(AgentTestCase): + def test_run_command_should_execute_the_command(self): + command = ["echo", "-n", "A TEST STRING"] + ret = shellutil.run_command(command) + self.assertEquals(ret, "A TEST STRING") + + def test_run_command_should_raise_an_exception_when_the_command_fails(self): + command = ["ls", "-d", "/etc", "nonexistent_file"] + + with self.assertRaises(shellutil.CommandError) as context_manager: + shellutil.run_command(command) + + exception = context_manager.exception + self.assertEquals(str(exception), "'ls' failed: 2") + self.assertEquals(exception.stdout, "/etc\n") + self.assertIn("No such file or directory", exception.stderr) + self.assertEquals(exception.returncode, 2) + + def test_run_command_should_raise_an_exception_when_it_cannot_execute_the_command(self): + command = "nonexistent_command" + + with self.assertRaises(Exception) as context_manager: + shellutil.run_command(command) + + exception = context_manager.exception + self.assertIn("No such file or directory", str(exception)) + + def test_run_command_it_should_not_log_by_default(self): + + def assert_no_message_logged(command): + try: + shellutil.run_command(command) + except: + pass + + self.assertEquals(mock_logger.info.call_count, 0) + self.assertEquals(mock_logger.verbose.call_count, 0) + self.assertEquals(mock_logger.warn.call_count, 0) + self.assertEquals(mock_logger.error.call_count, 0) + + 
assert_no_message_logged(["ls", "nonexistent_file"]) + assert_no_message_logged("nonexistent_command") + + def test_run_command_it_should_log_an_error_when_log_error_is_set(self): + command = ["ls", "-d", "/etc", "nonexistent_file"] + + with patch("azurelinuxagent.common.utils.shellutil.logger.error") as mock_log_error: + try: + shellutil.run_command(command, log_error=True) + except: + pass + + self.assertEquals(mock_log_error.call_count, 1) + + args, kwargs = mock_log_error.call_args + self.assertIn("ls -d /etc nonexistent_file", args, msg="The command was not logged") + self.assertIn(2, args, msg="The command's return code was not logged") + self.assertIn("/etc\n", args, msg="The command's stdout was not logged") + self.assertTrue(any("No such file or directory" in str(a) for a in args), msg="The command's stderr was not logged") + + command = "nonexistent_command" + + with patch("azurelinuxagent.common.utils.shellutil.logger.error") as mock_log_error: + try: + shellutil.run_command(command, log_error=True) + except: + pass + + self.assertEquals(mock_log_error.call_count, 1) + + args, kwargs = mock_log_error.call_args + self.assertIn(command, args, msg="The command was not logged") + self.assertTrue(any("No such file or directory" in str(a) for a in args), msg="The command's stderr was not logged") + if __name__ == '__main__': unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/utils/test_text_util.py walinuxagent-2.2.45/tests/utils/test_text_util.py --- walinuxagent-2.2.21+really2.2.20/tests/utils/test_text_util.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/tests/utils/test_text_util.py 2019-11-07 00:36:56.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -12,16 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # +from distutils.version import LooseVersion as Version from tests.tools import * -import uuid -import unittest -import os -from azurelinuxagent.common.future import ustr + +import hashlib + import azurelinuxagent.common.utils.textutil as textutil -from azurelinuxagent.common.utils.textutil import Version + +from azurelinuxagent.common.future import ustr class TestTextUtil(AgentTestCase): @@ -43,7 +44,7 @@ data = "abcd\xa0e\xf0fghijk\xbblm" self.assertEqual("abcdXeXfghijkXlm", - textutil.replace_non_ascii(data, replace_char='X')) + textutil.replace_non_ascii(data, replace_char='X')) self.assertEqual('', textutil.replace_non_ascii(None)) @@ -138,6 +139,73 @@ for t in data: self.assertEqual(t[2], textutil.swap_hexstring(t[0], width=t[1])) - + + def test_compress(self): + result = textutil.compress('[stdout]\nHello World\n\n[stderr]\n\n') + self.assertEqual('eJyLLi5JyS8tieXySM3JyVcIzy/KSeHiigaKphYVxXJxAQDAYQr2', result) + + def test_hash_empty_list(self): + result = textutil.hash_strings([]) + self.assertEqual(b'\xda9\xa3\xee^kK\r2U\xbf\xef\x95`\x18\x90\xaf\xd8\x07\t', result) + + def test_hash_list(self): + test_list = ["abc", "123"] + result_from_list = textutil.hash_strings(test_list) + + test_string = "".join(test_list) + hash_from_string = hashlib.sha1() + hash_from_string.update(test_string.encode()) + + self.assertEqual(result_from_list, hash_from_string.digest()) + self.assertEqual(hash_from_string.hexdigest(), '6367c48dd193d56ea7b0baad25b19455e529f5ee') + + def test_empty_strings(self): + self.assertTrue(textutil.is_str_none_or_whitespace(None)) + self.assertTrue(textutil.is_str_none_or_whitespace(' ')) + self.assertTrue(textutil.is_str_none_or_whitespace('\t')) + self.assertTrue(textutil.is_str_none_or_whitespace('\n')) + 
self.assertTrue(textutil.is_str_none_or_whitespace(' \t')) + self.assertTrue(textutil.is_str_none_or_whitespace(' \r\n')) + + self.assertTrue(textutil.is_str_empty(None)) + self.assertTrue(textutil.is_str_empty(' ')) + self.assertTrue(textutil.is_str_empty('\t')) + self.assertTrue(textutil.is_str_empty('\n')) + self.assertTrue(textutil.is_str_empty(' \t')) + self.assertTrue(textutil.is_str_empty(' \r\n')) + + self.assertFalse(textutil.is_str_none_or_whitespace(u' \x01 ')) + self.assertFalse(textutil.is_str_none_or_whitespace(u'foo')) + self.assertFalse(textutil.is_str_none_or_whitespace('bar')) + + self.assertFalse(textutil.is_str_empty(u' \x01 ')) + self.assertFalse(textutil.is_str_empty(u'foo')) + self.assertFalse(textutil.is_str_empty('bar')) + + hex_null_1 = u'\x00' + hex_null_2 = u' \x00 ' + + self.assertFalse(textutil.is_str_none_or_whitespace(hex_null_1)) + self.assertFalse(textutil.is_str_none_or_whitespace(hex_null_2)) + + self.assertTrue(textutil.is_str_empty(hex_null_1)) + self.assertTrue(textutil.is_str_empty(hex_null_2)) + + self.assertNotEqual(textutil.is_str_none_or_whitespace(hex_null_1), textutil.is_str_empty(hex_null_1)) + self.assertNotEqual(textutil.is_str_none_or_whitespace(hex_null_2), textutil.is_str_empty(hex_null_2)) + + def test_format_memory_value(self): + """ + Test formatting of memory amounts into human-readable units + """ + self.assertEqual(2048, textutil.format_memory_value('kilobytes', 2)) + self.assertEqual(0, textutil.format_memory_value('kilobytes', 0)) + self.assertEqual(2048000, textutil.format_memory_value('kilobytes', 2000)) + self.assertEqual(2048 * 1024, textutil.format_memory_value('megabytes', 2)) + self.assertEqual((1024 + 512) * 1024 * 1024, textutil.format_memory_value('gigabytes', 1.5)) + self.assertRaises(ValueError, textutil.format_memory_value, 'KiloBytes', 1) + self.assertRaises(TypeError, textutil.format_memory_value, 'bytes', None) + + if __name__ == '__main__': unittest.main() diff -Nru 
walinuxagent-2.2.21+really2.2.20/.travis.yml walinuxagent-2.2.45/.travis.yml --- walinuxagent-2.2.21+really2.2.20/.travis.yml 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.45/.travis.yml 2019-11-07 00:36:56.000000000 +0000 @@ -1,14 +1,43 @@ +--- +os: linux +dist: xenial language: python -python: - - "2.6" - - "2.7" - #- "3.2" - #- "3.3" - - "3.4" -# command to install dependencies +env: + - NOSEOPTS="--verbose" SETUPOPTS="" + # Add SETUPOPTS="check flake8" to enable flake8 checks + +matrix: + # exclude the default "python" build - we're being specific here... + exclude: + - python: + env: + - NOSEOPTS="" SETUPOPTS="check flake8" + + include: + - python: 2.6 + dist: trusty + env: + - NOSEOPTS="--verbose" SETUPOPTS="" + - python: 2.7 + - python: 3.4 + - python: 3.6 + - python: 3.7 + env: + - >- + NOSEOPTS="--verbose --with-coverage --cover-inclusive + --cover-min-percentage=60 --cover-branches + --cover-package=azurelinuxagent --cover-xml" + SETUPOPTS="" + install: - #- pip install . - #- pip install -r requirements.txt - - pip install pyasn1 -# command to run tests -script: nosetests tests + - pip install -r requirements.txt + - pip install -r test-requirements.txt + +script: + # future: - pylint setup.py makepkg.py azurelinuxagent/ + - nosetests $NOSEOPTS --attr '!requires_sudo' tests + - sudo env "PATH=$PATH" nosetests $NOSEOPTS --verbose --attr 'requires_sudo' tests + - if [ ! -z "$SETUPOPTS" ]; then /usr/bin/env python setup.py $SETUPOPTS; fi + +after_success: + - if [[ $TRAVIS_PYTHON_VERSION == 3.7 ]]; then codecov; fi \ No newline at end of file