diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/agent.py walinuxagent-2.2.32/azurelinuxagent/agent.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/agent.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/agent.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # """ @@ -37,6 +37,7 @@ PY_VERSION_MAJOR, PY_VERSION_MINOR, \ PY_VERSION_MICRO, GOAL_STATE_AGENT_VERSION from azurelinuxagent.common.osutil import get_osutil +from azurelinuxagent.common.utils import fileutil class Agent(object): def __init__(self, verbose, conf_file_path=None): @@ -63,13 +64,17 @@ path="/var/log/waagent.log") logger.add_logger_appender(logger.AppenderType.CONSOLE, level, path="/dev/console") + # See issue #1035 + # logger.add_logger_appender(logger.AppenderType.TELEMETRY, + # logger.LogLevel.WARNING, + # path=event.add_log_event) ext_log_dir = conf.get_ext_log_dir() try: if os.path.isfile(ext_log_dir): raise Exception("{0} is a file".format(ext_log_dir)) if not os.path.isdir(ext_log_dir): - os.makedirs(ext_log_dir) + fileutil.mkdir(ext_log_dir, mode=0o755, owner="root") except Exception as e: logger.error( "Exception occurred while creating extension " @@ -85,6 +90,7 @@ """ Run agent daemon """ + logger.set_prefix("Daemon") child_args = None \ if self.conf_file_path is None \ else "-configuration-path:{0}".format(self.conf_file_path) @@ -124,6 +130,7 @@ """ Run the update and extension handler """ + logger.set_prefix("ExtHandler") from azurelinuxagent.ga.update import get_update_handler update_handler = get_update_handler() update_handler.run() @@ -144,7 +151,7 @@ if command == "version": version() elif command == "help": - usage() + print(usage()) elif command == "start": start(conf_file_path=conf_file_path) else: @@ -228,15 +235,16 @@ def usage(): """ - Show agent usage + Return agent usage message """ - print("") - print((("usage: {0} [-verbose] [-force] [-help] " + s = "\n" + s += ("usage: {0} [-verbose] [-force] [-help] " "-configuration-path:" "-deprovision[+user]|-register-service|-version|-daemon|-start|" - "-run-exthandlers]" - "").format(sys.argv[0]))) - print("") + "-run-exthandlers|-show-configuration]" + "").format(sys.argv[0]) + s += "\n" + return s def start(conf_file_path=None): """ diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/cgroups.py walinuxagent-2.2.32/azurelinuxagent/common/cgroups.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/cgroups.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/cgroups.py 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,779 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.6+ and Openssl 1.0+ + +import errno +import os +import re + +import time + +from azurelinuxagent.common import logger, conf +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.osutil import get_osutil +from azurelinuxagent.common.osutil.default import BASE_CGROUPS +from azurelinuxagent.common.utils import fileutil +from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION + + +WRAPPER_CGROUP_NAME = "Agent+Extensions" +METRIC_HIERARCHIES = ['cpu', 'memory'] +MEMORY_DEFAULT = -1 + +# percentage of a single core +DEFAULT_CPU_LIMIT_AGENT = 10 +DEFAULT_CPU_LIMIT_EXT = 40 + +DEFAULT_MEM_LIMIT_MIN_MB = 256 # mb, applies to agent and extensions +DEFAULT_MEM_LIMIT_MAX_MB = 512 # mb, applies to agent only +DEFAULT_MEM_LIMIT_PCT = 15 # percent, applies to extensions + +re_user_system_times = re.compile('user (\d+)\nsystem (\d+)\n') + +related_services = { + "Microsoft.OSTCExtensions.LinuxDiagnostic": ["omid", "omsagent-LAD", "mdsd-lde"], + "Microsoft.Azure.Diagnostics.LinuxDiagnostic": ["omid", "omsagent-LAD", "mdsd-lde"], +} + + +class CGroupsException(Exception): + + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return repr(self.msg) + + +# The metric classes (Cpu, Memory, etc) can all assume that CGroups is enabled, as the CGroupTelemetry +# class is very careful not to call them if CGroups isn't enabled. Any tests should be disabled if the osutil +# is_cgroups_support() method returns false. + + +class Cpu(object): + def __init__(self, cgt): + """ + Initialize data collection for the Cpu hierarchy. User must call update() before attempting to get + any useful metrics. + + :param cgt: CGroupsTelemetry + :return: + """ + self.cgt = cgt + self.osutil = get_osutil() + self.current_cpu_total = self.get_current_cpu_total() + self.previous_cpu_total = 0 + self.current_system_cpu = self.osutil.get_total_cpu_ticks_since_boot() + self.previous_system_cpu = 0 + + def __str__(self): + return "Cgroup: Current {0}, previous {1}; System: Current {2}, previous {3}".format( + self.current_cpu_total, self.previous_cpu_total, self.current_system_cpu, self.previous_system_cpu + ) + + def get_current_cpu_total(self): + """ + Compute the number of USER_HZ of CPU time (user and system) consumed by this cgroup since boot. + + :return: int + """ + cpu_total = 0 + try: + cpu_stat = self.cgt.cgroup.\ + get_file_contents('cpu', 'cpuacct.stat') + if cpu_stat is not None: + m = re_user_system_times.match(cpu_stat) + if m: + cpu_total = int(m.groups()[0]) + int(m.groups()[1]) + except CGroupsException: + # There are valid reasons for file contents to be unavailable; for example, if an extension + # has not yet started (or has stopped) an associated service on a VM using systemd, the cgroup for + # the service will not exist ('cause systemd will tear it down). This might be a transient or a + # long-lived state, so there's no point in logging it, much less emitting telemetry. + pass + return cpu_total + + def update(self): + """ + Update all raw data required to compute metrics of interest. The intent is to call update() once, then + call the various get_*() methods which use this data, which we've collected exactly once. 
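To make the cpuacct arithmetic above concrete, here is a small illustrative sketch of the parsing done by get_current_cpu_total(): the same re_user_system_times pattern is applied to the contents of the cgroup's cpuacct.stat file and the two counters (user and system ticks) are summed. The sample file contents below are hypothetical.

import re

# Pattern copied from the module above; cpuacct.stat holds two USER_HZ counters.
re_user_system_times = re.compile('user (\d+)\nsystem (\d+)\n')

cpu_stat = "user 581\nsystem 305\n"   # hypothetical contents of a cgroup's cpuacct.stat
m = re_user_system_times.match(cpu_stat)
if m:
    cpu_total = int(m.groups()[0]) + int(m.groups()[1])
    print(cpu_total)                  # 886 ticks consumed by the cgroup since boot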
+ """ + self.previous_cpu_total = self.current_cpu_total + self.previous_system_cpu = self.current_system_cpu + self.current_cpu_total = self.get_current_cpu_total() + self.current_system_cpu = self.osutil.get_total_cpu_ticks_since_boot() + + def get_cpu_percent(self): + """ + Compute the percent CPU time used by this cgroup over the elapsed time since the last time this instance was + update()ed. If the cgroup fully consumed 2 cores on a 4 core system, return 200. + + :return: CPU usage in percent of a single core + :rtype: float + """ + cpu_delta = self.current_cpu_total - self.previous_cpu_total + system_delta = max(1, self.current_system_cpu - self.previous_system_cpu) + + return round(float(cpu_delta * self.cgt.cpu_count * 100) / float(system_delta), 3) + + def collect(self): + """ + Collect and return a list of all cpu metrics. If no metrics are collected, return an empty list. + + :rtype: [(str, str, float)] + """ + self.update() + usage = self.get_cpu_percent() + return [("Process", "% Processor Time", usage)] + + +class Memory(object): + def __init__(self, cgt): + """ + Initialize data collection for the Memory hierarchy + + :param CGroupsTelemetry cgt: The telemetry object for which memory metrics should be collected + :return: + """ + self.cgt = cgt + + def get_memory_usage(self): + """ + Collect memory.usage_in_bytes from the cgroup. + + :return: Memory usage in bytes + :rtype: int + """ + usage = self.cgt.cgroup.get_parameter('memory', 'memory.usage_in_bytes') + if not usage: + usage = "0" + return int(usage) + + def collect(self): + """ + Collect and return a list of all memory metrics + + :rtype: [(str, str, float)] + """ + usage = self.get_memory_usage() + return [("Memory", "Total Memory Usage", usage)] + + +class CGroupsTelemetry(object): + """ + Encapsulate the cgroup-based telemetry for the agent or one of its extensions, or for the aggregation across + the agent and all of its extensions. These objects should have lifetimes that span the time window over which + measurements are desired; in general, they're not terribly effective at providing instantaneous measurements. + """ + _tracked = {} + _metrics = { + "cpu": Cpu, + "memory": Memory + } + _hierarchies = list(_metrics.keys()) + tracked_names = set() + + @staticmethod + def metrics_hierarchies(): + return CGroupsTelemetry._hierarchies + + @staticmethod + def track_cgroup(cgroup): + """ + Create a CGroupsTelemetry object to track a particular CGroups instance. Typical usage: + 1) Create a CGroups object + 2) Ask CGroupsTelemetry to track it + 3) Tell the CGroups object to add one or more processes (or let systemd handle that, for its cgroups) + + :param CGroups cgroup: The cgroup to track + """ + name = cgroup.name + if CGroups.enabled() and not CGroupsTelemetry.is_tracked(name): + tracker = CGroupsTelemetry(name, cgroup=cgroup) + CGroupsTelemetry._tracked[name] = tracker + + @staticmethod + def track_systemd_service(name): + """ + If not already tracking it, create the CGroups object for a systemd service and track it. + + :param str name: Service name (without .service suffix) to be tracked. 
+ """ + service_name = "{0}.service".format(name).lower() + if CGroups.enabled() and not CGroupsTelemetry.is_tracked(service_name): + cgroup = CGroups.for_systemd_service(service_name) + tracker = CGroupsTelemetry(service_name, cgroup=cgroup) + CGroupsTelemetry._tracked[service_name] = tracker + + @staticmethod + def track_extension(name, cgroup=None): + """ + Create all required CGroups to track all metrics for an extension and its associated services. + + :param str name: Full name of the extension to be tracked + :param CGroups cgroup: CGroup for the extension itself. This method will create it if none is supplied. + """ + if not CGroups.enabled(): + return + + if not CGroupsTelemetry.is_tracked(name): + cgroup = CGroups.for_extension(name) if cgroup is None else cgroup + logger.info("Now tracking cgroup {0}".format(name)) + cgroup.set_limits() + CGroupsTelemetry.track_cgroup(cgroup) + if CGroups.is_systemd_manager(): + if name in related_services: + for service_name in related_services[name]: + CGroupsTelemetry.track_systemd_service(service_name) + + @staticmethod + def track_agent(): + """ + Create and track the correct cgroup for the agent itself. The actual cgroup depends on whether systemd + is in use, but the caller doesn't need to know that. + """ + if not CGroups.enabled(): + return + if CGroups.is_systemd_manager(): + CGroupsTelemetry.track_systemd_service(AGENT_NAME) + else: + CGroupsTelemetry.track_cgroup(CGroups.for_extension(AGENT_NAME)) + + @staticmethod + def is_tracked(name): + return name in CGroupsTelemetry._tracked + + @staticmethod + def stop_tracking(name): + """ + Stop tracking telemetry for the CGroups associated with an extension. If any system services are being + tracked, those will continue to be tracked; multiple extensions might rely upon the same service. + + :param str name: Extension to be dropped from tracking + """ + if CGroupsTelemetry.is_tracked(name): + del (CGroupsTelemetry._tracked[name]) + + @staticmethod + def collect_all_tracked(): + """ + Return a dictionary mapping from the name of a tracked cgroup to the list of collected metrics for that cgroup. + Collecting metrics is not guaranteed to be a fast operation; it's possible some other thread might add or remove + tracking for a cgroup while we're doing it. To avoid "dictionary changed size during iteration" exceptions, + work from a shallow copy of the _tracked dictionary. + + :returns: Dictionary of list collected metrics (metric class, metric name, value), by cgroup + :rtype: dict(str: [(str, str, float)]) + """ + results = {} + for cgroup_name, collector in CGroupsTelemetry._tracked.copy().items(): + cgroup_name = cgroup_name if cgroup_name else WRAPPER_CGROUP_NAME + results[cgroup_name] = collector.collect() + return results + + @staticmethod + def update_tracked(ext_handlers): + """ + Track CGroups for all enabled extensions. + Track CGroups for services created by enabled extensions. + Stop tracking CGroups for not-enabled extensions. 
+ + :param List(ExtHandler) ext_handlers: + """ + if not CGroups.enabled(): + return + + not_enabled_extensions = set() + for extension in ext_handlers: + if extension.properties.state == u"enabled": + CGroupsTelemetry.track_extension(extension.name) + else: + not_enabled_extensions.add(extension.name) + + names_now_tracked = set(CGroupsTelemetry._tracked.keys()) + if CGroupsTelemetry.tracked_names != names_now_tracked: + now_tracking = " ".join("[{0}]".format(name) for name in sorted(names_now_tracked)) + if len(now_tracking): + logger.info("After updating cgroup telemetry, tracking {0}".format(now_tracking)) + else: + logger.warn("After updating cgroup telemetry, tracking no cgroups.") + CGroupsTelemetry.tracked_names = names_now_tracked + + def __init__(self, name, cgroup=None): + """ + Create the necessary state to collect metrics for the agent, one of its extensions, or the aggregation across + the agent and all of its extensions. To access aggregated metrics, instantiate this object with an empty string + or None. + + :param name: str + """ + if name is None: + name = "" + self.name = name + if cgroup is None: + cgroup = CGroups.for_extension(name) + self.cgroup = cgroup + self.cpu_count = CGroups.get_num_cores() + self.current_wall_time = time.time() + self.previous_wall_time = 0 + + self.data = {} + if CGroups.enabled(): + for hierarchy in CGroupsTelemetry.metrics_hierarchies(): + self.data[hierarchy] = CGroupsTelemetry._metrics[hierarchy](self) + + def collect(self): + """ + Return a list of collected metrics. Each element is a tuple of + (metric group name, metric name, metric value) + :return: [(str, str, float)] + """ + results = [] + for collector in self.data.values(): + results.extend(collector.collect()) + return results + + +class CGroups(object): + """ + This class represents the cgroup folders for the agent or an extension. This is a pretty lightweight object + without much state worth preserving; it's not unreasonable to create one just when you need it. + """ + # whether cgroup support is enabled + _enabled = True + _hierarchies = CGroupsTelemetry.metrics_hierarchies() + _use_systemd = None # Tri-state: None (i.e. "unknown"), True, False + _osutil = get_osutil() + + @staticmethod + def _construct_custom_path_for_hierarchy(hierarchy, cgroup_name): + return os.path.join(BASE_CGROUPS, hierarchy, AGENT_NAME, cgroup_name).rstrip(os.path.sep) + + @staticmethod + def _construct_systemd_path_for_hierarchy(hierarchy, cgroup_name): + return os.path.join(BASE_CGROUPS, hierarchy, 'system.slice', cgroup_name).rstrip(os.path.sep) + + @staticmethod + def for_extension(name): + return CGroups(name, CGroups._construct_custom_path_for_hierarchy) + + @staticmethod + def for_systemd_service(name): + return CGroups(name.lower(), CGroups._construct_systemd_path_for_hierarchy) + + @staticmethod + def enabled(): + return CGroups._osutil.is_cgroups_supported() and CGroups._enabled + + @staticmethod + def disable(): + CGroups._enabled = False + + @staticmethod + def enable(): + CGroups._enabled = True + + def __init__(self, name, path_maker): + """ + Construct CGroups object. Create appropriately-named directory for each hierarchy of interest. 
+ + :param str name: Name for the cgroup (usually the full name of the extension) + :param path_maker: Function which constructs the root path for a given hierarchy where this cgroup lives + """ + if name == "": + self.name = "Agents+Extensions" + self.is_wrapper_cgroup = True + else: + self.name = name + self.is_wrapper_cgroup = False + + self.cgroups = {} + + if not self.enabled(): + return + + system_hierarchies = os.listdir(BASE_CGROUPS) + for hierarchy in CGroups._hierarchies: + if hierarchy not in system_hierarchies: + self.disable() + raise CGroupsException("Hierarchy {0} is not mounted".format(hierarchy)) + + cgroup_name = "" if self.is_wrapper_cgroup else self.name + cgroup_path = path_maker(hierarchy, cgroup_name) + if not os.path.isdir(cgroup_path): + logger.info("Creating cgroup directory {0}".format(cgroup_path)) + CGroups._try_mkdir(cgroup_path) + self.cgroups[hierarchy] = cgroup_path + + @staticmethod + def is_systemd_manager(): + """ + Determine if systemd is managing system services. Many extensions are structured as a set of services, + including the agent itself; systemd expects those services to remain in the cgroups in which it placed them. + If this process (presumed to be the agent) is in a cgroup that looks like one created by systemd, we can + assume systemd is in use. + + :return: True if systemd is managing system services + :rtype: Bool + """ + if not CGroups.enabled(): + return False + if CGroups._use_systemd is None: + hierarchy = METRIC_HIERARCHIES[0] + path = CGroups.get_my_cgroup_folder(hierarchy) + CGroups._use_systemd = path.startswith(CGroups._construct_systemd_path_for_hierarchy(hierarchy, "")) + return CGroups._use_systemd + + @staticmethod + def _try_mkdir(path): + """ + Try to create a directory, recursively. If it already exists as such, do nothing. Raise the appropriate + exception should an error occur. + + :param path: str + """ + if not os.path.isdir(path): + try: + os.makedirs(path, 0o755) + except OSError as e: + if e.errno == errno.EEXIST: + if not os.path.isdir(path): + raise CGroupsException("Create directory for cgroup {0}: " + "normal file already exists with that name".format(path)) + else: + pass # There was a race to create the directory, but it's there now, and that's fine + elif e.errno == errno.EACCES: + # This is unexpected, as the agent runs as root + raise CGroupsException("Create directory for cgroup {0}: permission denied".format(path)) + else: + raise + + def add(self, pid): + """ + Add a process to the cgroups for this agent/extension. 
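As a rough sketch of what the add(pid) method above boils down to for a single hierarchy, moving a process into a cgroup is an append of its PID to that cgroup's cgroup.procs file. The path below is hypothetical and assumes a cgroup v1 filesystem mounted under /sys/fs/cgroup; in the agent the path is built from BASE_CGROUPS, the hierarchy name and the extension name.

import os

# Hypothetical cgroup directory for the 'cpu' hierarchy.
cgroup_dir = "/sys/fs/cgroup/cpu/WALinuxAgent/Example.Extension"
procs_file = os.path.join(cgroup_dir, "cgroup.procs")

# Appending a PID to cgroup.procs moves that process into the cgroup
# (requires the directory to exist and the caller to have write permission).
with open(procs_file, "a") as procs:
    procs.write("{0}\n".format(os.getpid()))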
+ """ + if not self.enabled(): + return + + if self.is_wrapper_cgroup: + raise CGroupsException("Cannot add a process to the Agents+Extensions wrapper cgroup") + + if not self._osutil.check_pid_alive(pid): + raise CGroupsException('PID {0} does not exist'.format(pid)) + for hierarchy, cgroup in self.cgroups.items(): + tasks_file = self._get_cgroup_file(hierarchy, 'cgroup.procs') + fileutil.append_file(tasks_file, "{0}\n".format(pid)) + + def set_limits(self): + """ + Set per-hierarchy limits based on the cgroup name (agent or particular extension) + """ + + if not conf.get_cgroups_enforce_limits(): + return + + if self.name is None: + return + + for ext in conf.get_cgroups_excluded(): + if ext in self.name.lower(): + logger.info('No cgroups limits for {0}'.format(self.name)) + return + + # default values + cpu_limit = DEFAULT_CPU_LIMIT_EXT + mem_limit = max(DEFAULT_MEM_LIMIT_MIN_MB, round(self._osutil.get_total_mem() * DEFAULT_MEM_LIMIT_PCT / 100, 0)) + + # agent values + if AGENT_NAME.lower() in self.name.lower(): + cpu_limit = DEFAULT_CPU_LIMIT_AGENT + mem_limit = min(DEFAULT_MEM_LIMIT_MAX_MB, mem_limit) + + msg = '{0}: {1}% {2}mb'.format(self.name, cpu_limit, mem_limit) + logger.info("Setting cgroups limits for {0}".format(msg)) + success = False + + try: + self.set_cpu_limit(cpu_limit) + self.set_memory_limit(mem_limit) + success = True + except Exception as ge: + msg = '[{0}] {1}'.format(msg, ustr(ge)) + raise + finally: + from azurelinuxagent.common.event import add_event, WALAEventOperation + add_event( + AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.SetCGroupsLimits, + is_success=success, + message=msg, + log_event=False) + + @staticmethod + def _apply_wrapper_limits(path, hierarchy): + """ + Find wrapping limits for the hierarchy and apply them to the cgroup denoted by the path + + :param path: str + :param hierarchy: str + """ + pass + + @staticmethod + def _setup_wrapper_groups(): + """ + For each hierarchy, construct the wrapper cgroup and apply the appropriate limits + """ + for hierarchy in METRIC_HIERARCHIES: + root_dir = CGroups._construct_custom_path_for_hierarchy(hierarchy, "") + CGroups._try_mkdir(root_dir) + CGroups._apply_wrapper_limits(root_dir, hierarchy) + + @staticmethod + def setup(suppress_process_add=False): + """ + Only needs to be called once, and should be called from the -daemon instance of the agent. 
+ Mount the cgroup fs if necessary + Create wrapper cgroups for agent-plus-extensions and set limits on them; + Add this process to the "agent" cgroup, if required + Actual collection of metrics from cgroups happens in the -run-exthandlers instance + """ + if CGroups.enabled(): + try: + CGroups._osutil.mount_cgroups() + if not suppress_process_add: + CGroups._setup_wrapper_groups() + pid = int(os.getpid()) + if not CGroups.is_systemd_manager(): + cg = CGroups.for_extension(AGENT_NAME) + logger.info("Add daemon process pid {0} to {1} cgroup".format(pid, cg.name)) + cg.add(pid) + cg.set_limits() + else: + cg = CGroups.for_systemd_service(AGENT_NAME) + logger.info("Add daemon process pid {0} to {1} systemd cgroup".format(pid, cg.name)) + # systemd sets limits; any limits we write would be overwritten + status = "ok" + except CGroupsException as cge: + status = cge.msg + CGroups.disable() + except Exception as ge: + status = ustr(ge) + CGroups.disable() + else: + status = "not supported by platform" + CGroups.disable() + + logger.info("CGroups: {0}".format(status)) + + from azurelinuxagent.common.event import add_event, WALAEventOperation + add_event( + AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.InitializeCGroups, + is_success=CGroups.enabled(), + message=status, + log_event=False) + + @staticmethod + def add_to_extension_cgroup(name, pid=int(os.getpid())): + """ + Create cgroup directories for this extension in each of the hierarchies and add this process to the new cgroup. + Should only be called when creating sub-processes and invoked inside the fork/exec window. As a result, + there's no point in returning the CGroups object itself; the goal is to move the child process into the + cgroup before the new code even starts running. + + :param str name: Short name of extension, suitable for naming directories in the filesystem + :param int pid: Process id of extension to be added to the cgroup + """ + if not CGroups.enabled(): + return + if name == AGENT_NAME: + logger.warn('Extension cgroup name cannot match agent cgroup name ({0})'.format(AGENT_NAME)) + return + + try: + logger.info("Move process {0} into cgroups for extension {1}".format(pid, name)) + CGroups.for_extension(name).add(pid) + except Exception as ex: + logger.warn("Unable to move process {0} into cgroups for extension {1}: {2}".format(pid, name, ex)) + + @staticmethod + def get_my_cgroup_path(hierarchy_id): + """ + Get the cgroup path "suffix" for this process for the given hierarchy ID. The leading "/" is always stripped, + so the suffix is suitable for passing to os.path.join(). (If the process is in the root cgroup, an empty + string is returned, and os.path.join() will still do the right thing.) 
+ + :param hierarchy_id: str + :return: str + """ + cgroup_paths = fileutil.read_file("/proc/self/cgroup") + for entry in cgroup_paths.splitlines(): + fields = entry.split(':') + if fields[0] == hierarchy_id: + return fields[2].lstrip(os.path.sep) + raise CGroupsException("This process belongs to no cgroup for hierarchy ID {0}".format(hierarchy_id)) + + @staticmethod + def get_hierarchy_id(hierarchy): + """ + Get the cgroups hierarchy ID for a given hierarchy name + + :param hierarchy: + :return: str + """ + cgroup_states = fileutil.read_file("/proc/cgroups") + for entry in cgroup_states.splitlines(): + fields = entry.split('\t') + if fields[0] == hierarchy: + return fields[1] + raise CGroupsException("Cgroup hierarchy {0} not found in /proc/cgroups".format(hierarchy)) + + @staticmethod + def get_my_cgroup_folder(hierarchy): + """ + Find the path of the cgroup in which this process currently lives for the given hierarchy. + + :param hierarchy: str + :return: str + """ + hierarchy_id = CGroups.get_hierarchy_id(hierarchy) + return os.path.join(BASE_CGROUPS, hierarchy, CGroups.get_my_cgroup_path(hierarchy_id)) + + def _get_cgroup_file(self, hierarchy, file_name): + return os.path.join(self.cgroups[hierarchy], file_name) + + @staticmethod + def _convert_cpu_limit_to_fraction(value): + """ + Convert a CPU limit from percent (e.g. 50 meaning 50%) to a decimal fraction (0.50). + :return: Fraction of one CPU to be made available (e.g. 0.5 means half a core) + :rtype: float + """ + try: + limit = float(value) + except ValueError: + raise CGroupsException('CPU Limit must be convertible to a float') + + if limit <= float(0) or limit > float(CGroups.get_num_cores() * 100): + raise CGroupsException('CPU Limit must be between 0 and 100 * numCores') + + return limit / 100.0 + + def get_file_contents(self, hierarchy, file_name): + """ + Retrieve the value of a parameter from a hierarchy. + + :param str hierarchy: Name of cgroup metric hierarchy + :param str file_name: Name of file within that metric hierarchy + :return: Entire contents of the file + :rtype: str + """ + if hierarchy in self.cgroups: + parameter_file = self._get_cgroup_file(hierarchy, file_name) + + try: + return fileutil.read_file(parameter_file) + except Exception: + raise CGroupsException("Could not retrieve cgroup file {0}/{1}".format(hierarchy, file_name)) + else: + raise CGroupsException("{0} subsystem not available in cgroup {1}. cgroup paths: {2}".format( + hierarchy, self.name, self.cgroups)) + + def get_parameter(self, hierarchy, parameter_name): + """ + Retrieve the value of a parameter from a hierarchy. + Assumes the parameter is the sole line of the file. 
+ + :param str hierarchy: Name of cgroup metric hierarchy + :param str parameter_name: Name of file within that metric hierarchy + :return: The first line of the file, without line terminator + :rtype: str + """ + result = "" + try: + values = self.get_file_contents(hierarchy, parameter_name).splitlines() + result = values[0] + except IndexError: + parameter_filename = self._get_cgroup_file(hierarchy, parameter_name) + logger.error("File {0} is empty but should not be".format(parameter_filename)) + except CGroupsException as e: + # ignore if the file does not exist yet + pass + except Exception as e: + parameter_filename = self._get_cgroup_file(hierarchy, parameter_name) + logger.error("Exception while attempting to read {0}: {1}".format(parameter_filename, ustr(e))) + return result + + def set_cpu_limit(self, limit=None): + """ + Limit this cgroup to a percentage of a single core. limit=10 means 10% of one core; 150 means 150%, which + is useful only in multi-core systems. + To limit a cgroup to utilize 10% of a single CPU, use the following commands: + # echo 10000 > /cgroup/cpu/red/cpu.cfs_quota_us + # echo 100000 > /cgroup/cpu/red/cpu.cfs_period_us + + :param limit: + """ + if not CGroups.enabled(): + return + + if limit is None: + return + + if 'cpu' in self.cgroups: + total_units = float(self.get_parameter('cpu', 'cpu.cfs_period_us')) + limit_units = int(self._convert_cpu_limit_to_fraction(limit) * total_units) + cpu_shares_file = self._get_cgroup_file('cpu', 'cpu.cfs_quota_us') + logger.verbose("writing {0} to {1}".format(limit_units, cpu_shares_file)) + fileutil.write_file(cpu_shares_file, '{0}\n'.format(limit_units)) + else: + raise CGroupsException("CPU hierarchy not available in this cgroup") + + @staticmethod + def get_num_cores(): + """ + Return the number of CPU cores exposed to this system. + + :return: int + """ + return CGroups._osutil.get_processor_cores() + + @staticmethod + def _format_memory_value(unit, limit=None): + units = {'bytes': 1, 'kilobytes': 1024, 'megabytes': 1024*1024, 'gigabytes': 1024*1024*1024} + if unit not in units: + raise CGroupsException("Unit must be one of {0}".format(units.keys())) + if limit is None: + value = MEMORY_DEFAULT + else: + try: + limit = float(limit) + except ValueError: + raise CGroupsException('Limit must be convertible to a float') + else: + value = int(limit * units[unit]) + return value + + def set_memory_limit(self, limit=None, unit='megabytes'): + if 'memory' in self.cgroups: + value = self._format_memory_value(unit, limit) + memory_limit_file = self._get_cgroup_file('memory', 'memory.limit_in_bytes') + logger.verbose("writing {0} to {1}".format(value, memory_limit_file)) + fileutil.write_file(memory_limit_file, '{0}\n'.format(value)) + else: + raise CGroupsException("Memory hierarchy not available in this cgroup") diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/conf.py walinuxagent-2.2.32/azurelinuxagent/common/conf.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/conf.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/conf.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # """ @@ -26,6 +26,8 @@ import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.common.exception import AgentConfigError +DISABLE_AGENT_FILE = 'disable_agent' + class ConfigurationProvider(object): """ @@ -40,11 +42,11 @@ raise AgentConfigError("Can't not parse empty configuration") for line in content.split('\n'): if not line.startswith("#") and "=" in line: - parts = line.split('=') + parts = line.split('=', 1) if len(parts) < 2: continue key = parts[0].strip() - value = parts[1].split('#')[0].strip("\" ") + value = parts[1].split('#')[0].strip("\" ").strip() self.values[key] = value if value != "None" else None def get(self, key, default_val): @@ -85,59 +87,64 @@ raise AgentConfigError(("Failed to load conf file:{0}, {1}" "").format(conf_file_path, err)) + __SWITCH_OPTIONS__ = { - "OS.AllowHTTP" : False, - "OS.EnableFirewall" : False, - "OS.EnableFIPS" : False, - "OS.EnableRDMA" : False, - "OS.UpdateRdmaDriver" : False, - "OS.CheckRdmaDriver" : False, - "Logs.Verbose" : False, - "Provisioning.Enabled" : True, - "Provisioning.UseCloudInit" : False, - "Provisioning.AllowResetSysUser" : False, - "Provisioning.RegenerateSshHostKeyPair" : False, - "Provisioning.DeleteRootPassword" : False, - "Provisioning.DecodeCustomData" : False, - "Provisioning.ExecuteCustomData" : False, - "Provisioning.MonitorHostName" : False, - "DetectScvmmEnv" : False, - "ResourceDisk.Format" : False, - "DetectScvmmEnv" : False, - "ResourceDisk.Format" : False, - "ResourceDisk.EnableSwap" : False, - "AutoUpdate.Enabled" : True, - "EnableOverProvisioning" : False + "OS.AllowHTTP": False, + "OS.EnableFirewall": False, + "OS.EnableFIPS": False, + "OS.EnableRDMA": False, + "OS.UpdateRdmaDriver": False, + "OS.CheckRdmaDriver": False, + "Logs.Verbose": False, + "Extensions.Enabled": True, + "Provisioning.Enabled": True, + "Provisioning.UseCloudInit": False, + "Provisioning.AllowResetSysUser": False, + "Provisioning.RegenerateSshHostKeyPair": False, + "Provisioning.DeleteRootPassword": False, + "Provisioning.DecodeCustomData": False, + "Provisioning.ExecuteCustomData": False, + "Provisioning.MonitorHostName": False, + "DetectScvmmEnv": False, + "ResourceDisk.Format": False, + "ResourceDisk.EnableSwap": False, + "AutoUpdate.Enabled": True, + "EnableOverProvisioning": True, + "CGroups.EnforceLimits": False, } + __STRING_OPTIONS__ = { - "Lib.Dir" : "/var/lib/waagent", - "DVD.MountPoint" : "/mnt/cdrom/secure", - "Pid.File" : "/var/run/waagent.pid", - "Extension.LogDir" : "/var/log/azure", - "OS.OpensslPath" : "/usr/bin/openssl", - "OS.SshDir" : "/etc/ssh", - "OS.HomeDir" : "/home", - "OS.PasswordPath" : "/etc/shadow", - "OS.SudoersDir" : "/etc/sudoers.d", - "OS.RootDeviceScsiTimeout" : None, - "Provisioning.SshHostKeyPairType" : "rsa", - "Provisioning.PasswordCryptId" : "6", - "HttpProxy.Host" : None, - "ResourceDisk.MountPoint" : "/mnt/resource", - "ResourceDisk.MountOptions" : None, - "ResourceDisk.Filesystem" : "ext3", - "AutoUpdate.GAFamily" : "Prod" + "Lib.Dir": "/var/lib/waagent", + "DVD.MountPoint": "/mnt/cdrom/secure", + "Pid.File": "/var/run/waagent.pid", + "Extension.LogDir": "/var/log/azure", + "OS.OpensslPath": "/usr/bin/openssl", + "OS.SshDir": "/etc/ssh", + "OS.HomeDir": "/home", + "OS.PasswordPath": "/etc/shadow", + "OS.SudoersDir": "/etc/sudoers.d", + "OS.RootDeviceScsiTimeout": None, + "Provisioning.SshHostKeyPairType": "rsa", + "Provisioning.PasswordCryptId": "6", + "HttpProxy.Host": None, + 
"ResourceDisk.MountPoint": "/mnt/resource", + "ResourceDisk.MountOptions": None, + "ResourceDisk.Filesystem": "ext3", + "AutoUpdate.GAFamily": "Prod", + "CGroups.Excluded": "customscript,runcommand", } + __INTEGER_OPTIONS__ = { - "OS.SshClientAliveInterval" : 180, - "Provisioning.PasswordCryptSaltLength" : 10, - "HttpProxy.Port" : None, - "ResourceDisk.SwapSizeMB" : 0, - "Autoupdate.Frequency" : 3600 + "OS.SshClientAliveInterval": 180, + "Provisioning.PasswordCryptSaltLength": 10, + "HttpProxy.Port": None, + "ResourceDisk.SwapSizeMB": 0, + "Autoupdate.Frequency": 3600 } + def get_configuration(conf=__conf__): options = {} for option in __SWITCH_OPTIONS__: @@ -151,17 +158,21 @@ return options + def enable_firewall(conf=__conf__): return conf.get_switch("OS.EnableFirewall", False) + def enable_rdma(conf=__conf__): return conf.get_switch("OS.EnableRDMA", False) or \ conf.get_switch("OS.UpdateRdmaDriver", False) or \ conf.get_switch("OS.CheckRdmaDriver", False) + def enable_rdma_update(conf=__conf__): return conf.get_switch("OS.UpdateRdmaDriver", False) + def get_logs_verbose(conf=__conf__): return conf.get_switch("Logs.Verbose", False) @@ -185,44 +196,57 @@ def get_ext_log_dir(conf=__conf__): return conf.get("Extension.LogDir", "/var/log/azure") + def get_fips_enabled(conf=__conf__): return conf.get_switch("OS.EnableFIPS", False) + def get_openssl_cmd(conf=__conf__): return conf.get("OS.OpensslPath", "/usr/bin/openssl") + def get_ssh_client_alive_interval(conf=__conf__): return conf.get("OS.SshClientAliveInterval", 180) + def get_ssh_dir(conf=__conf__): return conf.get("OS.SshDir", "/etc/ssh") + def get_home_dir(conf=__conf__): return conf.get("OS.HomeDir", "/home") + def get_passwd_file_path(conf=__conf__): return conf.get("OS.PasswordPath", "/etc/shadow") + def get_sudoers_dir(conf=__conf__): return conf.get("OS.SudoersDir", "/etc/sudoers.d") + def get_sshd_conf_file_path(conf=__conf__): return os.path.join(get_ssh_dir(conf), "sshd_config") + def get_ssh_key_glob(conf=__conf__): return os.path.join(get_ssh_dir(conf), 'ssh_host_*key*') + def get_ssh_key_private_path(conf=__conf__): return os.path.join(get_ssh_dir(conf), 'ssh_host_{0}_key'.format(get_ssh_host_keypair_type(conf))) + def get_ssh_key_public_path(conf=__conf__): return os.path.join(get_ssh_dir(conf), 'ssh_host_{0}_key.pub'.format(get_ssh_host_keypair_type(conf))) + def get_root_device_scsi_timeout(conf=__conf__): return conf.get("OS.RootDeviceScsiTimeout", None) + def get_ssh_host_keypair_type(conf=__conf__): keypair_type = conf.get("Provisioning.SshHostKeyPairType", "rsa") if keypair_type == "auto": @@ -233,15 +257,23 @@ return "rsa" return keypair_type + def get_ssh_host_keypair_mode(conf=__conf__): return conf.get("Provisioning.SshHostKeyPairType", "rsa") + def get_provision_enabled(conf=__conf__): return conf.get_switch("Provisioning.Enabled", True) + +def get_extensions_enabled(conf=__conf__): + return conf.get_switch("Extensions.Enabled", True) + + def get_provision_cloudinit(conf=__conf__): return conf.get_switch("Provisioning.UseCloudInit", False) + def get_allow_reset_sys_user(conf=__conf__): return conf.get_switch("Provisioning.AllowResetSysUser", False) @@ -321,8 +353,23 @@ def get_autoupdate_frequency(conf=__conf__): return conf.get_int("Autoupdate.Frequency", 3600) + def get_enable_overprovisioning(conf=__conf__): - return conf.get_switch("EnableOverProvisioning", False) + return conf.get_switch("EnableOverProvisioning", True) + def get_allow_http(conf=__conf__): return conf.get_switch("OS.AllowHTTP", False) + + +def 
get_disable_agent_file_path(conf=__conf__): + return os.path.join(get_lib_dir(conf), DISABLE_AGENT_FILE) + + +def get_cgroups_enforce_limits(conf=__conf__): + return conf.get_switch("CGroups.EnforceLimits", False) + + +def get_cgroups_excluded(conf=__conf__): + excluded_value = conf.get("CGroups.Excluded", "customscript, runcommand") + return [s for s in [i.strip().lower() for i in excluded_value.split(',')] if len(s) > 0] if excluded_value else [] diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/dhcp.py walinuxagent-2.2.32/azurelinuxagent/common/dhcp.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/dhcp.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/dhcp.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ import os import socket @@ -151,6 +151,15 @@ def send_dhcp_req(self): """ + Check if DHCP is available + """ + (dhcp_available, endpoint) = self.osutil.is_dhcp_available() + if not dhcp_available: + logger.info("send_dhcp_req: DHCP not available") + self.endpoint = endpoint + return + + """ Build dhcp request with mac addr Configure route to allow dhcp traffic Stop dhcp service if necessary diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/errorstate.py walinuxagent-2.2.32/azurelinuxagent/common/errorstate.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/errorstate.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/errorstate.py 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,45 @@ +from datetime import datetime, timedelta + +ERROR_STATE_DELTA_DEFAULT = timedelta(minutes=15) +ERROR_STATE_DELTA_INSTALL = timedelta(minutes=5) +ERROR_STATE_HOST_PLUGIN_FAILURE = timedelta(minutes=5) + + +class ErrorState(object): + def __init__(self, min_timedelta=ERROR_STATE_DELTA_DEFAULT): + self.min_timedelta = min_timedelta + + self.count = 0 + self.timestamp = None + + def incr(self): + if self.count == 0: + self.timestamp = datetime.utcnow() + + self.count += 1 + + def reset(self): + self.count = 0 + self.timestamp = None + + def is_triggered(self): + if self.timestamp is None: + return False + + delta = datetime.utcnow() - self.timestamp + if delta >= self.min_timedelta: + return True + + return False + + @property + def fail_time(self): + if self.timestamp is None: + return 'unknown' + + delta = round((datetime.utcnow() - self.timestamp).seconds / 60.0, 2) + if delta < 60: + return '{0} min'.format(delta) + + delta_hr = round(delta / 60.0, 2) + return '{0} hr'.format(delta_hr) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/event.py walinuxagent-2.2.32/azurelinuxagent/common/event.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/event.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/event.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -12,59 +12,71 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # -import os -import sys -import traceback import atexit +import datetime import json +import os +import sys import time -import datetime -import threading -import platform +import traceback -from datetime import datetime, timedelta +from datetime import datetime import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger -from azurelinuxagent.common.exception import EventError, ProtocolError +from azurelinuxagent.common.exception import EventError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.protocol.restapi import TelemetryEventParam, \ - TelemetryEventList, \ TelemetryEvent, \ - set_properties, get_properties -from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ - DISTRO_CODE_NAME, AGENT_VERSION, \ - CURRENT_AGENT, CURRENT_VERSION + get_properties +from azurelinuxagent.common.utils import fileutil, textutil +from azurelinuxagent.common.version import CURRENT_VERSION _EVENT_MSG = "Event: name={0}, op={1}, message={2}, duration={3}" + class WALAEventOperation: ActivateResourceDisk = "ActivateResourceDisk" AgentBlacklisted = "AgentBlacklisted" AgentEnabled = "AgentEnabled" + ArtifactsProfileBlob = "ArtifactsProfileBlob" AutoUpdate = "AutoUpdate" CustomData = "CustomData" Deploy = "Deploy" Disable = "Disable" + Downgrade = "Downgrade" Download = "Download" Enable = "Enable" ExtensionProcessing = "ExtensionProcessing" Firewall = "Firewall" + GetArtifactExtended = "GetArtifactExtended" HealthCheck = "HealthCheck" + HealthObservation = "HealthObservation" HeartBeat = "HeartBeat" HostPlugin = "HostPlugin" + HostPluginHeartbeat = "HostPluginHeartbeat" + HostPluginHeartbeatExtended = "HostPluginHeartbeatExtended" HttpErrors = "HttpErrors" + ImdsHeartbeat = "ImdsHeartbeat" Install = "Install" + InitializeCGroups = "InitializeCGroups" InitializeHostPlugin = "InitializeHostPlugin" + Log = "Log" Partition = "Partition" ProcessGoalState = "ProcessGoalState" Provision = "Provision" + ProvisionGuestAgent = "ProvisionGuestAgent" + RemoteAccessHandling = "RemoteAccessHandling" ReportStatus = "ReportStatus" + ReportStatusExtended = "ReportStatusExtended" Restart = "Restart" + SequenceNumberMismatch = "SequenceNumberMismatch" + SetCGroupsLimits = "SetCGroupsLimits" + SkipUpdate = "SkipUpdate" UnhandledError = "UnhandledError" UnInstall = "UnInstall" Unknown = "Unknown" @@ -72,10 +84,18 @@ Update = "Update" +SHOULD_ENCODE_MESSAGE_LEN = 80 +SHOULD_ENCODE_MESSAGE_OP = [ + WALAEventOperation.Disable, + WALAEventOperation.Enable, + WALAEventOperation.Install, + WALAEventOperation.UnInstall, +] + class EventStatus(object): EVENT_STATUS_FILE = "event_status.json" - def __init__(self, status_dir=conf.get_lib_dir()): + def __init__(self): self._path = None self._status = {} @@ -90,7 +110,7 @@ event = self._event_name(name, version, op) if event not in self._status: return True - return self._status[event] == True + return self._status[event] is True def initialize(self, status_dir=conf.get_lib_dir()): self._path = os.path.join(status_dir, EventStatus.EVENT_STATUS_FILE) @@ -98,7 +118,7 @@ def mark_event_status(self, name, version, op, status): event = self._event_name(name, version, op) - self._status[event] = (status == True) + self._status[event] = (status is True) self._save() def _event_name(self, name, version, op): @@ -121,6 
+141,7 @@ except Exception as e: logger.warn("Exception occurred saving event status: {0}".format(e)) + __event_status__ = EventStatus() __event_status_operations__ = [ WALAEventOperation.AutoUpdate, @@ -128,9 +149,45 @@ ] +def _encode_message(op, message): + """ + Gzip and base64 encode a message based on the operation. + + The intent of this message is to make the logs human readable and include the + stdout/stderr from extension operations. Extension operations tend to generate + a lot of noise, which makes it difficult to parse the line-oriented waagent.log. + The compromise is to encode the stdout/stderr so we preserve the data and do + not destroy the line oriented nature. + + The data can be recovered using the following command: + + $ echo '' | base64 -d | pigz -zd + + You may need to install the pigz command. + + :param op: Operation, e.g. Enable or Install + :param message: Message to encode + :return: gzip'ed and base64 encoded message, or the original message + """ + + if len(message) == 0: + return message + + if op not in SHOULD_ENCODE_MESSAGE_OP: + return message + + try: + return textutil.compress(message) + except Exception: + # If the message could not be encoded a dummy message ('<>') is returned. + # The original message was still sent via telemetry, so all is not lost. + return "<>" + + def _log_event(name, op, message, duration, is_success=True): global _EVENT_MSG + message = _encode_message(op, message) if not is_success: logger.error(_EVENT_MSG, name, op, message, duration) else: @@ -147,9 +204,7 @@ logger.warn("Cannot save event -- Event reporter is not initialized.") return - if not os.path.exists(self.event_dir): - os.mkdir(self.event_dir) - os.chmod(self.event_dir, 0o700) + fileutil.mkdir(self.event_dir, mode=0o700) existing_events = os.listdir(self.event_dir) if len(existing_events) >= 1000: @@ -203,10 +258,13 @@ is_internal=False, log_event=True): - if not is_success or log_event: + if (not is_success) and log_event: _log_event(name, op, message, duration, is_success=is_success) - event = TelemetryEvent(1, "69B669B9-4AF8-4C50-BDC4-6006FA76E975") + self._add_event(duration, evt_type, is_internal, is_success, message, name, op, version, eventId=6) + + def _add_event(self, duration, evt_type, is_internal, is_success, message, name, op, version, eventId): + event = TelemetryEvent(eventId, "69B669B9-4AF8-4C50-BDC4-6006FA76E975") event.parameters.append(TelemetryEventParam('Name', name)) event.parameters.append(TelemetryEventParam('Version', str(version))) event.parameters.append(TelemetryEventParam('IsInternal', is_internal)) @@ -223,22 +281,82 @@ except EventError as e: logger.error("{0}", e) + def add_log_event(self, level, message): + # By the time the message has gotten to this point it is formatted as + # + # YYYY/MM/DD HH:mm:ss.fffffff LEVEL . + # + # The timestamp and the level are redundant, and should be stripped. + # The logging library does not schematize this data, so I am forced + # to parse the message. The format is regular, so the burden is low. 
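The comment above describes the shape of the formatted log line that add_log_event() receives; a short example of the stripping performed by the split just below (the sample line is made up):

# Hypothetical formatted agent log line, as described in the comment above.
message = "2018/09/30 15:05:27.0000000 INFO Event: name=WALinuxAgent, op=Install"
parts = message.split(' ', 3)
msg = parts[3] if len(parts) == 4 else message
print(msg)   # "Event: name=WALinuxAgent, op=Install" (timestamp and level stripped)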
+ + parts = message.split(' ', 3) + msg = parts[3] if len(parts) == 4 \ + else message + + event = TelemetryEvent(7, "FFF0196F-EE4C-4EAF-9AA5-776F622DEB4F") + event.parameters.append(TelemetryEventParam('EventName', WALAEventOperation.Log)) + event.parameters.append(TelemetryEventParam('CapabilityUsed', logger.LogLevel.STRINGS[level])) + event.parameters.append(TelemetryEventParam('Context1', msg)) + event.parameters.append(TelemetryEventParam('Context2', '')) + event.parameters.append(TelemetryEventParam('Context3', '')) + + data = get_properties(event) + try: + self.save_event(json.dumps(data)) + except EventError: + pass + + def add_metric(self, category, counter, instance, value, log_event=False): + """ + Create and save an event which contains a telemetry event. + + :param str category: The category of metric (e.g. "cpu", "memory") + :param str counter: The specific metric within the category (e.g. "%idle") + :param str instance: For instanced metrics, the instance identifier (filesystem name, cpu core#, etc.) + :param value: Value of the metric + :param bool log_event: If true, log the collected metric in the agent log + """ + if log_event: + from azurelinuxagent.common.version import AGENT_NAME + message = "Metric {0}/{1} [{2}] = {3}".format(category, counter, instance, value) + _log_event(AGENT_NAME, "METRIC", message, 0) + + event = TelemetryEvent(4, "69B669B9-4AF8-4C50-BDC4-6006FA76E975") + event.parameters.append(TelemetryEventParam('Category', category)) + event.parameters.append(TelemetryEventParam('Counter', counter)) + event.parameters.append(TelemetryEventParam('Instance', instance)) + event.parameters.append(TelemetryEventParam('Value', value)) + + data = get_properties(event) + try: + self.save_event(json.dumps(data)) + except EventError as e: + logger.error("{0}", e) + __event_logger__ = EventLogger() def elapsed_milliseconds(utc_start): - d = datetime.utcnow() - utc_start + now = datetime.utcnow() + if now < utc_start: + return 0 + + d = now - utc_start return int(((d.days * 24 * 60 * 60 + d.seconds) * 1000) + \ (d.microseconds / 1000.0)) -def report_event(op, is_success=True, message=''): + +def report_event(op, is_success=True, message='', log_event=True): from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION add_event(AGENT_NAME, version=CURRENT_VERSION, is_success=is_success, message=message, - op=op) + op=op, + log_event=log_event) + def report_periodic(delta, op, is_success=True, message=''): from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION @@ -248,6 +366,26 @@ message=message, op=op) + +def report_metric(category, counter, instance, value, log_event=False, reporter=__event_logger__): + """ + Send a telemetry event reporting a single instance of a performance counter. + :param str category: The category of the metric (cpu, memory, etc) + :param str counter: The name of the metric ("%idle", etc) + :param str instance: For instanced metrics, the identifier of the instance. E.g. 
a disk drive name, a cpu core# + :param value: The value of the metric + :param bool log_event: If True, log the metric in the agent log as well + :param EventLogger reporter: The EventLogger instance to which metric events should be sent + """ + if reporter.event_dir is None: + from azurelinuxagent.common.version import AGENT_NAME + logger.warn("Cannot report metric event -- Event reporter is not initialized.") + message = "Metric {0}/{1} [{2}] = {3}".format(category, counter, instance, value) + _log_event(AGENT_NAME, "METRIC", message, 0) + return + reporter.add_metric(category, counter, instance, value, log_event) + + def add_event(name, op=WALAEventOperation.Unknown, is_success=True, duration=0, version=CURRENT_VERSION, message="", evt_type="", is_internal=False, log_event=True, @@ -264,6 +402,14 @@ version=str(version), message=message, evt_type=evt_type, is_internal=is_internal, log_event=log_event) + +def add_log_event(level, message, reporter=__event_logger__): + if reporter.event_dir is None: + return + + reporter.add_log_event(level, message) + + def add_periodic( delta, name, op=WALAEventOperation.Unknown, is_success=True, duration=0, version=CURRENT_VERSION, @@ -279,10 +425,12 @@ version=str(version), message=message, evt_type=evt_type, is_internal=is_internal, log_event=log_event, force=force) + def mark_event_status(name, version, op, status): if op in __event_status_operations__: __event_status__.mark_event_status(name, version, op, status) + def should_emit_event(name, version, op, status): return \ op not in __event_status_operations__ or \ @@ -290,12 +438,15 @@ not __event_status__.event_marked(name, version, op) or \ __event_status__.event_succeeded(name, version, op) != status + def init_event_logger(event_dir): __event_logger__.event_dir = event_dir + def init_event_status(status_dir): __event_status__.initialize(status_dir) + def dump_unhandled_err(name): if hasattr(sys, 'last_type') and hasattr(sys, 'last_value') and \ hasattr(sys, 'last_traceback'): diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/exception.py walinuxagent-2.2.32/azurelinuxagent/common/exception.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/exception.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/exception.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # """ Defines all exceptions @@ -44,7 +44,7 @@ class AgentNetworkError(AgentError): """ - When network is not avaiable. + When network is not available.
""" def __init__(self, msg=None, inner=None): @@ -56,8 +56,9 @@ When failed to execute an extension """ - def __init__(self, msg=None, inner=None): + def __init__(self, msg=None, inner=None, code=-1): super(ExtensionError, self).__init__(msg, inner) + self.code = code class ProvisionError(AgentError): @@ -86,6 +87,7 @@ def __init__(self, msg=None, inner=None): super(DhcpError, self).__init__(msg, inner) + class OSUtilError(AgentError): """ Failed to perform operation to OS configuration @@ -158,3 +160,12 @@ if msg is None: msg = "Resource is gone" super(ResourceGoneError, self).__init__(msg, inner) + + +class RemoteAccessError(AgentError): + """ + Remote Access Error + """ + + def __init__(self, msg=None, inner=None): + super(RemoteAccessError, self).__init__(msg, inner) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/future.py walinuxagent-2.2.32/azurelinuxagent/common/future.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/future.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/future.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,5 +1,13 @@ +import platform import sys +# Note broken dependency handling to avoid potential backward +# compatibility issues on different distributions +try: + import distro +except Exception: + pass + """ Add alias for python2 and python3 libs and functions. """ @@ -24,3 +32,39 @@ else: raise ImportError("Unknown python version: {0}".format(sys.version_info)) + + +def get_linux_distribution(get_full_name, supported_dists): + """Abstract platform.linux_distribution() call which is deprecated as of + Python 3.5 and removed in Python 3.7""" + try: + supported = platform._supported_dists + (supported_dists,) + osinfo = list( + platform.linux_distribution( + full_distribution_name=get_full_name, + supported_dists=supported + ) + ) + if not osinfo or osinfo == ['', '', '']: + return get_linux_distribution_from_distro(get_full_name) + full_name = platform.linux_distribution()[0].strip() + osinfo.append(full_name) + except AttributeError: + return get_linux_distribution_from_distro(get_full_name) + + return osinfo + + +def get_linux_distribution_from_distro(get_full_name): + """Get the distribution information from the distro Python module.""" + # If we get here we have to have the distro module, thus we do + # not wrap the call in a try-except block as it would mask the problem + # and result in a broken agent installation + osinfo = list( + distro.linux_distribution( + full_distribution_name=get_full_name + ) + ) + full_name = distro.linux_distribution()[0].strip() + osinfo.append(full_name) + return osinfo diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/__init__.py walinuxagent-2.2.32/azurelinuxagent/common/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/__init__.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,6 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/logger.py walinuxagent-2.2.32/azurelinuxagent/common/logger.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/logger.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/logger.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,13 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and openssl_bin 1.0+ +# Requires Python 2.6+ and openssl_bin 1.0+ # """ Log utils """ -import os import sys + from azurelinuxagent.common.future import ustr from datetime import datetime, timedelta @@ -28,6 +28,7 @@ EVERY_HALF_HOUR = timedelta(minutes=30) EVERY_FIFTEEN_MINUTES = timedelta(minutes=15) + class Logger(object): """ Logger class @@ -41,6 +42,9 @@ def reset_periodic(self): self.logger.periodic_messages = {} + def set_prefix(self, prefix): + self.prefix = prefix + def is_period_elapsed(self, delta, h): return h not in self.logger.periodic_messages or \ (self.logger.periodic_messages[h] + delta) <= datetime.now() @@ -92,6 +96,7 @@ appender = _create_logger_appender(appender_type, level, path) self.appenders.append(appender) + class ConsoleAppender(object): def __init__(self, level, path): self.level = level @@ -105,6 +110,7 @@ except IOError: pass + class FileAppender(object): def __init__(self, level, path): self.level = level @@ -118,6 +124,7 @@ except IOError: pass + class StdoutAppender(object): def __init__(self, level): self.level = level @@ -129,9 +136,24 @@ except IOError: pass + +class TelemetryAppender(object): + def __init__(self, level, event_func): + self.level = level + self.event_func = event_func + + def write(self, level, msg): + if self.level <= level: + try: + self.event_func(level, msg) + except IOError: + pass + + #Initialize logger instance DEFAULT_LOGGER = Logger() + class LogLevel(object): VERBOSE = 0 INFO = 1 @@ -144,35 +166,48 @@ "ERROR" ] + class AppenderType(object): FILE = 0 CONSOLE = 1 STDOUT = 2 + TELEMETRY = 3 + def add_logger_appender(appender_type, level=LogLevel.INFO, path=None): DEFAULT_LOGGER.add_appender(appender_type, level, path) + def reset_periodic(): DEFAULT_LOGGER.reset_periodic() +def set_prefix(prefix): + DEFAULT_LOGGER.set_prefix(prefix) + def periodic(delta, msg_format, *args): DEFAULT_LOGGER.periodic(delta, msg_format, *args) + def verbose(msg_format, *args): DEFAULT_LOGGER.verbose(msg_format, *args) + def info(msg_format, *args): DEFAULT_LOGGER.info(msg_format, *args) + def warn(msg_format, *args): DEFAULT_LOGGER.warn(msg_format, *args) + def error(msg_format, *args): DEFAULT_LOGGER.error(msg_format, *args) + def log(level, msg_format, *args): DEFAULT_LOGGER.log(level, msg_format, args) + def _create_logger_appender(appender_type, level=LogLevel.INFO, path=None): if appender_type == AppenderType.CONSOLE: return ConsoleAppender(level, path) @@ -180,6 +215,8 @@ return FileAppender(level, path) elif appender_type == AppenderType.STDOUT: return StdoutAppender(level) + elif appender_type == AppenderType.TELEMETRY: + return TelemetryAppender(level, path) else: raise ValueError("Unknown appender type") diff -Nru 
walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/alpine.py walinuxagent-2.2.32/azurelinuxagent/common/osutil/alpine.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/alpine.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/osutil/alpine.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.logger as logger @@ -22,9 +22,11 @@ from azurelinuxagent.common.osutil.default import DefaultOSUtil class AlpineOSUtil(DefaultOSUtil): + def __init__(self): super(AlpineOSUtil, self).__init__() self.agent_conf_file_path = '/etc/waagent.conf' + self.jit_enabled = True def is_dhcp_enabled(self): return True diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/arch.py walinuxagent-2.2.32/azurelinuxagent/common/osutil/arch.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/arch.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/osutil/arch.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,5 +1,5 @@ # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import os @@ -21,6 +21,10 @@ from azurelinuxagent.common.osutil.default import DefaultOSUtil class ArchUtil(DefaultOSUtil): + def __init__(self): + super(ArchUtil, self).__init__() + self.jit_enabled = True + def is_dhcp_enabled(self): return True diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/bigip.py walinuxagent-2.2.32/azurelinuxagent/common/osutil/bigip.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/bigip.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/osutil/bigip.py 2018-09-30 15:05:27.000000000 +0000 @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import array @@ -41,6 +41,7 @@ class BigIpOSUtil(DefaultOSUtil): + def __init__(self): super(BigIpOSUtil, self).__init__() @@ -132,7 +133,7 @@ """ return None - def useradd(self, username, expiration=None): + def useradd(self, username, expiration=None, comment=None): """Create user account using tmsh Our policy is to create two accounts when booting a BIG-IP instance. @@ -143,6 +144,7 @@ :param username: The username that you want to add to the system :param expiration: The expiration date to use. We do not use this value. + :param comment: description of the account. We do not use this value. 
""" if self.get_userentry(username): logger.info("User {0} already exists, skip useradd", username) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/clearlinux.py walinuxagent-2.2.32/azurelinuxagent/common/osutil/clearlinux.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/clearlinux.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/osutil/clearlinux.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,5 +1,5 @@ # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import os @@ -34,9 +34,11 @@ from azurelinuxagent.common.osutil.default import DefaultOSUtil class ClearLinuxUtil(DefaultOSUtil): + def __init__(self): super(ClearLinuxUtil, self).__init__() self.agent_conf_file_path = '/usr/share/defaults/waagent/waagent.conf' + self.jit_enabled = True def is_dhcp_enabled(self): return True diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/coreos.py walinuxagent-2.2.32/azurelinuxagent/common/osutil/coreos.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/coreos.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/osutil/coreos.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,5 +1,5 @@ # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import os @@ -21,11 +21,13 @@ from azurelinuxagent.common.osutil.default import DefaultOSUtil class CoreOSUtil(DefaultOSUtil): + def __init__(self): super(CoreOSUtil, self).__init__() self.agent_conf_file_path = '/usr/share/oem/waagent.conf' self.waagent_path = '/usr/share/oem/bin/waagent' self.python_path = '/usr/share/oem/python/bin' + self.jit_enabled = True if 'PATH' in os.environ: path = "{0}:{1}".format(os.environ['PATH'], self.python_path) else: diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/debian.py walinuxagent-2.2.32/azurelinuxagent/common/osutil/debian.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/debian.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/osutil/debian.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,5 +1,5 @@ # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import os @@ -33,8 +33,10 @@ from azurelinuxagent.common.osutil.default import DefaultOSUtil class DebianOSUtil(DefaultOSUtil): + def __init__(self): super(DebianOSUtil, self).__init__() + self.jit_enabled = True def restart_ssh_service(self): return shellutil.run("systemctl --job-mode=ignore-dependencies try-reload-or-restart ssh", chk_err=False) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/default.py walinuxagent-2.2.32/azurelinuxagent/common/osutil/default.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/default.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/osutil/default.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,5 +1,5 @@ # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,12 +13,13 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import array import base64 import datetime +import errno import fcntl import glob import multiprocessing @@ -42,6 +43,9 @@ from azurelinuxagent.common.future import ustr from azurelinuxagent.common.utils.cryptutil import CryptUtil from azurelinuxagent.common.utils.flexible_version import FlexibleVersion +from azurelinuxagent.common.utils.networkutil import RouteEntry, NetworkInterfaceCard + +from pwd import getpwall __RULES_FILES__ = [ "/lib/udev/rules.d/75-persistent-net-generator.rules", "/etc/udev/rules.d/70-persistent-net.rules" ] @@ -60,13 +64,21 @@ # Note: # -- Initially "flight" the change to ACCEPT packets and develop a metric baseline # A subsequent release will convert the ACCEPT to DROP -FIREWALL_DROP = "iptables {0} -t security -{1} OUTPUT -d {2} -p tcp -m conntrack --ctstate INVALID,NEW -j ACCEPT" -# FIREWALL_DROP = "iptables {0} -t security -{1} OUTPUT -d {2} -p tcp -m conntrack --ctstate INVALID,NEW -j DROP" +# FIREWALL_DROP = "iptables {0} -t security -{1} OUTPUT -d {2} -p tcp -m conntrack --ctstate INVALID,NEW -j ACCEPT" +FIREWALL_DROP = "iptables {0} -t security -{1} OUTPUT -d {2} -p tcp -m conntrack --ctstate INVALID,NEW -j DROP" FIREWALL_LIST = "iptables {0} -t security -L -nxv" FIREWALL_PACKETS = "iptables {0} -t security -L OUTPUT --zero OUTPUT -nxv" FIREWALL_FLUSH = "iptables {0} -t security --flush" +# Precisely delete the rules created by the agent. +# this rule was used <= 2.2.25. This rule helped to validate our change, and determine impact. 
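# --- Illustrative sketch (editor's annotation, not part of the patch) ---
# How the firewall rule templates above are expanded before being handed to
# shellutil.run(): {0} is the optional "-w" wait flag, {1} is the iptables
# action (C to check, A to append, D to delete) and {2} is the wireserver
# address. The address below is only an example.
FIREWALL_DROP = ("iptables {0} -t security -{1} OUTPUT -d {2} -p tcp "
                 "-m conntrack --ctstate INVALID,NEW -j DROP")

wait, dst_ip = "-w", "168.63.129.16"
check_rule = FIREWALL_DROP.format(wait, "C", dst_ip)   # probe: does the rule exist?
append_rule = FIREWALL_DROP.format(wait, "A", dst_ip)  # install the DROP rule
print(check_rule)
# iptables -w -t security -C OUTPUT -d 168.63.129.16 -p tcp -m conntrack --ctstate INVALID,NEW -j DROP
# --- end sketch ---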
+FIREWALL_DELETE_CONNTRACK_ACCEPT = "iptables {0} -t security -D OUTPUT -d {1} -p tcp -m conntrack --ctstate INVALID,NEW -j ACCEPT" +FIREWALL_DELETE_OWNER_ACCEPT = "iptables {0} -t security -D OUTPUT -d {1} -p tcp -m owner --uid-owner {2} -j ACCEPT" +FIREWALL_DELETE_CONNTRACK_DROP = "iptables {0} -t security -D OUTPUT -d {1} -p tcp -m conntrack --ctstate INVALID,NEW -j DROP" + PACKET_PATTERN = "^\s*(\d+)\s+(\d+)\s+DROP\s+.*{0}[^\d]*$" +ALL_CPUS_REGEX = re.compile('^cpu .*') + _enable_firewall = True @@ -76,12 +88,22 @@ r'^\s*[A-F0-9]{8}(?:\-[A-F0-9]{4}){3}\-[A-F0-9]{12}\s*$', re.IGNORECASE) -class DefaultOSUtil(object): +IOCTL_SIOCGIFCONF = 0x8912 +IOCTL_SIOCGIFFLAGS = 0x8913 +IOCTL_SIOCGIFHWADDR = 0x8927 +IFNAMSIZ = 16 +IP_COMMAND_OUTPUT = re.compile('^\d+:\s+(\w+):\s+(.*)$') + +BASE_CGROUPS = '/sys/fs/cgroup' + + +class DefaultOSUtil(object): def __init__(self): self.agent_conf_file_path = '/etc/waagent.conf' self.selinux = None self.disable_route_warning = False + self.jit_enabled = False def get_firewall_dropped_packets(self, dst_ip=None): # If a previous attempt failed, do not retry @@ -92,7 +114,12 @@ try: wait = self.get_firewall_will_wait() - rc, output = shellutil.run_get_output(FIREWALL_PACKETS.format(wait)) + rc, output = shellutil.run_get_output(FIREWALL_PACKETS.format(wait), log_cmd=False) + if rc == 3: + # Transient error that we ignore. This code fires every loop + # of the daemon (60m), so we will get the value eventually. + return 0 + if rc != 0: return -1 @@ -129,24 +156,44 @@ else "" return wait - def remove_firewall(self): + def _delete_rule(self, rule): + """ + Continually execute the delete operation until the return + code is non-zero or the limit has been reached. + """ + for i in range(1, 100): + rc = shellutil.run(rule, chk_err=False) + if rc == 1: + return + elif rc == 2: + raise Exception("invalid firewall deletion rule '{0}'".format(rule)) + + def remove_firewall(self, dst_ip=None, uid=None): # If a previous attempt failed, do not retry global _enable_firewall if not _enable_firewall: return False try: + if dst_ip is None or uid is None: + msg = "Missing arguments to enable_firewall" + logger.warn(msg) + raise Exception(msg) + wait = self.get_firewall_will_wait() - flush_rule = FIREWALL_FLUSH.format(wait) - if shellutil.run(flush_rule, chk_err=True) != 0: - raise Exception("non-zero return code") + # This rule was <= 2.2.25 only, and may still exist on some VMs. Until 2.2.25 + # has aged out, keep this cleanup in place. 
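# --- Illustrative sketch (editor's annotation, not part of the patch) ---
# The intent of _delete_rule() above: iptables -D exits 0 when it deleted a
# matching rule, 1 when no such rule exists, and 2 when the rule itself is
# malformed, so deleting in a loop until the exit code becomes 1 removes any
# duplicate copies of the rule. run_cmd below is a stand-in for
# shellutil.run(..., chk_err=False).
def delete_until_gone(run_cmd, rule, max_attempts=99):
    for _ in range(max_attempts):
        rc = run_cmd(rule)
        if rc == 1:           # nothing left to delete - done
            return
        if rc == 2:           # the rule string itself is invalid
            raise Exception("invalid firewall deletion rule '{0}'".format(rule))
        # rc == 0: a copy was deleted; loop again in case duplicates remain
# --- end sketch ---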
+ self._delete_rule(FIREWALL_DELETE_CONNTRACK_ACCEPT.format(wait, dst_ip)) + + self._delete_rule(FIREWALL_DELETE_OWNER_ACCEPT.format(wait, dst_ip, uid)) + self._delete_rule(FIREWALL_DELETE_CONNTRACK_DROP.format(wait, dst_ip)) return True except Exception as e: _enable_firewall = False - logger.info("Unable to flush firewall -- " + logger.info("Unable to remove firewall -- " "no further attempts will be made: " "{0}".format(ustr(e))) return False @@ -167,10 +214,15 @@ # If the DROP rule exists, make no changes drop_rule = FIREWALL_DROP.format(wait, "C", dst_ip) - - if shellutil.run(drop_rule, chk_err=False) == 0: + rc = shellutil.run(drop_rule, chk_err=False) + if rc == 0: logger.verbose("Firewall appears established") return True + elif rc == 2: + self.remove_firewall(dst_ip, uid) + msg = "please upgrade iptables to a version that supports the -C option" + logger.warn(msg) + raise Exception(msg) # Otherwise, append both rules accept_rule = FIREWALL_ACCEPT.format(wait, "A", dst_ip, uid) @@ -241,6 +293,54 @@ return id_that == id_this or \ id_that == self._correct_instance_id(id_this) + @staticmethod + def is_cgroups_supported(): + """ + Enabled by default; disabled in WSL/Travis + """ + is_wsl = '-Microsoft-' in platform.platform() + is_travis = 'TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true' + base_fs_exists = os.path.exists(BASE_CGROUPS) + return not is_wsl and not is_travis and base_fs_exists + + @staticmethod + def _cgroup_path(tail=""): + return os.path.join(BASE_CGROUPS, tail).rstrip(os.path.sep) + + def mount_cgroups(self): + try: + path = self._cgroup_path() + if not os.path.exists(path): + fileutil.mkdir(path) + self.mount(device='cgroup_root', + mount_point=path, + option="-t tmpfs", + chk_err=False) + elif not os.path.isdir(self._cgroup_path()): + logger.error("Could not mount cgroups: ordinary file at {0}".format(path)) + return + + for metric_hierarchy in ['cpu,cpuacct', 'memory']: + target_path = self._cgroup_path(metric_hierarchy) + if not os.path.exists(target_path): + fileutil.mkdir(target_path) + self.mount(device=metric_hierarchy, + mount_point=target_path, + option="-t cgroup -o {0}".format(metric_hierarchy), + chk_err=False) + + for metric_hierarchy in ['cpu', 'cpuacct']: + target_path = self._cgroup_path(metric_hierarchy) + if not os.path.exists(target_path): + os.symlink(self._cgroup_path('cpu,cpuacct'), target_path) + + except OSError as oe: + # log a warning for read-only file systems + logger.warn("Could not mount cgroups: {0}", ustr(oe)) + raise + except Exception as e: + logger.error("Could not mount cgroups: {0}", ustr(e)) + raise def get_agent_conf_file_path(self): return self.agent_conf_file_path @@ -254,12 +354,12 @@ ''' if os.path.isfile(PRODUCT_ID_FILE): s = fileutil.read_file(PRODUCT_ID_FILE).strip() - + else: rc, s = shellutil.run_get_output(DMIDECODE_CMD) if rc != 0 or UUID_PATTERN.match(s) is None: return "" - + return self._correct_instance_id(s.strip()) def get_userentry(self, username): @@ -293,7 +393,7 @@ else: return False - def useradd(self, username, expiration=None): + def useradd(self, username, expiration=None, comment=None): """ Create user account with 'username' """ @@ -306,6 +406,9 @@ cmd = "useradd -m {0} -e {1}".format(username, expiration) else: cmd = "useradd -m {0}".format(username) + + if comment is not None: + cmd += " -c {0}".format(comment) retcode, out = shellutil.run_get_output(cmd) if retcode != 0: raise OSUtilError(("Failed to create user account:{0}, " @@ -322,6 +425,9 @@ if ret != 0: raise OSUtilError(("Failed to 
set password for {0}: {1}" "").format(username, output)) + + def get_users(self): + return getpwall() def conf_sudoer(self, username, nopasswd=False, remove=False): sudoers_dir = conf.get_sudoers_dir() @@ -330,12 +436,12 @@ if not remove: # for older distros create sudoers.d if not os.path.isdir(sudoers_dir): - sudoers_file = os.path.join(sudoers_dir, '../sudoers') # create the sudoers.d directory - os.mkdir(sudoers_dir) + fileutil.mkdir(sudoers_dir) # add the include of sudoers.d to the /etc/sudoers - sudoers = '\n#includedir ' + sudoers_dir + '\n' - fileutil.append_file(sudoers_file, sudoers) + sudoers_file = os.path.join(sudoers_dir, os.pardir, 'sudoers') + include_sudoers_dir = "\n#includedir {0}\n".format(sudoers_dir) + fileutil.append_file(sudoers_file, include_sudoers_dir) sudoer = None if nopasswd: sudoer = "{0} ALL=(ALL) NOPASSWD: ALL".format(username) @@ -579,8 +685,8 @@ time.sleep(1) return False - def mount(self, dvd, mount_point, option="", chk_err=True): - cmd = "mount {0} {1} {2}".format(option, dvd, mount_point) + def mount(self, device, mount_point, option="", chk_err=True): + cmd = "mount {0} {1} {2}".format(option, device, mount_point) retcode, err = shellutil.run_get_output(cmd, chk_err) if retcode != 0: detail = "[{0}] returned {1}: {2}".format(cmd, retcode, err) @@ -591,14 +697,13 @@ return shellutil.run("umount {0}".format(mount_point), chk_err=chk_err) def allow_dhcp_broadcast(self): - #Open DHCP port if iptables is enabled. + # Open DHCP port if iptables is enabled. # We supress error logging on error. shellutil.run("iptables -D INPUT -p udp --dport 68 -j ACCEPT", chk_err=False) shellutil.run("iptables -I INPUT -p udp --dport 68 -j ACCEPT", chk_err=False) - def remove_rules_files(self, rules_files=__RULES_FILES__): lib_dir = conf.get_lib_dir() for src in rules_files: @@ -623,12 +728,10 @@ def get_mac_addr(self): """ - Convienience function, returns mac addr bound to + Convenience function, returns mac addr bound to first non-loopback interface. """ - ifname='' - while len(ifname) < 2 : - ifname=self.get_first_if()[0] + ifname = self.get_if_name() addr = self.get_if_mac(ifname) return textutil.hexstr_to_bytearray(addr) @@ -640,49 +743,139 @@ socket.SOCK_DGRAM, socket.IPPROTO_UDP) param = struct.pack('256s', (ifname[:15]+('\0'*241)).encode('latin-1')) - info = fcntl.ioctl(sock.fileno(), 0x8927, param) + info = fcntl.ioctl(sock.fileno(), IOCTL_SIOCGIFHWADDR, param) + sock.close() return ''.join(['%02X' % textutil.str_to_ord(char) for char in info[18:24]]) - def get_first_if(self): + @staticmethod + def _get_struct_ifconf_size(): """ - Return the interface name, and ip addr of the - first active non-loopback interface. + Return the sizeof struct ifinfo. On 64-bit platforms the size is 40 bytes; + on 32-bit platforms the size is 32 bytes. """ - iface='' - expected=16 # how many devices should I expect... - - # for 64bit the size is 40 bytes - # for 32bit the size is 32 bytes python_arc = platform.architecture()[0] struct_size = 32 if python_arc == '32bit' else 40 + return struct_size - sock = socket.socket(socket.AF_INET, - socket.SOCK_DGRAM, - socket.IPPROTO_UDP) - buff=array.array('B', b'\0' * (expected * struct_size)) - param = struct.pack('iL', - expected*struct_size, - buff.buffer_info()[0]) - ret = fcntl.ioctl(sock.fileno(), 0x8912, param) - retsize=(struct.unpack('iL', ret)[0]) - if retsize == (expected * struct_size): + def _get_all_interfaces(self): + """ + Return a dictionary mapping from interface name to IPv4 address. + Interfaces without a name are ignored. 
+ """ + expected=16 # how many devices should I expect... + struct_size = DefaultOSUtil._get_struct_ifconf_size() + array_size = expected * struct_size + + buff = array.array('B', b'\0' * array_size) + param = struct.pack('iL', array_size, buff.buffer_info()[0]) + + sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) + ret = fcntl.ioctl(sock.fileno(), IOCTL_SIOCGIFCONF, param) + retsize = (struct.unpack('iL', ret)[0]) + sock.close() + + if retsize == array_size: logger.warn(('SIOCGIFCONF returned more than {0} up ' 'network interfaces.'), expected) - sock = buff.tostring() - primary = bytearray(self.get_primary_interface(), encoding='utf-8') - for i in range(0, struct_size * expected, struct_size): - iface=sock[i:i+16].split(b'\0', 1)[0] - if len(iface) == 0 or self.is_loopback(iface) or iface != primary: - # test the next one - if len(iface) != 0 and not self.disable_route_warning: - logger.info('Interface [{0}] skipped'.format(iface)) - continue - else: - # use this one - logger.info('Interface [{0}] selected'.format(iface)) - break - return iface.decode('latin-1'), socket.inet_ntoa(sock[i+20:i+24]) + ifconf_buff = buff.tostring() + + ifaces = {} + for i in range(0, array_size, struct_size): + iface = ifconf_buff[i:i+IFNAMSIZ].split(b'\0', 1)[0] + if len(iface) > 0: + iface_name = iface.decode('latin-1') + if iface_name not in ifaces: + ifaces[iface_name] = socket.inet_ntoa(ifconf_buff[i+20:i+24]) + return ifaces + + def get_first_if(self): + """ + Return the interface name, and IPv4 addr of the "primary" interface or, + failing that, any active non-loopback interface. + """ + primary = self.get_primary_interface() + ifaces = self._get_all_interfaces() + + if primary in ifaces: + return primary, ifaces[primary] + + for iface_name in ifaces.keys(): + if not self.is_loopback(iface_name): + logger.info("Choosing non-primary [{0}]".format(iface_name)) + return iface_name, ifaces[iface_name] + + return '', '' + + @staticmethod + def _build_route_list(proc_net_route): + """ + Construct a list of network route entries + :param list(str) proc_net_route: Route table lines, including headers, containing at least one route + :return: List of network route objects + :rtype: list(RouteEntry) + """ + idx = 0 + column_index = {} + header_line = proc_net_route[0] + for header in filter(lambda h: len(h) > 0, header_line.split("\t")): + column_index[header.strip()] = idx + idx += 1 + try: + idx_iface = column_index["Iface"] + idx_dest = column_index["Destination"] + idx_gw = column_index["Gateway"] + idx_flags = column_index["Flags"] + idx_metric = column_index["Metric"] + idx_mask = column_index["Mask"] + except KeyError: + msg = "/proc/net/route is missing key information; headers are [{0}]".format(header_line) + logger.error(msg) + return [] + + route_list = [] + for entry in proc_net_route[1:]: + route = entry.split("\t") + if len(route) > 0: + route_obj = RouteEntry(route[idx_iface], route[idx_dest], route[idx_gw], route[idx_mask], + route[idx_flags], route[idx_metric]) + route_list.append(route_obj) + return route_list + + def read_route_table(self): + """ + Return a list of strings comprising the route table, including column headers. Each line is stripped of leading + or trailing whitespace but is otherwise unmolested. 
+ + :return: Entries in the text route table + :rtype: list(str) + """ + try: + with open('/proc/net/route') as routing_table: + return list(map(str.strip, routing_table.readlines())) + except Exception as e: + logger.error("Cannot read route table [{0}]", ustr(e)) + + return [] + + def get_list_of_routes(self, route_table): + """ + Construct a list of all network routes known to this system. + + :param list(str) route_table: List of text entries from route table, including headers + :return: a list of network routes + :rtype: list(RouteEntry) + """ + route_list = [] + count = len(route_table) + + if count < 1: + logger.error("/proc/net/route is missing headers") + elif count == 1: + logger.error("/proc/net/route contains no routes") + else: + route_list = DefaultOSUtil._build_route_list(route_table) + return route_list def get_primary_interface(self): """ @@ -754,13 +947,18 @@ return self.get_primary_interface() == ifname def is_loopback(self, ifname): + """ + Determine if a named interface is loopback. + """ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) - result = fcntl.ioctl(s.fileno(), 0x8913, struct.pack('256s', ifname[:15])) + ifname_buff = ifname + ('\0'*256) + result = fcntl.ioctl(s.fileno(), IOCTL_SIOCGIFFLAGS, ifname_buff) flags, = struct.unpack('H', result[16:18]) isloopback = flags & 8 == 8 if not self.disable_route_warning: logger.info('interface [{0}] has flags [{1}], ' 'is loopback [{2}]'.format(ifname, flags, isloopback)) + s.close() return isloopback def get_dhcp_lease_endpoint(self): @@ -783,28 +981,23 @@ endpoint = None HEADER_LEASE = "lease" - HEADER_OPTION = "option unknown-245" - HEADER_DNS = "option domain-name-servers" + HEADER_OPTION_245 = "option unknown-245" HEADER_EXPIRE = "expire" FOOTER_LEASE = "}" FORMAT_DATETIME = "%Y/%m/%d %H:%M:%S" + option_245_re = re.compile(r'\s*option\s+unknown-245\s+([0-9a-fA-F]+):([0-9a-fA-F]+):([0-9a-fA-F]+):([0-9a-fA-F]+);') logger.info("looking for leases in path [{0}]".format(pathglob)) for lease_file in glob.glob(pathglob): leases = open(lease_file).read() - if HEADER_OPTION in leases: + if HEADER_OPTION_245 in leases: cached_endpoint = None - has_option_245 = False + option_245_match = None expired = True # assume expired for line in leases.splitlines(): if line.startswith(HEADER_LEASE): cached_endpoint = None - has_option_245 = False expired = True - elif HEADER_DNS in line: - cached_endpoint = line.replace(HEADER_DNS, '').strip(" ;") - elif HEADER_OPTION in line: - has_option_245 = True elif HEADER_EXPIRE in line: if "never" in line: expired = False @@ -818,12 +1011,20 @@ logger.error("could not parse expiry token '{0}'".format(line)) elif FOOTER_LEASE in line: logger.info("dhcp entry:{0}, 245:{1}, expired:{2}".format( - cached_endpoint, has_option_245, expired)) - if not expired and cached_endpoint is not None and has_option_245: + cached_endpoint, option_245_match is not None, expired)) + if not expired and cached_endpoint is not None: endpoint = cached_endpoint logger.info("found endpoint [{0}]".format(endpoint)) # we want to return the last valid entry, so # keep searching + else: + option_245_match = option_245_re.match(line) + if option_245_match is not None: + cached_endpoint = '{0}.{1}.{2}.{3}'.format( + int(option_245_match.group(1), 16), + int(option_245_match.group(2), 16), + int(option_245_match.group(3), 16), + int(option_245_match.group(4), 16)) if endpoint is not None: logger.info("cached endpoint found [{0}]".format(endpoint)) else: @@ -831,26 +1032,40 @@ return endpoint def 
is_missing_default_route(self): - routes = shellutil.run_get_output("route -n")[1] + route_cmd = "ip route show" + routes = shellutil.run_get_output(route_cmd)[1] for route in routes.split("\n"): if route.startswith("0.0.0.0 ") or route.startswith("default "): return False return True def get_if_name(self): - return self.get_first_if()[0] + if_name = '' + if_found = False + while not if_found: + if_name = self.get_first_if()[0] + if_found = len(if_name) >= 2 + if not if_found: + time.sleep(2) + return if_name def get_ip4_addr(self): return self.get_first_if()[1] def set_route_for_dhcp_broadcast(self, ifname): - return shellutil.run("route add 255.255.255.255 dev {0}".format(ifname), + route_cmd = "ip route add" + return shellutil.run("{0} 255.255.255.255 dev {1}".format( + route_cmd, ifname), chk_err=False) def remove_route_for_dhcp_broadcast(self, ifname): - shellutil.run("route del 255.255.255.255 dev {0}".format(ifname), + route_cmd = "ip route del" + shellutil.run("{0} 255.255.255.255 dev {1}".format(route_cmd, ifname), chk_err=False) + def is_dhcp_available(self): + return (True, '') + def is_dhcp_enabled(self): return False @@ -880,10 +1095,9 @@ def route_add(self, net, mask, gateway): """ - Add specified route using /sbin/route add -net. + Add specified route """ - cmd = ("/sbin/route add -net " - "{0} netmask {1} gw {2}").format(net, mask, gateway) + cmd = "ip route add {0} via {1}".format(net, gateway) return shellutil.run(cmd, chk_err=False) def get_dhcp_pid(self): @@ -974,20 +1188,24 @@ device = None path = "/sys/bus/vmbus/devices/" if os.path.exists(path): - for vmbus in os.listdir(path): - deviceid = fileutil.read_file(os.path.join(path, vmbus, "device_id")) - guid = deviceid.lstrip('{').split('-') - if guid[0] == g0 and guid[1] == "000" + ustr(port_id): - for root, dirs, files in os.walk(path + vmbus): - if root.endswith("/block"): - device = dirs[0] - break - else : #older distros - for d in dirs: - if ':' in d and "block" == d.split(':')[0]: - device = d.split(':')[1] - break - break + try: + for vmbus in os.listdir(path): + deviceid = fileutil.read_file(os.path.join(path, vmbus, "device_id")) + guid = deviceid.lstrip('{').split('-') + if guid[0] == g0 and guid[1] == "000" + ustr(port_id): + for root, dirs, files in os.walk(path + vmbus): + if root.endswith("/block"): + device = dirs[0] + break + else: + # older distros + for d in dirs: + if ':' in d and "block" == d.split(':')[0]: + device = d.split(':')[1] + break + break + except OSError as oe: + logger.warn('Could not obtain device for IDE port {0}: {1}', port_id, ustr(oe)) return device def set_hostname_record(self, hostname): @@ -998,7 +1216,7 @@ if not os.path.exists(hostname_record): # this file is created at provisioning time with agents >= 2.2.3 hostname = socket.gethostname() - logger.warn('Hostname record does not exist, ' + logger.info('Hostname record does not exist, ' 'creating [{0}] with hostname [{1}]', hostname_record, hostname) @@ -1024,8 +1242,115 @@ return multiprocessing.cpu_count() def check_pid_alive(self, pid): - return pid is not None and os.path.isdir(os.path.join('/proc', pid)) + try: + pid = int(pid) + os.kill(pid, 0) + except (ValueError, TypeError): + return False + except OSError as e: + if e.errno == errno.EPERM: + return True + return False + return True @property def is_64bit(self): return sys.maxsize > 2**32 + + @staticmethod + def _get_proc_stat(): + """ + Get the contents of /proc/stat. 
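# --- Illustrative sketch (editor's annotation, not part of the patch) ---
# The signal-0 liveness probe that check_pid_alive() above now uses instead of
# looking for /proc/<pid>. os.kill(pid, 0) delivers no signal but still runs
# the existence and permission checks, so EPERM means "the process exists but
# is owned by someone else".
import errno
import os

def pid_is_alive(pid):
    try:
        os.kill(int(pid), 0)
    except (TypeError, ValueError):
        return False                    # pid was None or not numeric
    except OSError as e:
        return e.errno == errno.EPERM   # exists, just not signalable by us
    return True
# --- end sketch ---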
+ # cpu 813599 3940 909253 154538746 874851 0 6589 0 0 0 + # cpu0 401094 1516 453006 77276738 452939 0 3312 0 0 0 + # cpu1 412505 2423 456246 77262007 421912 0 3276 0 0 0 + + :return: A single string with the contents of /proc/stat + :rtype: str + """ + results = None + try: + results = fileutil.read_file('/proc/stat') + except (OSError, IOError) as ex: + logger.warn("Couldn't read /proc/stat: {0}".format(ex.strerror)) + + return results + + @staticmethod + def get_total_cpu_ticks_since_boot(): + """ + Compute the number of USER_HZ units of time that have elapsed in all categories, across all cores, since boot. + + :return: int + """ + system_cpu = 0 + proc_stat = DefaultOSUtil._get_proc_stat() + if proc_stat is not None: + for line in proc_stat.splitlines(): + if ALL_CPUS_REGEX.match(line): + system_cpu = sum(int(i) for i in line.split()[1:7]) + break + return system_cpu + + def get_nic_state(self): + """ + Capture NIC state (IPv4 and IPv6 addresses plus link state). + + :return: Dictionary of NIC state objects, with the NIC name as key + :rtype: dict(str,NetworkInformationCard) + """ + state = {} + + status, output = shellutil.run_get_output("ip -a -d -o link", chk_err=False, log_cmd=False) + """ + 1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000\ link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 promiscuity 0 addrgenmode eui64 + 2: eth0: mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000\ link/ether 00:0d:3a:30:c3:5a brd ff:ff:ff:ff:ff:ff promiscuity 0 addrgenmode eui64 + 3: docker0: mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default \ link/ether 02:42:b5:d5:00:1d brd ff:ff:ff:ff:ff:ff promiscuity 0 \ bridge forward_delay 1500 hello_time 200 max_age 2000 ageing_time 30000 stp_state 0 priority 32768 vlan_filtering 0 vlan_protocol 802.1Q addrgenmode eui64 + + """ + if status != 0: + logger.verbose("Could not fetch NIC link info; status {0}, {1}".format(status, output)) + return {} + + for entry in output.splitlines(): + result = IP_COMMAND_OUTPUT.match(entry) + if result: + name = result.group(1) + state[name] = NetworkInterfaceCard(name, result.group(2)) + + self._update_nic_state(state, "ip -4 -a -d -o address", NetworkInterfaceCard.add_ipv4, "an IPv4 address") + """ + 1: lo inet 127.0.0.1/8 scope host lo\ valid_lft forever preferred_lft forever + 2: eth0 inet 10.145.187.220/26 brd 10.145.187.255 scope global eth0\ valid_lft forever preferred_lft forever + 3: docker0 inet 192.168.43.1/24 brd 192.168.43.255 scope global docker0\ valid_lft forever preferred_lft forever + """ + + self._update_nic_state(state, "ip -6 -a -d -o address", NetworkInterfaceCard.add_ipv6, "an IPv6 address") + """ + 1: lo inet6 ::1/128 scope host \ valid_lft forever preferred_lft forever + 2: eth0 inet6 fe80::20d:3aff:fe30:c35a/64 scope link \ valid_lft forever preferred_lft forever + """ + + return state + + def _update_nic_state(self, state, ip_command, handler, description): + """ + Update the state of NICs based on the output of a specified ip subcommand. 
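# --- Illustrative sketch (editor's annotation, not part of the patch) ---
# What get_total_cpu_ticks_since_boot() above computes from the aggregate
# "cpu" line of /proc/stat. The sample line is taken from the docstring; only
# the first six columns (user, nice, system, idle, iowait, irq) are summed.
sample_proc_stat = "cpu  813599 3940 909253 154538746 874851 0 6589 0 0 0"

total_ticks = 0
for line in sample_proc_stat.splitlines():
    if line.startswith("cpu "):
        total_ticks = sum(int(v) for v in line.split()[1:7])
        break
print(total_ticks)   # 157140389 USER_HZ units across all cores since boot
# --- end sketch ---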
+ + :param dict(str, NetworkInterfaceCard) state: Dictionary of NIC state objects + :param str ip_command: The ip command to run + :param handler: A method on the NetworkInterfaceCard class + :param str description: Description of the particular information being added to the state + """ + status, output = shellutil.run_get_output(ip_command, chk_err=True) + if status != 0: + return + + for entry in output.splitlines(): + result = IP_COMMAND_OUTPUT.match(entry) + if result: + interface_name = result.group(1) + if interface_name in state: + handler(state[interface_name], result.group(2)) + else: + logger.error("Interface {0} has {1} but no link state".format(interface_name, description)) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/factory.py walinuxagent-2.2.32/azurelinuxagent/common/osutil/factory.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/factory.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/osutil/factory.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,11 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # + import azurelinuxagent.common.logger as logger -from azurelinuxagent.common.utils.textutil import Version from azurelinuxagent.common.version import * from .default import DefaultOSUtil from .arch import ArchUtil @@ -27,10 +27,15 @@ from .openbsd import OpenBSDOSUtil from .redhat import RedhatOSUtil, Redhat6xOSUtil from .suse import SUSEOSUtil, SUSE11OSUtil -from .ubuntu import UbuntuOSUtil, Ubuntu12OSUtil, Ubuntu14OSUtil, UbuntuSnappyOSUtil +from .ubuntu import UbuntuOSUtil, Ubuntu12OSUtil, Ubuntu14OSUtil, \ + UbuntuSnappyOSUtil, Ubuntu16OSUtil, Ubuntu18OSUtil from .alpine import AlpineOSUtil from .bigip import BigIpOSUtil from .gaia import GaiaOSUtil +from .iosxe import IosxeOSUtil +from .nsbsd import NSBSDOSUtil + +from distutils.version import LooseVersion as Version def get_osutil(distro_name=DISTRO_NAME, @@ -46,10 +51,14 @@ return ClearLinuxUtil() if distro_name == "ubuntu": - if Version(distro_version) == Version("12.04") or Version(distro_version) == Version("12.10"): + if Version(distro_version) in [Version("12.04"), Version("12.10")]: return Ubuntu12OSUtil() - elif Version(distro_version) == Version("14.04") or Version(distro_version) == Version("14.10"): + elif Version(distro_version) in [Version("14.04"), Version("14.10")]: return Ubuntu14OSUtil() + elif Version(distro_version) in [Version('16.04'), Version('16.10'), Version('17.04')]: + return Ubuntu16OSUtil() + elif Version(distro_version) in [Version('18.04')]: + return Ubuntu18OSUtil() elif distro_full_name == "Snappy Ubuntu Core": return UbuntuSnappyOSUtil() else: @@ -64,7 +73,7 @@ if distro_name == "coreos" or distro_code_name == "coreos": return CoreOSUtil() - if distro_name == "suse": + if distro_name in ("suse", "sles", "opensuse"): if distro_full_name == 'SUSE Linux Enterprise Server' \ and Version(distro_version) < Version('12') \ or distro_full_name == 'openSUSE' and Version(distro_version) < Version('13.2'): @@ -98,6 +107,12 @@ elif distro_name == "gaia": return GaiaOSUtil() + elif distro_name == "iosxe": + return IosxeOSUtil() + + elif 
distro_name == "nsbsd": + return NSBSDOSUtil() + else: logger.warn("Unable to load distro implementation for {0}. Using " "default distro implementation instead.", diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/freebsd.py walinuxagent-2.2.32/azurelinuxagent/common/osutil/freebsd.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/freebsd.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/osutil/freebsd.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil @@ -25,9 +25,11 @@ from azurelinuxagent.common.future import ustr class FreeBSDOSUtil(DefaultOSUtil): + def __init__(self): super(FreeBSDOSUtil, self).__init__() self._scsi_disks_timeout_set = False + self.jit_enabled = True def set_hostname(self, hostname): rc_file_path = '/etc/rc.conf' @@ -39,7 +41,7 @@ def restart_ssh_service(self): return shellutil.run('service sshd restart', chk_err=False) - def useradd(self, username, expiration=None): + def useradd(self, username, expiration=None, comment=None): """ Create user account with 'username' """ @@ -47,11 +49,12 @@ if userentry is not None: logger.warn("User {0} already exists, skip useradd", username) return - if expiration is not None: cmd = "pw useradd {0} -e {1} -m".format(username, expiration) else: cmd = "pw useradd {0} -m".format(username) + if comment is not None: + cmd += " -c {0}".format(comment) retcode, out = shellutil.run_get_output(cmd) if retcode != 0: raise OSUtilError(("Failed to create user account:{0}, " @@ -247,3 +250,7 @@ if not possible.startswith('pass'): return possible return None + + @staticmethod + def get_total_cpu_ticks_since_boot(): + return 0 diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/gaia.py walinuxagent-2.2.32/azurelinuxagent/common/osutil/gaia.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/gaia.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/osutil/gaia.py 2018-09-30 15:05:27.000000000 +0000 @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import base64 @@ -33,6 +33,7 @@ class GaiaOSUtil(DefaultOSUtil): + def __init__(self): super(GaiaOSUtil, self).__init__() @@ -139,16 +140,16 @@ def eject_dvd(self, chk_err=True): logger.warn('eject is not supported on GAiA') - def mount(self, dvd, mount_point, option="", chk_err=True): - logger.info('mount {0} {1} {2}', dvd, mount_point, option) + def mount(self, device, mount_point, option="", chk_err=True): + logger.info('mount {0} {1} {2}', device, mount_point, option) if 'udf,iso9660' in option: ret, out = super(GaiaOSUtil, self).mount( - dvd, mount_point, option=option.replace('udf,iso9660', 'udf'), + device, mount_point, option=option.replace('udf,iso9660', 'udf'), chk_err=chk_err) if not ret: return ret, out return super(GaiaOSUtil, self).mount( - dvd, mount_point, option=option, chk_err=chk_err) + device, mount_point, option=option, chk_err=chk_err) def allow_dhcp_broadcast(self): logger.info('allow_dhcp_broadcast is ignored on GAiA') diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/__init__.py walinuxagent-2.2.32/azurelinuxagent/common/osutil/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/osutil/__init__.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.osutil.factory import get_osutil diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/iosxe.py walinuxagent-2.2.32/azurelinuxagent/common/osutil/iosxe.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/iosxe.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/osutil/iosxe.py 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,84 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.6+ and Openssl 1.0+ +# + +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.shellutil as shellutil +from azurelinuxagent.common.osutil.default import DefaultOSUtil +from azurelinuxagent.common.osutil.redhat import Redhat6xOSUtil + +''' +The IOSXE distribution is a variant of the Centos distribution, +version 7.1. 
+The primary difference is that IOSXE makes some assumptions about +the waagent environment: + - only the waagent daemon is executed + - no provisioning is performed + - no DHCP-based services are available +''' +class IosxeOSUtil(DefaultOSUtil): + def __init__(self): + super(IosxeOSUtil, self).__init__() + + def set_hostname(self, hostname): + """ + Unlike redhat 6.x, redhat 7.x will set hostname via hostnamectl + Due to a bug in systemd in Centos-7.0, if this call fails, fallback + to hostname. + """ + hostnamectl_cmd = "hostnamectl set-hostname {0} --static".format(hostname) + if shellutil.run(hostnamectl_cmd, chk_err=False) != 0: + logger.warn("[{0}] failed, attempting fallback".format(hostnamectl_cmd)) + DefaultOSUtil.set_hostname(self, hostname) + + def publish_hostname(self, hostname): + """ + Restart NetworkManager first before publishing hostname + """ + shellutil.run("service NetworkManager restart") + super(RedhatOSUtil, self).publish_hostname(hostname) + + def register_agent_service(self): + return shellutil.run("systemctl enable waagent", chk_err=False) + + def unregister_agent_service(self): + return shellutil.run("systemctl disable waagent", chk_err=False) + + def openssl_to_openssh(self, input_file, output_file): + DefaultOSUtil.openssl_to_openssh(self, input_file, output_file) + + def is_dhcp_available(self): + return (False, '168.63.129.16') + + def get_instance_id(self): + ''' + Azure records a UUID as the instance ID + First check /sys/class/dmi/id/product_uuid. + If that is missing, then extracts from dmidecode + If nothing works (for old VMs), return the empty string + ''' + if os.path.isfile(PRODUCT_ID_FILE): + try: + s = fileutil.read_file(PRODUCT_ID_FILE).strip() + return self._correct_instance_id(s.strip()) + except IOError: + pass + rc, s = shellutil.run_get_output(DMIDECODE_CMD) + if rc != 0 or UUID_PATTERN.match(s) is None: + return "" + return self._correct_instance_id(s.strip()) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/nsbsd.py walinuxagent-2.2.32/azurelinuxagent/common/osutil/nsbsd.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/nsbsd.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/osutil/nsbsd.py 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,159 @@ +# +# Copyright 2018 Stormshield +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
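# --- Illustrative sketch (editor's annotation, not part of the patch) ---
# The hostnamectl-with-fallback pattern used by IosxeOSUtil.set_hostname()
# above (modelled on the Redhat 7.x utility). run() stands in for
# shellutil.run() and legacy_set_hostname() for DefaultOSUtil.set_hostname().
def set_hostname(hostname, run, legacy_set_hostname):
    cmd = "hostnamectl set-hostname {0} --static".format(hostname)
    if run(cmd) != 0:
        # hostnamectl unusable (e.g. the Centos 7.0 systemd bug noted above);
        # fall back to the traditional hostname mechanism.
        legacy_set_hostname(hostname)
# --- end sketch ---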
+# + +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.shellutil as shellutil +import azurelinuxagent.common.utils.textutil as textutil +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.exception import OSUtilError +from azurelinuxagent.common.osutil.freebsd import FreeBSDOSUtil +from azurelinuxagent.common.future import ustr +import azurelinuxagent.common.conf as conf +import os +import time + +class NSBSDOSUtil(FreeBSDOSUtil): + + resolver = None + + def __init__(self): + super(NSBSDOSUtil, self).__init__() + + if self.resolver is None: + # NSBSD doesn't have a system resolver, configure a python one + + try: + import dns.resolver + except ImportError: + raise OSUtilError("Python DNS resolver not available. Cannot proceed!") + + self.resolver = dns.resolver.Resolver() + servers = [] + cmd = "getconf /usr/Firewall/ConfigFiles/dns Servers | tail -n +2" + ret, output = shellutil.run_get_output(cmd) + for server in output.split("\n"): + if server == '': + break + server = server[:-1] # remove last '=' + cmd = "grep '{}' /etc/hosts".format(server) + " | awk '{print $1}'" + ret, ip = shellutil.run_get_output(cmd) + servers.append(ip) + self.resolver.nameservers = servers + dns.resolver.override_system_resolver(self.resolver) + + def set_hostname(self, hostname): + shellutil.run("/usr/Firewall/sbin/setconf /usr/Firewall/System/global SystemName {0}".format(hostname)) + shellutil.run("/usr/Firewall/sbin/enlog") + shellutil.run("/usr/Firewall/sbin/enproxy -u") + shellutil.run("/usr/Firewall/sbin/ensl -u") + shellutil.run("/usr/Firewall/sbin/ennetwork -f") + + def restart_ssh_service(self): + return shellutil.run('/usr/Firewall/sbin/enservice', chk_err=False) + + def conf_sshd(self, disable_password): + option = "0" if disable_password else "1" + + shellutil.run('setconf /usr/Firewall/ConfigFiles/system SSH State 1', + chk_err=False) + shellutil.run('setconf /usr/Firewall/ConfigFiles/system SSH Password {}'.format(option), + chk_err=False) + shellutil.run('enservice', chk_err=False) + + logger.info("{0} SSH password-based authentication methods." 
+ .format("Disabled" if disable_password else "Enabled")) + + def useradd(self, username, expiration=None): + """ + Create user account with 'username' + """ + logger.warn("User creation disabled") + return + + def del_account(self, username): + logger.warn("User deletion disabled") + + def conf_sudoer(self, username, nopasswd=False, remove=False): + logger.warn("Sudo is not enabled") + + def chpasswd(self, username, password, crypt_id=6, salt_len=10): + cmd = "/usr/Firewall/sbin/fwpasswd -p {0}".format(password) + ret, output = shellutil.run_get_output(cmd, log_cmd=False) + if ret != 0: + raise OSUtilError(("Failed to set password for admin: {0}" + "").format(output)) + + # password set, activate webadmin and ssh access + shellutil.run('setconf /usr/Firewall/ConfigFiles/webadmin ACL any && ensl', + chk_err=False) + + def deploy_ssh_pubkey(self, username, pubkey): + """ + Deploy authorized_key + """ + path, thumbprint, value = pubkey + + #overide parameters + super(NSBSDOSUtil, self).deploy_ssh_pubkey('admin', + ["/usr/Firewall/.ssh/authorized_keys", thumbprint, value]) + + def del_root_password(self): + logger.warn("Root password deletion disabled") + + def start_dhcp_service(self): + shellutil.run("/usr/Firewall/sbin/nstart dhclient", chk_err=False) + + def stop_dhcp_service(self): + shellutil.run("/usr/Firewall/sbin/nstop dhclient", chk_err=False) + + def get_dhcp_pid(self): + ret = None + pidfile = "/var/run/dhclient.pid" + + if os.path.isfile(pidfile): + ret = fileutil.read_file(pidfile, encoding='ascii') + return ret + + def eject_dvd(self, chk_err=True): + pass + + def restart_if(self, ifname): + # Restart dhclient only to publish hostname + shellutil.run("ennetwork", chk_err=False) + + def set_dhcp_hostname(self, hostname): + #already done by the dhcp client + pass + + def get_firewall_dropped_packets(self, dst_ip=None): + # disable iptables methods + return 0 + + def get_firewall_will_wait(self): + # disable iptables methods + return "" + + def _delete_rule(self, rule): + # disable iptables methods + return + + def remove_firewall(self, dst_ip=None, uid=None): + # disable iptables methods + return True + + def enable_firewall(self, dst_ip=None, uid=None): + # disable iptables methods + return True diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/openbsd.py walinuxagent-2.2.32/azurelinuxagent/common/osutil/openbsd.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/openbsd.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/osutil/openbsd.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # Copyright 2017 Reyk Floeter # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and OpenSSL 1.0+ +# Requires Python 2.6+ and OpenSSL 1.0+ import os import re @@ -36,8 +36,10 @@ re.IGNORECASE) class OpenBSDOSUtil(DefaultOSUtil): + def __init__(self): super(OpenBSDOSUtil, self).__init__() + self.jit_enabled = True self._scsi_disks_timeout_set = False def get_instance_id(self): @@ -345,3 +347,7 @@ Return device name attached to ide port 'n'. 
""" return "wd{0}".format(port_id) + + @staticmethod + def get_total_cpu_ticks_since_boot(): + return 0 diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/redhat.py walinuxagent-2.2.32/azurelinuxagent/common/osutil/redhat.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/redhat.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/osutil/redhat.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,5 +1,5 @@ # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import os @@ -38,8 +38,10 @@ class Redhat6xOSUtil(DefaultOSUtil): + def __init__(self): super(Redhat6xOSUtil, self).__init__() + self.jit_enabled = True def start_network(self): return shellutil.run("/sbin/service networking start", chk_err=False) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/suse.py walinuxagent-2.2.32/azurelinuxagent/common/osutil/suse.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/suse.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/osutil/suse.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,5 +1,5 @@ # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import os @@ -33,8 +33,10 @@ from azurelinuxagent.common.osutil.default import DefaultOSUtil class SUSE11OSUtil(DefaultOSUtil): + def __init__(self): super(SUSE11OSUtil, self).__init__() + self.jit_enabled = True self.dhclient_name='dhcpcd' def set_hostname(self, hostname): diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/ubuntu.py walinuxagent-2.2.32/azurelinuxagent/common/osutil/ubuntu.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/osutil/ubuntu.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/osutil/ubuntu.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,5 +1,5 @@ # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,15 +13,22 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # +import time + +import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil + from azurelinuxagent.common.osutil.default import DefaultOSUtil + class Ubuntu14OSUtil(DefaultOSUtil): + def __init__(self): super(Ubuntu14OSUtil, self).__init__() + self.jit_enabled = True def start_network(self): return shellutil.run("service networking start", chk_err=False) @@ -41,6 +48,7 @@ def get_dhcp_lease_endpoint(self): return self.get_endpoint_from_leases_path('/var/lib/dhcp/dhclient.*.leases') + class Ubuntu12OSUtil(Ubuntu14OSUtil): def __init__(self): super(Ubuntu12OSUtil, self).__init__() @@ -50,9 +58,16 @@ ret = shellutil.run_get_output("pidof dhclient3", chk_err=False) return ret[1] if ret[0] == 0 else None -class UbuntuOSUtil(Ubuntu14OSUtil): + def mount_cgroups(self): + pass + + +class Ubuntu16OSUtil(Ubuntu14OSUtil): + """ + Ubuntu 16.04, 16.10, and 17.04. + """ def __init__(self): - super(UbuntuOSUtil, self).__init__() + super(Ubuntu16OSUtil, self).__init__() def register_agent_service(self): return shellutil.run("systemctl unmask walinuxagent", chk_err=False) @@ -60,6 +75,65 @@ def unregister_agent_service(self): return shellutil.run("systemctl mask walinuxagent", chk_err=False) + def mount_cgroups(self): + """ + Mounted by default in Ubuntu 16.04 + """ + pass + + +class Ubuntu18OSUtil(Ubuntu16OSUtil): + """ + Ubuntu 18.04 + """ + def __init__(self): + super(Ubuntu18OSUtil, self).__init__() + + def get_dhcp_pid(self): + ret = shellutil.run_get_output("pidof systemd-networkd") + return ret[1] if ret[0] == 0 else None + + def start_network(self): + return shellutil.run("systemctl start systemd-networkd", chk_err=False) + + def stop_network(self): + return shellutil.run("systemctl stop systemd-networkd", chk_err=False) + + def start_dhcp_service(self): + return self.start_network() + + def stop_dhcp_service(self): + return self.stop_network() + + def start_agent_service(self): + return shellutil.run("systemctl start walinuxagent", chk_err=False) + + def stop_agent_service(self): + return shellutil.run("systemctl stop walinuxagent", chk_err=False) + + +class UbuntuOSUtil(Ubuntu16OSUtil): + def __init__(self): + super(UbuntuOSUtil, self).__init__() + + def restart_if(self, ifname, retries=3, wait=5): + """ + Restart an interface by bouncing the link. systemd-networkd observes + this event, and forces a renew of DHCP. 
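# --- Illustrative sketch (editor's annotation, not part of the patch) ---
# The per-release DHCP client lookup shown in the Ubuntu osutil classes below:
# each release simply asks pidof for the daemon that owns DHCP on that
# release ("dhclient3" on 12.04/12.10, "systemd-networkd" on 18.04).
# run_get_output stands in for shellutil.run_get_output().
def get_dhcp_pid(run_get_output, daemon="systemd-networkd"):
    rc, out = run_get_output("pidof {0}".format(daemon))
    return out if rc == 0 else None
# --- end sketch ---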
+ """ + retry_limit=retries+1 + for attempt in range(1, retry_limit): + return_code=shellutil.run("ip link set {0} down && ip link set {0} up".format(ifname)) + if return_code == 0: + return + logger.warn("failed to restart {0}: return code {1}".format(ifname, return_code)) + if attempt < retry_limit: + logger.info("retrying in {0} seconds".format(wait)) + time.sleep(wait) + else: + logger.warn("exceeded restart retries") + + class UbuntuSnappyOSUtil(Ubuntu14OSUtil): def __init__(self): super(UbuntuSnappyOSUtil, self).__init__() diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/healthservice.py walinuxagent-2.2.32/azurelinuxagent/common/protocol/healthservice.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/healthservice.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/protocol/healthservice.py 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,177 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.6+ and Openssl 1.0+ +# + +import json + +from azurelinuxagent.common import logger +from azurelinuxagent.common.exception import HttpError +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.utils import restutil +from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION + + +class Observation(object): + def __init__(self, name, is_healthy, description='', value=''): + if name is None: + raise ValueError("Observation name must be provided") + + if is_healthy is None: + raise ValueError("Observation health must be provided") + + if value is None: + value = '' + + if description is None: + description = '' + + self.name = name + self.is_healthy = is_healthy + self.description = description + self.value = value + + @property + def as_obj(self): + return { + "ObservationName": self.name[:64], + "IsHealthy": self.is_healthy, + "Description": self.description[:128], + "Value": self.value[:128] + } + + +class HealthService(object): + + ENDPOINT = 'http://{0}:80/HealthService' + API = 'reporttargethealth' + VERSION = "1.0" + OBSERVER_NAME = 'WALinuxAgent' + HOST_PLUGIN_HEARTBEAT_OBSERVATION_NAME = 'GuestAgentPluginHeartbeat' + HOST_PLUGIN_STATUS_OBSERVATION_NAME = 'GuestAgentPluginStatus' + HOST_PLUGIN_VERSIONS_OBSERVATION_NAME = 'GuestAgentPluginVersions' + HOST_PLUGIN_ARTIFACT_OBSERVATION_NAME = 'GuestAgentPluginArtifact' + IMDS_OBSERVATION_NAME = 'InstanceMetadataHeartbeat' + MAX_OBSERVATIONS = 10 + + def __init__(self, endpoint): + self.endpoint = HealthService.ENDPOINT.format(endpoint) + self.api = HealthService.API + self.version = HealthService.VERSION + self.source = HealthService.OBSERVER_NAME + self.observations = list() + + @property + def as_json(self): + data = { + "Api": self.api, + "Version": self.version, + "Source": self.source, + "Observations": [o.as_obj for o in self.observations] + } + return json.dumps(data) + + def report_host_plugin_heartbeat(self, is_healthy): + """ + 
Reports a signal for /health + :param is_healthy: whether the call succeeded + """ + self._observe(name=HealthService.HOST_PLUGIN_HEARTBEAT_OBSERVATION_NAME, + is_healthy=is_healthy) + self._report() + + def report_host_plugin_versions(self, is_healthy, response): + """ + Reports a signal for /versions + :param is_healthy: whether the api call succeeded + :param response: debugging information for failures + """ + self._observe(name=HealthService.HOST_PLUGIN_VERSIONS_OBSERVATION_NAME, + is_healthy=is_healthy, + value=response) + self._report() + + def report_host_plugin_extension_artifact(self, is_healthy, source, response): + """ + Reports a signal for /extensionArtifact + :param is_healthy: whether the api call succeeded + :param source: specifies the api caller for debugging failures + :param response: debugging information for failures + """ + self._observe(name=HealthService.HOST_PLUGIN_ARTIFACT_OBSERVATION_NAME, + is_healthy=is_healthy, + description=source, + value=response) + self._report() + + def report_host_plugin_status(self, is_healthy, response): + """ + Reports a signal for /status + :param is_healthy: whether the api call succeeded + :param response: debugging information for failures + """ + self._observe(name=HealthService.HOST_PLUGIN_STATUS_OBSERVATION_NAME, + is_healthy=is_healthy, + value=response) + self._report() + + def report_imds_status(self, is_healthy, response): + """ + Reports a signal for /metadata/instance + :param is_healthy: whether the api call succeeded and returned valid data + :param response: debugging information for failures + """ + self._observe(name=HealthService.IMDS_OBSERVATION_NAME, + is_healthy=is_healthy, + value=response) + self._report() + + def _observe(self, name, is_healthy, value='', description=''): + # ensure we keep the list size within bounds + if len(self.observations) >= HealthService.MAX_OBSERVATIONS: + del self.observations[:HealthService.MAX_OBSERVATIONS-1] + self.observations.append(Observation(name=name, + is_healthy=is_healthy, + value=value, + description=description)) + + def _report(self): + logger.verbose('HealthService: report observations') + try: + restutil.http_post(self.endpoint, self.as_json, headers={'Content-Type': 'application/json'}) + logger.verbose('HealthService: Reported observations to {0}: {1}', self.endpoint, self.as_json) + except HttpError as e: + logger.warn("HealthService: could not report observations: {0}", ustr(e)) + finally: + # report any failures via telemetry + self._report_failures() + # these signals are not timestamped, so there is no value in persisting data + del self.observations[:] + + def _report_failures(self): + try: + logger.verbose("HealthService: report failures as telemetry") + from azurelinuxagent.common.event import add_event, WALAEventOperation + for o in self.observations: + if not o.is_healthy: + add_event(AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.HealthObservation, + is_success=False, + message=json.dumps(o.as_obj)) + except Exception as e: + logger.verbose("HealthService: could not report failures: {0}".format(ustr(e))) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/hostplugin.py walinuxagent-2.2.32/azurelinuxagent/common/protocol/hostplugin.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/hostplugin.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/protocol/hostplugin.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 
2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,17 +14,19 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import base64 +import datetime import json -import traceback from azurelinuxagent.common import logger +from azurelinuxagent.common.errorstate import ErrorState, ERROR_STATE_HOST_PLUGIN_FAILURE from azurelinuxagent.common.exception import HttpError, ProtocolError, \ ResourceGoneError from azurelinuxagent.common.future import ustr, httpclient +from azurelinuxagent.common.protocol.healthservice import HealthService from azurelinuxagent.common.utils import restutil from azurelinuxagent.common.utils import textutil from azurelinuxagent.common.utils.textutil import remove_bom @@ -35,6 +37,7 @@ URI_FORMAT_GET_EXTENSION_ARTIFACT = "http://{0}:{1}/extensionArtifact" URI_FORMAT_PUT_VM_STATUS = "http://{0}:{1}/status" URI_FORMAT_PUT_LOG = "http://{0}:{1}/vmAgentLog" +URI_FORMAT_HEALTH = "http://{0}:{1}/health" API_VERSION = "2015-09-01" HEADER_CONTAINER_ID = "x-ms-containerid" HEADER_VERSION = "x-ms-version" @@ -47,6 +50,9 @@ class HostPluginProtocol(object): _is_default_channel = False + FETCH_REPORTING_PERIOD = datetime.timedelta(minutes=1) + STATUS_REPORTING_PERIOD = datetime.timedelta(minutes=1) + def __init__(self, endpoint, container_id, role_config_name): if endpoint is None: raise ProtocolError("HostGAPlugin: Endpoint not provided") @@ -58,6 +64,11 @@ self.deployment_id = None self.role_config_name = role_config_name self.manifest_uri = None + self.health_service = HealthService(endpoint) + self.fetch_error_state = ErrorState(min_timedelta=ERROR_STATE_HOST_PLUGIN_FAILURE) + self.status_error_state = ErrorState(min_timedelta=ERROR_STATE_HOST_PLUGIN_FAILURE) + self.fetch_last_timestamp = None + self.status_last_timestamp = None @staticmethod def is_default_channel(): @@ -77,25 +88,40 @@ is_success=self.is_available) return self.is_available + def get_health(self): + """ + Call the /health endpoint + :return: True if 200 received, False otherwise + """ + url = URI_FORMAT_HEALTH.format(self.endpoint, + HOST_PLUGIN_PORT) + logger.verbose("HostGAPlugin: Getting health from [{0}]", url) + response = restutil.http_get(url, max_retry=1) + return restutil.request_succeeded(response) + def get_api_versions(self): url = URI_FORMAT_GET_API_VERSIONS.format(self.endpoint, HOST_PLUGIN_PORT) - logger.verbose("HostGAPlugin: Getting API versions at [{0}]".format( - url)) + logger.verbose("HostGAPlugin: Getting API versions at [{0}]" + .format(url)) return_val = [] + error_response = '' + is_healthy = False try: headers = {HEADER_CONTAINER_ID: self.container_id} response = restutil.http_get(url, headers) if restutil.request_failed(response): - logger.error( - "HostGAPlugin: Failed Get API versions: {0}".format( - restutil.read_response_error(response))) + error_response = restutil.read_response_error(response) + logger.error("HostGAPlugin: Failed Get API versions: {0}".format(error_response)) + is_healthy = not restutil.request_failed_at_hostplugin(response) else: return_val = ustr(remove_bom(response.read()), encoding='utf-8') - + is_healthy = True except HttpError as e: logger.error("HostGAPlugin: Exception Get API versions: {0}".format(e)) + 
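For orientation, a minimal usage sketch of the new health probe: get_health() and health_service.report_host_plugin_heartbeat() come from the code in this patch, while the probe_host_plugin() wrapper below is hypothetical and only shows how the two might fit together.

    # Hypothetical wrapper, for illustration only.
    def probe_host_plugin(plugin):
        try:
            is_healthy = plugin.get_health()   # True only when /health answers HTTP 200
        except Exception:
            is_healthy = False                 # treat transport errors as unhealthy
        plugin.health_service.report_host_plugin_heartbeat(is_healthy)
        return is_healthy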
self.health_service.report_host_plugin_versions(is_healthy=is_healthy, response=error_response) + return return_val def get_artifact_request(self, artifact_url, artifact_manifest_url=None): @@ -117,6 +143,55 @@ return url, headers + def report_fetch_health(self, uri, is_healthy=True, source='', response=''): + + if uri != URI_FORMAT_GET_EXTENSION_ARTIFACT.format(self.endpoint, HOST_PLUGIN_PORT): + return + + if self.should_report(is_healthy, + self.fetch_error_state, + self.fetch_last_timestamp, + HostPluginProtocol.FETCH_REPORTING_PERIOD): + self.fetch_last_timestamp = datetime.datetime.utcnow() + health_signal = self.fetch_error_state.is_triggered() is False + self.health_service.report_host_plugin_extension_artifact(is_healthy=health_signal, + source=source, + response=response) + + def report_status_health(self, is_healthy, response=''): + if self.should_report(is_healthy, + self.status_error_state, + self.status_last_timestamp, + HostPluginProtocol.STATUS_REPORTING_PERIOD): + self.status_last_timestamp = datetime.datetime.utcnow() + health_signal = self.status_error_state.is_triggered() is False + self.health_service.report_host_plugin_status(is_healthy=health_signal, + response=response) + + @staticmethod + def should_report(is_healthy, error_state, last_timestamp, period): + """ + Determine whether a health signal should be reported + :param is_healthy: whether the current measurement is healthy + :param error_state: the error state which is tracking time since failure + :param last_timestamp: the last measurement time stamp + :param period: the reporting period + :return: True if the signal should be reported, False otherwise + """ + + if is_healthy: + # we only reset the error state upon success, since we want to keep + # reporting the failure; this is different to other uses of error states + # which do not have a separate periodicity + error_state.reset() + else: + error_state.incr() + + if last_timestamp is None: + last_timestamp = datetime.datetime.utcnow() - period + + return datetime.datetime.utcnow() >= (last_timestamp + period) + def put_vm_log(self, content): raise NotImplementedError("Unimplemented") @@ -134,38 +209,31 @@ raise ProtocolError("HostGAPlugin: Status blob was not provided") logger.verbose("HostGAPlugin: Posting VM status") - try: + blob_type = status_blob.type if status_blob.type else config_blob_type - blob_type = status_blob.type if status_blob.type else config_blob_type - - if blob_type == "BlockBlob": - self._put_block_blob_status(sas_url, status_blob) - else: - self._put_page_blob_status(sas_url, status_blob) - - except Exception as e: - # If the HostPlugin rejects the request, - # let the error continue, but set to use the HostPlugin - if isinstance(e, ResourceGoneError): - logger.verbose("HostGAPlugin: Setting host plugin as default channel") - HostPluginProtocol.set_default_channel(True) - - raise + if blob_type == "BlockBlob": + self._put_block_blob_status(sas_url, status_blob) + else: + self._put_page_blob_status(sas_url, status_blob) def _put_block_blob_status(self, sas_url, status_blob): url = URI_FORMAT_PUT_VM_STATUS.format(self.endpoint, HOST_PLUGIN_PORT) response = restutil.http_put(url, - data=self._build_status_data( - sas_url, - status_blob.get_block_blob_headers(len(status_blob.data)), - bytearray(status_blob.data, encoding='utf-8')), - headers=self._build_status_headers()) + data=self._build_status_data( + sas_url, + status_blob.get_block_blob_headers(len(status_blob.data)), + bytearray(status_blob.data, encoding='utf-8')), + 
headers=self._build_status_headers()) if restutil.request_failed(response): - raise HttpError("HostGAPlugin: Put BlockBlob failed: {0}".format( - restutil.read_response_error(response))) + error_response = restutil.read_response_error(response) + is_healthy = not restutil.request_failed_at_hostplugin(response) + self.report_status_health(is_healthy=is_healthy, response=error_response) + raise HttpError("HostGAPlugin: Put BlockBlob failed: {0}" + .format(error_response)) else: + self.report_status_health(is_healthy=True) logger.verbose("HostGAPlugin: Put BlockBlob status succeeded") def _put_page_blob_status(self, sas_url, status_blob): @@ -178,16 +246,19 @@ # First, initialize an empty blob response = restutil.http_put(url, - data=self._build_status_data( - sas_url, - status_blob.get_page_blob_create_headers(status_size)), - headers=self._build_status_headers()) + data=self._build_status_data( + sas_url, + status_blob.get_page_blob_create_headers(status_size)), + headers=self._build_status_headers()) if restutil.request_failed(response): - raise HttpError( - "HostGAPlugin: Failed PageBlob clean-up: {0}".format( - restutil.read_response_error(response))) + error_response = restutil.read_response_error(response) + is_healthy = not restutil.request_failed_at_hostplugin(response) + self.report_status_health(is_healthy=is_healthy, response=error_response) + raise HttpError("HostGAPlugin: Failed PageBlob clean-up: {0}" + .format(error_response)) else: + self.report_status_health(is_healthy=True) logger.verbose("HostGAPlugin: PageBlob clean-up succeeded") # Then, upload the blob in pages @@ -207,17 +278,19 @@ # Send the page response = restutil.http_put(url, - data=self._build_status_data( - sas_url, - status_blob.get_page_blob_page_headers(start, end), - buf), - headers=self._build_status_headers()) + data=self._build_status_data( + sas_url, + status_blob.get_page_blob_page_headers(start, end), + buf), + headers=self._build_status_headers()) if restutil.request_failed(response): + error_response = restutil.read_response_error(response) + is_healthy = not restutil.request_failed_at_hostplugin(response) + self.report_status_health(is_healthy=is_healthy, response=error_response) raise HttpError( - "HostGAPlugin Error: Put PageBlob bytes [{0},{1}]: " \ - "{2}".format( - start, end, restutil.read_response_error(response))) + "HostGAPlugin Error: Put PageBlob bytes " + "[{0},{1}]: {2}".format(start, end, error_response)) # Advance to the next page (if any) start = end diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/imds.py walinuxagent-2.2.32/azurelinuxagent/common/protocol/imds.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/imds.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/protocol/imds.py 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,319 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. 
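The one-minute throttle implemented by should_report() can be exercised in isolation. In the sketch below, _StubErrorState is a stand-in written only for this example; the real agent passes its ErrorState instances.

    import datetime
    from azurelinuxagent.common.protocol.hostplugin import HostPluginProtocol

    class _StubErrorState(object):
        def __init__(self):
            self.failures = 0
        def incr(self):
            self.failures += 1
        def reset(self):
            self.failures = 0
        def is_triggered(self):
            return self.failures > 0

    state = _StubErrorState()
    period = datetime.timedelta(minutes=1)
    last_report = None

    # First failure: nothing has been reported yet, so the signal goes out now.
    assert HostPluginProtocol.should_report(False, state, last_report, period)
    last_report = datetime.datetime.utcnow()

    # A failure seconds later is still counted (state.failures == 2) but is not
    # reported again until a full minute has passed since last_report.
    assert not HostPluginProtocol.should_report(False, state, last_report, period)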
+# Licensed under the Apache License, Version 2.0 (the "License"); +import json +import re + +import azurelinuxagent.common.utils.restutil as restutil +from azurelinuxagent.common.exception import HttpError +from azurelinuxagent.common.future import ustr +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.protocol.restapi import DataContract, set_properties +from azurelinuxagent.common.utils.flexible_version import FlexibleVersion + +IMDS_ENDPOINT = '169.254.169.254' +APIVERSION = '2018-02-01' +BASE_URI = "http://{0}/metadata/instance/{1}?api-version={2}" + +IMDS_IMAGE_ORIGIN_UNKNOWN = 0 +IMDS_IMAGE_ORIGIN_CUSTOM = 1 +IMDS_IMAGE_ORIGIN_ENDORSED = 2 +IMDS_IMAGE_ORIGIN_PLATFORM = 3 + + +def get_imds_client(): + return ImdsClient() + + +# A *slightly* future proof list of endorsed distros. +# -> e.g. I have predicted the future and said that 20.04-LTS will exist +# and is endored. +# +# See https://docs.microsoft.com/en-us/azure/virtual-machines/linux/endorsed-distros for +# more details. +# +# This is not an exhaustive list. This is a best attempt to mark images as +# endorsed or not. Image publishers do not encode all of the requisite information +# in their publisher, offer, sku, and version to definitively mark something as +# endorsed or not. This is not perfect, but it is approximately 98% perfect. +ENDORSED_IMAGE_INFO_MATCHER_JSON = """{ + "CANONICAL": { + "UBUNTUSERVER": { + "List": [ + "14.04.0-LTS", + "14.04.1-LTS", + "14.04.2-LTS", + "14.04.3-LTS", + "14.04.4-LTS", + "14.04.5-LTS", + "14.04.6-LTS", + "14.04.7-LTS", + "14.04.8-LTS", + + "16.04-LTS", + "16.04.0-LTS", + "18.04-LTS", + "20.04-LTS", + "22.04-LTS" + ] + } + }, + "COREOS": { + "COREOS": { + "STABLE": { "Minimum": "494.4.0" } + } + }, + "CREDATIV": { + "DEBIAN": { "Minimum": "7" } + }, + "OPENLOGIC": { + "CENTOS": { + "Minimum": "6.3", + "List": [ + "7-LVM", + "7-RAW" + ] + }, + "CENTOS-HPC": { "Minimum": "6.3" } + }, + "REDHAT": { + "RHEL": { + "Minimum": "6.7", + "List": [ + "7-LVM", + "7-RAW" + ] + }, + "RHEL-HANA": { "Minimum": "6.7" }, + "RHEL-SAP": { "Minimum": "6.7" }, + "RHEL-SAP-APPS": { "Minimum": "6.7" }, + "RHEL-SAP-HANA": { "Minimum": "6.7" } + }, + "SUSE": { + "SLES": { + "List": [ + "11-SP4", + "11-SP5", + "11-SP6", + "12-SP1", + "12-SP2", + "12-SP3", + "12-SP4", + "12-SP5", + "12-SP6" + ] + }, + "SLES-BYOS": { + "List": [ + "11-SP4", + "11-SP5", + "11-SP6", + "12-SP1", + "12-SP2", + "12-SP3", + "12-SP4", + "12-SP5", + "12-SP6" + ] + }, + "SLES-SAP": { + "List": [ + "11-SP4", + "11-SP5", + "11-SP6", + "12-SP1", + "12-SP2", + "12-SP3", + "12-SP4", + "12-SP5", + "12-SP6" + ] + } + } +}""" + + +class ImageInfoMatcher(object): + def __init__(self, doc): + self.doc = json.loads(doc) + + def is_match(self, publisher, offer, sku, version): + def _is_match_walk(doci, keys): + key = keys.pop(0).upper() + if key is None: + return False + + if key not in doci: + return False + + if 'List' in doci[key] and keys[0] in doci[key]['List']: + return True + + if 'Match' in doci[key] and re.match(doci[key]['Match'], keys[0]): + return True + + if 'Minimum' in doci[key]: + try: + return FlexibleVersion(keys[0]) >= FlexibleVersion(doci[key]['Minimum']) + except ValueError: + pass + + return _is_match_walk(doci[key], keys) + + return _is_match_walk(self.doc, [ publisher, offer, sku, version ]) + + +class ComputeInfo(DataContract): + __matcher = ImageInfoMatcher(ENDORSED_IMAGE_INFO_MATCHER_JSON) + + def __init__(self, + location=None, + name=None, + offer=None, + osType=None, + placementGroupId=None, + 
platformFaultDomain=None, + placementUpdateDomain=None, + publisher=None, + resourceGroupName=None, + sku=None, + subscriptionId=None, + tags=None, + version=None, + vmId=None, + vmSize=None, + vmScaleSetName=None, + zone=None): + self.location = location + self.name = name + self.offer = offer + self.osType = osType + self.placementGroupId = placementGroupId + self.platformFaultDomain = platformFaultDomain + self.platformUpdateDomain = placementUpdateDomain + self.publisher = publisher + self.resourceGroupName = resourceGroupName + self.sku = sku + self.subscriptionId = subscriptionId + self.tags = tags + self.version = version + self.vmId = vmId + self.vmSize = vmSize + self.vmScaleSetName = vmScaleSetName + self.zone = zone + + + @property + def image_info(self): + return "{0}:{1}:{2}:{3}".format(self.publisher, self.offer, self.sku, self.version) + + @property + def image_origin(self): + """ + An integer value describing the origin of the image. + + 0 -> unknown + 1 -> custom - user created image + 2 -> endorsed - See https://docs.microsoft.com/en-us/azure/virtual-machines/linux/endorsed-distros + 3 -> platform - non-endorsed image that is available in the Azure Marketplace. + """ + + try: + if self.publisher == "": + return IMDS_IMAGE_ORIGIN_CUSTOM + + if ComputeInfo.__matcher.is_match(self.publisher, self.offer, self.sku, self.version): + return IMDS_IMAGE_ORIGIN_ENDORSED + else: + return IMDS_IMAGE_ORIGIN_PLATFORM + + except Exception as e: + logger.warn("Could not determine the image origin from IMDS: {0}", str(e)) + return IMDS_IMAGE_ORIGIN_UNKNOWN + + +class ImdsClient(object): + def __init__(self, version=APIVERSION): + self._api_version = version + self._headers = { + 'User-Agent': restutil.HTTP_USER_AGENT, + 'Metadata': True, + } + self._health_headers = { + 'User-Agent': restutil.HTTP_USER_AGENT_HEALTH, + 'Metadata': True, + } + pass + + @property + def compute_url(self): + return BASE_URI.format(IMDS_ENDPOINT, 'compute', self._api_version) + + @property + def instance_url(self): + return BASE_URI.format(IMDS_ENDPOINT, '', self._api_version) + + def get_compute(self): + """ + Fetch compute information. + + :return: instance of a ComputeInfo + :rtype: ComputeInfo + """ + + resp = restutil.http_get(self.compute_url, headers=self._headers) + + if restutil.request_failed(resp): + raise HttpError("{0} - GET: {1}".format(resp.status, self.compute_url)) + + data = resp.read() + data = json.loads(ustr(data, encoding="utf-8")) + + compute_info = ComputeInfo() + set_properties('compute', compute_info, data) + + return compute_info + + def validate(self): + """ + Determines whether the metadata instance api returns 200, and the response + is valid: compute should contain location, name, subscription id, and vm size + and network should contain mac address and private ip address. 
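A hedged usage sketch of the new IMDS client: it assumes the code runs on an Azure VM with connectivity to 169.254.169.254, and the printed values are purely illustrative.

    from azurelinuxagent.common.protocol.imds import (
        get_imds_client, IMDS_IMAGE_ORIGIN_ENDORSED)

    imds = get_imds_client()
    compute = imds.get_compute()    # GET http://169.254.169.254/metadata/instance/compute
    print(compute.image_info)       # "publisher:offer:sku:version"
    if compute.image_origin == IMDS_IMAGE_ORIGIN_ENDORSED:
        print("image is on the endorsed distro list")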
+ :return: Tuple + is_healthy: True when validation succeeds, False otherwise + error_response: validation failure details to assist with debugging + """ + + # ensure we get a 200 + resp = restutil.http_get(self.instance_url, headers=self._health_headers) + if restutil.request_failed(resp): + return False, "{0}".format(restutil.read_response_error(resp)) + + # ensure the response is valid json + data = resp.read() + try: + json_data = json.loads(ustr(data, encoding="utf-8")) + except Exception as e: + return False, "JSON parsing failed: {0}".format(ustr(e)) + + # ensure all expected fields are present and have a value + try: + # TODO: compute fields cannot be verified yet since we need to exclude rdfe vms (#1249) + + self.check_field(json_data, 'network') + self.check_field(json_data['network'], 'interface') + self.check_field(json_data['network']['interface'][0], 'macAddress') + self.check_field(json_data['network']['interface'][0], 'ipv4') + self.check_field(json_data['network']['interface'][0]['ipv4'], 'ipAddress') + self.check_field(json_data['network']['interface'][0]['ipv4']['ipAddress'][0], 'privateIpAddress') + except ValueError as v: + return False, ustr(v) + + return True, '' + + @staticmethod + def check_field(dict_obj, field): + if field not in dict_obj or dict_obj[field] is None: + raise ValueError('Missing field: [{0}]'.format(field)) + + if len(dict_obj[field]) == 0: + raise ValueError('Empty field: [{0}]'.format(field)) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/__init__.py walinuxagent-2.2.32/azurelinuxagent/common/protocol/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/protocol/__init__.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.protocol.util import get_protocol_util, \ diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/metadata.py walinuxagent-2.2.32/azurelinuxagent/common/protocol/metadata.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/metadata.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/protocol/metadata.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ import base64 import json @@ -304,6 +304,17 @@ logger.verbose("Incarnation is out of date. 
Update goalstate.") raise ProtocolError("Exceeded max retry updating goal state") + def download_ext_handler_pkg(self, uri, destination, headers=None, use_proxy=True): + success = False + try: + resp = restutil.http_get(uri, headers=headers, use_proxy=use_proxy) + if restutil.request_succeeded(resp): + fileutil.write_file(destination, bytearray(resp.read()), asbin=True) + success = True + except Exception as e: + logger.warn("Failed to download from: {0}".format(uri), e) + return success + class Certificates(object): """ diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/ovfenv.py walinuxagent-2.2.32/azurelinuxagent/common/protocol/ovfenv.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/ovfenv.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/protocol/ovfenv.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # """ Copy and parse ovf-env.xml from provisioning ISO and local cache @@ -52,6 +52,7 @@ self.disable_ssh_password_auth = True self.ssh_pubkeys = [] self.ssh_keypairs = [] + self.provision_guest_agent = None self.parse(xml_text) def parse(self, xml_text): @@ -111,3 +112,11 @@ fingerprint = findtext(keypair, "Fingerprint", namespace=wans) self.ssh_keypairs.append((path, fingerprint)) + platform_settings_section = find(environment, "PlatformSettingsSection", namespace=wans) + _validate_ovf(platform_settings_section, "PlatformSettingsSection not found") + + platform_settings = find(platform_settings_section, "PlatformSettings", namespace=wans) + _validate_ovf(platform_settings, "PlatformSettings not found") + + self.provision_guest_agent = findtext(platform_settings, "ProvisionGuestAgent", namespace=wans) + _validate_ovf(self.provision_guest_agent, "ProvisionGuestAgent not found") diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/restapi.py walinuxagent-2.2.32/azurelinuxagent/common/protocol/restapi.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/restapi.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/protocol/restapi.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,13 +14,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import socket import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.restutil as restutil from azurelinuxagent.common.exception import ProtocolError, HttpError from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.utils import fileutil from azurelinuxagent.common.version import DISTRO_VERSION, DISTRO_NAME, CURRENT_VERSION @@ -161,8 +162,7 @@ class ExtHandlerProperties(DataContract): def __init__(self): self.version = None - self.upgradePolicy = None - self.upgradeGuid = None + self.dependencyLevel = None self.state = None self.extensions = DataContractList(Extension) @@ -178,6 +178,16 @@ self.properties = ExtHandlerProperties() self.versionUris = DataContractList(ExtHandlerVersionUri) + def sort_key(self): + level = self.properties.dependencyLevel + if level is None: + level = 0 + # Process uninstall or disabled before enabled, in reverse order + # remap 0 to -1, 1 to -2, 2 to -3, etc + if self.properties.state != u"enabled": + level = (0 - level) - 1 + return level + class ExtHandlerList(DataContract): def __init__(self): @@ -246,13 +256,11 @@ def __init__(self, name=None, version=None, - upgradeGuid=None, status=None, code=0, message=None): self.name = name self.version = version - self.upgradeGuid = upgradeGuid self.status = status self.code = code self.message = message @@ -293,6 +301,18 @@ self.events = DataContractList(TelemetryEvent) +class RemoteAccessUser(DataContract): + def __init__(self, name, encrypted_password, expiration): + self.name = name + self.encrypted_password = encrypted_password + self.expiration = expiration + + +class RemoteAccessUsersList(DataContract): + def __init__(self): + self.users = DataContractList(RemoteAccessUser) + + class Protocol(DataContract): def detect(self): raise NotImplementedError() @@ -321,13 +341,8 @@ def get_artifacts_profile(self): raise NotImplementedError() - def download_ext_handler_pkg(self, uri, headers=None): - try: - resp = restutil.http_get(uri, use_proxy=True, headers=headers) - if restutil.request_succeeded(resp): - return resp.read() - except Exception as e: - logger.warn("Failed to download from: {0}".format(uri), e) + def download_ext_handler_pkg(self, uri, destination, headers=None, use_proxy=True): + raise NotImplementedError() def report_provision_status(self, provision_status): raise NotImplementedError() diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/util.py walinuxagent-2.2.32/azurelinuxagent/common/protocol/util.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/util.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/protocol/util.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,15 +14,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
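To make the dependencyLevel remapping in ExtHandler.sort_key() concrete, here is a standalone sketch; the helper mirrors the remapping rather than importing the class, and the handler tuples are hypothetical.

    # Non-enabled handlers sort first, in reverse dependency order, then
    # enabled handlers in forward order.
    def sort_key(state, level):
        if state != u"enabled":
            level = (0 - level) - 1      # 0 -> -1, 1 -> -2, 2 -> -3, ...
        return level

    handlers = [
        ("A", u"enabled",   0),
        ("B", u"enabled",   1),
        ("C", u"disabled",  0),
        ("D", u"uninstall", 2),
    ]
    ordered = sorted(handlers, key=lambda h: sort_key(h[1], h[2]))
    # ordered -> [("D", "uninstall", 2), ("C", "disabled", 0),
    #             ("A", "enabled", 0), ("B", "enabled", 1)]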
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import errno import os import re import shutil -import time import threading +import time import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger @@ -49,13 +49,24 @@ PASSWORD_REPLACEMENT = "*<" +class _nameset(set): + def __getattr__(self, name): + if name in self: + return name + raise AttributeError("%s not a valid value" % name) + + +prots = _nameset(("WireProtocol", "MetadataProtocol")) + + def get_protocol_util(): return ProtocolUtil() class ProtocolUtil(object): + """ - ProtocolUtil handles initialization for protocol instance. 2 protocol types + ProtocolUtil handles initialization for protocol instance. 2 protocol types are invoked, wire protocol and metadata protocols. """ def __init__(self): @@ -73,7 +84,7 @@ ovf_file_path_on_dvd = os.path.join(dvd_mount_point, OVF_FILE_NAME) tag_file_path_on_dvd = os.path.join(dvd_mount_point, TAG_FILE_NAME) ovf_file_path = os.path.join(conf.get_lib_dir(), OVF_FILE_NAME) - tag_file_path = os.path.join(conf.get_lib_dir(), TAG_FILE_NAME) + tag_file_path = self._get_tag_file_path() try: self.osutil.mount_dvd() @@ -108,15 +119,17 @@ "{0} to {1}: {2}".format(tag_file_path, tag_file_path, ustr(e))) + self._cleanup_ovf_dvd() + + return ovfenv + def _cleanup_ovf_dvd(self): try: self.osutil.umount_dvd() self.osutil.eject_dvd() except OSUtilError as e: logger.warn(ustr(e)) - return ovfenv - def get_ovf_env(self): """ Load saved ovf-env.xml @@ -126,7 +139,18 @@ xml_text = fileutil.read_file(ovf_file_path) return OvfEnv(xml_text) else: - raise ProtocolError("ovf-env.xml is missing from {0}".format(ovf_file_path)) + raise ProtocolError( + "ovf-env.xml is missing from {0}".format(ovf_file_path)) + + def _get_protocol_file_path(self): + return os.path.join( + conf.get_lib_dir(), + PROTOCOL_FILE_NAME) + + def _get_tag_file_path(self): + return os.path.join( + conf.get_lib_dir(), + TAG_FILE_NAME) def _get_wireserver_endpoint(self): try: @@ -141,22 +165,34 @@ fileutil.write_file(file_path, endpoint) except IOError as e: raise OSUtilError(ustr(e)) - + def _detect_wire_protocol(self): endpoint = self.dhcp_handler.endpoint if endpoint is None: - logger.info("WireServer endpoint is not found. Rerun dhcp handler") - try: - self.dhcp_handler.run() - except DhcpError as e: - raise ProtocolError(ustr(e)) - endpoint = self.dhcp_handler.endpoint - + ''' + Check if DHCP can be used to get the wire protocol endpoint + ''' + (dhcp_available, conf_endpoint) = self.osutil.is_dhcp_available() + if dhcp_available: + logger.info("WireServer endpoint is not found. Rerun dhcp handler") + try: + self.dhcp_handler.run() + except DhcpError as e: + raise ProtocolError(ustr(e)) + endpoint = self.dhcp_handler.endpoint + else: + logger.info("_detect_wire_protocol: DHCP not available") + endpoint = self._get_wireserver_endpoint() + if endpoint == None: + endpoint = conf_endpoint + logger.info("Using hardcoded WireServer endpoint {0}", endpoint) + else: + logger.info("WireServer endpoint {0} read from file", endpoint) + try: protocol = WireProtocol(endpoint) protocol.detect() self._set_wireserver_endpoint(endpoint) - self.save_protocol("WireProtocol") return protocol except ProtocolError as e: logger.info("WireServer is not responding. 
Reset endpoint") @@ -167,9 +203,8 @@ def _detect_metadata_protocol(self): protocol = MetadataProtocol() protocol.detect() - self.save_protocol("MetadataProtocol") return protocol - + def _detect_protocol(self, protocols): """ Probe protocol endpoints in turn. @@ -180,19 +215,16 @@ for protocol_name in protocols: try: protocol = self._detect_wire_protocol() \ - if protocol_name == "WireProtocol" \ + if protocol_name == prots.WireProtocol \ else self._detect_metadata_protocol() - IOErrorCounter.set_protocol_endpoint( - endpoint=protocol.endpoint) - - return protocol + return (protocol_name, protocol) except ProtocolError as e: - logger.info("Protocol endpoint not found: {0}, {1}", + logger.info("Protocol endpoint not found: {0}, {1}", protocol_name, e) - if retry < MAX_RETRY -1: + if retry < MAX_RETRY - 1: logger.info("Retry detect protocols: retry={0}", retry) time.sleep(PROBE_INTERVAL) raise ProtocolNotFoundError("No protocol found.") @@ -201,26 +233,25 @@ """ Get protocol instance based on previous detecting result. """ - protocol_file_path = os.path.join(conf.get_lib_dir(), - PROTOCOL_FILE_NAME) + protocol_file_path = self._get_protocol_file_path() if not os.path.isfile(protocol_file_path): raise ProtocolNotFoundError("No protocol found") protocol_name = fileutil.read_file(protocol_file_path) - if protocol_name == "WireProtocol": + if protocol_name == prots.WireProtocol: endpoint = self._get_wireserver_endpoint() return WireProtocol(endpoint) - elif protocol_name == "MetadataProtocol": + elif protocol_name == prots.MetadataProtocol: return MetadataProtocol() else: raise ProtocolNotFoundError(("Unknown protocol: {0}" "").format(protocol_name)) - def save_protocol(self, protocol_name): + def _save_protocol(self, protocol_name): """ Save protocol endpoint """ - protocol_file_path = os.path.join(conf.get_lib_dir(), PROTOCOL_FILE_NAME) + protocol_file_path = self._get_protocol_file_path() try: fileutil.write_file(protocol_file_path, protocol_name) except IOError as e: @@ -232,9 +263,9 @@ """ logger.info("Clean protocol") self.protocol = None - protocol_file_path = os.path.join(conf.get_lib_dir(), PROTOCOL_FILE_NAME) + protocol_file_path = self._get_protocol_file_path() if not os.path.isfile(protocol_file_path): - return + return try: os.remove(protocol_file_path) @@ -244,14 +275,14 @@ return logger.error("Failed to clear protocol endpoint: {0}", e) - def get_protocol(self): + def get_protocol(self, by_file=False): """ - Detect protocol by endpoints - + Detect protocol by endpoints, if by_file is True, + detect MetadataProtocol in priority. :returns: protocol instance """ self.lock.acquire() - + try: if self.protocol is not None: return self.protocol @@ -261,45 +292,21 @@ return self.protocol except ProtocolNotFoundError: pass - logger.info("Detect protocol endpoints") - protocols = ["WireProtocol", "MetadataProtocol"] - self.protocol = self._detect_protocol(protocols) - - return self.protocol + protocols = [prots.WireProtocol] - finally: - self.lock.release() - - def get_protocol_by_file(self): - """ - Detect protocol by tag file. - - If a file "useMetadataEndpoint.tag" is found on provision iso, - metedata protocol will be used. 
No need to probe for wire protocol - - :returns: protocol instance - """ - self.lock.acquire() - - try: - if self.protocol is not None: - return self.protocol + if by_file: + tag_file_path = self._get_tag_file_path() + if os.path.isfile(tag_file_path): + protocols.insert(0, prots.MetadataProtocol) + else: + protocols.append(prots.MetadataProtocol) + protocol_name, protocol = self._detect_protocol(protocols) - try: - self.protocol = self._get_protocol() - return self.protocol - except ProtocolNotFoundError: - pass + IOErrorCounter.set_protocol_endpoint(endpoint=protocol.endpoint) + self._save_protocol(protocol_name) - logger.info("Detect protocol by file") - tag_file_path = os.path.join(conf.get_lib_dir(), TAG_FILE_NAME) - protocols = [] - if os.path.isfile(tag_file_path): - protocols.append("MetadataProtocol") - else: - protocols.append("WireProtocol") - self.protocol = self._detect_protocol(protocols) + self.protocol = protocol return self.protocol finally: diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/wire.py walinuxagent-2.2.32/azurelinuxagent/common/protocol/wire.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/protocol/wire.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/protocol/wire.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,14 +14,19 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ - +# Requires Python 2.6+ and Openssl 1.0+ +import datetime import json import os +import random import re +import sys import time +import traceback import xml.sax.saxutils as saxutils +from datetime import datetime + import azurelinuxagent.common.conf as conf import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.textutil as textutil @@ -29,11 +34,15 @@ from azurelinuxagent.common.exception import ProtocolNotFoundError, \ ResourceGoneError from azurelinuxagent.common.future import httpclient, bytebuffer -from azurelinuxagent.common.protocol.hostplugin import HostPluginProtocol +from azurelinuxagent.common.protocol.hostplugin import HostPluginProtocol, URI_FORMAT_GET_EXTENSION_ARTIFACT, \ + HOST_PLUGIN_PORT from azurelinuxagent.common.protocol.restapi import * +from azurelinuxagent.common.utils.archive import StateFlusher from azurelinuxagent.common.utils.cryptutil import CryptUtil from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, \ findtext, getattrib, gettext, remove_bom, get_bytes_from_pem, parse_json +from azurelinuxagent.common.version import AGENT_NAME +from azurelinuxagent.common.osutil import get_osutil VERSION_INFO_URI = "http://{0}/?comp=versions" GOAL_STATE_URI = "http://{0}/machine/?comp=goalstate" @@ -47,6 +56,7 @@ HOSTING_ENV_FILE_NAME = "HostingEnvironmentConfig.xml" SHARED_CONF_FILE_NAME = "SharedConfig.xml" CERTS_FILE_NAME = "Certificates.xml" +REMOTE_ACCESS_FILE_NAME = "RemoteAccess.{0}.xml" P7M_FILE_NAME = "Certificates.p7m" PEM_FILE_NAME = "Certificates.pem" EXT_CONF_FILE_NAME = "ExtensionsConfig.{0}.xml" @@ -154,17 +164,16 @@ logger.verbose("Get In-VM Artifacts Profile") return self.client.get_artifacts_profile() - def download_ext_handler_pkg(self, uri, headers=None): - package = 
super(WireProtocol, self).download_ext_handler_pkg(uri) + def download_ext_handler_pkg(self, uri, destination, headers=None, use_proxy=True): + success = self.client.stream(uri, destination, headers=headers, use_proxy=use_proxy) - if package is not None: - return package - else: - logger.warn("Download did not succeed, falling back to host plugin") + if not success: + logger.verbose("Download did not succeed, falling back to host plugin") host = self.client.get_host_plugin() uri, headers = host.get_artifact_request(uri, host.manifest_uri) - package = super(WireProtocol, self).download_ext_handler_pkg(uri, headers=headers) - return package + success = self.client.stream(uri, destination, headers=headers, use_proxy=False) + + return success def report_provision_status(self, provision_status): validate_param("provision_status", provision_status, ProvisionStatus) @@ -247,12 +256,10 @@ return xml -""" -Convert VMStatus object to status blob format -""" - - def ga_status_to_guest_info(ga_status): + """ + Convert VMStatus object to status blob format + """ v1_ga_guest_info = { "computerName" : ga_status.hostname, "osName" : ga_status.osname, @@ -274,6 +281,7 @@ } return v1_ga_status + def ext_substatus_to_v1(sub_status_list): status_list = [] for substatus in sub_status_list: @@ -320,7 +328,8 @@ 'handlerVersion': handler_status.version, 'handlerName': handler_status.name, 'status': handler_status.status, - 'code': handler_status.code + 'code': handler_status.code, + 'useExactVersion': True } if handler_status.message is not None: v1_handler_status["formattedMessage"] = { @@ -328,9 +337,6 @@ "message": handler_status.message } - if handler_status.upgradeGuid is not None: - v1_handler_status["upgradeGuid"] = handler_status.upgradeGuid - if len(handler_status.extensions) > 0: # Currently, no more than one extension per handler ext_name = handler_status.extensions[0] @@ -525,10 +531,12 @@ self.updated = None self.hosting_env = None self.shared_conf = None + self.remote_access = None self.certs = None self.ext_conf = None self.host_plugin = None self.status_blob = StatusBlob(self) + self.goal_state_flusher = StateFlusher(conf.get_lib_dir()) def call_wireserver(self, http_req, *args, **kwargs): try: @@ -577,8 +585,7 @@ try: fileutil.write_file(local_file, data) except IOError as e: - fileutil.clean_ioerror(e, - paths=[local_file]) + fileutil.clean_ioerror(e, paths=[local_file]) raise ProtocolError("Failed to write cache: {0}".format(e)) @staticmethod @@ -591,7 +598,16 @@ def fetch_manifest(self, version_uris): logger.verbose("Fetch manifest") - for version in version_uris: + version_uris_shuffled = version_uris + random.shuffle(version_uris_shuffled) + + for version in version_uris_shuffled: + # GA expects a location and failoverLocation in ExtensionsConfig, but + # this is not always the case. See #1147. 
+ if version.uri is None: + logger.verbose('The specified manifest URL is empty, ignored.') + continue + response = None if not HostPluginProtocol.is_default_channel(): response = self.fetch(version.uri) @@ -601,7 +617,7 @@ logger.verbose("Using host plugin as default channel") else: logger.verbose("Failed to download manifest, " - "switching to host plugin") + "switching to host plugin") try: host = self.get_host_plugin() @@ -625,8 +641,37 @@ raise ProtocolError("Failed to fetch manifest from all sources") - def fetch(self, uri, headers=None, use_proxy=None): + def stream(self, uri, destination, headers=None, use_proxy=None): + success = False + logger.verbose("Fetch [{0}] with headers [{1}] to file [{2}]", uri, headers, destination) + + response = self._fetch_response(uri, headers, use_proxy) + if response is not None: + chunk_size = 1024 * 1024 # 1MB buffer + try: + with open(destination, 'wb', chunk_size) as destination_fh: + complete = False + while not complete: + chunk = response.read(chunk_size) + destination_fh.write(chunk) + complete = len(chunk) < chunk_size + success = True + except Exception as e: + logger.error('Error streaming {0} to {1}: {2}'.format(uri, destination, ustr(e))) + + return success + + def fetch(self, uri, headers=None, use_proxy=None, decode=True): logger.verbose("Fetch [{0}] with headers [{1}]", uri, headers) + content = None + response = self._fetch_response(uri, headers, use_proxy) + if response is not None: + response_content = response.read() + content = self.decode_config(response_content) if decode else response_content + return content + + def _fetch_response(self, uri, headers=None, use_proxy=None): + resp = None try: resp = self.call_storage_service( restutil.http_get, @@ -635,21 +680,24 @@ use_proxy=use_proxy) if restutil.request_failed(resp): - msg = "[Storage Failed] URI {0} ".format(uri) - if resp is not None: - msg += restutil.read_response_error(resp) + error_response = restutil.read_response_error(resp) + msg = "Fetch failed from [{0}]: {1}".format(uri, error_response) logger.warn(msg) + if self.host_plugin is not None: + self.host_plugin.report_fetch_health(uri, + is_healthy=not restutil.request_failed_at_hostplugin(resp), + source='WireClient', + response=error_response) raise ProtocolError(msg) + else: + if self.host_plugin is not None: + self.host_plugin.report_fetch_health(uri, source='WireClient') - return self.decode_config(resp.read()) - - except (HttpError, ProtocolError) as e: + except (HttpError, ProtocolError, IOError) as e: logger.verbose("Fetch failed from [{0}]: {1}", uri, e) - if isinstance(e, ResourceGoneError): raise - - return None + return resp def update_hosting_env(self, goal_state): if goal_state.hosting_env_uri is None: @@ -678,6 +726,29 @@ self.save_cache(local_file, xml_text) self.certs = Certificates(self, xml_text) + def update_remote_access_conf(self, goal_state): + if goal_state.remote_access_uri is None: + # Nothing in accounts data. Just return, nothing to do. 
+ return + xml_text = self.fetch_config(goal_state.remote_access_uri, + self.get_header_for_cert()) + self.remote_access = RemoteAccess(xml_text) + local_file = os.path.join(conf.get_lib_dir(), REMOTE_ACCESS_FILE_NAME.format(self.remote_access.incarnation)) + self.save_cache(local_file, xml_text) + + def get_remote_access(self): + incarnation_file = os.path.join(conf.get_lib_dir(), + INCARNATION_FILE_NAME) + incarnation = self.fetch_cache(incarnation_file) + file_name = REMOTE_ACCESS_FILE_NAME.format(incarnation) + remote_access_file = os.path.join(conf.get_lib_dir(), file_name) + if not os.path.isfile(remote_access_file): + # no remote access data. + return None + xml_text = self.fetch_cache(remote_access_file) + remote_access = RemoteAccess(xml_text) + return remote_access + def update_ext_conf(self, goal_state): if goal_state.ext_uri is None: logger.info("ExtensionsConfig.xml uri is empty") @@ -695,13 +766,10 @@ INCARNATION_FILE_NAME) uri = GOAL_STATE_URI.format(self.endpoint) - # Start updating goalstate, retry on 410 - fetch_goal_state = True + goal_state = None for retry in range(0, max_retry): try: - if fetch_goal_state: - fetch_goal_state = False - + if goal_state is None: xml_text = self.fetch_config(uri, self.get_header()) goal_state = GoalState(xml_text) @@ -714,7 +782,8 @@ if last_incarnation is not None and \ last_incarnation == new_incarnation: # Goalstate is not updated. - return + return + self.goal_state_flusher.flush(datetime.utcnow()) self.goal_state = goal_state file_name = GOAL_STATE_FILE_NAME.format(goal_state.incarnation) @@ -724,6 +793,7 @@ self.update_shared_conf(goal_state) self.update_certs(goal_state) self.update_ext_conf(goal_state) + self.update_remote_access_conf(goal_state) self.save_cache(incarnation_file, goal_state.incarnation) if self.host_plugin is not None: @@ -732,21 +802,24 @@ return + except IOError as e: + logger.warn("IOError processing goal state, retrying [{0}]", ustr(e)) + except ResourceGoneError: - logger.info("GoalState is stale -- re-fetching") - fetch_goal_state = True + logger.info("Goal state is stale, re-fetching") + goal_state = None - except Exception as e: - log_method = logger.info \ - if type(e) is ProtocolError \ - else logger.warn - log_method( - "Exception processing GoalState-related files: {0}".format( - ustr(e))) + except ProtocolError as e: + if retry < max_retry - 1: + logger.verbose("ProtocolError processing goal state, retrying [{0}]", ustr(e)) + else: + logger.error("ProtocolError processing goal state, giving up [{0}]", ustr(e)) + except Exception as e: if retry < max_retry-1: - continue - raise + logger.verbose("Exception processing goal state, retrying: [{0}]", ustr(e)) + else: + logger.error("Exception processing goal state, giving up: [{0}]", ustr(e)) raise ProtocolError("Exceeded max retry updating goal state") @@ -787,6 +860,22 @@ return None return self.certs + def get_current_handlers(self): + handler_list = list() + try: + incarnation = self.fetch_cache(os.path.join(conf.get_lib_dir(), + INCARNATION_FILE_NAME)) + ext_conf = ExtensionsConfig(self.fetch_cache(os.path.join(conf.get_lib_dir(), + EXT_CONF_FILE_NAME.format(incarnation)))) + handler_list = ext_conf.ext_handlers.extHandlers + except ProtocolError as pe: + # cache file is missing, nothing to do + logger.verbose(ustr(pe)) + except Exception as e: + logger.error("Could not obtain current handlers: {0}", ustr(e)) + + return handler_list + def get_ext_conf(self): if self.ext_conf is None: goal_state = self.get_goal_state() @@ -797,7 +886,7 @@ local_file = 
os.path.join(conf.get_lib_dir(), local_file) xml_text = self.fetch_cache(local_file) self.ext_conf = ExtensionsConfig(xml_text) - return self.ext_conf + return self.ext_conf def get_ext_manifest(self, ext_handler, goal_state): for update_goal_state in [False, True]: @@ -861,6 +950,10 @@ self.update_goal_state(forced=True) goal_state = self.get_goal_state() + self._remove_stale_agent_manifest( + vmagent_manifest.family, + goal_state.incarnation) + local_file = MANIFEST_FILE_NAME.format( vmagent_manifest.family, goal_state.incarnation) @@ -875,6 +968,25 @@ raise ProtocolError("Failed to retrieve GAFamily manifest") + def _remove_stale_agent_manifest(self, family, incarnation): + """ + The incarnation number can reset at any time, which means there + could be a stale agentsManifest on disk. Stale files are cleaned + on demand as new goal states arrive from WireServer. If the stale + file is not removed agent upgrade may be delayed. + + :param family: GA family, e.g. Prod or Test + :param incarnation: incarnation of the current goal state + """ + fn = AGENTS_MANIFEST_FILE_NAME.format( + family, + incarnation) + + agent_manifest = os.path.join(conf.get_lib_dir(), fn) + + if os.path.exists(agent_manifest): + os.unlink(agent_manifest) + def check_wire_protocol_version(self): uri = VERSION_INFO_URI.format(self.endpoint) version_info_xml = self.fetch_config(uri, None) @@ -885,61 +997,65 @@ logger.info("Wire protocol version:{0}", PROTOCOL_VERSION) elif PROTOCOL_VERSION in version_info.get_supported(): logger.info("Wire protocol version:{0}", PROTOCOL_VERSION) - logger.warn("Server preferred version:{0}", preferred) + logger.info("Server preferred version:{0}", preferred) else: error = ("Agent supported wire protocol version: {0} was not " "advised by Fabric.").format(PROTOCOL_VERSION) raise ProtocolNotFoundError(error) def upload_status_blob(self): - for update_goal_state in [False, True]: - try: - if update_goal_state: - self.update_goal_state(forced=True) - - ext_conf = self.get_ext_conf() - - blob_uri = ext_conf.status_upload_blob - blob_type = ext_conf.status_upload_blob_type - - if blob_uri is not None: - - if not blob_type in ["BlockBlob", "PageBlob"]: - blob_type = "BlockBlob" - logger.verbose("Status Blob type is unspecified " - "-- assuming it is a BlockBlob") - - try: - self.status_blob.prepare(blob_type) - except Exception as e: - self.report_status_event( - "Exception creating status blob: {0}", ustr(e)) - return + self.update_goal_state() + ext_conf = self.get_ext_conf() - if not HostPluginProtocol.is_default_channel(): - try: - if self.status_blob.upload(blob_uri): - return - except HttpError as e: - pass + if ext_conf.status_upload_blob is None: + self.update_goal_state(forced=True) + ext_conf = self.get_ext_conf() + + if ext_conf.status_upload_blob is None: + raise ProtocolNotFoundError("Status upload uri is missing") + + blob_type = ext_conf.status_upload_blob_type + if blob_type not in ["BlockBlob", "PageBlob"]: + blob_type = "BlockBlob" + logger.verbose("Status Blob type is unspecified, assuming BlockBlob") - host = self.get_host_plugin() - host.put_vm_status(self.status_blob, - ext_conf.status_upload_blob, - ext_conf.status_upload_blob_type) - HostPluginProtocol.set_default_channel(True) - return + try: + self.status_blob.prepare(blob_type) + except Exception as e: + raise ProtocolError("Exception creating status blob: {0}", ustr(e)) - except Exception as e: - # If the HostPlugin rejects the request, - # let the error continue, but set to use the HostPlugin - if isinstance(e, 
ResourceGoneError): - HostPluginProtocol.set_default_channel(True) - continue + # Swap the order of use for the HostPlugin vs. the "direct" route. + # Prefer the use of HostPlugin. If HostPlugin fails fall back to the + # direct route. + # + # The code previously preferred the "direct" route always, and only fell back + # to the HostPlugin *if* there was an error. We would like to move to + # the HostPlugin for all traffic, but this is a big change. We would like + # to see how this behaves at scale, and have a fallback should things go + # wrong. This is why we try HostPlugin then direct. + try: + host = self.get_host_plugin() + host.put_vm_status(self.status_blob, + ext_conf.status_upload_blob, + ext_conf.status_upload_blob_type) + return + except ResourceGoneError: + # do not attempt direct, force goal state update and wait to try again + self.update_goal_state(forced=True) + return + except Exception as e: + # for all other errors, fall back to direct + msg = "Falling back to direct upload: {0}".format(ustr(e)) + self.report_status_event(msg, is_success=True) - self.report_status_event( - "Exception uploading status blob: {0}", ustr(e)) + try: + if self.status_blob.upload(ext_conf.status_upload_blob): return + except Exception as e: + msg = "Exception uploading status blob: {0}".format(ustr(e)) + self.report_status_event(msg, is_success=False) + + raise ProtocolError("Failed to upload status blob via either channel") def report_role_prop(self, thumbprint): goal_state = self.get_goal_state() @@ -1030,15 +1146,14 @@ if len(buf[provider_id]) > 0: self.send_event(provider_id, buf[provider_id]) - def report_status_event(self, message, *args): + def report_status_event(self, message, is_success): from azurelinuxagent.common.event import report_event, \ WALAEventOperation - message = message.format(*args) - logger.warn(message) report_event(op=WALAEventOperation.ReportStatus, - is_success=False, - message=message) + is_success=is_success, + message=message, + log_event=not is_success) def get_header(self): return { @@ -1097,16 +1212,26 @@ logger.verbose("Using host plugin as default channel") else: logger.verbose("Failed to download artifacts profile, " - "switching to host plugin") + "switching to host plugin") host = self.get_host_plugin() uri, headers = host.get_artifact_request(blob) - config = self.fetch(uri, headers, use_proxy=False) - profile = self.decode_config(config) + profile = self.fetch(uri, headers, use_proxy=False) - if not textutil.is_str_none_or_whitespace(profile): + if not textutil.is_str_empty(profile): logger.verbose("Artifacts profile downloaded") - artifacts_profile = InVMArtifactsProfile(profile) + try: + artifacts_profile = InVMArtifactsProfile(profile) + except Exception: + logger.warn("Could not parse artifacts profile blob") + msg = "Content: [{0}]".format(profile) + logger.verbose(msg) + + from azurelinuxagent.common.event import report_event, WALAEventOperation + report_event(op=WALAEventOperation.ArtifactsProfileBlob, + is_success=False, + message=msg, + log_event=False) return artifacts_profile @@ -1115,12 +1240,11 @@ continue except Exception as e: - logger.warn( - "Exception retrieving artifacts profile: {0}".format( - ustr(e))) + logger.warn("Exception retrieving artifacts profile: {0}".format(ustr(e))) return None + class VersionInfo(object): def __init__(self, xml_text): """ @@ -1162,6 +1286,7 @@ self.expected_state = None self.hosting_env_uri = None self.shared_conf_uri = None + self.remote_access_uri = None self.certs_uri = None self.ext_uri = None 
self.role_instance_id = None @@ -1189,6 +1314,7 @@ self.role_config_name = findtext(role_config, "ConfigName") container = find(xml_doc, "Container") self.container_id = findtext(container, "ContainerId") + self.remote_access_uri = findtext(container, "RemoteAccessInfo") lbprobe_ports = find(xml_doc, "LBProbePorts") self.load_balancer_probe_port = findtext(lbprobe_ports, "Port") return self @@ -1242,6 +1368,70 @@ return self +class RemoteAccess(object): + """ + Object containing information about user accounts + """ + # + # + # + # + # + # + # + # + # + # + # + # + # + + def __init__(self, xml_text): + logger.verbose("Load RemoteAccess.xml") + self.version = None + self.incarnation = None + self.user_list = RemoteAccessUsersList() + + self.xml_text = None + self.parse(xml_text) + + def parse(self, xml_text): + """ + Parse xml document containing user account information + """ + if xml_text is None or len(xml_text) == 0: + return None + self.xml_text = xml_text + xml_doc = parse_doc(xml_text) + self.incarnation = findtext(xml_doc, "Incarnation") + self.version = findtext(xml_doc, "Version") + user_collection = find(xml_doc, "Users") + users = findall(user_collection, "User") + + for user in users: + remote_access_user = self.parse_user(user) + self.user_list.users.append(remote_access_user) + return self + + def parse_user(self, user): + name = findtext(user, "Name") + encrypted_password = findtext(user, "Password") + expiration = findtext(user, "Expiration") + remote_access_user = RemoteAccessUser(name, encrypted_password, expiration) + return remote_access_user + +class UserAccount(object): + """ + Stores information about single user account + """ + def __init__(self): + self.Name = None + self.EncryptedPassword = None + self.Password = None + self.Expiration = None + self.Groups = [] + + class Certificates(object): """ Object containing certificates of host and provisioned user. @@ -1401,14 +1591,10 @@ ext_handler.properties.version = getattrib(plugin, "version") ext_handler.properties.state = getattrib(plugin, "state") - ext_handler.properties.upgradeGuid = getattrib(plugin, "upgradeGuid") - if not ext_handler.properties.upgradeGuid: - ext_handler.properties.upgradeGuid = None - auto_upgrade = getattrib(plugin, "autoUpgrade") - if auto_upgrade is not None and auto_upgrade.lower() == "true": - ext_handler.properties.upgradePolicy = "auto" - else: - ext_handler.properties.upgradePolicy = "manual" + try: + ext_handler.properties.dependencyLevel = int(getattrib(plugin, "dependencyLevel")) + except ValueError: + ext_handler.properties.dependencyLevel = 0 location = getattrib(plugin, "location") failover_location = getattrib(plugin, "failoverlocation") @@ -1514,7 +1700,7 @@ * encryptedApplicationProfile (optional) """ def __init__(self, artifacts_profile): - if not textutil.is_str_none_or_whitespace(artifacts_profile): + if not textutil.is_str_empty(artifacts_profile): self.__dict__.update(parse_json(artifacts_profile)) def is_on_hold(self): diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/rdma.py walinuxagent-2.2.32/azurelinuxagent/common/rdma.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/rdma.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/rdma.py 2018-09-30 15:05:27.000000000 +0000 @@ -84,6 +84,8 @@ """Retrieve the firmware version information from the system. 
This depends on information provided by the Linux kernel.""" + kvp_key_size = 512 + kvp_value_size = 2048 driver_info_source = '/var/lib/hyperv/.kvp_pool_0' base_kernel_err_msg = 'Kernel does not provide the necessary ' base_kernel_err_msg += 'information or the kvp daemon is not running.' @@ -93,21 +95,23 @@ logger.error(error_msg % driver_info_source) return - lines = open(driver_info_source).read() - if not lines: - error_msg = 'RDMA: Source file "%s" is empty. ' - error_msg += base_kernel_err_msg - logger.error(error_msg % driver_info_source) - return + f = open(driver_info_source) + while True : + key = f.read(kvp_key_size) + value = f.read(kvp_value_size) + if key and value : + key_0 = key.split("\x00")[0] + value_0 = value.split("\x00")[0] + if key_0 == "NdDriverVersion" : + f.close() + return value_0 + else : + break + f.close() - r = re.search("NdDriverVersion\0+(\d\d\d\.\d)", lines) - if r: - NdDriverVersion = r.groups()[0] - return NdDriverVersion - else: - error_msg = 'RDMA: NdDriverVersion not found in "%s"' - logger.error(error_msg % driver_info_source) - return + error_msg = 'RDMA: NdDriverVersion not found in "%s"' + logger.error(error_msg % driver_info_source) + return @staticmethod def is_kvp_daemon_running(): diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/archive.py walinuxagent-2.2.32/azurelinuxagent/common/utils/archive.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/archive.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/utils/archive.py 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,218 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the Apache License. +import errno +import os +import re +import shutil +import zipfile + +from azurelinuxagent.common.utils import fileutil + +import azurelinuxagent.common.logger as logger + + +""" +archive.py + +The module supports the archiving of guest agent state. Guest +agent state is flushed whenever there is a incarnation change. +The flush is archived periodically (once a day). + +The process works as follows whenever a new incarnation arrives. + + 1. Flush - move all state files to a new directory under + .../history/timestamp/. + 2. Archive - enumerate all directories under .../history/timestamp + and create a .zip file named timestamp.zip. Delete the archive + directory + 3. Purge - glob the list .zip files, sort by timestamp in descending + order, keep the first 50 results, and delete the rest. + +... is the directory where the agent's state resides, by default this +is /var/lib/waagent. + +The timestamp is an ISO8601 formatted value. 
+""" + +ARCHIVE_DIRECTORY_NAME = 'history' + +MAX_ARCHIVED_STATES = 50 + +CACHE_PATTERNS = [ + re.compile("^(.*)\.(\d+)\.(agentsManifest)$", re.IGNORECASE), + re.compile("^(.*)\.(\d+)\.(manifest\.xml)$", re.IGNORECASE), + re.compile("^(.*)\.(\d+)\.(xml)$", re.IGNORECASE) +] + +# 2018-04-06T08:21:37.142697 +# 2018-04-06T08:21:37.142697.zip +ARCHIVE_PATTERNS_DIRECTORY = re.compile('^\d{4}\-\d{2}\-\d{2}T\d{2}:\d{2}:\d{2}\.\d+$') +ARCHIVE_PATTERNS_ZIP = re.compile('^\d{4}\-\d{2}\-\d{2}T\d{2}:\d{2}:\d{2}\.\d+\.zip$') + + +class StateFlusher(object): + def __init__(self, lib_dir): + self._source = lib_dir + + d = os.path.join(self._source, ARCHIVE_DIRECTORY_NAME) + if not os.path.exists(d): + try: + fileutil.mkdir(d) + except OSError as e: + if e.errno != errno.EEXIST: + logger.error("{0} : {1}", self._source, e.strerror) + + def flush(self, timestamp): + files = self._get_files_to_archive() + if len(files) == 0: + return + + if self._mkdir(timestamp): + self._archive(files, timestamp) + else: + self._purge(files) + + def history_dir(self, timestamp): + return os.path.join(self._source, ARCHIVE_DIRECTORY_NAME, timestamp.isoformat()) + + def _get_files_to_archive(self): + files = [] + for f in os.listdir(self._source): + full_path = os.path.join(self._source, f) + for pattern in CACHE_PATTERNS: + m = pattern.match(f) + if m is not None: + files.append(full_path) + break + + return files + + def _archive(self, files, timestamp): + for f in files: + dst = os.path.join(self.history_dir(timestamp), os.path.basename(f)) + shutil.move(f, dst) + + def _purge(self, files): + for f in files: + os.remove(f) + + def _mkdir(self, timestamp): + d = self.history_dir(timestamp) + + try: + fileutil.mkdir(d, mode=0o700) + return True + except IOError as e: + logger.error("{0} : {1}".format(d, e.strerror)) + return False + + +# TODO: use @total_ordering once RHEL/CentOS and SLES 11 are EOL. +# @total_ordering first appeared in Python 2.7 and 3.2 +# If there are more use cases for @total_ordering, I will +# consider re-implementing it. 
+class State(object): + def __init__(self, path, timestamp): + self._path = path + self._timestamp = timestamp + + @property + def timestamp(self): + return self._timestamp + + def delete(self): + pass + + def archive(self): + pass + + def __eq__(self, other): + return self._timestamp == other.timestamp + + def __ne__(self, other): + return self._timestamp != other.timestamp + + def __lt__(self, other): + return self._timestamp < other.timestamp + + def __gt__(self, other): + return self._timestamp > other.timestamp + + def __le__(self, other): + return self._timestamp <= other.timestamp + + def __ge__(self, other): + return self._timestamp >= other.timestamp + + +class StateZip(State): + def __init__(self, path, timestamp): + super(StateZip,self).__init__(path, timestamp) + + def delete(self): + os.remove(self._path) + + +class StateDirectory(State): + def __init__(self, path, timestamp): + super(StateDirectory, self).__init__(path, timestamp) + + def delete(self): + shutil.rmtree(self._path) + + def archive(self): + fn_tmp = "{0}.zip.tmp".format(self._path) + fn = "{0}.zip".format(self._path) + + ziph = zipfile.ZipFile(fn_tmp, 'w') + for f in os.listdir(self._path): + full_path = os.path.join(self._path, f) + ziph.write(full_path, f, zipfile.ZIP_DEFLATED) + + ziph.close() + + os.rename(fn_tmp, fn) + shutil.rmtree(self._path) + + +class StateArchiver(object): + def __init__(self, lib_dir): + self._source = os.path.join(lib_dir, ARCHIVE_DIRECTORY_NAME) + + if not os.path.isdir(self._source): + try: + fileutil.mkdir(self._source, mode=0o700) + except IOError as e: + if e.errno != errno.EEXIST: + logger.error("{0} : {1}", self._source, e.strerror) + + def purge(self): + """ + Delete "old" archive directories and .zip archives. Old + is defined as any directories or files older than the X + newest ones. + """ + states = self._get_archive_states() + states.sort(reverse=True) + + for state in states[MAX_ARCHIVED_STATES:]: + state.delete() + + def archive(self): + states = self._get_archive_states() + for state in states: + state.archive() + + def _get_archive_states(self): + states = [] + for f in os.listdir(self._source): + full_path = os.path.join(self._source, f) + m = ARCHIVE_PATTERNS_DIRECTORY.match(f) + if m is not None: + states.append(StateDirectory(full_path, m.group(0))) + + m = ARCHIVE_PATTERNS_ZIP.match(f) + if m is not None: + states.append(StateZip(full_path, m.group(0))) + + return states diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/cryptutil.py walinuxagent-2.2.32/azurelinuxagent/common/utils/cryptutil.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/cryptutil.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/utils/cryptutil.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,17 +14,24 @@ # See the License for the specific language governing permissions and # limitations under the License. 
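Because State implements the full set of rich comparisons on its timestamp, purge() can sort a mixed list of StateZip and StateDirectory objects without caring about their concrete types. A small illustration with hypothetical history entries:

from azurelinuxagent.common.utils.archive import MAX_ARCHIVED_STATES, StateDirectory, StateZip

states = [
    StateZip("/var/lib/waagent/history/2018-04-06T08:21:37.142697.zip",
             "2018-04-06T08:21:37.142697.zip"),
    StateDirectory("/var/lib/waagent/history/2018-04-07T09:00:00.000000",
                   "2018-04-07T09:00:00.000000"),
]
states.sort(reverse=True)                    # newest ISO8601 timestamp first
for stale in states[MAX_ARCHIVED_STATES:]:   # anything beyond the newest 50
    stale.delete()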
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import base64 +import errno import struct +import sys +import os.path +import subprocess from azurelinuxagent.common.future import ustr, bytebuffer from azurelinuxagent.common.exception import CryptError import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil +import azurelinuxagent.common.utils.textutil as textutil + +DECRYPT_SECRET_CMD = "{0} cms -decrypt -inform DER -inkey {1} -in /dev/stdin" class CryptUtil(object): def __init__(self, openssl_cmd): @@ -43,33 +50,47 @@ prv_file, crt_file)) def get_pubkey_from_prv(self, file_name): - cmd = "{0} rsa -in {1} -pubout 2>/dev/null".format(self.openssl_cmd, - file_name) - pub = shellutil.run_get_output(cmd)[1] - return pub + if not os.path.exists(file_name): + raise IOError(errno.ENOENT, "File not found", file_name) + else: + cmd = "{0} rsa -in {1} -pubout 2>/dev/null".format(self.openssl_cmd, + file_name) + pub = shellutil.run_get_output(cmd)[1] + return pub def get_pubkey_from_crt(self, file_name): - cmd = "{0} x509 -in {1} -pubkey -noout".format(self.openssl_cmd, - file_name) - pub = shellutil.run_get_output(cmd)[1] - return pub + if not os.path.exists(file_name): + raise IOError(errno.ENOENT, "File not found", file_name) + else: + cmd = "{0} x509 -in {1} -pubkey -noout".format(self.openssl_cmd, + file_name) + pub = shellutil.run_get_output(cmd)[1] + return pub def get_thumbprint_from_crt(self, file_name): - cmd="{0} x509 -in {1} -fingerprint -noout".format(self.openssl_cmd, - file_name) - thumbprint = shellutil.run_get_output(cmd)[1] - thumbprint = thumbprint.rstrip().split('=')[1].replace(':', '').upper() - return thumbprint + if not os.path.exists(file_name): + raise IOError(errno.ENOENT, "File not found", file_name) + else: + cmd = "{0} x509 -in {1} -fingerprint -noout".format(self.openssl_cmd, + file_name) + thumbprint = shellutil.run_get_output(cmd)[1] + thumbprint = thumbprint.rstrip().split('=')[1].replace(':', '').upper() + return thumbprint def decrypt_p7m(self, p7m_file, trans_prv_file, trans_cert_file, pem_file): - cmd = ("{0} cms -decrypt -in {1} -inkey {2} -recip {3} " - "| {4} pkcs12 -nodes -password pass: -out {5}" - "").format(self.openssl_cmd, p7m_file, trans_prv_file, - trans_cert_file, self.openssl_cmd, pem_file) - shellutil.run(cmd) - rc = shellutil.run(cmd) - if rc != 0: - logger.error("Failed to decrypt {0}".format(p7m_file)) + if not os.path.exists(p7m_file): + raise IOError(errno.ENOENT, "File not found", p7m_file) + elif not os.path.exists(trans_prv_file): + raise IOError(errno.ENOENT, "File not found", trans_prv_file) + else: + cmd = ("{0} cms -decrypt -in {1} -inkey {2} -recip {3} " + "| {4} pkcs12 -nodes -password pass: -out {5}" + "").format(self.openssl_cmd, p7m_file, trans_prv_file, + trans_cert_file, self.openssl_cmd, pem_file) + shellutil.run(cmd) + rc = shellutil.run(cmd) + if rc != 0: + logger.error("Failed to decrypt {0}".format(p7m_file)) def crt_to_ssh(self, input_file, output_file): shellutil.run("ssh-keygen -i -m PKCS8 -f {0} >> {1}".format(input_file, @@ -128,3 +149,16 @@ index = 7 return bytes(byte_array) + def decrypt_secret(self, encrypted_password, private_key): + try: + decoded = base64.b64decode(encrypted_password) + args = DECRYPT_SECRET_CMD.format(self.openssl_cmd, private_key).split(' ') + p = subprocess.Popen(args, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT) + p.stdin.write(decoded) + output = p.communicate()[0] + retcode = 
p.poll() + if retcode: + raise subprocess.CalledProcessError(retcode, "openssl cms -decrypt", output=output) + return output.decode('utf-16') + except Exception as e: + raise CryptError("Error decoding secret", e) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/fileutil.py walinuxagent-2.2.32/azurelinuxagent/common/utils/fileutil.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/fileutil.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/utils/fileutil.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # """ @@ -27,7 +27,6 @@ import pwd import re import shutil -import string import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.textutil as textutil @@ -46,13 +45,6 @@ ] -def copy_file(from_path, to_path=None, to_dir=None): - if to_path is None: - to_path = os.path.join(to_dir, os.path.basename(from_path)) - shutil.copyfile(from_path, to_path) - return to_path - - def read_file(filepath, asbin=False, remove_bom=False, encoding='utf-8'): """ Read and return contents of 'filepath'. @@ -141,7 +133,7 @@ def rm_dirs(*args): """ - Remove the contents of each directry + Remove the contents of each directory """ for p in args: if not os.path.isdir(p): @@ -194,9 +186,10 @@ (Trailing whitespace is ignored.) """ try: - for line in (open(file_path, 'r')).readlines(): - if line_str == line.rstrip(): - return True + with open(file_path, 'r') as fh: + for line in fh.readlines(): + if line_str == line.rstrip(): + return True except Exception: # swallow exception pass @@ -208,11 +201,12 @@ Return match object if found in file. 
""" try: - pattern = re.compile(line_re) - for line in (open(file_path, 'r')).readlines(): - match = re.search(pattern, line) - if match: - return match + with open(file_path, 'r') as fh: + pattern = re.compile(line_re) + for line in fh.readlines(): + match = re.search(pattern, line) + if match: + return match except: pass diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/flexible_version.py walinuxagent-2.2.32/azurelinuxagent/common/utils/flexible_version.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/flexible_version.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/utils/flexible_version.py 2018-09-30 15:05:27.000000000 +0000 @@ -147,6 +147,18 @@ return True + def matches(self, that): + if self.sep != that.sep or len(self.version) > len(that.version): + return False + + for i in range(len(self.version)): + if self.version[i] != that.version[i]: + return False + if self.prerel_tags: + return self.prerel_tags == that.prerel_tags + + return True + def _assemble(self, version, sep, prerel_sep, prerelease): s = sep.join(map(str, version)) if prerelease is not None: diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/__init__.py walinuxagent-2.2.32/azurelinuxagent/common/utils/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/utils/__init__.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,6 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/networkutil.py walinuxagent-2.2.32/azurelinuxagent/common/utils/networkutil.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/networkutil.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/utils/networkutil.py 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,95 @@ +# +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.6+ and Openssl 1.0+ +# + + +class RouteEntry(object): + """ + Represents a single route. The destination, gateway, and mask members are hex representations of the IPv4 address in + network byte order. 
+ """ + def __init__(self, interface, destination, gateway, mask, flags, metric): + self.interface = interface + self.destination = destination + self.gateway = gateway + self.mask = mask + self.flags = int(flags, 16) + self.metric = int(metric) + + @staticmethod + def _net_hex_to_dotted_quad(value): + if len(value) != 8: + raise Exception("String to dotted quad conversion must be 8 characters") + octets = [] + for idx in range(6, -2, -2): + octets.append(str(int(value[idx:idx+2], 16))) + return ".".join(octets) + + def destination_quad(self): + return self._net_hex_to_dotted_quad(self.destination) + + def gateway_quad(self): + return self._net_hex_to_dotted_quad(self.gateway) + + def mask_quad(self): + return self._net_hex_to_dotted_quad(self.mask) + + def to_json(self): + f = '{{"Iface": "{0}", "Destination": "{1}", "Gateway": "{2}", "Mask": "{3}", "Flags": "{4:#06x}", "Metric": "{5}"}}' + return f.format(self.interface, self.destination_quad(), self.gateway_quad(), self.mask_quad(), + self.flags, self.metric) + + def __str__(self): + f = "Iface: {0}\tDestination: {1}\tGateway: {2}\tMask: {3}\tFlags: {4:#06x}\tMetric: {5}" + return f.format(self.interface, self.destination_quad(), self.gateway_quad(), self.mask_quad(), + self.flags, self.metric) + + def __repr__(self): + return 'RouteEntry("{0}", "{1}", "{2}", "{3}", "{4:#04x}", "{5}")'\ + .format(self.interface, self.destination, self.gateway, self.mask, self.flags, self.metric) + + +class NetworkInterfaceCard: + def __init__(self, name, link_info): + self.name = name + self.ipv4 = set() + self.ipv6 = set() + self.link = link_info + + def add_ipv4(self, info): + self.ipv4.add(info) + + def add_ipv6(self, info): + self.ipv6.add(info) + + def __eq__(self, other): + return self.link == other.link and \ + self.ipv4 == other.ipv4 and \ + self.ipv6 == other.ipv6 + + @staticmethod + def _json_array(items): + return "[{0}]".format(",".join(['"{0}"'.format(x) for x in sorted(items)])) + + def __str__(self): + entries = ['"name": "{0}"'.format(self.name), + '"link": "{0}"'.format(self.link)] + if len(self.ipv4) > 0: + entries.append('"ipv4": {0}'.format(self._json_array(self.ipv4))) + if len(self.ipv6) > 0: + entries.append('"ipv6": {0}'.format(self._json_array(self.ipv6))) + return "{{ {0} }}".format(", ".join(entries)) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/processutil.py walinuxagent-2.2.32/azurelinuxagent/common/utils/processutil.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/processutil.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/utils/processutil.py 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,207 @@ +# Microsoft Azure Linux Agent +# +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# +# You may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ +# +import multiprocessing +import subprocess +import sys +import os +import time +import signal +from errno import ESRCH +from multiprocessing import Process + +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.exception import ExtensionError +from azurelinuxagent.common.future import ustr + +TELEMETRY_MESSAGE_MAX_LEN = 3200 + + +def sanitize(s): + return ustr(s, encoding='utf-8', errors='backslashreplace') + + +def format_stdout_stderr(stdout, stderr, max_len=TELEMETRY_MESSAGE_MAX_LEN): + """ + Format stdout and stderr's output to make it suitable in telemetry. + The goal is to maximize the amount of output given the constraints + of telemetry. + + For example, if there is more stderr output than stdout output give + more buffer space to stderr. + + :param str stdout: characters captured from stdout + :param str stderr: characters captured from stderr + :param int max_len: maximum length of the string to return + + :return: a string formatted with stdout and stderr that is less than + or equal to max_len. + :rtype: str + """ + template = "[stdout]\n{0}\n\n[stderr]\n{1}" + # +6 == len("{0}") + len("{1}") + max_len_each = int((max_len - len(template) + 6) / 2) + + if max_len_each <= 0: + return '' + + def to_s(captured_stdout, stdout_offset, captured_stderr, stderr_offset): + s = template.format(captured_stdout[stdout_offset:], captured_stderr[stderr_offset:]) + return s + + if len(stdout) + len(stderr) < max_len: + return to_s(stdout, 0, stderr, 0) + elif len(stdout) < max_len_each: + bonus = max_len_each - len(stdout) + stderr_len = min(max_len_each + bonus, len(stderr)) + return to_s(stdout, 0, stderr, -1*stderr_len) + elif len(stderr) < max_len_each: + bonus = max_len_each - len(stderr) + stdout_len = min(max_len_each + bonus, len(stdout)) + return to_s(stdout, -1*stdout_len, stderr, 0) + else: + return to_s(stdout, -1*max_len_each, stderr, -1*max_len_each) + + +def _destroy_process(process, signal_to_send=signal.SIGKILL): + """ + Completely destroy the target process. Close the stdout/stderr pipes, kill the process, reap the zombie. + If process is the leader of a process group, kill the entire process group. + + :param Popen process: Process to be sent a signal + :param int signal_to_send: Signal number to be sent + """ + process.stdout.close() + process.stderr.close() + try: + pid = process.pid + if os.getpgid(pid) == pid: + os.killpg(pid, signal_to_send) + else: + os.kill(pid, signal_to_send) + process.wait() + except OSError as e: + if e.errno != ESRCH: + raise + pass # If the process is already gone, that's fine + + +def capture_from_process_poll(process, cmd, timeout, code): + """ + Capture output from the process if it does not fork, or forks + and completes quickly. 
+ """ + retry = timeout + while retry > 0 and process.poll() is None: + time.sleep(1) + retry -= 1 + + # process did not fork, timeout expired + if retry == 0: + os.killpg(os.getpgid(process.pid), signal.SIGKILL) + stdout, stderr = process.communicate() + msg = format_stdout_stderr(sanitize(stdout), sanitize(stderr)) + raise ExtensionError("Timeout({0}): {1}\n{2}".format(timeout, cmd, msg), code=code) + + # process completed or forked + return_code = process.wait() + if return_code != 0: + raise ExtensionError("Non-zero exit code: {0}, {1}".format(return_code, cmd), code=code) + + stderr = b'' + stdout = b'cannot collect stdout' + + # attempt non-blocking process communication to capture output + def proc_comm(_process, _return): + try: + _stdout, _stderr = _process.communicate() + _return[0] = _stdout + _return[1] = _stderr + except Exception: + pass + + try: + mgr = multiprocessing.Manager() + ret_dict = mgr.dict() + + cproc = Process(target=proc_comm, args=(process, ret_dict)) + cproc.start() + + # allow 1s to capture output + cproc.join(1) + + if len(ret_dict) == 2: + stdout = ret_dict[0] + stderr = ret_dict[1] + + except Exception: + pass + + return stdout, stderr + + +def capture_from_process_no_timeout(process, cmd, code): + try: + stdout, stderr = process.communicate() + except OSError as e: + _destroy_process(process, signal.SIGKILL) + raise ExtensionError("Error while running '{0}': {1}".format(cmd, e.strerror), code=code) + except Exception as e: + _destroy_process(process, signal.SIGKILL) + raise ExtensionError("Exception while running '{0}': {1}".format(cmd, e), code=code) + + return stdout, stderr + + +def capture_from_process_raw(process, cmd, timeout, code): + """ + Captures stdout and stderr from an already-created process. + + :param subprocess.Popen process: Created by subprocess.Popen() + :param str cmd: The command string to be included in any exceptions + :param int timeout: Number of seconds the process is permitted to run + :return: The stdout and stderr captured from the process + :rtype: (str, str) + :raises ExtensionError: if a timeout occurred or if anything was raised by Popen.communicate() + """ + if not timeout: + stdout, stderr = capture_from_process_no_timeout(process, cmd, code) + else: + if os.getpgid(process.pid) != process.pid: + _destroy_process(process, signal.SIGKILL) + raise ExtensionError("Subprocess was not root of its own process group", code=code) + + stdout, stderr = capture_from_process_poll(process, cmd, timeout, code) + + return stdout, stderr + + +def capture_from_process(process, cmd, timeout=0, code=-1): + """ + Captures stdout and stderr from an already-created process. The output is "cooked" + into a string of reasonable length. 
+ + :param subprocess.Popen process: Created by subprocess.Popen() + :param str cmd: The command string to be included in any exceptions + :param int timeout: Number of seconds the process is permitted to run + :return: The stdout and stderr captured from the process + :rtype: (str, str) + :raises ExtensionError: if a timeout occurred or if anything was raised by Popen.communicate() + """ + stdout, stderr = capture_from_process_raw(process, cmd, timeout, code) + return format_stdout_stderr(sanitize(stdout), sanitize(stderr)) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/restutil.py walinuxagent-2.2.32/azurelinuxagent/common/utils/restutil.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/restutil.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/utils/restutil.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,10 +14,11 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import os +import re import threading import time import traceback @@ -28,8 +29,7 @@ from azurelinuxagent.common.exception import HttpError, ResourceGoneError from azurelinuxagent.common.future import httpclient, urlparse, ustr -from azurelinuxagent.common.version import PY_VERSION_MAJOR - +from azurelinuxagent.common.version import PY_VERSION_MAJOR, AGENT_NAME, GOAL_STATE_AGENT_VERSION SECURE_WARNING_EMITTED = False @@ -39,6 +39,9 @@ THROTTLE_RETRIES = 25 THROTTLE_DELAY_IN_SECONDS = 1 +REDACTED_TEXT = "" +SAS_TOKEN_RETRIEVAL_REGEX = re.compile(r'^(https?://[a-zA-Z0-9.].*sig=)([a-zA-Z0-9%-]*)(.*)$') + RETRY_CODES = [ httpclient.RESET_CONTENT, httpclient.PARTIAL_CONTENT, @@ -49,11 +52,10 @@ httpclient.SERVICE_UNAVAILABLE, httpclient.GATEWAY_TIMEOUT, httpclient.INSUFFICIENT_STORAGE, - 429, # Request Rate Limit Exceeded + 429, # Request Rate Limit Exceeded ] RESOURCE_GONE_CODES = [ - httpclient.BAD_REQUEST, httpclient.GONE ] @@ -63,6 +65,10 @@ httpclient.ACCEPTED ] +HOSTPLUGIN_UPSTREAM_FAILURE_CODES = [ + 502 +] + THROTTLE_CODES = [ httpclient.FORBIDDEN, httpclient.SERVICE_UNAVAILABLE, @@ -78,8 +84,11 @@ HTTP_PROXY_ENV = "http_proxy" HTTPS_PROXY_ENV = "https_proxy" +HTTP_USER_AGENT = "{0}/{1}".format(AGENT_NAME, GOAL_STATE_AGENT_VERSION) +HTTP_USER_AGENT_HEALTH = "{0}+health".format(HTTP_USER_AGENT) +INVALID_CONTAINER_CONFIGURATION = "InvalidContainerConfiguration" -DEFAULT_PROTOCOL_ENDPOINT='168.63.129.16' +DEFAULT_PROTOCOL_ENDPOINT = '168.63.129.16' HOST_PLUGIN_PORT = 32526 @@ -122,15 +131,27 @@ fib = (fib[1], fib[0]+fib[1]) return delay*fib[1] + def _is_retry_status(status, retry_codes=RETRY_CODES): return status in retry_codes + def _is_retry_exception(e): return len([x for x in RETRY_EXCEPTIONS if isinstance(e, x)]) > 0 + def _is_throttle_status(status): return status in THROTTLE_CODES + +def _is_invalid_container_configuration(response): + result = False + if response is not None and response.status == httpclient.BAD_REQUEST: + error_detail = read_response_error(response) + result = INVALID_CONTAINER_CONFIGURATION in error_detail + return result + + def _parse_url(url): o = urlparse(url) rel_uri = o.path @@ -166,39 +187,46 @@ return host, port +def 
redact_sas_tokens_in_urls(url): + return SAS_TOKEN_RETRIEVAL_REGEX.sub(r"\1" + REDACTED_TEXT + r"\3", url) + + def _http_request(method, host, rel_uri, port=None, data=None, secure=False, headers=None, proxy_host=None, proxy_port=None): headers = {} if headers is None else headers + headers['Connection'] = 'close' + use_proxy = proxy_host is not None and proxy_port is not None if port is None: port = 443 if secure else 80 + if 'User-Agent' not in headers: + headers['User-Agent'] = HTTP_USER_AGENT + if use_proxy: conn_host, conn_port = proxy_host, proxy_port scheme = "https" if secure else "http" url = "{0}://{1}:{2}{3}".format(scheme, host, port, rel_uri) - else: conn_host, conn_port = host, port url = rel_uri if secure: conn = httpclient.HTTPSConnection(conn_host, - conn_port, - timeout=10) + conn_port, + timeout=10) if use_proxy: conn.set_tunnel(host, port) - else: conn = httpclient.HTTPConnection(conn_host, - conn_port, - timeout=10) + conn_port, + timeout=10) logger.verbose("HTTP connection [{0}] [{1}] [{2}] [{3}]", method, - url, + redact_sas_tokens_in_urls(url), data, headers) @@ -293,8 +321,7 @@ if request_failed(resp): if _is_retry_status(resp.status, retry_codes=retry_codes): - msg = '[HTTP Retry] {0} {1} -- Status Code {2}'.format( - method, url, resp.status) + msg = '[HTTP Retry] {0} {1} -- Status Code {2}'.format(method, url, resp.status) # Note if throttled and ensure a safe, minimum number of # retry attempts if _is_throttle_status(resp.status): @@ -305,28 +332,38 @@ if resp.status in RESOURCE_GONE_CODES: raise ResourceGoneError() + # Map invalid container configuration errors to resource gone in + # order to force a goal state refresh, which in turn updates the + # container-id header passed to HostGAPlugin. + # See #1294. + if _is_invalid_container_configuration(resp): + raise ResourceGoneError() + return resp except httpclient.HTTPException as e: - msg = '[HTTP Failed] {0} {1} -- HttpException {2}'.format( - method, url, e) + clean_url = redact_sas_tokens_in_urls(url) + msg = '[HTTP Failed] {0} {1} -- HttpException {2}'.format(method, clean_url, e) if _is_retry_exception(e): continue break except IOError as e: IOErrorCounter.increment(host=host, port=port) - msg = '[HTTP Failed] {0} {1} -- IOError {2}'.format( - method, url, e) + clean_url = redact_sas_tokens_in_urls(url) + msg = '[HTTP Failed] {0} {1} -- IOError {2}'.format(method, clean_url, e) continue - raise HttpError("{0} -- {1} attempts made".format(msg,attempt)) + raise HttpError("{0} -- {1} attempts made".format(msg, attempt)) -def http_get(url, headers=None, use_proxy=False, - max_retry=DEFAULT_RETRIES, - retry_codes=RETRY_CODES, - retry_delay=DELAY_IN_SECONDS): +def http_get(url, + headers=None, + use_proxy=False, + max_retry=DEFAULT_RETRIES, + retry_codes=RETRY_CODES, + retry_delay=DELAY_IN_SECONDS): + return http_request("GET", url, None, headers=headers, use_proxy=use_proxy, @@ -335,10 +372,13 @@ retry_delay=retry_delay) -def http_head(url, headers=None, use_proxy=False, - max_retry=DEFAULT_RETRIES, - retry_codes=RETRY_CODES, - retry_delay=DELAY_IN_SECONDS): +def http_head(url, + headers=None, + use_proxy=False, + max_retry=DEFAULT_RETRIES, + retry_codes=RETRY_CODES, + retry_delay=DELAY_IN_SECONDS): + return http_request("HEAD", url, None, headers=headers, use_proxy=use_proxy, @@ -347,10 +387,14 @@ retry_delay=retry_delay) -def http_post(url, data, headers=None, use_proxy=False, - max_retry=DEFAULT_RETRIES, - retry_codes=RETRY_CODES, - retry_delay=DELAY_IN_SECONDS): +def http_post(url, + data, + headers=None, 
+ use_proxy=False, + max_retry=DEFAULT_RETRIES, + retry_codes=RETRY_CODES, + retry_delay=DELAY_IN_SECONDS): + return http_request("POST", url, data, headers=headers, use_proxy=use_proxy, @@ -359,10 +403,14 @@ retry_delay=retry_delay) -def http_put(url, data, headers=None, use_proxy=False, - max_retry=DEFAULT_RETRIES, - retry_codes=RETRY_CODES, - retry_delay=DELAY_IN_SECONDS): +def http_put(url, + data, + headers=None, + use_proxy=False, + max_retry=DEFAULT_RETRIES, + retry_codes=RETRY_CODES, + retry_delay=DELAY_IN_SECONDS): + return http_request("PUT", url, data, headers=headers, use_proxy=use_proxy, @@ -371,10 +419,13 @@ retry_delay=retry_delay) -def http_delete(url, headers=None, use_proxy=False, +def http_delete(url, + headers=None, + use_proxy=False, max_retry=DEFAULT_RETRIES, retry_codes=RETRY_CODES, retry_delay=DELAY_IN_SECONDS): + return http_request("DELETE", url, None, headers=headers, use_proxy=use_proxy, @@ -382,12 +433,22 @@ retry_codes=retry_codes, retry_delay=retry_delay) + def request_failed(resp, ok_codes=OK_CODES): return not request_succeeded(resp, ok_codes=ok_codes) + def request_succeeded(resp, ok_codes=OK_CODES): return resp is not None and resp.status in ok_codes + +def request_failed_at_hostplugin(resp, upstream_failure_codes=HOSTPLUGIN_UPSTREAM_FAILURE_CODES): + """ + Host plugin will return 502 for any upstream issue, so a failure is any 5xx except 502 + """ + return resp is not None and resp.status >= 500 and resp.status not in upstream_failure_codes + + def read_response_error(resp): result = '' if resp is not None: diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/shellutil.py walinuxagent-2.2.32/azurelinuxagent/common/utils/shellutil.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/shellutil.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/utils/shellutil.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
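The SAS-token redaction added to restutil.py keeps HTTP logging useful without leaking storage signatures; a short sketch of the helper in isolation (the URL is made up):

from azurelinuxagent.common.utils.restutil import redact_sas_tokens_in_urls

url = ("https://example.blob.core.windows.net/vhds/status.blob"
       "?sv=2018-03-28&sig=abc123DEF456&se=2018-09-30")
# Only the value following "sig=" is replaced with REDACTED_TEXT; the scheme,
# host, path, and the rest of the query string are preserved, so the log line
# still identifies which blob was being accessed.
print(redact_sas_tokens_in_urls(url))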
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import subprocess @@ -57,6 +57,11 @@ Shell command util functions """ +def has_command(cmd): + """ + Return True if the given command is on the path + """ + return not run(cmd, False) def run(cmd, chk_err=True): """ @@ -76,7 +81,7 @@ Reports exceptions to Error if chk_err parameter is True """ if log_cmd: - logger.verbose(u"Run '{0}'", cmd) + logger.verbose(u"Command: [{0}]", cmd) try: output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, @@ -84,22 +89,21 @@ output = ustr(output, encoding='utf-8', errors="backslashreplace") + except subprocess.CalledProcessError as e: + output = ustr(e.output, + encoding='utf-8', + errors="backslashreplace") + if chk_err: + msg = u"Command: [{0}], " \ + u"return code: [{1}], " \ + u"result: [{2}]".format(cmd, e.returncode, output) + logger.error(msg) + return e.returncode, output except Exception as e: - if type(e) is subprocess.CalledProcessError: - output = ustr(e.output, - encoding='utf-8', - errors="backslashreplace") - if chk_err: - if log_cmd: - logger.error(u"Command: '{0}'", e.cmd) - logger.error(u"Return code: {0}", e.returncode) - logger.error(u"Result: {0}", output) - return e.returncode, output - else: - logger.error( - u"'{0}' raised unexpected exception: '{1}'".format( - cmd, ustr(e))) - return -1, ustr(e) + if chk_err: + logger.error(u"Command [{0}] raised unexpected exception: [{1}]" + .format(cmd, ustr(e))) + return -1, ustr(e) return 0, output diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/textutil.py walinuxagent-2.2.32/azurelinuxagent/common/utils/textutil.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/utils/textutil.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/utils/textutil.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,19 +14,19 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ import base64 import crypt +import hashlib import random import re import string import struct import sys +import zlib import xml.dom.minidom as minidom -from distutils.version import LooseVersion as Version - def parse_doc(xml_text): """ @@ -272,9 +272,9 @@ def remove_bom(c): - ''' + """ bom is comprised of a sequence of three chars,0xef, 0xbb, 0xbf, in case of utf-8. - ''' + """ if not is_str_none_or_whitespace(c) and \ len(c) > 2 and \ str_to_ord(c[0]) > 128 and \ @@ -302,6 +302,21 @@ return base64_bytes +def compress(s): + """ + Compress a string, and return the base64 encoded result of the compression. + + This method returns a string instead of a byte array. It is expected + that this method is called to compress smallish strings, not to compress + the contents of a file. The output of this method is suitable for + embedding in log statements. 
+ """ + from azurelinuxagent.common.version import PY_VERSION_MAJOR + if PY_VERSION_MAJOR > 2: + return base64.b64encode(zlib.compress(bytes(s, 'utf-8'))).decode('utf-8') + return base64.b64encode(zlib.compress(s)) + + def b64encode(s): from azurelinuxagent.common.version import PY_VERSION_MAJOR if PY_VERSION_MAJOR > 2: @@ -323,6 +338,7 @@ return shlex.split(s.encode('utf-8')) return shlex.split(s) + def swap_hexstring(s, width=2): r = len(s) % width if r != 0: @@ -334,17 +350,36 @@ s, re.IGNORECASE))) + def parse_json(json_str): """ Parse json string and return a resulting dictionary """ # trim null and whitespaces result = None - if not is_str_none_or_whitespace(json_str): + if not is_str_empty(json_str): import json result = json.loads(json_str.rstrip(' \t\r\n\0')) return result + def is_str_none_or_whitespace(s): return s is None or len(s) == 0 or s.isspace() + + +def is_str_empty(s): + return is_str_none_or_whitespace(s) or is_str_none_or_whitespace(s.rstrip(' \t\r\n\0')) + + +def hash_strings(string_list): + """ + Compute a cryptographic hash of a list of strings + + :param string_list: The strings to be hashed + :return: The cryptographic hash (digest) of the strings in the order provided + """ + sha1_hash = hashlib.sha1() + for item in string_list: + sha1_hash.update(item.encode()) + return sha1_hash.digest() diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/version.py walinuxagent-2.2.32/azurelinuxagent/common/version.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/common/version.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/common/version.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import os @@ -23,7 +23,7 @@ import azurelinuxagent.common.conf as conf import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.common.utils.flexible_version import FlexibleVersion -from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.future import ustr, get_linux_distribution def get_f5_platform(): @@ -80,14 +80,17 @@ elif 'OpenBSD' in platform.system(): release = re.sub('\-.*\Z', '', ustr(platform.release())) osinfo = ['openbsd', release, '', 'openbsd'] - elif 'linux_distribution' in dir(platform): - supported = platform._supported_dists + ('alpine',) - osinfo = list(platform.linux_distribution(full_distribution_name=0, - supported_dists=supported)) - full_name = platform.linux_distribution()[0].strip() - osinfo.append(full_name) + elif 'Linux' in platform.system(): + osinfo = get_linux_distribution(0, 'alpine') + elif 'NS-BSD' in platform.system(): + release = re.sub('\-.*\Z', '', ustr(platform.release())) + osinfo = ['nsbsd', release, '', 'nsbsd'] else: - osinfo = platform.dist() + try: + # dist() removed in Python 3.7 + osinfo = list(platform.dist()) + [''] + except: + osinfo = ['UNKNOWN', 'FFFF', '', ''] # The platform.py lib has issue with detecting oracle linux distribution. # Merge the following patch provided by oracle as a temporary fix. 
@@ -106,6 +109,9 @@ if os.path.exists("/etc/cp-release"): osinfo = get_checkpoint_platform() + if os.path.exists("/home/guestshell/azure"): + osinfo = ['iosxe', 'csr1000v', '', 'Cisco IOSXE Linux'] + # Remove trailing whitespace and quote in distro name osinfo[0] = osinfo[0].strip('"').strip(' ').lower() return osinfo @@ -113,7 +119,7 @@ AGENT_NAME = "WALinuxAgent" AGENT_LONG_NAME = "Azure Linux Agent" -AGENT_VERSION = '2.2.20' +AGENT_VERSION = '2.2.32.2' AGENT_LONG_VERSION = "{0}-{1}".format(AGENT_NAME, AGENT_VERSION) AGENT_DESCRIPTION = """ The Azure Linux Agent supports the provisioning and running of Linux @@ -161,14 +167,17 @@ version = AGENT_VERSION return agent, FlexibleVersion(version) + def is_agent_package(path): path = os.path.basename(path) return not re.match(AGENT_PKG_PATTERN, path) is None + def is_agent_path(path): path = os.path.basename(path) return not re.match(AGENT_NAME_PATTERN, path) is None + CURRENT_AGENT, CURRENT_VERSION = set_current_agent() diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/__init__.py walinuxagent-2.2.32/azurelinuxagent/daemon/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/daemon/__init__.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.daemon.main import get_daemon_handler diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/main.py walinuxagent-2.2.32/azurelinuxagent/daemon/main.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/main.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/daemon/main.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
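AGENT_VERSION is parsed with FlexibleVersion above, and the matches() method added to flexible_version.py earlier in this patch is a prefix test that lets a short version string stand for a whole family of versions. A sketch of the intended behaviour (constructor defaults are assumed, since they are not part of this hunk):

from azurelinuxagent.common.utils.flexible_version import FlexibleVersion

family = FlexibleVersion("2.2")
family.matches(FlexibleVersion("2.2.32.2"))   # expected True: every field of "2.2" matches
family.matches(FlexibleVersion("2.3.0"))      # expected False: the second field differs
FlexibleVersion("2.2.32.2").matches(family)   # expected False: the candidate has more fields than the target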
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import os @@ -26,6 +26,7 @@ import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil +from azurelinuxagent.common.cgroups import CGroups from azurelinuxagent.common.event import add_event, WALAEventOperation from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil @@ -65,6 +66,9 @@ PY_VERSION_MICRO) self.check_pid() + self.initialize_environment() + + CGroups.setup() # If FIPS is enabled, set the OpenSSL environment variable # Note: @@ -95,6 +99,23 @@ fileutil.write_file(pid_file, ustr(os.getpid())) + def sleep_if_disabled(self): + agent_disabled_file_path = conf.get_disable_agent_file_path() + if os.path.exists(agent_disabled_file_path): + import threading + logger.warn("Disabling the guest agent by sleeping forever; " + "to re-enable, remove {0} and restart" + .format(agent_disabled_file_path)) + self.running = False + disable_event = threading.Event() + disable_event.wait() + + def initialize_environment(self): + # Create lib dir + if not os.path.isdir(conf.get_lib_dir()): + fileutil.mkdir(conf.get_lib_dir(), mode=0o700) + os.chdir(conf.get_lib_dir()) + def daemon(self, child_args=None): logger.info("Run daemon") @@ -105,11 +126,6 @@ self.provision_handler = get_provision_handler() self.update_handler = get_update_handler() - # Create lib dir - if not os.path.isdir(conf.get_lib_dir()): - fileutil.mkdir(conf.get_lib_dir(), mode=0o700) - os.chdir(conf.get_lib_dir()) - if conf.get_detect_scvmm_env(): self.scvmm_handler.run() @@ -117,7 +133,7 @@ self.resourcedisk_handler.run() # Always redetermine the protocol start (e.g., wireserver vs. - # on-premise) since a VHD can move between environments + # on-premise) since a VHD can move between environments self.protocol_util.clear_protocol() self.provision_handler.run() @@ -144,5 +160,7 @@ else: logger.info("RDMA capabilities are not enabled, skipping") + self.sleep_if_disabled() + while self.running: self.update_handler.run_latest(child_args=child_args) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/resourcedisk/default.py walinuxagent-2.2.32/azurelinuxagent/daemon/resourcedisk/default.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/resourcedisk/default.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/daemon/resourcedisk/default.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import os @@ -305,13 +305,13 @@ # Probable errors: # - OSError: Seen on Cygwin, libc notimpl? # - AttributeError: What if someone runs this under... - with open(filename, 'w') as f: - try: + try: + with open(filename, 'w') as f: os.posix_fallocate(f.fileno(), 0, nbytes) return 0 - except: - # Not confident with this thing, just keep trying... - pass + except: + # Not confident with this thing, just keep trying... 
+ pass # fallocate command ret = shellutil.run( diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/resourcedisk/factory.py walinuxagent-2.2.32/azurelinuxagent/daemon/resourcedisk/factory.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/resourcedisk/factory.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/daemon/resourcedisk/factory.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,11 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # -import azurelinuxagent.common.logger as logger -from azurelinuxagent.common.utils.textutil import Version from azurelinuxagent.common.version import DISTRO_NAME, \ DISTRO_VERSION, \ DISTRO_FULL_NAME @@ -24,6 +22,9 @@ from .freebsd import FreeBSDResourceDiskHandler from .openbsd import OpenBSDResourceDiskHandler +from distutils.version import LooseVersion as Version + + def get_resourcedisk_handler(distro_name=DISTRO_NAME, distro_version=DISTRO_VERSION, distro_full_name=DISTRO_FULL_NAME): diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/resourcedisk/freebsd.py walinuxagent-2.2.32/azurelinuxagent/daemon/resourcedisk/freebsd.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/resourcedisk/freebsd.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/daemon/resourcedisk/freebsd.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/resourcedisk/__init__.py walinuxagent-2.2.32/azurelinuxagent/daemon/resourcedisk/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/resourcedisk/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/daemon/resourcedisk/__init__.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
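sleep_if_disabled(), added to the daemon above, turns a marker file into a kill switch: when the file named by conf.get_disable_agent_file_path() exists, the daemon logs a warning and then blocks on an event that is never set. A minimal sketch of checking for that state (the helper name is hypothetical):

import os

import azurelinuxagent.common.conf as conf

def agent_is_disabled():
    # Mirrors the check in sleep_if_disabled(): the agent stays asleep until the
    # marker file is removed and the daemon is restarted.
    return os.path.exists(conf.get_disable_agent_file_path())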
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.daemon.resourcedisk.factory import get_resourcedisk_handler diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/resourcedisk/openbsd.py walinuxagent-2.2.32/azurelinuxagent/daemon/resourcedisk/openbsd.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/resourcedisk/openbsd.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/daemon/resourcedisk/openbsd.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # Copyright 2017 Reyk Floeter # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and OpenSSL 1.0+ +# Requires Python 2.6+ and OpenSSL 1.0+ # import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/scvmm.py walinuxagent-2.2.32/azurelinuxagent/daemon/scvmm.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/daemon/scvmm.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/daemon/scvmm.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import re diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/distro/__init__.py walinuxagent-2.2.32/azurelinuxagent/distro/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/distro/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/distro/__init__.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,6 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/distro/suse/__init__.py walinuxagent-2.2.32/azurelinuxagent/distro/suse/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/distro/suse/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/distro/suse/__init__.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,6 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/ga/env.py walinuxagent-2.2.32/azurelinuxagent/ga/env.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/ga/env.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/ga/env.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import re @@ -36,6 +36,7 @@ from azurelinuxagent.common.protocol import get_protocol_util from azurelinuxagent.common.protocol.wire import INCARNATION_FILE_NAME from azurelinuxagent.common.utils import fileutil +from azurelinuxagent.common.utils.archive import StateArchiver from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION CACHE_PATTERNS = [ @@ -46,7 +47,7 @@ MAXIMUM_CACHED_FILES = 50 -CACHE_PURGE_INTERVAL = datetime.timedelta(hours=24) +ARCHIVE_INTERVAL = datetime.timedelta(hours=24) def get_env_handler(): @@ -70,7 +71,8 @@ self.dhcp_id = None self.server_thread = None self.dhcp_warning_enabled = True - self.last_purge = None + self.last_archive = None + self.archiver = StateArchiver(conf.get_lib_dir()) def run(self): if not self.stopped: @@ -82,6 +84,12 @@ self.dhcp_handler.conf_routes() self.hostname = self.osutil.get_hostname_record() self.dhcp_id = self.osutil.get_dhcp_pid() + self.start() + + def is_alive(self): + return self.server_thread.is_alive() + + def start(self): self.server_thread = threading.Thread(target=self.monitor) self.server_thread.setDaemon(True) self.server_thread.start() @@ -94,10 +102,21 @@ Purge unnecessary files from disk cache. """ protocol = self.protocol_util.get_protocol() + reset_firewall_fules = False while not self.stopped: self.osutil.remove_rules_files() if conf.enable_firewall(): + + # If the rules ever change we must reset all rules and start over again. + # + # There was a rule change at 2.2.26, which started dropping non-root traffic + # to WireServer. The previous rules allowed traffic. Having both rules in + # place negated the fix in 2.2.26. + if not reset_firewall_fules: + self.osutil.remove_firewall(dst_ip=protocol.endpoint, uid=os.getuid()) + reset_firewall_fules = True + success = self.osutil.enable_firewall( dst_ip=protocol.endpoint, uid=os.getuid()) @@ -107,7 +126,7 @@ version=CURRENT_VERSION, op=WALAEventOperation.Firewall, is_success=success, - log_event=True) + log_event=False) timeout = conf.get_root_device_scsi_timeout() if timeout is not None: @@ -118,7 +137,7 @@ self.handle_dhclient_restart() - self.purge_disk_cache() + self.archive_history() time.sleep(5) @@ -152,71 +171,18 @@ self.dhcp_handler.conf_routes() self.dhcp_id = new_pid - def purge_disk_cache(self): + def archive_history(self): """ - Ensure the number of cached files does not exceed a maximum count. - Purge only once per interval, and never delete files related to the - current incarnation. + Purge history if we have exceed the maximum count. + Create a .zip of the history that has been preserved. 
""" - if self.last_purge is not None \ + if self.last_archive is not None \ and datetime.datetime.utcnow() < \ - self.last_purge + CACHE_PURGE_INTERVAL: + self.last_archive + ARCHIVE_INTERVAL: return - current_incarnation = -1 - self.last_purge = datetime.datetime.utcnow() - incarnation_file = os.path.join(conf.get_lib_dir(), - INCARNATION_FILE_NAME) - if os.path.exists(incarnation_file): - last_incarnation = fileutil.read_file(incarnation_file) - if last_incarnation is not None: - current_incarnation = int(last_incarnation) - - logger.info("Purging disk cache, current incarnation is {0}" - .format('not found' - if current_incarnation == -1 - else current_incarnation)) - - # Create tuples: (prefix, suffix, incarnation, name, file_modified) - files = [] - for f in os.listdir(conf.get_lib_dir()): - full_path = os.path.join(conf.get_lib_dir(), f) - for pattern in CACHE_PATTERNS: - m = pattern.match(f) - if m is not None: - prefix = m.group(1) - suffix = m.group(3) - incarnation = int(m.group(2)) - file_modified = os.path.getmtime(full_path) - t = (prefix, suffix, incarnation, f, file_modified) - files.append(t) - break - - if len(files) <= 0: - return - - # Sort by (prefix, suffix, file_modified) in reverse order - files = sorted(files, key=operator.itemgetter(0, 1, 4), reverse=True) - - # Remove any files in excess of the maximum allowed - # -- Restart then whenever the (prefix, suffix) change - count = 0 - last_match = [None, None] - for f in files: - if last_match != f[0:2]: - last_match = f[0:2] - count = 0 - - if current_incarnation == f[2]: - logger.verbose("Skipping {0}".format(f[3])) - continue - - count += 1 - - if count > MAXIMUM_CACHED_FILES: - full_name = os.path.join(conf.get_lib_dir(), f[3]) - logger.verbose("Deleting {0}".format(full_name)) - os.remove(full_name) + self.archiver.purge() + self.archiver.archive() def stop(self): """ diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/ga/exthandlers.py walinuxagent-2.2.32/azurelinuxagent/ga/exthandlers.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/ga/exthandlers.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/ga/exthandlers.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,47 +14,49 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import datetime import glob import json +import operator import os -import os.path +import random import re import shutil import stat import subprocess import time +import traceback import zipfile import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil -import azurelinuxagent.common.utils.restutil as restutil -import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.version as version -from azurelinuxagent.common.event import add_event, WALAEventOperation -from azurelinuxagent.common.exception import ExtensionError, ProtocolError, HttpError +from azurelinuxagent.common.cgroups import CGroups, CGroupsTelemetry +from azurelinuxagent.common.errorstate import ErrorState, ERROR_STATE_DELTA_DEFAULT, ERROR_STATE_DELTA_INSTALL +from azurelinuxagent.common.event import add_event, WALAEventOperation, elapsed_milliseconds, report_event +from azurelinuxagent.common.exception import ExtensionError, ProtocolError, ProtocolNotFoundError from azurelinuxagent.common.future import ustr -from azurelinuxagent.common.version import AGENT_VERSION from azurelinuxagent.common.protocol.restapi import ExtHandlerStatus, \ ExtensionStatus, \ ExtensionSubStatus, \ - Extension, \ VMStatus, ExtHandler, \ get_properties, \ set_properties from azurelinuxagent.common.utils.flexible_version import FlexibleVersion -from azurelinuxagent.common.utils.textutil import Version +from azurelinuxagent.common.utils.processutil import capture_from_process from azurelinuxagent.common.protocol import get_protocol_util -from azurelinuxagent.common.version import AGENT_NAME, CURRENT_AGENT, CURRENT_VERSION +from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION -#HandlerEnvironment.json schema version + +# HandlerEnvironment.json schema version HANDLER_ENVIRONMENT_VERSION = 1.0 +EXTENSION_STATUS_ERROR = 'error' VALID_EXTENSION_STATUS = ['transitioning', 'error', 'success', 'warning'] VALID_HANDLER_STATUS = ['Ready', 'NotReady', "Installing", "Unresponsive"] @@ -62,17 +64,19 @@ HANDLER_PATTERN = "^([^-]+)-(\d+(?:\.\d+)*)" HANDLER_NAME_PATTERN = re.compile(HANDLER_PATTERN+"$", re.IGNORECASE) HANDLER_PKG_EXT = ".zip" -HANDLER_PKG_PATTERN = re.compile(HANDLER_PATTERN+"\\"+HANDLER_PKG_EXT+"$", - re.IGNORECASE) +HANDLER_PKG_PATTERN = re.compile(HANDLER_PATTERN + r"\.zip$", re.IGNORECASE) + def validate_has_key(obj, key, fullname): if key not in obj: raise ExtensionError("Missing: {0}".format(fullname)) + def validate_in_range(val, valid_range, name): if val not in valid_range: raise ExtensionError("Invalid {0}: {1}".format(name, val)) + def parse_formatted_message(formatted_message): if formatted_message is None: return None @@ -80,8 +84,9 @@ validate_has_key(formatted_message, 'message', 'formattedMessage/message') return formatted_message.get('message') + def parse_ext_substatus(substatus): - #Check extension sub status format + # Check extension sub status format validate_has_key(substatus, 'status', 'substatus/status') validate_in_range(substatus['status'], VALID_EXTENSION_STATUS, 'substatus/status') @@ -93,23 +98,25 @@ status.message = parse_formatted_message(formatted_message) return status + def parse_ext_status(ext_status, data): if data is None or len(data) is None: return - #Currently, only the first status will be reported + # Currently, only the first status will be reported data = data[0] - #Check extension 
status format + # Check extension status format validate_has_key(data, 'status', 'status') status_data = data['status'] validate_has_key(status_data, 'status', 'status/status') - - validate_in_range(status_data['status'], VALID_EXTENSION_STATUS, - 'status/status') + + status = status_data['status'] + if status not in VALID_EXTENSION_STATUS: + status = EXTENSION_STATUS_ERROR applied_time = status_data.get('configurationAppliedTime') ext_status.configurationAppliedTime = applied_time ext_status.operation = status_data.get('operation') - ext_status.status = status_data.get('status') + ext_status.status = status ext_status.code = status_data.get('code', 0) formatted_message = status_data.get('formattedMessage') ext_status.message = parse_formatted_message(formatted_message) @@ -120,18 +127,20 @@ if substatus is not None: ext_status.substatusList.append(parse_ext_substatus(substatus)) -# This code migrates, if it exists, handler state and status from an -# agent-owned directory into the handler-owned config directory -# -# Notes: -# - The v2.0.x branch wrote all handler-related state into the handler-owned -# config directory (e.g., /var/lib/waagent/Microsoft.Azure.Extensions.LinuxAsm-2.0.1/config). -# - The v2.1.x branch original moved that state into an agent-owned handler -# state directory (e.g., /var/lib/waagent/handler_state). -# - This move can cause v2.1.x agents to multiply invoke a handler's install -# command. It also makes clean-up more difficult since the agent must -# remove the state as well as the handler directory. + def migrate_handler_state(): + """ + Migrate handler state and status (if they exist) from an agent-owned directory into the + handler-owned config directory + + Notes: + - The v2.0.x branch wrote all handler-related state into the handler-owned config + directory (e.g., /var/lib/waagent/Microsoft.Azure.Extensions.LinuxAsm-2.0.1/config). + - The v2.1.x branch original moved that state into an agent-owned handler + state directory (e.g., /var/lib/waagent/handler_state). + - This move can cause v2.1.x agents to multiply invoke a handler's install command. It also makes + clean-up more difficult since the agent must remove the state as well as the handler directory. 
+ """ handler_state_path = os.path.join(conf.get_lib_dir(), "handler_state") if not os.path.isdir(handler_state_path): return @@ -164,6 +173,7 @@ NotInstalled = "NotInstalled" Installed = "Installed" Enabled = "Enabled" + Failed = "Failed" def get_exthandlers_handler(): @@ -176,24 +186,41 @@ self.protocol = None self.ext_handlers = None self.last_etag = None - self.last_guids = {} self.log_report = False self.log_etag = True + self.log_process = False + + self.report_status_error_state = ErrorState() + self.get_artifact_error_state = ErrorState(min_timedelta=ERROR_STATE_DELTA_INSTALL) def run(self): self.ext_handlers, etag = None, None try: self.protocol = self.protocol_util.get_protocol() self.ext_handlers, etag = self.protocol.get_ext_handlers() + self.get_artifact_error_state.reset() except Exception as e: - msg = u"Exception retrieving extension handlers: {0}".format( - ustr(e)) - logger.warn(msg) + msg = u"Exception retrieving extension handlers: {0}".format(ustr(e)) + detailed_msg = '{0} {1}'.format(msg, traceback.format_exc()) + + self.get_artifact_error_state.incr() + + if self.get_artifact_error_state.is_triggered(): + add_event(AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.GetArtifactExtended, + is_success=False, + message="Failed to get extension artifact for over " + "{0): {1}".format(self.get_artifact_error_state.min_timedelta, msg)) + self.get_artifact_error_state.reset() + else: + logger.warn(msg) + add_event(AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.ExtensionProcessing, is_success=False, - message=msg) + message=detailed_msg) return try: @@ -217,19 +244,6 @@ message=msg) return - def run_status(self): - self.report_ext_handlers_status() - return - - def get_guid(self, name): - return self.last_guids.get(name, None) - - def is_new_guid(self, ext_handler): - last_guid = self.get_guid(ext_handler.name) - if last_guid is None: - return True - return last_guid != ext_handler.properties.upgradeGuid - def cleanup_outdated_handlers(self): handlers = [] pkgs = [] @@ -256,7 +270,7 @@ eh.properties.version = str(FlexibleVersion(item[separator+1:])) handler = ExtHandlerInstance(eh, self.protocol) - except Exception as e: + except Exception: continue if handler.get_handler_state() != ExtHandlerState.NotInstalled: continue @@ -272,28 +286,27 @@ for pkg in pkgs: try: os.remove(pkg) - logger.verbose("Removed orphaned extension package " - "{0}".format(pkg)) - except Exception as e: - logger.warn("Failed to remove orphaned package: {0}".format( - pkg)) + logger.verbose("Removed orphaned extension package {0}".format(pkg)) + except OSError as e: + logger.warn("Failed to remove orphaned package {0}: {1}".format(pkg, e.strerror)) # Finally, remove the directories and packages of the # uninstalled handlers for handler in handlers: handler.rm_ext_handler_dir() - pkg = os.path.join(conf.get_lib_dir(), - handler.get_full_name() + HANDLER_PKG_EXT) + pkg = os.path.join(conf.get_lib_dir(), handler.get_full_name() + HANDLER_PKG_EXT) if os.path.isfile(pkg): try: os.remove(pkg) - logger.verbose("Removed extension package " - "{0}".format(pkg)) - except Exception as e: - logger.warn("Failed to remove extension package: " - "{0}".format(pkg)) + logger.verbose("Removed extension package {0}".format(pkg)) + except OSError as e: + logger.warn("Failed to remove extension package {0}: {1}".format(pkg, e.strerror)) def handle_ext_handlers(self, etag=None): + if not conf.get_extensions_enabled(): + logger.verbose("Extension handling is disabled") + return + if 
self.ext_handlers.extHandlers is None or \ len(self.ext_handlers.extHandlers) == 0: logger.verbose("No extension handler config found") @@ -305,6 +318,7 @@ logger.info("Extension handling is on hold") return + self.ext_handlers.extHandlers.sort(key=operator.methodcaller('sort_key')) for ext_handler in self.ext_handlers.extHandlers: # TODO: handle install in sequence, enable in parallel self.handle_ext_handler(ext_handler, etag) @@ -314,15 +328,16 @@ try: state = ext_handler.properties.state - # The extension is to be enabled, there is an upgrade GUID - # and the GUID is NOT new - if state == u"enabled" and \ - ext_handler.properties.upgradeGuid is not None and \ - not self.is_new_guid(ext_handler): - logger.info("New GUID is the same as the old GUID. Exiting without upgrading.") + if ext_handler_i.decide_version(target_state=state) is None: + version = ext_handler_i.ext_handler.properties.version + name = ext_handler_i.ext_handler.name + err_msg = "Unable to find version {0} in manifest for extension {1}".format(version, name) + ext_handler_i.set_operation(WALAEventOperation.Download) + ext_handler_i.set_handler_status(message=ustr(err_msg), code=-1) + ext_handler_i.report_event(message=ustr(err_msg), is_success=False) return - ext_handler_i.decide_version(target_state=state) + self.get_artifact_error_state.reset() if not ext_handler_i.is_upgrade and self.last_etag == etag: if self.log_etag: ext_handler_i.logger.verbose("Version {0} is current for etag {1}", @@ -336,29 +351,36 @@ ext_handler_i.logger.info("Target handler state: {0}", state) if state == u"enabled": self.handle_enable(ext_handler_i) - if ext_handler.properties.upgradeGuid is not None: - ext_handler_i.logger.info("New Upgrade GUID: {0}", ext_handler.properties.upgradeGuid) - self.last_guids[ext_handler.name] = ext_handler.properties.upgradeGuid elif state == u"disabled": self.handle_disable(ext_handler_i) - # Remove the GUID from the dictionary so that it is upgraded upon re-enable - self.last_guids.pop(ext_handler.name, None) elif state == u"uninstall": self.handle_uninstall(ext_handler_i) - # Remove the GUID from the dictionary so that it is upgraded upon re-install - self.last_guids.pop(ext_handler.name, None) else: message = u"Unknown ext handler state:{0}".format(state) raise ExtensionError(message) + except ExtensionError as e: + self.handle_handle_ext_handler_error(ext_handler_i, e, e.code) except Exception as e: - ext_handler_i.set_handler_status(message=ustr(e), code=-1) - ext_handler_i.report_event(message=ustr(e), is_success=False) + self.handle_handle_ext_handler_error(ext_handler_i, e) + + def handle_handle_ext_handler_error(self, ext_handler_i, e, code=-1): + msg = ustr(e) + ext_handler_i.set_handler_status(message=msg, code=code) + + self.get_artifact_error_state.incr() + if self.get_artifact_error_state.is_triggered(): + report_event(op=WALAEventOperation.GetArtifactExtended, + message="Failed to get artifact for over " + "{0}: {1}".format(self.get_artifact_error_state.min_timedelta, msg), + is_success=False) + self.get_artifact_error_state.reset() + else: + ext_handler_i.logger.warn(msg) def handle_enable(self, ext_handler_i): + self.log_process = True old_ext_handler_i = ext_handler_i.get_installed_ext_handler() - if old_ext_handler_i is not None and \ - old_ext_handler_i.version_gt(ext_handler_i): - raise ExtensionError(u"Downgrade not allowed") + handler_state = ext_handler_i.get_handler_state() ext_handler_i.logger.info("[Enable] current handler state is: {0}", handler_state.lower()) @@ -368,10 +390,13 @@ 
ext_handler_i.update_settings() if old_ext_handler_i is None: ext_handler_i.install() - elif ext_handler_i.version_gt(old_ext_handler_i): + elif ext_handler_i.version_ne(old_ext_handler_i): old_ext_handler_i.disable() ext_handler_i.copy_status_files(old_ext_handler_i) - ext_handler_i.update() + if ext_handler_i.version_gt(old_ext_handler_i): + ext_handler_i.update() + else: + old_ext_handler_i.update(version=ext_handler_i.ext_handler.properties.version) old_ext_handler_i.uninstall() old_ext_handler_i.rm_ext_handler_dir() ext_handler_i.update_with_install() @@ -381,6 +406,7 @@ ext_handler_i.enable() def handle_disable(self, ext_handler_i): + self.log_process = True handler_state = ext_handler_i.get_handler_state() ext_handler_i.logger.info("[Disable] current handler state is: {0}", handler_state.lower()) @@ -388,6 +414,7 @@ ext_handler_i.disable() def handle_uninstall(self, ext_handler_i): + self.log_process = True handler_state = ext_handler_i.get_handler_state() ext_handler_i.logger.info("[Uninstall] current handler state is: {0}", handler_state.lower()) @@ -396,9 +423,11 @@ ext_handler_i.disable() ext_handler_i.uninstall() ext_handler_i.rm_ext_handler_dir() - + def report_ext_handlers_status(self): - """Go through handler_state dir, collect and report status""" + """ + Go through handler_state dir, collect and report status + """ vm_status = VMStatus(status="Ready", message="Guest Agent is running") if self.ext_handlers is not None: for ext_handler in self.ext_handlers.extHandlers: @@ -417,13 +446,31 @@ self.protocol.report_vm_status(vm_status) if self.log_report: logger.verbose("Completed vm agent status report") + self.report_status_error_state.reset() + except ProtocolNotFoundError as e: + self.report_status_error_state.incr() + message = "Failed to report vm agent status: {0}".format(e) + logger.verbose(message) except ProtocolError as e: + self.report_status_error_state.incr() message = "Failed to report vm agent status: {0}".format(e) add_event(AGENT_NAME, - version=CURRENT_VERSION, - op=WALAEventOperation.ExtensionProcessing, - is_success=False, - message=message) + version=CURRENT_VERSION, + op=WALAEventOperation.ExtensionProcessing, + is_success=False, + message=message) + + if self.report_status_error_state.is_triggered(): + message = "Failed to report vm agent status for more than {0}"\ + .format(self.report_status_error_state.min_timedelta) + + add_event(AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.ReportStatusExtended, + is_success=False, + message=message) + + self.report_status_error_state.reset() def report_ext_handler_status(self, vm_status, ext_handler): ext_handler_i = ExtHandlerInstance(ext_handler, self.protocol) @@ -431,9 +478,6 @@ handler_status = ext_handler_i.get_handler_status() if handler_status is None: return - guid = self.get_guid(ext_handler.name) - if guid is not None: - handler_status.upgradeGuid = guid handler_state = ext_handler_i.get_handler_state() if handler_state != ExtHandlerState.NotInstalled: @@ -441,14 +485,14 @@ active_exts = ext_handler_i.report_ext_status() handler_status.extensions.extend(active_exts) except ExtensionError as e: - ext_handler_i.set_handler_status(message=ustr(e), code=-1) + ext_handler_i.set_handler_status(message=ustr(e), code=e.code) try: heartbeat = ext_handler_i.collect_heartbeat() if heartbeat is not None: handler_status.status = heartbeat.get('status') except ExtensionError as e: - ext_handler_i.set_handler_status(message=ustr(e), code=-1) + ext_handler_i.set_handler_status(message=ustr(e), 
code=e.code) vm_status.vmAgent.extensionHandlers.append(handler_status) @@ -461,9 +505,8 @@ self.pkg = None self.pkg_file = None self.is_upgrade = False - - prefix = "[{0}]".format(self.get_full_name()) - self.logger = logger.Logger(logger.DEFAULT_LOGGER, prefix) + self.logger = None + self.set_logger() try: fileutil.mkdir(self.get_log_dir(), mode=0o755) @@ -482,8 +525,7 @@ raise ExtensionError("Failed to get ext handler pkgs", e) # Determine the desired and installed versions - requested_version = FlexibleVersion( - str(self.ext_handler.properties.version)) + requested_version = FlexibleVersion(str(self.ext_handler.properties.version)) installed_version_string = self.get_installed_version() installed_version = requested_version \ if installed_version_string is None \ @@ -493,74 +535,21 @@ # - Find the installed package (its version must exactly match) # - Find the internal candidate (its version must exactly match) # - Separate the public packages - internal_pkg = None + selected_pkg = None installed_pkg = None - public_pkgs = [] + pkg_list.versions.sort(key=lambda p: FlexibleVersion(p.version)) for pkg in pkg_list.versions: pkg_version = FlexibleVersion(pkg.version) if pkg_version == installed_version: installed_pkg = pkg - if pkg.isinternal and pkg_version == requested_version: - internal_pkg = pkg - if not pkg.isinternal: - public_pkgs.append(pkg) - - internal_version = FlexibleVersion(internal_pkg.version) \ - if internal_pkg is not None \ - else FlexibleVersion() - public_pkgs.sort(key=lambda pkg: FlexibleVersion(pkg.version), reverse=True) - - # Determine the preferred version and type of upgrade occurring - preferred_version = max(requested_version, installed_version) - is_major_upgrade = preferred_version.major > installed_version.major - allow_minor_upgrade = self.ext_handler.properties.upgradePolicy == 'auto' - - # Find the first public candidate which - # - Matches the preferred major version - # - Does not upgrade to a new, disallowed major version - # - And only increments the minor version if allowed - # Notes: - # - The patch / hotfix version is not considered - public_pkg = None - for pkg in public_pkgs: - pkg_version = FlexibleVersion(pkg.version) - if pkg_version.major == preferred_version.major \ - and (not pkg.disallow_major_upgrade or not is_major_upgrade) \ - and (allow_minor_upgrade or pkg_version.minor == preferred_version.minor): - public_pkg = pkg - break - - # If there are no candidates, locate the highest public version whose - # major matches that installed - if internal_pkg is None and public_pkg is None: - for pkg in public_pkgs: - pkg_version = FlexibleVersion(pkg.version) - if pkg_version.major == installed_version.major: - public_pkg = pkg - break - - public_version = FlexibleVersion(public_pkg.version) \ - if public_pkg is not None \ - else FlexibleVersion() - - # Select the candidate - # - Use the public candidate if there is no internal candidate or - # the public is more recent (e.g., a hotfix patch) - # - Otherwise use the internal candidate - if internal_pkg is None or (public_pkg is not None and public_version > internal_version): - selected_pkg = public_pkg - else: - selected_pkg = internal_pkg - - selected_version = FlexibleVersion(selected_pkg.version) \ - if selected_pkg is not None \ - else FlexibleVersion() + if requested_version.matches(pkg_version): + selected_pkg = pkg # Finally, update the version only if not downgrading # Note: # - A downgrade, which will be bound to the same major version, # is allowed if the installed version is no 
longer available - if target_state == u"uninstall": + if target_state == u"uninstall" or target_state == u"disabled": if installed_pkg is None: msg = "Failed to find installed version of {0} " \ "to uninstall".format(self.ext_handler.name) @@ -568,31 +557,35 @@ self.pkg = installed_pkg self.ext_handler.properties.version = str(installed_version) \ if installed_version is not None else None - elif selected_pkg is None \ - or (installed_pkg is not None and selected_version < installed_version): - self.pkg = installed_pkg - self.ext_handler.properties.version = str(installed_version) \ - if installed_version is not None else None else: self.pkg = selected_pkg - self.ext_handler.properties.version = str(selected_pkg.version) + if self.pkg is not None: + self.ext_handler.properties.version = str(selected_pkg.version) - # Note if the selected package is greater than that installed + # Note if the selected package is different than that installed if installed_pkg is None \ - or FlexibleVersion(self.pkg.version) > FlexibleVersion(installed_pkg.version): + or (self.pkg is not None and FlexibleVersion(self.pkg.version) != FlexibleVersion(installed_pkg.version)): self.is_upgrade = True - if self.pkg is None: - raise ExtensionError("Failed to find any valid extension package") + if self.pkg is not None: + self.logger.verbose("Use version: {0}", self.pkg.version) + self.set_logger() + return self.pkg - self.logger.verbose("Use version: {0}", self.pkg.version) - return + def set_logger(self): + prefix = "[{0}]".format(self.get_full_name()) + self.logger = logger.Logger(logger.DEFAULT_LOGGER, prefix) def version_gt(self, other): self_version = self.ext_handler.properties.version other_version = other.ext_handler.properties.version return FlexibleVersion(self_version) > FlexibleVersion(other_version) + def version_ne(self, other): + self_version = self.ext_handler.properties.version + other_version = other.ext_handler.properties.version + return FlexibleVersion(self_version) != FlexibleVersion(other_version) + def get_installed_ext_handler(self): lastest_version = self.get_installed_version() if lastest_version is None: @@ -611,7 +604,7 @@ continue separator = path.rfind('-') - version = FlexibleVersion(path[separator+1:]) + version_from_path = FlexibleVersion(path[separator+1:]) state_path = os.path.join(path, 'config', 'HandlerState') if not os.path.exists(state_path) or \ @@ -621,8 +614,8 @@ "{0}".format(path)) continue - if lastest_version is None or lastest_version < version: - lastest_version = version + if lastest_version is None or lastest_version < version_from_path: + lastest_version = version_from_path return str(lastest_version) if lastest_version is not None else None @@ -647,50 +640,56 @@ def set_operation(self, op): self.operation = op - def report_event(self, message="", is_success=True): - version = self.ext_handler.properties.version - add_event(name=self.ext_handler.name, version=version, message=message, - op=self.operation, is_success=is_success) + def report_event(self, message="", is_success=True, duration=0, log_event=True): + ext_handler_version = self.ext_handler.properties.version + add_event(name=self.ext_handler.name, version=ext_handler_version, message=message, + op=self.operation, is_success=is_success, duration=duration, log_event=log_event) def download(self): + begin_utc = datetime.datetime.utcnow() self.logger.verbose("Download extension package") self.set_operation(WALAEventOperation.Download) + if self.pkg is None: raise ExtensionError("No package uri found") - 
package = None - for uri in self.pkg.uris: + uris_shuffled = self.pkg.uris + random.shuffle(uris_shuffled) + file_downloaded = False + + for uri in uris_shuffled: try: - package = self.protocol.download_ext_handler_pkg(uri.uri) - if package is not None: + destination = os.path.join(conf.get_lib_dir(), os.path.basename(uri.uri) + ".zip") + file_downloaded = self.protocol.download_ext_handler_pkg(uri.uri, destination) + + if file_downloaded and os.path.exists(destination): + self.pkg_file = destination break + except Exception as e: - logger.warn("Error while downloading extension: {0}", e) + logger.warn("Error while downloading extension: {0}", ustr(e)) - if package is None: - raise ExtensionError("Failed to download extension") + if not file_downloaded: + raise ExtensionError("Failed to download extension", code=1001) - self.logger.verbose("Unpack extension package") - self.pkg_file = os.path.join(conf.get_lib_dir(), - os.path.basename(uri.uri) + ".zip") + self.logger.verbose("Unzip extension package") try: - fileutil.write_file(self.pkg_file, bytearray(package), asbin=True) zipfile.ZipFile(self.pkg_file).extractall(self.get_base_dir()) + os.remove(self.pkg_file) except IOError as e: - fileutil.clean_ioerror(e, - paths=[self.get_base_dir(), self.pkg_file]) - raise ExtensionError(u"Failed to write and unzip plugin", e) + fileutil.clean_ioerror(e, paths=[self.get_base_dir(), self.pkg_file]) + raise ExtensionError(u"Failed to unzip extension package", e, code=1001) - #Add user execute permission to all files under the base dir + # Add user execute permission to all files under the base dir for file in fileutil.get_all_files(self.get_base_dir()): fileutil.chmod(file, os.stat(file).st_mode | stat.S_IXUSR) - self.report_event(message="Download succeeded") + duration = elapsed_milliseconds(begin_utc) + self.report_event(message="Download succeeded", duration=duration) self.logger.info("Initialize extension directory") - #Save HandlerManifest.json - man_file = fileutil.search_file(self.get_base_dir(), - 'HandlerManifest.json') + # Save HandlerManifest.json + man_file = fileutil.search_file(self.get_base_dir(), 'HandlerManifest.json') if man_file is None: raise ExtensionError("HandlerManifest.json not found") @@ -699,39 +698,37 @@ man = fileutil.read_file(man_file, remove_bom=True) fileutil.write_file(self.get_manifest_file(), man) except IOError as e: - fileutil.clean_ioerror(e, - paths=[self.get_base_dir(), self.pkg_file]) + fileutil.clean_ioerror(e, paths=[self.get_base_dir(), self.pkg_file]) raise ExtensionError(u"Failed to save HandlerManifest.json", e) - #Create status and config dir + # Create status and config dir try: status_dir = self.get_status_dir() fileutil.mkdir(status_dir, mode=0o700) seq_no, status_path = self.get_status_file_path() - if seq_no > -1: + if status_path is not None: now = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ") status = { "version": 1.0, - "timestampUTC" : now, - "status" : { - "name" : self.ext_handler.name, - "operation" : "Enabling Handler", - "status" : "transitioning", - "code" : 0 + "timestampUTC": now, + "status": { + "name": self.ext_handler.name, + "operation": "Enabling Handler", + "status": "transitioning", + "code": 0 } } - fileutil.write_file(json.dumps(status), status_path) + fileutil.write_file(status_path, json.dumps(status)) conf_dir = self.get_conf_dir() fileutil.mkdir(conf_dir, mode=0o700) except IOError as e: - fileutil.clean_ioerror(e, - paths=[self.get_base_dir(), self.pkg_file]) + fileutil.clean_ioerror(e, 
paths=[self.get_base_dir(), self.pkg_file]) raise ExtensionError(u"Failed to create status or config dir", e) - #Save HandlerEnvironment.json + # Save HandlerEnvironment.json self.create_handler_env() def enable(self): @@ -739,7 +736,7 @@ man = self.load_manifest() enable_cmd = man.get_enable_command() self.logger.info("Enable extension [{0}]".format(enable_cmd)) - self.launch_command(enable_cmd, timeout=300) + self.launch_command(enable_cmd, timeout=300, extension_error_code=1009) self.set_handler_state(ExtHandlerState.Enabled) self.set_handler_status(status="Ready", message="Plugin enabled") @@ -748,7 +745,8 @@ man = self.load_manifest() disable_cmd = man.get_disable_command() self.logger.info("Disable extension [{0}]".format(disable_cmd)) - self.launch_command(disable_cmd, timeout=900) + self.launch_command(disable_cmd, timeout=900, + extension_error_code=1010) self.set_handler_state(ExtHandlerState.Installed) self.set_handler_status(status="NotReady", message="Plugin disabled") @@ -757,7 +755,7 @@ install_cmd = man.get_install_command() self.logger.info("Install extension [{0}]".format(install_cmd)) self.set_operation(WALAEventOperation.Install) - self.launch_command(install_cmd, timeout=900) + self.launch_command(install_cmd, timeout=900, extension_error_code=1007) self.set_handler_state(ExtHandlerState.Installed) def uninstall(self): @@ -782,12 +780,23 @@ self.report_event(message=message, is_success=False) self.logger.warn(message) - def update(self): - self.set_operation(WALAEventOperation.Update) - man = self.load_manifest() - update_cmd = man.get_update_command() - self.logger.info("Update extension [{0}]".format(update_cmd)) - self.launch_command(update_cmd, timeout=900) + def update(self, version=None): + if version is None: + version = self.ext_handler.properties.version + + try: + self.set_operation(WALAEventOperation.Update) + man = self.load_manifest() + update_cmd = man.get_update_command() + self.logger.info("Update extension [{0}]".format(update_cmd)) + self.launch_command(update_cmd, + timeout=900, + extension_error_code=1008, + env={'VERSION': version}) + except ExtensionError: + # prevent the handler update from being retried + self.set_handler_state(ExtHandlerState.Failed) + raise def update_with_install(self): man = self.load_manifest() @@ -805,19 +814,36 @@ item_path = os.path.join(conf_dir, item) if os.path.isfile(item_path): try: - seperator = item.rfind(".") - if seperator > 0 and item[seperator + 1:] == 'settings': + separator = item.rfind(".") + if separator > 0 and item[separator + 1:] == 'settings': curr_seq_no = int(item.split('.')[0]) if curr_seq_no > seq_no: seq_no = curr_seq_no - except Exception as e: + except (ValueError, IndexError, TypeError): self.logger.verbose("Failed to parse file name: {0}", item) continue return seq_no - def get_status_file_path(self): - seq_no = self.get_largest_seq_no() + def get_status_file_path(self, extension=None): path = None + seq_no = self.get_largest_seq_no() + + # Issue 1116: use the sequence number from goal state where possible + if extension is not None and extension.sequenceNumber is not None: + try: + gs_seq_no = int(extension.sequenceNumber) + + if gs_seq_no != seq_no: + add_event(AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.SequenceNumberMismatch, + is_success=False, + message="Goal state: {0}, disk: {1}".format(gs_seq_no, seq_no), + log_event=False) + + seq_no = gs_seq_no + except ValueError: + logger.error('Sequence number [{0}] does not appear to be valid'.format(extension.sequenceNumber)) 
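# Illustrative sketch only, not part of the patch: the sequence-number decision the reworked
# get_status_file_path makes for issue 1116. The helper name choose_status_seq_no is hypothetical.
def choose_status_seq_no(disk_seq_no, goal_state_seq_no):
    # Prefer the goal state's sequence number when it is present and parseable;
    # otherwise keep the largest *.settings number found on disk. In the real handler a
    # mismatch between the two values is also reported via telemetry.
    if goal_state_seq_no is None:
        return disk_seq_no
    try:
        return int(goal_state_seq_no)
    except ValueError:
        return disk_seq_no          # unparseable goal-state value: fall back to the on-disk number

# choose_status_seq_no(3, "5") -> 5; choose_status_seq_no(3, None) -> 3; choose_status_seq_no(3, "x") -> 3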
if seq_no > -1: path = os.path.join( @@ -829,7 +855,7 @@ def collect_ext_status(self, ext): self.logger.verbose("Collect extension status") - seq_no, ext_status_file = self.get_status_file_path() + seq_no, ext_status_file = self.get_status_file_path(ext) if seq_no == -1: return None @@ -842,7 +868,11 @@ ext_status.message = u"Failed to get status file {0}".format(e) ext_status.code = -1 ext_status.status = "error" - except (ExtensionError, ValueError) as e: + except ExtensionError as e: + ext_status.message = u"Malformed status file {0}".format(e) + ext_status.code = e.code + ext_status.status = "error" + except ValueError as e: ext_status.message = u"Malformed status file {0}".format(e) ext_status.code = -1 ext_status.status = "error" @@ -851,6 +881,7 @@ def report_ext_status(self): active_exts = [] + # TODO Refactor or remove this common code pattern (for each extension subordinate to an ext_handler, do X). for ext in self.ext_handler.properties.extensions: ext_status = self.collect_ext_status(ext) if ext_status is None: @@ -886,47 +917,79 @@ except (ValueError, KeyError) as e: raise ExtensionError("Malformed heartbeat file: {0}".format(e)) return heartbeat - - def is_responsive(self, heartbeat_file): + + @staticmethod + def is_responsive(heartbeat_file): + """ + Was heartbeat_file updated within the last ten (10) minutes? + + :param heartbeat_file: str + :return: bool + """ last_update = int(time.time() - os.stat(heartbeat_file).st_mtime) - return last_update <= 600 # updated within the last 10 min - - def launch_command(self, cmd, timeout=300): + return last_update <= 600 + + def launch_command(self, cmd, timeout=300, extension_error_code=1000, env=None): + begin_utc = datetime.datetime.utcnow() self.logger.verbose("Launch command: [{0}]", cmd) base_dir = self.get_base_dir() - try: - devnull = open(os.devnull, 'w') - child = subprocess.Popen(base_dir + "/" + cmd, - shell=True, - cwd=base_dir, - stdout=devnull, - env=os.environ) - except Exception as e: - #TODO do not catch all exception - raise ExtensionError("Failed to launch: {0}, {1}".format(cmd, e)) - - retry = timeout - while retry > 0 and child.poll() is None: - time.sleep(1) - retry -= 1 - if retry == 0: - os.kill(child.pid, 9) - raise ExtensionError("Timeout({0}): {1}".format(timeout, cmd)) - - ret = child.wait() - if ret == None or ret != 0: - raise ExtensionError("Non-zero exit code: {0}, {1}".format(ret, cmd)) - self.report_event(message="Launch command succeeded: {0}".format(cmd)) + if env is None: + env = {} + env.update(os.environ) + + try: + # This should be .run(), but due to the wide variety + # of Python versions we must support we must use .communicate(). + # Some extensions erroneously begin cmd with a slash; don't interpret those + # as root-relative. (Issue #1170) + full_path = os.path.join(base_dir, cmd.lstrip(os.path.sep)) + + def pre_exec_function(): + """ + Change process state before the actual target process is started. Effectively, this runs between + the fork() and the exec() of sub-process creation. 
+ :return: + """ + os.setsid() + CGroups.add_to_extension_cgroup(self.ext_handler.name) + + process = subprocess.Popen(full_path, + shell=True, + cwd=base_dir, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=env, + preexec_fn=pre_exec_function) + except OSError as e: + raise ExtensionError("Failed to launch '{0}': {1}".format(full_path, e.strerror), + code=extension_error_code) + + cg = CGroups.for_extension(self.ext_handler.name) + CGroupsTelemetry.track_extension(self.ext_handler.name, cg) + msg = capture_from_process(process, cmd, timeout, extension_error_code) + + ret = process.poll() + if ret is None: + raise ExtensionError("Process {0} was not terminated: {1}\n{2}".format(process.pid, cmd, msg), + code=extension_error_code) + if ret != 0: + raise ExtensionError("Non-zero exit code: {0}, {1}\n{2}".format(ret, cmd, msg), + code=extension_error_code) + + duration = elapsed_milliseconds(begin_utc) + log_msg = "{0}\n{1}".format(cmd, "\n".join([line for line in msg.split('\n') if line != ""])) + self.logger.verbose(log_msg) + self.report_event(message=log_msg, duration=duration, log_event=False) def load_manifest(self): man_file = self.get_manifest_file() try: data = json.loads(fileutil.read_file(man_file)) - except IOError as e: - raise ExtensionError('Failed to load manifest file.') - except ValueError as e: - raise ExtensionError('Malformed manifest file.') + except (IOError, OSError) as e: + raise ExtensionError('Failed to load manifest file ({0}): {1}'.format(man_file, e.strerror), code=1002) + except ValueError: + raise ExtensionError('Malformed manifest file ({0}).'.format(man_file), code=1003) return HandlerManifest(data[0]) @@ -942,8 +1005,8 @@ def update_settings(self): if self.ext_handler.properties.extensions is None or \ len(self.ext_handler.properties.extensions) == 0: - #This is the behavior of waagent 2.0.x - #The new agent has to be consistent with the old one. + # This is the behavior of waagent 2.0.x + # The new agent has to be consistent with the old one. 
self.logger.info("Extension has no settings, write empty 0.settings") self.update_settings_file("0.settings", "") return @@ -955,7 +1018,7 @@ 'protectedSettingsCertThumbprint': ext.certificateThumbprint } ext_settings = { - "runtimeSettings":[{ + "runtimeSettings": [{ "handlerSettings": settings }] } @@ -966,12 +1029,12 @@ def create_handler_env(self): env = [{ "name": self.ext_handler.name, - "version" : HANDLER_ENVIRONMENT_VERSION, - "handlerEnvironment" : { - "logFolder" : self.get_log_dir(), - "configFolder" : self.get_conf_dir(), - "statusFolder" : self.get_status_dir(), - "heartbeatFile" : self.get_heartbeat_file() + "version": HANDLER_ENVIRONMENT_VERSION, + "handlerEnvironment": { + "logFolder": self.get_log_dir(), + "configFolder": self.get_conf_dir(), + "statusFolder": self.get_status_dir(), + "heartbeatFile": self.get_heartbeat_file() } }] try: @@ -983,15 +1046,13 @@ def set_handler_state(self, handler_state): state_dir = self.get_conf_dir() + state_file = os.path.join(state_dir, "HandlerState") try: if not os.path.exists(state_dir): fileutil.mkdir(state_dir, mode=0o700) - - state_file = os.path.join(state_dir, "HandlerState") fileutil.write_file(state_file, handler_state) except IOError as e: - fileutil.clean_ioerror(e, - paths=[state_file]) + fileutil.clean_ioerror(e, paths=[state_file]) self.logger.error("Failed to set state: {0}", e) def get_handler_state(self): @@ -1018,11 +1079,16 @@ status_file = os.path.join(state_dir, "HandlerStatus") try: - fileutil.write_file(status_file, json.dumps(get_properties(handler_status))) + handler_status_json = json.dumps(get_properties(handler_status)) + if handler_status_json is not None: + fileutil.write_file(status_file, handler_status_json) + else: + self.logger.error("Failed to create JSON document of handler status for {0} version {1}".format( + self.ext_handler.name, + self.ext_handler.properties.version)) except (IOError, ValueError, ProtocolError) as e: - fileutil.clean_ioerror(e, - paths=[status_file]) - self.logger.error("Failed to save handler status: {0}", e) + fileutil.clean_ioerror(e, paths=[status_file]) + self.logger.error("Failed to save handler status: {0}, {1}", ustr(e), traceback.format_exc()) def get_handler_status(self): state_dir = self.get_conf_dir() @@ -1061,8 +1127,8 @@ return os.path.join(self.get_base_dir(), 'HandlerEnvironment.json') def get_log_dir(self): - return os.path.join(conf.get_ext_log_dir(), self.ext_handler.name, - str(self.ext_handler.properties.version)) + return os.path.join(conf.get_ext_log_dir(), self.ext_handler.name) + class HandlerEnvironment(object): def __init__(self, data): @@ -1083,6 +1149,7 @@ def get_heartbeat_file(self): return self.data["handlerEnvironment"]["heartbeatFile"] + class HandlerManifest(object): def __init__(self, data): if data is None or data['handlerManifest'] is None: @@ -1110,12 +1177,6 @@ def get_disable_command(self): return self.data['handlerManifest']["disableCommand"] - def is_reboot_after_install(self): - """ - Deprecated - """ - return False - def is_report_heartbeat(self): return self.data['handlerManifest'].get('reportHeartbeat', False) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/ga/__init__.py walinuxagent-2.2.32/azurelinuxagent/ga/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/ga/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/ga/__init__.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed 
under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,6 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/ga/monitor.py walinuxagent-2.2.32/azurelinuxagent/ga/monitor.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/ga/monitor.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/ga/monitor.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import datetime @@ -21,23 +21,28 @@ import platform import time import threading +import traceback import uuid import azurelinuxagent.common.conf as conf -import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.errorstate import ErrorState -from azurelinuxagent.common.event import add_event, WALAEventOperation -from azurelinuxagent.common.exception import EventError, ProtocolError, OSUtilError +from azurelinuxagent.common.cgroups import CGroups, CGroupsTelemetry +from azurelinuxagent.common.event import add_event, report_metric, WALAEventOperation +from azurelinuxagent.common.exception import EventError, ProtocolError, OSUtilError, HttpError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.protocol import get_protocol_util +from azurelinuxagent.common.protocol.healthservice import HealthService +from azurelinuxagent.common.protocol.imds import get_imds_client from azurelinuxagent.common.protocol.restapi import TelemetryEventParam, \ TelemetryEventList, \ TelemetryEvent, \ set_properties +import azurelinuxagent.common.utils.networkutil as networkutil from azurelinuxagent.common.utils.restutil import IOErrorCounter -from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, getattrib +from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, getattrib, hash_strings from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ DISTRO_CODE_NAME, AGENT_LONG_VERSION, \ AGENT_NAME, CURRENT_AGENT, CURRENT_VERSION @@ -90,17 +95,62 @@ class MonitorHandler(object): + + EVENT_COLLECTION_PERIOD = datetime.timedelta(minutes=1) + TELEMETRY_HEARTBEAT_PERIOD = datetime.timedelta(minutes=30) + CGROUP_TELEMETRY_PERIOD = datetime.timedelta(minutes=5) + # host plugin + HOST_PLUGIN_HEARTBEAT_PERIOD = datetime.timedelta(minutes=1) + HOST_PLUGIN_HEALTH_PERIOD = datetime.timedelta(minutes=5) + # imds + IMDS_HEARTBEAT_PERIOD = datetime.timedelta(minutes=1) + IMDS_HEALTH_PERIOD = datetime.timedelta(minutes=3) + def __init__(self): self.osutil = get_osutil() self.protocol_util = get_protocol_util() + self.imds_client = get_imds_client() + + self.event_thread = None + self.last_event_collection = None + self.last_telemetry_heartbeat = None + self.last_cgroup_telemetry = None + self.last_host_plugin_heartbeat = None + self.last_imds_heartbeat = None + 
self.protocol = None + self.health_service = None + self.last_route_table_hash = b'' + self.last_nic_state = {} + + self.counter = 0 self.sysinfo = [] + self.should_run = True + self.heartbeat_id = str(uuid.uuid4()).upper() + self.host_plugin_errorstate = ErrorState(min_timedelta=MonitorHandler.HOST_PLUGIN_HEALTH_PERIOD) + self.imds_errorstate = ErrorState(min_timedelta=MonitorHandler.IMDS_HEALTH_PERIOD) def run(self): + self.init_protocols() self.init_sysinfo() + self.init_cgroups() + self.start() - event_thread = threading.Thread(target=self.daemon) - event_thread.setDaemon(True) - event_thread.start() + def stop(self): + self.should_run = False + if self.is_alive(): + self.event_thread.join() + + def init_protocols(self): + self.protocol = self.protocol_util.get_protocol() + self.health_service = HealthService(self.protocol.endpoint) + + def is_alive(self): + return self.event_thread is not None and self.event_thread.is_alive() + + def start(self): + self.event_thread = threading.Thread(target=self.daemon) + self.event_thread.setDaemon(True) + self.event_thread.start() def init_sysinfo(self): osversion = "{0}:{1}-{2}-{3}:{4}".format(platform.system(), @@ -121,8 +171,7 @@ logger.warn("Failed to get system info: {0}", e) try: - protocol = self.protocol_util.get_protocol() - vminfo = protocol.get_vminfo() + vminfo = self.protocol.get_vminfo() self.sysinfo.append(TelemetryEventParam("VMName", vminfo.vmName)) self.sysinfo.append(TelemetryEventParam("TenantName", @@ -136,6 +185,21 @@ except ProtocolError as e: logger.warn("Failed to get system info: {0}", e) + try: + vminfo = self.imds_client.get_compute() + self.sysinfo.append(TelemetryEventParam('Location', + vminfo.location)) + self.sysinfo.append(TelemetryEventParam('SubscriptionId', + vminfo.subscriptionId)) + self.sysinfo.append(TelemetryEventParam('ResourceGroupName', + vminfo.resourceGroupName)) + self.sysinfo.append(TelemetryEventParam('VMId', + vminfo.vmId)) + self.sysinfo.append(TelemetryEventParam('ImageOrigin', + vminfo.image_origin)) + except (HttpError, ValueError) as e: + logger.warn("failed to get IMDS info: {0}", e) + def collect_event(self, evt_file_name): try: logger.verbose("Found event file: {0}", evt_file_name) @@ -150,92 +214,240 @@ raise EventError(msg) def collect_and_send_events(self): - event_list = TelemetryEventList() - event_dir = os.path.join(conf.get_lib_dir(), "events") - event_files = os.listdir(event_dir) - for event_file in event_files: - if not event_file.endswith(".tld"): - continue - event_file_path = os.path.join(event_dir, event_file) - try: - data_str = self.collect_event(event_file_path) - except EventError as e: - logger.error("{0}", e) - continue - - try: - event = parse_event(data_str) - self.add_sysinfo(event) - event_list.events.append(event) - except (ValueError, ProtocolError) as e: - logger.warn("Failed to decode event file: {0}", e) - continue + if self.last_event_collection is None: + self.last_event_collection = datetime.datetime.utcnow() - MonitorHandler.EVENT_COLLECTION_PERIOD - if len(event_list.events) == 0: - return + if datetime.datetime.utcnow() >= (self.last_event_collection + MonitorHandler.EVENT_COLLECTION_PERIOD): + try: + event_list = TelemetryEventList() + event_dir = os.path.join(conf.get_lib_dir(), "events") + event_files = os.listdir(event_dir) + for event_file in event_files: + if not event_file.endswith(".tld"): + continue + event_file_path = os.path.join(event_dir, event_file) + try: + data_str = self.collect_event(event_file_path) + except EventError as e: + 
logger.error("{0}", e) + continue + + try: + event = parse_event(data_str) + self.add_sysinfo(event) + event_list.events.append(event) + except (ValueError, ProtocolError) as e: + logger.warn("Failed to decode event file: {0}", e) + continue + + if len(event_list.events) == 0: + return + + try: + self.protocol.report_event(event_list) + except ProtocolError as e: + logger.error("{0}", e) + except Exception as e: + logger.warn("Failed to send events: {0}", e) - try: - protocol = self.protocol_util.get_protocol() - protocol.report_event(event_list) - except ProtocolError as e: - logger.error("{0}", e) + self.last_event_collection = datetime.datetime.utcnow() def daemon(self): - period = datetime.timedelta(minutes=30) - protocol = self.protocol_util.get_protocol() - last_heartbeat = datetime.datetime.utcnow() - period - - # Create a new identifier on each restart and reset the counter - heartbeat_id = str(uuid.uuid4()).upper() - counter = 0 - while True: - if datetime.datetime.utcnow() >= (last_heartbeat + period): - last_heartbeat = datetime.datetime.utcnow() - incarnation = protocol.get_incarnation() - dropped_packets = self.osutil.get_firewall_dropped_packets( - protocol.endpoint) + min_delta = min(MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD, + MonitorHandler.CGROUP_TELEMETRY_PERIOD, + MonitorHandler.EVENT_COLLECTION_PERIOD, + MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD, + MonitorHandler.IMDS_HEARTBEAT_PERIOD).seconds + while self.should_run: + self.send_telemetry_heartbeat() + self.send_cgroup_telemetry() + self.collect_and_send_events() + self.send_host_plugin_heartbeat() + self.send_imds_heartbeat() + self.log_altered_network_configuration() + time.sleep(min_delta) + + def add_sysinfo(self, event): + sysinfo_names = [v.name for v in self.sysinfo] + for param in event.parameters: + if param.name in sysinfo_names: + logger.verbose("Remove existing event parameter: [{0}:{1}]", + param.name, + param.value) + event.parameters.remove(param) + event.parameters.extend(self.sysinfo) + + def send_imds_heartbeat(self): + """ + Send a health signal every IMDS_HEARTBEAT_PERIOD. The signal is 'Healthy' when we have + successfully called and validated a response in the last IMDS_HEALTH_PERIOD. + """ + + if self.last_imds_heartbeat is None: + self.last_imds_heartbeat = datetime.datetime.utcnow() - MonitorHandler.IMDS_HEARTBEAT_PERIOD + + if datetime.datetime.utcnow() >= (self.last_imds_heartbeat + MonitorHandler.IMDS_HEARTBEAT_PERIOD): + try: + is_currently_healthy, response = self.imds_client.validate() + + if is_currently_healthy: + self.imds_errorstate.reset() + else: + self.imds_errorstate.incr() + + is_healthy = self.imds_errorstate.is_triggered() is False + logger.verbose("IMDS health: {0} [{1}]", is_healthy, response) - msg = "{0};{1};{2};{3}".format( - incarnation, counter, heartbeat_id, dropped_packets) + self.health_service.report_imds_status(is_healthy, response) + + except Exception as e: + msg = "Exception sending imds heartbeat: {0}".format(ustr(e)) + add_event( + name=AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.ImdsHeartbeat, + is_success=False, + message=msg, + log_event=False) + + self.last_imds_heartbeat = datetime.datetime.utcnow() + + def send_host_plugin_heartbeat(self): + """ + Send a health signal every HOST_PLUGIN_HEARTBEAT_PERIOD. The signal is 'Healthy' when we have been able to + communicate with HostGAPlugin at least once in the last HOST_PLUGIN_HEALTH_PERIOD. 
+ """ + if self.last_host_plugin_heartbeat is None: + self.last_host_plugin_heartbeat = datetime.datetime.utcnow() - MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD + + if datetime.datetime.utcnow() >= (self.last_host_plugin_heartbeat + MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD): + try: + host_plugin = self.protocol.client.get_host_plugin() + host_plugin.ensure_initialized() + is_currently_healthy = host_plugin.get_health() + + if is_currently_healthy: + self.host_plugin_errorstate.reset() + else: + self.host_plugin_errorstate.incr() + + is_healthy = self.host_plugin_errorstate.is_triggered() is False + logger.verbose("HostGAPlugin health: {0}", is_healthy) + + self.health_service.report_host_plugin_heartbeat(is_healthy) + + if not is_healthy: + add_event( + name=AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.HostPluginHeartbeatExtended, + is_success=False, + message='{0} since successful heartbeat'.format(self.host_plugin_errorstate.fail_time), + log_event=False) + + except Exception as e: + msg = "Exception sending host plugin heartbeat: {0}".format(ustr(e)) + add_event( + name=AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.HostPluginHeartbeat, + is_success=False, + message=msg, + log_event=False) + + self.last_host_plugin_heartbeat = datetime.datetime.utcnow() + + def send_telemetry_heartbeat(self): + + if self.last_telemetry_heartbeat is None: + self.last_telemetry_heartbeat = datetime.datetime.utcnow() - MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD + + if datetime.datetime.utcnow() >= (self.last_telemetry_heartbeat + MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD): + try: + incarnation = self.protocol.get_incarnation() + dropped_packets = self.osutil.get_firewall_dropped_packets(self.protocol.endpoint) + msg = "{0};{1};{2};{3}".format(incarnation, self.counter, self.heartbeat_id, dropped_packets) add_event( name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.HeartBeat, is_success=True, - message=msg) + message=msg, + log_event=False) - counter += 1 + self.counter += 1 - ioerrors = IOErrorCounter.get_and_reset() - hostplugin_errors = ioerrors.get("hostplugin") - protocol_errors = ioerrors.get("protocol") - other_errors = ioerrors.get("other") - - if hostplugin_errors > 0 or \ - protocol_errors > 0 or \ - other_errors > 0: - msg = "hostplugin:{0};protocol:{1};other:{2}".format( - hostplugin_errors, protocol_errors, other_errors) + io_errors = IOErrorCounter.get_and_reset() + hostplugin_errors = io_errors.get("hostplugin") + protocol_errors = io_errors.get("protocol") + other_errors = io_errors.get("other") + + if hostplugin_errors > 0 or protocol_errors > 0 or other_errors > 0: + msg = "hostplugin:{0};protocol:{1};other:{2}".format(hostplugin_errors, + protocol_errors, + other_errors) add_event( name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.HttpErrors, - is_success=False, - msg=msg) + is_success=True, + message=msg, + log_event=False) + except Exception as e: + logger.warn("Failed to send heartbeat: {0}", e) + self.last_telemetry_heartbeat = datetime.datetime.utcnow() + + @staticmethod + def init_cgroups(): + # Track metrics for the roll-up cgroup and for the agent cgroup + try: + CGroupsTelemetry.track_cgroup(CGroups.for_extension("")) + CGroupsTelemetry.track_agent() + except Exception as e: + # when a hierarchy is not mounted, we raise an exception + # and we should therefore only issue a warning, since this + # is not unexpected + logger.warn("Monitor: cgroups not initialized: {0}", ustr(e)) + logger.verbose(traceback.format_exc()) 
+ + def send_cgroup_telemetry(self): + if self.last_cgroup_telemetry is None: + self.last_cgroup_telemetry = datetime.datetime.utcnow() + + if datetime.datetime.utcnow() >= (self.last_telemetry_heartbeat + MonitorHandler.CGROUP_TELEMETRY_PERIOD): try: - self.collect_and_send_events() + for cgroup_name, metrics in CGroupsTelemetry.collect_all_tracked().items(): + for metric_group, metric_name, value in metrics: + if value > 0: + report_metric(metric_group, metric_name, cgroup_name, value) except Exception as e: - logger.warn("Failed to send events: {0}", e) - time.sleep(60) + logger.warn("Monitor: failed to collect cgroups performance metrics: {0}", ustr(e)) + logger.verbose(traceback.format_exc()) - def add_sysinfo(self, event): - sysinfo_names = [v.name for v in self.sysinfo] - for param in event.parameters: - if param.name in sysinfo_names: - logger.verbose("Remove existing event parameter: [{0}:{1}]", - param.name, - param.value) - event.parameters.remove(param) - event.parameters.extend(self.sysinfo) + # Look for extension cgroups we're not already tracking and track them + try: + CGroupsTelemetry.update_tracked(self.protocol.client.get_current_handlers()) + except Exception as e: + logger.warn("Monitor: failed to update cgroups tracked extensions: {0}", ustr(e)) + logger.verbose(traceback.format_exc()) + + self.last_cgroup_telemetry = datetime.datetime.utcnow() + + def log_altered_network_configuration(self): + """ + Check various pieces of network configuration and, if altered since the last check, log the new state. + """ + raw_route_list = self.osutil.read_route_table() + digest = hash_strings(raw_route_list) + if digest != self.last_route_table_hash: + self.last_route_table_hash = digest + route_list = self.osutil.get_list_of_routes(raw_route_list) + logger.info("Route table: [{0}]".format(",".join(map(networkutil.RouteEntry.to_json, route_list)))) + + nic_state = self.osutil.get_nic_state() + if nic_state != self.last_nic_state: + description = "Initial" if self.last_nic_state == {} else "Updated" + logger.info("{0} NIC state: [{1}]".format(description, ", ".join(map(str, nic_state.values())))) + self.last_nic_state = nic_state diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/ga/remoteaccess.py walinuxagent-2.2.32/azurelinuxagent/ga/remoteaccess.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/ga/remoteaccess.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/ga/remoteaccess.py 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,183 @@ +# Microsoft Azure Linux Agent +# +# Copyright Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ +# + +import datetime +import glob +import json +import operator +import os +import os.path +import pwd +import random +import re +import shutil +import stat +import subprocess +import textwrap +import time +import traceback +import zipfile + +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.version as version +import azurelinuxagent.common.protocol.wire +import azurelinuxagent.common.protocol.metadata as metadata + +from datetime import datetime, timedelta +from pwd import getpwall +from azurelinuxagent.common.errorstate import ErrorState + +from azurelinuxagent.common.event import add_event, WALAEventOperation, elapsed_milliseconds +from azurelinuxagent.common.exception import ExtensionError, ProtocolError, RemoteAccessError +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.protocol.restapi import ExtHandlerStatus, \ + ExtensionStatus, \ + ExtensionSubStatus, \ + VMStatus, ExtHandler, \ + get_properties, \ + set_properties +from azurelinuxagent.common.protocol.metadata import MetadataProtocol +from azurelinuxagent.common.utils.cryptutil import CryptUtil +from azurelinuxagent.common.utils.flexible_version import FlexibleVersion +from azurelinuxagent.common.utils.processutil import capture_from_process +from azurelinuxagent.common.protocol import get_protocol_util +from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION +from azurelinuxagent.common.osutil import get_osutil + +REMOTE_USR_EXPIRATION_FORMAT = "%a, %d %b %Y %H:%M:%S %Z" +DATE_FORMAT = "%Y-%m-%d" +TRANSPORT_PRIVATE_CERT = "TransportPrivate.pem" +REMOTE_ACCESS_ACCOUNT_COMMENT = "JIT_Account" +MAX_TRY_ATTEMPT = 5 +FAILED_ATTEMPT_THROTTLE = 1 + +def get_remote_access_handler(): + return RemoteAccessHandler() + +class RemoteAccessHandler(object): + def __init__(self): + self.os_util = get_osutil() + self.protocol_util = get_protocol_util() + self.protocol = None + self.cryptUtil = CryptUtil(conf.get_openssl_cmd()) + self.remote_access = None + self.incarnation = 0 + self.error_message = "" + + def run(self): + try: + if self.os_util.jit_enabled: + self.protocol = self.protocol_util.get_protocol() + current_incarnation = self.protocol.get_incarnation() + if self.incarnation != current_incarnation: + # something changed. Handle remote access if any. + self.incarnation = current_incarnation + self.remote_access = self.protocol.client.get_remote_access() + self.handle_remote_access() + except Exception as e: + msg = u"Exception processing remote access handler: {0} {1}".format(ustr(e), traceback.format_exc()) + logger.error(msg) + add_event(AGENT_NAME, + version=CURRENT_VERSION, + op=WALAEventOperation.RemoteAccessHandling, + is_success=False, + message=msg) + + def handle_remote_access(self): + # Get JIT user accounts. 
+ all_users = self.os_util.get_users() + existing_jit_users = set(u[0] for u in all_users if self.validate_jit_user(u[4])) + self.err_message = "" + if self.remote_access is not None: + goal_state_users = set(u.name for u in self.remote_access.user_list.users) + for acc in self.remote_access.user_list.users: + try: + raw_expiration = acc.expiration + account_expiration = datetime.strptime(raw_expiration, REMOTE_USR_EXPIRATION_FORMAT) + now = datetime.utcnow() + if acc.name not in existing_jit_users and now < account_expiration: + self.add_user(acc.name, acc.encrypted_password, account_expiration) + elif acc.name in existing_jit_users and now > account_expiration: + # user account expired, delete it. + logger.info("user {0} expired from remote_access".format(acc.name)) + self.remove_user(acc.name) + except RemoteAccessError as rae: + self.err_message = self.err_message + "Error processing user {0}. Exception: {1}"\ + .format(acc.name, ustr(rae)) + for user in existing_jit_users: + try: + if user not in goal_state_users: + # user explicitly removed + logger.info("User {0} removed from remote_access".format(user)) + self.remove_user(user) + except RemoteAccessError as rae: + self.err_message = self.err_message + "Error removing user {0}. Exception: {1}"\ + .format(user, ustr(rae)) + else: + # All users removed, remove any remaining JIT accounts. + for user in existing_jit_users: + try: + logger.info("User {0} removed from remote_access. remote_access empty".format(user)) + self.remove_user(user) + except RemoteAccessError as rae: + self.err_message = self.err_message + "Error removing user {0}. Exception: {1}"\ + .format(user, ustr(rae)) + + def validate_jit_user(self, comment): + return comment == REMOTE_ACCESS_ACCOUNT_COMMENT + + def add_user(self, username, encrypted_password, account_expiration): + try: + expiration_date = (account_expiration + timedelta(days=1)).strftime(DATE_FORMAT) + logger.verbose("Adding user {0} with expiration date {1}".format(username, expiration_date)) + self.os_util.useradd(username, expiration_date, REMOTE_ACCESS_ACCOUNT_COMMENT) + except Exception as e: + raise RemoteAccessError("Error adding user {0}. {1}".format(username, ustr(e))) + try: + prv_key = os.path.join(conf.get_lib_dir(), TRANSPORT_PRIVATE_CERT) + pwd = self.cryptUtil.decrypt_secret(encrypted_password, prv_key) + self.os_util.chpasswd(username, pwd, conf.get_password_cryptid(), conf.get_password_crypt_salt_len()) + self.os_util.conf_sudoer(username) + logger.info("User '{0}' added successfully with expiration in {1}".format(username, expiration_date)) + except Exception as e: + error = "Error adding user {0}. 
{1} ".format(username, str(e)) + try: + self.handle_failed_create(username) + error += "cleanup successful" + except RemoteAccessError as rae: + error += "and error cleaning up {0}".format(str(rae)) + raise RemoteAccessError("Error adding user {0} cleanup successful".format(username), ustr(e)) + + def handle_failed_create(self, username): + try: + self.delete_user(username) + except Exception as e: + raise RemoteAccessError("Failed to clean up after account creation for {0}.".format(username), e) + + def remove_user(self, username): + try: + self.delete_user(username) + except Exception as e: + raise RemoteAccessError("Failed to delete user {0}".format(username), e) + + def delete_user(self, username): + self.os_util.del_account(username) + logger.info("User deleted {0}".format(username)) diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/ga/update.py walinuxagent-2.2.32/azurelinuxagent/ga/update.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/ga/update.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/ga/update.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Windows Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,13 +14,14 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import glob import json import os import platform +import random import re import shutil import signal @@ -71,10 +72,11 @@ MAX_FAILURE = 3 # Max failure allowed for agent before blacklisted GOAL_STATE_INTERVAL = 3 +GOAL_STATE_INTERVAL_DISABLED = 5 * 60 ORPHAN_WAIT_INTERVAL = 15 * 60 -AGENT_SENTINAL_FILE = "current_version" +AGENT_SENTINEL_FILE = "current_version" READONLY_FILE_GLOBS = [ "*.crt", @@ -84,6 +86,7 @@ "ovf-env.xml" ] + def get_update_handler(): return UpdateHandler() @@ -175,7 +178,11 @@ start_time = time.time() while (time.time() - start_time) < CHILD_HEALTH_INTERVAL: time.sleep(poll_interval) - ret = self.child_process.poll() + try: + ret = self.child_process.poll() + except OSError: + # if child_process has terminated, calling poll could raise an exception + ret = -1 if ret is not None: break @@ -189,7 +196,8 @@ version=agent_version, op=WALAEventOperation.Enable, is_success=True, - message=msg) + message=msg, + log_event=False) if ret is None: ret = self.child_process.wait() @@ -224,12 +232,13 @@ agent_cmd, ustr(e)) logger.warn(msg) + detailed_message = '{0} {1}'.format(msg, traceback.format_exc()) add_event( AGENT_NAME, version=agent_version, op=WALAEventOperation.Enable, is_success=False, - message=msg) + message=detailed_message) if latest_agent is not None: latest_agent.mark_failure(is_fatal=True) @@ -247,26 +256,43 @@ # Launch monitoring threads from azurelinuxagent.ga.monitor import get_monitor_handler - get_monitor_handler().run() + monitor_thread = get_monitor_handler() + monitor_thread.run() from azurelinuxagent.ga.env import get_env_handler - get_env_handler().run() + env_thread = get_env_handler() + env_thread.run() from azurelinuxagent.ga.exthandlers import get_exthandlers_handler, migrate_handler_state exthandlers_handler = get_exthandlers_handler() migrate_handler_state() + from azurelinuxagent.ga.remoteaccess import get_remote_access_handler + remote_access_handler = get_remote_access_handler() + 
self._ensure_no_orphans() self._emit_restart_event() self._ensure_partition_assigned() self._ensure_readonly_files() + goal_state_interval = GOAL_STATE_INTERVAL \ + if conf.get_extensions_enabled() \ + else GOAL_STATE_INTERVAL_DISABLED + while self.running: if self._is_orphaned: logger.info("Agent {0} is an orphan -- exiting", CURRENT_AGENT) break + if not monitor_thread.is_alive(): + logger.warn(u"Monitor thread died, restarting") + monitor_thread.start() + + if not env_thread.is_alive(): + logger.warn(u"Environment thread died, restarting") + env_thread.start() + if self._upgrade_available(): available_agent = self.get_latest_agent() if available_agent is None: @@ -285,24 +311,25 @@ last_etag = exthandlers_handler.last_etag exthandlers_handler.run() + remote_access_handler.run() + if last_etag != exthandlers_handler.last_etag: self._ensure_readonly_files() + duration = elapsed_milliseconds(utc_start) + logger.info('ProcessGoalState completed [incarnation {0}; {1} ms]', + exthandlers_handler.last_etag, + duration) add_event( AGENT_NAME, - version=CURRENT_VERSION, op=WALAEventOperation.ProcessGoalState, - is_success=True, - duration=elapsed_milliseconds(utc_start), - message="Incarnation {0}".format( - exthandlers_handler.last_etag), - log_event=True) + duration=duration, + message="Incarnation {0}".format(exthandlers_handler.last_etag)) - time.sleep(GOAL_STATE_INTERVAL) + time.sleep(goal_state_interval) except Exception as e: - msg = u"Agent {0} failed with exception: {1}".format( - CURRENT_AGENT, ustr(e)) - self._set_sentinal(msg=msg) + msg = u"Agent {0} failed with exception: {1}".format(CURRENT_AGENT, ustr(e)) + self._set_sentinel(msg=msg) logger.warn(msg) logger.warn(traceback.format_exc()) sys.exit(1) @@ -313,12 +340,7 @@ sys.exit(0) def forward_signal(self, signum, frame): - # Note: - # - At present, the handler is registered only for SIGTERM. - # However, clean shutdown is both SIGTERM and SIGKILL. - # A SIGKILL handler is not being registered at this time to - # minimize perturbing the code. - if signum in (signal.SIGTERM, signal.SIGKILL): + if signum == signal.SIGTERM: self._shutdown() if self.child_process is None: @@ -329,13 +351,14 @@ CURRENT_AGENT, signum, self.child_agent.name if self.child_agent is not None else CURRENT_AGENT) + self.child_process.send_signal(signum) if self.signal_handler not in (None, signal.SIG_IGN, signal.SIG_DFL): self.signal_handler(signum, frame) elif self.signal_handler is signal.SIG_DFL: if signum == signal.SIGTERM: - # TODO: This should set self.running to False vs. 
just exiting + self._shutdown() sys.exit(0) return @@ -360,7 +383,7 @@ try: if not self._is_clean_start: msg = u"Agent did not terminate cleanly: {0}".format( - fileutil.read_file(self._sentinal_file_path())) + fileutil.read_file(self._sentinel_file_path())) logger.info(msg) add_event( AGENT_NAME, @@ -371,7 +394,6 @@ except Exception: pass - self._set_sentinal(msg="Starting") return def _ensure_no_orphans(self, orphan_wait_interval=ORPHAN_WAIT_INTERVAL): @@ -489,7 +511,7 @@ @property def _is_clean_start(self): - return not os.path.isfile(self._sentinal_file_path()) + return not os.path.isfile(self._sentinel_file_path()) @property def _is_orphaned(self): @@ -534,7 +556,7 @@ known_versions = [agent.version for agent in self.agents] if CURRENT_VERSION not in known_versions: - logger.info( + logger.verbose( u"Running Agent {0} was not found in the agent manifest - adding to list", CURRENT_VERSION) known_versions.append(CURRENT_VERSION) @@ -559,33 +581,33 @@ self.agents.sort(key=lambda agent: agent.version, reverse=True) return - def _set_sentinal(self, agent=CURRENT_AGENT, msg="Unknown cause"): + def _set_sentinel(self, agent=CURRENT_AGENT, msg="Unknown cause"): try: fileutil.write_file( - self._sentinal_file_path(), + self._sentinel_file_path(), "[{0}] [{1}]".format(agent, msg)) except Exception as e: logger.warn( - u"Exception writing sentinal file {0}: {1}", - self._sentinal_file_path(), + u"Exception writing sentinel file {0}: {1}", + self._sentinel_file_path(), str(e)) return - def _sentinal_file_path(self): - return os.path.join(conf.get_lib_dir(), AGENT_SENTINAL_FILE) + def _sentinel_file_path(self): + return os.path.join(conf.get_lib_dir(), AGENT_SENTINEL_FILE) def _shutdown(self): self.running = False - if not os.path.isfile(self._sentinal_file_path()): + if not os.path.isfile(self._sentinel_file_path()): return try: - os.remove(self._sentinal_file_path()) + os.remove(self._sentinel_file_path()) except Exception as e: logger.warn( - u"Exception removing sentinal file {0}: {1}", - self._sentinal_file_path(), + u"Exception removing sentinel file {0}: {1}", + self._sentinel_file_path(), str(e)) return @@ -660,7 +682,7 @@ continue msg = u"Exception retrieving agent manifests: {0}".format( - ustr(e)) + ustr(traceback.format_exc())) logger.warn(msg) add_event( AGENT_NAME, @@ -727,6 +749,13 @@ if isinstance(e, ResourceGoneError): raise + # The agent was improperly blacklisting versions due to a timeout + # encountered while downloading a later version. Errors of type + # socket.error are IOError, so this should provide sufficient + # protection against a large class of I/O operation failures. 
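The sentinel handling in this hunk (_set_sentinel / _is_clean_start / _shutdown) implements a simple crash marker: the file is written when the agent fails, checked on the next start to detect an unclean exit, and removed again on an orderly shutdown. A standalone sketch of the same idea, using an illustrative path rather than the agent's lib directory:

    import os

    SENTINEL = "/tmp/example_current_version"   # stand-in for <lib dir>/current_version

    def set_sentinel(msg="Unknown cause"):
        # Record why the marker was written, e.g. the failing agent's exception text.
        with open(SENTINEL, "w") as f:
            f.write("[example-agent] [{0}]".format(msg))

    def is_clean_start():
        # No sentinel on disk means the previous run removed it during shutdown.
        return not os.path.isfile(SENTINEL)

    def shutdown():
        if os.path.isfile(SENTINEL):
            os.remove(SENTINEL)

On start, is_clean_start() returning False means the previous process never reached shutdown(), which is the condition the update handler reports above as "Agent did not terminate cleanly".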
+ if isinstance(e, IOError): + raise + # Note the failure, blacklist the agent if the package downloaded # - An exception with a downloaded package indicates the package # is corrupt (e.g., missing the HandlerManifest.json file) @@ -817,7 +846,9 @@ self._load_error() def _download(self): - for uri in self.pkg.uris: + uris_shuffled = self.pkg.uris + random.shuffle(uris_shuffled) + for uri in uris_shuffled: if not HostPluginProtocol.is_default_channel() and self._fetch(uri.uri): break @@ -859,6 +890,8 @@ def _fetch(self, uri, headers=None, use_proxy=True): package = None try: + is_healthy = True + error_response = '' resp = restutil.http_get(uri, use_proxy=use_proxy, headers=headers) if restutil.request_succeeded(resp): package = resp.read() @@ -867,8 +900,13 @@ asbin=True) logger.verbose(u"Agent {0} downloaded from {1}", self.name, uri) else: - logger.verbose("Fetch was unsuccessful [{0}]", - restutil.read_response_error(resp)) + error_response = restutil.read_response_error(resp) + logger.verbose("Fetch was unsuccessful [{0}]", error_response) + is_healthy = not restutil.request_failed_at_hostplugin(resp) + + if self.host is not None: + self.host.report_fetch_health(uri, is_healthy, source='GuestAgent', response=error_response) + except restutil.HttpError as http_error: if isinstance(http_error, ResourceGoneError): raise diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/__init__.py walinuxagent-2.2.32/azurelinuxagent/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/__init__.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/arch.py walinuxagent-2.2.32/azurelinuxagent/pa/deprovision/arch.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/arch.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/pa/deprovision/arch.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.utils.fileutil as fileutil diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/clearlinux.py walinuxagent-2.2.32/azurelinuxagent/pa/deprovision/clearlinux.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/clearlinux.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/pa/deprovision/clearlinux.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.utils.fileutil as fileutil diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/coreos.py walinuxagent-2.2.32/azurelinuxagent/pa/deprovision/coreos.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/coreos.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/pa/deprovision/coreos.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.utils.fileutil as fileutil diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/default.py walinuxagent-2.2.32/azurelinuxagent/pa/deprovision/default.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/default.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/pa/deprovision/default.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,21 +14,25 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import glob import os.path +import re import signal import sys import azurelinuxagent.common.conf as conf import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil +from azurelinuxagent.common import version from azurelinuxagent.common.exception import ProtocolError from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.protocol import get_protocol_util +from azurelinuxagent.ga.exthandlers import HANDLER_NAME_PATTERN + def read_input(message): if sys.version_info[0] >= 3: @@ -118,6 +122,21 @@ actions.append(DeprovisionAction(fileutil.rm_files, ["/var/lib/NetworkManager/dhclient-*.lease"])) + def del_ext_handler_files(self, warnings, actions): + ext_dirs = [d for d in os.listdir(conf.get_lib_dir()) + if os.path.isdir(os.path.join(conf.get_lib_dir(), d)) + and re.match(HANDLER_NAME_PATTERN, d) is not None + and not version.is_agent_path(d)] + + for ext_dir in ext_dirs: + ext_base = os.path.join(conf.get_lib_dir(), ext_dir) + files = glob.glob(os.path.join(ext_base, 'status', '*.status')) + files += glob.glob(os.path.join(ext_base, 'config', '*.settings')) + files += glob.glob(os.path.join(ext_base, 'config', 'HandlerStatus')) + files += glob.glob(os.path.join(ext_base, 'mrseq')) + + if len(files) > 0: + actions.append(DeprovisionAction(fileutil.rm_files, files)) def del_lib_dir_files(self, warnings, actions): known_files = [ @@ -144,44 +163,6 @@ if len(files) > 0: actions.append(DeprovisionAction(fileutil.rm_files, files)) - def cloud_init_dirs(self, include_once=True): - dirs = [ - "/var/lib/cloud/instance", - "/var/lib/cloud/instances/", - "/var/lib/cloud/data" - ] - if include_once: - dirs += [ - "/var/lib/cloud/scripts/per-once" - ] - return dirs - - def cloud_init_files(self, include_once=True, deluser=False): - files = [] - if deluser: - files += [ - "/etc/sudoers.d/90-cloud-init-users" - ] - if include_once: - files += [ - "/var/lib/cloud/sem/config_scripts_per_once.once" - ] - return files - - def del_cloud_init(self, warnings, actions, - include_once=True, deluser=False): - dirs = [d for d in self.cloud_init_dirs(include_once=include_once) \ - if os.path.isdir(d)] - if len(dirs) > 0: - actions.append(DeprovisionAction(fileutil.rm_dirs, dirs)) - - files = [f for f in self.cloud_init_files( - include_once=include_once, - deluser=deluser) \ - if os.path.isfile(f)] - if len(files) > 0: - actions.append(DeprovisionAction(fileutil.rm_files, files)) - def reset_hostname(self, warnings, actions): localhost = ["localhost.localdomain"] actions.append(DeprovisionAction(self.osutil.set_hostname, @@ -203,7 +184,6 @@ if conf.get_delete_root_password(): self.del_root_password(warnings, actions) - self.del_cloud_init(warnings, actions, deluser=deluser) self.del_dirs(warnings, actions) self.del_files(warnings, actions) self.del_resolv(warnings, actions) @@ -217,10 +197,9 @@ warnings = [] actions = [] - self.del_cloud_init(warnings, actions, - include_once=False, deluser=False) self.del_dhcp_lease(warnings, actions) self.del_lib_dir_files(warnings, actions) + self.del_ext_handler_files(warnings, actions) return warnings, actions diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/factory.py walinuxagent-2.2.32/azurelinuxagent/pa/deprovision/factory.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/factory.py 2018-01-08 22:57:56.000000000 +0000 +++ 
walinuxagent-2.2.32/azurelinuxagent/pa/deprovision/factory.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,11 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # -import azurelinuxagent.common.logger as logger -from azurelinuxagent.common.utils.textutil import Version from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ DISTRO_FULL_NAME @@ -24,7 +22,11 @@ from .arch import ArchDeprovisionHandler from .clearlinux import ClearLinuxDeprovisionHandler from .coreos import CoreOSDeprovisionHandler -from .ubuntu import UbuntuDeprovisionHandler +from .ubuntu import UbuntuDeprovisionHandler, Ubuntu1804DeprovisionHandler + + +from distutils.version import LooseVersion as Version + def get_deprovision_handler(distro_name=DISTRO_NAME, distro_version=DISTRO_VERSION, @@ -32,7 +34,10 @@ if distro_name == "arch": return ArchDeprovisionHandler() if distro_name == "ubuntu": - return UbuntuDeprovisionHandler() + if Version(distro_version) in [Version('18.04')]: + return Ubuntu1804DeprovisionHandler() + else: + return UbuntuDeprovisionHandler() if distro_name == "coreos": return CoreOSDeprovisionHandler() if distro_name == "clear linux": diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/__init__.py walinuxagent-2.2.32/azurelinuxagent/pa/deprovision/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/pa/deprovision/__init__.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.pa.deprovision.factory import get_deprovision_handler diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/ubuntu.py walinuxagent-2.2.32/azurelinuxagent/pa/deprovision/ubuntu.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/deprovision/ubuntu.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/pa/deprovision/ubuntu.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import os @@ -40,3 +40,13 @@ files_to_del = ["/etc/resolvconf/resolv.conf.d/tail", "/etc/resolvconf/resolv.conf.d/original"] actions.append(DeprovisionAction(fileutil.rm_files, files_to_del)) + + +class Ubuntu1804DeprovisionHandler(UbuntuDeprovisionHandler): + def __init__(self): + super(Ubuntu1804DeprovisionHandler, self).__init__() + + def del_resolv(self, warnings, actions): + # no changes will be made to /etc/resolv.conf + warnings.append("WARNING! /etc/resolv.conf will NOT be removed, this is a behavior change to earlier " + "versions of Ubuntu.") diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/__init__.py walinuxagent-2.2.32/azurelinuxagent/pa/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/pa/__init__.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,6 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/provision/cloudinit.py walinuxagent-2.2.32/azurelinuxagent/pa/provision/cloudinit.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/provision/cloudinit.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/pa/provision/cloudinit.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import os @@ -28,7 +28,7 @@ import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil -from azurelinuxagent.common.event import elapsed_milliseconds +from azurelinuxagent.common.event import elapsed_milliseconds, WALAEventOperation from azurelinuxagent.common.exception import ProvisionError, ProtocolError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.protocol import OVF_FILE_NAME @@ -64,17 +64,18 @@ logger.info("Finished provisioning") self.report_ready(thumbprint) - self.report_event("Provision succeed", + self.report_event("Provisioning with cloud-init succeeded ({0}s)".format(self._get_uptime_seconds()), is_success=True, duration=elapsed_milliseconds(utc_start)) except ProvisionError as e: - logger.error("Provisioning failed: {0}", ustr(e)) + msg = "Provisioning with cloud-init failed: {0} ({1}s)".format(ustr(e), self._get_uptime_seconds()) + logger.error(msg) self.report_not_ready("ProvisioningFailed", ustr(e)) - self.report_event(ustr(e)) + self.report_event(msg) return - def wait_for_ovfenv(self, max_retry=360, sleep_time=5): + def wait_for_ovfenv(self, max_retry=1800, sleep_time=1): """ Wait for cloud-init to copy ovf-env.xml file from provision ISO """ @@ -82,7 +83,8 @@ for retry in range(0, max_retry): if os.path.isfile(ovf_file_path): try: - OvfEnv(fileutil.read_file(ovf_file_path)) + ovf_env = OvfEnv(fileutil.read_file(ovf_file_path)) + self.handle_provision_guest_agent(ovf_env.provision_guest_agent) return except ProtocolError as pe: raise ProvisionError("OVF xml could not be parsed " diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/provision/default.py walinuxagent-2.2.32/azurelinuxagent/pa/provision/default.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/provision/default.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/pa/provision/default.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # """ @@ -77,7 +77,7 @@ logger.info("Copying ovf-env.xml") ovf_env = self.protocol_util.copy_ovf_env() - self.protocol_util.get_protocol_by_file() + self.protocol_util.get_protocol(by_file=True) self.report_not_ready("Provisioning", "Starting") logger.info("Starting provisioning") @@ -88,17 +88,20 @@ self.write_provisioned() - self.report_event("Provision succeed", + self.report_event("Provisioning succeeded ({0}s)".format(self._get_uptime_seconds()), is_success=True, duration=elapsed_milliseconds(utc_start)) + self.handle_provision_guest_agent(ovf_env.provision_guest_agent) + self.report_ready(thumbprint) logger.info("Provisioning complete") except (ProtocolError, ProvisionError) as e: + msg = "Provisioning failed: {0} ({1}s)".format(ustr(e), self._get_uptime_seconds()) + logger.error(msg) self.report_not_ready("ProvisioningFailed", ustr(e)) - self.report_event(ustr(e)) - logger.error("Provisioning failed: {0}", ustr(e)) + self.report_event(msg, is_success=False) return @staticmethod @@ -110,20 +113,30 @@ pids = [] for pid in pids: try: - pname = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read() - if CLOUD_INIT_REGEX.match(pname): - is_running = True - msg = "cloud-init is running [PID {0}, {1}]".format(pid, - pname) - if is_expected: - logger.verbose(msg) - else: - logger.error(msg) - break + with open(os.path.join('/proc', pid, 'cmdline'), 'rb') as fh: + pname = fh.read() + if CLOUD_INIT_REGEX.match(pname): + is_running = True + msg = "cloud-init is running [PID {0}, {1}]".format(pid, + pname) + if is_expected: + logger.verbose(msg) + else: + logger.error(msg) + break except IOError: continue return is_running == is_expected + @staticmethod + def _get_uptime_seconds(): + try: + with open('/proc/uptime') as fh: + uptime, _ = fh.readline().split() + return uptime + except: + return 0 + def reg_ssh_host_key(self): keypair_type = conf.get_ssh_host_keypair_type() if conf.get_regenerate_ssh_host_key(): @@ -190,6 +203,19 @@ self.provisioned_file_path(), get_osutil().get_instance_id()) + @staticmethod + def write_agent_disabled(): + logger.warn("Disabling guest agent in accordance with ovf-env.xml") + fileutil.write_file(conf.get_disable_agent_file_path(), '') + + def handle_provision_guest_agent(self, provision_guest_agent): + self.report_event(message=provision_guest_agent, + is_success=True, + duration=0, + operation=WALAEventOperation.ProvisionGuestAgent) + if provision_guest_agent and provision_guest_agent.lower() == 'false': + self.write_agent_disabled() + def provision(self, ovfenv): logger.info("Handle ovf-env.xml.") try: @@ -264,12 +290,13 @@ logger.info("Deploy ssh key pairs.") self.osutil.deploy_ssh_keypair(ovfenv.username, keypair) - def report_event(self, message, is_success=False, duration=0): + def report_event(self, message, is_success=False, duration=0, + operation=WALAEventOperation.Provision): add_event(name=AGENT_NAME, message=message, duration=duration, is_success=is_success, - op=WALAEventOperation.Provision) + op=operation) def report_not_ready(self, sub_status, description): status = ProvisionStatus(status="NotReady", subStatus=sub_status, diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/provision/factory.py walinuxagent-2.2.32/azurelinuxagent/pa/provision/factory.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/provision/factory.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/pa/provision/factory.py 2018-09-30 
15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,13 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.conf as conf -import azurelinuxagent.common.logger as logger -from azurelinuxagent.common.utils.textutil import Version from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ DISTRO_FULL_NAME diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/provision/__init__.py walinuxagent-2.2.32/azurelinuxagent/pa/provision/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/provision/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/pa/provision/__init__.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.pa.provision.factory import get_provision_handler diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/rdma/centos.py walinuxagent-2.2.32/azurelinuxagent/pa/rdma/centos.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/rdma/centos.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/pa/rdma/centos.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import glob diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/rdma/factory.py walinuxagent-2.2.32/azurelinuxagent/pa/rdma/factory.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/rdma/factory.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/pa/rdma/factory.py 2018-09-30 15:05:27.000000000 +0000 @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.logger as logger @@ -30,7 +30,8 @@ ): """Return the handler object for RDMA driver handling""" if ( - distro_full_name == 'SUSE Linux Enterprise Server' and + (distro_full_name == 'SUSE Linux Enterprise Server' or + distro_full_name == 'SLES') and int(distro_version) > 11 ): return SUSERDMAHandler() diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/rdma/__init__.py walinuxagent-2.2.32/azurelinuxagent/pa/rdma/__init__.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/rdma/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/pa/rdma/__init__.py 2018-09-30 15:05:27.000000000 +0000 @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.pa.rdma.factory import get_rdma_handler diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/rdma/suse.py walinuxagent-2.2.32/azurelinuxagent/pa/rdma/suse.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/rdma/suse.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/pa/rdma/suse.py 2018-09-30 15:05:27.000000000 +0000 @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import glob @@ -91,7 +91,7 @@ logger.info("Package '%s' is not a match." % entry) else: logger.info("Package '%s' is a match. Installing." % entry) - complete_name = '%s-%s' % (package_name, version) + complete_name = '%s-%s' % (package_name, entry) cmd = zypper_install % complete_name result = shellutil.run(cmd) if result: diff -Nru walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/rdma/ubuntu.py walinuxagent-2.2.32/azurelinuxagent/pa/rdma/ubuntu.py --- walinuxagent-2.2.21+really2.2.20/azurelinuxagent/pa/rdma/ubuntu.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/azurelinuxagent/pa/rdma/ubuntu.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,6 +1,6 @@ # Microsoft Azure Linux Agent # -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import glob diff -Nru walinuxagent-2.2.21+really2.2.20/bin/waagent walinuxagent-2.2.32/bin/waagent --- walinuxagent-2.2.21+really2.2.20/bin/waagent 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/bin/waagent 2018-09-30 15:05:27.000000000 +0000 @@ -16,7 +16,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.6+ and Openssl 1.0+ +# Requires Python 2.6 and Openssl 1.0+ # # Implements parts of RFC 2131, 1541, 1497 and # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx diff -Nru walinuxagent-2.2.21+really2.2.20/bin/waagent2.0 walinuxagent-2.2.32/bin/waagent2.0 --- walinuxagent-2.2.21+really2.2.20/bin/waagent2.0 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/bin/waagent2.0 2018-09-30 15:05:27.000000000 +0000 @@ -16,7 +16,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.6+ and Openssl 1.0+ +# Requires Python 2.6 and Openssl 1.0+ # # Implements parts of RFC 2131, 1541, 1497 and # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx diff -Nru walinuxagent-2.2.21+really2.2.20/config/66-azure-storage.rules walinuxagent-2.2.32/config/66-azure-storage.rules --- walinuxagent-2.2.21+really2.2.20/config/66-azure-storage.rules 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/config/66-azure-storage.rules 2018-09-30 15:05:27.000000000 +0000 @@ -6,6 +6,7 @@ # The resource/resource has GUID of 0001 as the second value ATTRS{device_id}=="?00000000-0000-*", ENV{fabric_name}="root", GOTO="azure_names" ATTRS{device_id}=="?00000000-0001-*", ENV{fabric_name}="resource", GOTO="azure_names" +ATTRS{device_id}=="?00000001-0001-*", ENV{fabric_name}="BEK", GOTO="azure_names" # Wellknown SCSI controllers ATTRS{device_id}=="{f8b3781a-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi0", GOTO="azure_datadisk" ATTRS{device_id}=="{f8b3781b-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi1", GOTO="azure_datadisk" diff -Nru walinuxagent-2.2.21+really2.2.20/config/alpine/waagent.conf walinuxagent-2.2.32/config/alpine/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/alpine/waagent.conf 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/config/alpine/waagent.conf 2018-09-30 15:05:27.000000000 +0000 @@ -5,6 +5,10 @@ # Enable instance creation Provisioning.Enabled=y +# Enable extension handling. Do not disable this unless you do not need password reset, +# backup, monitoring, or any extension handling whatsoever. +Extensions.Enabled=y + # Rely on cloud-init to provision Provisioning.UseCloudInit=n @@ -14,7 +18,8 @@ # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y -# Supported values are "rsa", "dsa" and "ecdsa". +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. @@ -79,14 +84,18 @@ # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. -# Default is disabled -# EnableOverProvisioning=n +# Default is enabled +# EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services -# Note: -# - The default is false to protect the state of exising VMs OS.EnableFirewall=y + +# Enforce control groups limits on the agent and extensions +CGroups.EnforceLimits=n + +# CGroups which are excluded from limits, comma separated +CGroups.Excluded=customscript,runcommand diff -Nru walinuxagent-2.2.21+really2.2.20/config/arch/waagent.conf walinuxagent-2.2.32/config/arch/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/arch/waagent.conf 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/config/arch/waagent.conf 2018-09-30 15:05:27.000000000 +0000 @@ -14,7 +14,8 @@ # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y -# Supported values are "rsa", "dsa" and "ecdsa". +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. @@ -105,14 +106,12 @@ # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. -# Default is disabled -# EnableOverProvisioning=n +# Default is enabled +# EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services -# Note: -# - The default is false to protect the state of exising VMs OS.EnableFirewall=y diff -Nru walinuxagent-2.2.21+really2.2.20/config/bigip/waagent.conf walinuxagent-2.2.32/config/bigip/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/bigip/waagent.conf 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/config/bigip/waagent.conf 2018-09-30 15:05:27.000000000 +0000 @@ -16,6 +16,10 @@ # Enable instance creation Provisioning.Enabled=y +# Enable extension handling. Do not disable this unless you do not need password reset, +# backup, monitoring, or any extension handling whatsoever. +Extensions.Enabled=y + # Rely on cloud-init to provision Provisioning.UseCloudInit=n @@ -25,7 +29,8 @@ # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y -# Supported values are "rsa", "dsa" and "ecdsa". +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. @@ -83,14 +88,18 @@ # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. -# Default is disabled -# EnableOverProvisioning=n +# Default is enabled +# EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services -# Note: -# - The default is false to protect the state of exising VMs OS.EnableFirewall=y + +# Enforce control groups limits on the agent and extensions +CGroups.EnforceLimits=n + +# CGroups which are excluded from limits, comma separated +CGroups.Excluded=customscript,runcommand diff -Nru walinuxagent-2.2.21+really2.2.20/config/clearlinux/waagent.conf walinuxagent-2.2.32/config/clearlinux/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/clearlinux/waagent.conf 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/config/clearlinux/waagent.conf 2018-09-30 15:05:27.000000000 +0000 @@ -25,7 +25,8 @@ # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y -# Supported values are "rsa", "dsa" and "ecdsa". +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. @@ -77,8 +78,8 @@ # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. -# Default is disabled -# EnableOverProvisioning=n +# Default is enabled +# EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks @@ -86,5 +87,5 @@ # Add firewall rules to protect access to Azure host node services # Note: -# - The default is false to protect the state of exising VMs +# - The default is false to protect the state of existing VMs OS.EnableFirewall=y diff -Nru walinuxagent-2.2.21+really2.2.20/config/coreos/waagent.conf walinuxagent-2.2.32/config/coreos/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/coreos/waagent.conf 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/config/coreos/waagent.conf 2018-09-30 15:05:27.000000000 +0000 @@ -5,6 +5,10 @@ # Enable instance creation Provisioning.Enabled=y +# Enable extension handling. Do not disable this unless you do not need password reset, +# backup, monitoring, or any extension handling whatsoever. +Extensions.Enabled=y + # Rely on cloud-init to provision Provisioning.UseCloudInit=n @@ -14,7 +18,8 @@ # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=n -# Supported values are "rsa", "dsa" and "ecdsa". +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=ed25519 # Monitor host name changes and publish changes via DHCP requests. @@ -105,14 +110,18 @@ # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. -# Default is disabled -# EnableOverProvisioning=n +# Default is enabled +# EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks OS.AllowHTTP=y # Add firewall rules to protect access to Azure host node services -# Note: -# - The default is false to protect the state of exising VMs OS.EnableFirewall=y + +# Enforce control groups limits on the agent and extensions +CGroups.EnforceLimits=n + +# CGroups which are excluded from limits, comma separated +CGroups.Excluded=customscript,runcommand diff -Nru walinuxagent-2.2.21+really2.2.20/config/debian/waagent.conf walinuxagent-2.2.32/config/debian/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/debian/waagent.conf 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/config/debian/waagent.conf 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,129 @@ +# +# Microsoft Azure Linux Agent Configuration +# + +# Enable instance creation +Provisioning.Enabled=y + +# Enable extension handling. Do not disable this unless you do not need password reset, +# backup, monitoring, or any extension handling whatsoever. +Extensions.Enabled=y + +# Rely on cloud-init to provision +Provisioning.UseCloudInit=n + +# Password authentication for root account will be unavailable. +Provisioning.DeleteRootPassword=y + +# Generate fresh host key pair. +Provisioning.RegenerateSshHostKeyPair=y + +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. +Provisioning.SshHostKeyPairType=auto + +# Monitor host name changes and publish changes via DHCP requests. +Provisioning.MonitorHostName=y + +# Decode CustomData from Base64. +Provisioning.DecodeCustomData=n + +# Execute CustomData after provisioning. +Provisioning.ExecuteCustomData=n + +# Algorithm used by crypt when generating password hash. +#Provisioning.PasswordCryptId=6 + +# Length of random salt used when generating password hash. +#Provisioning.PasswordCryptSaltLength=10 + +# Allow reset password of sys user +Provisioning.AllowResetSysUser=n + +# Format if unformatted. If 'n', resource disk will not be mounted. +ResourceDisk.Format=y + +# File system on the resource disk +# Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. +ResourceDisk.Filesystem=ext4 + +# Mount point for the resource disk +ResourceDisk.MountPoint=/mnt/resource + +# Create and use swapfile on resource disk. +ResourceDisk.EnableSwap=n + +# Size of the swapfile. +ResourceDisk.SwapSizeMB=0 + +# Comma-seperated list of mount options. See man(8) for valid options. +ResourceDisk.MountOptions=None + +# Enable verbose logging (y|n) +Logs.Verbose=n + +# Is FIPS enabled +OS.EnableFIPS=n + +# Root device timeout in seconds. +OS.RootDeviceScsiTimeout=300 + +# If "None", the system default version is used. +OS.OpensslPath=None + +# Set the SSH ClientAliveInterval +# OS.SshClientAliveInterval=180 + +# Set the path to SSH keys and configuration files +OS.SshDir=/etc/ssh + +# If set, agent will use proxy server to access internet +#HttpProxy.Host=None +#HttpProxy.Port=None + +# Detect Scvmm environment, default is n +# DetectScvmmEnv=n + +# +# Lib.Dir=/var/lib/waagent + +# +# DVD.MountPoint=/mnt/cdrom/secure + +# +# Pid.File=/var/run/waagent.pid + +# +# Extension.LogDir=/var/log/azure + +# +# Home.Dir=/home + +# Enable RDMA management and set up, should only be used in HPC images +# OS.EnableRDMA=y + +# Enable or disable goal state processing auto-update, default is enabled +# AutoUpdate.Enabled=y + +# Determine the update family, this should not be changed +# AutoUpdate.GAFamily=Prod + +# Determine if the overprovisioning feature is enabled. 
If yes, hold extension +# handling until inVMArtifactsProfile.OnHold is false. +# Default is enabled +# EnableOverProvisioning=y + +# Allow fallback to HTTP if HTTPS is unavailable +# Note: Allowing HTTP (vs. HTTPS) may cause security risks +# OS.AllowHTTP=n + +# Add firewall rules to protect access to Azure host node services +# Note: +# - The default is false to protect the state of existing VMs +OS.EnableFirewall=n + +# Enforce control groups limits on the agent and extensions +CGroups.EnforceLimits=n + +# CGroups which are excluded from limits, comma separated +CGroups.Excluded=customscript,runcommand diff -Nru walinuxagent-2.2.21+really2.2.20/config/freebsd/waagent.conf walinuxagent-2.2.32/config/freebsd/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/freebsd/waagent.conf 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/config/freebsd/waagent.conf 2018-09-30 15:05:27.000000000 +0000 @@ -5,6 +5,10 @@ # Enable instance creation Provisioning.Enabled=y +# Enable extension handling. Do not disable this unless you do not need password reset, +# backup, monitoring, or any extension handling whatsoever. +Extensions.Enabled=y + # Rely on cloud-init to provision Provisioning.UseCloudInit=n @@ -14,7 +18,8 @@ # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y -# Supported values are "rsa", "dsa" and "ecdsa". +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. @@ -103,14 +108,18 @@ # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. -# Default is disabled -# EnableOverProvisioning=n +# Default is enabled +# EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services -# Note: -# - The default is false to protect the state of exising VMs OS.EnableFirewall=y + +# Enforce control groups limits on the agent and extensions +CGroups.EnforceLimits=n + +# CGroups which are excluded from limits, comma separated +CGroups.Excluded=customscript,runcommand diff -Nru walinuxagent-2.2.21+really2.2.20/config/gaia/waagent.conf walinuxagent-2.2.32/config/gaia/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/gaia/waagent.conf 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/config/gaia/waagent.conf 2018-09-30 15:05:27.000000000 +0000 @@ -5,6 +5,10 @@ # Enable instance creation Provisioning.Enabled=y +# Enable extension handling. Do not disable this unless you do not need password reset, +# backup, monitoring, or any extension handling whatsoever. +Extensions.Enabled=y + # Rely on cloud-init to provision Provisioning.UseCloudInit=n @@ -14,7 +18,8 @@ # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=n -# Supported values are "rsa", "dsa" and "ecdsa". +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. @@ -102,14 +107,12 @@ # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. 
-# Default is disabled -# EnableOverProvisioning=n +# Default is enabled +# EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services -# Note: -# - The default is false to protect the state of exising VMs OS.EnableFirewall=y diff -Nru walinuxagent-2.2.21+really2.2.20/config/iosxe/waagent.conf walinuxagent-2.2.32/config/iosxe/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/iosxe/waagent.conf 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/config/iosxe/waagent.conf 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,119 @@ +# +# Microsoft Azure Linux Agent Configuration +# + +# Enable instance creation +Provisioning.Enabled=n + +# Rely on cloud-init to provision +Provisioning.UseCloudInit=n + +# Password authentication for root account will be unavailable. +Provisioning.DeleteRootPassword=y + +# Generate fresh host key pair. +Provisioning.RegenerateSshHostKeyPair=n + +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. +Provisioning.SshHostKeyPairType=rsa + +# Monitor host name changes and publish changes via DHCP requests. +Provisioning.MonitorHostName=n + +# Decode CustomData from Base64. +Provisioning.DecodeCustomData=n + +# Execute CustomData after provisioning. +Provisioning.ExecuteCustomData=n + +# Algorithm used by crypt when generating password hash. +#Provisioning.PasswordCryptId=6 + +# Length of random salt used when generating password hash. +#Provisioning.PasswordCryptSaltLength=10 + +# Allow reset password of sys user +Provisioning.AllowResetSysUser=n + +# Format if unformatted. If 'n', resource disk will not be mounted. +ResourceDisk.Format=n + +# File system on the resource disk +# Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. +ResourceDisk.Filesystem=ext4 + +# Mount point for the resource disk +ResourceDisk.MountPoint=/mnt/resource + +# Create and use swapfile on resource disk. +ResourceDisk.EnableSwap=n + +# Size of the swapfile. +ResourceDisk.SwapSizeMB=0 + +# Comma-seperated list of mount options. See man(8) for valid options. +ResourceDisk.MountOptions=None + +# Enable verbose logging (y|n) +Logs.Verbose=n + +# Is FIPS enabled +OS.EnableFIPS=n + +# Root device timeout in seconds. +OS.RootDeviceScsiTimeout=300 + +# If "None", the system default version is used. +OS.OpensslPath=None + +# Set the SSH ClientAliveInterval +# OS.SshClientAliveInterval=180 + +# Set the path to SSH keys and configuration files +OS.SshDir=/etc/ssh + +# If set, agent will use proxy server to access internet +#HttpProxy.Host=None +#HttpProxy.Port=None + +# Detect Scvmm environment, default is n +# DetectScvmmEnv=n + +# +# Lib.Dir=/var/lib/waagent + +# +# DVD.MountPoint=/mnt/cdrom/secure + +# +# Pid.File=/var/run/waagent.pid + +# +# Extension.LogDir=/var/log/azure + +# +# Home.Dir=/home + +# Enable RDMA management and set up, should only be used in HPC images +# OS.EnableRDMA=y + +# Enable or disable goal state processing auto-update, default is enabled +AutoUpdate.Enabled=y + +# Determine the update family, this should not be changed +# AutoUpdate.GAFamily=Prod + +# Determine if the overprovisioning feature is enabled. If yes, hold extension +# handling until inVMArtifactsProfile.OnHold is false. +# Default is enabled +# EnableOverProvisioning=y + +# Allow fallback to HTTP if HTTPS is unavailable +# Note: Allowing HTTP (vs. 
HTTPS) may cause security risks +# OS.AllowHTTP=n + +# Add firewall rules to protect access to Azure host node services +# Note: +# - The default is false to protect the state of existing VMs +OS.EnableFirewall=y diff -Nru walinuxagent-2.2.21+really2.2.20/config/nsbsd/waagent.conf walinuxagent-2.2.32/config/nsbsd/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/nsbsd/waagent.conf 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/config/nsbsd/waagent.conf 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,117 @@ +# +# Microsoft Azure Linux Agent Configuration +# + +# Enable instance creation +Provisioning.Enabled=y + +# Rely on cloud-init to provision +Provisioning.UseCloudInit=n + +# Password authentication for root account will be unavailable. +Provisioning.DeleteRootPassword=n + +# Generate fresh host key pair. +Provisioning.RegenerateSshHostKeyPair=n + +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. +Provisioning.SshHostKeyPairType=rsa + +# Monitor host name changes and publish changes via DHCP requests. +Provisioning.MonitorHostName=y + +# Decode CustomData from Base64. +Provisioning.DecodeCustomData=n + +# Execute CustomData after provisioning. +Provisioning.ExecuteCustomData=n + +# Algorithm used by crypt when generating password hash. +#Provisioning.PasswordCryptId=6 + +# Length of random salt used when generating password hash. +#Provisioning.PasswordCryptSaltLength=10 + +# Format if unformatted. If 'n', resource disk will not be mounted. +ResourceDisk.Format=n + +# File system on the resource disk +# Typically ext3 or ext4. FreeBSD images should use 'ufs' here. +ResourceDisk.Filesystem=ufs + +# Mount point for the resource disk +ResourceDisk.MountPoint=/mnt/resource + +# Create and use swapfile on resource disk. +ResourceDisk.EnableSwap=n + +# Size of the swapfile. +ResourceDisk.SwapSizeMB=0 + +# Comma-seperated list of mount options. See man(8) for valid options. +ResourceDisk.MountOptions=None + +# Enable verbose logging (y|n) TODO set n +Logs.Verbose=n + +# Is FIPS enabled +OS.EnableFIPS=n + +# Root device timeout in seconds. +OS.RootDeviceScsiTimeout=300 + +# If "None", the system default version is used. +OS.OpensslPath=None + +# Set the path to SSH keys and configuration files +OS.SshDir=/etc/ssh + +OS.PasswordPath=/etc/master.passwd + +OS.SudoersDir=/usr/local/etc/sudoers.d + +# If set, agent will use proxy server to access internet +#HttpProxy.Host=None +#HttpProxy.Port=None + +# Detect Scvmm environment, default is n +# DetectScvmmEnv=n + +# +Lib.Dir=/usr/Firewall/var/waagent + +# +# DVD.MountPoint=/mnt/cdrom/secure + +# +# Pid.File=/var/run/waagent.pid + +# +Extension.LogDir=/log/azure + +# +# Home.Dir=/home + +# Enable RDMA management and set up, should only be used in HPC images +# OS.EnableRDMA=y + +# Enable or disable goal state processing auto-update, default is enabled +AutoUpdate.Enabled=n + +# Determine the update family, this should not be changed +# AutoUpdate.GAFamily=Prod + +# Determine if the overprovisioning feature is enabled. If yes, hold extension +# handling until inVMArtifactsProfile.OnHold is false. +# Default is disabled +# EnableOverProvisioning=n + +# Allow fallback to HTTP if HTTPS is unavailable +# Note: Allowing HTTP (vs. 
HTTPS) may cause security risks +# OS.AllowHTTP=n + +# Add firewall rules to protect access to Azure host node services +# Note: +# - The default is false to protect the state of existing VMs +OS.EnableFirewall=n diff -Nru walinuxagent-2.2.21+really2.2.20/config/openbsd/waagent.conf walinuxagent-2.2.32/config/openbsd/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/openbsd/waagent.conf 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/config/openbsd/waagent.conf 2018-09-30 15:05:27.000000000 +0000 @@ -15,6 +15,7 @@ Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=auto # Monitor host name changes and publish changes via DHCP requests. @@ -101,8 +102,8 @@ # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. -# Default is disabled -# EnableOverProvisioning=n +# Default is enabled +# EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks @@ -110,5 +111,5 @@ # Add firewall rules to protect access to Azure host node services # Note: -# - The default is false to protect the state of exising VMs +# - The default is false to protect the state of existing VMs OS.EnableFirewall=y diff -Nru walinuxagent-2.2.21+really2.2.20/config/suse/waagent.conf walinuxagent-2.2.32/config/suse/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/suse/waagent.conf 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/config/suse/waagent.conf 2018-09-30 15:05:27.000000000 +0000 @@ -5,6 +5,10 @@ # Enable instance creation Provisioning.Enabled=y +# Enable extension handling. Do not disable this unless you do not need password reset, +# backup, monitoring, or any extension handling whatsoever. +Extensions.Enabled=y + # Rely on cloud-init to provision Provisioning.UseCloudInit=n @@ -14,7 +18,8 @@ # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y -# Supported values are "rsa", "dsa" and "ecdsa". +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. @@ -105,14 +110,12 @@ # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. -# Default is disabled -# EnableOverProvisioning=n +# Default is enabled +# EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services -# Note: -# - The default is false to protect the state of exising VMs OS.EnableFirewall=y diff -Nru walinuxagent-2.2.21+really2.2.20/config/ubuntu/waagent.conf walinuxagent-2.2.32/config/ubuntu/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/ubuntu/waagent.conf 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/config/ubuntu/waagent.conf 2018-09-30 15:05:27.000000000 +0000 @@ -5,6 +5,10 @@ # Enable instance creation Provisioning.Enabled=n +# Enable extension handling. Do not disable this unless you do not need password reset, +# backup, monitoring, or any extension handling whatsoever. 
+Extensions.Enabled=y + # Rely on cloud-init to provision Provisioning.UseCloudInit=y @@ -14,7 +18,8 @@ # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=n -# Supported values are "rsa", "dsa" and "ecdsa". +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. @@ -93,14 +98,18 @@ # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. -# Default is disabled -# EnableOverProvisioning=n +# Default is enabled +# EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services -# Note: -# - The default is false to protect the state of exising VMs OS.EnableFirewall=y + +# Enforce control groups limits on the agent and extensions +CGroups.EnforceLimits=n + +# CGroups which are excluded from limits, comma separated +CGroups.Excluded=customscript,runcommand diff -Nru walinuxagent-2.2.21+really2.2.20/config/waagent.conf walinuxagent-2.2.32/config/waagent.conf --- walinuxagent-2.2.21+really2.2.20/config/waagent.conf 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/config/waagent.conf 2018-09-30 15:05:27.000000000 +0000 @@ -5,6 +5,10 @@ # Enable instance creation Provisioning.Enabled=y +# Enable extension handling. Do not disable this unless you do not need password reset, +# backup, monitoring, or any extension handling whatsoever. +Extensions.Enabled=y + # Rely on cloud-init to provision Provisioning.UseCloudInit=n @@ -15,6 +19,7 @@ Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. @@ -105,14 +110,18 @@ # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. -# Default is disabled -# EnableOverProvisioning=n +# Default is enabled +# EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services -# Note: -# - The default is false to protect the state of exising VMs OS.EnableFirewall=y + +# Enforce control groups limits on the agent and extensions +CGroups.EnforceLimits=n + +# CGroups which are excluded from limits, comma separated +CGroups.Excluded=customscript,runcommand diff -Nru walinuxagent-2.2.21+really2.2.20/debian/changelog walinuxagent-2.2.32/debian/changelog --- walinuxagent-2.2.21+really2.2.20/debian/changelog 2018-03-20 22:11:57.000000000 +0000 +++ walinuxagent-2.2.32/debian/changelog 2018-10-29 08:13:23.000000000 +0000 @@ -1,3 +1,17 @@ +walinuxagent (2.2.32-0ubuntu1~18.10.1) cosmic; urgency=medium + + * Backport of the disco version. + * New upstream release (LP: #1799498). + * debian/patches/disable_import_test.patch: refreshed patch. + * debian/control: + - Add the python3-mock build-dependency. + * debian/rules: + - Run unit tests but don't fail the build if they fail. Previously, due to + a bug in setup.py, those were never being run during build so the + failures are not regressions. 
+ + -- Łukasz 'sil2100' Zemczak Mon, 29 Oct 2018 09:13:23 +0100 + walinuxagent (2.2.21+really2.2.20-0ubuntu3) bionic; urgency=medium * Drop Recommends of linux-image-extra-virtual, as that depends on a kernel diff -Nru walinuxagent-2.2.21+really2.2.20/debian/control walinuxagent-2.2.32/debian/control --- walinuxagent-2.2.21+really2.2.20/debian/control 2018-03-20 22:11:56.000000000 +0000 +++ walinuxagent-2.2.32/debian/control 2018-10-29 08:13:23.000000000 +0000 @@ -8,6 +8,7 @@ python3-all, python3-setuptools, python3-pyasn1, + python3-mock, openssl (>= 1.0), Standards-Version: 3.9.8 X-Python3-Version: >= 3.2 diff -Nru walinuxagent-2.2.21+really2.2.20/debian/patches/disable_import_test.patch walinuxagent-2.2.32/debian/patches/disable_import_test.patch --- walinuxagent-2.2.21+really2.2.20/debian/patches/disable_import_test.patch 2018-02-23 18:17:20.000000000 +0000 +++ walinuxagent-2.2.32/debian/patches/disable_import_test.patch 2018-10-29 08:13:23.000000000 +0000 @@ -1,15 +1,17 @@ -Index: walinuxagent-2.2.21+really2.2.20/config/waagent.conf +Index: walinuxagent-2.2.32/config/waagent.conf =================================================================== ---- walinuxagent-2.2.21+really2.2.20.orig/config/waagent.conf -+++ walinuxagent-2.2.21+really2.2.20/config/waagent.conf -@@ -3,16 +3,16 @@ +--- walinuxagent-2.2.32.orig/config/waagent.conf ++++ walinuxagent-2.2.32/config/waagent.conf +@@ -3,7 +3,7 @@ # # Enable instance creation -Provisioning.Enabled=y +Provisioning.Enabled=n - # Rely on cloud-init to provision + # Enable extension handling. Do not disable this unless you do not need password reset, + # backup, monitoring, or any extension handling whatsoever. +@@ -13,10 +13,10 @@ Extensions.Enabled=y Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. @@ -21,8 +23,8 @@ +Provisioning.RegenerateSshHostKeyPair=n # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". - Provisioning.SshHostKeyPairType=rsa -@@ -36,14 +36,14 @@ Provisioning.ExecuteCustomData=n + # The "auto" option is supported on OpenSSH 5.9 (2011) and later. +@@ -41,14 +41,14 @@ Provisioning.ExecuteCustomData=n Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. diff -Nru walinuxagent-2.2.21+really2.2.20/debian/rules walinuxagent-2.2.32/debian/rules --- walinuxagent-2.2.21+really2.2.20/debian/rules 2018-02-23 18:17:20.000000000 +0000 +++ walinuxagent-2.2.32/debian/rules 2018-10-29 08:13:23.000000000 +0000 @@ -25,3 +25,9 @@ override_dh_python3: dh_python3 -O--buildsystem=pybuild --shebang "/usr/bin/env python3" + +override_dh_auto_test: + # No test were run on any of the previous versions, temporarily + # ignoring the failures as they're not regressions (LP: #1800499) + # XXX: Those need to be fixed ASAP. + -dh_auto_test -O--buildsystem=pybuild diff -Nru walinuxagent-2.2.21+really2.2.20/.github/CONTRIBUTING.md walinuxagent-2.2.32/.github/CONTRIBUTING.md --- walinuxagent-2.2.21+really2.2.20/.github/CONTRIBUTING.md 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/.github/CONTRIBUTING.md 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,77 @@ +# Contributing to Linux Guest Agent +First, thank you for contributing to WALinuxAgent repository! + +## Basics +If you would like to become an active contributor to this project, please follow the instructions provided in [Microsoft Azure Projects Contribution Guidelines](http://azure.github.io/guidelines/). 
+ +## Table of Contents +[Before starting](#before-starting) +- [Github basics](#github-basics) +- [Code of Conduct](#code-of-conduct) + +[Making Changes](#making-changes) +- [Pull Requests](#pull-requests) +- [Pull Request Guidelines](#pull-request-guidelines) + - [Cleaning up commits](#cleaning-up-commits) + - [General guidelines](#general-guidelines) + - [Testing guidelines](#testing-guidelines) + +## Before starting + +### Github basics + +#### GitHub workflow + +If you don't have experience with Git and Github, some of the terminology and process can be confusing. [Here's a guide to understanding Github](https://guides.github.com/introduction/flow/). + +#### Forking the Azure/Guest-Configuration-Extension repository + +Unless you are working with multiple contributors on the same file, we ask that you fork the repository and submit your Pull Request from there. [Here's a guide to forks in Github](https://guides.github.com/activities/forking/). + +### Code of Conduct + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + +## Making Changes + +### Pull Requests + +You can find all of the pull requests that have been opened in the [Pull Request](https://github.com/Azure/Guest-Configuration-Extension/pulls) section of the repository. + +To open your own pull request, click [here](https://github.com/Azure/WALinuxAgent/compare). When creating a pull request, keep the following in mind: +- Make sure you are pointing to the fork and branch that your changes were made in +- Choose the correct branch you want your pull request to be merged into +- The pull request template that is provided **should be filled out**; this is not something that should just be deleted or ignored when the pull request is created + - Deleting or ignoring this template will elongate the time it takes for your pull request to be reviewed + + +### Pull Request Guidelines + +A pull request template will automatically be included as a part of your PR. Please fill out the checklist as specified. Pull requests **will not be reviewed** unless they include a properly completed checklist. + +#### Cleaning up Commits + +If you are thinking about making a large change, **break up the change into small, logical, testable chunks, and organize your pull requests accordingly**. + +Often when a pull request is created with a large number of files changed and/or a large number of lines of code added and/or removed, GitHub will have a difficult time opening up the changes on their site. This forces the Azure Guest-Configuration-Extension team to use separate software to do a code review on the pull request. + +If you find yourself creating a pull request and are unable to see all the changes on GitHub, we recommend **splitting the pull request into multiple pull requests that are able to be reviewed on GitHub**. + +If splitting up the pull request is not an option, we recommend **creating individual commits for different parts of the pull request, which can be reviewed individually on GitHub**. + +For more information on cleaning up the commits in a pull request, such as how to rebase, squash, and cherry-pick, click [here](https://github.com/Azure/azure-powershell/blob/dev/documentation/cleaning-up-commits.md). 
+ +#### General guidelines + +The following guidelines must be followed in **EVERY** pull request that is opened. + +- Title of the pull request is clear and informative +- There are a small number of commits that each have an informative message +- A description of the changes the pull request makes is included, and a reference to the issue being resolved, if the change address any +- All files have the Microsoft copyright header + +#### Testing Guidelines + +The following guidelines must be followed in **EVERY** pull request that is opened. + +- Pull request includes test coverage for the included changes \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/.github/PULL_REQUEST_TEMPLATE.md walinuxagent-2.2.32/.github/PULL_REQUEST_TEMPLATE.md --- walinuxagent-2.2.21+really2.2.20/.github/PULL_REQUEST_TEMPLATE.md 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/.github/PULL_REQUEST_TEMPLATE.md 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,22 @@ + + +## Description + +Issue # + + +--- + +### PR information +- [ ] The title of the PR is clear and informative. +- [ ] There are a small number of commits, each of which has an informative message. This means that previously merged commits do not appear in the history of the PR. For information on cleaning up the commits in your pull request, [see this page](https://github.com/Azure/azure-powershell/blob/master/documentation/development-docs/cleaning-up-commits.md). +- [ ] Except for special cases involving multiple contributors, the PR is started from a fork of the main repository, not a branch. +- [ ] If applicable, the PR references the bug/issue that it fixes in the description. +- [ ] New Unit tests were added for the changes made and Travis.CI is passing. + +### Quality of Code and Contribution Guidelines +- [ ] I have read the [contribution guidelines](https://github.com/Azure/WALinuxAgent/blob/master/.github/CONTRIBUTING.md). \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/__main__.py walinuxagent-2.2.32/__main__.py --- walinuxagent-2.2.21+really2.2.20/__main__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/__main__.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.agent as agent diff -Nru walinuxagent-2.2.21+really2.2.20/README.md walinuxagent-2.2.32/README.md --- walinuxagent-2.2.21+really2.2.20/README.md 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/README.md 2018-09-30 15:05:27.000000000 +0000 @@ -1,54 +1,74 @@ -## Microsoft Azure Linux Agent README +# Microsoft Azure Linux Agent -### INTRODUCTION +## Master branch status -The Microsoft Azure Linux Agent (waagent) manages Linux & BSD provisioning, -and VM interaction with the Azure Fabric Controller. 
It provides the following -functionality for Linux and BSD IaaS deployments: - - * Image Provisioning - - Creation of a user account - - Configuring SSH authentication types - - Deployment of SSH public keys and key pairs - - Setting the host name - - Publishing the host name to the platform DNS - - Reporting SSH host key fingerprint to the platform - - Resource Disk Management - - Formatting and mounting the resource disk - - Configuring swap space - - * Networking - - Manages routes to improve compatibility with platform DHCP servers - - Ensures the stability of the network interface name - - * Kernel - - Configure virtual NUMA (disable for kernel <2.6.37) - - Consume Hyper-V entropy for /dev/random - - Configure SCSI timeouts for the root device (which could be remote) - - * Diagnostics - - Console redirection to the serial port - - * SCVMM Deployments - - Detect and bootstrap the VMM agent for Linux when running in a System - Center Virtual Machine Manager 2012R2 environment - - * VM Extension - - Inject component authored by Microsoft and Partners into Linux VM (IaaS) - to enable software and configuration automation - - VM Extension reference implementation on https://github.com/Azure/azure-linux-extensions +Each badge below represents our basic validation tests for an image, which are executed several times each day. These include provisioning, user account, disk, extension and networking scenarios. +Image | Status | +------|--------| +Canonical UbuntuServer 14.04.5-LTS|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Canonical_UbuntuServer_14.04.5-LTS__agent--bvt.svg) +Canonical UbuntuServer 14.04.5-DAILY-LTS|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Canonical_UbuntuServer_14.04.5-DAILY-LTS__agent--bvt.svg) +Canonical UbuntuServer 16.04-LTS|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Canonical_UbuntuServer_16.04-LTS__agent--bvt.svg) +Canonical UbuntuServer 16.04-DAILY-LTS|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Canonical_UbuntuServer_16.04-DAILY-LTS__agent--bvt.svg) +Canonical UbuntuServer 18.04-LTS|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Canonical_UbuntuServer_18.04-LTS__agent--bvt.svg) +Canonical UbuntuServer 18.04-DAILY-LTS|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Canonical_UbuntuServer_18.04-DAILY-LTS__agent--bvt.svg) +Credativ Debian 8|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Credativ_Debian_8__agent--bvt.svg) +Credativ Debian 8-DAILY|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Credativ_Debian_8-DAILY__agent--bvt.svg) +Credativ Debian 9|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Credativ_Debian_9__agent--bvt.svg) +Credativ Debian 9-DAILY|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Credativ_Debian_9-DAILY__agent--bvt.svg) +OpenLogic CentOS 6.9|![badge](https://dcrbadges.blob.core.windows.net/scenarios/OpenLogic_CentOS_6.9__agent--bvt.svg) +OpenLogic CentOS 7.4|![badge](https://dcrbadges.blob.core.windows.net/scenarios/OpenLogic_CentOS_7.4__agent--bvt.svg) +RedHat RHEL 6.9|![badge](https://dcrbadges.blob.core.windows.net/scenarios/RedHat_RHEL_6.9__agent--bvt.svg) +RedHat RHEL 7-RAW|![badge](https://dcrbadges.blob.core.windows.net/scenarios/RedHat_RHEL_7-RAW__agent--bvt.svg) +SUSE SLES 12-SP3|![badge](https://dcrbadges.blob.core.windows.net/scenarios/SUSE_SLES_12-SP3__agent--bvt.svg) + +## Introduction + +The Microsoft Azure Linux Agent (waagent) manages Linux provisioning and VM interaction with the Azure Fabric 
Controller. It provides the following +functionality for Linux IaaS deployments: + +* Image Provisioning + * Creation of a user account + * Configuring SSH authentication types + * Deployment of SSH public keys and key pairs + * Setting the host name + * Publishing the host name to the platform DNS + * Reporting SSH host key fingerprint to the platform + * Resource Disk Management + * Formatting and mounting the resource disk + * Configuring swap space + +* Networking + * Manages routes to improve compatibility with platform DHCP servers + * Ensures the stability of the network interface name + +* Kernel + * Configure virtual NUMA (disable for kernel <2.6.37) + * Consume Hyper-V entropy for /dev/random + * Configure SCSI timeouts for the root device (which could be remote) + +* Diagnostics + * Console redirection to the serial port + +* SCVMM Deployments + * Detect and bootstrap the VMM agent for Linux when running in a System + Center Virtual Machine Manager 2012R2 environment + +* VM Extension + * Inject component authored by Microsoft and Partners into Linux VM (IaaS) + to enable software and configuration automation + * VM Extension reference implementation on [GitHub](https://github.com/Azure/azure-linux-extensions) -### COMMUNICATION +## Communication The information flow from the platform to the agent occurs via two channels: - * A boot-time attached DVD for IaaS deployments. - This DVD includes an OVF-compliant configuration file that includes all - provisioning information other than the actual SSH keypairs. +* A boot-time attached DVD for IaaS deployments. + This DVD includes an OVF-compliant configuration file that includes all + provisioning information other than the actual SSH keypairs. - * A TCP endpoint exposing a REST API used to obtain deployment and topology - configuration. +* A TCP endpoint exposing a REST API used to obtain deployment and topology + configuration. The agent will use an HTTP proxy if provided via the `http_proxy` (for `http` requests) or `https_proxy` (for `https` requests) environment variables. The `HttpProxy.Host` and @@ -56,115 +76,110 @@ settings. Due to limitations of Python, the agent *does not* support HTTP proxies requiring authentication. - -### REQUIREMENTS +## Requirements The following systems have been tested and are known to work with the Azure Linux Agent. Please note that this list may differ from the official list -of supported systems on the Microsoft Azure Platform as described here: -http://support.microsoft.com/kb/2805216 +of supported systems on the Microsoft Azure Platform as described [here](http://support.microsoft.com/kb/2805216). Waagent depends on some system packages in order to function properly: - * Python 2.6+ - * OpenSSL 1.0+ - * OpenSSH 5.3+ - * Filesystem utilities: sfdisk, fdisk, mkfs, parted - * Password tools: chpasswd, sudo - * Text processing tools: sed, grep - * Network tools: ip-route - +* Python 2.6+ +* OpenSSL 1.0+ +* OpenSSH 5.3+ +* Filesystem utilities: sfdisk, fdisk, mkfs, parted +* Password tools: chpasswd, sudo +* Text processing tools: sed, grep +* Network tools: ip-route -### INSTALLATION +## Installation Installation via your distribution's package repository is preferred. You can also customize your own RPM or DEB packages using the configuration samples provided (see deb and rpm sections below). 
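Stepping back to the proxy behaviour described in the Communication section above (the agent consults the `http_proxy`/`https_proxy` environment variables, configured `HttpProxy.Host`/`HttpProxy.Port` values take precedence, and authenticated proxies are not supported), the following is a minimal sketch of that selection order. The helper name and its arguments are hypothetical; this is not the agent's actual request code.

```python
# Rough sketch of the proxy-selection order described in the README:
# an explicitly configured HttpProxy.Host / HttpProxy.Port wins, otherwise
# the http_proxy / https_proxy environment variables are consulted.
# Hypothetical helper, not the agent's actual implementation.
import os

def select_proxy(secure, conf_host=None, conf_port=None):
    if conf_host:
        return conf_host, conf_port
    env_var = "https_proxy" if secure else "http_proxy"
    url = os.environ.get(env_var) or os.environ.get(env_var.upper())
    if not url:
        return None, None
    # Strip an optional scheme and split "host:port"
    host_port = url.split("://", 1)[-1].rstrip("/")
    host, _, port = host_port.partition(":")
    return host, (int(port) if port else None)

print(select_proxy(secure=True))                                  # from environment
print(select_proxy(secure=False, conf_host="10.0.0.4", conf_port=3128))
```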
-For more advanced installation options, such as installing to custom locations -or prefixes, you can use ***setuptools*** to install from source by running: - - #sudo python setup.py install --register-service +For more advanced installation options, such as installing to custom locations or prefixes, you can use **setuptools** to install from source by running: + +```bash + sudo python setup.py install --register-service +``` You can view more installation options by running: - #sudo python setup.py install --help +```bash + sudo python setup.py install --help +``` -The agent's log file is kept at /var/log/waagent.log. +The agent's log file is kept at `/var/log/waagent.log`. -### UPGRADE +## Upgrade -Upgrading via your distribution's package repository is preferred. +Upgrading via your distribution's package repository is strongly preferred. If upgrading manually, same with installation above by running: - #sudo python setup.py install --force +```bash + sudo python setup.py install --force +``` Restart waagent service,for most of linux distributions: - #sudo service waagent restart +```bash + sudo service waagent restart +``` For Ubuntu, use: - #sudo service walinuxagent restart +```bash + sudo service walinuxagent restart +``` For CoreOS, use: - #sudo systemctl restart waagent - -The agent's log file is kept at /var/log/waagent.log. +```bash + sudo systemctl restart waagent +``` +## Command line options -### COMMAND LINE OPTIONS +### Flags -Flags: +`-verbose`: Increase verbosity of specified command - -verbose: Increase verbosity of specified command +`-force`: Skip interactive confirmation for some commands - -force: Skip interactive confirmation for some commands +### Commands -Commands: +`-help`: Lists the supported commands and flags. --help: Lists the supported commands and flags. +`-deprovision`: Attempt to clean the system and make it suitable for re-provisioning, by deleting the following: --deprovision: Attempt to clean the system and make it suitable for -re-provisioning, by deleting the following: - - * All SSH host keys (if Provisioning.RegenerateSshHostKeyPair - is 'y' in the configuration file) - * Nameserver configuration in /etc/resolv.conf - * Root password from /etc/shadow (if - Provisioning.DeleteRootPassword is 'y' in the configuration file) - * Cached DHCP client leases - * Resets host name to localhost.localdomain +* All SSH host keys (if Provisioning.RegenerateSshHostKeyPair is 'y' in the configuration file) +* Nameserver configuration in /etc/resolv.conf +* Root password from /etc/shadow (if Provisioning.DeleteRootPassword is 'y' in the configuration file) +* Cached DHCP client leases +* Resets host name to localhost.localdomain - WARNING! Deprovision does not guarantee that the image is cleared of - all sensitive information and suitable for redistribution. + **WARNING!** Deprovision does not guarantee that the image is cleared of all sensitive information and suitable for redistribution. --deprovision+user: Performs everything under deprovision (above) -and also deletes the last provisioned user account and associated data. +`-deprovision+user`: Performs everything under deprovision (above) and also deletes the last provisioned user account and associated data. --version: Displays the version of waagent +`-version`: Displays the version of waagent --serialconsole: Configures GRUB to mark ttyS0 (the first serial port) -as the boot console. This ensures that kernel bootup logs are sent to -the serial port and made available for debugging. 
+`-serialconsole`: Configures GRUB to mark ttyS0 (the first serial port) as the boot console. This ensures that kernel bootup logs are sent to the serial port and made available for debugging. --daemon: Run waagent as a daemon to manage interaction with the -platform. This argument is specified to waagent in the waagent init -script. +`-daemon`: Run waagent as a daemon to manage interaction with the platform. This argument is specified to waagent in the waagent init script. --start: Run waagent as a background process +`-start`: Run waagent as a background process -### CONFIGURATION +## Configuration -A configuration file (/etc/waagent.conf) controls the actions of -waagent. Blank lines and lines whose first character is a `#` are -ignored (end-of-line comments are *not* supported). +A configuration file (/etc/waagent.conf) controls the actions of waagent. Blank lines and lines whose first character is a `#` are ignored (end-of-line comments are *not* supported). A sample configuration file is shown below: -``` +```yml +Extensions.Enabled=y Provisioning.Enabled=y Provisioning.UseCloudInit=n Provisioning.DeleteRootPassword=n @@ -190,6 +205,8 @@ OS.SshDir=/etc/ssh HttpProxy.Host=None HttpProxy.Port=None +CGroups.EnforceLimits=y +CGroups.Excluded=customscript,runcommand ``` The various configuration options are described in detail below. Configuration @@ -197,19 +214,38 @@ configuration options can be specified as "y" or "n". The special keyword "None" may be used for some string type configuration entries as detailed below. -#### Configuration File Options +### Configuration File Options + +#### __Extensions.Enabled__ + +_Type: Boolean_ +_Default: y_ + +This allows the user to enable or disable the extension handling functionality in the +agent. Valid values are "y" or "n". If extension handling is disabled, the goal state +will still be processed and VM status is still reported, but only every 5 minutes. +Extension config within the goal state will be ignored. Note that functionality such +as password reset, ssh key updates and backups depend on extensions. Only disable this +if you do not need extensions at all. + +_Note_: disabling extensions in this manner is not the same as running completely +without the agent. In order to do that, the `provisionVMAgent` flag must be set at +provisioning time, via whichever API is being used. We will provide more details on +this on our wiki when it is generally available. + +#### __Provisioning.Enabled__ -* __Provisioning.Enabled__ _Type: Boolean_ -_Default: y_ +_Default: y_ This allows the user to enable or disable the provisioning functionality in the agent. Valid values are "y" or "n". If provisioning is disabled, SSH host and user keys in the image are preserved and any configuration specified in the Azure provisioning API is ignored. -* __Provisioning.UseCloudInit__ -_Type: Boolean_ +#### __Provisioning.UseCloudInit__ + +_Type: Boolean_ _Default: n_ This options enables / disables support for provisioning by means of cloud-init. @@ -218,16 +254,18 @@ disabled ("n") for this option to have an effect. Setting _Provisioning.Enabled_ to true ("y") overrides this option and runs the built-in agent provisioning code. -* __Provisioning.DeleteRootPassword__ -_Type: Boolean_ +#### __Provisioning.DeleteRootPassword__ + +_Type: Boolean_ _Default: n_ If set, the root password in the /etc/shadow file is erased during the provisioning process. 
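The configuration-file format described above (blank lines and lines whose first character is `#` are ignored, end-of-line comments are not supported, and options are `key=value` pairs with booleans spelled "y"/"n") is simple enough to illustrate with a short parser. The sketch below is illustrative only, under those stated assumptions, and is not the agent's actual conf module.

```python
# Minimal sketch of reading a waagent.conf-style file, following the format
# described in the README: blank lines and lines starting with '#' are
# ignored, end-of-line comments are not supported, values are key=value.
# Illustrative only; not the agent's actual configuration code.

def parse_waagent_conf(path="/etc/waagent.conf"):
    options = {}
    with open(path) as conf_file:
        for raw in conf_file:
            line = raw.strip()
            if not line or line.startswith("#"):
                continue                      # skip blanks and comments
            if "=" not in line:
                continue                      # ignore malformed lines
            key, value = line.split("=", 1)
            options[key.strip()] = value.strip()
    return options

def get_switch(options, key, default=False):
    # Boolean options are spelled "y" or "n" in waagent.conf
    value = options.get(key)
    return default if value is None else value.lower() == "y"

if __name__ == "__main__":
    conf = parse_waagent_conf()
    print(get_switch(conf, "Extensions.Enabled", default=True))
    # Comma-separated options such as CGroups.Excluded can be split manually
    print([x for x in conf.get("CGroups.Excluded", "").split(",") if x])
```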
-* __Provisioning.RegenerateSshHostKeyPair__ -_Type: Boolean_ -_Default: y_ +#### __Provisioning.RegenerateSshHostKeyPair__ + +_Type: Boolean_ +_Default: y_ If set, all SSH host key pairs (ecdsa, dsa and rsa) are deleted during the provisioning process from /etc/ssh/. And a single fresh key pair is generated. @@ -236,9 +274,10 @@ re-create SSH key pairs for any missing encryption types when the SSH daemon is restarted (for example, upon a reboot). -* __Provisioning.SshHostKeyPairType__ -_Type: String_ -_Default: rsa_ +#### __Provisioning.SshHostKeyPairType__ + +_Type: String_ +_Default: rsa_ This can be set to an encryption algorithm type that is supported by the SSH daemon on the VM. The typically supported values are "rsa", "dsa" and "ecdsa". @@ -246,9 +285,10 @@ use putty.exe on Windows to connect to a Linux deployment, please use "rsa" or "dsa". -* __Provisioning.MonitorHostName__ -_Type: Boolean_ -_Default: y_ +#### __Provisioning.MonitorHostName__ + +_Type: Boolean_ +_Default: y_ If set, waagent will monitor the Linux VM for hostname changes (as returned by the "hostname" command) and automatically update the networking configuration in @@ -256,89 +296,99 @@ servers, networking will be restarted in the VM. This will result in brief loss of Internet connectivity. -* __Provisioning.DecodeCustomData__ -_Type: Boolean_ -_Default: n_ +#### __Provisioning.DecodeCustomData__ + +_Type: Boolean_ +_Default: n_ If set, waagent will decode CustomData from Base64. -* __Provisioning.ExecuteCustomData__ -_Type: Boolean_ -_Default: n_ +#### __Provisioning.ExecuteCustomData__ + +_Type: Boolean_ +_Default: n_ If set, waagent will execute CustomData after provisioning. -* __Provisioning.PasswordCryptId__ -_Type:String_ -_Default:6_ - -Algorithm used by crypt when generating password hash. - 1 - MD5 - 2a - Blowfish - 5 - SHA-256 - 6 - SHA-512 - -* __Provisioning.PasswordCryptSaltLength__ -_Type:String_ -_Default:10_ +#### __Provisioning.PasswordCryptId__ + +_Type: String_ +_Default: 6_ + +Algorithm used by crypt when generating password hash. + +* 1 - MD5 +* 2a - Blowfish +* 5 - SHA-256 +* 6 - SHA-512 + +#### __Provisioning.PasswordCryptSaltLength__ + +_Type: String_ +_Default: 10_ Length of random salt used when generating password hash. -* __ResourceDisk.Format__ -_Type: Boolean_ -_Default: y_ - -If set, the resource disk provided by the platform will be formatted and mounted -by waagent if the filesystem type requested by the user in -"ResourceDisk.Filesystem" is anything other than "ntfs". A single partition of -type Linux (83) will be made available on the disk. Note that this partition -will not be formatted if it can be successfully mounted. - -* __ResourceDisk.Filesystem__ -_Type: String_ -_Default: ext4_ +#### __ResourceDisk.Format__ + +_Type: Boolean_ +_Default: y_ + +If set, the resource disk provided by the platform will be formatted and mounted by waagent if the filesystem type requested by the user in "ResourceDisk.Filesystem" is anything other than "ntfs". A single partition of +type Linux (83) will be made available on the disk. Note that this partition will not be formatted if it can be successfully mounted. + +#### __ResourceDisk.Filesystem__ + +_Type: String_ +_Default: ext4_ This specifies the filesystem type for the resource disk. Supported values vary by Linux distribution. If the string is X, then mkfs.X should be present on the Linux image. SLES 11 images should typically use 'ext3'. BSD images should use 'ufs2' here. 
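The `Provisioning.PasswordCryptId` and `Provisioning.PasswordCryptSaltLength` options described above map onto glibc crypt(3) settings strings (for example `$6$<salt>` for SHA-512). Below is a minimal sketch of producing such a hash with Python's `crypt` module; it is illustrative only, not the agent's actual password-handling code, and a real implementation should draw the salt from a cryptographically secure source.

```python
# Illustrative sketch of generating a password hash with glibc crypt(3),
# using the semantics described for Provisioning.PasswordCryptId
# (1 = MD5, 2a = Blowfish, 5 = SHA-256, 6 = SHA-512) and
# Provisioning.PasswordCryptSaltLength (default 10).
# Not the agent's actual implementation; for real use, prefer a
# cryptographically secure random source for the salt.
import crypt
import random
import string

def gen_password_hash(password, crypt_id="6", salt_len=10):
    # Salt alphabet accepted by crypt(3): [a-zA-Z0-9./]
    alphabet = string.ascii_letters + string.digits + "./"
    salt = "".join(random.choice(alphabet) for _ in range(salt_len))
    # The settings string looks like "$6$<salt>" for SHA-512
    return crypt.crypt(password, "${0}${1}".format(crypt_id, salt))

print(gen_password_hash("example-password"))
```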
-* __ResourceDisk.MountPoint__ -_Type: String_ -_Default: /mnt/resource_ +#### __ResourceDisk.MountPoint__ + +_Type: String_ +_Default: /mnt/resource_ This specifies the path at which the resource disk is mounted. -* __ResourceDisk.MountOptions__ -_Type: String_ -_Default: None_ +#### __ResourceDisk.MountOptions__ + +_Type: String_ +_Default: None_ Specifies disk mount options to be passed to the mount -o command. This is a comma separated list of values, ex. 'nodev,nosuid'. See mount(8) for details. -* __ResourceDisk.EnableSwap__ -_Type: Boolean_ -_Default: n_ +#### __ResourceDisk.EnableSwap__ + +_Type: Boolean_ +_Default: n_ If set, a swap file (/swapfile) is created on the resource disk and added to the system swap space. -* __ResourceDisk.SwapSizeMB__ -_Type: Integer_ -_Default: 0_ - -The size of the swap file in megabytes. - -* Logs.Verbose -_Type: Boolean_ -_Default: n_ +#### __ResourceDisk.SwapSizeMB__ + +_Type: Integer_ +_Default: 0_ + +The size of the swap file in megabytes. + +#### __Logs.Verbose__ + +_Type: Boolean_ +_Default: n_ If set, log verbosity is boosted. Waagent logs to /var/log/waagent.log and leverages the system logrotate functionality to rotate logs. -* __OS.AllowHTTP__ -_Type: Boolean_ -_Default: n_ +#### __OS.AllowHTTP__ + +_Type: Boolean_ +_Default: n_ If set to `y` and SSL support is not compiled into Python, the agent will fall-back to use HTTP. Otherwise, if SSL support is not compiled into Python, the agent will fail @@ -346,95 +396,143 @@ Note: Allowing HTTP may unintentionally expose secure data. -* __OS.EnableRDMA__ -_Type: Boolean_ -_Default: n_ +#### __OS.EnableRDMA__ + +_Type: Boolean_ +_Default: n_ If set, the agent will attempt to install and then load an RDMA kernel driver that matches the version of the firmware on the underlying hardware. -* __OS.EnableFIPS__ -_Type: Boolean_ -_Default: n_ +#### __OS.EnableFIPS__ + +_Type: Boolean_ +_Default: n_ If set, the agent will emit into the environment "OPENSSL_FIPS=1" when executing OpenSSL commands. This signals OpenSSL to use any installed FIPS-compliant libraries. -Note that the agent itself has no FIPS-specific code. _If no FIPS-compliant are +Note that the agent itself has no FIPS-specific code. _If no FIPS-compliant certificates are installed, then enabling this option will cause all OpenSSL commands to fail._ -* __OS.RootDeviceScsiTimeout__ -_Type: Integer_ -_Default: 300_ +#### __OS.RootDeviceScsiTimeout__ + +_Type: Integer_ +_Default: 300_ This configures the SCSI timeout in seconds on the root device. If not set, the system defaults are used. -* __OS.OpensslPath__ -_Type: String_ -_Default: None_ +#### __OS.OpensslPath__ + +_Type: String_ +_Default: None_ This can be used to specify an alternate path for the openssl binary to use for cryptographic operations. -* __OS.SshClientAliveInterval__ -_Type: Integer_ +#### __OS.SshClientAliveInterval__ + +_Type: Integer_ _Default: 180_ This values sets the number of seconds the agent uses for the SSH ClientAliveInterval configuration option. -* __OS.SshDir__ -_Type: String_ -_Default: `/etc/ssh`_ +#### __OS.SshDir__ + +_Type: String_ +_Default: `/etc/ssh`_ This option can be used to override the normal location of the SSH configuration directory. -* __HttpProxy.Host, HttpProxy.Port__ -_Type: String_ -_Default: None_ +#### __HttpProxy.Host, HttpProxy.Port__ + +_Type: String_ +_Default: None_ If set, the agent will use this proxy server to access the internet. These values *will* override the `http_proxy` or `https_proxy` environment variables. 
Lastly, `HttpProxy.Host` is required (if to be used) and `HttpProxy.Port` is optional. -### APPENDIX +#### __CGroups.EnforceLimits__ -We do not maintain packaging information in this repo but some samples -are shown below as a reference. See the downstream distribution -repositories for officially maintained packaging. +_Type: Boolean_ +_Default: y_ + +If set, the agent will attempt to set cgroups limits for cpu and memory for the agent process itself +as well as extension processes. See the wiki for further details on this. + +#### __CGroups.Excluded__ + +_Type: String_ +_Default: customscript,runcommand_ + +The list of extensions which will be excluded from cgroups limits. This should be comma separated. + +### Telemetry + +WALinuxAgent collects usage data and sends it to Microsoft to help improve our products and services. The data collected is used to track service health and +assist with Azure support requests. Data collected does not include any personally identifiable information. Read our [privacy statement](http://go.microsoft.com/fwlink/?LinkId=521839) +to learn more. + +WALinuxAgent does not support disabling telemetry at this time. WALinuxAgent must be removed to disable telemetry collection. If you need this feature, +please open an issue in GitHub and explain your requirement. + +### Appendix + +We do not maintain packaging information in this repo but some samples are shown below as a reference. See the downstream distribution repositories for officially maintained packaging. #### deb packages -The official Ubuntu WALinuxAgent package can be found here: -https://launchpad.net/ubuntu/+source/walinuxagent +The official Ubuntu WALinuxAgent package can be found [here](https://launchpad.net/ubuntu/+source/walinuxagent). Run once: - 1. Install required packages: - `sudo apt-get -y install ubuntu-dev-tools pbuilder python-all debhelper` - - 2. Create the pbuilder environment: - `sudo pbuilder create --debootstrapopts --variant=buildd` - - 3. Obtain from a downstream package repo + +1. Install required packages + + ```bash + sudo apt-get -y install ubuntu-dev-tools pbuilder python-all debhelper + ``` + +2. Create the pbuilder environment + + ```bash + sudo pbuilder create --debootstrapopts --variant=buildd + ``` + +3. Obtain `waagent.dsc` from a downstream package repo To compile the package, from the top-most directory: - 1. Build the source package: - `dpkg-buildpackage -S` - 2. Build the package: - `sudo pbuilder build ` +1. Build the source package + + ```bash + dpkg-buildpackage -S + ``` + +2. Build the package + + ```bash + sudo pbuilder build waagent.dsc + ``` + +3. Fetch the built package, usually from `/var/cache/pbuilder/result` - 3. Fetch the built package, usually from `/var/cache/pbuilder/result` - #### rpm packages -The instructions below describe how to build an rpm package. +The instructions below describe how to build an rpm package. + +1. Install setuptools + + ```bash + curl https://bootstrap.pypa.io/ez_setup.py -o - | python + ``` + +2. The following command will build the binary and source RPMs: - 1. Install setuptools - `curl https://bootstrap.pypa.io/ez_setup.py -o - | python` - - 2. 
The following command will build the binary and source RPMs: - `python setup.py bdist_rpm` + ```bash + python setup.py bdist_rpm + ``` ----- diff -Nru walinuxagent-2.2.21+really2.2.20/setup.py walinuxagent-2.2.32/setup.py --- walinuxagent-2.2.21+really2.2.20/setup.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/setup.py 2018-09-30 15:05:27.000000000 +0000 @@ -26,6 +26,7 @@ import setuptools from setuptools import find_packages from setuptools.command.install import install as _install +import sys root_dir = os.path.dirname(os.path.abspath(__file__)) os.chdir(root_dir) @@ -131,7 +132,7 @@ # Ubuntu15.04+ uses systemd set_systemd_files(data_files, src=["init/ubuntu/walinuxagent.service"]) - elif name == 'suse': + elif name == 'suse' or name == 'opensuse': set_bin_files(data_files) set_conf_files(data_files, src=["config/suse/waagent.conf"]) set_logrotate_files(data_files) @@ -153,6 +154,20 @@ set_bin_files(data_files, dest="/usr/local/sbin") set_conf_files(data_files, src=["config/openbsd/waagent.conf"]) set_openbsd_rc_files(data_files) + elif name == 'debian': + set_bin_files(data_files) + set_conf_files(data_files, src=["config/debian/waagent.conf"]) + set_logrotate_files(data_files) + set_udev_files(data_files, dest="/lib/udev/rules.d") + elif name == 'iosxe': + set_bin_files(data_files) + set_conf_files(data_files, src=["config/iosxe/waagent.conf"]) + set_logrotate_files(data_files) + set_udev_files(data_files) + set_systemd_files(data_files, dest="/usr/lib/systemd/system") + if version.startswith("7.1"): + # TODO this is a mitigation to systemctl bug on 7.1 + set_sysv_files(data_files) else: # Use default setting set_bin_files(data_files) @@ -198,6 +213,14 @@ osutil.stop_agent_service() osutil.start_agent_service() +# Note to packagers and users from source. +# In version 3.5 of Python distribution information handling in the platform +# module was deprecated. Depending on the Linux distribution the +# implementation may be broken prior to Python 3.7 wher the functionality +# will be removed from Python 3 +requires = [] +if float(sys.version[:3]) >= 3.7: + requires = ['distro'] setuptools.setup( name=AGENT_NAME, @@ -208,8 +231,9 @@ platforms='Linux', url='https://github.com/Azure/WALinuxAgent', license='Apache License Version 2.0', - packages=find_packages(exclude=["tests"]), + packages=find_packages(exclude=["tests*"]), py_modules=["__main__"], + install_requires=requires, cmdclass={ 'install': install } diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/dhcp/__init__.py walinuxagent-2.2.32/tests/common/dhcp/__init__.py --- walinuxagent-2.2.21+really2.2.20/tests/common/dhcp/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/common/dhcp/__init__.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/dhcp/test_dhcp.py walinuxagent-2.2.32/tests/common/dhcp/test_dhcp.py --- walinuxagent-2.2.21+really2.2.20/tests/common/dhcp/test_dhcp.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/common/dhcp/test_dhcp.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import mock diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/__init__.py walinuxagent-2.2.32/tests/common/__init__.py --- walinuxagent-2.2.21+really2.2.20/tests/common/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/common/__init__.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/osutil/__init__.py walinuxagent-2.2.32/tests/common/osutil/__init__.py --- walinuxagent-2.2.21+really2.2.20/tests/common/osutil/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/common/osutil/__init__.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/osutil/mock_osutil.py walinuxagent-2.2.32/tests/common/osutil/mock_osutil.py --- walinuxagent-2.2.21+really2.2.20/tests/common/osutil/mock_osutil.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/common/osutil/mock_osutil.py 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,53 @@ +# Copyright Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ +# + +from azurelinuxagent.common.osutil.default import DefaultOSUtil + +class MockOSUtil(DefaultOSUtil): + def __init__(self): + self.all_users = {} + self.sudo_users = set() + self.jit_enabled = True + + def useradd(self, username, expiration=None, comment=None): + if username == "": + raise Exception("test exception for bad username") + if username in self.all_users: + raise Exception("test exception, user already exists") + self.all_users[username] = (username, None, None, None, comment, None, None, expiration) + + def conf_sudoer(self, username, nopasswd=False, remove=False): + if not remove: + self.sudo_users.add(username) + else: + self.sudo_users.remove(username) + + def chpasswd(self, username, password, crypt_id=6, salt_len=10): + if password == "": + raise Exception("test exception for bad password") + user = self.all_users[username] + self.all_users[username] = (user[0], password, user[2], user[3], user[4], user[5], user[6], user[7]) + + def del_account(self, username): + if username == "": + raise Exception("test exception, bad data") + if username not in self.all_users: + raise Exception("test exception, user does not exist to delete") + self.all_users.pop(username) + + def get_users(self): + return self.all_users.values() \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_bigip.py walinuxagent-2.2.32/tests/common/osutil/test_bigip.py --- walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_bigip.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/common/osutil/test_bigip.py 2018-09-30 15:05:27.000000000 +0000 @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import os diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_default.py walinuxagent-2.2.32/tests/common/osutil/test_default.py --- walinuxagent-2.2.21+really2.2.20/tests/common/osutil/test_default.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/common/osutil/test_default.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,23 +12,33 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import socket import glob import mock +import traceback import azurelinuxagent.common.osutil.default as osutil import azurelinuxagent.common.utils.shellutil as shellutil +import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.exception import OSUtilError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil -from azurelinuxagent.common.utils import fileutil -from azurelinuxagent.common.utils.flexible_version import FlexibleVersion from tests.tools import * +actual_get_proc_net_route = 'azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_net_route' + +def fake_is_loopback(_, iface): + return iface.startswith('lo') + + +def running_under_travis(): + return 'TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true' + + class TestOSUtil(AgentTestCase): def test_restart(self): # setup @@ -88,7 +98,83 @@ self.assertTrue(msg in ustr(ose)) self.assertTrue(patch_run.call_count == 6) - def test_get_first_if(self): + def test_empty_proc_net_route(self): + routing_table = "" + + mo = mock.mock_open(read_data=routing_table) + with patch(open_patch(), mo): + self.assertEqual(len(osutil.DefaultOSUtil().read_route_table()), 0) + + def test_no_routes(self): + routing_table = 'Iface\tDestination\tGateway \tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU\tWindow\tIRTT \n' + + mo = mock.mock_open(read_data=routing_table) + with patch(open_patch(), mo): + raw_route_list = osutil.DefaultOSUtil().read_route_table() + + self.assertEqual(len(osutil.DefaultOSUtil().get_list_of_routes(raw_route_list)), 0) + + def test_bogus_proc_net_route(self): + routing_table = 'Iface\tDestination\tGateway \tFlags\t\tUse\tMetric\t\neth0\t00000000\t00000000\t0001\t\t0\t0\n' + + mo = mock.mock_open(read_data=routing_table) + with patch(open_patch(), mo): + raw_route_list = osutil.DefaultOSUtil().read_route_table() + + self.assertEqual(len(osutil.DefaultOSUtil().get_list_of_routes(raw_route_list)), 0) + + def test_valid_routes(self): + routing_table = \ + 'Iface\tDestination\tGateway \tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU\tWindow\tIRTT \n' \ + 'eth0\t00000000\tC1BB910A\t0003\t0\t0\t0\t00000000\t0\t0\t0 \n' \ + 'eth0\tC0BB910A\t00000000\t0001\t0\t0\t0\tC0FFFFFF\t0\t0\t0 \n' \ + 'eth0\t10813FA8\tC1BB910A\t000F\t0\t0\t0\tFFFFFFFF\t0\t0\t0 \n' \ + 'eth0\tFEA9FEA9\tC1BB910A\t0007\t0\t0\t0\tFFFFFFFF\t0\t0\t0 \n' \ + 'docker0\t002BA8C0\t00000000\t0001\t0\t0\t10\t00FFFFFF\t0\t0\t0 \n' + known_sha1_hash = b'\x1e\xd1k\xae[\xf8\x9b\x1a\x13\xd0\xbbT\xa4\xe3Y\xa3\xdd\x0b\xbd\xa9' + + mo = mock.mock_open(read_data=routing_table) + with patch(open_patch(), mo): + raw_route_list = osutil.DefaultOSUtil().read_route_table() + + self.assertEqual(len(raw_route_list), 6) + self.assertEqual(textutil.hash_strings(raw_route_list), known_sha1_hash) + + route_list = osutil.DefaultOSUtil().get_list_of_routes(raw_route_list) + + self.assertEqual(len(route_list), 5) + self.assertEqual(route_list[0].gateway_quad(), '10.145.187.193') + self.assertEqual(route_list[1].gateway_quad(), '0.0.0.0') + self.assertEqual(route_list[1].mask_quad(), '255.255.255.192') + self.assertEqual(route_list[2].destination_quad(), '168.63.129.16') + self.assertEqual(route_list[1].flags, 1) + self.assertEqual(route_list[2].flags, 15) + self.assertEqual(route_list[3].flags, 7) + self.assertEqual(route_list[3].metric, 0) + self.assertEqual(route_list[4].metric, 10) + self.assertEqual(route_list[0].interface, 'eth0') + 
self.assertEqual(route_list[4].interface, 'docker0') + + @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.get_primary_interface', return_value='eth0') + @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil._get_all_interfaces', return_value={'eth0':'10.0.0.1'}) + @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.is_loopback', fake_is_loopback) + def test_get_first_if(self, get_all_interfaces_mock, get_primary_interface_mock): + """ + Validate that the agent can find the first active non-loopback + interface. + + This test case used to run live, but not all developers have an eth* + interface. It is perfectly valid to have a br*, but this test does not + account for that. + """ + ifname, ipaddr = osutil.DefaultOSUtil().get_first_if() + self.assertEqual(ifname, 'eth0') + self.assertEqual(ipaddr, '10.0.0.1') + + @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.get_primary_interface', return_value='bogus0') + @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil._get_all_interfaces', return_value={'eth0':'10.0.0.1', 'lo': '127.0.0.1'}) + @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.is_loopback', fake_is_loopback) + def test_get_first_if_nosuchprimary(self, get_all_interfaces_mock, get_primary_interface_mock): ifname, ipaddr = osutil.DefaultOSUtil().get_first_if() self.assertTrue(ifname.startswith('eth')) self.assertTrue(ipaddr is not None) @@ -97,9 +183,31 @@ except socket.error: self.fail("not a valid ip address") + def test_get_first_if_all_loopback(self): + fake_ifaces = {'lo':'127.0.0.1'} + with patch.object(osutil.DefaultOSUtil, 'get_primary_interface', return_value='bogus0'): + with patch.object(osutil.DefaultOSUtil, '_get_all_interfaces', return_value=fake_ifaces): + self.assertEqual(('', ''), osutil.DefaultOSUtil().get_first_if()) + + def test_get_all_interfaces(self): + loopback_count = 0 + non_loopback_count = 0 + + for iface in osutil.DefaultOSUtil()._get_all_interfaces(): + if iface == 'lo': + loopback_count += 1 + else: + non_loopback_count += 1 + + self.assertEqual(loopback_count, 1, 'Exactly 1 loopback network interface should exist') + self.assertGreater(loopback_count, 0, 'At least 1 non-loopback network interface should exist') + def test_isloopback(self): - self.assertTrue(osutil.DefaultOSUtil().is_loopback(b'lo')) - self.assertFalse(osutil.DefaultOSUtil().is_loopback(b'eth0')) + for iface in osutil.DefaultOSUtil()._get_all_interfaces(): + if iface == 'lo': + self.assertTrue(osutil.DefaultOSUtil().is_loopback(iface)) + else: + self.assertFalse(osutil.DefaultOSUtil().is_loopback(iface)) def test_isprimary(self): routing_table = "\ @@ -178,6 +286,7 @@ try: osutil.DefaultOSUtil().get_first_if()[0] except Exception as e: + print(traceback.format_exc()) exception = True self.assertFalse(exception) @@ -199,12 +308,23 @@ self.assertTrue(endpoint is not None) self.assertEqual(endpoint, "168.63.129.16") + def test_dhcp_lease_custom_dns(self): + """ + Validate that the wireserver address is coming from option 245 + (on default configurations the address is also available in the domain-name-servers option, but users + may set up a custom dns server on their vnet) + """ + with patch.object(glob, "glob", return_value=['/var/lib/dhcp/dhclient.eth0.leases']): + with patch(open_patch(), mock.mock_open(read_data=load_data("dhcp.leases.custom.dns"))): + endpoint = get_osutil(distro_name='ubuntu', distro_version='14.04').get_dhcp_lease_endpoint() + self.assertEqual(endpoint, "168.63.129.16") + def test_dhcp_lease_multi(self): with 
patch.object(glob, "glob", return_value=['/var/lib/dhcp/dhclient.eth0.leases']): with patch(open_patch(), mock.mock_open(read_data=load_data("dhcp.leases.multi"))): endpoint = get_osutil(distro_name='ubuntu', distro_version='12.04').get_dhcp_lease_endpoint() self.assertTrue(endpoint is not None) - self.assertEqual(endpoint, "second") + self.assertEqual(endpoint, "168.63.129.2") def test_get_total_mem(self): """ @@ -516,6 +636,16 @@ self.assertEqual(-1, util.get_firewall_dropped_packets("not used")) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') + def test_get_firewall_dropped_packets_transient_error_ignored(self, mock_output): + osutil._enable_firewall = True + util = osutil.DefaultOSUtil() + + mock_output.side_effect = [ + (0, "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION)), + (3, "can't initialize iptables table `security': iptables who? (do you need to insmod?)")] + self.assertEqual(0, util.get_firewall_dropped_packets("not used")) + + @patch('azurelinuxagent.common.utils.shellutil.run_get_output') def test_get_firewall_dropped_packets(self, mock_output): osutil._enable_firewall = True util = osutil.DefaultOSUtil() @@ -638,6 +768,34 @@ ]) self.assertFalse(osutil._enable_firewall) + @patch('azurelinuxagent.common.utils.shellutil.run_get_output') + @patch('azurelinuxagent.common.utils.shellutil.run') + def test_enable_firewall_checks_for_invalid_iptables_options(self, mock_run, mock_output): + osutil._enable_firewall = True + util = osutil.DefaultOSUtil() + + dst = '1.2.3.4' + version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) + wait = "-w" + + # iptables uses the following exit codes + # 0 - correct function + # 1 - other errors + # 2 - errors which appear to be caused by invalid or abused command + # line parameters + mock_run.side_effect = [2] + mock_output.return_value = (0, version) + + self.assertFalse(util.enable_firewall(dst_ip='1.2.3.4', uid=42)) + self.assertFalse(osutil._enable_firewall) + + mock_run.assert_has_calls([ + call(osutil.FIREWALL_DROP.format(wait, "C", dst), chk_err=False), + ]) + mock_output.assert_has_calls([ + call(osutil.IPTABLES_VERSION) + ]) + @patch('os.getuid', return_value=42) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') @patch('azurelinuxagent.common.utils.shellutil.run') @@ -670,12 +828,20 @@ version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) wait = "-w" - mock_run.side_effect = [0, 0] + mock_run.side_effect = [0, 1, 0, 1, 0, 1] mock_output.side_effect = [(0, version), (0, "Output")] - self.assertTrue(util.remove_firewall()) + self.assertTrue(util.remove_firewall(dst, uid)) mock_run.assert_has_calls([ - call(osutil.FIREWALL_FLUSH.format(wait), chk_err=True) + # delete rules < 2.2.26 + call(osutil.FIREWALL_DELETE_CONNTRACK_ACCEPT.format(wait, dst), chk_err=False), + call(osutil.FIREWALL_DELETE_CONNTRACK_ACCEPT.format(wait, dst), chk_err=False), + call(osutil.FIREWALL_DELETE_OWNER_ACCEPT.format(wait, dst, uid), chk_err=False), + call(osutil.FIREWALL_DELETE_OWNER_ACCEPT.format(wait, dst, uid), chk_err=False), + + # delete rules >= 2.2.26 + call(osutil.FIREWALL_DELETE_CONNTRACK_DROP.format(wait, dst), chk_err=False), + call(osutil.FIREWALL_DELETE_CONNTRACK_DROP.format(wait, dst), chk_err=False), ]) mock_output.assert_has_calls([ call(osutil.IPTABLES_VERSION) @@ -689,15 +855,17 @@ osutil._enable_firewall = True util = osutil.DefaultOSUtil() + dst_ip='1.2.3.4' + uid=42 version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) wait = "-w" - mock_run.side_effect = [1, 0] + 
mock_run.side_effect = [2] mock_output.side_effect = [(0, version), (1, "Output")] - self.assertFalse(util.remove_firewall()) + self.assertFalse(util.remove_firewall(dst_ip, uid)) mock_run.assert_has_calls([ - call(osutil.FIREWALL_FLUSH.format(wait), chk_err=True) + call(osutil.FIREWALL_DELETE_CONNTRACK_ACCEPT.format(wait, dst_ip), chk_err=False), ]) mock_output.assert_has_calls([ call(osutil.IPTABLES_VERSION) @@ -713,6 +881,17 @@ self.assertTrue(mock_run.call_count == 1) self.assertTrue(mock_output.call_count == 1) + @skip_if_predicate_true(running_under_travis, "The ip command isn't available in Travis") + def test_get_nic_state(self): + state = osutil.DefaultOSUtil().get_nic_state() + self.assertNotEqual(state, {}) + self.assertGreater(len(state.keys()), 1) + + another_state = osutil.DefaultOSUtil().get_nic_state() + name = list(another_state.keys())[0] + another_state[name].add_ipv4("xyzzy") + self.assertNotEqual(state, another_state) + if __name__ == '__main__': unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/test_cgroups.py walinuxagent-2.2.32/tests/common/test_cgroups.py --- walinuxagent-2.2.21+really2.2.20/tests/common/test_cgroups.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/common/test_cgroups.py 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,270 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
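# A minimal sketch (an illustration, not part of the upstream patch) of the
# deletion loop the remove_firewall tests above are pinning down: each rule
# variant is deleted until `iptables -D` stops returning 0, which is why the
# mocked exit codes [0, 1, 0, 1, 0, 1] stand for "one copy of each of three
# rules removed". The helper name and the run/rule_template parameters are
# assumptions for illustration; shellutil.run and the FIREWALL_DELETE_*
# templates they stand in for appear in the hunk itself.

def delete_rule_until_gone(run, rule_template, wait, dst):
    """Delete duplicates of one iptables rule until iptables reports failure."""
    while run(rule_template.format(wait, dst), chk_err=False) == 0:
        pass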
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from __future__ import print_function + +from azurelinuxagent.common.cgroups import CGroupsTelemetry, CGroups, CGroupsException, BASE_CGROUPS, Cpu, Memory, \ + DEFAULT_MEM_LIMIT_MIN_MB +from azurelinuxagent.common.version import AGENT_NAME +from tests.tools import * + +import os +import random +import time + + +def consume_cpu_time(): + waste = 0 + for x in range(1, 200000): + waste += random.random() + return waste + + +def make_self_cgroups(): + """ + Build a CGroups object for the cgroup to which this process already belongs + + :return: CGroups containing this process + :rtype: CGroups + """ + def path_maker(hierarchy, __): + suffix = CGroups.get_my_cgroup_path(CGroups.get_hierarchy_id('cpu')) + return os.path.join(BASE_CGROUPS, hierarchy, suffix) + + return CGroups("inplace", path_maker) + + +def make_root_cgroups(): + """ + Build a CGroups object for the topmost cgroup + + :return: CGroups for most-encompassing cgroup + :rtype: CGroups + """ + def path_maker(hierarchy, _): + return os.path.join(BASE_CGROUPS, hierarchy) + + return CGroups("root", path_maker) + + +def i_am_root(): + return os.geteuid() == 0 + + +@skip_if_predicate_false(CGroups.enabled, "CGroups not supported in this environment") +class TestCGroups(AgentTestCase): + @classmethod + def setUpClass(cls): + CGroups.setup(True) + super(AgentTestCase, cls).setUpClass() + + def test_cgroup_utilities(self): + """ + Test utilities for querying cgroup metadata + """ + cpu_id = CGroups.get_hierarchy_id('cpu') + self.assertGreater(int(cpu_id), 0) + memory_id = CGroups.get_hierarchy_id('memory') + self.assertGreater(int(memory_id), 0) + self.assertNotEqual(cpu_id, memory_id) + + def test_telemetry_inplace(self): + """ + Test raw measures and basic statistics for the cgroup in which this process is currently running. + """ + cg = make_self_cgroups() + self.assertIn('cpu', cg.cgroups) + self.assertIn('memory', cg.cgroups) + ct = CGroupsTelemetry("test", cg) + cpu = Cpu(ct) + self.assertGreater(cpu.current_system_cpu, 0) + + consume_cpu_time() # Eat some CPU + cpu.update() + + self.assertGreater(cpu.current_cpu_total, cpu.previous_cpu_total) + self.assertGreater(cpu.current_system_cpu, cpu.previous_system_cpu) + + percent_used = cpu.get_cpu_percent() + self.assertGreater(percent_used, 0) + + def test_telemetry_in_place_leaf_cgroup(self): + """ + Ensure this leaf (i.e. not root of cgroup tree) cgroup has distinct metrics from the root cgroup. + """ + # Does nothing on systems where the default cgroup for a randomly-created process (like this test invocation) + # is the root cgroup. 
+ cg = make_self_cgroups() + root = make_root_cgroups() + if cg.cgroups['cpu'] != root.cgroups['cpu']: + ct = CGroupsTelemetry("test", cg) + cpu = Cpu(ct) + self.assertLess(cpu.current_cpu_total, cpu.current_system_cpu) + + consume_cpu_time() # Eat some CPU + time.sleep(1) # Generate some idle time + cpu.update() + self.assertLess(cpu.current_cpu_total, cpu.current_system_cpu) + + def exercise_telemetry_instantiation(self, test_cgroup): + test_extension_name = test_cgroup.name + CGroupsTelemetry.track_cgroup(test_cgroup) + self.assertIn('cpu', test_cgroup.cgroups) + self.assertIn('memory', test_cgroup.cgroups) + self.assertTrue(CGroupsTelemetry.is_tracked(test_extension_name)) + consume_cpu_time() + time.sleep(1) + metrics = CGroupsTelemetry.collect_all_tracked() + my_metrics = metrics[test_extension_name] + self.assertEqual(len(my_metrics), 2) + for item in my_metrics: + metric_family, metric_name, metric_value = item + if metric_family == "Process": + self.assertEqual(metric_name, "% Processor Time") + self.assertGreater(metric_value, 0.0) + elif metric_family == "Memory": + self.assertEqual(metric_name, "Total Memory Usage") + self.assertGreater(metric_value, 100000) + else: + self.fail("Unknown metric {0}/{1} value {2}".format(metric_family, metric_name, metric_value)) + + @skip_if_predicate_false(i_am_root, "Test does not run when non-root") + def test_telemetry_instantiation_as_superuser(self): + """ + Tracking a new cgroup for an extension; collect all metrics. + """ + # Record initial state + initial_cgroup = make_self_cgroups() + + # Put the process into a different cgroup, consume some resources, ensure we see them end-to-end + test_cgroup = CGroups.for_extension("agent_unittest") + test_cgroup.add(os.getpid()) + self.assertNotEqual(initial_cgroup.cgroups['cpu'], test_cgroup.cgroups['cpu']) + self.assertNotEqual(initial_cgroup.cgroups['memory'], test_cgroup.cgroups['memory']) + + self.exercise_telemetry_instantiation(test_cgroup) + + # Restore initial state + CGroupsTelemetry.stop_tracking("agent_unittest") + initial_cgroup.add(os.getpid()) + + @skip_if_predicate_true(i_am_root, "Test does not run when root") + def test_telemetry_instantiation_as_normal_user(self): + """ + Tracking an existing cgroup for an extension; collect all metrics. 
+ """ + self.exercise_telemetry_instantiation(make_self_cgroups()) + + def test_cpu_telemetry(self): + """ + Test Cpu telemetry class + """ + cg = make_self_cgroups() + self.assertIn('cpu', cg.cgroups) + ct = CGroupsTelemetry('test', cg) + self.assertIs(cg, ct.cgroup) + cpu = Cpu(ct) + self.assertIs(cg, cpu.cgt.cgroup) + ticks_before = cpu.current_cpu_total + consume_cpu_time() + time.sleep(1) + cpu.update() + ticks_after = cpu.current_cpu_total + self.assertGreater(ticks_after, ticks_before) + p2 = cpu.get_cpu_percent() + self.assertGreater(p2, 0) + # when running under PyCharm, this is often > 100 + # on a multi-core machine + self.assertLess(p2, 200) + + def test_memory_telemetry(self): + """ + Test Memory telemetry class + """ + cg = make_self_cgroups() + raw_usage_file_contents = cg.get_file_contents('memory', 'memory.usage_in_bytes') + self.assertIsNotNone(raw_usage_file_contents) + self.assertGreater(len(raw_usage_file_contents), 0) + self.assertIn('memory', cg.cgroups) + ct = CGroupsTelemetry('test', cg) + self.assertIs(cg, ct.cgroup) + memory = Memory(ct) + usage_in_bytes = memory.get_memory_usage() + self.assertGreater(usage_in_bytes, 100000) + + def test_format_memory_value(self): + """ + Test formatting of memory amounts into human-readable units + """ + self.assertEqual(-1, CGroups._format_memory_value('bytes', None)) + self.assertEqual(2048, CGroups._format_memory_value('kilobytes', 2)) + self.assertEqual(0, CGroups._format_memory_value('kilobytes', 0)) + self.assertEqual(2048000, CGroups._format_memory_value('kilobytes', 2000)) + self.assertEqual(2048*1024, CGroups._format_memory_value('megabytes', 2)) + self.assertEqual((1024 + 512) * 1024 * 1024, CGroups._format_memory_value('gigabytes', 1.5)) + self.assertRaises(CGroupsException, CGroups._format_memory_value, 'KiloBytes', 1) + + @patch('azurelinuxagent.common.event.add_event') + @patch('azurelinuxagent.common.conf.get_cgroups_enforce_limits') + @patch('azurelinuxagent.common.cgroups.CGroups.set_memory_limit') + @patch('azurelinuxagent.common.cgroups.CGroups.set_cpu_limit') + @patch('azurelinuxagent.common.cgroups.CGroups._try_mkdir') + def assert_limits(self, _, patch_set_cpu, patch_set_memory_limit, patch_get_enforce, patch_add_event, + ext_name, + expected_cpu_limit, + limits_enforced=True, + exception_raised=False): + + should_limit = expected_cpu_limit > 0 + patch_get_enforce.return_value = limits_enforced + + if exception_raised: + patch_set_memory_limit.side_effect = CGroupsException('set_memory_limit error') + + try: + cg = CGroups.for_extension(ext_name) + cg.set_limits() + if exception_raised: + self.fail('exception expected') + except CGroupsException: + if not exception_raised: + self.fail('exception not expected') + + self.assertEqual(should_limit, patch_set_cpu.called) + self.assertEqual(should_limit, patch_set_memory_limit.called) + self.assertEqual(should_limit, patch_add_event.called) + + if should_limit: + actual_cpu_limit = patch_set_cpu.call_args[0][0] + actual_memory_limit = patch_set_memory_limit.call_args[0][0] + event_kw_args = patch_add_event.call_args[1] + + self.assertEqual(expected_cpu_limit, actual_cpu_limit) + self.assertTrue(actual_memory_limit >= DEFAULT_MEM_LIMIT_MIN_MB) + self.assertEqual(event_kw_args['op'], 'SetCGroupsLimits') + self.assertEqual(event_kw_args['is_success'], not exception_raised) + self.assertTrue('{0}%'.format(expected_cpu_limit) in event_kw_args['message']) + self.assertTrue(ext_name in event_kw_args['message']) + self.assertEqual(exception_raised, 'set_memory_limit 
error' in event_kw_args['message']) + + def test_limits(self): + self.assert_limits(ext_name="normal_extension", expected_cpu_limit=40) + self.assert_limits(ext_name="customscript_extension", expected_cpu_limit=-1) + self.assert_limits(ext_name=AGENT_NAME, expected_cpu_limit=10) + self.assert_limits(ext_name="normal_extension", expected_cpu_limit=-1, limits_enforced=False) + self.assert_limits(ext_name=AGENT_NAME, expected_cpu_limit=-1, limits_enforced=False) + self.assert_limits(ext_name="normal_extension", expected_cpu_limit=40, exception_raised=True) diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/test_conf.py walinuxagent-2.2.32/tests/common/test_conf.py --- walinuxagent-2.2.21+really2.2.20/tests/common/test_conf.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/common/test_conf.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import mock @@ -27,45 +27,48 @@ # Note: # -- These values *MUST* match those from data/test_waagent.conf EXPECTED_CONFIGURATION = { - "Provisioning.Enabled" : True, - "Provisioning.UseCloudInit" : True, - "Provisioning.DeleteRootPassword" : True, - "Provisioning.RegenerateSshHostKeyPair" : True, - "Provisioning.SshHostKeyPairType" : "rsa", - "Provisioning.MonitorHostName" : True, - "Provisioning.DecodeCustomData" : False, - "Provisioning.ExecuteCustomData" : False, - "Provisioning.PasswordCryptId" : '6', - "Provisioning.PasswordCryptSaltLength" : 10, - "Provisioning.AllowResetSysUser" : False, - "ResourceDisk.Format" : True, - "ResourceDisk.Filesystem" : "ext4", - "ResourceDisk.MountPoint" : "/mnt/resource", - "ResourceDisk.EnableSwap" : False, - "ResourceDisk.SwapSizeMB" : 0, - "ResourceDisk.MountOptions" : None, - "Logs.Verbose" : False, - "OS.EnableFIPS" : True, - "OS.RootDeviceScsiTimeout" : '300', - "OS.OpensslPath" : '/usr/bin/openssl', - "OS.SshClientAliveInterval" : 42, - "OS.SshDir" : "/notareal/path", - "HttpProxy.Host" : None, - "HttpProxy.Port" : None, - "DetectScvmmEnv" : False, - "Lib.Dir" : "/var/lib/waagent", - "DVD.MountPoint" : "/mnt/cdrom/secure", - "Pid.File" : "/var/run/waagent.pid", - "Extension.LogDir" : "/var/log/azure", - "OS.HomeDir" : "/home", - "OS.EnableRDMA" : False, - "OS.UpdateRdmaDriver" : False, - "OS.CheckRdmaDriver" : False, - "AutoUpdate.Enabled" : True, - "AutoUpdate.GAFamily" : "Prod", - "EnableOverProvisioning" : False, - "OS.AllowHTTP" : False, - "OS.EnableFirewall" : True + "Extensions.Enabled": True, + "Provisioning.Enabled": True, + "Provisioning.UseCloudInit": True, + "Provisioning.DeleteRootPassword": True, + "Provisioning.RegenerateSshHostKeyPair": True, + "Provisioning.SshHostKeyPairType": "rsa", + "Provisioning.MonitorHostName": True, + "Provisioning.DecodeCustomData": False, + "Provisioning.ExecuteCustomData": False, + "Provisioning.PasswordCryptId": '6', + "Provisioning.PasswordCryptSaltLength": 10, + "Provisioning.AllowResetSysUser": False, + "ResourceDisk.Format": True, + "ResourceDisk.Filesystem": "ext4", + "ResourceDisk.MountPoint": "/mnt/resource", + "ResourceDisk.EnableSwap": False, + "ResourceDisk.SwapSizeMB": 0, + "ResourceDisk.MountOptions": None, + 
"Logs.Verbose": False, + "OS.EnableFIPS": True, + "OS.RootDeviceScsiTimeout": '300', + "OS.OpensslPath": '/usr/bin/openssl', + "OS.SshClientAliveInterval": 42, + "OS.SshDir": "/notareal/path", + "HttpProxy.Host": None, + "HttpProxy.Port": None, + "DetectScvmmEnv": False, + "Lib.Dir": "/var/lib/waagent", + "DVD.MountPoint": "/mnt/cdrom/secure", + "Pid.File": "/var/run/waagent.pid", + "Extension.LogDir": "/var/log/azure", + "OS.HomeDir": "/home", + "OS.EnableRDMA": False, + "OS.UpdateRdmaDriver": False, + "OS.CheckRdmaDriver": False, + "AutoUpdate.Enabled": True, + "AutoUpdate.GAFamily": "Prod", + "EnableOverProvisioning": True, + "OS.AllowHTTP": False, + "OS.EnableFirewall": False, + "CGroups.EnforceLimits": False, + "CGroups.Excluded": "customscript,runcommand", } def setUp(self): @@ -78,6 +81,7 @@ def test_key_value_handling(self): self.assertEqual("Value1", self.conf.get("FauxKey1", "Bad")) self.assertEqual("Value2 Value2", self.conf.get("FauxKey2", "Bad")) + self.assertEqual("delalloc,rw,noatime,nobarrier,users,mode=777", self.conf.get("FauxKey3", "Bad")) def test_get_ssh_dir(self): self.assertTrue(get_ssh_dir(self.conf).startswith("/notareal/path")) @@ -110,4 +114,58 @@ for k in TestConf.EXPECTED_CONFIGURATION.keys(): self.assertEqual( TestConf.EXPECTED_CONFIGURATION[k], - configuration[k]) + configuration[k], + k) + + def test_get_agent_disabled_file_path(self): + self.assertEqual(get_disable_agent_file_path(self.conf), + os.path.join(self.tmp_dir, DISABLE_AGENT_FILE)) + + def test_write_agent_disabled(self): + """ + Test writing disable_agent is empty + """ + from azurelinuxagent.pa.provision.default import ProvisionHandler + + disable_file_path = get_disable_agent_file_path(self.conf) + self.assertFalse(os.path.exists(disable_file_path)) + ProvisionHandler.write_agent_disabled() + self.assertTrue(os.path.exists(disable_file_path)) + self.assertEqual('', fileutil.read_file(disable_file_path)) + + def test_get_extensions_enabled(self): + self.assertTrue(get_extensions_enabled(self.conf)) + + @patch('azurelinuxagent.common.conf.ConfigurationProvider.get') + def assert_get_cgroups_excluded(self, patch_get, config, expected_value): + patch_get.return_value = config + self.assertEqual(expected_value, conf.get_cgroups_excluded(self.conf)) + + def test_get_cgroups_excluded(self): + self.assert_get_cgroups_excluded(config=None, + expected_value=[]) + + self.assert_get_cgroups_excluded(config='', + expected_value=[]) + + self.assert_get_cgroups_excluded(config=' ', + expected_value=[]) + + self.assert_get_cgroups_excluded(config=' , ,, ,', + expected_value=[]) + + standard_values = ['customscript', 'runcommand'] + self.assert_get_cgroups_excluded(config='CustomScript, RunCommand', + expected_value=standard_values) + + self.assert_get_cgroups_excluded(config='customScript, runCommand , , ,,', + expected_value=standard_values) + + self.assert_get_cgroups_excluded(config=' customscript,runcommand ', + expected_value=standard_values) + + self.assert_get_cgroups_excluded(config='customscript,, runcommand', + expected_value=standard_values) + + self.assert_get_cgroups_excluded(config=',,customscript ,runcommand', + expected_value=standard_values) diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/test_errorstate.py walinuxagent-2.2.32/tests/common/test_errorstate.py --- walinuxagent-2.2.21+really2.2.20/tests/common/test_errorstate.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/common/test_errorstate.py 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,104 @@ +from datetime 
import timedelta + +from azurelinuxagent.common.errorstate import * +from tests.tools import * + + +class TestErrorState(unittest.TestCase): + def test_errorstate00(self): + """ + If ErrorState is never incremented, it will never trigger. + """ + test_subject = ErrorState(timedelta(seconds=10000)) + self.assertFalse(test_subject.is_triggered()) + self.assertEqual(0, test_subject.count) + self.assertEqual('unknown', test_subject.fail_time) + + def test_errorstate01(self): + """ + If ErrorState is never incremented, and the timedelta is zero it will + not trigger. + """ + test_subject = ErrorState(timedelta(seconds=0)) + self.assertFalse(test_subject.is_triggered()) + self.assertEqual(0, test_subject.count) + self.assertEqual('unknown', test_subject.fail_time) + + def test_errorstate02(self): + """ + If ErrorState is triggered, and the current time is within timedelta + of now it will trigger. + """ + test_subject = ErrorState(timedelta(seconds=0)) + test_subject.incr() + + self.assertTrue(test_subject.is_triggered()) + self.assertEqual(1, test_subject.count) + self.assertEqual('0.0 min', test_subject.fail_time) + + @patch('azurelinuxagent.common.errorstate.datetime') + def test_errorstate03(self, mock_time): + """ + ErrorState will not trigger until + 1. ErrorState has been incr() at least once. + 2. The timedelta from the first incr() has elapsed. + """ + test_subject = ErrorState(timedelta(minutes=15)) + + for x in range(1, 10): + mock_time.utcnow = Mock(return_value=datetime.utcnow() + timedelta(minutes=x)) + + test_subject.incr() + self.assertFalse(test_subject.is_triggered()) + + mock_time.utcnow = Mock(return_value=datetime.utcnow() + timedelta(minutes=30)) + test_subject.incr() + self.assertTrue(test_subject.is_triggered()) + self.assertEqual('29.0 min', test_subject.fail_time) + + def test_errorstate04(self): + """ + If ErrorState is reset the timestamp of the last incr() is reset to + None. 
+ """ + + test_subject = ErrorState(timedelta(minutes=15)) + self.assertTrue(test_subject.timestamp is None) + + test_subject.incr() + self.assertTrue(test_subject.timestamp is not None) + + test_subject.reset() + self.assertTrue(test_subject.timestamp is None) + + def test_errorstate05(self): + """ + Test the fail_time for various scenarios + """ + + test_subject = ErrorState(timedelta(minutes=15)) + self.assertEqual('unknown', test_subject.fail_time) + + test_subject.incr() + self.assertEqual('0.0 min', test_subject.fail_time) + + test_subject.timestamp = datetime.utcnow() - timedelta(seconds=60) + self.assertEqual('1.0 min', test_subject.fail_time) + + test_subject.timestamp = datetime.utcnow() - timedelta(seconds=73) + self.assertEqual('1.22 min', test_subject.fail_time) + + test_subject.timestamp = datetime.utcnow() - timedelta(seconds=120) + self.assertEqual('2.0 min', test_subject.fail_time) + + test_subject.timestamp = datetime.utcnow() - timedelta(seconds=60 * 59) + self.assertEqual('59.0 min', test_subject.fail_time) + + test_subject.timestamp = datetime.utcnow() - timedelta(seconds=60 * 60) + self.assertEqual('1.0 hr', test_subject.fail_time) + + test_subject.timestamp = datetime.utcnow() - timedelta(seconds=60 * 95) + self.assertEqual('1.58 hr', test_subject.fail_time) + + test_subject.timestamp = datetime.utcnow() - timedelta(seconds=60 * 60 * 3) + self.assertEqual('3.0 hr', test_subject.fail_time) diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/test_event.py walinuxagent-2.2.32/tests/common/test_event.py --- walinuxagent-2.2.21+really2.2.20/tests/common/test_event.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/common/test_event.py 2018-09-30 15:05:27.000000000 +0000 @@ -12,24 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. 
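# A short usage sketch of the ErrorState interface exercised by the tests
# above. The wrapper function and its arguments are assumptions for
# illustration; the import path and the incr/is_triggered/reset/fail_time
# members come from the tests themselves. Callers increment the state on
# every failure and escalate only once failures have persisted for the
# configured timedelta.

from datetime import timedelta

from azurelinuxagent.common.errorstate import ErrorState

_monitor_state = ErrorState(timedelta(minutes=15))

def report_if_unhealthy(check, report, state=_monitor_state):
    if check():
        state.reset()                      # healthy again: forget the failure window
        return
    state.incr()                           # record the failure (and its first timestamp)
    if state.is_triggered():               # true only once the timedelta has elapsed
        report("unhealthy for {0}".format(state.fail_time))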
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from __future__ import print_function -from datetime import datetime - -import azurelinuxagent.common.event as event -import azurelinuxagent.common.logger as logger +from datetime import datetime, timedelta from azurelinuxagent.common.event import add_event, \ - mark_event_status, should_emit_event, \ - WALAEventOperation + WALAEventOperation, elapsed_milliseconds from azurelinuxagent.common.future import ustr from azurelinuxagent.common.version import CURRENT_VERSION from tests.tools import * +import azurelinuxagent.common.event as event + class TestEvent(AgentTestCase): def test_event_status_event_marked(self): @@ -49,8 +47,7 @@ self.assertTrue(es.event_succeeded("Foo", "1.2", "FauxOperation")) def test_event_status_records_status(self): - d = tempfile.mkdtemp() - es = event.EventStatus(tempfile.mkdtemp()) + es = event.EventStatus() es.mark_event_status("Foo", "1.2", "FauxOperation", True) self.assertTrue(es.event_succeeded("Foo", "1.2", "FauxOperation")) @@ -70,7 +67,7 @@ self.assertFalse(es.event_succeeded("Foo", "1.2", "FauxOperation")) def test_should_emit_event_ignores_unknown_operations(self): - event.__event_status__ = event.EventStatus(tempfile.mkdtemp()) + event.__event_status__ = event.EventStatus() self.assertTrue(event.should_emit_event("Foo", "1.2", "FauxOperation", True)) self.assertTrue(event.should_emit_event("Foo", "1.2", "FauxOperation", False)) @@ -83,7 +80,7 @@ def test_should_emit_event_handles_known_operations(self): - event.__event_status__ = event.EventStatus(tempfile.mkdtemp()) + event.__event_status__ = event.EventStatus() # Known operations always initially "fire" for op in event.__event_status_operations__: @@ -113,7 +110,7 @@ event.__event_logger__.reset_periodic() event.add_periodic(logger.EVERY_DAY, "FauxEvent") - mock_event.assert_called_once() + self.assertEqual(1, mock_event.call_count) @patch('azurelinuxagent.common.event.EventLogger.add_event') def test_periodic_does_not_emit_if_previously_sent(self, mock_event): @@ -218,3 +215,24 @@ with open(last_event) as last_fh: last_event_text = last_fh.read() self.assertTrue('last event' in last_event_text) + + def test_elapsed_milliseconds(self): + utc_start = datetime.utcnow() + timedelta(days=1) + self.assertEqual(0, elapsed_milliseconds(utc_start)) + + @patch('azurelinuxagent.common.event.EventLogger.save_event') + def test_report_metric(self, mock_event): + event.report_metric("cpu", "%idle", "_total", 10.0) + self.assertEqual(1, mock_event.call_count) + event_json = mock_event.call_args[0][0] + self.assertIn("69B669B9-4AF8-4C50-BDC4-6006FA76E975", event_json) + self.assertIn("%idle", event_json) + import json + event_dictionary = json.loads(event_json) + self.assertEqual(event_dictionary['providerId'], "69B669B9-4AF8-4C50-BDC4-6006FA76E975") + for parameter in event_dictionary["parameters"]: + if parameter['name'] == 'Counter': + self.assertEqual(parameter['value'], '%idle') + break + else: + self.fail("Counter '%idle' not found in event parameters: {0}".format(repr(event_dictionary))) diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/test_logger.py walinuxagent-2.2.32/tests/common/test_logger.py --- walinuxagent-2.2.21+really2.2.20/tests/common/test_logger.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/common/test_logger.py 2018-09-30 15:05:27.000000000 +0000 @@ -12,26 +12,29 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # +import json from datetime import datetime import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.event import add_log_event +from azurelinuxagent.common.version import CURRENT_AGENT, CURRENT_VERSION from tests.tools import * _MSG = "This is our test logging message {0} {1}" _DATA = ["arg1", "arg2"] -class TestLogger(AgentTestCase): +class TestLogger(AgentTestCase): @patch('azurelinuxagent.common.logger.Logger.info') def test_periodic_emits_if_not_previously_sent(self, mock_info): logger.reset_periodic() logger.periodic(logger.EVERY_DAY, _MSG, *_DATA) - mock_info.assert_called_once() + self.assertEqual(1, mock_info.call_count) @patch('azurelinuxagent.common.logger.Logger.info') def test_periodic_does_not_emit_if_previously_sent(self, mock_info): @@ -64,3 +67,38 @@ logger.periodic(logger.EVERY_DAY, _MSG, *_DATA) mock_info.assert_called_once_with(_MSG, *_DATA) + + def test_telemetry_logger(self): + mock = MagicMock() + appender = logger.TelemetryAppender(logger.LogLevel.WARNING, mock) + appender.write(logger.LogLevel.WARNING, "--unit-test--") + + mock.assert_called_once_with(logger.LogLevel.WARNING, "--unit-test--") + + @patch('azurelinuxagent.common.event.EventLogger.save_event') + def test_telemetry_logger1(self, mock_save): + appender = logger.TelemetryAppender(logger.LogLevel.WARNING, add_log_event) + appender.write(logger.LogLevel.WARNING, "--unit-test--") + + self.assertEqual(1, mock_save.call_count) + telemetry_json = json.loads(mock_save.call_args[0][0]) + + self.assertEqual('FFF0196F-EE4C-4EAF-9AA5-776F622DEB4F', telemetry_json['providerId']) + self.assertEqual(7, telemetry_json['eventId']) + + self.assertEqual(5, len(telemetry_json['parameters'])) + for x in telemetry_json['parameters']: + if x['name'] == 'EventName': + self.assertEqual(x['value'], 'Log') + + elif x['name'] == 'CapabilityUsed': + self.assertEqual(x['value'], 'WARNING') + + elif x['name'] == 'Context1': + self.assertEqual(x['value'], '--unit-test--') + + elif x['name'] == 'Context2': + self.assertEqual(x['value'], '') + + elif x['name'] == 'Context3': + self.assertEqual(x['value'], '') diff -Nru walinuxagent-2.2.21+really2.2.20/tests/common/test_version.py walinuxagent-2.2.32/tests/common/test_version.py --- walinuxagent-2.2.21+really2.2.20/tests/common/test_version.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/common/test_version.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from __future__ import print_function @@ -23,10 +23,75 @@ from azurelinuxagent.common.version import set_current_agent, \ AGENT_LONG_VERSION, AGENT_VERSION, AGENT_NAME, AGENT_NAME_PATTERN, \ - get_f5_platform + get_f5_platform, get_distro from tests.tools import * +def freebsd_system(): + return ["FreeBSD"] + + +def freebsd_system_release(x, y, z): + return "10.0" + + +def openbsd_system(): + return ["OpenBSD"] + + +def openbsd_system_release(x, y, z): + return "20.0" + + +def default_system(): + return [""] + + +def default_system_no_linux_distro(): + return '', '', '' + +def default_system_exception(): + raise Exception + + +class TestAgentVersion(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) + return + + @mock.patch('platform.system', side_effect=freebsd_system) + @mock.patch('re.sub', side_effect=freebsd_system_release) + def test_distro_is_correct_format_when_freebsd(self, platform_system_name, mock_variable): + osinfo = get_distro() + freebsd_list = ['freebsd', "10.0", '', 'freebsd'] + self.assertListEqual(freebsd_list, osinfo) + return + + @mock.patch('platform.system', side_effect=openbsd_system) + @mock.patch('re.sub', side_effect=openbsd_system_release) + def test_distro_is_correct_format_when_openbsd(self, platform_system_name, mock_variable): + osinfo = get_distro() + openbsd_list = ['openbsd', "20.0", '', 'openbsd'] + self.assertListEqual(openbsd_list, osinfo) + return + + @mock.patch('platform.system', side_effect=default_system) + @mock.patch('platform.dist', side_effect=default_system_no_linux_distro) + def test_distro_is_correct_format_when_default_case(self, platform_system_name, default_system_no_linux): + osinfo = get_distro() + default_list = ['', '', '', ''] + self.assertListEqual(default_list, osinfo) + return + + @mock.patch('platform.system', side_effect=default_system) + @mock.patch('platform.dist', side_effect=default_system_exception) + def test_distro_is_correct_for_exception_case(self, platform_system_name, default_system_no_linux): + osinfo = get_distro() + default_list = ['unknown', 'FFFF', '', ''] + self.assertListEqual(default_list, osinfo) + return + + class TestCurrentAgentName(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) diff -Nru walinuxagent-2.2.21+really2.2.20/tests/daemon/__init__.py walinuxagent-2.2.32/tests/daemon/__init__.py --- walinuxagent-2.2.21+really2.2.20/tests/daemon/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/daemon/__init__.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/tests/daemon/test_daemon.py walinuxagent-2.2.32/tests/daemon/test_daemon.py --- walinuxagent-2.2.21+really2.2.20/tests/daemon/test_daemon.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/daemon/test_daemon.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,11 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # +from multiprocessing import Process from azurelinuxagent.daemon import * from azurelinuxagent.daemon.main import OPENSSL_FIPS_ENVIRONMENT +from azurelinuxagent.pa.provision.default import ProvisionHandler from tests.tools import * @@ -27,16 +29,17 @@ def __call__(self, *args, **kw): self.count = self.count - 1 - #Stop daemon after restarting for n times + # Stop daemon after restarting for n times if self.count <= 0: self.daemon_handler.running = False raise Exception("Mock unhandled exception") + class TestDaemon(AgentTestCase): @patch("time.sleep") def test_daemon_restart(self, mock_sleep): - #Mock daemon function + # Mock daemon function daemon_handler = get_daemon_handler() mock_daemon = Mock(side_effect=MockDaemonCall(daemon_handler, 2)) daemon_handler.daemon = mock_daemon @@ -51,7 +54,7 @@ @patch("time.sleep") @patch("azurelinuxagent.daemon.main.conf") @patch("azurelinuxagent.daemon.main.sys.exit") - def test_check_pid(self, mock_exit, mock_conf, mock_sleep): + def test_check_pid(self, mock_exit, mock_conf, _): daemon_handler = get_daemon_handler() mock_pid_file = os.path.join(self.tmp_dir, "pid") @@ -65,7 +68,7 @@ @patch("azurelinuxagent.daemon.main.DaemonHandler.check_pid") @patch("azurelinuxagent.common.conf.get_fips_enabled", return_value=True) - def test_set_openssl_fips(self, mock_conf, mock_daemon): + def test_set_openssl_fips(self, _, __): daemon_handler = get_daemon_handler() daemon_handler.running = False with patch.dict("os.environ"): @@ -75,13 +78,58 @@ @patch("azurelinuxagent.daemon.main.DaemonHandler.check_pid") @patch("azurelinuxagent.common.conf.get_fips_enabled", return_value=False) - def test_does_not_set_openssl_fips(self, mock_conf, mock_daemon): + def test_does_not_set_openssl_fips(self, _, __): daemon_handler = get_daemon_handler() daemon_handler.running = False with patch.dict("os.environ"): daemon_handler.run() self.assertFalse(OPENSSL_FIPS_ENVIRONMENT in os.environ) - + + @patch('azurelinuxagent.ga.update.UpdateHandler.run_latest') + @patch('azurelinuxagent.pa.provision.default.ProvisionHandler.run') + @patch('azurelinuxagent.pa.provision.get_provision_handler', return_value=ProvisionHandler()) + def test_daemon_agent_enabled(self, _, patch_run_provision, patch_run_latest): + """ + Agent should run normally when no disable_agent is found + """ + + self.assertFalse(os.path.exists(conf.get_disable_agent_file_path())) + daemon_handler = get_daemon_handler() + + def stop_daemon(child_args): + daemon_handler.running = False + + patch_run_latest.side_effect = stop_daemon + daemon_handler.run() + + self.assertEqual(1, patch_run_provision.call_count) + self.assertEqual(1, patch_run_latest.call_count) + + 
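# A minimal sketch (an assumption, not the daemon's real implementation) of
# the behaviour pinned down by test_daemon_agent_enabled above and the
# disabled-agent test that follows: once provisioning has run, the daemon
# hands control to the update handler only if the disable_agent marker file
# is absent; if provisioning wrote that file, the daemon sleeps forever so
# the VM stays up without the agent.

import os
import time

from azurelinuxagent.common import conf

def run_agent_or_sleep(update_handler):
    if os.path.exists(conf.get_disable_agent_file_path()):
        while True:                        # disable_agent present: never start the agent
            time.sleep(60)
    update_handler.run_latest()            # normal path exercised by test_daemon_agent_enabled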
@patch('azurelinuxagent.ga.update.UpdateHandler.run_latest', side_effect=AgentTestCase.fail) + @patch('azurelinuxagent.pa.provision.default.ProvisionHandler.run', side_effect=ProvisionHandler.write_agent_disabled) + @patch('azurelinuxagent.pa.provision.get_provision_handler', return_value=ProvisionHandler()) + def test_daemon_agent_disabled(self, _, __, patch_run_latest): + """ + Agent should provision, then sleep forever when disable_agent is found + """ + + # file is created by provisioning handler + self.assertFalse(os.path.exists(conf.get_disable_agent_file_path())) + daemon_handler = get_daemon_handler() + + # we need to assert this thread will sleep forever, so fork it + daemon = Process(target=daemon_handler.run) + daemon.start() + daemon.join(timeout=5) + + self.assertTrue(daemon.is_alive()) + daemon.terminate() + + # disable_agent was written, run_latest was not called + self.assertTrue(os.path.exists(conf.get_disable_agent_file_path())) + self.assertEqual(0, patch_run_latest.call_count) + + if __name__ == '__main__': unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/daemon/test_resourcedisk.py walinuxagent-2.2.32/tests/daemon/test_resourcedisk.py --- walinuxagent-2.2.21+really2.2.20/tests/daemon/test_resourcedisk.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/daemon/test_resourcedisk.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from tests.tools import * diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/dhcp.leases.custom.dns walinuxagent-2.2.32/tests/data/dhcp.leases.custom.dns --- walinuxagent-2.2.21+really2.2.20/tests/data/dhcp.leases.custom.dns 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/data/dhcp.leases.custom.dns 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,56 @@ +lease { + interface "eth0"; + fixed-address 10.0.1.4; + server-name "RDE41D2D9BB18C"; + option subnet-mask 255.255.255.0; + option dhcp-lease-time 4294967295; + option routers 10.0.1.1; + option dhcp-message-type 5; + option dhcp-server-identifier 168.63.129.16; + option domain-name-servers invalid; + option dhcp-renewal-time 4294967295; + option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; + option unknown-245 a8:3f:81:01; + option dhcp-rebinding-time 4294967295; + option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; + renew 0 2152/07/23 23:27:10; + rebind 0 2152/07/23 23:27:10; + expire 0 never; +} +lease { + interface "eth0"; + fixed-address 10.0.1.4; + server-name "RDE41D2D9BB18C"; + option subnet-mask 255.255.255.0; + option dhcp-lease-time 4294967295; + option routers 10.0.1.1; + option dhcp-message-type 5; + option dhcp-server-identifier 168.63.129.16; + option domain-name-servers expired; + option dhcp-renewal-time 4294967295; + option unknown-245 a8:3f:81:02; + option dhcp-rebinding-time 4294967295; + option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; + renew 4 2015/06/16 16:58:54; + rebind 4 2015/06/16 16:58:54; + expire 4 2015/06/16 16:58:54; +} +lease { + interface "eth0"; + fixed-address 10.0.1.4; + server-name "RDE41D2D9BB18C"; + option 
subnet-mask 255.255.255.0; + option dhcp-lease-time 4294967295; + option routers 10.0.1.1; + option dhcp-message-type 5; + option dhcp-server-identifier 168.63.129.16; + option domain-name-servers 8.8.8.8; + option dhcp-renewal-time 4294967295; + option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; + option unknown-245 a8:3f:81:10; + option dhcp-rebinding-time 4294967295; + option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; + renew 0 2152/07/23 23:27:10; + rebind 0 2152/07/23 23:27:10; + expire 0 2152/07/23 23:27:10; +} diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/dhcp.leases.multi walinuxagent-2.2.32/tests/data/dhcp.leases.multi --- walinuxagent-2.2.21+really2.2.20/tests/data/dhcp.leases.multi 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/data/dhcp.leases.multi 2018-09-30 15:05:27.000000000 +0000 @@ -10,7 +10,7 @@ option domain-name-servers first; option dhcp-renewal-time 4294967295; option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; - option unknown-245 a8:3f:81:10; + option unknown-245 a8:3f:81:01; option dhcp-rebinding-time 4294967295; option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; renew 0 2152/07/23 23:27:10; @@ -29,7 +29,7 @@ option domain-name-servers second; option dhcp-renewal-time 4294967295; option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; - option unknown-245 a8:3f:81:10; + option unknown-245 a8:3f:81:02; option dhcp-rebinding-time 4294967295; option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; renew 0 2152/07/23 23:27:10; @@ -48,7 +48,7 @@ option domain-name-servers expired; option dhcp-renewal-time 4294967295; option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; - option unknown-245 a8:3f:81:10; + option unknown-245 a8:3f:81:03; option dhcp-rebinding-time 4294967295; option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; renew 0 2152/07/23 23:27:10; Binary files /tmp/tmpC0k90K/Nk2cteo9UB/walinuxagent-2.2.21+really2.2.20/tests/data/ga/WALinuxAgent-2.2.19.zip and /tmp/tmpC0k90K/jv1wWjgqsK/walinuxagent-2.2.32/tests/data/ga/WALinuxAgent-2.2.19.zip differ Binary files /tmp/tmpC0k90K/Nk2cteo9UB/walinuxagent-2.2.21+really2.2.20/tests/data/ga/WALinuxAgent-2.2.32.2.zip and /tmp/tmpC0k90K/jv1wWjgqsK/walinuxagent-2.2.32/tests/data/ga/WALinuxAgent-2.2.32.2.zip differ diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/imds/unicode.json walinuxagent-2.2.32/tests/data/imds/unicode.json --- walinuxagent-2.2.21+really2.2.20/tests/data/imds/unicode.json 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/data/imds/unicode.json 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,46 @@ +{ + "compute": { + "location": "wéstus", + "name": "héalth", + "offer": "UbuntuSérvér", + "osType": "Linux", + "placementGroupId": "", + "platformFaultDomain": "0", + "platformUpdateDomain": "0", + "publisher": "Canonical", + "resourceGroupName": "tésts", + "sku": "16.04-LTS", + "subscriptionId": "21b2dc34-bcé6-4é63-9449-d2a8d1c2339é", + "tags": "", + "version": "16.04.201805220", + "vmId": "é7fdbfc4-2déb-4a4é-8615-éa6aaf50162é", + "vmScaleSetName": "", + "vmSize": "Standard_D2_V2", + "zone": "" + }, + "network": { + "interface": [ + { + "ipv4": { + "ipAddress": [ + { + "privateIpAddress": "10.0.1.4", + "publicIpAddress": "40.112.128.120" + } + ], + "subnet": [ + { + "address": "10.0.1.0", + "prefix": "24" + } + ] + }, + "ipv6": { + "ipAddress": [] + }, + "macAddress": "000D3A3382E8" 
+ } + ] + } +} + diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/imds/valid.json walinuxagent-2.2.32/tests/data/imds/valid.json --- walinuxagent-2.2.21+really2.2.20/tests/data/imds/valid.json 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/data/imds/valid.json 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,46 @@ +{ + "compute": { + "location": "westus", + "name": "health", + "offer": "UbuntuServer", + "osType": "Linux", + "placementGroupId": "", + "platformFaultDomain": "0", + "platformUpdateDomain": "0", + "publisher": "Canonical", + "resourceGroupName": "tests", + "sku": "16.04-LTS", + "subscriptionId": "21b2dc34-bce6-4e63-9449-d2a8d1c2339e", + "tags": "", + "version": "16.04.201805220", + "vmId": "e7fdbfc4-2deb-4a4e-8615-ea6aaf50162e", + "vmScaleSetName": "", + "vmSize": "Standard_D2_V2", + "zone": "" + }, + "network": { + "interface": [ + { + "ipv4": { + "ipAddress": [ + { + "privateIpAddress": "10.0.1.4", + "publicIpAddress": "40.112.128.120" + } + ], + "subnet": [ + { + "address": "10.0.1.0", + "prefix": "24" + } + ] + }, + "ipv6": { + "ipAddress": [] + }, + "macAddress": "000D3A3382E8" + } + ] + } +} + diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/ovf-env-2.xml walinuxagent-2.2.32/tests/data/ovf-env-2.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/ovf-env-2.xml 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/data/ovf-env-2.xml 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,40 @@ + + + + 1.0 + + LinuxProvisioningConfiguration + HostName + UserName + UserPassword + false + + + + EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 + $HOME/UserName/.ssh/authorized_keys + ssh-rsa AAAANOTAREALKEY== foo@bar.local + + + + + EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 + $HOME/UserName/.ssh/id_rsa + + + + CustomData + + + + 1.0 + + kms.core.windows.net + true + + true + true + false + + + diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/ovf-env-3.xml walinuxagent-2.2.32/tests/data/ovf-env-3.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/ovf-env-3.xml 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/data/ovf-env-3.xml 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,40 @@ + + + + 1.0 + + LinuxProvisioningConfiguration + HostName + UserName + UserPassword + false + + + + EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 + $HOME/UserName/.ssh/authorized_keys + ssh-rsa AAAANOTAREALKEY== foo@bar.local + + + + + EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 + $HOME/UserName/.ssh/id_rsa + + + + CustomData + + + + 1.0 + + kms.core.windows.net + + + true + true + false + + + diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/ovf-env-4.xml walinuxagent-2.2.32/tests/data/ovf-env-4.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/ovf-env-4.xml 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/data/ovf-env-4.xml 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,40 @@ + + + + 1.0 + + LinuxProvisioningConfiguration + HostName + UserName + UserPassword + false + + + + EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 + $HOME/UserName/.ssh/authorized_keys + ssh-rsa AAAANOTAREALKEY== foo@bar.local + + + + + EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 + $HOME/UserName/.ssh/id_rsa + + + + CustomData + + + + 1.0 + + kms.core.windows.net + bad data + + true + true + false + + + diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/ovf-env.xml walinuxagent-2.2.32/tests/data/ovf-env.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/ovf-env.xml 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/data/ovf-env.xml 2018-09-30 15:05:27.000000000 
+0000 @@ -26,4 +26,15 @@ CustomData + + 1.0 + + kms.core.windows.net + false + + true + true + false + + diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/test_waagent.conf walinuxagent-2.2.32/tests/data/test_waagent.conf --- walinuxagent-2.2.21+really2.2.20/tests/data/test_waagent.conf 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/data/test_waagent.conf 2018-09-30 15:05:27.000000000 +0000 @@ -6,10 +6,14 @@ =Value0 FauxKey1= Value1 FauxKey2=Value2 Value2 +FauxKey3=delalloc,rw,noatime,nobarrier,users,mode=777 # Enable instance creation Provisioning.Enabled=y +# Enable extension handling +Extensions.Enabled=y + # Rely on cloud-init to provision Provisioning.UseCloudInit=y @@ -19,7 +23,8 @@ # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y -# Supported values are "rsa", "dsa" and "ecdsa". +# Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". +# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # An EOL comment that should be ignored # Monitor host name changes and publish changes via DHCP requests. @@ -113,8 +118,8 @@ # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. -# Default is disabled -# EnableOverProvisioning=n +# Default is enabled +# EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks @@ -122,5 +127,11 @@ # Add firewall rules to protect access to Azure host node services # Note: -# - The default is false to protect the state of exising VMs -OS.EnableFirewall=y +# - The default is false to protect the state of existing VMs +OS.EnableFirewall=n + +# Enforce control groups limits on the agent and extensions +CGroups.EnforceLimits=n + +# CGroups which are excluded from limits, comma separated +CGroups.Excluded=customscript,runcommand diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/wire/encrypted.enc walinuxagent-2.2.32/tests/data/wire/encrypted.enc --- walinuxagent-2.2.21+really2.2.20/tests/data/wire/encrypted.enc 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/data/wire/encrypted.enc 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,9 @@ +MIIBlwYJKoZIhvcNAQcDoIIBiDCCAYQCAQIxggEwMIIBLAIBAoAUW4P+tNXlmDXW +H30raKBkpUhXYwUwDQYJKoZIhvcNAQEBBQAEggEAP0LpwacLdJyvNQVmSyXPGM0i +mNJSHPQsAXLFFcmWmCAGiEsQWiHKV9mON/eyd6DjtgbTuhVNHPY/IDSDXfjgLxdX +NK1XejuEaVTwdVtCJWl5l4luOeCMDueitoIgBqgkbFpteqV6s8RFwnv+a2HhM0lc +TUwim6skx1bFs0csDD5DkM7R10EWxWHjdKox8R8tq/C2xpaVWRvJ52/DCVgeHOfh +orV0GmBK0ue/mZVTxu8jz2BxQUBhHXNWjBuNuGNmUuZvD0VY1q2K6Fa3xzv32mfB +xPKgt6ru/wG1Kn6P8yMdKS3bQiNZxE1D1o3epDujiygQahUby5cI/WXk7ryZ1DBL +BgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECAxpp+ZE6rpAgChqxBVpU047fb4zinTV +5xaG7lN15YEME4q8CqcF/Ji3NbHPmdw1/gtf diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/wire/ext_conf_sequencing.xml walinuxagent-2.2.32/tests/data/wire/ext_conf_sequencing.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/wire/ext_conf_sequencing.xml 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/data/wire/ext_conf_sequencing.xml 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,29 @@ + + + + Prod + + http://manifest_of_ga.xml + + + + Test + + http://manifest_of_ga.xml + + + + + + + + + + + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + + + 
{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + + +https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/wire/goal_state_remote_access.xml walinuxagent-2.2.32/tests/data/wire/goal_state_remote_access.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/wire/goal_state_remote_access.xml 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/data/wire/goal_state_remote_access.xml 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,30 @@ + + + 2010-12-15 + 1 + + Started + + 16001 + + + + c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2 + http://remoteaccessinfouri/ + + + + MachineRole_IN_0 + Started + + http://hostingenvuri/ + http://sharedconfiguri/ + http://certificatesuri/ + http://extensionsconfiguri/ + http://fullconfiguri/ + DummyRoleConfigName.xml + + + + + \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/wire/manifest_deletion.xml walinuxagent-2.2.32/tests/data/wire/manifest_deletion.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/wire/manifest_deletion.xml 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/data/wire/manifest_deletion.xml 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,11 @@ + + + + + 1.0.0 + + http://foo.bar/zar/OSTCExtensions.ExampleHandlerLinux__1.0.0 + + + + diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/wire/remote_access_10_accounts.xml walinuxagent-2.2.32/tests/data/wire/remote_access_10_accounts.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/wire/remote_access_10_accounts.xml 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/data/wire/remote_access_10_accounts.xml 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,97 @@ + + + 1 + 1 + + + testAccount1 + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + testAccount2 + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + testAccount3 + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + testAccount4 + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + testAccount5 + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + testAccount6 + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + testAccount7 + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + testAccount8 + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + testAccount9 + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + testAccount10 + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/wire/remote_access_duplicate_accounts.xml walinuxagent-2.2.32/tests/data/wire/remote_access_duplicate_accounts.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/wire/remote_access_duplicate_accounts.xml 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/data/wire/remote_access_duplicate_accounts.xml 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,25 @@ + + + 1 + 1 + + + testAccount + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + 
testAccount + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/wire/remote_access_no_accounts.xml walinuxagent-2.2.32/tests/data/wire/remote_access_no_accounts.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/wire/remote_access_no_accounts.xml 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/data/wire/remote_access_no_accounts.xml 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,6 @@ + + + 1 + 1 + + \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/wire/remote_access_single_account.xml walinuxagent-2.2.32/tests/data/wire/remote_access_single_account.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/wire/remote_access_single_account.xml 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/data/wire/remote_access_single_account.xml 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,16 @@ + + + 1 + 1 + + + testAccount + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/wire/remote_access_two_accounts.xml walinuxagent-2.2.32/tests/data/wire/remote_access_two_accounts.xml --- walinuxagent-2.2.21+really2.2.20/tests/data/wire/remote_access_two_accounts.xml 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/data/wire/remote_access_two_accounts.xml 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,25 @@ + + + 1 + 1 + + + testAccount1 + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + testAccount2 + encryptedPasswordString + 2019-01-01 + + Administrators + RemoteDesktopUsers + + + + \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/data/wire/sample.pem walinuxagent-2.2.32/tests/data/wire/sample.pem --- walinuxagent-2.2.21+really2.2.20/tests/data/wire/sample.pem 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/data/wire/sample.pem 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC3zCdThkBDYu83 +M7ouc03caqyEwV6lioWbtYdnraoftbuCJrOhy+WipSCVAmhlu/tpaItuzwB9/VTw +eSWfB/hB2sabVTKgU8gTQrI6ISy2ocLjqTIZuOETJuGlAIw6OXorhdUr8acZ8ohb +ftZIbS9YKxbO7sQi+20sT2ugROJnO7IDGbb2vWhEhp2NAieJ8Nnq0SMv1+cZJZYk +6hiFVSl12g0egVFrRTJBvvTbPS7amLAQkauK/IxG28jZR61pMbHHX+xBg4Iayb2i +qp8YnwK3qtf0stc0h9snnLnHSODva1Bo6qVBEcrkuXmtrHL2nUMsV/MgWG3HMgJJ +6Jf/wSFpAgMBAAECggEBALepsS6cvADajzK5ZPXf0NFOY6CxXnPLrWGAj5NCDftr +7bjMFbq7dngFzD46zrnClCOsDZEoF1TO3p8CYF6/Zwvfo5E7HMDrl8XvYwwFdJn3 +oTlALMlZXsh1lQv+NSJFp1hwfylPbGzYV/weDeEIAkR3om4cWDCg0GJz5peb3iXK +5fimrZsnInhktloU2Ep20UepR8wbhS5WP7B2s32OULTlWiGdORUVrHJQbTN6O0NZ +WzmAcsgfmW1KEBOR9sDFbAdldt8/WcLJVIfWOdFVbCbOaxrnRnZ8j8tsafziVncD +QFRpNeyOHZR5S84oAPo2EIVeFCLLeo3Wit/O3IFmhhUCgYEA5jrs0VSowb/xU/Bw +wm1cKnSqsub3p3GLPL4TdODYMHH56Wv8APiwcW9O1+oRZoM9M/8KXkDlfFOz10tY +bMYvF8MzFKIzzi5TxaWqSWsNeXpoqtFqUed7KRh3ybncIqFAAauTwmAhAlEmGR/e +AY7Oy4b2lnRU1ssIOd0VnSnAqTcCgYEAzF6746DhsInlFIQGsUZBOmUtwyu0k1kc +gkWhJt5SyQHZtX1SMV2RI6CXFpUZcjv31jM30GmXdvkuj2dIHaDZB5V5BlctPJZq +FH0RFxmFHXk+npLJnKKSX1H3/2PxTUsSBcFHEaPCgvIz3720bX7fqRIFtVdrcbQA +cB9DARbjWl8CgYBKADyoWCbaB+EA0vLbe505RECtulF176gKgSnt0muKvsfOQFhC +06ya+WUFP4YSRjLA6MQjYYahvKG8nMoyRE1UvPhJNI2kQv3INKSUbqVpG3BTH3am +Ftpebi/qliPsuZnCL60RuCZEAWNWhgisxYMwphPSblfqpl3hg290EbyMZwKBgQCs +mypHQ166EozW+fcJDFQU9NVkrGoTtMR+Rj6oLEdxG037mb+sj+EAXSaeXQkj0QAt 
++g4eyL+zLRuk5E8lLu9+F0EjGMfNDyDC8ypW/yfNT9SSa1k6IJhNR1aUbZ2kcU3k +bGwQuuWSYOttAbT8cZaHHgCSOyY03xkrmUunBOS6MwKBgBK4D0Uv7ZDf3Y38A07D +MblDQj3wZeFu6IWi9nVT12U3WuEJqQqqxWnWmETa+TS/7lhd0GjTB+79+qOIhmls +XSAmIS/rBUGlk5f9n+vBjQkpbqAvcXV7I/oQASpVga1xB9EuMvXc9y+x/QfmrYVM +zqxRWJIMASPLiQr79V0zXGXP +-----END PRIVATE KEY----- \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/distro/__init__.py walinuxagent-2.2.32/tests/distro/__init__.py --- walinuxagent-2.2.21+really2.2.20/tests/distro/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/distro/__init__.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/tests/distro/test_resourceDisk.py walinuxagent-2.2.32/tests/distro/test_resourceDisk.py --- walinuxagent-2.2.21+really2.2.20/tests/distro/test_resourceDisk.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/distro/test_resourceDisk.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # # Implements parts of RFC 2131, 1541, 1497 and # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx @@ -78,7 +78,7 @@ # assert if sys.version_info >= (3,3): with patch("os.posix_fallocate") as posix_fallocate: - assert posix_fallocate.assert_not_called() + self.assertEqual(0, posix_fallocate.call_count) assert run_patch.call_count == 1 assert "dd if" in run_patch.call_args_list[0][0][0] diff -Nru walinuxagent-2.2.21+really2.2.20/tests/distro/test_scvmm.py walinuxagent-2.2.32/tests/distro/test_scvmm.py --- walinuxagent-2.2.21+really2.2.20/tests/distro/test_scvmm.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/distro/test_scvmm.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # # Implements parts of RFC 2131, 1541, 1497 and # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx diff -Nru walinuxagent-2.2.21+really2.2.20/tests/ga/__init__.py walinuxagent-2.2.32/tests/ga/__init__.py --- walinuxagent-2.2.21+really2.2.20/tests/ga/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/ga/__init__.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/tests/ga/test_env.py walinuxagent-2.2.32/tests/ga/test_env.py --- walinuxagent-2.2.21+really2.2.20/tests/ga/test_env.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/ga/test_env.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,86 +0,0 @@ -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Requires Python 2.4+ and Openssl 1.0+ -# -import glob -import tempfile - -import os -from mock import patch - -from azurelinuxagent.common.utils import fileutil -from azurelinuxagent.ga.env import MAXIMUM_CACHED_FILES, EnvHandler -from tests.tools import AgentTestCase - - -class TestEnv(AgentTestCase): - @patch("azurelinuxagent.common.conf.get_lib_dir") - def test_purge_disk_cache(self, mock_conf, *args): - names = [ - ("Prod", "agentsManifest"), - ("Test", "agentsManifest"), - ("FauxExtension1", "manifest.xml"), - ("FauxExtension2", "manifest.xml"), - ("GoalState", "xml"), - ("ExtensionsConfig", "xml") - ] - - env = EnvHandler() - - tmp_dir = tempfile.mkdtemp() - mock_conf.return_value = tmp_dir - - # write incarnations 1-100 - for t in names: - self._create_files(tmp_dir, - t[0], - t[1], - 2 * MAXIMUM_CACHED_FILES, - with_sleep=0.001) - - # update incarnation 1 with the latest timestamp - for t in names: - f = os.path.join(tmp_dir, '.'.join((t[0], '1', t[1]))) - fileutil.write_file(f, "faux content") - - # ensure the expected number of files are created - for t in names: - p = os.path.join(tmp_dir, '{0}.*.{1}'.format(*t)) - self.assertEqual(2 * MAXIMUM_CACHED_FILES, len(glob.glob(p))) - - env.purge_disk_cache() - - # ensure the expected number of files remain - for t in names: - p = os.path.join(tmp_dir, '{0}.*.{1}'.format(*t)) - incarnation1 = os.path.join(tmp_dir, '{0}.1.{1}'.format(t[0], t[1])) - incarnation2 = os.path.join(tmp_dir, '{0}.2.{1}'.format(t[0], t[1])) - self.assertEqual(MAXIMUM_CACHED_FILES, len(glob.glob(p))) - self.assertTrue(os.path.exists(incarnation1)) - self.assertFalse(os.path.exists(incarnation2)) - - # write incarnation 101 - for t in names: - f = os.path.join(tmp_dir, '.'.join((t[0], '101', t[1]))) - fileutil.write_file(f, "faux content") - - # call to purge should be ignored, since interval has not elapsed - env.purge_disk_cache() - - for t in names: - p = os.path.join(tmp_dir, '{0}.*.{1}'.format(*t)) - incarnation1 = os.path.join(tmp_dir, '{0}.1.{1}'.format(t[0], t[1])) - self.assertEqual(MAXIMUM_CACHED_FILES + 1, len(glob.glob(p))) - self.assertTrue(os.path.exists(incarnation1)) diff -Nru walinuxagent-2.2.21+really2.2.20/tests/ga/test_extension.py walinuxagent-2.2.32/tests/ga/test_extension.py --- walinuxagent-2.2.21+really2.2.20/tests/ga/test_extension.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/ga/test_extension.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,32 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # -import glob -import os import os.path -import shutil -import tempfile -import zipfile - -import azurelinuxagent.common.conf as conf -import azurelinuxagent.common.utils.fileutil as fileutil from tests.protocol.mockwiredata import * - -from azurelinuxagent.common.exception import * -from azurelinuxagent.common.protocol import get_protocol_util -from azurelinuxagent.common.protocol.restapi import ExtHandlerStatus, \ - ExtensionStatus, \ - ExtensionSubStatus, \ - Extension, \ - VMStatus, ExtHandler, \ - get_properties from azurelinuxagent.ga.exthandlers import * from azurelinuxagent.common.protocol.wire import WireProtocol + class TestExtensionCleanup(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) @@ -135,6 +119,7 @@ self.assertEqual(self._count_installed(), 5) self.assertEqual(self._count_uninstalled(), 0) + class TestHandlerStateMigration(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) @@ -233,6 +218,24 @@ self.assertEquals(handler_status.message, message) return + def test_set_handler_status_ignores_none_content(self): + """ + Validate that set_handler_status ignore cases where json.dumps + returns a value of None. + """ + self._prepare_handler_state() + self._prepare_handler_config() + + status = "Ready" + code = 0 + message = "A message" + + try: + with patch('json.dumps', return_value=None): + self.ext_handler_i.set_handler_status(status=status, code=code, message=message) + except Exception as e: + self.fail("set_handler_status threw an exception") + @patch("shutil.move", side_effect=Exception) def test_migration_ignores_move_errors(self, shutil_mock): self._prepare_handler_state() @@ -255,19 +258,30 @@ self.assertTrue(False, "Unexpected exception: {0}".format(str(e))) return + +class ExtensionTestCase(AgentTestCase): + @classmethod + def setUpClass(cls): + CGroups.disable() + + @classmethod + def tearDownClass(cls): + CGroups.enable() + @patch("azurelinuxagent.common.protocol.wire.CryptUtil") @patch("azurelinuxagent.common.utils.restutil.http_get") -class TestExtension(AgentTestCase): +class TestExtension(ExtensionTestCase): def _assert_handler_status(self, report_vm_status, expected_status, - expected_ext_count, version): + expected_ext_count, version, + expected_handler_name="OSTCExtensions.ExampleHandlerLinux"): self.assertTrue(report_vm_status.called) args, kw = report_vm_status.call_args vm_status = args[0] self.assertNotEquals(0, len(vm_status.vmAgent.extensionHandlers)) handler_status = vm_status.vmAgent.extensionHandlers[0] self.assertEquals(expected_status, handler_status.status) - self.assertEquals("OSTCExtensions.ExampleHandlerLinux", + self.assertEquals(expected_handler_name, handler_status.name) self.assertEquals(version, handler_status.version) self.assertEquals(expected_ext_count, len(handler_status.extensions)) @@ -321,7 +335,7 @@ #Test hotfix test_data.goal_state = test_data.goal_state.replace("2<", "3<") - test_data.ext_conf = test_data.ext_conf.replace("1.0.0", "1.1.0") + test_data.ext_conf = test_data.ext_conf.replace("1.0.0", "1.1.1") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"1\"", "seqNo=\"2\"") exthandlers_handler.run() @@ -331,7 +345,7 @@ #Test upgrade test_data.goal_state = test_data.goal_state.replace("3<", "4<") - test_data.ext_conf = test_data.ext_conf.replace("1.1.0", "1.2.0") + test_data.ext_conf = test_data.ext_conf.replace("1.1.1", "1.2.0") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"2\"", "seqNo=\"3\"") 
exthandlers_handler.run() @@ -381,104 +395,200 @@ exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) - def test_ext_handler_rollingupgrade(self, *args): - test_data = WireProtocolData(DATA_FILE_EXT_ROLLINGUPGRADE) + def test_ext_handler_sequencing(self, *args): + test_data = WireProtocolData(DATA_FILE_EXT_SEQUENCING) exthandlers_handler, protocol = self._create_mock(test_data, *args) #Test enable scenario. exthandlers_handler.run() - self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0", + expected_handler_name="OSTCExtensions.OtherExampleHandlerLinux") self._assert_ext_status(protocol.report_ext_status, "success", 0) + # check handler list + self.assertTrue(exthandlers_handler.ext_handlers is not None) + self.assertTrue(exthandlers_handler.ext_handlers.extHandlers is not None) + self.assertEqual(len(exthandlers_handler.ext_handlers.extHandlers), 2) + self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[0].properties.dependencyLevel, 1) + self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[1].properties.dependencyLevel, 2) + #Test goal state not changed exthandlers_handler.run() - self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0", + expected_handler_name="OSTCExtensions.OtherExampleHandlerLinux") - #Test goal state changed without new GUID + #Test goal state changed test_data.goal_state = test_data.goal_state.replace("1<", "2<") + test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"0\"", + "seqNo=\"1\"") + test_data.ext_conf = test_data.ext_conf.replace("dependencyLevel=\"2\"", + "dependencyLevel=\"3\"") + test_data.ext_conf = test_data.ext_conf.replace("dependencyLevel=\"1\"", + "dependencyLevel=\"4\"") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") - self._assert_ext_status(protocol.report_ext_status, "success", 0) + self._assert_ext_status(protocol.report_ext_status, "success", 1) + + self.assertEqual(len(exthandlers_handler.ext_handlers.extHandlers), 2) + self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[0].properties.dependencyLevel, 3) + self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[1].properties.dependencyLevel, 4) - #Test GUID change without new version available + #Test disable test_data.goal_state = test_data.goal_state.replace("2<", "3<") - test_data.ext_conf = test_data.ext_conf.replace("FE0987654321", "FE0987654322") + test_data.ext_conf = test_data.ext_conf.replace("enabled", "disabled") exthandlers_handler.run() - self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") - self._assert_ext_status(protocol.report_ext_status, "success", 0) + self._assert_handler_status(protocol.report_vm_status, "NotReady", + 1, "1.0.0", + expected_handler_name="OSTCExtensions.OtherExampleHandlerLinux") + self.assertEqual(len(exthandlers_handler.ext_handlers.extHandlers), 2) + self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[0].properties.dependencyLevel, 4) + self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[1].properties.dependencyLevel, 3) - #Test hotfix available without GUID change + #Test uninstall test_data.goal_state = test_data.goal_state.replace("3<", "4<") - test_data.ext_conf = test_data.ext_conf.replace("1.0.0", "1.1.0") + test_data.ext_conf = test_data.ext_conf.replace("disabled", "uninstall") + 
test_data.ext_conf = test_data.ext_conf.replace("dependencyLevel=\"3\"", + "dependencyLevel=\"6\"") + test_data.ext_conf = test_data.ext_conf.replace("dependencyLevel=\"4\"", + "dependencyLevel=\"5\"") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) + self.assertEqual(len(exthandlers_handler.ext_handlers.extHandlers), 2) + self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[0].properties.dependencyLevel, 6) + self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[1].properties.dependencyLevel, 5) - #Test GUID change with hotfix - test_data.goal_state = test_data.goal_state.replace("4<", - "5<") - test_data.ext_conf = test_data.ext_conf.replace("FE0987654322", "FE0987654323") + def test_ext_handler_rollingupgrade(self, *args): + test_data = WireProtocolData(DATA_FILE_EXT_ROLLINGUPGRADE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + #Test enable scenario. + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") + self._assert_ext_status(protocol.report_ext_status, "success", 0) + + #Test goal state changed + test_data.goal_state = test_data.goal_state.replace("1<", + "2<") + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") + self._assert_ext_status(protocol.report_ext_status, "success", 0) + + #Test minor version bump + test_data.goal_state = test_data.goal_state.replace("2<", + "3<") + test_data.ext_conf = test_data.ext_conf.replace("1.0.0", "1.1.0") + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.0") + self._assert_ext_status(protocol.report_ext_status, "success", 0) + + #Test hotfix version bump + test_data.goal_state = test_data.goal_state.replace("3<", + "4<") + test_data.ext_conf = test_data.ext_conf.replace("1.1.0", "1.1.1") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.1") self._assert_ext_status(protocol.report_ext_status, "success", 0) #Test disable - test_data.goal_state = test_data.goal_state.replace("5<", - "6<") + test_data.goal_state = test_data.goal_state.replace("4<", + "5<") test_data.ext_conf = test_data.ext_conf.replace("enabled", "disabled") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "NotReady", 1, "1.1.1") #Test uninstall - test_data.goal_state = test_data.goal_state.replace("6<", - "7<") + test_data.goal_state = test_data.goal_state.replace("5<", + "6<") test_data.ext_conf = test_data.ext_conf.replace("disabled", "uninstall") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) #Test uninstall again! 
- test_data.goal_state = test_data.goal_state.replace("7<", - "8<") + test_data.goal_state = test_data.goal_state.replace("6<", + "7<") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) #Test re-install - test_data.goal_state = test_data.goal_state.replace("8<", - "9<") + test_data.goal_state = test_data.goal_state.replace("7<", + "8<") test_data.ext_conf = test_data.ext_conf.replace("uninstall", "enabled") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.1") self._assert_ext_status(protocol.report_ext_status, "success", 0) - #Test upgrade available without GUID change - test_data.goal_state = test_data.goal_state.replace("9<", - "10<") - test_data.ext_conf = test_data.ext_conf.replace("1.1.0", "1.2.0") + #Test version bump post-re-install + test_data.goal_state = test_data.goal_state.replace("8<", + "9<") + test_data.ext_conf = test_data.ext_conf.replace("1.1.1", "1.2.0") exthandlers_handler.run() - self._assert_no_handler_status(protocol.report_vm_status) + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.2.0") + self._assert_ext_status(protocol.report_ext_status, "success", 0) - #Test GUID change with upgrade available - test_data.goal_state = test_data.goal_state.replace("10<", - "11<") - test_data.ext_conf = test_data.ext_conf.replace("FE0987654323", "FE0987654324") + #Test rollback + test_data.goal_state = test_data.goal_state.replace("9<", + "10<") + test_data.ext_conf = test_data.ext_conf.replace("1.2.0", "1.1.0") exthandlers_handler.run() - self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.2.0") + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) @patch('azurelinuxagent.ga.exthandlers.add_event') - def test_ext_handler_download_failure(self, mock_add_event, *args): + def test_ext_handler_download_failure_transient(self, mock_add_event, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) protocol.download_ext_handler_pkg = Mock(side_effect=ProtocolError) exthandlers_handler.run() + self.assertEquals(0, mock_add_event.call_count) + + @patch('azurelinuxagent.common.errorstate.ErrorState.is_triggered') + @patch('azurelinuxagent.ga.exthandlers.add_event') + def test_ext_handler_report_status_permanent(self, mock_add_event, mock_error_state, *args): + test_data = WireProtocolData(DATA_FILE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + protocol.report_vm_status = Mock(side_effect=ProtocolError) + + mock_error_state.return_value = True + exthandlers_handler.run() + self.assertEquals(5, mock_add_event.call_count) + args, kw = mock_add_event.call_args + self.assertEquals(False, kw['is_success']) + self.assertTrue("Failed to report vm agent status" in kw['message']) + self.assertEquals("ReportStatusExtended", kw['op']) + + @patch('azurelinuxagent.ga.exthandlers.add_event') + def test_ext_handler_report_status_resource_gone(self, mock_add_event, *args): + test_data = WireProtocolData(DATA_FILE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + protocol.report_vm_status = Mock(side_effect=ResourceGoneError) + + exthandlers_handler.run() + self.assertEquals(4, mock_add_event.call_count) args, kw = mock_add_event.call_args self.assertEquals(False, kw['is_success']) - self.assertEquals("OSTCExtensions.ExampleHandlerLinux", kw['name']) - self.assertEquals("Download", 
kw['op']) + self.assertTrue("ResourceGoneError" in kw['message']) + self.assertEquals("ExtensionProcessing", kw['op']) + + @patch('azurelinuxagent.common.errorstate.ErrorState.is_triggered') + @patch('azurelinuxagent.common.event.add_event') + def test_ext_handler_download_failure_permanent(self, mock_add_event, mock_error_state, *args): + test_data = WireProtocolData(DATA_FILE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + protocol.get_ext_handler_pkgs = Mock(side_effect=ProtocolError) + + mock_error_state.return_value = True + exthandlers_handler.run() + self.assertEquals(1, mock_add_event.call_count) + args, kw = mock_add_event.call_args_list[0] + self.assertEquals(False, kw['is_success']) + self.assertTrue("Failed to get ext handler pkgs" in kw['message']) + self.assertTrue("Failed to get artifact" in kw['message']) + self.assertEquals("GetArtifactExtended", kw['op']) @patch('azurelinuxagent.ga.exthandlers.fileutil') def test_ext_handler_io_error(self, mock_fileutil, *args): @@ -499,14 +609,13 @@ conf.get_enable_overprovisioning = Mock(return_value=False) with patch.object(ExtHandlersHandler, 'handle_ext_handler') as patch_handle_ext_handler: exthandlers_handler.handle_ext_handlers() - patch_handle_ext_handler.assert_called() + self.assertEqual(1, patch_handle_ext_handler.call_count) # enable extension handling blocking conf.get_enable_overprovisioning = Mock(return_value=True) with patch.object(ExtHandlersHandler, 'handle_ext_handler') as patch_handle_ext_handler: exthandlers_handler.handle_ext_handlers() - patch_handle_ext_handler.assert_not_called() - + self.assertEqual(0, patch_handle_ext_handler.call_count) def test_handle_ext_handlers_on_hold_false(self, *args): test_data = WireProtocolData(DATA_FILE) @@ -524,13 +633,13 @@ protocol.get_artifacts_profile = Mock(return_value=mock_in_vm_artifacts_profile) with patch.object(ExtHandlersHandler, 'handle_ext_handler') as patch_handle_ext_handler: exthandlers_handler.handle_ext_handlers() - patch_handle_ext_handler.assert_called_once() + self.assertEqual(1, patch_handle_ext_handler.call_count) #Test when in_vm_artifacts_profile is not available protocol.get_artifacts_profile = Mock(return_value=None) with patch.object(ExtHandlersHandler, 'handle_ext_handler') as patch_handle_ext_handler: exthandlers_handler.handle_ext_handlers() - patch_handle_ext_handler.assert_called_once() + self.assertEqual(1, patch_handle_ext_handler.call_count) def _assert_ext_status(self, report_ext_status, expected_status, expected_seq_no): @@ -569,12 +678,11 @@ datafile = DATA_FILE_EXT_INTERNALVERSION else: config_version = '1.0.0' + decision_version = '1.0.0' if autoupgrade: datafile = DATA_FILE_EXT_AUTOUPGRADE - decision_version = '1.2.0' else: datafile = DATA_FILE - decision_version = '1.0.0' _, protocol = self._create_mock(WireProtocolData(datafile), *args) ext_handlers, _ = protocol.get_ext_handlers() @@ -595,22 +703,25 @@ # (installed_version, config_version, exptected_version, autoupgrade_expected_version) cases = [ - (None, '2.0', '2.0.0', '2.2.0'), - (None, '2.0.0', '2.0.0', '2.2.0'), - ('1.0', '1.0.0', '1.0.0', '1.2.0'), - (None, '2.1.0', '2.1.1', '2.2.0'), - (None, '2.2.0', '2.2.0', '2.2.0'), - (None, '2.3.0', '2.3.0', '2.3.0'), - (None, '2.4.0', '2.4.0', '2.4.0'), - (None, '3.0', '3.0', '3.1'), - (None, '4.0', '4.0.0.1', '4.1.0.0'), + (None, '2.0', '2.0.0'), + (None, '2.0.0', '2.0.0'), + ('1.0', '1.0.0', '1.0.0'), + (None, '2.1.0', '2.1.0'), + (None, '2.1.1', '2.1.1'), + (None, '2.2.0', '2.2.0'), + (None, '2.3.0', '2.3.0'), 
+ (None, '2.4.0', '2.4.0'), + (None, '3.0', '3.0'), + (None, '3.1', '3.1'), + (None, '4.0', '4.0.0.1'), + (None, '4.1', '4.1.0.0'), ] _, protocol = self._create_mock(WireProtocolData(DATA_FILE), *args) version_uri = Mock() version_uri.uri = 'http://some/Microsoft.OSTCExtensions_ExampleHandlerLinux_asiaeast_manifest.xml' - for (installed_version, config_version, expected_version, autoupgrade_expected_version) in cases: + for (installed_version, config_version, expected_version) in cases: ext_handler = Mock() ext_handler.properties = Mock() ext_handler.name = 'OSTCExtensions.ExampleHandlerLinux' @@ -623,14 +734,188 @@ ext_handler_instance.decide_version() self.assertEqual(expected_version, ext_handler.properties.version) - ext_handler.properties.version = config_version - ext_handler.properties.upgradePolicy = 'auto' + @patch('azurelinuxagent.common.conf.get_extensions_enabled', return_value=False) + def test_extensions_disabled(self, _, *args): + # test status is reported for no extensions + test_data = WireProtocolData(DATA_FILE_NO_EXT) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + exthandlers_handler.run() + self._assert_no_handler_status(protocol.report_vm_status) - ext_handler_instance = ExtHandlerInstance(ext_handler, protocol) - ext_handler_instance.get_installed_version = Mock(return_value=installed_version) + # test status is reported, but extensions are not processed + test_data = WireProtocolData(DATA_FILE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + exthandlers_handler.run() + self._assert_no_handler_status(protocol.report_vm_status) - ext_handler_instance.decide_version() - self.assertEqual(autoupgrade_expected_version, ext_handler.properties.version) + def test_extensions_deleted(self, *args): + test_data = WireProtocolData(DATA_FILE_EXT_DELETION) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + # Ensure initial enable is successful + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") + self._assert_ext_status(protocol.report_ext_status, "success", 0) + + # Update incarnation, simulate new extension version and old one deleted + test_data.goal_state = test_data.goal_state.replace("1<", + "2<") + test_data.ext_conf = test_data.ext_conf.replace('version="1.0.0"', + 'version="1.0.1"') + test_data.manifest = test_data.manifest.replace('1.0.0', '1.0.1') + + # Ensure new extension can be enabled + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.1") + self._assert_ext_status(protocol.report_ext_status, "success", 0) + + @patch('subprocess.Popen.poll') + def test_install_failure(self, patch_poll, *args): + """ + When extension install fails, the operation should not be retried. 
+ """ + test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + # Ensure initial install is unsuccessful + patch_poll.call_count = 0 + patch_poll.return_value = 1 + exthandlers_handler.run() + + # capture process output also calls poll + self.assertEqual(2, patch_poll.call_count) + self.assertEqual(1, protocol.report_vm_status.call_count) + self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=0, version="1.0.0") + + # Ensure subsequent no further retries are made + exthandlers_handler.run() + self.assertEqual(2, patch_poll.call_count) + self.assertEqual(2, protocol.report_vm_status.call_count) + + @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_enable_command') + def test_enable_failure(self, patch_get_enable_command, *args): + """ + When extension enable fails, the operation should not be retried. + """ + test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + # Ensure initial install is successful, but enable fails + patch_get_enable_command.call_count = 0 + patch_get_enable_command.return_value = "exit 1" + exthandlers_handler.run() + + self.assertEqual(1, patch_get_enable_command.call_count) + self.assertEqual(1, protocol.report_vm_status.call_count) + self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=1, version="1.0.0") + + exthandlers_handler.run() + self.assertEqual(1, patch_get_enable_command.call_count) + self.assertEqual(2, protocol.report_vm_status.call_count) + + @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_disable_command') + def test_disable_failure(self, patch_get_disable_command, *args): + """ + When extension disable fails, the operation should not be retried. + """ + test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + # Ensure initial install and enable is successful, but disable fails + patch_get_disable_command.call_count = 0 + patch_get_disable_command.return_value = "exit 1" + exthandlers_handler.run() + + self.assertEqual(0, patch_get_disable_command.call_count) + self.assertEqual(1, protocol.report_vm_status.call_count) + self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.0") + self._assert_ext_status(protocol.report_ext_status, "success", 0) + + # Next incarnation, disable extension + test_data.goal_state = test_data.goal_state.replace("1<", + "2<") + test_data.ext_conf = test_data.ext_conf.replace("enabled", "disabled") + + exthandlers_handler.run() + self.assertEqual(1, patch_get_disable_command.call_count) + self.assertEqual(2, protocol.report_vm_status.call_count) + self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=1, version="1.0.0") + + # Ensure there are no further retries + exthandlers_handler.run() + self.assertEqual(1, patch_get_disable_command.call_count) + self.assertEqual(3, protocol.report_vm_status.call_count) + self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=1, version="1.0.0") + + @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_uninstall_command') + def test_uninstall_failure(self, patch_get_uninstall_command, *args): + """ + When extension uninstall fails, the operation should not be retried. 
+ """ + test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + # Ensure initial install and enable is successful, but uninstall fails + patch_get_uninstall_command.call_count = 0 + patch_get_uninstall_command.return_value = "exit 1" + exthandlers_handler.run() + + self.assertEqual(0, patch_get_uninstall_command.call_count) + self.assertEqual(1, protocol.report_vm_status.call_count) + self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.0") + self._assert_ext_status(protocol.report_ext_status, "success", 0) + + # Next incarnation, disable extension + test_data.goal_state = test_data.goal_state.replace("1<", + "2<") + test_data.ext_conf = test_data.ext_conf.replace("enabled", "uninstall") + + exthandlers_handler.run() + self.assertEqual(1, patch_get_uninstall_command.call_count) + self.assertEqual(2, protocol.report_vm_status.call_count) + self.assertEquals("Ready", protocol.report_vm_status.call_args[0][0].vmAgent.status) + self._assert_no_handler_status(protocol.report_vm_status) + + # Ensure there are no further retries + exthandlers_handler.run() + self.assertEqual(1, patch_get_uninstall_command.call_count) + self.assertEqual(3, protocol.report_vm_status.call_count) + self.assertEquals("Ready", protocol.report_vm_status.call_args[0][0].vmAgent.status) + self._assert_no_handler_status(protocol.report_vm_status) + + @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_update_command') + def test_upgrade(self, patch_get_update_command, *args): + """ + Extension upgrade failure should not be retried + """ + test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) + exthandlers_handler, protocol = self._create_mock(test_data, *args) + + # Ensure initial install and enable is successful + exthandlers_handler.run() + self.assertEqual(0, patch_get_update_command.call_count) + + self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.0") + self._assert_ext_status(protocol.report_ext_status, "success", 0) + + # Next incarnation, update version + test_data.goal_state = test_data.goal_state.replace("1<", + "2<") + test_data.ext_conf = test_data.ext_conf.replace('version="1.0.0"', + 'version="1.0.1"') + test_data.manifest = test_data.manifest.replace('1.0.0', + '1.0.1') + + # Update command should fail + patch_get_update_command.return_value = "exit 1" + exthandlers_handler.run() + self.assertEqual(1, patch_get_update_command.call_count) + + # On the next iteration, update should not be retried + exthandlers_handler.run() + self.assertEqual(1, patch_get_update_command.call_count) + + self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=1, version="1.0.1") if __name__ == '__main__': diff -Nru walinuxagent-2.2.21+really2.2.20/tests/ga/test_exthandlers.py walinuxagent-2.2.32/tests/ga/test_exthandlers.py --- walinuxagent-2.2.21+really2.2.20/tests/ga/test_exthandlers.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/ga/test_exthandlers.py 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,134 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the Apache License. 
+import json + +from azurelinuxagent.common.protocol.restapi import ExtensionStatus, Extension, ExtHandler, ExtHandlerProperties +from azurelinuxagent.ga.exthandlers import parse_ext_status, ExtHandlerInstance +from tests.tools import * + + +class TestExtHandlers(AgentTestCase): + def test_parse_extension_status00(self): + """ + Parse a status report for a successful execution of an extension. + """ + + s = '''[{ + "status": { + "status": "success", + "formattedMessage": { + "lang": "en-US", + "message": "Command is finished." + }, + "operation": "Daemon", + "code": "0", + "name": "Microsoft.OSTCExtensions.CustomScriptForLinux" + }, + "version": "1.0", + "timestampUTC": "2018-04-20T21:20:24Z" + } +]''' + ext_status = ExtensionStatus(seq_no=0) + parse_ext_status(ext_status, json.loads(s)) + + self.assertEqual('0', ext_status.code) + self.assertEqual(None, ext_status.configurationAppliedTime) + self.assertEqual('Command is finished.', ext_status.message) + self.assertEqual('Daemon', ext_status.operation) + self.assertEqual('success', ext_status.status) + self.assertEqual(0, ext_status.sequenceNumber) + self.assertEqual(0, len(ext_status.substatusList)) + + def test_parse_extension_status01(self): + """ + Parse a status report for a failed execution of an extension. + + The extension returned a bad status/status of failed. + The agent should handle this gracefully, and convert all unknown + status/status values into an error. + """ + + s = '''[{ + "status": { + "status": "failed", + "formattedMessage": { + "lang": "en-US", + "message": "Enable failed: Failed with error: commandToExecute is empty or invalid ..." + }, + "operation": "Enable", + "code": "0", + "name": "Microsoft.OSTCExtensions.CustomScriptForLinux" + }, + "version": "1.0", + "timestampUTC": "2018-04-20T20:50:22Z" +}]''' + ext_status = ExtensionStatus(seq_no=0) + parse_ext_status(ext_status, json.loads(s)) + + self.assertEqual('0', ext_status.code) + self.assertEqual(None, ext_status.configurationAppliedTime) + self.assertEqual('Enable failed: Failed with error: commandToExecute is empty or invalid ...', ext_status.message) + self.assertEqual('Enable', ext_status.operation) + self.assertEqual('error', ext_status.status) + self.assertEqual(0, ext_status.sequenceNumber) + self.assertEqual(0, len(ext_status.substatusList)) + + @patch('azurelinuxagent.common.event.EventLogger.add_event') + @patch('azurelinuxagent.ga.exthandlers.ExtHandlerInstance.get_largest_seq_no') + def assert_extension_sequence_number(self, + patch_get_largest_seq, + patch_add_event, + goal_state_sequence_number, + disk_sequence_number, + expected_sequence_number): + ext = Extension() + ext.sequenceNumber = goal_state_sequence_number + patch_get_largest_seq.return_value = disk_sequence_number + + ext_handler_props = ExtHandlerProperties() + ext_handler_props.version = "1.2.3" + ext_handler = ExtHandler(name='foo') + ext_handler.properties = ext_handler_props + + instance = ExtHandlerInstance(ext_handler=ext_handler, protocol=None) + seq, path = instance.get_status_file_path(ext) + + try: + gs_seq_int = int(goal_state_sequence_number) + gs_int = True + except ValueError: + gs_int = False + + if gs_int and gs_seq_int != disk_sequence_number: + self.assertEqual(1, patch_add_event.call_count) + args, kw_args = patch_add_event.call_args + self.assertEqual('SequenceNumberMismatch', kw_args['op']) + self.assertEqual(False, kw_args['is_success']) + self.assertEqual('Goal state: {0}, disk: {1}' + .format(gs_seq_int, disk_sequence_number), + kw_args['message']) + else: + 
self.assertEqual(0, patch_add_event.call_count) + + self.assertEqual(expected_sequence_number, seq) + if seq > -1: + self.assertTrue(path.endswith('/foo-1.2.3/status/{0}.status'.format(expected_sequence_number))) + else: + self.assertIsNone(path) + + def test_extension_sequence_number(self): + self.assert_extension_sequence_number(goal_state_sequence_number="12", + disk_sequence_number=366, + expected_sequence_number=12) + + self.assert_extension_sequence_number(goal_state_sequence_number=" 12 ", + disk_sequence_number=366, + expected_sequence_number=12) + + self.assert_extension_sequence_number(goal_state_sequence_number=" foo", + disk_sequence_number=3, + expected_sequence_number=3) + + self.assert_extension_sequence_number(goal_state_sequence_number="-1", + disk_sequence_number=3, + expected_sequence_number=-1) diff -Nru walinuxagent-2.2.21+really2.2.20/tests/ga/test_monitor.py walinuxagent-2.2.32/tests/ga/test_monitor.py --- walinuxagent-2.2.21+really2.2.20/tests/ga/test_monitor.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/ga/test_monitor.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,23 +12,31 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # +from datetime import timedelta +from azurelinuxagent.common.protocol.wire import WireProtocol from tests.tools import * from azurelinuxagent.ga.monitor import * + +@patch('azurelinuxagent.common.event.EventLogger.add_event') +@patch('azurelinuxagent.common.osutil.get_osutil') +@patch('azurelinuxagent.common.protocol.get_protocol_util') +@patch('azurelinuxagent.common.protocol.util.ProtocolUtil.get_protocol') +@patch("azurelinuxagent.common.protocol.healthservice.HealthService._report") +@patch("azurelinuxagent.common.utils.restutil.http_get") class TestMonitor(AgentTestCase): - def test_parse_xml_event(self): + + def test_parse_xml_event(self, *args): data_str = load_data('ext/event.xml') event = parse_xml_event(data_str) - self.assertNotEquals(None, event) - self.assertNotEquals(0, event.parameters) - self.assertNotEquals(None, event.parameters[0]) - - @patch('azurelinuxagent.common.osutil.get_osutil') - @patch('azurelinuxagent.common.protocol.get_protocol_util') - def test_add_sysinfo(self, _, __): + self.assertNotEqual(None, event) + self.assertNotEqual(0, event.parameters) + self.assertNotEqual(None, event.parameters[0]) + + def test_add_sysinfo(self, *args): data_str = load_data('ext/event.xml') event = parse_xml_event(data_str) monitor_handler = get_monitor_handler() @@ -53,25 +61,190 @@ monitor_handler.sysinfo = sysinfo monitor_handler.add_sysinfo(event) - self.assertNotEquals(None, event) - self.assertNotEquals(0, event.parameters) - self.assertNotEquals(None, event.parameters[0]) + self.assertNotEqual(None, event) + self.assertNotEqual(0, event.parameters) + self.assertNotEqual(None, event.parameters[0]) counter = 0 for p in event.parameters: if p.name == vm_name_param: - self.assertEquals(vm_name, p.value) + self.assertEqual(vm_name, p.value) counter += 1 elif p.name == tenant_name_param: - self.assertEquals(tenant_name, p.value) + self.assertEqual(tenant_name, p.value) counter += 1 elif p.name == role_name_param: - self.assertEquals(role_name, 
p.value) + self.assertEqual(role_name, p.value) counter += 1 elif p.name == role_instance_name_param: - self.assertEquals(role_instance_name, p.value) + self.assertEqual(role_instance_name, p.value) counter += 1 elif p.name == container_id_param: - self.assertEquals(container_id, p.value) + self.assertEqual(container_id, p.value) counter += 1 - self.assertEquals(5, counter) + self.assertEqual(5, counter) + + @patch("azurelinuxagent.ga.monitor.MonitorHandler.send_telemetry_heartbeat") + @patch("azurelinuxagent.ga.monitor.MonitorHandler.collect_and_send_events") + @patch("azurelinuxagent.ga.monitor.MonitorHandler.send_host_plugin_heartbeat") + @patch("azurelinuxagent.ga.monitor.MonitorHandler.send_cgroup_telemetry") + @patch("azurelinuxagent.ga.monitor.MonitorHandler.send_imds_heartbeat") + def test_heartbeats(self, + patch_imds_heartbeat, + patch_cgroup_telemetry, + patch_hostplugin_heartbeat, + patch_send_events, + patch_telemetry_heartbeat, + *args): + monitor_handler = get_monitor_handler() + + MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD = timedelta(milliseconds=100) + MonitorHandler.EVENT_COLLECTION_PERIOD = timedelta(milliseconds=100) + MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD = timedelta(milliseconds=100) + MonitorHandler.IMDS_HEARTBEAT_PERIOD = timedelta(milliseconds=100) + + self.assertEqual(0, patch_hostplugin_heartbeat.call_count) + self.assertEqual(0, patch_send_events.call_count) + self.assertEqual(0, patch_telemetry_heartbeat.call_count) + self.assertEqual(0, patch_imds_heartbeat.call_count) + self.assertEqual(0, patch_cgroup_telemetry.call_count) + + monitor_handler.start() + time.sleep(1) + self.assertTrue(monitor_handler.is_alive()) + + self.assertNotEqual(0, patch_hostplugin_heartbeat.call_count) + self.assertNotEqual(0, patch_send_events.call_count) + self.assertNotEqual(0, patch_telemetry_heartbeat.call_count) + self.assertNotEqual(0, patch_imds_heartbeat.call_count) + self.assertNotEqual(0, patch_cgroup_telemetry.call_count) + + monitor_handler.stop() + + @patch("azurelinuxagent.ga.monitor.MonitorHandler.send_cgroup_telemetry") + def test_heartbeat_timings_updates_after_window(self, *args): + monitor_handler = get_monitor_handler() + + MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD = timedelta(milliseconds=100) + MonitorHandler.EVENT_COLLECTION_PERIOD = timedelta(milliseconds=100) + MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD = timedelta(milliseconds=100) + MonitorHandler.IMDS_HEARTBEAT_PERIOD = timedelta(milliseconds=100) + + self.assertEqual(None, monitor_handler.last_host_plugin_heartbeat) + self.assertEqual(None, monitor_handler.last_event_collection) + self.assertEqual(None, monitor_handler.last_telemetry_heartbeat) + self.assertEqual(None, monitor_handler.last_imds_heartbeat) + + monitor_handler.start() + time.sleep(0.2) + self.assertTrue(monitor_handler.is_alive()) + + self.assertNotEqual(None, monitor_handler.last_host_plugin_heartbeat) + self.assertNotEqual(None, monitor_handler.last_event_collection) + self.assertNotEqual(None, monitor_handler.last_telemetry_heartbeat) + self.assertNotEqual(None, monitor_handler.last_imds_heartbeat) + + heartbeat_hostplugin = monitor_handler.last_host_plugin_heartbeat + heartbeat_imds = monitor_handler.last_imds_heartbeat + heartbeat_telemetry = monitor_handler.last_telemetry_heartbeat + events_collection = monitor_handler.last_event_collection + + time.sleep(0.5) + + self.assertNotEqual(heartbeat_imds, monitor_handler.last_imds_heartbeat) + self.assertNotEqual(heartbeat_hostplugin, monitor_handler.last_host_plugin_heartbeat) + 
self.assertNotEqual(events_collection, monitor_handler.last_event_collection) + self.assertNotEqual(heartbeat_telemetry, monitor_handler.last_telemetry_heartbeat) + + monitor_handler.stop() + + @patch("azurelinuxagent.ga.monitor.MonitorHandler.send_cgroup_telemetry") + def test_heartbeat_timings_no_updates_within_window(self, *args): + monitor_handler = get_monitor_handler() + + MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD = timedelta(seconds=1) + MonitorHandler.EVENT_COLLECTION_PERIOD = timedelta(seconds=1) + MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD = timedelta(seconds=1) + MonitorHandler.IMDS_HEARTBEAT_PERIOD = timedelta(seconds=1) + + self.assertEqual(None, monitor_handler.last_host_plugin_heartbeat) + self.assertEqual(None, monitor_handler.last_event_collection) + self.assertEqual(None, monitor_handler.last_telemetry_heartbeat) + self.assertEqual(None, monitor_handler.last_imds_heartbeat) + + monitor_handler.start() + time.sleep(0.2) + self.assertTrue(monitor_handler.is_alive()) + + self.assertNotEqual(None, monitor_handler.last_host_plugin_heartbeat) + self.assertNotEqual(None, monitor_handler.last_event_collection) + self.assertNotEqual(None, monitor_handler.last_telemetry_heartbeat) + self.assertNotEqual(None, monitor_handler.last_imds_heartbeat) + + heartbeat_hostplugin = monitor_handler.last_host_plugin_heartbeat + heartbeat_imds = monitor_handler.last_imds_heartbeat + heartbeat_telemetry = monitor_handler.last_telemetry_heartbeat + events_collection = monitor_handler.last_event_collection + + time.sleep(0.5) + + self.assertEqual(heartbeat_hostplugin, monitor_handler.last_host_plugin_heartbeat) + self.assertEqual(heartbeat_imds, monitor_handler.last_imds_heartbeat) + self.assertEqual(events_collection, monitor_handler.last_event_collection) + self.assertEqual(heartbeat_telemetry, monitor_handler.last_telemetry_heartbeat) + + monitor_handler.stop() + + @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_heartbeat") + def test_heartbeat_creates_signal(self, patch_report_heartbeat, *args): + monitor_handler = get_monitor_handler() + monitor_handler.init_protocols() + monitor_handler.last_host_plugin_heartbeat = datetime.datetime.utcnow() - timedelta(hours=1) + monitor_handler.send_host_plugin_heartbeat() + self.assertEqual(1, patch_report_heartbeat.call_count) + self.assertEqual(0, args[5].call_count) + monitor_handler.stop() + + @patch('azurelinuxagent.common.errorstate.ErrorState.is_triggered', return_value=True) + @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_heartbeat") + def test_failed_heartbeat_creates_telemetry(self, patch_report_heartbeat, _, *args): + monitor_handler = get_monitor_handler() + monitor_handler.init_protocols() + monitor_handler.last_host_plugin_heartbeat = datetime.datetime.utcnow() - timedelta(hours=1) + monitor_handler.send_host_plugin_heartbeat() + self.assertEqual(1, patch_report_heartbeat.call_count) + self.assertEqual(1, args[5].call_count) + self.assertEqual('HostPluginHeartbeatExtended', args[5].call_args[1]['op']) + self.assertEqual(False, args[5].call_args[1]['is_success']) + monitor_handler.stop() + + +@patch('azurelinuxagent.common.event.EventLogger.add_event') +@patch("azurelinuxagent.common.utils.restutil.http_post") +@patch("azurelinuxagent.common.utils.restutil.http_get") +@patch('azurelinuxagent.common.protocol.wire.WireClient.get_goal_state') +@patch('azurelinuxagent.common.protocol.util.ProtocolUtil.get_protocol', return_value=WireProtocol('endpoint')) +class 
TestMonitorFailure(AgentTestCase): + + @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_heartbeat") + def test_error_heartbeat_creates_no_signal(self, patch_report_heartbeat, *args): + patch_http_get = args[2] + patch_add_event = args[4] + + monitor_handler = get_monitor_handler() + monitor_handler.init_protocols() + monitor_handler.last_host_plugin_heartbeat = datetime.datetime.utcnow() - timedelta(hours=1) + + patch_http_get.side_effect = IOError('client error') + monitor_handler.send_host_plugin_heartbeat() + + # health report should not be made + self.assertEqual(0, patch_report_heartbeat.call_count) + + # telemetry with failure details is sent + self.assertEqual(1, patch_add_event.call_count) + self.assertEqual('HostPluginHeartbeat', patch_add_event.call_args[1]['op']) + self.assertTrue('client error' in patch_add_event.call_args[1]['message']) + + self.assertEqual(False, patch_add_event.call_args[1]['is_success']) + monitor_handler.stop() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/ga/test_remoteaccess_handler.py walinuxagent-2.2.32/tests/ga/test_remoteaccess_handler.py --- walinuxagent-2.2.21+really2.2.20/tests/ga/test_remoteaccess_handler.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/ga/test_remoteaccess_handler.py 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,527 @@ +# Copyright Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.6+ and Openssl 1.0+ +# + +from datetime import timedelta + +from azurelinuxagent.common.exception import RemoteAccessError +from azurelinuxagent.common.protocol.wire import * +from azurelinuxagent.ga.remoteaccess import RemoteAccessHandler +from tests.common.osutil.mock_osutil import MockOSUtil +from tests.tools import * + + +info_messages = [] +error_messages = [] + + +def get_user_dictionary(users): + user_dictionary = {} + for user in users: + user_dictionary[user[0]] = user + return user_dictionary + + +def log_info(msg_format, *args): + info_messages.append(msg_format.format(args)) + + +def log_error(msg_format, *args): + error_messages.append(msg_format.format(args)) + + +def mock_add_event(name, op, is_success, version, message): + TestRemoteAccessHandler.eventing_data = (name, op, is_success, version, message) + + +class TestRemoteAccessHandler(AgentTestCase): + eventing_data = [()] + + def setUp(self): + super(TestRemoteAccessHandler, self).setUp() + del info_messages[:] + del error_messages[:] + for data in TestRemoteAccessHandler.eventing_data: + del data + + # add_user tests + @patch('azurelinuxagent.common.logger.Logger.info', side_effect=log_info) + @patch('azurelinuxagent.common.logger.Logger.error', side_effect=log_error) + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") + def test_add_user(self, _1, _2, _3): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + tstpassword = "]aPPEv}uNg1FPnl?" 
+ tstuser = "foobar" + expiration_date = datetime.utcnow() + timedelta(days=1) + pwd = tstpassword + rah.add_user(tstuser, pwd, expiration_date) + users = get_user_dictionary(rah.os_util.get_users()) + self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) + actual_user = users[tstuser] + expected_expiration = (expiration_date + timedelta(days=1)).strftime("%Y-%m-%d") + self.assertEqual(actual_user[7], expected_expiration) + self.assertEqual(actual_user[4], "JIT_Account") + self.assertEqual(0, len(error_messages)) + self.assertEqual(1, len(info_messages)) + self.assertEqual(info_messages[0], "User '{0}' added successfully with expiration in {1}" + .format(tstuser, expected_expiration)) + + @patch('azurelinuxagent.common.logger.Logger.info', side_effect=log_info) + @patch('azurelinuxagent.common.logger.Logger.error', side_effect=log_error) + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") + def test_add_user_bad_creation_data(self, _1, _2, _3): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + tstpassword = "]aPPEv}uNg1FPnl?" + tstuser = "" + expiration = datetime.utcnow() + timedelta(days=1) + pwd = tstpassword + error = "Error adding user {0}. test exception for bad username".format(tstuser) + self.assertRaisesRegex(RemoteAccessError, error, rah.add_user, tstuser, pwd, expiration) + self.assertEqual(0, len(rah.os_util.get_users())) + self.assertEqual(0, len(error_messages)) + self.assertEqual(0, len(info_messages)) + + @patch('azurelinuxagent.common.logger.Logger.info', side_effect=log_info) + @patch('azurelinuxagent.common.logger.Logger.error', side_effect=log_error) + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="") + def test_add_user_bad_password_data(self, _1, _2, _3): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + tstpassword = "" + tstuser = "foobar" + expiration = datetime.utcnow() + timedelta(days=1) + pwd = tstpassword + error = "Error adding user {0} cleanup successful\nInner error: test exception for bad password".format(tstuser) + self.assertRaisesRegex(RemoteAccessError, error, rah.add_user, tstuser, pwd, expiration) + self.assertEqual(0, len(rah.os_util.get_users())) + self.assertEqual(0, len(error_messages)) + self.assertEqual(1, len(info_messages)) + self.assertEqual("User deleted {0}".format(tstuser), info_messages[0]) + + + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', + return_value="]aPPEv}uNg1FPnl?") + def test_add_user_already_existing(self, _): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + tstpassword = "]aPPEv}uNg1FPnl?" + tstuser = "foobar" + expiration_date = datetime.utcnow() + timedelta(days=1) + pwd = tstpassword + rah.add_user(tstuser, pwd, expiration_date) + users = get_user_dictionary(rah.os_util.get_users()) + self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) + self.assertEqual(1, len(users.keys())) + actual_user = users[tstuser] + self.assertEqual(actual_user[7], (expiration_date + timedelta(days=1)).strftime("%Y-%m-%d")) + # add the new duplicate user, ensure it's not created and does not overwrite the existing user. 
+ # this does not test the user add function as that's mocked, it tests processing skips the remaining + # calls after the initial failure + new_user_expiration = datetime.utcnow() + timedelta(days=5) + self.assertRaises(RemoteAccessError, rah.add_user, tstuser, pwd, new_user_expiration) + # refresh users + users = get_user_dictionary(rah.os_util.get_users()) + self.assertTrue(tstuser in users, "{0} missing from users after dup user attempted".format(tstuser)) + self.assertEqual(1, len(users.keys())) + actual_user = users[tstuser] + self.assertEqual(actual_user[7], (expiration_date + timedelta(days=1)).strftime("%Y-%m-%d")) + + # delete_user tests + @patch('azurelinuxagent.common.logger.Logger.info', side_effect=log_info) + @patch('azurelinuxagent.common.logger.Logger.error', side_effect=log_error) + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") + def test_delete_user(self, _1, _2, _3): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + tstpassword = "]aPPEv}uNg1FPnl?" + tstuser = "foobar" + expiration_date = datetime.utcnow() + timedelta(days=1) + expected_expiration = (expiration_date + timedelta(days=1)).strftime("%Y-%m-%d") + pwd = tstpassword + rah.add_user(tstuser, pwd, expiration_date) + users = get_user_dictionary(rah.os_util.get_users()) + self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) + rah.delete_user(tstuser) + # refresh users + users = get_user_dictionary(rah.os_util.get_users()) + self.assertFalse(tstuser in users) + self.assertEqual(0, len(error_messages)) + self.assertEqual(2, len(info_messages)) + self.assertEqual("User '{0}' added successfully with expiration in {1}".format(tstuser, expected_expiration), + info_messages[0]) + self.assertEqual("User deleted {0}".format(tstuser), info_messages[1]) + + def test_handle_failed_create_with_bad_data(self): + mock_os_util = MockOSUtil() + testusr = "foobar" + mock_os_util.all_users[testusr] = (testusr, None, None, None, None, None, None, None) + rah = RemoteAccessHandler() + rah.os_util = mock_os_util + self.assertRaises(RemoteAccessError, rah.handle_failed_create, "") + users = get_user_dictionary(rah.os_util.get_users()) + self.assertEqual(1, len(users.keys())) + self.assertTrue(testusr in users, "Expected user {0} missing".format(testusr)) + + @patch('azurelinuxagent.common.logger.Logger.info', side_effect=log_info) + @patch('azurelinuxagent.common.logger.Logger.error', side_effect=log_error) + def test_delete_user_does_not_exist(self, _1, _2): + mock_os_util = MockOSUtil() + testusr = "foobar" + mock_os_util.all_users[testusr] = (testusr, None, None, None, None, None, None, None) + rah = RemoteAccessHandler() + rah.os_util = mock_os_util + testuser = "Carl" + error = "Failed to clean up after account creation for {0}.\n" \ + "Inner error: test exception, user does not exist to delete".format(testuser) + self.assertRaisesRegex(RemoteAccessError, error, rah.handle_failed_create, testuser) + users = get_user_dictionary(rah.os_util.get_users()) + self.assertEqual(1, len(users.keys())) + self.assertTrue(testusr in users, "Expected user {0} missing".format(testusr)) + self.assertEqual(0, len(error_messages)) + self.assertEqual(0, len(info_messages)) + + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', + return_value="]aPPEv}uNg1FPnl?") + def test_handle_new_user(self, _): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + data_str = load_data('wire/remote_access_single_account.xml') + remote_access 
= RemoteAccess(data_str) + tstuser = remote_access.user_list.users[0].name + expiration_date = datetime.utcnow() + timedelta(days=1) + expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" + remote_access.user_list.users[0].expiration = expiration + rah.remote_access = remote_access + rah.handle_remote_access() + users = get_user_dictionary(rah.os_util.get_users()) + self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) + actual_user = users[tstuser] + expected_expiration = (expiration_date + timedelta(days=1)).strftime("%Y-%m-%d") + self.assertEqual(actual_user[7], expected_expiration) + self.assertEqual(actual_user[4], "JIT_Account") + + def test_do_not_add_expired_user(self): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + data_str = load_data('wire/remote_access_single_account.xml') + remote_access = RemoteAccess(data_str) + expiration = (datetime.utcnow() - timedelta(days=2)).strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" + remote_access.user_list.users[0].expiration = expiration + rah.remote_access = remote_access + rah.handle_remote_access() + users = get_user_dictionary(rah.os_util.get_users()) + self.assertFalse("testAccount" in users) + + @patch('azurelinuxagent.common.logger.Logger.info', side_effect=log_info) + @patch('azurelinuxagent.common.logger.Logger.error', side_effect=log_error) + def test_error_add_user(self, _1, _2): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + tstuser = "foobar" + expiration = datetime.utcnow() + timedelta(days=1) + pwd = "bad password" + error = "Error adding user foobar cleanup successful\n" \ + "Inner error: \[CryptError\] Error decoding secret\n" \ + "Inner error: Incorrect padding".format(tstuser) + self.assertRaisesRegex(RemoteAccessError, error, rah.add_user, tstuser, pwd, expiration) + users = get_user_dictionary(rah.os_util.get_users()) + self.assertEqual(0, len(users)) + self.assertEqual(0, len(error_messages)) + self.assertEqual(1, len(info_messages)) + self.assertEqual("User deleted {0}".format(tstuser), info_messages[0]) + + def test_handle_remote_access_no_users(self): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + data_str = load_data('wire/remote_access_no_accounts.xml') + remote_access = RemoteAccess(data_str) + rah.remote_access = remote_access + rah.handle_remote_access() + users = get_user_dictionary(rah.os_util.get_users()) + self.assertEqual(0, len(users.keys())) + + def test_handle_remote_access_validate_jit_user_valid(self): + rah = RemoteAccessHandler() + comment = "JIT_Account" + result = rah.validate_jit_user(comment) + self.assertTrue(result, "Did not identify '{0}' as a JIT_Account".format(comment)) + + def test_handle_remote_access_validate_jit_user_invalid(self): + rah = RemoteAccessHandler() + test_users = ["John Doe", None, "", " "] + failed_results = "" + for user in test_users: + if rah.validate_jit_user(user): + failed_results += "incorrectly identified '{0} as a JIT_Account'. 
".format(user) + if len(failed_results) > 0: + self.fail(failed_results) + + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', + return_value="]aPPEv}uNg1FPnl?") + def test_handle_remote_access_multiple_users(self, _): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + data_str = load_data('wire/remote_access_two_accounts.xml') + remote_access = RemoteAccess(data_str) + testusers = [] + count = 0 + while count < 2: + user = remote_access.user_list.users[count].name + expiration_date = datetime.utcnow() + timedelta(days=count + 1) + expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" + remote_access.user_list.users[count].expiration = expiration + testusers.append(user) + count += 1 + rah.remote_access = remote_access + rah.handle_remote_access() + users = get_user_dictionary(rah.os_util.get_users()) + self.assertTrue(testusers[0] in users, "{0} missing from users".format(testusers[0])) + self.assertTrue(testusers[1] in users, "{0} missing from users".format(testusers[1])) + + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', + return_value="]aPPEv}uNg1FPnl?") + # max fabric supports in the Goal State + def test_handle_remote_access_ten_users(self, _): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + data_str = load_data('wire/remote_access_10_accounts.xml') + remote_access = RemoteAccess(data_str) + count = 0 + for user in remote_access.user_list.users: + count += 1 + user.name = "tstuser{0}".format(count) + expiration_date = datetime.utcnow() + timedelta(days=count) + user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" + rah.remote_access = remote_access + rah.handle_remote_access() + users = get_user_dictionary(rah.os_util.get_users()) + self.assertEqual(10, len(users.keys())) + + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', + return_value="]aPPEv}uNg1FPnl?") + def test_handle_remote_access_user_removed(self, _): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + data_str = load_data('wire/remote_access_10_accounts.xml') + remote_access = RemoteAccess(data_str) + count = 0 + for user in remote_access.user_list.users: + count += 1 + user.name = "tstuser{0}".format(count) + expiration_date = datetime.utcnow() + timedelta(days=count) + user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" + rah.remote_access = remote_access + rah.handle_remote_access() + users = get_user_dictionary(rah.os_util.get_users()) + self.assertEqual(10, len(users.keys())) + del rah.remote_access.user_list.users[:] + self.assertEqual(10, len(users.keys())) + + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', + return_value="]aPPEv}uNg1FPnl?") + def test_handle_remote_access_bad_data_and_good_data(self, _): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + data_str = load_data('wire/remote_access_10_accounts.xml') + remote_access = RemoteAccess(data_str) + count = 0 + for user in remote_access.user_list.users: + count += 1 + user.name = "tstuser{0}".format(count) + if count is 2: + user.name = "" + expiration_date = datetime.utcnow() + timedelta(days=count) + user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" + rah.remote_access = remote_access + rah.handle_remote_access() + users = get_user_dictionary(rah.os_util.get_users()) + self.assertEqual(9, len(users.keys())) + + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', + return_value="]aPPEv}uNg1FPnl?") + 
def test_handle_remote_access_deleted_user_readded(self, _): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + data_str = load_data('wire/remote_access_single_account.xml') + remote_access = RemoteAccess(data_str) + tstuser = remote_access.user_list.users[0].name + expiration_date = datetime.utcnow() + timedelta(days=1) + expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" + remote_access.user_list.users[0].expiration = expiration + rah.remote_access = remote_access + rah.handle_remote_access() + users = get_user_dictionary(rah.os_util.get_users()) + self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) + os_util = rah.os_util + os_util.__class__ = MockOSUtil + os_util.all_users.clear() + # refresh users + users = get_user_dictionary(rah.os_util.get_users()) + self.assertTrue(tstuser not in users) + rah.handle_remote_access() + # refresh users + users = get_user_dictionary(rah.os_util.get_users()) + self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) + + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', + return_value="]aPPEv}uNg1FPnl?") + @patch('azurelinuxagent.common.osutil.get_osutil', + return_value=MockOSUtil()) + @patch('azurelinuxagent.common.protocol.util.ProtocolUtil.get_protocol', + return_value=WireProtocol("12.34.56.78")) + @patch('azurelinuxagent.common.protocol.wire.WireProtocol.get_incarnation', + return_value="1") + @patch('azurelinuxagent.common.protocol.wire.WireClient.get_remote_access', + return_value="asdf") + def test_remote_access_handler_run_bad_data(self, _1, _2, _3, _4, _5): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + tstpassword = "]aPPEv}uNg1FPnl?" + tstuser = "foobar" + expiration_date = datetime.utcnow() + timedelta(days=1) + pwd = tstpassword + rah.add_user(tstuser, pwd, expiration_date) + users = get_user_dictionary(rah.os_util.get_users()) + self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) + rah.run() + self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) + + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', + return_value="]aPPEv}uNg1FPnl?") + def test_handle_remote_access_multiple_users_one_removed(self, _): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + data_str = load_data('wire/remote_access_10_accounts.xml') + remote_access = RemoteAccess(data_str) + count = 0 + for user in remote_access.user_list.users: + count += 1 + user.name = "tstuser{0}".format(count) + expiration_date = datetime.utcnow() + timedelta(days=count) + user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" + rah.remote_access = remote_access + rah.handle_remote_access() + users = rah.os_util.get_users() + self.assertEqual(10, len(users)) + # now remove the user from RemoteAccess + deleted_user = rah.remote_access.user_list.users[3] + del rah.remote_access.user_list.users[3] + rah.handle_remote_access() + users = rah.os_util.get_users() + self.assertTrue(deleted_user not in users, "{0} still in users".format(deleted_user)) + self.assertEqual(9, len(users)) + + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', + return_value="]aPPEv}uNg1FPnl?") + def test_handle_remote_access_multiple_users_null_remote_access(self, _): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + data_str = load_data('wire/remote_access_10_accounts.xml') + remote_access = RemoteAccess(data_str) + count = 0 + for user in remote_access.user_list.users: + 
count += 1 + user.name = "tstuser{0}".format(count) + expiration_date = datetime.utcnow() + timedelta(days=count) + user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" + rah.remote_access = remote_access + rah.handle_remote_access() + users = rah.os_util.get_users() + self.assertEqual(10, len(users)) + # now remove the user from RemoteAccess + rah.remote_access = None + rah.handle_remote_access() + users = rah.os_util.get_users() + self.assertEqual(0, len(users)) + + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', + return_value="]aPPEv}uNg1FPnl?") + def test_handle_remote_access_multiple_users_error_with_null_remote_access(self, _): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + data_str = load_data('wire/remote_access_10_accounts.xml') + remote_access = RemoteAccess(data_str) + count = 0 + for user in remote_access.user_list.users: + count += 1 + user.name = "tstuser{0}".format(count) + expiration_date = datetime.utcnow() + timedelta(days=count) + user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" + rah.remote_access = remote_access + rah.handle_remote_access() + users = rah.os_util.get_users() + self.assertEqual(10, len(users)) + # now remove the user from RemoteAccess + rah.remote_access = None + rah.handle_remote_access() + users = rah.os_util.get_users() + self.assertEqual(0, len(users)) + + def test_remove_user_error(self): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + error = "Failed to delete user {0}\nInner error: test exception, bad data".format("") + self.assertRaisesRegex(RemoteAccessError, error, rah.remove_user, "") + + def test_remove_user_not_exists(self): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + user = "bob" + error = "Failed to delete user {0}\n" \ + "Inner error: test exception, user does not exist to delete".format(user) + self.assertRaisesRegex(RemoteAccessError, error, rah.remove_user, user) + + @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', + return_value="]aPPEv}uNg1FPnl?") + def test_handle_remote_access_remove_and_add(self, _): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + data_str = load_data('wire/remote_access_10_accounts.xml') + remote_access = RemoteAccess(data_str) + count = 0 + for user in remote_access.user_list.users: + count += 1 + user.name = "tstuser{0}".format(count) + expiration_date = datetime.utcnow() + timedelta(days=count) + user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" + rah.remote_access = remote_access + rah.handle_remote_access() + users = rah.os_util.get_users() + self.assertEqual(10, len(users)) + # now remove the user from RemoteAccess + new_user = "tstuser11" + deleted_user = rah.remote_access.user_list.users[3] + rah.remote_access.user_list.users[3].name = new_user + rah.handle_remote_access() + users = rah.os_util.get_users() + self.assertTrue(deleted_user not in users, "{0} still in users".format(deleted_user)) + self.assertTrue(new_user in [u[0] for u in users], "user {0} not in users".format(new_user)) + self.assertEqual(10, len(users)) + + @patch('azurelinuxagent.ga.remoteaccess.add_event', side_effect=mock_add_event) + @patch('azurelinuxagent.common.protocol.util.ProtocolUtil.get_protocol', side_effect=RemoteAccessError("foobar!")) + def test_remote_access_handler_run_error(self, _1, _2): + rah = RemoteAccessHandler() + rah.os_util = MockOSUtil() + rah.run() + print(TestRemoteAccessHandler.eventing_data) + check_message = "foobar!" 
+ self.assertTrue(check_message in TestRemoteAccessHandler.eventing_data[4], + "expected message {0} not found in {1}" + .format(check_message, TestRemoteAccessHandler.eventing_data[4])) + self.assertEqual(False, TestRemoteAccessHandler.eventing_data[2], "is_success is true") diff -Nru walinuxagent-2.2.21+really2.2.20/tests/ga/test_remoteaccess.py walinuxagent-2.2.32/tests/ga/test_remoteaccess.py --- walinuxagent-2.2.21+really2.2.20/tests/ga/test_remoteaccess.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/ga/test_remoteaccess.py 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,94 @@ +# Copyright Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.6+ and Openssl 1.0+ +# +import xml + +from tests.tools import * +from azurelinuxagent.common.protocol.wire import * +from azurelinuxagent.common.osutil import get_osutil + +class TestRemoteAccess(AgentTestCase): + def test_parse_remote_access(self): + data_str = load_data('wire/remote_access_single_account.xml') + remote_access = RemoteAccess(data_str) + self.assertNotEquals(None, remote_access) + self.assertEquals("1", remote_access.incarnation) + self.assertEquals(1, len(remote_access.user_list.users), "User count does not match.") + self.assertEquals("testAccount", remote_access.user_list.users[0].name, "Account name does not match") + self.assertEquals("encryptedPasswordString", remote_access.user_list.users[0].encrypted_password, "Encrypted password does not match.") + self.assertEquals("2019-01-01", remote_access.user_list.users[0].expiration, "Expiration does not match.") + + @patch('azurelinuxagent.common.protocol.wire.WireClient.get_goal_state', + return_value=GoalState(load_data('wire/goal_state.xml'))) + def test_update_remote_access_conf_no_remote_access(self, _): + protocol = WireProtocol('12.34.56.78') + goal_state = protocol.client.get_goal_state() + protocol.client.update_remote_access_conf(goal_state) + + def test_parse_two_remote_access_accounts(self): + data_str = load_data('wire/remote_access_two_accounts.xml') + remote_access = RemoteAccess(data_str) + self.assertNotEquals(None, remote_access) + self.assertEquals("1", remote_access.incarnation) + self.assertEquals(2, len(remote_access.user_list.users), "User count does not match.") + self.assertEquals("testAccount1", remote_access.user_list.users[0].name, "Account name does not match") + self.assertEquals("encryptedPasswordString", remote_access.user_list.users[0].encrypted_password, "Encrypted password does not match.") + self.assertEquals("2019-01-01", remote_access.user_list.users[0].expiration, "Expiration does not match.") + self.assertEquals("testAccount2", remote_access.user_list.users[1].name, "Account name does not match") + self.assertEquals("encryptedPasswordString", remote_access.user_list.users[1].encrypted_password, "Encrypted password does not match.") + self.assertEquals("2019-01-01", remote_access.user_list.users[1].expiration, "Expiration does not match.") + + def 
test_parse_ten_remote_access_accounts(self): + data_str = load_data('wire/remote_access_10_accounts.xml') + remote_access = RemoteAccess(data_str) + self.assertNotEquals(None, remote_access) + self.assertEquals(10, len(remote_access.user_list.users), "User count does not match.") + + def test_parse_duplicate_remote_access_accounts(self): + data_str = load_data('wire/remote_access_duplicate_accounts.xml') + remote_access = RemoteAccess(data_str) + self.assertNotEquals(None, remote_access) + self.assertEquals(2, len(remote_access.user_list.users), "User count does not match.") + self.assertEquals("testAccount", remote_access.user_list.users[0].name, "Account name does not match") + self.assertEquals("encryptedPasswordString", remote_access.user_list.users[0].encrypted_password, "Encrypted password does not match.") + self.assertEquals("2019-01-01", remote_access.user_list.users[0].expiration, "Expiration does not match.") + self.assertEquals("testAccount", remote_access.user_list.users[1].name, "Account name does not match") + self.assertEquals("encryptedPasswordString", remote_access.user_list.users[1].encrypted_password, "Encrypted password does not match.") + self.assertEquals("2019-01-01", remote_access.user_list.users[1].expiration, "Expiration does not match.") + + def test_parse_zero_remote_access_accounts(self): + data_str = load_data('wire/remote_access_no_accounts.xml') + remote_access = RemoteAccess(data_str) + self.assertNotEquals(None, remote_access) + self.assertEquals(0, len(remote_access.user_list.users), "User count does not match.") + + @patch('azurelinuxagent.common.protocol.wire.WireClient.get_goal_state', + return_value=GoalState(load_data('wire/goal_state_remote_access.xml'))) + @patch('azurelinuxagent.common.protocol.wire.WireClient.fetch_config', + return_value=load_data('wire/remote_access_single_account.xml')) + @patch('azurelinuxagent.common.protocol.wire.WireClient.get_header_for_cert') + def test_update_remote_access_conf_remote_access(self, _1, _2, _3): + protocol = WireProtocol('12.34.56.78') + goal_state = protocol.client.get_goal_state() + protocol.client.update_remote_access_conf(goal_state) + self.assertNotEquals(None, protocol.client.remote_access) + self.assertEquals(1, len(protocol.client.remote_access.user_list.users)) + self.assertEquals('testAccount', protocol.client.remote_access.user_list.users[0].name) + self.assertEquals('encryptedPasswordString', protocol.client.remote_access.user_list.users[0].encrypted_password) + + def test_parse_bad_remote_access_data(self): + data = "foobar" + self.assertRaises(xml.parsers.expat.ExpatError, RemoteAccess, data) \ No newline at end of file diff -Nru walinuxagent-2.2.21+really2.2.20/tests/ga/test_update.py walinuxagent-2.2.32/tests/ga/test_update.py --- walinuxagent-2.2.21+really2.2.20/tests/ga/test_update.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/ga/test_update.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,28 +1,8 @@ -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the Apache License. from __future__ import print_function -from datetime import datetime - -import json -import shutil -import stat - from azurelinuxagent.common.event import * from azurelinuxagent.common.protocol.hostplugin import * from azurelinuxagent.common.protocol.metadata import * @@ -152,7 +132,7 @@ if len(agents) <= 0: agents = get_agent_pkgs() for agent in agents: - fileutil.copy_file(agent, to_dir=self.tmp_dir) + shutil.copy(agent, self.tmp_dir) return def expand_agents(self): @@ -181,7 +161,7 @@ return def prepare_agents(self, - count=5, + count=20, is_available=True): # Ensure the test data is copied over @@ -389,6 +369,30 @@ @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") + def test_resource_gone_error_not_blacklisted(self, mock_loaded, mock_downloaded): + try: + mock_downloaded.side_effect = ResourceGoneError() + agent = GuestAgent(path=self.agent_path) + self.assertFalse(agent.is_blacklisted) + except ResourceGoneError: + pass + except: + self.fail("Exception was not expected!") + + @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") + @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") + def test_ioerror_not_blacklisted(self, mock_loaded, mock_downloaded): + try: + mock_downloaded.side_effect = IOError() + agent = GuestAgent(path=self.agent_path) + self.assertFalse(agent.is_blacklisted) + except IOError: + pass + except: + self.fail("Exception was not expected!") + + @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") + @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_is_downloaded(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(agent.is_downloaded) @@ -510,7 +514,8 @@ @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") @patch("azurelinuxagent.ga.update.restutil.http_get") - def test_download_fallback(self, mock_http_get, mock_loaded, mock_downloaded): + @patch("azurelinuxagent.ga.update.restutil.http_post") + def test_download_fallback(self, mock_http_post, mock_http_get, mock_loaded, mock_downloaded): self.remove_agents() self.assertFalse(os.path.isdir(self.agent_path)) @@ -665,22 +670,17 @@ self.assertEqual(None, self.update_handler.signal_handler) - def test_emit_restart_event_writes_sentinal_file(self): - self.assertFalse(os.path.isfile(self.update_handler._sentinal_file_path())) - self.update_handler._emit_restart_event() - self.assertTrue(os.path.isfile(self.update_handler._sentinal_file_path())) - def test_emit_restart_event_emits_event_if_not_clean_start(self): try: mock_event = self.event_patch.start() - self.update_handler._set_sentinal() + self.update_handler._set_sentinel() self.update_handler._emit_restart_event() self.assertEqual(1, mock_event.call_count) except Exception as e: pass self.event_patch.stop() - def _create_protocol(self, count=5, versions=None): + def _create_protocol(self, count=20, versions=None): latest_version = self.prepare_agents(count=count) if versions is None or len(versions) <= 0: versions = [latest_version] @@ -842,8 +842,8 @@ self.update_handler._set_agents([GuestAgent(path=path) for path in self.agent_dirs()]) 
self.assertEqual(len(self.agent_dirs()), len(self.update_handler.agents)) - kept_agents = self.update_handler.agents[1::2] - blacklisted_agents = self.update_handler.agents[::2] + kept_agents = self.update_handler.agents[::2] + blacklisted_agents = self.update_handler.agents[1::2] for agent in blacklisted_agents: agent.mark_failure(is_fatal=True) self.update_handler._filter_blacklisted_agents() @@ -879,7 +879,8 @@ protocol = WireProtocol('12.34.56.78') mock_get_host.return_value = "faux host" host = self.update_handler._get_host_plugin(protocol=protocol) - mock_get_host.assert_called_once() + print("mock_get_host call cound={0}".format(mock_get_host.call_count)) + self.assertEqual(1, mock_get_host.call_count) self.assertEqual("faux host", host) @patch('azurelinuxagent.common.protocol.wire.WireClient.get_host_plugin') @@ -936,16 +937,16 @@ for p in pid_files: self.assertTrue(pid_re.match(os.path.basename(p))) - def test_is_clean_start_returns_true_when_no_sentinal(self): - self.assertFalse(os.path.isfile(self.update_handler._sentinal_file_path())) + def test_is_clean_start_returns_true_when_no_sentinel(self): + self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path())) self.assertTrue(self.update_handler._is_clean_start) - def test_is_clean_start_returns_false_when_sentinal_exists(self): - self.update_handler._set_sentinal(agent=CURRENT_AGENT) + def test_is_clean_start_returns_false_when_sentinel_exists(self): + self.update_handler._set_sentinel(agent=CURRENT_AGENT) self.assertFalse(self.update_handler._is_clean_start) def test_is_clean_start_returns_false_for_exceptions(self): - self.update_handler._set_sentinal() + self.update_handler._set_sentinel() with patch("azurelinuxagent.common.utils.fileutil.read_file", side_effect=Exception): self.assertFalse(self.update_handler._is_clean_start) @@ -998,12 +999,23 @@ # Ensure at least three agents initially exist self.assertTrue(2 < len(self.update_handler.agents)) - # Purge every other agent - kept_agents = self.update_handler.agents[1::2] - purged_agents = self.update_handler.agents[::2] + # Purge every other agent. 
Don't add the current version to agents_to_keep explicitly; + # the current version is never purged + agents_to_keep = [] + kept_agents = [] + purged_agents = [] + for i in range(0, len(self.update_handler.agents)): + if self.update_handler.agents[i].version == CURRENT_VERSION: + kept_agents.append(self.update_handler.agents[i]) + else: + if i % 2 == 0: + agents_to_keep.append(self.update_handler.agents[i]) + kept_agents.append(self.update_handler.agents[i]) + else: + purged_agents.append(self.update_handler.agents[i]) # Reload and assert only the kept agents remain on disk - self.update_handler.agents = kept_agents + self.update_handler.agents = agents_to_keep self.update_handler._purge_agents() self.update_handler._find_agents() self.assertEqual( @@ -1193,7 +1205,7 @@ self._test_run_latest() self.assertEqual(0, mock_signal.call_count) - def _test_run(self, invocations=1, calls=[call.run()], enable_updates=False): + def _test_run(self, invocations=1, calls=[call.run()], enable_updates=False, sleep_interval=(3,)): conf.get_autoupdate_enabled = Mock(return_value=enable_updates) # Note: @@ -1212,23 +1224,27 @@ fileutil.write_file(conf.get_agent_pid_file_path(), ustr(42)) with patch('azurelinuxagent.ga.exthandlers.get_exthandlers_handler') as mock_handler: - with patch('azurelinuxagent.ga.monitor.get_monitor_handler') as mock_monitor: - with patch('azurelinuxagent.ga.env.get_env_handler') as mock_env: - with patch('time.sleep', side_effect=iterator) as mock_sleep: - with patch('sys.exit') as mock_exit: - if isinstance(os.getppid, MagicMock): - self.update_handler.run() - else: - with patch('os.getppid', return_value=42): + with patch('azurelinuxagent.ga.remoteaccess.get_remote_access_handler') as mock_ra_handler: + with patch('azurelinuxagent.ga.monitor.get_monitor_handler') as mock_monitor: + with patch('azurelinuxagent.ga.env.get_env_handler') as mock_env: + with patch('time.sleep', side_effect=iterator) as mock_sleep: + with patch('sys.exit') as mock_exit: + if isinstance(os.getppid, MagicMock): self.update_handler.run() - - self.assertEqual(1, mock_handler.call_count) - self.assertEqual(mock_handler.return_value.method_calls, calls) - self.assertEqual(invocations, mock_sleep.call_count) - self.assertEqual(1, mock_monitor.call_count) - self.assertEqual(1, mock_env.call_count) - self.assertEqual(1, mock_exit.call_count) - + else: + with patch('os.getppid', return_value=42): + self.update_handler.run() + + self.assertEqual(1, mock_handler.call_count) + self.assertEqual(mock_handler.return_value.method_calls, calls) + self.assertEqual(1, mock_ra_handler.call_count) + self.assertEqual(mock_ra_handler.return_value.method_calls, calls) + self.assertEqual(invocations, mock_sleep.call_count) + if invocations > 0: + self.assertEqual(sleep_interval, mock_sleep.call_args[0]) + self.assertEqual(1, mock_monitor.call_count) + self.assertEqual(1, mock_env.call_count) + self.assertEqual(1, mock_exit.call_count) def test_run(self): self._test_run() @@ -1244,14 +1260,14 @@ with patch('os.getppid', return_value=1): self._test_run(invocations=0, calls=[], enable_updates=True) - def test_run_clears_sentinal_on_successful_exit(self): + def test_run_clears_sentinel_on_successful_exit(self): self._test_run() - self.assertFalse(os.path.isfile(self.update_handler._sentinal_file_path())) + self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path())) - def test_run_leaves_sentinal_on_unsuccessful_exit(self): + def test_run_leaves_sentinel_on_unsuccessful_exit(self): 
self.update_handler._upgrade_available = Mock(side_effect=Exception) self._test_run(invocations=0, calls=[], enable_updates=True) - self.assertTrue(os.path.isfile(self.update_handler._sentinal_file_path())) + self.assertTrue(os.path.isfile(self.update_handler._sentinel_file_path())) def test_run_emits_restart_event(self): self.update_handler._emit_restart_event = Mock() @@ -1275,31 +1291,31 @@ self.assertTrue(v > a.version) v = a.version - def test_set_sentinal(self): - self.assertFalse(os.path.isfile(self.update_handler._sentinal_file_path())) - self.update_handler._set_sentinal() - self.assertTrue(os.path.isfile(self.update_handler._sentinal_file_path())) + def test_set_sentinel(self): + self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path())) + self.update_handler._set_sentinel() + self.assertTrue(os.path.isfile(self.update_handler._sentinel_file_path())) - def test_set_sentinal_writes_current_agent(self): - self.update_handler._set_sentinal() + def test_set_sentinel_writes_current_agent(self): + self.update_handler._set_sentinel() self.assertTrue( - fileutil.read_file(self.update_handler._sentinal_file_path()), + fileutil.read_file(self.update_handler._sentinel_file_path()), CURRENT_AGENT) def test_shutdown(self): - self.update_handler._set_sentinal() + self.update_handler._set_sentinel() self.update_handler._shutdown() self.assertFalse(self.update_handler.running) - self.assertFalse(os.path.isfile(self.update_handler._sentinal_file_path())) + self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path())) - def test_shutdown_ignores_missing_sentinal_file(self): - self.assertFalse(os.path.isfile(self.update_handler._sentinal_file_path())) + def test_shutdown_ignores_missing_sentinel_file(self): + self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path())) self.update_handler._shutdown() self.assertFalse(self.update_handler.running) - self.assertFalse(os.path.isfile(self.update_handler._sentinal_file_path())) + self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path())) def test_shutdown_ignores_exceptions(self): - self.update_handler._set_sentinal() + self.update_handler._set_sentinel() try: with patch("os.remove", side_effect=Exception): @@ -1312,7 +1328,7 @@ base_version=FlexibleVersion(AGENT_VERSION), protocol=None, versions=None, - count=5): + count=20): if protocol is None: protocol = self._create_protocol(count=count, versions=versions) @@ -1360,7 +1376,7 @@ def test_upgrade_available_purges_old_agents(self): self.prepare_agents() agent_count = self.agent_count() - self.assertEqual(5, agent_count) + self.assertEqual(20, agent_count) agent_versions = self.agent_versions()[:3] self.assertTrue(self._test_upgrade_available(versions=agent_versions)) @@ -1477,6 +1493,170 @@ self.assertTrue(ga_manifest_2.allowed_versions[0] == '2.2.13') self.assertTrue(ga_manifest_2.allowed_versions[1] == '2.2.14') + @patch('azurelinuxagent.common.conf.get_extensions_enabled', return_value=False) + def test_update_happens_when_extensions_disabled(self, _): + """ + Although the extension enabled config will not get checked + before an update is found, this test attempts to ensure that + behavior never changes. 
+ """ + self.update_handler._upgrade_available = Mock(return_value=True) + self._test_run(invocations=0, calls=[], enable_updates=True, sleep_interval=(300,)) + + @patch('azurelinuxagent.common.conf.get_extensions_enabled', return_value=False) + def test_interval_changes_when_extensions_disabled(self, _): + """ + When extension processing is disabled, the goal state interval should be larger. + """ + self.update_handler._upgrade_available = Mock(return_value=False) + self._test_run(invocations=15, calls=[call.run()] * 15, sleep_interval=(300,)) + + +class MonitorThreadTest(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) + self.event_patch = patch('azurelinuxagent.common.event.add_event') + self.update_handler = get_update_handler() + self.update_handler.protocol_util = Mock() + + def _test_run(self, invocations=1): + iterations = [0] + def iterator(*args, **kwargs): + iterations[0] += 1 + if iterations[0] >= invocations: + self.update_handler.running = False + return + + with patch('os.getpid', return_value=42): + with patch.object(UpdateHandler, '_is_orphaned') as mock_is_orphaned: + mock_is_orphaned.__get__ = Mock(return_value=False) + with patch('azurelinuxagent.ga.exthandlers.get_exthandlers_handler') as mock_handler: + with patch('azurelinuxagent.ga.remoteaccess.get_remote_access_handler') as mock_ra_handler: + with patch('time.sleep', side_effect=iterator) as mock_sleep: + with patch('sys.exit') as mock_exit: + self.update_handler.run() + + @patch('azurelinuxagent.ga.monitor.get_monitor_handler') + @patch('azurelinuxagent.ga.env.get_env_handler') + def test_start_threads(self, mock_env, mock_monitor): + self.assertTrue(self.update_handler.running) + + mock_monitor_thread = MagicMock() + mock_monitor_thread.run = MagicMock() + mock_monitor.return_value = mock_monitor_thread + + mock_env_thread = MagicMock() + mock_env_thread.run = MagicMock() + mock_env.return_value = mock_env_thread + + self._test_run(invocations=0) + self.assertEqual(1, mock_monitor.call_count) + self.assertEqual(1, mock_monitor_thread.run.call_count) + self.assertEqual(1, mock_env.call_count) + self.assertEqual(1, mock_env_thread.run.call_count) + + @patch('azurelinuxagent.ga.monitor.get_monitor_handler') + @patch('azurelinuxagent.ga.env.get_env_handler') + def test_check_if_monitor_thread_is_alive(self, mock_env, mock_monitor): + self.assertTrue(self.update_handler.running) + + mock_monitor_thread = MagicMock() + mock_monitor_thread.run = MagicMock() + mock_monitor_thread.is_alive = MagicMock(return_value=True) + mock_monitor_thread.start = MagicMock() + mock_monitor.return_value = mock_monitor_thread + + self._test_run(invocations=0) + self.assertEqual(1, mock_monitor.call_count) + self.assertEqual(1, mock_monitor_thread.run.call_count) + self.assertEqual(1, mock_monitor_thread.is_alive.call_count) + self.assertEqual(0, mock_monitor_thread.start.call_count) + + @patch('azurelinuxagent.ga.monitor.get_monitor_handler') + @patch('azurelinuxagent.ga.env.get_env_handler') + def test_check_if_env_thread_is_alive(self, mock_env, mock_monitor): + self.assertTrue(self.update_handler.running) + + mock_env_thread = MagicMock() + mock_env_thread.run = MagicMock() + mock_env_thread.is_alive = MagicMock(return_value=True) + mock_env_thread.start = MagicMock() + mock_env.return_value = mock_env_thread + + self._test_run(invocations=1) + self.assertEqual(1, mock_env.call_count) + self.assertEqual(1, mock_env_thread.run.call_count) + self.assertEqual(1, mock_env_thread.is_alive.call_count) + self.assertEqual(0, 
mock_env_thread.start.call_count) + + @patch('azurelinuxagent.ga.monitor.get_monitor_handler') + @patch('azurelinuxagent.ga.env.get_env_handler') + def test_restart_monitor_thread_if_not_alive(self, mock_env, mock_monitor): + self.assertTrue(self.update_handler.running) + + mock_monitor_thread = MagicMock() + mock_monitor_thread.run = MagicMock() + mock_monitor_thread.is_alive = MagicMock(return_value=False) + mock_monitor_thread.start = MagicMock() + mock_monitor.return_value = mock_monitor_thread + + self._test_run(invocations=1) + self.assertEqual(1, mock_monitor.call_count) + self.assertEqual(1, mock_monitor_thread.run.call_count) + self.assertEqual(1, mock_monitor_thread.is_alive.call_count) + self.assertEqual(1, mock_monitor_thread.start.call_count) + + @patch('azurelinuxagent.ga.monitor.get_monitor_handler') + @patch('azurelinuxagent.ga.env.get_env_handler') + def test_restart_env_thread_if_not_alive(self, mock_env, mock_monitor): + self.assertTrue(self.update_handler.running) + + mock_env_thread = MagicMock() + mock_env_thread.run = MagicMock() + mock_env_thread.is_alive = MagicMock(return_value=False) + mock_env_thread.start = MagicMock() + mock_env.return_value = mock_env_thread + + self._test_run(invocations=1) + self.assertEqual(1, mock_env.call_count) + self.assertEqual(1, mock_env_thread.run.call_count) + self.assertEqual(1, mock_env_thread.is_alive.call_count) + self.assertEqual(1, mock_env_thread.start.call_count) + + @patch('azurelinuxagent.ga.monitor.get_monitor_handler') + @patch('azurelinuxagent.ga.env.get_env_handler') + def test_restart_monitor_thread(self, mock_env, mock_monitor): + self.assertTrue(self.update_handler.running) + + mock_monitor_thread = MagicMock() + mock_monitor_thread.run = MagicMock() + mock_monitor_thread.is_alive = MagicMock(return_value=False) + mock_monitor_thread.start = MagicMock() + mock_monitor.return_value = mock_monitor_thread + + self._test_run(invocations=0) + self.assertEqual(True, mock_monitor.called) + self.assertEqual(True, mock_monitor_thread.run.called) + self.assertEqual(True, mock_monitor_thread.is_alive.called) + self.assertEqual(True, mock_monitor_thread.start.called) + + @patch('azurelinuxagent.ga.monitor.get_monitor_handler') + @patch('azurelinuxagent.ga.env.get_env_handler') + def test_restart_env_thread(self, mock_env, mock_monitor): + self.assertTrue(self.update_handler.running) + + mock_env_thread = MagicMock() + mock_env_thread.run = MagicMock() + mock_env_thread.is_alive = MagicMock(return_value=False) + mock_env_thread.start = MagicMock() + mock_env.return_value = mock_env_thread + + self._test_run(invocations=0) + self.assertEqual(True, mock_env.called) + self.assertEqual(True, mock_env_thread.run.called) + self.assertEqual(True, mock_env_thread.is_alive.called) + self.assertEqual(True, mock_env_thread.start.called) + class ChildMock(Mock): def __init__(self, return_value=0, side_effect=None): @@ -1550,6 +1730,7 @@ self.call_counts["update_goal_state"] += 1 self.goal_state_forced = self.goal_state_forced or forced + class ResponseMock(Mock): def __init__(self, status=restutil.httpclient.OK, response=None, reason=None): Mock.__init__(self) @@ -1579,5 +1760,6 @@ self.next_time += self.time_increment return current_time + if __name__ == '__main__': unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/__init__.py walinuxagent-2.2.32/tests/__init__.py --- walinuxagent-2.2.21+really2.2.20/tests/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/__init__.py 2018-09-30 
15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/tests/pa/__init__.py walinuxagent-2.2.32/tests/pa/__init__.py --- walinuxagent-2.2.21+really2.2.20/tests/pa/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/pa/__init__.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/tests/pa/test_deprovision.py walinuxagent-2.2.32/tests/pa/test_deprovision.py --- walinuxagent-2.2.21+really2.2.20/tests/pa/test_deprovision.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/pa/test_deprovision.py 2018-09-30 15:05:27.000000000 +0000 @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import signal @@ -53,68 +53,6 @@ dh.run(force=True) self.assertEqual(2, dh.do_actions.call_count) - @patch("azurelinuxagent.pa.deprovision.default.DeprovisionHandler.cloud_init_dirs") - @patch("azurelinuxagent.pa.deprovision.default.DeprovisionHandler.cloud_init_files") - def test_del_cloud_init_without_once(self, - mock_files, - mock_dirs): - deprovision_handler = get_deprovision_handler("","","") - deprovision_handler.del_cloud_init([], [], - include_once=False, deluser=False) - - mock_dirs.assert_called_with(include_once=False) - mock_files.assert_called_with(include_once=False, deluser=False) - - @patch("signal.signal") - @patch("azurelinuxagent.common.protocol.get_protocol_util") - @patch("azurelinuxagent.common.osutil.get_osutil") - @patch("azurelinuxagent.pa.deprovision.default.DeprovisionHandler.cloud_init_dirs") - @patch("azurelinuxagent.pa.deprovision.default.DeprovisionHandler.cloud_init_files") - def test_del_cloud_init(self, - mock_files, - mock_dirs, - mock_osutil, - mock_util, - mock_signal): - try: - with tempfile.NamedTemporaryFile() as f: - warnings = [] - actions = [] - - dirs = [tempfile.mkdtemp()] - mock_dirs.return_value = dirs - - files = [f.name] - mock_files.return_value = files - - deprovision_handler = get_deprovision_handler("","","") - deprovision_handler.del_cloud_init(warnings, actions, - deluser=True) - - mock_dirs.assert_called_with(include_once=True) - mock_files.assert_called_with(include_once=True, deluser=True) - - self.assertEqual(len(warnings), 0) - self.assertEqual(len(actions), 2) - for da in actions: - if da.func == fileutil.rm_dirs: - self.assertEqual(da.args, dirs) - elif da.func == fileutil.rm_files: - self.assertEqual(da.args, files) - else: - self.assertTrue(False) - - try: - for da in actions: - da.invoke() - self.assertEqual(len([d for d in dirs if os.path.isdir(d)]), 0) - 
self.assertEqual(len([f for f in files if os.path.isfile(f)]), 0) - except Exception as e: - self.assertTrue(False, "Exception {0}".format(e)) - except OSError: - # Ignore the error caused by removing the file within the "with" - pass - @distros("ubuntu") @patch('azurelinuxagent.common.conf.get_lib_dir') def test_del_lib_dir_files(self, @@ -122,6 +60,11 @@ distro_version, distro_full_name, mock_conf): + dirs = [ + 'WALinuxAgent-2.2.26/config', + 'Microsoft.Azure.Extensions.CustomScript-2.0.6/config', + 'Microsoft.Azure.Extensions.CustomScript-2.0.6/status' + ] files = [ 'HostingEnvironmentConfig.xml', 'Incarnation', @@ -133,11 +76,19 @@ 'GoalState.1.xml', 'Extensions.2.xml', 'ExtensionsConfig.2.xml', - 'GoalState.2.xml' + 'GoalState.2.xml', + 'Microsoft.Azure.Extensions.CustomScript-2.0.6/config/42.settings', + 'Microsoft.Azure.Extensions.CustomScript-2.0.6/config/HandlerStatus', + 'Microsoft.Azure.Extensions.CustomScript-2.0.6/config/HandlerState', + 'Microsoft.Azure.Extensions.CustomScript-2.0.6/status/12.notstatus', + 'Microsoft.Azure.Extensions.CustomScript-2.0.6/mrseq', + 'WALinuxAgent-2.2.26/config/0.settings' ] tmp = tempfile.mkdtemp() mock_conf.return_value = tmp + for d in dirs: + fileutil.mkdir(os.path.join(tmp, d)) for f in files: fileutil.write_file(os.path.join(tmp, f), "Value") @@ -147,14 +98,18 @@ warnings = [] actions = [] deprovision_handler.del_lib_dir_files(warnings, actions) + deprovision_handler.del_ext_handler_files(warnings, actions) self.assertTrue(len(warnings) == 0) - self.assertTrue(len(actions) == 1) + self.assertTrue(len(actions) == 2) self.assertEqual(fileutil.rm_files, actions[0].func) - self.assertTrue(len(actions[0].args) > 0) + self.assertEqual(fileutil.rm_files, actions[1].func) + self.assertEqual(11, len(actions[0].args)) + self.assertEqual(3, len(actions[1].args)) for f in actions[0].args: self.assertTrue(os.path.basename(f) in files) - + for f in actions[1].args: + self.assertTrue(f[len(tmp)+1:] in files) @distros("redhat") def test_deprovision(self, @@ -180,5 +135,6 @@ warnings, actions = deprovision_handler.setup(deluser=False) assert any("/etc/resolvconf/resolv.conf.d/tail" in w for w in warnings) + if __name__ == '__main__': unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/pa/test_provision.py walinuxagent-2.2.32/tests/pa/test_provision.py --- walinuxagent-2.2.21+really2.2.20/tests/pa/test_provision.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/pa/test_provision.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,12 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # -import azurelinuxagent.common.utils.fileutil as fileutil - -from azurelinuxagent.common.exception import ProtocolError +from azurelinuxagent.common.exception import ProvisionError from azurelinuxagent.common.osutil.default import DefaultOSUtil from azurelinuxagent.common.protocol import OVF_FILE_NAME from azurelinuxagent.pa.provision import get_provision_handler @@ -38,7 +36,6 @@ provision_handler.osutil = mock_osutil provision_handler.protocol_util.osutil = mock_osutil - provision_handler.protocol_util.get_protocol_by_file = MagicMock() provision_handler.protocol_util.get_protocol = MagicMock() conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) @@ -67,9 +64,9 @@ ph.run() - ph.is_provisioned.assert_not_called() - ph.report_ready.assert_called_once() - ph.write_provisioned.assert_called_once() + self.assertEqual(0, ph.is_provisioned.call_count) + self.assertEqual(1, ph.report_ready.call_count) + self.assertEqual(1, ph.write_provisioned.call_count) @patch('os.path.isfile', return_value=False) def test_is_provisioned_not_provisioned(self, mock_isfile): @@ -92,8 +89,8 @@ mock_deprovision.return_value = deprovision_handler self.assertTrue(ph.is_provisioned()) - ph.osutil.is_current_instance_id.assert_called_once() - deprovision_handler.run_changed_unique_id.assert_not_called() + self.assertEqual(1, ph.osutil.is_current_instance_id.call_count) + self.assertEqual(0, deprovision_handler.run_changed_unique_id.call_count) @patch('os.path.isfile', return_value=True) @patch('azurelinuxagent.common.utils.fileutil.read_file', @@ -112,8 +109,208 @@ mock_deprovision.return_value = deprovision_handler self.assertTrue(ph.is_provisioned()) - ph.osutil.is_current_instance_id.assert_called_once() - deprovision_handler.run_changed_unique_id.assert_called_once() + self.assertEqual(1, ph.osutil.is_current_instance_id.call_count) + self.assertEqual(1, deprovision_handler.run_changed_unique_id.call_count) + + @distros() + def test_provision_telemetry_pga_false(self, + distro_name, + distro_version, + distro_full_name): + """ + ProvisionGuestAgent flag is 'false' + """ + self._provision_test(distro_name, + distro_version, + distro_full_name, + OVF_FILE_NAME, + 'false', + True) + + @distros() + def test_provision_telemetry_pga_true(self, + distro_name, + distro_version, + distro_full_name): + """ + ProvisionGuestAgent flag is 'true' + """ + self._provision_test(distro_name, + distro_version, + distro_full_name, + 'ovf-env-2.xml', + 'true', + True) + + @distros() + def test_provision_telemetry_pga_empty(self, + distro_name, + distro_version, + distro_full_name): + """ + ProvisionGuestAgent flag is '' + """ + self._provision_test(distro_name, + distro_version, + distro_full_name, + 'ovf-env-3.xml', + 'true', + False) + + @distros() + def test_provision_telemetry_pga_bad(self, + distro_name, + distro_version, + distro_full_name): + """ + ProvisionGuestAgent flag is 'bad data' + """ + self._provision_test(distro_name, + distro_version, + distro_full_name, + 'ovf-env-4.xml', + 'bad data', + True) + + @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.get_instance_id', + return_value='B9F3C233-9913-9F42-8EB3-BA656DF32502') + @patch('azurelinuxagent.pa.provision.default.ProvisionHandler.write_agent_disabled') + def _provision_test(self, + distro_name, + distro_version, + distro_full_name, + ovf_file, + provisionMessage, + expect_success, + patch_write_agent_disabled, + patch_get_instance_id): + """ + Assert that the agent 
issues two telemetry messages as part of a + successful provisioning. + + 1. Provision + 2. GuestState + """ + ph = get_provision_handler(distro_name, + distro_version, + distro_full_name) + ph.report_event = MagicMock() + ph.reg_ssh_host_key = MagicMock(return_value='--thumprint--') + + mock_osutil = MagicMock() + mock_osutil.decode_customdata = Mock(return_value="") + + ph.osutil = mock_osutil + ph.protocol_util.osutil = mock_osutil + ph.protocol_util.get_protocol = MagicMock() + + conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) + ovfenv_file = os.path.join(self.tmp_dir, OVF_FILE_NAME) + ovfenv_data = load_data(ovf_file) + fileutil.write_file(ovfenv_file, ovfenv_data) + + ph.run() + + if expect_success: + self.assertEqual(2, ph.report_event.call_count) + positional_args, kw_args = ph.report_event.call_args_list[0] + # [call('Provisioning succeeded (146473.68s)', duration=65, is_success=True)] + self.assertTrue(re.match(r'Provisioning succeeded \(\d+\.\d+s\)', positional_args[0]) is not None) + self.assertTrue(isinstance(kw_args['duration'], int)) + self.assertTrue(kw_args['is_success']) + + positional_args, kw_args = ph.report_event.call_args_list[1] + self.assertTrue(kw_args['operation'] == 'ProvisionGuestAgent') + self.assertTrue(kw_args['message'] == provisionMessage) + self.assertTrue(kw_args['is_success']) + + expected_disabled = True if provisionMessage == 'false' else False + self.assertTrue(patch_write_agent_disabled.call_count == expected_disabled) + + else: + self.assertEqual(1, ph.report_event.call_count) + positional_args, kw_args = ph.report_event.call_args_list[0] + # [call(u'[ProtocolError] Failed to validate OVF: ProvisionGuestAgent not found')] + self.assertTrue('Failed to validate OVF: ProvisionGuestAgent not found' in positional_args[0]) + self.assertFalse(kw_args['is_success']) + + @distros() + @patch( + 'azurelinuxagent.common.osutil.default.DefaultOSUtil.get_instance_id', + return_value='B9F3C233-9913-9F42-8EB3-BA656DF32502') + def test_provision_telemetry_fail(self, + mock_util, + distro_name, + distro_version, + distro_full_name): + """ + Assert that the agent issues one telemetry message as part of a + failed provisioning. + + 1. 
Provision + """ + ph = get_provision_handler(distro_name, distro_version, + distro_full_name) + ph.report_event = MagicMock() + ph.reg_ssh_host_key = MagicMock(side_effect=ProvisionError( + "--unit-test--")) + + mock_osutil = MagicMock() + mock_osutil.decode_customdata = Mock(return_value="") + + ph.osutil = mock_osutil + ph.protocol_util.osutil = mock_osutil + ph.protocol_util.get_protocol = MagicMock() + + conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) + ovfenv_file = os.path.join(self.tmp_dir, OVF_FILE_NAME) + ovfenv_data = load_data("ovf-env.xml") + fileutil.write_file(ovfenv_file, ovfenv_data) + + ph.run() + positional_args, kw_args = ph.report_event.call_args_list[0] + self.assertTrue(re.match(r'Provisioning failed: \[ProvisionError\] --unit-test-- \(\d+\.\d+s\)', positional_args[0]) is not None) + + @patch('azurelinuxagent.pa.provision.default.ProvisionHandler.write_agent_disabled') + @distros() + def test_handle_provision_guest_agent(self, + patch_write_agent_disabled, + distro_name, + distro_version, + distro_full_name): + ph = get_provision_handler(distro_name, + distro_version, + distro_full_name) + + patch_write_agent_disabled.call_count = 0 + + ph.handle_provision_guest_agent(provision_guest_agent='false') + self.assertEqual(1, patch_write_agent_disabled.call_count) + + ph.handle_provision_guest_agent(provision_guest_agent='False') + self.assertEqual(2, patch_write_agent_disabled.call_count) + + ph.handle_provision_guest_agent(provision_guest_agent='FALSE') + self.assertEqual(3, patch_write_agent_disabled.call_count) + + ph.handle_provision_guest_agent(provision_guest_agent='') + self.assertEqual(3, patch_write_agent_disabled.call_count) + + ph.handle_provision_guest_agent(provision_guest_agent=' ') + self.assertEqual(3, patch_write_agent_disabled.call_count) + + ph.handle_provision_guest_agent(provision_guest_agent=None) + self.assertEqual(3, patch_write_agent_disabled.call_count) + + ph.handle_provision_guest_agent(provision_guest_agent='true') + self.assertEqual(3, patch_write_agent_disabled.call_count) + + ph.handle_provision_guest_agent(provision_guest_agent='True') + self.assertEqual(3, patch_write_agent_disabled.call_count) + + ph.handle_provision_guest_agent(provision_guest_agent='TRUE') + self.assertEqual(3, patch_write_agent_disabled.call_count) + if __name__ == '__main__': unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/protocol/__init__.py walinuxagent-2.2.32/tests/protocol/__init__.py --- walinuxagent-2.2.21+really2.2.20/tests/protocol/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/protocol/__init__.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/tests/protocol/mockmetadata.py walinuxagent-2.2.32/tests/protocol/mockmetadata.py --- walinuxagent-2.2.21+really2.2.20/tests/protocol/mockmetadata.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/protocol/mockmetadata.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from tests.tools import * diff -Nru walinuxagent-2.2.21+really2.2.20/tests/protocol/mockwiredata.py walinuxagent-2.2.32/tests/protocol/mockwiredata.py --- walinuxagent-2.2.21+really2.2.20/tests/protocol/mockwiredata.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/protocol/mockwiredata.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from tests.tools import * @@ -55,6 +55,16 @@ DATA_FILE_EXT_ROLLINGUPGRADE = DATA_FILE.copy() DATA_FILE_EXT_ROLLINGUPGRADE["ext_conf"] = "wire/ext_conf_upgradeguid.xml" +DATA_FILE_EXT_SEQUENCING = DATA_FILE.copy() +DATA_FILE_EXT_SEQUENCING["ext_conf"] = "wire/ext_conf_sequencing.xml" + +DATA_FILE_EXT_DELETION = DATA_FILE.copy() +DATA_FILE_EXT_DELETION["manifest"] = "wire/manifest_deletion.xml" + +DATA_FILE_EXT_SINGLE = DATA_FILE.copy() +DATA_FILE_EXT_SINGLE["manifest"] = "wire/manifest_deletion.xml" + + class WireProtocolData(object): def __init__(self, data_files=DATA_FILE): self.emulate_stale_goal_state = False @@ -164,4 +174,3 @@ with open(trans_cert_file, 'w+') as cert_file: cert_file.write(self.trans_cert) - diff -Nru walinuxagent-2.2.21+really2.2.20/tests/protocol/test_healthservice.py walinuxagent-2.2.32/tests/protocol/test_healthservice.py --- walinuxagent-2.2.21+really2.2.20/tests/protocol/test_healthservice.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/protocol/test_healthservice.py 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,235 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ +import json + +from azurelinuxagent.common.exception import HttpError +from azurelinuxagent.common.protocol.healthservice import Observation, HealthService +from azurelinuxagent.common.utils import restutil +from tests.protocol.test_hostplugin import MockResponse +from tests.tools import * + + +class TestHealthService(AgentTestCase): + + def assert_status_code(self, status_code, expected_healthy): + response = MockResponse('response', status_code) + is_healthy = not restutil.request_failed_at_hostplugin(response) + self.assertEqual(expected_healthy, is_healthy) + + def assert_observation(self, call_args, name, is_healthy, value, description): + endpoint = call_args[0][0] + content = call_args[0][1] + + jo = json.loads(content) + api = jo['Api'] + source = jo['Source'] + version = jo['Version'] + obs = jo['Observations'] + fo = obs[0] + obs_name = fo['ObservationName'] + obs_healthy = fo['IsHealthy'] + obs_value = fo['Value'] + obs_description = fo['Description'] + + self.assertEqual('application/json', call_args[1]['headers']['Content-Type']) + self.assertEqual('http://endpoint:80/HealthService', endpoint) + self.assertEqual('reporttargethealth', api) + self.assertEqual('WALinuxAgent', source) + self.assertEqual('1.0', version) + + self.assertEqual(name, obs_name) + self.assertEqual(value, obs_value) + self.assertEqual(is_healthy, obs_healthy) + self.assertEqual(description, obs_description) + + def assert_telemetry(self, call_args, response=''): + args, kw_args = call_args + self.assertFalse(kw_args['is_success']) + self.assertEqual('HealthObservation', kw_args['op']) + obs = json.loads(kw_args['message']) + self.assertEqual(obs['Value'], response) + + def test_observation_validity(self): + try: + Observation(name=None, is_healthy=True) + self.fail('Empty observation name should raise ValueError') + except ValueError: + pass + + try: + Observation(name='Name', is_healthy=None) + self.fail('Empty measurement should raise ValueError') + except ValueError: + pass + + o = Observation(name='Name', is_healthy=True, value=None, description=None) + self.assertEqual('', o.value) + self.assertEqual('', o.description) + + long_str = 's' * 200 + o = Observation(name=long_str, is_healthy=True, value=long_str, description=long_str) + self.assertEqual(200, len(o.name)) + self.assertEqual(200, len(o.value)) + self.assertEqual(200, len(o.description)) + + self.assertEqual(64, len(o.as_obj['ObservationName'])) + self.assertEqual(128, len(o.as_obj['Value'])) + self.assertEqual(128, len(o.as_obj['Description'])) + + def test_observation_json(self): + health_service = HealthService('endpoint') + health_service.observations.append(Observation(name='name', + is_healthy=True, + value='value', + description='description')) + expected_json = '{"Source": "WALinuxAgent", ' \ + '"Api": "reporttargethealth", ' \ + '"Version": "1.0", ' \ + '"Observations": [{' \ + '"Value": "value", ' \ + '"ObservationName": "name", ' \ + '"Description": "description", ' \ + '"IsHealthy": true' \ + '}]}' + expected = sorted(json.loads(expected_json).items()) + actual = sorted(json.loads(health_service.as_json).items()) + self.assertEqual(expected, actual) + + @patch('azurelinuxagent.common.event.add_event') + @patch("azurelinuxagent.common.utils.restutil.http_post") + def test_reporting(self, patch_post, patch_add_event): + health_service = HealthService('endpoint') + health_service.report_host_plugin_status(is_healthy=True, response='response') + self.assertEqual(1, patch_post.call_count) + 
self.assertEqual(0, patch_add_event.call_count) + self.assert_observation(call_args=patch_post.call_args, + name=HealthService.HOST_PLUGIN_STATUS_OBSERVATION_NAME, + is_healthy=True, + value='response', + description='') + self.assertEqual(0, len(health_service.observations)) + + health_service.report_host_plugin_status(is_healthy=False, response='error') + self.assertEqual(2, patch_post.call_count) + self.assertEqual(1, patch_add_event.call_count) + self.assert_telemetry(call_args=patch_add_event.call_args, response='error') + self.assert_observation(call_args=patch_post.call_args, + name=HealthService.HOST_PLUGIN_STATUS_OBSERVATION_NAME, + is_healthy=False, + value='error', + description='') + self.assertEqual(0, len(health_service.observations)) + + health_service.report_host_plugin_extension_artifact(is_healthy=True, source='source', response='response') + self.assertEqual(3, patch_post.call_count) + self.assertEqual(1, patch_add_event.call_count) + self.assert_observation(call_args=patch_post.call_args, + name=HealthService.HOST_PLUGIN_ARTIFACT_OBSERVATION_NAME, + is_healthy=True, + value='response', + description='source') + self.assertEqual(0, len(health_service.observations)) + + health_service.report_host_plugin_extension_artifact(is_healthy=False, source='source', response='response') + self.assertEqual(4, patch_post.call_count) + self.assertEqual(2, patch_add_event.call_count) + self.assert_telemetry(call_args=patch_add_event.call_args, response='response') + self.assert_observation(call_args=patch_post.call_args, + name=HealthService.HOST_PLUGIN_ARTIFACT_OBSERVATION_NAME, + is_healthy=False, + value='response', + description='source') + self.assertEqual(0, len(health_service.observations)) + + health_service.report_host_plugin_heartbeat(is_healthy=True) + self.assertEqual(5, patch_post.call_count) + self.assertEqual(2, patch_add_event.call_count) + self.assert_observation(call_args=patch_post.call_args, + name=HealthService.HOST_PLUGIN_HEARTBEAT_OBSERVATION_NAME, + is_healthy=True, + value='', + description='') + self.assertEqual(0, len(health_service.observations)) + + health_service.report_host_plugin_heartbeat(is_healthy=False) + self.assertEqual(3, patch_add_event.call_count) + self.assert_telemetry(call_args=patch_add_event.call_args) + self.assertEqual(6, patch_post.call_count) + self.assert_observation(call_args=patch_post.call_args, + name=HealthService.HOST_PLUGIN_HEARTBEAT_OBSERVATION_NAME, + is_healthy=False, + value='', + description='') + self.assertEqual(0, len(health_service.observations)) + + health_service.report_host_plugin_versions(is_healthy=True, response='response') + self.assertEqual(7, patch_post.call_count) + self.assertEqual(3, patch_add_event.call_count) + self.assert_observation(call_args=patch_post.call_args, + name=HealthService.HOST_PLUGIN_VERSIONS_OBSERVATION_NAME, + is_healthy=True, + value='response', + description='') + self.assertEqual(0, len(health_service.observations)) + + health_service.report_host_plugin_versions(is_healthy=False, response='response') + self.assertEqual(8, patch_post.call_count) + self.assertEqual(4, patch_add_event.call_count) + self.assert_telemetry(call_args=patch_add_event.call_args, response='response') + self.assert_observation(call_args=patch_post.call_args, + name=HealthService.HOST_PLUGIN_VERSIONS_OBSERVATION_NAME, + is_healthy=False, + value='response', + description='') + self.assertEqual(0, len(health_service.observations)) + + patch_post.side_effect = HttpError() + 
health_service.report_host_plugin_versions(is_healthy=True, response='') + + self.assertEqual(9, patch_post.call_count) + self.assertEqual(4, patch_add_event.call_count) + self.assertEqual(0, len(health_service.observations)) + + def test_observation_length(self): + health_service = HealthService('endpoint') + + # make 100 observations + for i in range(0, 100): + health_service._observe(is_healthy=True, name='{0}'.format(i)) + + # ensure we keep only 10 + self.assertEqual(10, len(health_service.observations)) + + # ensure we keep the most recent 10 + self.assertEqual('90', health_service.observations[0].name) + self.assertEqual('99', health_service.observations[9].name) + + def test_status_codes(self): + # healthy + self.assert_status_code(status_code=200, expected_healthy=True) + self.assert_status_code(status_code=201, expected_healthy=True) + self.assert_status_code(status_code=302, expected_healthy=True) + self.assert_status_code(status_code=400, expected_healthy=True) + self.assert_status_code(status_code=416, expected_healthy=True) + self.assert_status_code(status_code=419, expected_healthy=True) + self.assert_status_code(status_code=429, expected_healthy=True) + self.assert_status_code(status_code=502, expected_healthy=True) + + # unhealthy + self.assert_status_code(status_code=500, expected_healthy=False) + self.assert_status_code(status_code=501, expected_healthy=False) + self.assert_status_code(status_code=503, expected_healthy=False) + self.assert_status_code(status_code=504, expected_healthy=False) diff -Nru walinuxagent-2.2.21+really2.2.20/tests/protocol/test_hostplugin.py walinuxagent-2.2.32/tests/protocol/test_hostplugin.py --- walinuxagent-2.2.21+really2.2.20/tests/protocol/test_hostplugin.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/protocol/test_hostplugin.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,14 +12,26 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import base64 import json import sys +import datetime +import azurelinuxagent.common.protocol.restapi as restapi +import azurelinuxagent.common.protocol.wire as wire +import azurelinuxagent.common.protocol.hostplugin as hostplugin +from azurelinuxagent.common.errorstate import ErrorState + +from azurelinuxagent.common.exception import HttpError, ResourceGoneError from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.protocol.hostplugin import API_VERSION +from azurelinuxagent.common.utils import restutil +from tests.protocol.mockwiredata import WireProtocolData, DATA_FILE +from tests.protocol.test_wire import MockResponse +from tests.tools import * if sys.version_info[0] == 3: import http.client as httpclient @@ -28,19 +40,10 @@ import httplib as httpclient bytebuffer = buffer -import azurelinuxagent.common.protocol.restapi as restapi -import azurelinuxagent.common.protocol.wire as wire -import azurelinuxagent.common.protocol.hostplugin as hostplugin - -from azurelinuxagent.common import event -from azurelinuxagent.common.exception import ProtocolError, HttpError -from azurelinuxagent.common.protocol.hostplugin import API_VERSION -from azurelinuxagent.common.utils import restutil - -from tests.protocol.mockwiredata import WireProtocolData, DATA_FILE -from tests.tools import * hostplugin_status_url = "http://168.63.129.16:32526/status" +hostplugin_versions_url = "http://168.63.129.16:32526/versions" +health_service_url = 'http://168.63.129.16:80/HealthService' sas_url = "http://sas_url" wireserver_url = "168.63.129.16" @@ -55,15 +58,30 @@ if PY_VERSION_MAJOR > 2: faux_status_b64 = faux_status_b64.decode('utf-8') + class TestHostPlugin(AgentTestCase): + def _init_host(self): + test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) + host_plugin = wire.HostPluginProtocol(wireserver_url, + test_goal_state.container_id, + test_goal_state.role_config_name) + self.assertTrue(host_plugin.health_service is not None) + return host_plugin + + def _init_status_blob(self): + wire_protocol_client = wire.WireProtocol(wireserver_url).client + status_blob = wire_protocol_client.status_blob + status_blob.data = faux_status + status_blob.vm_status = restapi.VMStatus(message="Ready", status="Ready") + return status_blob + def _compare_data(self, actual, expected): for k in iter(expected.keys()): if k == 'content' or k == 'requestUri': if actual[k] != expected[k]: - print("Mismatch: Actual '{0}'='{1}', " \ - "Expected '{0}'='{3}'".format( - k, actual[k], expected[k])) + print("Mismatch: Actual '{0}'='{1}', " + "Expected '{0}'='{2}'".format(k, actual[k], expected[k])) return False elif k == 'headers': for h in expected['headers']: @@ -93,7 +111,7 @@ s = s.decode('utf-8') data['content'] = s return data - + def _hostplugin_headers(self, goal_state): return { 'x-ms-version': '2015-09-01', @@ -101,7 +119,7 @@ 'x-ms-containerid': goal_state.container_id, 'x-ms-host-config-name': goal_state.role_config_name } - + def _validate_hostplugin_args(self, args, goal_state, exp_method, exp_url, exp_data): args, kwargs = args self.assertEqual(exp_method, args[0]) @@ -112,62 +130,239 @@ self.assertEqual(headers['x-ms-containerid'], goal_state.container_id) self.assertEqual(headers['x-ms-host-config-name'], goal_state.role_config_name) - def test_fallback(self): + @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_versions") + 
@patch("azurelinuxagent.ga.update.restutil.http_get") + @patch("azurelinuxagent.common.event.report_event") + def assert_ensure_initialized(self, patch_event, patch_http_get, patch_report_health, + response_body, + response_status_code, + should_initialize, + should_report_healthy): + + host = hostplugin.HostPluginProtocol(endpoint='ws', container_id='cid', role_config_name='rcf') + + host.is_initialized = False + patch_http_get.return_value = MockResponse(body=response_body, + reason='reason', + status_code=response_status_code) + return_value = host.ensure_initialized() + + self.assertEqual(return_value, host.is_available) + self.assertEqual(should_initialize, host.is_initialized) + + self.assertEqual(1, patch_event.call_count) + self.assertEqual('InitializeHostPlugin', patch_event.call_args[0][0]) + + self.assertEqual(should_initialize, patch_event.call_args[1]['is_success']) + self.assertEqual(1, patch_report_health.call_count) + + self.assertEqual(should_report_healthy, patch_report_health.call_args[1]['is_healthy']) + + actual_response = patch_report_health.call_args[1]['response'] + if should_initialize: + self.assertEqual('', actual_response) + else: + self.assertTrue('HTTP Failed' in actual_response) + self.assertTrue(response_body in actual_response) + self.assertTrue(ustr(response_status_code) in actual_response) + + def test_ensure_initialized(self): """ - Validate fallback to upload status using HostGAPlugin is happening when - status reporting via default method is unsuccessful + Test calls to ensure_initialized + """ + self.assert_ensure_initialized(response_body=api_versions, + response_status_code=200, + should_initialize=True, + should_report_healthy=True) + + self.assert_ensure_initialized(response_body='invalid ip', + response_status_code=400, + should_initialize=False, + should_report_healthy=True) + + self.assert_ensure_initialized(response_body='generic bad request', + response_status_code=400, + should_initialize=False, + should_report_healthy=True) + + self.assert_ensure_initialized(response_body='resource gone', + response_status_code=410, + should_initialize=False, + should_report_healthy=True) + + self.assert_ensure_initialized(response_body='generic error', + response_status_code=500, + should_initialize=False, + should_report_healthy=False) + + self.assert_ensure_initialized(response_body='upstream error', + response_status_code=502, + should_initialize=False, + should_report_healthy=True) + + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.ensure_initialized", return_value=True) + @patch("azurelinuxagent.common.protocol.wire.StatusBlob.upload", return_value=False) + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol._put_page_blob_status") + @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") + def test_default_channel(self, patch_update, patch_put, patch_upload, _): + """ + Status now defaults to HostPlugin. Validate that any errors on the public + channel are ignored. Validate that the default channel is never changed + as part of status upload. 
""" test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) status = restapi.VMStatus(status="Ready", message="Guest Agent is running") - with patch.object(wire.HostPluginProtocol, - "ensure_initialized", - return_value=True): - with patch.object(wire.StatusBlob, "upload", return_value=False) as patch_upload: - with patch.object(wire.HostPluginProtocol, - "_put_page_blob_status") as patch_put: - wire_protocol_client = wire.WireProtocol(wireserver_url).client - wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state) - wire_protocol_client.ext_conf = wire.ExtensionsConfig(None) - wire_protocol_client.ext_conf.status_upload_blob = sas_url - wire_protocol_client.ext_conf.status_upload_blob_type = page_blob_type - wire_protocol_client.status_blob.set_vm_status(status) - wire_protocol_client.upload_status_blob() - self.assertEqual(patch_upload.call_count, 1) - self.assertTrue(patch_put.call_count == 1, - "Fallback was not engaged") - self.assertTrue(patch_put.call_args[0][0] == sas_url) - self.assertTrue(wire.HostPluginProtocol.is_default_channel()) - wire.HostPluginProtocol.set_default_channel(False) - def test_fallback_failure(self): + wire_protocol_client = wire.WireProtocol(wireserver_url).client + wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state) + wire_protocol_client.ext_conf = wire.ExtensionsConfig(None) + wire_protocol_client.ext_conf.status_upload_blob = sas_url + wire_protocol_client.ext_conf.status_upload_blob_type = page_blob_type + wire_protocol_client.status_blob.set_vm_status(status) + + # act + wire_protocol_client.upload_status_blob() + + # assert direct route is not called + self.assertEqual(0, patch_upload.call_count, "Direct channel was used") + + # assert host plugin route is called + self.assertEqual(1, patch_put.call_count, "Host plugin was not used") + + # assert update goal state is only called once, non-forced + self.assertEqual(1, patch_update.call_count, "Unexpected call count") + self.assertEqual(0, len(patch_update.call_args[1]), "Unexpected parameters") + + # ensure the correct url is used + self.assertEqual(sas_url, patch_put.call_args[0][0]) + + # ensure host plugin is not set as default + self.assertFalse(wire.HostPluginProtocol.is_default_channel()) + + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.ensure_initialized", + return_value=True) + @patch("azurelinuxagent.common.protocol.wire.StatusBlob.upload", + return_value=True) + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol._put_page_blob_status", + side_effect=HttpError("503")) + @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") + def test_fallback_channel_503(self, patch_update, patch_put, patch_upload, _): """ - Validate that when host plugin fails, the default channel is reset + When host plugin returns a 503, we should fall back to the direct channel """ test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) - status = restapi.VMStatus(status="Ready", - message="Guest Agent is running") - wire.HostPluginProtocol.set_default_channel(False) - with patch.object(wire.HostPluginProtocol, - "ensure_initialized", - return_value=True): - with patch.object(wire.StatusBlob, - "upload", - return_value=False): - with patch.object(wire.HostPluginProtocol, - "_put_page_blob_status", - side_effect=wire.HttpError("put failure")) as patch_put: - client = wire.WireProtocol(wireserver_url).client - client.get_goal_state = Mock(return_value=test_goal_state) - client.ext_conf = 
wire.ExtensionsConfig(None) - client.ext_conf.status_upload_blob = sas_url - client.ext_conf.status_upload_blob_type = page_blob_type - client.status_blob.set_vm_status(status) - client.upload_status_blob() - self.assertTrue(patch_put.call_count == 1, - "Fallback was not engaged") - self.assertFalse(wire.HostPluginProtocol.is_default_channel()) + status = restapi.VMStatus(status="Ready", message="Guest Agent is running") - def test_put_status_error_reporting(self): + wire_protocol_client = wire.WireProtocol(wireserver_url).client + wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state) + wire_protocol_client.ext_conf = wire.ExtensionsConfig(None) + wire_protocol_client.ext_conf.status_upload_blob = sas_url + wire_protocol_client.ext_conf.status_upload_blob_type = page_blob_type + wire_protocol_client.status_blob.set_vm_status(status) + + # act + wire_protocol_client.upload_status_blob() + + # assert direct route is called + self.assertEqual(1, patch_upload.call_count, "Direct channel was not used") + + # assert host plugin route is called + self.assertEqual(1, patch_put.call_count, "Host plugin was not used") + + # assert update goal state is only called once, non-forced + self.assertEqual(1, patch_update.call_count, "Update goal state unexpected call count") + self.assertEqual(0, len(patch_update.call_args[1]), "Update goal state unexpected call count") + + # ensure the correct url is used + self.assertEqual(sas_url, patch_put.call_args[0][0]) + + # ensure host plugin is not set as default + self.assertFalse(wire.HostPluginProtocol.is_default_channel()) + + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.ensure_initialized", + return_value=True) + @patch("azurelinuxagent.common.protocol.wire.StatusBlob.upload", + return_value=True) + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol._put_page_blob_status", + side_effect=ResourceGoneError("410")) + @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") + def test_fallback_channel_410(self, patch_update, patch_put, patch_upload, _): + """ + When host plugin returns a 410, we should force the goal state update and return + """ + test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) + status = restapi.VMStatus(status="Ready", message="Guest Agent is running") + + wire_protocol_client = wire.WireProtocol(wireserver_url).client + wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state) + wire_protocol_client.ext_conf = wire.ExtensionsConfig(None) + wire_protocol_client.ext_conf.status_upload_blob = sas_url + wire_protocol_client.ext_conf.status_upload_blob_type = page_blob_type + wire_protocol_client.status_blob.set_vm_status(status) + + # act + wire_protocol_client.upload_status_blob() + + # assert direct route is not called + self.assertEqual(0, patch_upload.call_count, "Direct channel was used") + + # assert host plugin route is called + self.assertEqual(1, patch_put.call_count, "Host plugin was not used") + + # assert update goal state is called twice, forced=True on the second + self.assertEqual(2, patch_update.call_count, "Update goal state unexpected call count") + self.assertEqual(1, len(patch_update.call_args[1]), "Update goal state unexpected call count") + self.assertTrue(patch_update.call_args[1]['forced'], "Update goal state unexpected call count") + + # ensure the correct url is used + self.assertEqual(sas_url, patch_put.call_args[0][0]) + + # ensure host plugin is not set as default + 
self.assertFalse(wire.HostPluginProtocol.is_default_channel()) + + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.ensure_initialized", + return_value=True) + @patch("azurelinuxagent.common.protocol.wire.StatusBlob.upload", + return_value=False) + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol._put_page_blob_status", + side_effect=HttpError("500")) + @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") + def test_fallback_channel_failure(self, patch_update, patch_put, patch_upload, _): + """ + When host plugin returns a 500, and direct fails, we should raise a ProtocolError + """ + test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) + status = restapi.VMStatus(status="Ready", message="Guest Agent is running") + + wire_protocol_client = wire.WireProtocol(wireserver_url).client + wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state) + wire_protocol_client.ext_conf = wire.ExtensionsConfig(None) + wire_protocol_client.ext_conf.status_upload_blob = sas_url + wire_protocol_client.ext_conf.status_upload_blob_type = page_blob_type + wire_protocol_client.status_blob.set_vm_status(status) + + # act + self.assertRaises(wire.ProtocolError, wire_protocol_client.upload_status_blob) + + # assert direct route is called + self.assertEqual(1, patch_upload.call_count, "Direct channel was not used") + + # assert host plugin route is called + self.assertEqual(1, patch_put.call_count, "Host plugin was not used") + + # assert update goal state is only called once, non-forced + self.assertEqual(1, patch_update.call_count, "Update goal state unexpected call count") + self.assertEqual(0, len(patch_update.call_args[1]), "Update goal state unexpected call count") + + # ensure the correct url is used + self.assertEqual(sas_url, patch_put.call_args[0][0]) + + # ensure host plugin is not set as default + self.assertFalse(wire.HostPluginProtocol.is_default_channel()) + + @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") + @patch("azurelinuxagent.common.event.add_event") + def test_put_status_error_reporting(self, patch_add_event, _): """ Validate the telemetry when uploading status fails """ @@ -184,17 +379,25 @@ wire_protocol_client.ext_conf.status_upload_blob = sas_url wire_protocol_client.status_blob.set_vm_status(status) put_error = wire.HttpError("put status http error") - with patch.object(event, - "add_event") as patch_add_event: - with patch.object(restutil, - "http_put", - side_effect=put_error) as patch_http_put: - with patch.object(wire.HostPluginProtocol, - "ensure_initialized", return_value=True): - wire_protocol_client.upload_status_blob() - self.assertFalse(wire.HostPluginProtocol.is_default_channel()) - self.assertTrue(patch_add_event.call_count == 1) + with patch.object(restutil, + "http_put", + side_effect=put_error) as patch_http_put: + with patch.object(wire.HostPluginProtocol, + "ensure_initialized", return_value=True): + self.assertRaises(wire.ProtocolError, wire_protocol_client.upload_status_blob) + # The agent tries to upload via HostPlugin and that fails due to + # http_put having a side effect of "put_error" + # + # The agent tries to upload using a direct connection, and that succeeds. + self.assertEqual(1, wire_protocol_client.status_blob.upload.call_count) + # The agent never touches the default protocol in this code path, so no change.
+ self.assertFalse(wire.HostPluginProtocol.is_default_channel()) + # The agent never logs telemetry event for direct fallback + self.assertEqual(1, patch_add_event.call_count) + self.assertEqual('ReportStatus', patch_add_event.call_args[1]['op']) + self.assertTrue('Falling back to direct' in patch_add_event.call_args[1]['message']) + self.assertEqual(True, patch_add_event.call_args[1]['is_success']) def test_validate_http_request(self): """Validate correct set of data is sent to HostGAPlugin when reporting VM status""" @@ -209,8 +412,8 @@ exp_method = 'PUT' exp_url = hostplugin_status_url exp_data = self._hostplugin_data( - status_blob.get_block_blob_headers(len(faux_status)), - bytearray(faux_status, encoding='utf-8')) + status_blob.get_block_blob_headers(len(faux_status)), + bytearray(faux_status, encoding='utf-8')) with patch.object(restutil, "http_request") as patch_http: patch_http.return_value = Mock(status=httpclient.OK) @@ -222,13 +425,20 @@ patch_api.return_value = API_VERSION plugin.put_vm_status(status_blob, sas_url, block_blob_type) - self.assertTrue(patch_http.call_count == 1) + self.assertTrue(patch_http.call_count == 2) + + # first call is to host plugin self._validate_hostplugin_args( patch_http.call_args_list[0], test_goal_state, exp_method, exp_url, exp_data) - def test_no_fallback(self): + # second call is to health service + self.assertEqual('POST', patch_http.call_args_list[1][0][0]) + self.assertEqual(health_service_url, patch_http.call_args_list[1][0][1]) + + @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") + def test_no_fallback(self, _): """ Validate fallback to upload status using HostGAPlugin is not happening when status reporting via default method is successful @@ -254,6 +464,7 @@ test_goal_state.role_config_name) self.assertFalse(host_client.is_initialized) self.assertTrue(host_client.api_versions is None) + self.assertTrue(host_client.health_service is not None) status_blob = wire_protocol_client.status_blob status_blob.data = faux_status @@ -263,23 +474,29 @@ exp_method = 'PUT' exp_url = hostplugin_status_url exp_data = self._hostplugin_data( - status_blob.get_block_blob_headers(len(faux_status)), - bytearray(faux_status, encoding='utf-8')) + status_blob.get_block_blob_headers(len(faux_status)), + bytearray(faux_status, encoding='utf-8')) with patch.object(restutil, "http_request") as patch_http: patch_http.return_value = Mock(status=httpclient.OK) with patch.object(wire.HostPluginProtocol, - "get_api_versions") as patch_get: + "get_api_versions") as patch_get: patch_get.return_value = api_versions host_client.put_vm_status(status_blob, sas_url) - self.assertTrue(patch_http.call_count == 1) + self.assertTrue(patch_http.call_count == 2) + + # first call is to host plugin self._validate_hostplugin_args( patch_http.call_args_list[0], test_goal_state, exp_method, exp_url, exp_data) - + + # second call is to health service + self.assertEqual('POST', patch_http.call_args_list[1][0][0]) + self.assertEqual(health_service_url, patch_http.call_args_list[1][0][1]) + def test_validate_page_blobs(self): """Validate correct set of data is sent for page blobs""" wire_protocol_client = wire.WireProtocol(wireserver_url).client @@ -308,29 +525,35 @@ mock_response = MockResponse('', httpclient.OK) with patch.object(restutil, "http_request", - return_value=mock_response) as patch_http: + return_value=mock_response) as patch_http: with patch.object(wire.HostPluginProtocol, - "get_api_versions") as patch_get: + "get_api_versions") as patch_get: 
patch_get.return_value = api_versions host_client.put_vm_status(status_blob, sas_url) - self.assertTrue(patch_http.call_count == 2) + self.assertTrue(patch_http.call_count == 3) + # first call is to host plugin exp_data = self._hostplugin_data( - status_blob.get_page_blob_create_headers( - page_size)) + status_blob.get_page_blob_create_headers( + page_size)) self._validate_hostplugin_args( patch_http.call_args_list[0], test_goal_state, exp_method, exp_url, exp_data) + # second call is to health service + self.assertEqual('POST', patch_http.call_args_list[1][0][0]) + self.assertEqual(health_service_url, patch_http.call_args_list[1][0][1]) + + # last call is to host plugin exp_data = self._hostplugin_data( - status_blob.get_page_blob_page_headers( - 0, page_size), - page) - exp_data['requestUri'] += "?comp=page" + status_blob.get_page_blob_page_headers( + 0, page_size), + page) + exp_data['requestUri'] += "?comp=page" self._validate_hostplugin_args( - patch_http.call_args_list[1], + patch_http.call_args_list[2], test_goal_state, exp_method, exp_url, exp_data) @@ -347,6 +570,7 @@ test_goal_state.role_config_name) self.assertFalse(host_client.is_initialized) self.assertTrue(host_client.api_versions is None) + self.assertTrue(host_client.health_service is not None) with patch.object(wire.HostPluginProtocol, "get_api_versions", return_value=api_versions) as patch_get: actual_url, actual_headers = host_client.get_artifact_request(sas_url) @@ -356,15 +580,249 @@ for k in expected_headers: self.assertTrue(k in actual_headers) self.assertEqual(expected_headers[k], actual_headers[k]) - + + @patch("azurelinuxagent.common.utils.restutil.http_get") + def test_health(self, patch_http_get): + host_plugin = self._init_host() + + patch_http_get.return_value = MockResponse('', 200) + result = host_plugin.get_health() + self.assertEqual(1, patch_http_get.call_count) + self.assertTrue(result) + + patch_http_get.return_value = MockResponse('', 500) + result = host_plugin.get_health() + self.assertFalse(result) + + patch_http_get.side_effect = IOError('client IO error') + try: + host_plugin.get_health() + self.fail('IO error expected to be raised') + except IOError: + # expected + pass + + @patch("azurelinuxagent.common.utils.restutil.http_get", + return_value=MockResponse(status_code=200, body=b'')) + @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_versions") + def test_ensure_health_service_called(self, patch_http_get, patch_report_versions): + host_plugin = self._init_host() + + host_plugin.get_api_versions() + self.assertEqual(1, patch_http_get.call_count) + self.assertEqual(1, patch_report_versions.call_count) + + @patch("azurelinuxagent.common.utils.restutil.http_get") + @patch("azurelinuxagent.common.utils.restutil.http_post") + @patch("azurelinuxagent.common.utils.restutil.http_put") + def test_put_status_healthy_signal(self, patch_http_put, patch_http_post, patch_http_get): + host_plugin = self._init_host() + status_blob = self._init_status_blob() + # get_api_versions + patch_http_get.return_value = MockResponse(api_versions, 200) + # put status blob + patch_http_put.return_value = MockResponse(None, 201) + + host_plugin.put_vm_status(status_blob=status_blob, sas_url=sas_url) + self.assertEqual(1, patch_http_get.call_count) + self.assertEqual(hostplugin_versions_url, patch_http_get.call_args[0][0]) + + self.assertEqual(2, patch_http_put.call_count) + self.assertEqual(hostplugin_status_url, patch_http_put.call_args_list[0][0][0]) + 
self.assertEqual(hostplugin_status_url, patch_http_put.call_args_list[1][0][0]) + + self.assertEqual(2, patch_http_post.call_count) + + # signal for /versions + self.assertEqual(health_service_url, patch_http_post.call_args_list[0][0][0]) + jstr = patch_http_post.call_args_list[0][0][1] + obj = json.loads(jstr) + self.assertEqual(1, len(obj['Observations'])) + self.assertTrue(obj['Observations'][0]['IsHealthy']) + self.assertEqual('GuestAgentPluginVersions', obj['Observations'][0]['ObservationName']) + + # signal for /status + self.assertEqual(health_service_url, patch_http_post.call_args_list[1][0][0]) + jstr = patch_http_post.call_args_list[1][0][1] + obj = json.loads(jstr) + self.assertEqual(1, len(obj['Observations'])) + self.assertTrue(obj['Observations'][0]['IsHealthy']) + self.assertEqual('GuestAgentPluginStatus', obj['Observations'][0]['ObservationName']) + + @patch("azurelinuxagent.common.utils.restutil.http_get") + @patch("azurelinuxagent.common.utils.restutil.http_post") + @patch("azurelinuxagent.common.utils.restutil.http_put") + def test_put_status_unhealthy_signal_transient(self, patch_http_put, patch_http_post, patch_http_get): + host_plugin = self._init_host() + status_blob = self._init_status_blob() + # get_api_versions + patch_http_get.return_value = MockResponse(api_versions, 200) + # put status blob + patch_http_put.return_value = MockResponse(None, 500) + + if sys.version_info < (2, 7): + self.assertRaises(HttpError, host_plugin.put_vm_status, status_blob, sas_url) + else: + with self.assertRaises(HttpError): + host_plugin.put_vm_status(status_blob=status_blob, sas_url=sas_url) + + self.assertEqual(1, patch_http_get.call_count) + self.assertEqual(hostplugin_versions_url, patch_http_get.call_args[0][0]) + + self.assertEqual(1, patch_http_put.call_count) + self.assertEqual(hostplugin_status_url, patch_http_put.call_args[0][0]) + + self.assertEqual(2, patch_http_post.call_count) + + # signal for /versions + self.assertEqual(health_service_url, patch_http_post.call_args_list[0][0][0]) + jstr = patch_http_post.call_args_list[0][0][1] + obj = json.loads(jstr) + self.assertEqual(1, len(obj['Observations'])) + self.assertTrue(obj['Observations'][0]['IsHealthy']) + self.assertEqual('GuestAgentPluginVersions', obj['Observations'][0]['ObservationName']) + + # signal for /status + self.assertEqual(health_service_url, patch_http_post.call_args_list[1][0][0]) + jstr = patch_http_post.call_args_list[1][0][1] + obj = json.loads(jstr) + self.assertEqual(1, len(obj['Observations'])) + self.assertTrue(obj['Observations'][0]['IsHealthy']) + self.assertEqual('GuestAgentPluginStatus', obj['Observations'][0]['ObservationName']) + + @patch("azurelinuxagent.common.utils.restutil.http_get") + @patch("azurelinuxagent.common.utils.restutil.http_post") + @patch("azurelinuxagent.common.utils.restutil.http_put") + def test_put_status_unhealthy_signal_permanent(self, patch_http_put, patch_http_post, patch_http_get): + host_plugin = self._init_host() + status_blob = self._init_status_blob() + # get_api_versions + patch_http_get.return_value = MockResponse(api_versions, 200) + # put status blob + patch_http_put.return_value = MockResponse(None, 500) + + host_plugin.status_error_state.is_triggered = Mock(return_value=True) + + if sys.version_info < (2, 7): + self.assertRaises(HttpError, host_plugin.put_vm_status, status_blob, sas_url) + else: + with self.assertRaises(HttpError): + host_plugin.put_vm_status(status_blob=status_blob, sas_url=sas_url) + + self.assertEqual(1, patch_http_get.call_count) + 
self.assertEqual(hostplugin_versions_url, patch_http_get.call_args[0][0]) + + self.assertEqual(1, patch_http_put.call_count) + self.assertEqual(hostplugin_status_url, patch_http_put.call_args[0][0]) + + self.assertEqual(2, patch_http_post.call_count) + + # signal for /versions + self.assertEqual(health_service_url, patch_http_post.call_args_list[0][0][0]) + jstr = patch_http_post.call_args_list[0][0][1] + obj = json.loads(jstr) + self.assertEqual(1, len(obj['Observations'])) + self.assertTrue(obj['Observations'][0]['IsHealthy']) + self.assertEqual('GuestAgentPluginVersions', obj['Observations'][0]['ObservationName']) + + # signal for /status + self.assertEqual(health_service_url, patch_http_post.call_args_list[1][0][0]) + jstr = patch_http_post.call_args_list[1][0][1] + obj = json.loads(jstr) + self.assertEqual(1, len(obj['Observations'])) + self.assertFalse(obj['Observations'][0]['IsHealthy']) + self.assertEqual('GuestAgentPluginStatus', obj['Observations'][0]['ObservationName']) + + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.should_report", return_value=True) + @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_extension_artifact") + def test_report_fetch_health(self, patch_report_artifact, patch_should_report): + host_plugin = self._init_host() + host_plugin.report_fetch_health(uri='', is_healthy=True) + self.assertEqual(0, patch_should_report.call_count) + + host_plugin.report_fetch_health(uri='http://169.254.169.254/extensionArtifact', is_healthy=True) + self.assertEqual(0, patch_should_report.call_count) + + host_plugin.report_fetch_health(uri='http://168.63.129.16:32526/status', is_healthy=True) + self.assertEqual(0, patch_should_report.call_count) + + self.assertEqual(None, host_plugin.fetch_last_timestamp) + host_plugin.report_fetch_health(uri='http://168.63.129.16:32526/extensionArtifact', is_healthy=True) + self.assertNotEqual(None, host_plugin.fetch_last_timestamp) + self.assertEqual(1, patch_should_report.call_count) + self.assertEqual(1, patch_report_artifact.call_count) + + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.should_report", return_value=True) + @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_status") + def test_report_status_health(self, patch_report_status, patch_should_report): + host_plugin = self._init_host() + self.assertEqual(None, host_plugin.status_last_timestamp) + host_plugin.report_status_health(is_healthy=True) + self.assertNotEqual(None, host_plugin.status_last_timestamp) + self.assertEqual(1, patch_should_report.call_count) + self.assertEqual(1, patch_report_status.call_count) + + def test_should_report(self): + host_plugin = self._init_host() + error_state = ErrorState(min_timedelta=datetime.timedelta(minutes=5)) + period = datetime.timedelta(minutes=1) + last_timestamp = None + + # first measurement at 0s, should report + is_healthy = True + actual = host_plugin.should_report(is_healthy, + error_state, + last_timestamp, + period) + self.assertEqual(True, actual) + + # second measurement at 30s, should not report + last_timestamp = datetime.datetime.utcnow() - datetime.timedelta(seconds=30) + actual = host_plugin.should_report(is_healthy, + error_state, + last_timestamp, + period) + self.assertEqual(False, actual) + + # third measurement at 60s, should report + last_timestamp = datetime.datetime.utcnow() - datetime.timedelta(seconds=60) + actual = host_plugin.should_report(is_healthy, + error_state, + last_timestamp, + 
period) + self.assertEqual(True, actual) + + # fourth measurement unhealthy, should report and increment counter + is_healthy = False + self.assertEqual(0, error_state.count) + actual = host_plugin.should_report(is_healthy, + error_state, + last_timestamp, + period) + self.assertEqual(1, error_state.count) + self.assertEqual(True, actual) + + # fifth measurement, should not report and reset counter + is_healthy = True + last_timestamp = datetime.datetime.utcnow() - datetime.timedelta(seconds=30) + self.assertEqual(1, error_state.count) + actual = host_plugin.should_report(is_healthy, + error_state, + last_timestamp, + period) + self.assertEqual(0, error_state.count) + self.assertEqual(False, actual) + class MockResponse: - def __init__(self, body, status_code): + def __init__(self, body, status_code, reason=''): self.body = body self.status = status_code + self.reason = reason def read(self): - return self.body + return self.body if sys.version_info[0] == 2 else bytes(self.body, encoding='utf-8') + if __name__ == '__main__': unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/protocol/test_image_info_matcher.py walinuxagent-2.2.32/tests/protocol/test_image_info_matcher.py --- walinuxagent-2.2.21+really2.2.20/tests/protocol/test_image_info_matcher.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/protocol/test_image_info_matcher.py 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,129 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +import json + +from azurelinuxagent.common.exception import HttpError +from azurelinuxagent.common.protocol.imds import ComputeInfo, ImdsClient, IMDS_IMAGE_ORIGIN_CUSTOM, \ + IMDS_IMAGE_ORIGIN_ENDORSED, IMDS_IMAGE_ORIGIN_PLATFORM, ImageInfoMatcher +from azurelinuxagent.common.protocol.restapi import set_properties +from azurelinuxagent.common.utils import restutil +from tests.ga.test_update import ResponseMock +from tests.tools import * + + +class TestImageInfoMatcher(unittest.TestCase): + def test_image_does_not_exist(self): + doc = '{}' + + test_subject = ImageInfoMatcher(doc) + self.assertFalse(test_subject.is_match("Red Hat", "RHEL", "6.3", "")) + + def test_image_exists_by_sku(self): + doc = '''{ + "CANONICAL": { + "UBUNTUSERVER": { + "16.04-LTS": { "Match": ".*" } + } + } + }''' + + test_subject = ImageInfoMatcher(doc) + self.assertTrue(test_subject.is_match("Canonical", "UbuntuServer", "16.04-LTS", "")) + self.assertTrue(test_subject.is_match("Canonical", "UbuntuServer", "16.04-LTS", "16.04.201805090")) + + self.assertFalse(test_subject.is_match("Canonical", "UbuntuServer", "14.04.0-LTS", "16.04.201805090")) + + def test_image_exists_by_version(self): + doc = '''{ + "REDHAT": { + "RHEL": { + "Minimum": "6.3" + } + } + }''' + + test_subject = ImageInfoMatcher(doc) + self.assertFalse(test_subject.is_match("RedHat", "RHEL", "6.1", "")) + self.assertFalse(test_subject.is_match("RedHat", "RHEL", "6.2", "")) + + self.assertTrue(test_subject.is_match("RedHat", "RHEL", "6.3", "")) + self.assertTrue(test_subject.is_match("RedHat", "RHEL", "6.4", "")) + self.assertTrue(test_subject.is_match("RedHat", "RHEL", "6.5", "")) + self.assertTrue(test_subject.is_match("RedHat", "RHEL", "7.0", "")) + self.assertTrue(test_subject.is_match("RedHat", "RHEL", "7.1", "")) + + def test_image_exists_by_version01(self): + """ + Test case to ensure the matcher exhaustively searches all cases. 
+ + REDHAT/RHEL with a SKU >= 6.3 is less precise than + REDHAT/RHEL/7-LVM with any version. + + Both should return a successful match. + """ + doc = '''{ + "REDHAT": { + "RHEL": { + "Minimum": "6.3", + "7-LVM": { "Match": ".*" } + } + } + }''' + + test_subject = ImageInfoMatcher(doc) + + self.assertTrue(test_subject.is_match("RedHat", "RHEL", "6.3", "")) + self.assertTrue(test_subject.is_match("RedHat", "RHEL", "7-LVM", "")) + + def test_ignores_case(self): + doc = '''{ + "CANONICAL": { + "UBUNTUSERVER": { + "16.04-LTS": { "Match": ".*" } + } + } + }''' + + test_subject = ImageInfoMatcher(doc) + self.assertTrue(test_subject.is_match("canonical", "ubuntuserver", "16.04-lts", "")) + self.assertFalse(test_subject.is_match("canonical", "ubuntuserver", "14.04.0-lts", "16.04.201805090")) + + def test_list_operator(self): + doc = '''{ + "CANONICAL": { + "UBUNTUSERVER": { + "List": [ "14.04.0-LTS", "14.04.1-LTS" ] + } + } + }''' + + test_subject = ImageInfoMatcher(doc) + self.assertTrue(test_subject.is_match("Canonical", "UbuntuServer", "14.04.0-LTS", "")) + self.assertTrue(test_subject.is_match("Canonical", "UbuntuServer", "14.04.1-LTS", "")) + + self.assertFalse(test_subject.is_match("Canonical", "UbuntuServer", "22.04-LTS", "")) + + def test_invalid_version(self): + doc = '''{ + "REDHAT": { + "RHEL": { + "Minimum": "6.3" + } + } + }''' + + test_subject = ImageInfoMatcher(doc) + self.assertFalse(test_subject.is_match("RedHat", "RHEL", "16.04-LTS", "")) + + # This is *expected* behavior as opposed to desirable. The specification is + # controlled by the agent, so there is no reason to use these values, but if + # one does, this is the expected behavior. + # + # FlexibleVersion chops off all leading zeros. + self.assertTrue(test_subject.is_match("RedHat", "RHEL", "6.04", "")) + # FlexibleVersion coerces everything to a string + self.assertTrue(test_subject.is_match("RedHat", "RHEL", 6.04, "")) + + +if __name__ == '__main__': + unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/protocol/test_imds.py walinuxagent-2.2.32/tests/protocol/test_imds.py --- walinuxagent-2.2.21+really2.2.20/tests/protocol/test_imds.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/protocol/test_imds.py 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,349 @@ +# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the Apache License, Version 2.0 (the "License"); +import json + +import azurelinuxagent.common.protocol.imds as imds + +from azurelinuxagent.common.exception import HttpError +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.protocol.restapi import set_properties +from azurelinuxagent.common.utils import restutil +from tests.ga.test_update import ResponseMock +from tests.tools import * + + +class TestImds(AgentTestCase): + @patch("azurelinuxagent.ga.update.restutil.http_get") + def test_get(self, mock_http_get): + mock_http_get.return_value = ResponseMock(response='''{ + "location": "westcentralus", + "name": "unit_test", + "offer": "UnitOffer", + "osType": "Linux", + "placementGroupId": "", + "platformFaultDomain": "0", + "platformUpdateDomain": "0", + "publisher": "UnitPublisher", + "resourceGroupName": "UnitResourceGroupName", + "sku": "UnitSku", + "subscriptionId": "e4402c6c-2804-4a0a-9dee-d61918fc4d28", + "tags": "Key1:Value1;Key2:Value2", + "vmId": "f62f23fb-69e2-4df0-a20b-cb5c201a3e7a", + "version": "UnitVersion", + "vmSize": "Standard_D1_v2" + }'''.encode('utf-8')) + + test_subject = imds.ImdsClient() + test_subject.get_compute() + + self.assertEqual(1, mock_http_get.call_count) + positional_args, kw_args = mock_http_get.call_args + + self.assertEqual('http://169.254.169.254/metadata/instance/compute?api-version=2018-02-01', positional_args[0]) + self.assertTrue('User-Agent' in kw_args['headers']) + self.assertTrue('Metadata' in kw_args['headers']) + self.assertEqual(True, kw_args['headers']['Metadata']) + + @patch("azurelinuxagent.ga.update.restutil.http_get") + def test_get_bad_request(self, mock_http_get): + mock_http_get.return_value = ResponseMock(status=restutil.httpclient.BAD_REQUEST) + + test_subject = imds.ImdsClient() + self.assertRaises(HttpError, test_subject.get_compute) + + @patch("azurelinuxagent.ga.update.restutil.http_get") + def test_get_empty_response(self, mock_http_get): + mock_http_get.return_value = ResponseMock(response=''.encode('utf-8')) + + test_subject = imds.ImdsClient() + self.assertRaises(ValueError, test_subject.get_compute) + + def test_deserialize_ComputeInfo(self): + s = '''{ + "location": "westcentralus", + "name": "unit_test", + "offer": "UnitOffer", + "osType": "Linux", + "placementGroupId": "", + "platformFaultDomain": "0", + "platformUpdateDomain": "0", + "publisher": "UnitPublisher", + "resourceGroupName": "UnitResourceGroupName", + "sku": "UnitSku", + "subscriptionId": "e4402c6c-2804-4a0a-9dee-d61918fc4d28", + "tags": "Key1:Value1;Key2:Value2", + "vmId": "f62f23fb-69e2-4df0-a20b-cb5c201a3e7a", + "version": "UnitVersion", + "vmSize": "Standard_D1_v2", + "vmScaleSetName": "MyScaleSet", + "zone": "In" + }''' + + data = json.loads(s, encoding='utf-8') + + compute_info = imds.ComputeInfo() + set_properties("compute", compute_info, data) + + self.assertEqual('westcentralus', compute_info.location) + self.assertEqual('unit_test', compute_info.name) + self.assertEqual('UnitOffer', compute_info.offer) + self.assertEqual('Linux', compute_info.osType) + self.assertEqual('', compute_info.placementGroupId) + self.assertEqual('0', compute_info.platformFaultDomain) + self.assertEqual('0', compute_info.platformUpdateDomain) + self.assertEqual('UnitPublisher', compute_info.publisher) + self.assertEqual('UnitResourceGroupName', compute_info.resourceGroupName) + self.assertEqual('UnitSku', compute_info.sku) + self.assertEqual('e4402c6c-2804-4a0a-9dee-d61918fc4d28', compute_info.subscriptionId) + 
self.assertEqual('Key1:Value1;Key2:Value2', compute_info.tags) + self.assertEqual('f62f23fb-69e2-4df0-a20b-cb5c201a3e7a', compute_info.vmId) + self.assertEqual('UnitVersion', compute_info.version) + self.assertEqual('Standard_D1_v2', compute_info.vmSize) + self.assertEqual('MyScaleSet', compute_info.vmScaleSetName) + self.assertEqual('In', compute_info.zone) + + self.assertEqual('UnitPublisher:UnitOffer:UnitSku:UnitVersion', compute_info.image_info) + + def test_is_custom_image(self): + image_origin = self._setup_image_origin_assert("", "", "", "") + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_CUSTOM, image_origin) + + def test_is_endorsed_CentOS(self): + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.3", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.4", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.5", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.6", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.7", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.8", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.9", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "7.0", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "7.1", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "7.2", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "7.3", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "7.4", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "7-LVM", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS", "7-RAW", "")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS-HPC", "6.5", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS-HPC", "6.8", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS-HPC", "7.1", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS-HPC", "7.3", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("OpenLogic", "CentOS-HPC", "7.4", "")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.2", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("OpenLogic", "CentOS", "6.1", "")) + + def test_is_endorsed_CoreOS(self): + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("CoreOS", "CoreOS", "stable", "494.4.0")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("CoreOS", "CoreOS", "stable", "899.17.0")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, 
self._setup_image_origin_assert("CoreOS", "CoreOS", "stable", "1688.5.3")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("CoreOS", "CoreOS", "stable", "494.3.0")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("CoreOS", "CoreOS", "alpha", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("CoreOS", "CoreOS", "beta", "")) + + def test_is_endorsed_Debian(self): + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("credativ", "Debian", "7", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("credativ", "Debian", "8", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("credativ", "Debian", "9", "")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("credativ", "Debian", "9-DAILY", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("credativ", "Debian", "10-DAILY", "")) + + def test_is_endorsed_Rhel(self): + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "6.7", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "6.8", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "6.9", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "7.0", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "7.1", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "7.2", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "7.3", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "7.4", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "7-LVM", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL", "7-RAW", "")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP-HANA", "7.2", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP-HANA", "7.3", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP-HANA", "7.4", "")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP", "7.2", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP", "7.3", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP", "7.4", "")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP-APPS", "7.2", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP-APPS", "7.3", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("RedHat", "RHEL-SAP-APPS", "7.4", "")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("RedHat", "RHEL", "6.6", "")) + + def test_is_endorsed_SuSE(self): + 
self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES", "11-SP4", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-BYOS", "11-SP4", "")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES", "12-SP1", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES", "12-SP2", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES", "12-SP3", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES", "12-SP4", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES", "12-SP5", "")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-BYOS", "12-SP1", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-BYOS", "12-SP2", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-BYOS", "12-SP3", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-BYOS", "12-SP4", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-BYOS", "12-SP5", "")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-SAP", "12-SP1", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-SAP", "12-SP2", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-SAP", "12-SP3", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-SAP", "12-SP4", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("SuSE", "SLES-SAP", "12-SP5", "")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("SuSE", "SLES", "11-SP3", "")) + + def test_is_endorsed_UbuntuServer(self): + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.0-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.1-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.2-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.3-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.4-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.5-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.6-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.7-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.8-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "16.04-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, 
self._setup_image_origin_assert("Canonical", "UbuntuServer", "18.04-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "20.04-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "22.04-LTS", "")) + + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("Canonical", "UbuntuServer", "12.04-LTS", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("Canonical", "UbuntuServer", "17.10", "")) + self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("Canonical", "UbuntuServer", "18.04-DAILY-LTS", "")) + + @staticmethod + def _setup_image_origin_assert(publisher, offer, sku, version): + s = '''{{ + "publisher": "{0}", + "offer": "{1}", + "sku": "{2}", + "version": "{3}" + }}'''.format(publisher, offer, sku, version) + + data = json.loads(s, encoding='utf-8') + compute_info = imds.ComputeInfo() + set_properties("compute", compute_info, data) + + return compute_info.image_origin + + def test_response_validation(self): + # invalid json or empty response + self._assert_validation(http_status_code=200, + http_response='', + expected_valid=False, + expected_response='JSON parsing failed') + + self._assert_validation(http_status_code=200, + http_response=None, + expected_valid=False, + expected_response='JSON parsing failed') + + self._assert_validation(http_status_code=200, + http_response='{ bad json ', + expected_valid=False, + expected_response='JSON parsing failed') + + # 500 response + self._assert_validation(http_status_code=500, + http_response='error response', + expected_valid=False, + expected_response='[HTTP Failed] [500: reason] error response') + + # 429 response + self._assert_validation(http_status_code=429, + http_response='server busy', + expected_valid=False, + expected_response='[HTTP Failed] [429: reason] server busy') + + # valid json + self._assert_validation(http_status_code=200, + http_response=self._imds_response('valid'), + expected_valid=True, + expected_response='') + # unicode + self._assert_validation(http_status_code=200, + http_response=self._imds_response('unicode'), + expected_valid=True, + expected_response='') + + def test_field_validation(self): + # TODO: compute fields (#1249) + + self._assert_field('network', 'interface', 'ipv4', 'ipAddress', 'privateIpAddress') + self._assert_field('network', 'interface', 'ipv4', 'ipAddress') + self._assert_field('network', 'interface', 'ipv4') + self._assert_field('network', 'interface', 'macAddress') + self._assert_field('network') + + def _assert_field(self, *fields): + response = self._imds_response('valid') + response_obj = json.loads(ustr(response, encoding="utf-8")) + + # assert empty value + self._update_field(response_obj, fields, '') + altered_response = json.dumps(response_obj).encode() + self._assert_validation(http_status_code=200, + http_response=altered_response, + expected_valid=False, + expected_response='Empty field: [{0}]'.format(fields[-1])) + + # assert missing value + self._update_field(response_obj, fields, None) + altered_response = json.dumps(response_obj).encode() + self._assert_validation(http_status_code=200, + http_response=altered_response, + expected_valid=False, + expected_response='Missing field: [{0}]'.format(fields[-1])) + + def _update_field(self, obj, fields, val): + if isinstance(obj, list): + self._update_field(obj[0], fields, val) + else: + f = fields[0] + 
if len(fields) == 1: + if val is None: + del obj[f] + else: + obj[f] = val + else: + self._update_field(obj[f], fields[1:], val) + + @staticmethod + def _imds_response(f): + path = os.path.join(data_dir, "imds", "{0}.json".format(f)) + with open(path, "rb") as fh: + return fh.read() + + def _assert_validation(self, http_status_code, http_response, expected_valid, expected_response): + test_subject = imds.ImdsClient() + with patch("azurelinuxagent.common.utils.restutil.http_get") as mock_http_get: + mock_http_get.return_value = ResponseMock(status=http_status_code, + reason='reason', + response=http_response) + validate_response = test_subject.validate() + + self.assertEqual(1, mock_http_get.call_count) + positional_args, kw_args = mock_http_get.call_args + + self.assertTrue('User-Agent' in kw_args['headers']) + self.assertEqual(restutil.HTTP_USER_AGENT_HEALTH, kw_args['headers']['User-Agent']) + self.assertTrue('Metadata' in kw_args['headers']) + self.assertEqual(True, kw_args['headers']['Metadata']) + self.assertEqual('http://169.254.169.254/metadata/instance/?api-version=2018-02-01', + positional_args[0]) + self.assertEqual(expected_valid, validate_response[0]) + self.assertTrue(expected_response in validate_response[1], + "Expected: '{0}', Actual: '{1}'" + .format(expected_response, validate_response[1])) + + +if __name__ == '__main__': + unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/protocol/test_metadata.py walinuxagent-2.2.32/tests/protocol/test_metadata.py --- walinuxagent-2.2.21+really2.2.20/tests/protocol/test_metadata.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/protocol/test_metadata.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import json diff -Nru walinuxagent-2.2.21+really2.2.20/tests/protocol/test_protocol_util.py walinuxagent-2.2.32/tests/protocol/test_protocol_util.py --- walinuxagent-2.2.21+really2.2.20/tests/protocol/test_protocol_util.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/protocol/test_protocol_util.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from tests.tools import * @@ -60,7 +60,7 @@ tag_file = os.path.join(self.tmp_dir, TAG_FILE_NAME) #Test tag file doesn't exist - protocol_util.get_protocol_by_file() + protocol_util.get_protocol(by_file=True) protocol_util._detect_wire_protocol.assert_any_call() protocol_util._detect_metadata_protocol.assert_not_called() @@ -71,7 +71,7 @@ with open(tag_file, "w+") as tag_fd: tag_fd.write("") - protocol_util.get_protocol_by_file() + protocol_util.get_protocol(by_file=True) protocol_util._detect_metadata_protocol.assert_any_call() protocol_util._detect_wire_protocol.assert_not_called() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/protocol/test_restapi.py walinuxagent-2.2.32/tests/protocol/test_restapi.py --- walinuxagent-2.2.21+really2.2.20/tests/protocol/test_restapi.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/protocol/test_restapi.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # from tests.tools import * diff -Nru walinuxagent-2.2.21+really2.2.20/tests/protocol/test_wire.py walinuxagent-2.2.32/tests/protocol/test_wire.py --- walinuxagent-2.2.21+really2.2.20/tests/protocol/test_wire.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/protocol/test_wire.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,13 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import glob +import stat +import zipfile from azurelinuxagent.common import event from azurelinuxagent.common.protocol.wire import * +from azurelinuxagent.common.utils.shellutil import run_get_output +from tests.common.osutil.test_default import running_under_travis from tests.protocol.mockwiredata import * data_with_bom = b'\xef\xbb\xbfhehe' @@ -28,13 +32,14 @@ @patch("time.sleep") @patch("azurelinuxagent.common.protocol.wire.CryptUtil") +@patch("azurelinuxagent.common.protocol.healthservice.HealthService._report") class TestWireProtocol(AgentTestCase): def setUp(self): super(TestWireProtocol, self).setUp() HostPluginProtocol.set_default_channel(False) - def _test_getters(self, test_data, MockCryptUtil, _): + def _test_getters(self, test_data, __, MockCryptUtil, _): MockCryptUtil.side_effect = test_data.mock_crypt_util with patch.object(restutil, 'http_get', test_data.mock_http_get): @@ -79,7 +84,8 @@ test_data = WireProtocolData(DATA_FILE_EXT_NO_PUBLIC) self._test_getters(test_data, *args) - def test_getters_with_stale_goal_state(self, *args): + @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_extension_artifact") + def test_getters_with_stale_goal_state(self, patch_report, *args): test_data = WireProtocolData(DATA_FILE) test_data.emulate_stale_goal_state = True @@ -92,10 +98,9 @@ # fetched often; however, the dependent documents, such as the # HostingEnvironmentConfig, will be retrieved the expected number self.assertEqual(2, test_data.call_counts["hostingenvuri"]) + self.assertEqual(1, patch_report.call_count) - def test_call_storage_kwargs(self, - mock_cryptutil, - mock_sleep): + def test_call_storage_kwargs(self, *args): from azurelinuxagent.common.utils import restutil with patch.object(restutil, 'http_get') as http_patch: http_req = restutil.http_get @@ -153,33 +158,59 @@ host_plugin = wire_protocol_client.get_host_plugin() self.assertEqual(goal_state.container_id, host_plugin.container_id) self.assertEqual(goal_state.role_config_name, host_plugin.role_config_name) - patch_get_goal_state.assert_called_once() + self.assertEqual(1, patch_get_goal_state.call_count) - def test_download_ext_handler_pkg_fallback(self, *args): + @patch("azurelinuxagent.common.utils.restutil.http_request", side_effect=IOError) + @patch("azurelinuxagent.common.protocol.wire.WireClient.get_host_plugin") + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") + def test_download_ext_handler_pkg_fallback(self, patch_request, patch_get_host, patch_http, *args): ext_uri = 'extension_uri' host_uri = 'host_uri' - mock_host = HostPluginProtocol(host_uri, 'container_id', 'role_config') - with patch.object(restutil, - "http_request", - side_effect=IOError) as patch_http: - with patch.object(WireClient, - "get_host_plugin", - return_value=mock_host): - with patch.object(HostPluginProtocol, - "get_artifact_request", - return_value=[host_uri, {}]) as patch_request: + destination = 'destination' + patch_get_host.return_value = HostPluginProtocol(host_uri, 'container_id', 'role_config') + patch_request.return_value = [host_uri, {}] + + WireProtocol(wireserver_url).download_ext_handler_pkg(ext_uri, destination) + + self.assertEqual(patch_http.call_count, 2) + self.assertEqual(patch_request.call_count, 1) + self.assertEqual(patch_http.call_args_list[0][0][1], ext_uri) + self.assertEqual(patch_http.call_args_list[1][0][1], host_uri) + + 
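# A minimal, hypothetical sketch (not taken from the walinuxagent source) of the
# fallback pattern the rewritten test above exercises: try the direct extension
# URI first and, when that request raises IOError, retry through the host-plugin
# channel. The names download_with_fallback and http_get are illustrative
# assumptions only.
def download_with_fallback(http_get, ext_uri, host_plugin_uri):
    try:
        return http_get(ext_uri)
    except IOError:
        # the direct channel failed; fall back to the host-plugin endpoint
        return http_get(host_plugin_uri)
#
# Exercising the sketch the same way the test does, with unittest.mock:
#   http = Mock(side_effect=[IOError("direct channel down"), b"package-bytes"])
#   download_with_fallback(http, "extension_uri", "host_uri")  # -> b"package-bytes"
#   http.call_count                                            # -> 2: ext_uri first, then host_uri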
@skip_if_predicate_true(running_under_travis, "Travis unit tests should not have external dependencies") + def test_download_ext_handler_pkg_stream(self, *args): + ext_uri = 'https://dcrdata.blob.core.windows.net/files/packer.zip' + tmp = tempfile.mkdtemp() + destination = os.path.join(tmp, 'test_download_ext_handler_pkg_stream.zip') + + success = WireProtocol(wireserver_url).download_ext_handler_pkg(ext_uri, destination) + self.assertTrue(success) + self.assertTrue(os.path.exists(destination)) + + # verify size + self.assertEqual(18380915, os.stat(destination).st_size) + + # verify unzip + zipfile.ZipFile(destination).extractall(tmp) + packer = os.path.join(tmp, 'packer') + self.assertTrue(os.path.exists(packer)) + fileutil.chmod(packer, os.stat(packer).st_mode | stat.S_IXUSR) + + # verify unpacked size + self.assertEqual(87393596, os.stat(packer).st_size) + + # execute, verify result + packer_version = '{0} --version'.format(packer) + rc, stdout = run_get_output(packer_version) + self.assertEqual(0, rc) + self.assertEqual('1.2.5\n', stdout) - WireProtocol(wireserver_url).download_ext_handler_pkg(ext_uri) - - self.assertEqual(patch_http.call_count, 2) - self.assertEqual(patch_request.call_count, 1) - - self.assertEqual(patch_http.call_args_list[0][0][1], - ext_uri) - self.assertEqual(patch_http.call_args_list[1][0][1], - host_uri) + @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") def test_upload_status_blob_default(self, *args): + """ + Default status blob method is HostPlugin. + """ vmstatus = VMStatus(message="Ready", status="Ready") wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) @@ -193,10 +224,14 @@ HostPluginProtocol.set_default_channel(False) wire_protocol_client.upload_status_blob() - patch_default_upload.assert_called_once_with(testurl) - patch_get_goal_state.assert_not_called() - patch_host_ga_plugin_upload.assert_not_called() + # do not call the direct method unless host plugin fails + patch_default_upload.assert_not_called() + # host plugin always fetches a goal state + patch_get_goal_state.assert_called_once_with() + # host plugin uploads the status blob + patch_host_ga_plugin_upload.assert_called_once_with(ANY, testurl, 'BlockBlob') + @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") def test_upload_status_blob_host_ga_plugin(self, *args): vmstatus = VMStatus(message="Ready", status="Ready") wire_protocol_client = WireProtocol(wireserver_url).client @@ -217,13 +252,14 @@ HostPluginProtocol.set_default_channel(False) wire_protocol_client.get_goal_state = Mock(return_value=goal_state) wire_protocol_client.upload_status_blob() - patch_default_upload.assert_called_once_with(testurl) - wire_protocol_client.get_goal_state.assert_called_once() + patch_default_upload.assert_not_called() + self.assertEqual(1, wire_protocol_client.get_goal_state.call_count) patch_http.assert_called_once_with(testurl, wire_protocol_client.status_blob) - self.assertTrue(HostPluginProtocol.is_default_channel()) - HostPluginProtocol.set_default_channel(False) + self.assertFalse(HostPluginProtocol.is_default_channel()) - def test_upload_status_blob_unknown_type_assumes_block(self, *args): + @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") + @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.ensure_initialized") + def test_upload_status_blob_unknown_type_assumes_block(self, _, __, *args): vmstatus = VMStatus(message="Ready", status="Ready") 
wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) @@ -239,8 +275,9 @@ patch_prepare.assert_called_once_with("BlockBlob") patch_default_upload.assert_called_once_with(testurl) - patch_get_goal_state.assert_not_called() + patch_get_goal_state.assert_called_once_with() + @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") def test_upload_status_blob_reports_prepare_error(self, *args): vmstatus = VMStatus(message="Ready", status="Ready") wire_protocol_client = WireProtocol(wireserver_url).client @@ -250,13 +287,9 @@ wire_protocol_client.status_blob.vm_status = vmstatus goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state) - with patch.object(StatusBlob, "prepare", - side_effect=Exception) as mock_prepare: - with patch.object(WireClient, "report_status_event") as mock_event: - wire_protocol_client.upload_status_blob() - - mock_prepare.assert_called_once() - mock_event.assert_called_once() + with patch.object(StatusBlob, "prepare", side_effect=Exception) as mock_prepare: + self.assertRaises(ProtocolError, wire_protocol_client.upload_status_blob) + self.assertEqual(1, mock_prepare.call_count) def test_get_in_vm_artifacts_profile_blob_not_available(self, *args): wire_protocol_client = WireProtocol(wireserver_url).client @@ -297,6 +330,26 @@ host_plugin_get_artifact_url_and_headers.assert_called_with(testurl) + @patch("azurelinuxagent.common.event.add_event") + def test_artifacts_profile_json_parsing(self, patch_event, *args): + wire_protocol_client = WireProtocol(wireserver_url).client + wire_protocol_client.ext_conf = ExtensionsConfig(None) + wire_protocol_client.ext_conf.artifacts_profile_blob = testurl + goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state) + wire_protocol_client.get_goal_state = Mock(return_value=goal_state) + + # response is invalid json + wire_protocol_client.call_storage_service = Mock(return_value=MockResponse("invalid json".encode('utf-8'), 200)) + in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile() + + # ensure response is empty + self.assertEqual(None, in_vm_artifacts_profile) + + # ensure event is logged + self.assertEqual(1, patch_event.call_count) + self.assertFalse(patch_event.call_args[1]['is_success']) + self.assertTrue('invalid json' in patch_event.call_args[1]['message']) + self.assertEqual('ArtifactsProfileBlob', patch_event.call_args[1]['op']) def test_get_in_vm_artifacts_profile_default(self, *args): wire_protocol_client = WireProtocol(wireserver_url).client @@ -310,8 +363,7 @@ self.assertEqual(dict(onHold='true'), in_vm_artifacts_profile.__dict__) self.assertTrue(in_vm_artifacts_profile.is_on_hold()) - @patch("time.sleep") - def test_fetch_manifest_fallback(self, patch_sleep, *args): + def test_fetch_manifest_fallback(self, *args): uri1 = ExtHandlerVersionUri() uri1.uri = 'ext_uri' uris = DataContractList(ExtHandlerVersionUri) @@ -342,7 +394,7 @@ wire_protocol_client.ext_conf.artifacts_profile_blob = testurl goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state) wire_protocol_client.get_goal_state = Mock(return_value=goal_state) - wire_protocol_client.fetch = Mock(side_effect=[None, '{"onHold": "true"}'.encode('utf-8')]) + wire_protocol_client.fetch = Mock(side_effect=[None, '{"onHold": "true"}']) with patch.object(HostPluginProtocol, "get_artifact_request", return_value=['dummy_url', {}]) as artifact_request: diff -Nru walinuxagent-2.2.21+really2.2.20/tests/test_agent.py walinuxagent-2.2.32/tests/test_agent.py --- 
walinuxagent-2.2.21+really2.2.20/tests/test_agent.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/test_agent.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,12 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # -import mock import os.path -import sys from azurelinuxagent.agent import * from azurelinuxagent.common.conf import * @@ -28,10 +26,13 @@ """AutoUpdate.Enabled = True AutoUpdate.GAFamily = Prod Autoupdate.Frequency = 3600 +CGroups.EnforceLimits = False +CGroups.Excluded = customscript,runcommand DVD.MountPoint = /mnt/cdrom/secure DetectScvmmEnv = False -EnableOverProvisioning = False +EnableOverProvisioning = True Extension.LogDir = /var/log/azure +Extensions.Enabled = True HttpProxy.Host = None HttpProxy.Port = None Lib.Dir = /var/lib/waagent @@ -39,7 +40,7 @@ OS.AllowHTTP = False OS.CheckRdmaDriver = False OS.EnableFIPS = True -OS.EnableFirewall = True +OS.EnableFirewall = False OS.EnableRDMA = False OS.HomeDir = /home OS.OpensslPath = /usr/bin/openssl @@ -119,7 +120,7 @@ agent.daemon() mock_daemon.run.assert_called_once_with(child_args=None) - mock_load.assert_called_once() + self.assertEqual(1, mock_load.call_count) @patch("azurelinuxagent.daemon.get_daemon_handler") @patch("azurelinuxagent.common.conf.load_conf_from_file") @@ -133,7 +134,7 @@ agent.daemon() mock_daemon.run.assert_called_once_with(child_args="-configuration-path:/foo/bar.conf") - mock_load.assert_called_once() + self.assertEqual(1, mock_load.call_count) @patch("azurelinuxagent.common.conf.get_ext_log_dir") def test_agent_ensures_extension_log_directory(self, mock_dir): @@ -158,7 +159,7 @@ conf_file_path=os.path.join(data_dir, "test_waagent.conf")) self.assertTrue(os.path.isfile(ext_log_dir)) self.assertFalse(os.path.isdir(ext_log_dir)) - mock_log.assert_called_once() + self.assertEqual(1, mock_log.call_count) def test_agent_get_configuration(self): Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf")) @@ -168,3 +169,22 @@ for k in sorted(configuration.keys()): actual_configuration.append("{0} = {1}".format(k, configuration[k])) self.assertEqual(EXPECTED_CONFIGURATION, actual_configuration) + + def test_agent_usage_message(self): + message = usage() + + # Python 2.6 does not have assertIn() + self.assertTrue("-verbose" in message) + self.assertTrue("-force" in message) + self.assertTrue("-help" in message) + self.assertTrue("-configuration-path" in message) + self.assertTrue("-deprovision" in message) + self.assertTrue("-register-service" in message) + self.assertTrue("-version" in message) + self.assertTrue("-daemon" in message) + self.assertTrue("-start" in message) + self.assertTrue("-run-exthandlers" in message) + self.assertTrue("-show-configuration" in message) + + # sanity check + self.assertFalse("-not-a-valid-option" in message) diff -Nru walinuxagent-2.2.21+really2.2.20/tests/test_import.py walinuxagent-2.2.32/tests/test_import.py --- walinuxagent-2.2.21+really2.2.20/tests/test_import.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/test_import.py 2018-09-30 15:05:27.000000000 +0000 @@ -9,8 +9,10 @@ import azurelinuxagent.daemon.scvmm as scvmm import 
azurelinuxagent.ga.exthandlers as exthandlers import azurelinuxagent.ga.monitor as monitor +import azurelinuxagent.ga.remoteaccess as remoteaccess import azurelinuxagent.ga.update as update + class TestImportHandler(AgentTestCase): def test_get_handler(self): osutil.get_osutil() @@ -24,3 +26,4 @@ monitor.get_monitor_handler() update.get_update_handler() exthandlers.get_exthandlers_handler() + remoteaccess.get_remote_access_handler() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/tools.py walinuxagent-2.2.32/tests/tools.py --- walinuxagent-2.2.21+really2.2.20/tests/tools.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/tools.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,14 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # """ Define util functions for unit test """ - +import difflib import os +import pprint import re import shutil import tempfile @@ -37,9 +38,9 @@ # Import mock module for Python2 and Python3 try: - from unittest.mock import Mock, patch, MagicMock, DEFAULT, call + from unittest.mock import Mock, patch, MagicMock, ANY, DEFAULT, call except ImportError: - from mock import Mock, patch, MagicMock, DEFAULT, call + from mock import Mock, patch, MagicMock, ANY, DEFAULT, call test_dir = os.path.dirname(os.path.abspath(__file__)) data_dir = os.path.join(test_dir, "data") @@ -54,7 +55,81 @@ logger.LogLevel.VERBOSE) +def _do_nothing(): + pass + + +_MAX_LENGTH = 120 + +_MAX_LENGTH_SAFE_REPR = 80 + +def safe_repr(obj, short=False): + try: + result = repr(obj) + except Exception: + result = object.__repr__(obj) + if not short or len(result) < _MAX_LENGTH: + return result + return result[:_MAX_LENGTH_SAFE_REPR] + ' [truncated]...' + + +def skip_if_predicate_false(predicate, message): + if not predicate(): + if hasattr(unittest, "skip"): + return unittest.skip(message) + return lambda func: None + return lambda func: func + + +def skip_if_predicate_true(predicate, message): + if predicate(): + if hasattr(unittest, "skip"): + return unittest.skip(message) + return lambda func: None + return lambda func: func + + +def _safe_repr(obj, short=False): + """ + Copied from Python 3.x + """ + try: + result = repr(obj) + except Exception: + result = object.__repr__(obj) + if not short or len(result) < _MAX_LENGTH: + return result + return result[:_MAX_LENGTH] + ' [truncated]...' 
+ + class AgentTestCase(unittest.TestCase): + @classmethod + def setUpClass(cls): + # Setup newer unittest assertions missing in prior versions of Python + + if not hasattr(cls, "assertRegex"): + cls.assertRegex = cls.assertRegexpMatches if hasattr(cls, "assertRegexpMatches") else _do_nothing + if not hasattr(cls, "assertNotRegex"): + cls.assertNotRegex = cls.assertNotRegexpMatches if hasattr(cls, "assertNotRegexpMatches") else _do_nothing + if not hasattr(cls, "assertIn"): + cls.assertIn = cls.emulate_assertIn + if not hasattr(cls, "assertNotIn"): + cls.assertNotIn = cls.emulate_assertNotIn + if not hasattr(cls, "assertGreater"): + cls.assertGreater = cls.emulate_assertGreater + if not hasattr(cls, "assertLess"): + cls.assertLess = cls.emulate_assertLess + if not hasattr(cls, "assertIsNone"): + cls.assertIsNone = cls.emulate_assertIsNone + if not hasattr(cls, "assertIsNotNone"): + cls.assertIsNotNone = cls.emulate_assertIsNotNone + if hasattr(cls, "assertRaisesRegexp"): + cls.assertRaisesRegex = cls.assertRaisesRegexp + if not hasattr(cls, "assertRaisesRegex"): + cls.assertRaisesRegex = cls.emulate_raises_regex + if not hasattr(cls, "assertListEqual"): + cls.assertListEqual = cls.emulate_assertListEqual + def setUp(self): prefix = "{0}_".format(self.__class__.__name__) @@ -76,7 +151,153 @@ if not debug and self.tmp_dir is not None: shutil.rmtree(self.tmp_dir) - def _create_files(self, tmp_dir, prefix, suffix, count, with_sleep=0): + def emulate_assertIn(self, a, b, msg=None): + if a not in b: + msg = msg if msg is not None else "{0} not found in {1}".format(_safe_repr(a), _safe_repr(b)) + self.fail(msg) + + def emulate_assertNotIn(self, a, b, msg=None): + if a in b: + msg = msg if msg is not None else "{0} unexpectedly found in {1}".format(_safe_repr(a), _safe_repr(b)) + self.fail(msg) + + def emulate_assertGreater(self, a, b, msg=None): + if not a > b: + msg = msg if msg is not None else '{0} not greater than {1}'.format(_safe_repr(a), _safe_repr(b)) + self.fail(msg) + + def emulate_assertLess(self, a, b, msg=None): + if not a < b: + msg = msg if msg is not None else '{0} not less than {1}'.format(_safe_repr(a), _safe_repr(b)) + self.fail(msg) + + def emulate_assertIsNone(self, x, msg=None): + if x is not None: + msg = msg if msg is not None else '{0} is not None'.format(_safe_repr(x)) + self.fail(msg) + + def emulate_assertIsNotNone(self, x, msg=None): + if x is None: + msg = msg if msg is not None else '{0} is None'.format(_safe_repr(x)) + self.fail(msg) + + + def emulate_raises_regex(self, exception_type, regex, function, *args, **kwargs): + try: + function(*args, **kwargs) + except Exception as e: + if re.search(regex, str(e), flags=1) is not None: + return + else: + self.fail("Expected exception {0} matching {1}. Actual: {2}".format( + exception_type, regex, str(e))) + self.fail("No exception was thrown. Expected exception {0} matching {1}".format(exception_type, regex)) + + def emulate_assertListEqual(self, seq1, seq2, msg=None, seq_type=None): + """An equality assertion for ordered sequences (like lists and tuples). + + For the purposes of this function, a valid ordered sequence type is one + which can be indexed, has a length, and has an equality operator. + + Args: + seq1: The first sequence to compare. + seq2: The second sequence to compare. + seq_type: The expected datatype of the sequences, or None if no + datatype should be enforced. + msg: Optional message to use on failure instead of a list of + differences. 
+ """ + if seq_type is not None: + seq_type_name = seq_type.__name__ + if not isinstance(seq1, seq_type): + raise self.failureException('First sequence is not a %s: %s' + % (seq_type_name, safe_repr(seq1))) + if not isinstance(seq2, seq_type): + raise self.failureException('Second sequence is not a %s: %s' + % (seq_type_name, safe_repr(seq2))) + else: + seq_type_name = "sequence" + + differing = None + try: + len1 = len(seq1) + except (TypeError, NotImplementedError): + differing = 'First %s has no length. Non-sequence?' % ( + seq_type_name) + + if differing is None: + try: + len2 = len(seq2) + except (TypeError, NotImplementedError): + differing = 'Second %s has no length. Non-sequence?' % ( + seq_type_name) + + if differing is None: + if seq1 == seq2: + return + + seq1_repr = safe_repr(seq1) + seq2_repr = safe_repr(seq2) + if len(seq1_repr) > 30: + seq1_repr = seq1_repr[:30] + '...' + if len(seq2_repr) > 30: + seq2_repr = seq2_repr[:30] + '...' + elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr) + differing = '%ss differ: %s != %s\n' % elements + + for i in xrange(min(len1, len2)): + try: + item1 = seq1[i] + except (TypeError, IndexError, NotImplementedError): + differing += ('\nUnable to index element %d of first %s\n' % + (i, seq_type_name)) + break + + try: + item2 = seq2[i] + except (TypeError, IndexError, NotImplementedError): + differing += ('\nUnable to index element %d of second %s\n' % + (i, seq_type_name)) + break + + if item1 != item2: + differing += ('\nFirst differing element %d:\n%s\n%s\n' % + (i, safe_repr(item1), safe_repr(item2))) + break + else: + if (len1 == len2 and seq_type is None and + type(seq1) != type(seq2)): + # The sequences are the same, but have differing types. + return + + if len1 > len2: + differing += ('\nFirst %s contains %d additional ' + 'elements.\n' % (seq_type_name, len1 - len2)) + try: + differing += ('First extra element %d:\n%s\n' % + (len2, safe_repr(seq1[len2]))) + except (TypeError, IndexError, NotImplementedError): + differing += ('Unable to index element %d ' + 'of first %s\n' % (len2, seq_type_name)) + elif len1 < len2: + differing += ('\nSecond %s contains %d additional ' + 'elements.\n' % (seq_type_name, len2 - len1)) + try: + differing += ('First extra element %d:\n%s\n' % + (len1, safe_repr(seq2[len1]))) + except (TypeError, IndexError, NotImplementedError): + differing += ('Unable to index element %d ' + 'of second %s\n' % (len1, seq_type_name)) + standardMsg = differing + diffMsg = '\n' + '\n'.join( + difflib.ndiff(pprint.pformat(seq1).splitlines(), + pprint.pformat(seq2).splitlines())) + standardMsg = self._truncateMessage(standardMsg, diffMsg) + msg = self._formatMessage(msg, standardMsg) + self.fail(msg) + + @staticmethod + def _create_files(tmp_dir, prefix, suffix, count, with_sleep=0): for i in range(count): f = os.path.join(tmp_dir, '.'.join((prefix, str(i), suffix))) fileutil.write_file(f, "faux content") diff -Nru walinuxagent-2.2.21+really2.2.20/tests/utils/__init__.py walinuxagent-2.2.32/tests/utils/__init__.py --- walinuxagent-2.2.21+really2.2.20/tests/utils/__init__.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/utils/__init__.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # diff -Nru walinuxagent-2.2.21+really2.2.20/tests/utils/process_target.sh walinuxagent-2.2.32/tests/utils/process_target.sh --- walinuxagent-2.2.21+really2.2.20/tests/utils/process_target.sh 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/utils/process_target.sh 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,41 @@ +#!/bin/sh + +count=0 +exitcode=0 + +while [ $# -gt 0 ]; do + case "$1" in + -e) + shift + echo $1 1>&2 + shift + ;; + -o) + shift + echo $1 + shift + ;; + -t) + shift + count=$1 + shift + ;; + -x) + shift + exitcode=$1 + shift + ;; + *) + break + ;; + esac +done + +if [ $count -gt 0 ]; then + for iter in $(seq 1 $count); do + sleep 1 + echo "Iteration $iter" + done +fi + +exit $exitcode diff -Nru walinuxagent-2.2.21+really2.2.20/tests/utils/test_archive.py walinuxagent-2.2.32/tests/utils/test_archive.py --- walinuxagent-2.2.21+really2.2.20/tests/utils/test_archive.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/utils/test_archive.py 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,248 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the Apache License. +from datetime import datetime, timedelta + +import zipfile + +from azurelinuxagent.common.utils.archive import StateFlusher, StateArchiver, MAX_ARCHIVED_STATES +from tests.tools import * + +debug = False +if os.environ.get('DEBUG') == '1': + debug = True + +# Enable verbose logger to stdout +if debug: + logger.add_logger_appender(logger.AppenderType.STDOUT, + logger.LogLevel.VERBOSE) + + +class TestArchive(AgentTestCase): + def setUp(self): + prefix = "{0}_".format(self.__class__.__name__) + + self.tmp_dir = tempfile.mkdtemp(prefix=prefix) + + def tearDown(self): + if not debug and self.tmp_dir is not None: + shutil.rmtree(self.tmp_dir) + + def _write_file(self, fn, contents=None): + full_name = os.path.join(self.tmp_dir, fn) + fileutil.mkdir(os.path.dirname(full_name)) + + with open(full_name, 'w') as fh: + data = contents if contents is not None else fn + fh.write(data) + return full_name + + @property + def history_dir(self): + return os.path.join(self.tmp_dir, 'history') + + def test_archive00(self): + """ + StateFlusher should move all 'goal state' files to a new directory + under the history folder that is timestamped. + """ + temp_files = [ + 'Prod.0.manifest.xml', + 'Prod.0.agentsManifest', + 'Microsoft.Azure.Extensions.CustomScript.0.xml' + ] + + for f in temp_files: + self._write_file(f) + + test_subject = StateFlusher(self.tmp_dir) + test_subject.flush(datetime.utcnow()) + + self.assertTrue(os.path.exists(self.history_dir)) + self.assertTrue(os.path.isdir(self.history_dir)) + + timestamp_dirs = os.listdir(self.history_dir) + self.assertEqual(1, len(timestamp_dirs)) + + self.assertIsIso8601(timestamp_dirs[0]) + ts = self.parse_isoformat(timestamp_dirs[0]) + self.assertDateTimeCloseTo(ts, datetime.utcnow(), timedelta(seconds=30)) + + for f in temp_files: + history_path = os.path.join(self.history_dir, timestamp_dirs[0], f) + msg = "expected the temp file {0} to exist".format(history_path) + self.assertTrue(os.path.exists(history_path), msg) + + def test_archive01(self): + """ + StateArchiver should archive all history directories by + + 1. Creating a .zip of a timestamped directory's files + 2. Saving the .zip to /var/lib/waagent/history/ + 2. 
Deleting the timestamped directory + """ + temp_files = [ + 'Prod.0.manifest.xml', + 'Prod.0.agentsManifest', + 'Microsoft.Azure.Extensions.CustomScript.0.xml' + ] + + for f in temp_files: + self._write_file(f) + + flusher = StateFlusher(self.tmp_dir) + flusher.flush(datetime.utcnow()) + + test_subject = StateArchiver(self.tmp_dir) + test_subject.archive() + + timestamp_zips = os.listdir(self.history_dir) + self.assertEqual(1, len(timestamp_zips)) + + zip_fn = timestamp_zips[0] # 2000-01-01T00:00:00.000000.zip + ts_s = os.path.splitext(zip_fn)[0] # 2000-01-01T00:00:00.000000 + + self.assertIsIso8601(ts_s) + ts = self.parse_isoformat(ts_s) + self.assertDateTimeCloseTo(ts, datetime.utcnow(), timedelta(seconds=30)) + + zip_full = os.path.join(self.history_dir, zip_fn) + self.assertZipContains(zip_full, temp_files) + + def test_archive02(self): + """ + StateArchiver should purge the MAX_ARCHIVED_STATES oldest files + or directories. The oldest timestamps are purged first. + + This test case creates a mixture of archive files and directories. + It creates 5 more values than MAX_ARCHIVED_STATES to ensure that + 5 archives are cleaned up. It asserts that the files and + directories are properly deleted from the disk. + """ + count = 6 + total = MAX_ARCHIVED_STATES + count + + start = datetime.now() + timestamps = [] + + for i in range(0, total): + ts = start + timedelta(seconds=i) + timestamps.append(ts) + + if i % 2 == 0: + fn = os.path.join('history', ts.isoformat(), 'Prod.0.manifest.xml') + else: + fn = os.path.join('history', "{0}.zip".format(ts.isoformat())) + + self._write_file(fn) + + self.assertEqual(total, len(os.listdir(self.history_dir))) + + test_subject = StateArchiver(self.tmp_dir) + test_subject.purge() + + archived_entries = os.listdir(self.history_dir) + self.assertEqual(MAX_ARCHIVED_STATES, len(archived_entries)) + + archived_entries.sort() + + for i in range(0, MAX_ARCHIVED_STATES): + ts = timestamps[i + count].isoformat() + if i % 2 == 0: + fn = ts + else: + fn = "{0}.zip".format(ts) + self.assertTrue(fn in archived_entries, "'{0}' is not in the list of unpurged entires".format(fn)) + + def test_archive03(self): + """ + If the StateFlusher has to flush the same file, it should + overwrite the existing one. 
+ """ + temp_files = [ + 'Prod.0.manifest.xml', + 'Prod.0.agentsManifest', + 'Microsoft.Azure.Extensions.CustomScript.0.xml' + ] + + def _write_goal_state_files(temp_files, content=None): + for f in temp_files: + self._write_file(f, content) + + def _check_history_files(timestamp_dir, files, content=None): + for f in files: + history_path = os.path.join(self.history_dir, timestamp_dir, f) + msg = "expected the temp file {0} to exist".format(history_path) + self.assertTrue(os.path.exists(history_path), msg) + expected_content = f if content is None else content + actual_content = fileutil.read_file(history_path) + self.assertEqual(expected_content, actual_content) + + timestamp = datetime.utcnow() + + _write_goal_state_files(temp_files) + test_subject = StateFlusher(self.tmp_dir) + test_subject.flush(timestamp) + + # Ensure history directory exists, has proper timestamped-based name, + self.assertTrue(os.path.exists(self.history_dir)) + self.assertTrue(os.path.isdir(self.history_dir)) + + timestamp_dirs = os.listdir(self.history_dir) + self.assertEqual(1, len(timestamp_dirs)) + + self.assertIsIso8601(timestamp_dirs[0]) + ts = self.parse_isoformat(timestamp_dirs[0]) + self.assertDateTimeCloseTo(ts, datetime.utcnow(), timedelta(seconds=30)) + + # Ensure saved files contain the right content + _check_history_files(timestamp_dirs[0], temp_files) + + # re-write all of the same files with different content, and flush again. + # .flush() should overwrite the existing ones + _write_goal_state_files(temp_files, "--this-has-been-changed--") + test_subject.flush(timestamp) + + # The contents of the saved files were overwritten as a result of the flush. + _check_history_files(timestamp_dirs[0], temp_files, "--this-has-been-changed--") + + def test_archive04(self): + """ + The archive directory is created if it does not exist. + + This failure was caught when .purge() was called before .archive(). + """ + test_subject = StateArchiver(os.path.join(self.tmp_dir, 'does-not-exist')) + test_subject.purge() + + def parse_isoformat(self, s): + return datetime.strptime(s, '%Y-%m-%dT%H:%M:%S.%f') + + def assertIsIso8601(self, s): + try: + self.parse_isoformat(s) + except: + raise AssertionError("the value '{0}' is not an ISO8601 formatted timestamp".format(s)) + + def _total_seconds(self, td): + """ + Compute the total_seconds for a timedelta because 2.6 does not have total_seconds. + """ + return (0.0 + td.microseconds + (td.seconds + td.days * 24 * 60 * 60) * 10 ** 6) / 10 ** 6 + + def assertDateTimeCloseTo(self, t1, t2, within): + if t1 <= t2: + diff = t2 -t1 + else: + diff = t1 - t2 + + secs = self._total_seconds(within - diff) + if secs < 0: + self.fail("the timestamps are outside of the tolerance of by {0} seconds".format(secs)) + + def assertZipContains(self, zip_fn, files): + ziph = zipfile.ZipFile(zip_fn, 'r') + zip_files = [x.filename for x in ziph.filelist] + for f in files: + self.assertTrue(f in zip_files, "'{0}' was not found in {1}".format(f, zip_fn)) + + ziph.close() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/utils/test_crypt_util.py walinuxagent-2.2.32/tests/utils/test_crypt_util.py --- walinuxagent-2.2.21+really2.2.20/tests/utils/test_crypt_util.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/utils/test_crypt_util.py 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,80 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.6+ and Openssl 1.0+ +# + +import base64 +import binascii +import errno as errno +import glob +import random +import string +import subprocess +import sys +import tempfile +import uuid +import unittest + +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.utils.shellutil as shellutil +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.utils.cryptutil import CryptUtil +from azurelinuxagent.common.exception import CryptError +from azurelinuxagent.common.version import PY_VERSION_MAJOR +from tests.tools import * +from subprocess import CalledProcessError + + +def is_python_version_26(): + return sys.version_info[0] == 2 and sys.version_info[1] == 6 + + +class TestCryptoUtilOperations(AgentTestCase): + def test_decrypt_encrypted_text(self): + encrypted_string = load_data("wire/encrypted.enc") + prv_key = os.path.join(self.tmp_dir, "TransportPrivate.pem") + with open(prv_key, 'w+') as c: + c.write(load_data("wire/sample.pem")) + secret = ']aPPEv}uNg1FPnl?' + crypto = CryptUtil(conf.get_openssl_cmd()) + decrypted_string = crypto.decrypt_secret(encrypted_string, prv_key) + self.assertEquals(secret, decrypted_string, "decrypted string does not match expected") + + def test_decrypt_encrypted_text_missing_private_key(self): + encrypted_string = load_data("wire/encrypted.enc") + prv_key = os.path.join(self.tmp_dir, "TransportPrivate.pem") + crypto = CryptUtil(conf.get_openssl_cmd()) + self.assertRaises(CryptError, crypto.decrypt_secret, encrypted_string, "abc" + prv_key) + + @skip_if_predicate_true(is_python_version_26, "Disabled on Python 2.6") + def test_decrypt_encrypted_text_wrong_private_key(self): + encrypted_string = load_data("wire/encrypted.enc") + prv_key = os.path.join(self.tmp_dir, "wrong.pem") + with open(prv_key, 'w+') as c: + c.write(load_data("wire/trans_prv")) + crypto = CryptUtil(conf.get_openssl_cmd()) + self.assertRaises(CryptError, crypto.decrypt_secret, encrypted_string, prv_key) + + def test_decrypt_encrypted_text_text_not_encrypted(self): + encrypted_string = "abc@123" + prv_key = os.path.join(self.tmp_dir, "TransportPrivate.pem") + with open(prv_key, 'w+') as c: + c.write(load_data("wire/sample.pem")) + crypto = CryptUtil(conf.get_openssl_cmd()) + self.assertRaises(CryptError, crypto.decrypt_secret, encrypted_string, prv_key) + + +if __name__ == '__main__': + unittest.main() diff -Nru walinuxagent-2.2.21+really2.2.20/tests/utils/test_file_util.py walinuxagent-2.2.32/tests/utils/test_file_util.py --- walinuxagent-2.2.21+really2.2.20/tests/utils/test_file_util.py 2018-01-08 22:57:56.000000000 +0000 +++ walinuxagent-2.2.32/tests/utils/test_file_util.py 2018-09-30 15:05:27.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2014 Microsoft Corporation +# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Requires Python 2.4+ and Openssl 1.0+ +# Requires Python 2.6+ and Openssl 1.0+ # import errno as errno @@ -38,7 +38,19 @@ content_read = fileutil.read_file(test_file) self.assertEquals(content, content_read) os.remove(test_file) - + + def test_write_file_content_is_None(self): + """ + write_file throws when content is None. No file is created. + """ + try: + test_file=os.path.join(self.tmp_dir, self.test_file) + fileutil.write_file(test_file, None) + + self.fail("expected write_file to throw an exception") + except: + self.assertEquals(False, os.path.exists(test_file)) + def test_rw_utf8_file(self): test_file=os.path.join(self.tmp_dir, self.test_file) content = u"\u6211" diff -Nru walinuxagent-2.2.21+really2.2.20/tests/utils/test_network_util.py walinuxagent-2.2.32/tests/utils/test_network_util.py --- walinuxagent-2.2.21+really2.2.20/tests/utils/test_network_util.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/utils/test_network_util.py 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,62 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.6+ and Openssl 1.0+ +# + +import azurelinuxagent.common.utils.networkutil as networkutil + +from tests.tools import * + + +class TestNetworkOperations(AgentTestCase): + def test_route_entry(self): + interface = "eth0" + mask = "C0FFFFFF" # 255.255.255.192 + destination = "C0BB910A" # + gateway = "C1BB910A" + flags = "1" + metric = "0" + + expected = 'Iface: eth0\tDestination: 10.145.187.192\tGateway: 10.145.187.193\tMask: 255.255.255.192\tFlags: 0x0001\tMetric: 0' + expected_json = '{"Iface": "eth0", "Destination": "10.145.187.192", "Gateway": "10.145.187.193", "Mask": "255.255.255.192", "Flags": "0x0001", "Metric": "0"}' + + entry = networkutil.RouteEntry(interface, destination, gateway, mask, flags, metric) + + self.assertEqual(str(entry), expected) + self.assertEqual(entry.to_json(), expected_json) + + def test_nic_link_only(self): + nic = networkutil.NetworkInterfaceCard("test0", "link info") + self.assertEqual(str(nic), '{ "name": "test0", "link": "link info" }') + + def test_nic_ipv4(self): + nic = networkutil.NetworkInterfaceCard("test0", "link info") + nic.add_ipv4("ipv4-1") + self.assertEqual(str(nic), '{ "name": "test0", "link": "link info", "ipv4": ["ipv4-1"] }') + nic.add_ipv4("ipv4-2") + self.assertEqual(str(nic), '{ "name": "test0", "link": "link info", "ipv4": ["ipv4-1","ipv4-2"] }') + + def test_nic_ipv6(self): + nic = networkutil.NetworkInterfaceCard("test0", "link info") + nic.add_ipv6("ipv6-1") + self.assertEqual(str(nic), '{ "name": "test0", "link": "link info", "ipv6": ["ipv6-1"] }') + nic.add_ipv6("ipv6-2") + self.assertEqual(str(nic), '{ "name": "test0", "link": "link info", "ipv6": ["ipv6-1","ipv6-2"] }') + + def test_nic_ordinary(self): + nic = networkutil.NetworkInterfaceCard("test0", "link INFO") + nic.add_ipv6("ipv6-1") + nic.add_ipv4("ipv4-1") + self.assertEqual(str(nic), '{ "name": "test0", "link": "link INFO", "ipv4": ["ipv4-1"], "ipv6": 
["ipv6-1"] }') diff -Nru walinuxagent-2.2.21+really2.2.20/tests/utils/test_process_util.py walinuxagent-2.2.32/tests/utils/test_process_util.py --- walinuxagent-2.2.21+really2.2.20/tests/utils/test_process_util.py 1970-01-01 00:00:00.000000000 +0000 +++ walinuxagent-2.2.32/tests/utils/test_process_util.py 2018-09-30 15:05:27.000000000 +0000 @@ -0,0 +1,260 @@ +# Copyright Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.6+ and Openssl 1.0+ +# +import datetime +import subprocess + +from azurelinuxagent.common.exception import ExtensionError +from azurelinuxagent.common.utils.processutil \ + import format_stdout_stderr, capture_from_process +from tests.tools import * +import sys + +process_target = "{0}/process_target.sh".format(os.path.abspath(os.path.join(__file__, os.pardir))) +process_cmd_template = "{0} -o '{1}' -e '{2}'" + +EXTENSION_ERROR_CODE = 1000 + +class TestProcessUtils(AgentTestCase): + def test_format_stdout_stderr00(self): + """ + If stdout and stderr are both smaller than the max length, + the full representation should be displayed. + """ + stdout = "The quick brown fox jumps over the lazy dog." + stderr = "The five boxing wizards jump quickly." + + expected = "[stdout]\n{0}\n\n[stderr]\n{1}".format(stdout, stderr) + actual = format_stdout_stderr(stdout, stderr, 1000) + self.assertEqual(expected, actual) + + def test_format_stdout_stderr01(self): + """ + If stdout and stderr both exceed the max length, + then both stdout and stderr are trimmed equally. + """ + stdout = "The quick brown fox jumps over the lazy dog." + stderr = "The five boxing wizards jump quickly." + + # noinspection SpellCheckingInspection + expected = '[stdout]\ns over the lazy dog.\n\n[stderr]\nizards jump quickly.' + actual = format_stdout_stderr(stdout, stderr, 60) + self.assertEqual(expected, actual) + self.assertEqual(60, len(actual)) + + def test_format_stdout_stderr02(self): + """ + If stderr is much larger than stdout, stderr is allowed + to borrow space from stdout's quota. + """ + stdout = "empty" + stderr = "The five boxing wizards jump quickly." + + expected = '[stdout]\nempty\n\n[stderr]\ns jump quickly.' + actual = format_stdout_stderr(stdout, stderr, 40) + self.assertEqual(expected, actual) + self.assertEqual(40, len(actual)) + + def test_format_stdout_stderr03(self): + """ + If stdout is much larger than stderr, stdout is allowed + to borrow space from stderr's quota. + """ + stdout = "The quick brown fox jumps over the lazy dog." + stderr = "empty" + + expected = '[stdout]\nr the lazy dog.\n\n[stderr]\nempty' + actual = format_stdout_stderr(stdout, stderr, 40) + self.assertEqual(expected, actual) + self.assertEqual(40, len(actual)) + + def test_format_stdout_stderr04(self): + """ + If the max length is not sufficient to even hold the stdout + and stderr markers an empty string is returned. + """ + stdout = "The quick brown fox jumps over the lazy dog." + stderr = "The five boxing wizards jump quickly." 
+
+        expected = ''
+        actual = format_stdout_stderr(stdout, stderr, 4)
+        self.assertEqual(expected, actual)
+        self.assertEqual(0, len(actual))
+
+    def test_format_stdout_stderr05(self):
+        """
+        If stdout and stderr are empty, an empty template is returned.
+        """
+
+        expected = '[stdout]\n\n\n[stderr]\n'
+        actual = format_stdout_stderr('', '', 1000)
+        self.assertEqual(expected, actual)
+
+    def test_process_stdout_stderr(self):
+        """
+        If the command has no timeout, the process need not be the leader of its own process group.
+        """
+        stdout = "The quick brown fox jumps over the lazy dog.\n"
+        stderr = "The five boxing wizards jump quickly.\n"
+
+        expected = "[stdout]\n{0}\n\n[stderr]\n{1}".format(stdout, stderr)
+
+        cmd = process_cmd_template.format(process_target, stdout, stderr)
+        process = subprocess.Popen(cmd,
+                                   shell=True,
+                                   stdout=subprocess.PIPE,
+                                   stderr=subprocess.PIPE,
+                                   env=os.environ)
+
+        actual = capture_from_process(process, cmd)
+        self.assertEqual(expected, actual)
+
+    def test_process_timeout_non_forked(self):
+        """
+        non-forked process runs for 20 seconds, timeout is 10 seconds
+        we expect:
+            - test to run in just over 10 seconds
+            - exception should be thrown
+            - output should be collected
+        """
+        cmd = "{0} -t 20".format(process_target)
+        process = subprocess.Popen(cmd,
+                                   shell=True,
+                                   stdout=subprocess.PIPE,
+                                   stderr=subprocess.PIPE,
+                                   env=os.environ,
+                                   preexec_fn=os.setsid)
+
+        try:
+            capture_from_process(process, 'sleep 20', 10, EXTENSION_ERROR_CODE)
+            self.fail('Timeout exception was expected')
+        except ExtensionError as e:
+            body = str(e)
+            self.assertTrue('Timeout(10)' in body)
+            self.assertTrue('Iteration 9' in body)
+            self.assertFalse('Iteration 11' in body)
+            self.assertEqual(EXTENSION_ERROR_CODE, e.code)
+        except Exception as gen_ex:
+            self.fail('Unexpected exception: {0}'.format(gen_ex))
+
+    def test_process_timeout_forked(self):
+        """
+        forked process runs for 20 seconds, timeout is 10 seconds
+        we expect:
+            - test to run in less than 3 seconds
+            - no exception should be thrown
+            - no output is collected
+        """
+        cmd = "{0} -t 20 &".format(process_target)
+        process = subprocess.Popen(cmd,
+                                   shell=True,
+                                   stdout=subprocess.PIPE,
+                                   stderr=subprocess.PIPE,
+                                   env=os.environ,
+                                   preexec_fn=os.setsid)
+
+        start = datetime.datetime.utcnow()
+        try:
+            cap = capture_from_process(process, 'sleep 20 &', 10)
+        except Exception as e:
+            self.fail('No exception should be thrown for a long running process which forks: {0}'.format(e))
+        duration = datetime.datetime.utcnow() - start
+
+        self.assertTrue(duration < datetime.timedelta(seconds=3))
+        self.assertEqual('[stdout]\ncannot collect stdout\n\n[stderr]\n', cap)
+
+    def test_process_behaved_non_forked(self):
+        """
+        non-forked process runs for 10 seconds, timeout is 20 seconds
+        we expect:
+            - test to run in just over 10 seconds
+            - no exception should be thrown
+            - output should be collected
+        """
+        cmd = "{0} -t 10".format(process_target)
+        process = subprocess.Popen(cmd,
+                                   shell=True,
+                                   stdout=subprocess.PIPE,
+                                   stderr=subprocess.PIPE,
+                                   env=os.environ,
+                                   preexec_fn=os.setsid)
+
+        try:
+            body = capture_from_process(process, 'sleep 10', 20)
+        except Exception as gen_ex:
+            self.fail('Unexpected exception: {0}'.format(gen_ex))
+
+        self.assertFalse('Timeout' in body)
+        self.assertTrue('Iteration 9' in body)
+        self.assertTrue('Iteration 10' in body)
+
+    def test_process_behaved_forked(self):
+        """
+        forked process runs for 10 seconds, timeout is 20 seconds
+        we expect:
+            - test to run in under 3 seconds
+            - no exception should be thrown
+            - output is not collected
+        """
+        cmd = "{0} -t 10 &".format(process_target)
+        process = subprocess.Popen(cmd,
+                                   shell=True,
+                                   stdout=subprocess.PIPE,
+                                   stderr=subprocess.PIPE,
+                                   env=os.environ,
+                                   preexec_fn=os.setsid)
+
+        start = datetime.datetime.utcnow()
+        try:
+            body = capture_from_process(process, 'sleep 10 &', 20)
+        except Exception as e:
+            self.fail('No exception should be thrown for a well behaved process which forks: {0}'.format(e))
+        duration = datetime.datetime.utcnow() - start
+
+        self.assertTrue(duration < datetime.timedelta(seconds=3))
+        self.assertEqual('[stdout]\ncannot collect stdout\n\n[stderr]\n', body)
+
+    def test_process_bad_pgid(self):
+        """
+        If a timeout is requested but the process is not the root of the process group, raise an exception.
+        """
+        stdout = "stdout\n"
+        stderr = "stderr\n"
+
+        cmd = process_cmd_template.format(process_target, stdout, stderr)
+        process = subprocess.Popen(cmd,
+                                   shell=True,
+                                   stdout=subprocess.PIPE,
+                                   stderr=subprocess.PIPE,
+                                   env=os.environ)
+
+        if sys.version_info < (2, 7):
+            self.assertRaises(ExtensionError, capture_from_process, process, cmd, 10, EXTENSION_ERROR_CODE)
+        else:
+            with self.assertRaises(ExtensionError) as ee:
+                capture_from_process(process, cmd, 10, EXTENSION_ERROR_CODE)
+
+            body = str(ee.exception)
+            if sys.version_info >= (3, 2):
+                self.assertRegex(body, "process group")
+            else:
+                self.assertRegexpMatches(body, "process group")
+
+            self.assertEqual(EXTENSION_ERROR_CODE, ee.exception.code)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff -Nru walinuxagent-2.2.21+really2.2.20/tests/utils/test_rest_util.py walinuxagent-2.2.32/tests/utils/test_rest_util.py
--- walinuxagent-2.2.21+really2.2.20/tests/utils/test_rest_util.py 2018-01-08 22:57:56.000000000 +0000
+++ walinuxagent-2.2.32/tests/utils/test_rest_util.py 2018-09-30 15:05:27.000000000 +0000
@@ -1,4 +1,4 @@
-# Copyright 2014 Microsoft Corporation
+# Copyright 2018 Microsoft Corporation
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,16 +12,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-# Requires Python 2.4+ and Openssl 1.0+
+# Requires Python 2.6+ and Openssl 1.0+
 #
-import os
-import unittest
-
 from azurelinuxagent.common.exception import HttpError, \
-    ProtocolError, \
     ResourceGoneError
+
 import azurelinuxagent.common.utils.restutil as restutil
+from azurelinuxagent.common.utils.restutil import HTTP_USER_AGENT
 from azurelinuxagent.common.future import httpclient, ustr
@@ -88,6 +86,7 @@
                          {"hostplugin":0, "protocol":0, "other":0},
                          restutil.IOErrorCounter._counts)
 
+
 class TestHttpOperations(AgentTestCase):
     def test_parse_url(self):
         test_uri = "http://abc.def/ghi#hash?jkl=mn"
@@ -117,6 +116,68 @@
         self.assertEquals(None, host)
         self.assertEquals(rel_uri, "None")
 
+    def test_cleanup_sas_tokens_from_urls_for_normal_cases(self):
+        test_url = "http://abc.def/ghi#hash?jkl=mn"
+        filtered_url = restutil.redact_sas_tokens_in_urls(test_url)
+        self.assertEquals(test_url, filtered_url)
+
+        test_url = "http://abc.def:80/"
+        filtered_url = restutil.redact_sas_tokens_in_urls(test_url)
+        self.assertEquals(test_url, filtered_url)
+
+        test_url = "http://abc.def/"
+        filtered_url = restutil.redact_sas_tokens_in_urls(test_url)
+        self.assertEquals(test_url, filtered_url)
+
+        test_url = "https://abc.def/ghi?jkl=mn"
+        filtered_url = restutil.redact_sas_tokens_in_urls(test_url)
+        self.assertEquals(test_url, filtered_url)
+
+    def test_cleanup_sas_tokens_from_urls_containing_sas_tokens(self):
+        # Contains pair of URLs (RawURL, RedactedURL)
+        urls_tuples = [("https://abc.def.xyz.123.net/functiontest/yokawasa.png?sig"
+                        "=sXBjML1Fpk9UnTBtajo05ZTFSk0LWFGvARZ6WlVcAog%3D&srt=o&ss=b&"
+                        "spr=https&sp=rl&sv=2016-05-31&se=2017-07-01T00%3A21%3A38Z&"
+                        "st=2017-07-01T23%3A16%3A38Z",
+                        "https://abc.def.xyz.123.net/functiontest/yokawasa.png?sig"
+                        "=" + restutil.REDACTED_TEXT +
+                        "&srt=o&ss=b&spr=https&sp=rl&sv=2016-05-31&se=2017-07-01T00"
+                        "%3A21%3A38Z&st=2017-07-01T23%3A16%3A38Z"),
+                       ("https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se=2018-07"
+                        "-26T02:20:44Z&st=2018-07-25T18:20:44Z&spr=https,"
+                        "http&sig=DavQgRtl99DsEPv9Xeb63GnLXCuaLYw5ay%2BE1cFckQY%3D",
+                        "https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se"
+                        "=2018-07-26T02:20:44Z&st=2018-07-25T18:20:44Z&spr=https,"
+                        "http&sig=" + restutil.REDACTED_TEXT),
+                       ("https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se=2018-07"
+                        "-26T02:20:44Z&st=2018-07-25T18:20:44Z&spr=https,"
+                        "http&sig=ttSCKmyjiDEeIzT9q7HtYYgbCRIXuesFSOhNEab52NM%3D",
+                        "https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se"
+                        "=2018-07-26T02:20:44Z&st=2018-07-25T18:20:44Z&spr=https,"
+                        "http&sig=" + restutil.REDACTED_TEXT),
+                       ("https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se=2018-07"
+                        "-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https,"
+                        "http&sig=X0imGmcj5KcBPFcqlfYjIZakzGrzONGbRv5JMOnGrwc%3D",
+                        "https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se"
+                        "=2018-07-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https,"
+                        "http&sig=" + restutil.REDACTED_TEXT),
+                       ("https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se=2018-07"
+                        "-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https,"
+                        "http&sig=9hfxYvaZzrMahtGO1OgMUiFGnDOtZXulZ3skkv1eVBg%3D",
+                        "https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se"
+                        "=2018-07-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https,"
+                        "http&sig=" + restutil.REDACTED_TEXT),
+                       ("https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se=2018-07"
+                        "-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https"
+                        "&sig=cmluQEHnOGsVK9NDm83ruuPdPWNQcerfjOAbkspNZXU%3D",
+                        "https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se"
+                        "=2018-07-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https&sig"
+                        "=" + restutil.REDACTED_TEXT)
+                       ]
+
+        for x in urls_tuples:
+            self.assertEquals(restutil.redact_sas_tokens_in_urls(x[0]), x[1])
+
     @patch('azurelinuxagent.common.conf.get_httpproxy_port')
     @patch('azurelinuxagent.common.conf.get_httpproxy_host')
     def test_get_http_proxy_none_is_default(self, mock_host, mock_port):
@@ -134,8 +195,8 @@
         h, p = restutil._get_http_proxy()
         self.assertEqual("host", h)
         self.assertEqual(None, p)
-        mock_host.assert_called_once()
-        mock_port.assert_called_once()
+        self.assertEqual(1, mock_host.call_count)
+        self.assertEqual(1, mock_port.call_count)
 
     @patch('azurelinuxagent.common.conf.get_httpproxy_port')
     @patch('azurelinuxagent.common.conf.get_httpproxy_host')
@@ -145,8 +206,8 @@
         h, p = restutil._get_http_proxy()
         self.assertEqual(None, h)
         self.assertEqual(None, p)
-        mock_host.assert_called_once()
-        mock_port.assert_not_called()
+        self.assertEqual(1, mock_host.call_count)
+        self.assertEqual(0, mock_port.call_count)
 
     @patch('azurelinuxagent.common.conf.get_httpproxy_host')
     def test_get_http_proxy_http_uses_httpproxy(self, mock_host):
@@ -197,9 +258,9 @@
         ])
         HTTPSConnection.assert_not_called()
         mock_conn.request.assert_has_calls([
-            call(method="GET", url="/bar", body=None, headers={})
+            call(method="GET", url="/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT, 'Connection': 'close'})
         ])
-        mock_conn.getresponse.assert_called_once()
+        self.assertEqual(1, mock_conn.getresponse.call_count)
         self.assertNotEquals(None, resp)
         self.assertEquals("TheResults", resp.read())
 
@@ -220,9 +281,9 @@
             call("foo", 443, timeout=10)
         ])
         mock_conn.request.assert_has_calls([
-            call(method="GET", url="/bar", body=None, headers={})
+            call(method="GET", url="/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT, 'Connection': 'close'})
         ])
-        mock_conn.getresponse.assert_called_once()
+        self.assertEqual(1, mock_conn.getresponse.call_count)
         self.assertNotEquals(None, resp)
         self.assertEquals("TheResults", resp.read())
 
@@ -244,9 +305,9 @@
         ])
         HTTPSConnection.assert_not_called()
         mock_conn.request.assert_has_calls([
-            call(method="GET", url="http://foo:80/bar", body=None, headers={})
+            call(method="GET", url="http://foo:80/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT, 'Connection': 'close'})
         ])
-        mock_conn.getresponse.assert_called_once()
+        self.assertEqual(1, mock_conn.getresponse.call_count)
         self.assertNotEquals(None, resp)
         self.assertEquals("TheResults", resp.read())
 
@@ -269,9 +330,9 @@
             call("foo.bar", 23333, timeout=10)
         ])
         mock_conn.request.assert_has_calls([
-            call(method="GET", url="https://foo:443/bar", body=None, headers={})
+            call(method="GET", url="https://foo:443/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT, 'Connection': 'close'})
        ])
-        mock_conn.getresponse.assert_called_once()
+        self.assertEqual(1, mock_conn.getresponse.call_count)
         self.assertNotEquals(None, resp)
         self.assertEquals("TheResults", resp.read())
 
@@ -391,9 +452,9 @@
 
     @patch("time.sleep")
     @patch("azurelinuxagent.common.utils.restutil._http_request")
-    def test_http_request_raises_for_bad_request(self, _http_request, _sleep):
+    def test_http_request_raises_for_resource_gone(self, _http_request, _sleep):
         _http_request.side_effect = [
-            Mock(status=httpclient.BAD_REQUEST)
+            Mock(status=httpclient.GONE)
         ]
         self.assertRaises(ResourceGoneError, restutil.http_get, "https://foo.bar")
 
@@ -401,9 +462,12 @@
 
     @patch("time.sleep")
     @patch("azurelinuxagent.common.utils.restutil._http_request")
-    def test_http_request_raises_for_resource_gone(self, _http_request, _sleep):
+    def test_http_request_raises_for_invalid_container_configuration(self, _http_request, _sleep):
+        def read():
+            return b'{ "errorCode": "InvalidContainerConfiguration", "message": "Invalid request." }'
+
         _http_request.side_effect = [
-            Mock(status=httpclient.GONE)
+            Mock(status=httpclient.BAD_REQUEST, reason='Bad Request', read=read)
         ]
         self.assertRaises(ResourceGoneError, restutil.http_get, "https://foo.bar")
diff -Nru walinuxagent-2.2.21+really2.2.20/tests/utils/test_shell_util.py walinuxagent-2.2.32/tests/utils/test_shell_util.py
--- walinuxagent-2.2.21+really2.2.20/tests/utils/test_shell_util.py 2018-01-08 22:57:56.000000000 +0000
+++ walinuxagent-2.2.32/tests/utils/test_shell_util.py 2018-09-30 15:05:27.000000000 +0000
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014 Microsoft Corporation
+# Copyright 2018 Microsoft Corporation
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-# Requires Python 2.4+ and Openssl 1.0+
+# Requires Python 2.6+ and Openssl 1.0+
 #
 from tests.tools import *
diff -Nru walinuxagent-2.2.21+really2.2.20/tests/utils/test_text_util.py walinuxagent-2.2.32/tests/utils/test_text_util.py
--- walinuxagent-2.2.21+really2.2.20/tests/utils/test_text_util.py 2018-01-08 22:57:56.000000000 +0000
+++ walinuxagent-2.2.32/tests/utils/test_text_util.py 2018-09-30 15:05:27.000000000 +0000
@@ -1,4 +1,4 @@
-# Copyright 2014 Microsoft Corporation
+# Copyright 2018 Microsoft Corporation
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,16 +12,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-# Requires Python 2.4+ and Openssl 1.0+
+# Requires Python 2.6+ and Openssl 1.0+
 #
+from distutils.version import LooseVersion as Version
 from tests.tools import *
-import uuid
-import unittest
-import os
-from azurelinuxagent.common.future import ustr
+
+import hashlib
+
 import azurelinuxagent.common.utils.textutil as textutil
-from azurelinuxagent.common.utils.textutil import Version
+
+from azurelinuxagent.common.future import ustr
 
 
 class TestTextUtil(AgentTestCase):
@@ -43,7 +44,7 @@
         data = "abcd\xa0e\xf0fghijk\xbblm"
 
         self.assertEqual("abcdXeXfghijkXlm",
-                          textutil.replace_non_ascii(data, replace_char='X'))
+                         textutil.replace_non_ascii(data, replace_char='X'))
 
         self.assertEqual('', textutil.replace_non_ascii(None))
@@ -138,6 +139,61 @@
 
         for t in data:
             self.assertEqual(t[2], textutil.swap_hexstring(t[0], width=t[1]))
-    
+
+    def test_compress(self):
+        result = textutil.compress('[stdout]\nHello World\n\n[stderr]\n\n')
+        self.assertEqual('eJyLLi5JyS8tieXySM3JyVcIzy/KSeHiigaKphYVxXJxAQDAYQr2', result)
+
+    def test_hash_empty_list(self):
+        result = textutil.hash_strings([])
+        self.assertEqual(b'\xda9\xa3\xee^kK\r2U\xbf\xef\x95`\x18\x90\xaf\xd8\x07\t', result)
+
+    def test_hash_list(self):
+        test_list = ["abc", "123"]
+        result_from_list = textutil.hash_strings(test_list)
+
+        test_string = "".join(test_list)
+        hash_from_string = hashlib.sha1()
+        hash_from_string.update(test_string.encode())
+
+        self.assertEqual(result_from_list, hash_from_string.digest())
+        self.assertEqual(hash_from_string.hexdigest(), '6367c48dd193d56ea7b0baad25b19455e529f5ee')
+
+    def test_empty_strings(self):
+        self.assertTrue(textutil.is_str_none_or_whitespace(None))
+        self.assertTrue(textutil.is_str_none_or_whitespace(' '))
+        self.assertTrue(textutil.is_str_none_or_whitespace('\t'))
+        self.assertTrue(textutil.is_str_none_or_whitespace('\n'))
+        self.assertTrue(textutil.is_str_none_or_whitespace(' \t'))
+        self.assertTrue(textutil.is_str_none_or_whitespace(' \r\n'))
+
+        self.assertTrue(textutil.is_str_empty(None))
+        self.assertTrue(textutil.is_str_empty(' '))
+        self.assertTrue(textutil.is_str_empty('\t'))
+        self.assertTrue(textutil.is_str_empty('\n'))
+        self.assertTrue(textutil.is_str_empty(' \t'))
+        self.assertTrue(textutil.is_str_empty(' \r\n'))
+
+        self.assertFalse(textutil.is_str_none_or_whitespace(u' \x01 '))
+        self.assertFalse(textutil.is_str_none_or_whitespace(u'foo'))
+        self.assertFalse(textutil.is_str_none_or_whitespace('bar'))
+
+        self.assertFalse(textutil.is_str_empty(u' \x01 '))
+        self.assertFalse(textutil.is_str_empty(u'foo'))
+        self.assertFalse(textutil.is_str_empty('bar'))
+
+        hex_null_1 = u'\x00'
+        hex_null_2 = u' \x00 '
+
+        self.assertFalse(textutil.is_str_none_or_whitespace(hex_null_1))
+        self.assertFalse(textutil.is_str_none_or_whitespace(hex_null_2))
+
+        self.assertTrue(textutil.is_str_empty(hex_null_1))
+        self.assertTrue(textutil.is_str_empty(hex_null_2))
+
+        self.assertNotEqual(textutil.is_str_none_or_whitespace(hex_null_1), textutil.is_str_empty(hex_null_1))
+        self.assertNotEqual(textutil.is_str_none_or_whitespace(hex_null_2), textutil.is_str_empty(hex_null_2))
+
+
 if __name__ == '__main__':
     unittest.main()
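
The test_cleanup_sas_tokens_from_urls_* cases added above pin down the contract of the new redaction helper in restutil: a URL with no sig query parameter passes through unchanged, while the value of sig is replaced by restutil.REDACTED_TEXT. A minimal sketch of that behaviour is shown below; it is an illustration only, not the agent's implementation, and the helper name, the regex and the REDACTED constant are assumptions made for the example.

    import re

    REDACTED = "<redacted>"  # assumption: stands in for restutil.REDACTED_TEXT

    def redact_sig(url):
        # Replace whatever follows 'sig=' up to the next '&' (or end of string);
        # URLs without a 'sig' parameter come back unchanged.
        return re.sub(r'(?<=sig=)[^&]+', REDACTED, url)

    print(redact_sig("https://abc.def/ghi?jkl=mn"))                      # unchanged
    print(redact_sig("https://host/?sv=2017-11-09&sig=SECRET%3D&sp=r"))  # sig value redacted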